From c2a3683eb1e0d7504883163b23c5e2af58b6b2e4 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Thu, 16 Oct 2025 18:54:09 +0100 Subject: [PATCH 01/67] initial commit --- Cargo.lock | 7 +++++++ Cargo.toml | 1 + zkvm_execution_layer/Cargo.toml | 9 +++++++++ zkvm_execution_layer/src/lib.rs | 4 ++++ 4 files changed, 21 insertions(+) create mode 100644 zkvm_execution_layer/Cargo.toml create mode 100644 zkvm_execution_layer/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 59e7bda170a..75758e28499 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11405,6 +11405,13 @@ dependencies = [ "zstd 0.11.2+zstd.1.5.2", ] +[[package]] +name = "zkvm_execution_layer" +version = "0.1.0" +dependencies = [ + "types", +] + [[package]] name = "zstd" version = "0.11.2+zstd.1.5.2" diff --git a/Cargo.toml b/Cargo.toml index ae84d645bb9..6ef5455eb62 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,6 +91,7 @@ members = [ "validator_client/validator_metrics", "validator_client/validator_services", "validator_manager", + "zkvm_execution_layer", ] resolver = "2" diff --git a/zkvm_execution_layer/Cargo.toml b/zkvm_execution_layer/Cargo.toml new file mode 100644 index 00000000000..079c18acd92 --- /dev/null +++ b/zkvm_execution_layer/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "zkvm_execution_layer" +version = "0.1.0" +edition = "2021" + +[dependencies] +types = { path = "../consensus/types" } + +[dev-dependencies] diff --git a/zkvm_execution_layer/src/lib.rs b/zkvm_execution_layer/src/lib.rs new file mode 100644 index 00000000000..679fcdb8849 --- /dev/null +++ b/zkvm_execution_layer/src/lib.rs @@ -0,0 +1,4 @@ +#[test] +fn add() { + assert!(1 + 1 == 2) +} \ No newline at end of file From f9f4517408662ba0bd0de8f8193b6e7d2a9eb4f1 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Thu, 16 Oct 2025 19:19:02 +0100 Subject: [PATCH 02/67] add consensus types --- consensus/types/src/execution_proof.rs | 123 +++++++++++++++ .../types/src/execution_proof_subnet_id.rs | 149 ++++++++++++++++++ 
consensus/types/src/lib.rs | 4 + 3 files changed, 276 insertions(+) create mode 100644 consensus/types/src/execution_proof.rs create mode 100644 consensus/types/src/execution_proof_subnet_id.rs diff --git a/consensus/types/src/execution_proof.rs b/consensus/types/src/execution_proof.rs new file mode 100644 index 00000000000..ce12730a515 --- /dev/null +++ b/consensus/types/src/execution_proof.rs @@ -0,0 +1,123 @@ +use crate::{ExecutionBlockHash, Hash256, VariableList}; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use ssz_types::typenum; +use std::fmt::{self, Debug}; +use tree_hash_derive::TreeHash; + +use super::ExecutionProofSubnetId; + +/// Maximum size of proof data in bytes +/// +/// Note: Most proofs will fit within 300KB. Some zkVMs have 1MB proofs (currently) +/// and so this number was set to accommodate for the most zkVMs. +pub const MAX_PROOF_DATA_BYTES: usize = 1_048_576; + +type ProofData = VariableList; + +/// ExecutionProof represents a cryptographic `proof of execution` that +/// an execution payload is valid. +/// +/// In short, it is proof that if we were to run a particular execution layer client +/// with the given execution payload, they would return the output values that are attached +/// to the proof. +/// +/// Each proof is associated with a specific subnet_id, which identifies the +/// zkVM and EL combination used to generate it. Multiple proofs from different +/// subnets can exist for the same execution payload, providing both client and EL diversity. +#[derive(Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq, Eq)] +pub struct ExecutionProof { + /// Which subnet/zkVM this proof belongs to + /// TODO(zkproofs): The node should provide this in themselves since they + /// know what subnet the proof came from. 
+ pub subnet_id: ExecutionProofSubnetId, + + /// The block hash of the execution payload this proof validates + pub block_hash: ExecutionBlockHash, + + /// The beacon block root corresponding to the beacon block + /// with the execution payload, that this proof attests to. + pub block_root: Hash256, + + /// The actual proof data + pub proof_data: ProofData, +} + +impl ExecutionProof { + pub fn new( + subnet_id: ExecutionProofSubnetId, + block_hash: ExecutionBlockHash, + block_root: Hash256, + proof_data: Vec, + ) -> Result { + let proof_data = ProofData::new(proof_data) + .map_err(|e| format!("Failed to create proof data: {:?}", e))?; + + Ok(Self { + subnet_id, + block_hash, + block_root, + proof_data, + }) + } + + /// Returns the size of the proof data in bytes + pub fn proof_data_size(&self) -> usize { + self.proof_data.len() + } + + /// Get a reference to the proof data as a slice + pub fn proof_data_slice(&self) -> &[u8] { + &self.proof_data + } + + /// Check if this proof is for a specific execution block hash + pub fn is_for_block(&self, block_hash: &ExecutionBlockHash) -> bool { + &self.block_hash == block_hash + } + + /// Check if this proof is from a specific subnet + pub fn is_from_subnet(&self, subnet_id: ExecutionProofSubnetId) -> bool { + self.subnet_id == subnet_id + } +} + +impl Debug for ExecutionProof { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ExecutionProof") + .field("subnet_id", &self.subnet_id) + .field("block_hash", &self.block_hash) + .field("block_root", &self.block_root) + .field("proof_data_size", &self.proof_data.len()) + .finish() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_execution_proof_too_large() { + let subnet_id = ExecutionProofSubnetId::new(0).unwrap(); + let block_hash = ExecutionBlockHash::zero(); + let block_root = Hash256::zero(); + let proof_data = vec![0u8; MAX_PROOF_DATA_BYTES + 1]; + + let result = ExecutionProof::new(subnet_id, block_hash, block_root, 
proof_data); + assert!(result.is_err()); + assert!(result.unwrap_err().contains("Proof data too large")); + } + + #[test] + fn test_execution_proof_max_size() { + let subnet_id = ExecutionProofSubnetId::new(0).unwrap(); + let block_hash = ExecutionBlockHash::zero(); + let block_root = Hash256::zero(); + let proof_data = vec![0u8; MAX_PROOF_DATA_BYTES]; + + let result = ExecutionProof::new(subnet_id, block_hash, block_root, proof_data); + assert!(result.is_ok()); + } + +} \ No newline at end of file diff --git a/consensus/types/src/execution_proof_subnet_id.rs b/consensus/types/src/execution_proof_subnet_id.rs new file mode 100644 index 00000000000..ad1e612ed18 --- /dev/null +++ b/consensus/types/src/execution_proof_subnet_id.rs @@ -0,0 +1,149 @@ +use serde::{Deserialize, Serialize}; +use ssz::{Decode, DecodeError, Encode}; +use std::fmt::{self, Display}; +use tree_hash::TreeHash; + +/// Number of execution proof subnets +/// Each subnet represents a different zkVM+EL combination +/// +/// TODO(zkproofs): The number 8 is a parameter that we will want to configure in the future +pub const EXECUTION_PROOF_SUBNET_COUNT: u8 = 8; + +/// ExecutionProofSubnetId identifies which zkVM/proof system subnet a proof belongs to. +/// +/// Note: There is a 1-1 mapping between subnet ID and a unique proof. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)] +pub struct ExecutionProofSubnetId(u8); + +impl Encode for ExecutionProofSubnetId { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.0.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.0.ssz_append(buf) + } + + fn as_ssz_bytes(&self) -> Vec { + self.0.as_ssz_bytes() + } +} + +impl Decode for ExecutionProofSubnetId { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let value = u8::from_ssz_bytes(bytes)?; + Self::new(value).map_err(DecodeError::BytesInvalid) + } +} + +impl TreeHash for ExecutionProofSubnetId { + fn tree_hash_type() -> tree_hash::TreeHashType { + ::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { + self.0.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + ::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.0.tree_hash_root() + } +} + +impl ExecutionProofSubnetId { + /// Creates a new ExecutionProofSubnetId if the value is valid + pub fn new(id: u8) -> Result { + // TODO(zkproofs): Do we need this check or can we + // get the subnet ID from the subnet we received the proof from + // making id always < the maximum amount of subnets. 
+ if id < EXECUTION_PROOF_SUBNET_COUNT { + Ok(Self(id)) + } else { + Err(format!( + "Invalid ExecutionProofSubnetId: {}, must be < {}", + id, EXECUTION_PROOF_SUBNET_COUNT + )) + } + } + + /// Returns the inner u8 value + pub fn as_u8(&self) -> u8 { + self.0 + } + + /// Returns the subnet ID as a usize + pub fn as_usize(&self) -> usize { + self.0 as usize + } + + /// Returns all valid subnet IDs + pub fn all() -> Vec { + (0..EXECUTION_PROOF_SUBNET_COUNT).map(Self).collect() + } +} + +impl Display for ExecutionProofSubnetId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl From for u8 { + fn from(subnet_id: ExecutionProofSubnetId) -> u8 { + subnet_id.0 + } +} + +impl TryFrom for ExecutionProofSubnetId { + type Error = String; + + fn try_from(value: u8) -> Result { + Self::new(value) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_valid_subnet_ids() { + for id in 0..EXECUTION_PROOF_SUBNET_COUNT { + assert!(ExecutionProofSubnetId::new(id).is_ok()); + } + } + + #[test] + fn test_invalid_subnet_ids() { + assert!(ExecutionProofSubnetId::new(EXECUTION_PROOF_SUBNET_COUNT).is_err()); + } + + #[test] + fn test_all_subnet_ids() { + let all = ExecutionProofSubnetId::all(); + assert_eq!(all.len(), EXECUTION_PROOF_SUBNET_COUNT as usize); + for (idx, subnet_id) in all.iter().enumerate() { + assert_eq!(subnet_id.as_usize(), idx); + } + } +} \ No newline at end of file diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 8e83fed1d9a..64202f99eb7 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -42,6 +42,8 @@ pub mod eth_spec; pub mod execution_block_hash; pub mod execution_payload; pub mod execution_payload_header; +pub mod execution_proof; +pub mod execution_proof_subnet_id; pub mod fork; pub mod fork_data; pub mod fork_name; @@ -177,6 +179,8 @@ pub use crate::execution_payload_header::{ ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, 
ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderGloas, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, }; +pub use crate::execution_proof::{ExecutionProof, MAX_PROOF_DATA_BYTES}; +pub use crate::execution_proof_subnet_id::{EXECUTION_PROOF_SUBNET_COUNT, ExecutionProofSubnetId}; pub use crate::execution_requests::{ExecutionRequests, RequestType}; pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; From accc82960d39b08a48d474de80f0c5b6dadaa3af Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Thu, 16 Oct 2025 19:51:32 +0100 Subject: [PATCH 03/67] add config --- Cargo.lock | 1 + zkvm_execution_layer/Cargo.toml | 2 + zkvm_execution_layer/src/config.rs | 249 +++++++++++++++++++++++++++++ zkvm_execution_layer/src/lib.rs | 4 +- 4 files changed, 255 insertions(+), 1 deletion(-) create mode 100644 zkvm_execution_layer/src/config.rs diff --git a/Cargo.lock b/Cargo.lock index 75758e28499..543f91ae067 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11409,6 +11409,7 @@ dependencies = [ name = "zkvm_execution_layer" version = "0.1.0" dependencies = [ + "serde", "types", ] diff --git a/zkvm_execution_layer/Cargo.toml b/zkvm_execution_layer/Cargo.toml index 079c18acd92..d44648203d7 100644 --- a/zkvm_execution_layer/Cargo.toml +++ b/zkvm_execution_layer/Cargo.toml @@ -6,4 +6,6 @@ edition = "2021" [dependencies] types = { path = "../consensus/types" } +serde = { version = "1.0", features = ["derive"] } + [dev-dependencies] diff --git a/zkvm_execution_layer/src/config.rs b/zkvm_execution_layer/src/config.rs new file mode 100644 index 00000000000..dee8cdb5aa9 --- /dev/null +++ b/zkvm_execution_layer/src/config.rs @@ -0,0 +1,249 @@ +use serde::{Deserialize, Serialize}; +use std::collections::HashSet; +use std::time::Duration; +use types::ExecutionProofSubnetId; + +const DEFAULT_PROOF_REQUEST_TIMEOUT: Duration = Duration::from_secs(5); + +const DEFAULT_GOSSIP_GRACE_PERIOD: Duration = Duration::from_millis(4000); + +/// Configuration for the zkVM 
Execution Layer +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ZKVMExecutionLayerConfig { + /// Which subnets/proofs that we are subscribed to and therefore need to + /// know how to verify + pub subscribed_subnets: HashSet, + + /// Minimum number of proofs required from _different_ subnets + /// in order for the node to mark an execution payload as VALID. + pub min_proofs_required: usize, + + /// Which subnets to generate proofs for (empty if not generating proofs) + pub generation_subnets: HashSet, + + /// Proof cache size (number of execution block hashes to cache proofs for) + pub proof_cache_size: usize, + + /// Timeout for proof requests via RPC + /// + /// Note: This is needed for the case that we need to request proofs via + /// RPC because we didn't receive them via gossip within `gossip_grace_period` + pub proof_request_timeout: Duration, + + /// Delay before falling back to RPC (gossip grace period) + /// During this time, we wait for `min_proofs_required` proofs to arrive via gossip + /// + /// TODO(zkproofs): This starts counting down from when the user receives the execution payload + pub gossip_grace_period: Duration, +} + +impl Default for ZKVMExecutionLayerConfig { + fn default() -> Self { + Self { + subscribed_subnets: HashSet::new(), + min_proofs_required: 1, + generation_subnets: HashSet::new(), + // TODO(zkproofs): This is somewhat arbitrary. 
The number was computed + // by NUMBER_OF_BLOCKS_BEFORE_FINALIZATION * NUM_PROOFS_PER_BLOCK = 64 * 8 + // We can change it to be more rigorous/scientific + proof_cache_size: 64 * 8, + // TODO(zkproofs): Also arbitrary + proof_request_timeout: DEFAULT_PROOF_REQUEST_TIMEOUT, + // TODO(zkproofs): Also arbitrary + gossip_grace_period: DEFAULT_GOSSIP_GRACE_PERIOD, + } + } +} + +impl ZKVMExecutionLayerConfig { + pub fn validate(&self) -> Result<(), String> { + if self.min_proofs_required == 0 { + return Err("min_proofs_required must be at least 1".to_string()); + } + + if self.proof_cache_size == 0 { + return Err("proof_cache_size must be at least 1".to_string()); + } + + // Ensure we subscribe to enough subnets to meet min_proofs_required + if self.subscribed_subnets.len() < self.min_proofs_required { + return Err(format!( + "subscribed_subnets ({}) must be >= min_proofs_required ({})", + self.subscribed_subnets.len(), + self.min_proofs_required + )); + } + + // Node can only generate proofs for subnets they are subscribed to + for subnet in &self.generation_subnets { + if !self.subscribed_subnets.contains(subnet) { + return Err(format!( + "generation_subnets must be a subset of subscribed_subnets (subnet {} not subscribed)", + subnet + )); + } + } + + Ok(()) + } + + /// Create a builder for the config (mostly for convenience, we can remove) + pub fn builder() -> StatelessExecutionLayerConfigBuilder { + StatelessExecutionLayerConfigBuilder::default() + } +} + +#[derive(Default)] +pub struct StatelessExecutionLayerConfigBuilder { + subscribed_subnets: HashSet, + min_proofs_required: Option, + generation_subnets: HashSet, + proof_cache_size: Option, + proof_request_timeout: Option, + gossip_grace_period: Option, +} + +impl StatelessExecutionLayerConfigBuilder { + pub fn subscribed_subnets(mut self, subnets: HashSet) -> Self { + self.subscribed_subnets = subnets; + self + } + + pub fn add_subscribed_subnet(mut self, subnet: ExecutionProofSubnetId) -> Self { + 
self.subscribed_subnets.insert(subnet); + self + } + + pub fn min_proofs_required(mut self, min: usize) -> Self { + self.min_proofs_required = Some(min); + self + } + + pub fn generation_subnets(mut self, subnets: HashSet) -> Self { + self.generation_subnets = subnets; + self + } + + pub fn add_generation_subnet(mut self, subnet: ExecutionProofSubnetId) -> Self { + self.generation_subnets.insert(subnet); + self + } + + pub fn proof_cache_size(mut self, size: usize) -> Self { + self.proof_cache_size = Some(size); + self + } + + pub fn proof_request_timeout(mut self, timeout: Duration) -> Self { + self.proof_request_timeout = Some(timeout); + self + } + + pub fn gossip_grace_period(mut self, period: Duration) -> Self { + self.gossip_grace_period = Some(period); + self + } + + /// Build the configuration + pub fn build(self) -> Result { + let config = ZKVMExecutionLayerConfig { + subscribed_subnets: self.subscribed_subnets, + min_proofs_required: self.min_proofs_required.unwrap_or(1), + generation_subnets: self.generation_subnets, + proof_cache_size: self.proof_cache_size.unwrap_or(1024), + proof_request_timeout: self + .proof_request_timeout + .unwrap_or_else(|| DEFAULT_PROOF_REQUEST_TIMEOUT), + gossip_grace_period: self + .gossip_grace_period + .unwrap_or_else(|| DEFAULT_GOSSIP_GRACE_PERIOD), + }; + + config.validate()?; + Ok(config) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_default_config_validation() { + let config = ZKVMExecutionLayerConfig::default(); + // Default config should fail validation due to no subnets subscribed to + assert!(config.validate().is_err()); + } + + #[test] + fn test_valid_config() { + let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + let subnet_1 = ExecutionProofSubnetId::new(1).unwrap(); + + let config = ZKVMExecutionLayerConfig::builder() + .add_subscribed_subnet(subnet_0) + .add_subscribed_subnet(subnet_1) + .min_proofs_required(2) + .build(); + + assert!(config.is_ok()); + } + + #[test] + fn 
test_min_proofs_too_high() { + let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + + let config = ZKVMExecutionLayerConfig::builder() + .add_subscribed_subnet(subnet_0) + .min_proofs_required(2) // Requires 2 but only subscribed to 1 subnet + .build(); + + assert!(config.is_err()); + } + + #[test] + fn test_generation_subnet_not_subscribed() { + let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + let subnet_1 = ExecutionProofSubnetId::new(1).unwrap(); + + let config = ZKVMExecutionLayerConfig::builder() + .add_subscribed_subnet(subnet_0) + .add_generation_subnet(subnet_1) // Generate for subnet 1 but not subscribed + .build(); + + assert!(config.is_err()); + } + + #[test] + fn test_valid_config_with_generation() { + let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + let subnet_1 = ExecutionProofSubnetId::new(1).unwrap(); + + let config = ZKVMExecutionLayerConfig::builder() + .add_subscribed_subnet(subnet_0) + .add_subscribed_subnet(subnet_1) + .add_generation_subnet(subnet_0) + .min_proofs_required(1) + .proof_cache_size(512) + .build(); + + assert!(config.is_ok()); + let config = config.unwrap(); + assert_eq!(config.subscribed_subnets.len(), 2); + assert_eq!(config.generation_subnets.len(), 1); + assert_eq!(config.min_proofs_required, 1); + assert_eq!(config.proof_cache_size, 512); + } + + #[test] + fn test_min_proofs_required_zero() { + let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + + let config = ZKVMExecutionLayerConfig::builder() + .add_subscribed_subnet(subnet_0) + .min_proofs_required(0) // Invalid: must be > 0 + .build(); + + assert!(config.is_err()); + } +} diff --git a/zkvm_execution_layer/src/lib.rs b/zkvm_execution_layer/src/lib.rs index 679fcdb8849..ee8360e8bce 100644 --- a/zkvm_execution_layer/src/lib.rs +++ b/zkvm_execution_layer/src/lib.rs @@ -1,4 +1,6 @@ +pub mod config; + #[test] fn add() { assert!(1 + 1 == 2) -} \ No newline at end of file +} From 7694d9e82dfed45a8bf28ab75c236816c26092b1 Mon Sep 17 00:00:00 2001 
From: Kevaundray Wedderburn Date: Thu, 16 Oct 2025 20:22:33 +0100 Subject: [PATCH 04/67] add proof cache --- Cargo.lock | 2 + zkvm_execution_layer/Cargo.toml | 4 + zkvm_execution_layer/src/lib.rs | 1 + zkvm_execution_layer/src/proof_cache.rs | 317 ++++++++++++++++++++++++ 4 files changed, 324 insertions(+) create mode 100644 zkvm_execution_layer/src/proof_cache.rs diff --git a/Cargo.lock b/Cargo.lock index 543f91ae067..b2411bc0d68 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11409,7 +11409,9 @@ dependencies = [ name = "zkvm_execution_layer" version = "0.1.0" dependencies = [ + "lru", "serde", + "tokio", "types", ] diff --git a/zkvm_execution_layer/Cargo.toml b/zkvm_execution_layer/Cargo.toml index d44648203d7..e43f7257777 100644 --- a/zkvm_execution_layer/Cargo.toml +++ b/zkvm_execution_layer/Cargo.toml @@ -8,4 +8,8 @@ types = { path = "../consensus/types" } serde = { version = "1.0", features = ["derive"] } +tokio = { version = "1", features = ["full"] } + +lru = "0.12" + [dev-dependencies] diff --git a/zkvm_execution_layer/src/lib.rs b/zkvm_execution_layer/src/lib.rs index ee8360e8bce..3165a2948af 100644 --- a/zkvm_execution_layer/src/lib.rs +++ b/zkvm_execution_layer/src/lib.rs @@ -1,4 +1,5 @@ pub mod config; +pub mod proof_cache; #[test] fn add() { diff --git a/zkvm_execution_layer/src/proof_cache.rs b/zkvm_execution_layer/src/proof_cache.rs new file mode 100644 index 00000000000..573261548d5 --- /dev/null +++ b/zkvm_execution_layer/src/proof_cache.rs @@ -0,0 +1,317 @@ +use lru::LruCache; +use std::num::NonZeroUsize; +use std::sync::Arc; +use tokio::sync::RwLock; +use types::{ExecutionBlockHash, ExecutionProof, ExecutionProofSubnetId}; + +/// Thread-safe LRU cache for execution proofs +/// +/// Stores proofs indexed by execution block hash. +/// +/// Note: Multiple proofs from different subnets can exist for the same block hash. 
+pub struct ProofCache { + cache: Arc>>>, +} + +impl ProofCache { + /// Create a new proof cache with the specified capacity + pub fn new(capacity: usize) -> Self { + let capacity = NonZeroUsize::new(capacity).expect("Cache capacity must be > 0"); + Self { + cache: Arc::new(RwLock::new(LruCache::new(capacity))), + } + } + + /// Insert a proof into the cache + /// + /// TODO(zkproofs): Add more docs + pub async fn insert(&self, proof: ExecutionProof) { + let block_hash = proof.block_hash; + let mut cache = self.cache.write().await; + + cache + .get_or_insert_mut(block_hash, Vec::new) + // TODO(zkproofs): can replace this with a HashSet so we don't need this + .retain(|p| p.subnet_id != proof.subnet_id); + + cache.get_mut(&block_hash).unwrap().push(proof); + } + + /// Get all proofs for a specific block hash + pub async fn get(&self, block_hash: &ExecutionBlockHash) -> Option> { + let cache = self.cache.read().await; + cache.peek(block_hash).cloned() + } + + /// Get proofs for a specific block hash from specific subnets + /// + /// TODO(zkproofs): This is cloning proofs, so can be expensive + pub async fn get_from_subnets( + &self, + block_hash: &ExecutionBlockHash, + subnet_ids: &[ExecutionProofSubnetId], + ) -> Vec { + let cache = self.cache.read().await; + + cache + .peek(block_hash) + .map(|proofs| { + proofs + .iter() + .filter(|p| subnet_ids.contains(&p.subnet_id)) + .cloned() + .collect() + }) + .unwrap_or_default() + } + + /// Check if we have the minimum required number of proofs from _different_ subnets + pub async fn has_required_proofs( + &self, + block_hash: &ExecutionBlockHash, + min_required: usize, + ) -> bool { + let cache = self.cache.read().await; + + cache + .peek(block_hash) + .map(|proofs| proofs.len() >= min_required) + .unwrap_or(false) + } + + /// Get the number of unique subnets/proofs we have for a particular execution payload + pub async fn subnet_count(&self, block_hash: &ExecutionBlockHash) -> usize { + let cache = 
self.cache.read().await; + + cache + .peek(block_hash) + .map(|proofs| proofs.len()) + .unwrap_or(0) + } + + /// Check if a proof exists from a specific subnet for a block + pub async fn has_proof_from_subnet( + &self, + block_hash: &ExecutionBlockHash, + subnet_id: ExecutionProofSubnetId, + ) -> bool { + let cache = self.cache.read().await; + + cache + .peek(block_hash) + .map(|proofs| proofs.iter().any(|p| p.subnet_id == subnet_id)) + .unwrap_or(false) + } + + /// Remove all proofs for a specific block hash + pub async fn remove(&self, block_hash: &ExecutionBlockHash) -> Option> { + let mut cache = self.cache.write().await; + cache.pop(block_hash) + } + + /// Clear all cached proofs + pub async fn clear(&self) { + let mut cache = self.cache.write().await; + cache.clear(); + } + + /// Get the current number of entries in the cache + pub async fn len(&self) -> usize { + let cache = self.cache.read().await; + cache.len() + } + + /// Check if the cache is empty + pub async fn is_empty(&self) -> bool { + let cache = self.cache.read().await; + cache.is_empty() + } +} + +impl Clone for ProofCache { + fn clone(&self) -> Self { + Self { + cache: Arc::clone(&self.cache), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use types::Hash256; + + fn create_test_proof( + subnet_id: ExecutionProofSubnetId, + block_hash: ExecutionBlockHash, + ) -> ExecutionProof { + use types::FixedBytesExtended; + ExecutionProof::new(subnet_id, block_hash, Hash256::zero(), vec![1, 2, 3]).unwrap() + } + + #[tokio::test] + async fn test_cache_insert_and_get() { + let cache = ProofCache::new(10); + let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + let block_hash = ExecutionBlockHash::repeat_byte(1); + let proof = create_test_proof(subnet_0, block_hash); + + cache.insert(proof.clone()).await; + + let retrieved = cache.get(&block_hash).await; + assert!(retrieved.is_some()); + assert_eq!(retrieved.unwrap().len(), 1); + } + + #[tokio::test] + async fn test_cache_multiple_subnets() { + 
let cache = ProofCache::new(10); + let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + let subnet_1 = ExecutionProofSubnetId::new(1).unwrap(); + let block_hash = ExecutionBlockHash::repeat_byte(1); + + let proof_0 = create_test_proof(subnet_0, block_hash); + let proof_1 = create_test_proof(subnet_1, block_hash); + + cache.insert(proof_0).await; + cache.insert(proof_1).await; + + let proofs = cache.get(&block_hash).await.unwrap(); + assert_eq!(proofs.len(), 2); + assert_eq!(cache.subnet_count(&block_hash).await, 2); + } + + #[tokio::test] + async fn test_cache_replace_same_subnet() { + let cache = ProofCache::new(10); + let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + let block_hash = ExecutionBlockHash::repeat_byte(1); + + let mut proof_1 = create_test_proof(subnet_0, block_hash); + proof_1.proof_data = vec![1].into(); // modify proof_data, so its a different execution proof + let proof_2 = create_test_proof(subnet_0, block_hash); + + cache.insert(proof_1).await; + cache.insert(proof_2.clone()).await; + + let proofs = cache.get(&block_hash).await.unwrap(); + assert_eq!(proofs.len(), 1); // Should only have one proof from subnet 0 + + assert_eq!(proofs[0], proof_2); // proof_2 should replace proof_1, since they are for the same subnet and blockhash + } + + #[tokio::test] + async fn test_has_required_proofs() { + let cache = ProofCache::new(10); + let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + let subnet_1 = ExecutionProofSubnetId::new(1).unwrap(); + let block_hash = ExecutionBlockHash::repeat_byte(1); + + assert!(!cache.has_required_proofs(&block_hash, 2).await); + + cache.insert(create_test_proof(subnet_0, block_hash)).await; + assert!(!cache.has_required_proofs(&block_hash, 2).await); + + cache.insert(create_test_proof(subnet_1, block_hash)).await; + assert!(cache.has_required_proofs(&block_hash, 2).await); + } + + #[tokio::test] + async fn test_has_proof_from_subnet() { + let cache = ProofCache::new(10); + let subnet_0 = 
ExecutionProofSubnetId::new(0).unwrap(); + let subnet_1 = ExecutionProofSubnetId::new(1).unwrap(); + let block_hash = ExecutionBlockHash::repeat_byte(1); + + assert!(!cache.has_proof_from_subnet(&block_hash, subnet_0).await); + + cache.insert(create_test_proof(subnet_0, block_hash)).await; + + assert!(cache.has_proof_from_subnet(&block_hash, subnet_0).await); + assert!(!cache.has_proof_from_subnet(&block_hash, subnet_1).await); + } + + #[tokio::test] + async fn test_get_from_subnets() { + let cache = ProofCache::new(10); + let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + let subnet_1 = ExecutionProofSubnetId::new(1).unwrap(); + let subnet_2 = ExecutionProofSubnetId::new(2).unwrap(); + let block_hash = ExecutionBlockHash::repeat_byte(1); + + cache.insert(create_test_proof(subnet_0, block_hash)).await; + cache.insert(create_test_proof(subnet_1, block_hash)).await; + cache.insert(create_test_proof(subnet_2, block_hash)).await; + + let proofs = cache + .get_from_subnets(&block_hash, &[subnet_0, subnet_2]) + .await; + assert_eq!(proofs.len(), 2); + assert!(proofs.iter().any(|p| p.subnet_id == subnet_0)); + assert!(proofs.iter().any(|p| p.subnet_id == subnet_2)); + assert!(!proofs.iter().any(|p| p.subnet_id == subnet_1)); + } + + #[tokio::test] + async fn test_cache_remove() { + let cache = ProofCache::new(10); + let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + let block_hash = ExecutionBlockHash::repeat_byte(1); + + cache.insert(create_test_proof(subnet_0, block_hash)).await; + assert!(cache.get(&block_hash).await.is_some()); + + let removed = cache.remove(&block_hash).await; + assert!(removed.is_some()); + assert!(cache.get(&block_hash).await.is_none()); + } + + #[tokio::test] + async fn test_cache_clear() { + let cache = ProofCache::new(10); + let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + let block_hash_1 = ExecutionBlockHash::repeat_byte(1); + let block_hash_2 = ExecutionBlockHash::repeat_byte(2); + + cache + 
.insert(create_test_proof(subnet_0, block_hash_1)) + .await; + cache + .insert(create_test_proof(subnet_0, block_hash_2)) + .await; + + assert_eq!(cache.len().await, 2); + + cache.clear().await; + + assert_eq!(cache.len().await, 0); + assert!(cache.is_empty().await); + } + + #[tokio::test] + async fn test_cache_lru_eviction() { + let cache = ProofCache::new(2); + let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + let block_hash_1 = ExecutionBlockHash::repeat_byte(1); + let block_hash_2 = ExecutionBlockHash::repeat_byte(2); + let block_hash_3 = ExecutionBlockHash::repeat_byte(3); + + cache + .insert(create_test_proof(subnet_0, block_hash_1)) + .await; + cache + .insert(create_test_proof(subnet_0, block_hash_2)) + .await; + cache + .insert(create_test_proof(subnet_0, block_hash_3)) + .await; + + // Cache should only hold 2 entries + assert_eq!(cache.len().await, 2); + + // block_hash_1 should be evicted (last recently used) + assert!(cache.get(&block_hash_1).await.is_none()); + assert!(cache.get(&block_hash_2).await.is_some()); + assert!(cache.get(&block_hash_3).await.is_some()); + } +} From 3a372386ae8b81a810d523a2f5d20a5678387b79 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Thu, 16 Oct 2025 22:06:41 +0100 Subject: [PATCH 05/67] add proof gen/verify traits --- Cargo.lock | 2 + zkvm_execution_layer/Cargo.toml | 8 +-- zkvm_execution_layer/src/lib.rs | 3 ++ zkvm_execution_layer/src/proof_generation.rs | 53 +++++++++++++++++++ .../src/proof_verification.rs | 46 ++++++++++++++++ 5 files changed, 108 insertions(+), 4 deletions(-) create mode 100644 zkvm_execution_layer/src/proof_generation.rs create mode 100644 zkvm_execution_layer/src/proof_verification.rs diff --git a/Cargo.lock b/Cargo.lock index b2411bc0d68..d952566318d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11409,8 +11409,10 @@ dependencies = [ name = "zkvm_execution_layer" version = "0.1.0" dependencies = [ + "async-trait", "lru", "serde", + "thiserror 2.0.12", "tokio", "types", ] diff 
--git a/zkvm_execution_layer/Cargo.toml b/zkvm_execution_layer/Cargo.toml index e43f7257777..e17cf9a61e0 100644 --- a/zkvm_execution_layer/Cargo.toml +++ b/zkvm_execution_layer/Cargo.toml @@ -4,12 +4,12 @@ version = "0.1.0" edition = "2021" [dependencies] -types = { path = "../consensus/types" } - +async-trait = "0.1" #TODO(zkproofs): Remove +lru = "0.12" serde = { version = "1.0", features = ["derive"] } - tokio = { version = "1", features = ["full"] } +thiserror = "2" +types = { path = "../consensus/types" } -lru = "0.12" [dev-dependencies] diff --git a/zkvm_execution_layer/src/lib.rs b/zkvm_execution_layer/src/lib.rs index 3165a2948af..e8bc8d5bb8d 100644 --- a/zkvm_execution_layer/src/lib.rs +++ b/zkvm_execution_layer/src/lib.rs @@ -1,6 +1,9 @@ pub mod config; pub mod proof_cache; +pub mod proof_generation; +pub mod proof_verification; + #[test] fn add() { assert!(1 + 1 == 2) diff --git a/zkvm_execution_layer/src/proof_generation.rs b/zkvm_execution_layer/src/proof_generation.rs new file mode 100644 index 00000000000..62c3361d90b --- /dev/null +++ b/zkvm_execution_layer/src/proof_generation.rs @@ -0,0 +1,53 @@ +use async_trait::async_trait; +use std::sync::Arc; +use thiserror::Error; +use types::{ExecutionProof, ExecutionProofSubnetId}; + +/// Result type for proof generation operations +pub type ProofGenerationResult = Result; + +/// Errors that can occur during proof generation +#[derive(Debug, Error)] +pub enum ProofGenerationError { + #[error("Proof generation failed: {0}")] + ProofGenerationFailed(String), + + #[error("Missing execution witness data: {0}")] + MissingWitnessData(String), + + #[error("Invalid execution witness: {0}")] + InvalidWitness(String), + + #[error("Proof generation timeout")] + Timeout, + + #[error("Insufficient resources: {0}")] + InsufficientResources(String), + + #[error("Internal error: {0}")] + Internal(String), +} + +/// Trait for proof generation (one implementation per zkVM) +/// +/// Each proof system (RISC Zero, SP1, etc.) 
implements this trait +/// to generate proofs for execution payloads from their subnet. +#[async_trait] +pub trait ProofGenerator: Send + Sync { + /// Generate a proof for the given execution payload + /// + /// Note: This is a computationally expensive operation and should be run + /// in a background task. + async fn generate( + &self, + payload_hash: &types::ExecutionBlockHash, + block_root: &types::Hash256, + ) -> ProofGenerationResult; + + /// Get the subnet ID this generator produces proofs for + fn subnet_id(&self) -> ExecutionProofSubnetId; +} + +/// Type-erased proof generator mainly for convenience +/// TODO(zkproofs): Check if we can remove this +pub type DynProofGenerator = Arc; diff --git a/zkvm_execution_layer/src/proof_verification.rs b/zkvm_execution_layer/src/proof_verification.rs new file mode 100644 index 00000000000..dc768e13c5f --- /dev/null +++ b/zkvm_execution_layer/src/proof_verification.rs @@ -0,0 +1,46 @@ +use std::sync::Arc; +use thiserror::Error; +use types::{ExecutionProof, ExecutionProofSubnetId}; + +/// Result type for proof verification operations +pub type ProofVerificationResult = Result; + +/// Errors that can occur during proof verification +#[derive(Debug, Error)] +pub enum VerificationError { + #[error("Proof verification failed: {0}")] + VerificationFailed(String), + + #[error("Invalid proof format: {0}")] + InvalidProofFormat(String), + + #[error("Unsupported subnet: {0}")] + UnsupportedSubnet(ExecutionProofSubnetId), + + #[error("Proof size mismatch: expected {expected}, got {actual}")] + ProofSizeMismatch { expected: usize, actual: usize }, + + #[error("Internal error: {0}")] + Internal(String), +} + +/// Trait for proof verification (one implementation per zkVM+EL combination) +pub trait ProofVerifier: Send + Sync { + /// Verify that the proof is valid for the given execution payload + /// + /// Returns : + /// - Ok(true) if valid, + /// - Ok(false) if invalid (but well-formed) + /// - Err if the proof is malformed or 
verification cannot be performed. + /// TODO(zkproofs): Maybe make Ok(false) an enum variant + fn verify( + &self, + payload_hash: &types::ExecutionBlockHash, + proof: &ExecutionProof, + ) -> ProofVerificationResult; + + fn subnet_id(&self) -> ExecutionProofSubnetId; +} + +/// Type-erased proof verifier +pub type DynProofVerifier = Arc; From 7368ba2f4cca05285f1cbdaf6039e0c1635985ee Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Thu, 16 Oct 2025 22:55:49 +0100 Subject: [PATCH 06/67] add registry proof gen/verification --- Cargo.lock | 1 + zkvm_execution_layer/Cargo.toml | 1 + zkvm_execution_layer/src/dummy_proof_gen.rs | 118 +++++++++++++++ .../src/dummy_proof_verifier.rs | 120 +++++++++++++++ zkvm_execution_layer/src/lib.rs | 6 + .../src/registry_proof_gen.rs | 134 +++++++++++++++++ .../src/registry_proof_verification.rs | 138 ++++++++++++++++++ 7 files changed, 518 insertions(+) create mode 100644 zkvm_execution_layer/src/dummy_proof_gen.rs create mode 100644 zkvm_execution_layer/src/dummy_proof_verifier.rs create mode 100644 zkvm_execution_layer/src/registry_proof_gen.rs create mode 100644 zkvm_execution_layer/src/registry_proof_verification.rs diff --git a/Cargo.lock b/Cargo.lock index d952566318d..97d2c9c8e2c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11410,6 +11410,7 @@ name = "zkvm_execution_layer" version = "0.1.0" dependencies = [ "async-trait", + "hashbrown 0.15.2", "lru", "serde", "thiserror 2.0.12", diff --git a/zkvm_execution_layer/Cargo.toml b/zkvm_execution_layer/Cargo.toml index e17cf9a61e0..9d7882369e4 100644 --- a/zkvm_execution_layer/Cargo.toml +++ b/zkvm_execution_layer/Cargo.toml @@ -6,6 +6,7 @@ edition = "2021" [dependencies] async-trait = "0.1" #TODO(zkproofs): Remove lru = "0.12" +hashbrown = "0.15" serde = { version = "1.0", features = ["derive"] } tokio = { version = "1", features = ["full"] } thiserror = "2" diff --git a/zkvm_execution_layer/src/dummy_proof_gen.rs b/zkvm_execution_layer/src/dummy_proof_gen.rs new file mode 
100644 index 00000000000..5e4437c1485 --- /dev/null +++ b/zkvm_execution_layer/src/dummy_proof_gen.rs @@ -0,0 +1,118 @@ +use crate::proof_generation::{ProofGenerationError, ProofGenerationResult, ProofGenerator}; +use async_trait::async_trait; +use std::time::Duration; +use tokio::time::sleep; +use types::{ExecutionBlockHash, ExecutionProof, ExecutionProofSubnetId, Hash256}; + +/// Dummy proof generator for testing +/// +/// This generator simulates the proof generation process with a configurable delay +/// and creates dummy proofs. +pub struct DummyProofGenerator { + subnet_id: ExecutionProofSubnetId, + generation_delay: Duration, +} + +impl DummyProofGenerator { + /// Create a new dummy generator for the specified subnet + pub fn new(subnet_id: ExecutionProofSubnetId) -> Self { + Self { + subnet_id, + generation_delay: Duration::from_millis(50), // Simulate some work + } + } + + /// Create a new dummy generator with custom generation delay + pub fn with_delay(subnet_id: ExecutionProofSubnetId, delay: Duration) -> Self { + Self { + subnet_id, + generation_delay: delay, + } + } +} + +#[async_trait] +impl ProofGenerator for DummyProofGenerator { + async fn generate( + &self, + payload_hash: &ExecutionBlockHash, + block_root: &Hash256, + ) -> ProofGenerationResult { + // Simulate proof generation work + if !self.generation_delay.is_zero() { + sleep(self.generation_delay).await; + } + + // Create a dummy proof with some deterministic data + let proof_data = vec![ + 0xFF, // Magic byte for dummy proof + self.subnet_id.as_u8(), + // Include some payload hash bytes + payload_hash.0[0], + payload_hash.0[1], + payload_hash.0[2], + payload_hash.0[3], + ]; + + ExecutionProof::new(self.subnet_id, *payload_hash, *block_root, proof_data) + .map_err(ProofGenerationError::ProofGenerationFailed) + } + + fn subnet_id(&self) -> ExecutionProofSubnetId { + self.subnet_id + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_dummy_generator_success() { 
+ let subnet = ExecutionProofSubnetId::new(0).unwrap(); + let generator = DummyProofGenerator::new(subnet); + let block_hash = ExecutionBlockHash::repeat_byte(1); + let block_root = Hash256::repeat_byte(2); + + let result = generator.generate(&block_hash, &block_root).await; + assert!(result.is_ok()); + + let proof = result.unwrap(); + assert_eq!(proof.subnet_id, subnet); + assert_eq!(proof.block_hash, block_hash); + assert_eq!(proof.block_root, block_root); + assert!(proof.proof_data_size() > 0); + } + + #[tokio::test] + async fn test_dummy_generator_deterministic() { + let subnet = ExecutionProofSubnetId::new(1).unwrap(); + let generator = DummyProofGenerator::new(subnet); + let block_hash = ExecutionBlockHash::repeat_byte(42); + let block_root = Hash256::repeat_byte(99); + + // Generate twice + let proof1 = generator.generate(&block_hash, &block_root).await.unwrap(); + let proof2 = generator.generate(&block_hash, &block_root).await.unwrap(); + + // Should be identical + assert_eq!(proof1.proof_data_slice(), proof2.proof_data_slice()); + } + + #[tokio::test] + async fn test_dummy_generator_custom_delay() { + // TODO(zkproofs): Maybe remove, mainly need it as a temp check + let subnet = ExecutionProofSubnetId::new(0).unwrap(); + let delay = Duration::from_millis(1); + let generator = DummyProofGenerator::with_delay(subnet, delay); + let block_hash = ExecutionBlockHash::repeat_byte(1); + let block_root = Hash256::repeat_byte(2); + + let start = tokio::time::Instant::now(); + let result = generator.generate(&block_hash, &block_root).await; + let elapsed = start.elapsed(); + + assert!(result.is_ok()); + assert!(elapsed >= delay); + } +} diff --git a/zkvm_execution_layer/src/dummy_proof_verifier.rs b/zkvm_execution_layer/src/dummy_proof_verifier.rs new file mode 100644 index 00000000000..d5dae89e4ba --- /dev/null +++ b/zkvm_execution_layer/src/dummy_proof_verifier.rs @@ -0,0 +1,120 @@ +use crate::proof_verification::{ProofVerificationResult, ProofVerifier, 
VerificationError}; +use std::time::Duration; +use types::{ExecutionBlockHash, ExecutionProof, ExecutionProofSubnetId}; + +/// Dummy proof verifier for testing +/// +/// This verifier simulates the verification process with a configurable delay +/// and always returns successful verification. +pub struct DummyVerifier { + subnet_id: ExecutionProofSubnetId, + verification_delay: Duration, +} + +impl DummyVerifier { + /// Create a new dummy verifier for the specified subnet + pub fn new(subnet_id: ExecutionProofSubnetId) -> Self { + Self { + subnet_id, + verification_delay: Duration::from_millis(10), + } + } + + /// Create a new dummy verifier with custom verification delay + pub fn with_delay(subnet_id: ExecutionProofSubnetId, delay: Duration) -> Self { + Self { + subnet_id, + verification_delay: delay, + } + } +} + +impl ProofVerifier for DummyVerifier { + fn verify( + &self, + payload_hash: &ExecutionBlockHash, + proof: &ExecutionProof, + ) -> ProofVerificationResult { + // Check that the proof is for the correct subnet + if proof.subnet_id != self.subnet_id { + return Err(VerificationError::UnsupportedSubnet(proof.subnet_id)); + } + + // Check that the proof is for the correct payload + if &proof.block_hash != payload_hash { + return Err(VerificationError::VerificationFailed(format!( + "Proof block hash mismatch: expected {}, got {}", + payload_hash, proof.block_hash + ))); + } + + // Simulate verification work + if !self.verification_delay.is_zero() { + std::thread::sleep(self.verification_delay); + } + + // Dummy verifier always succeeds + Ok(true) + } + + fn subnet_id(&self) -> ExecutionProofSubnetId { + self.subnet_id + } +} + +#[cfg(test)] +mod tests { + use super::*; + use types::{FixedBytesExtended, Hash256}; + + fn create_test_proof( + subnet_id: ExecutionProofSubnetId, + block_hash: ExecutionBlockHash, + ) -> ExecutionProof { + ExecutionProof::new(subnet_id, block_hash, Hash256::zero(), vec![1, 2, 3, 4]).unwrap() + } + + #[tokio::test] + async fn 
test_dummy_verifier_success() { + let subnet = ExecutionProofSubnetId::new(0).unwrap(); + let verifier = DummyVerifier::new(subnet); + let block_hash = ExecutionBlockHash::zero(); + let proof = create_test_proof(subnet, block_hash); + + let result = verifier.verify(&block_hash, &proof); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), true); + } + + #[tokio::test] + async fn test_dummy_verifier_wrong_subnet() { + let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + let subnet_1 = ExecutionProofSubnetId::new(1).unwrap(); + let verifier = DummyVerifier::new(subnet_0); + let block_hash = ExecutionBlockHash::zero(); + let proof = create_test_proof(subnet_1, block_hash); + + let result = verifier.verify(&block_hash, &proof); + assert!(result.is_err()); + assert!(matches!( + result.unwrap_err(), + VerificationError::UnsupportedSubnet(_) + )); + } + + #[tokio::test] + async fn test_dummy_verifier_wrong_block_hash() { + let subnet = ExecutionProofSubnetId::new(0).unwrap(); + let verifier = DummyVerifier::new(subnet); + let block_hash_1 = ExecutionBlockHash::repeat_byte(1); + let block_hash_2 = ExecutionBlockHash::repeat_byte(2); + let proof = create_test_proof(subnet, block_hash_1); + + let result = verifier.verify(&block_hash_2, &proof); + assert!(result.is_err()); + assert!(matches!( + result.unwrap_err(), + VerificationError::VerificationFailed(_) + )); + } +} diff --git a/zkvm_execution_layer/src/lib.rs b/zkvm_execution_layer/src/lib.rs index e8bc8d5bb8d..194750989e8 100644 --- a/zkvm_execution_layer/src/lib.rs +++ b/zkvm_execution_layer/src/lib.rs @@ -4,6 +4,12 @@ pub mod proof_cache; pub mod proof_generation; pub mod proof_verification; +pub mod registry_proof_gen; +pub mod registry_proof_verification; + +pub mod dummy_proof_gen; +pub mod dummy_proof_verifier; + #[test] fn add() { assert!(1 + 1 == 2) diff --git a/zkvm_execution_layer/src/registry_proof_gen.rs b/zkvm_execution_layer/src/registry_proof_gen.rs new file mode 100644 index 
00000000000..90f768c9207 --- /dev/null +++ b/zkvm_execution_layer/src/registry_proof_gen.rs @@ -0,0 +1,134 @@ +use crate::dummy_proof_gen::DummyProofGenerator; +use crate::proof_generation::DynProofGenerator; +use hashbrown::HashMap; +use std::collections::HashSet; +use std::sync::Arc; +use types::ExecutionProofSubnetId; + +/// Registry mapping subnet IDs to proof generators +/// +/// Each subnet can have a different zkVM/proof system, and this registry +/// maintains the mapping from subnet ID to the appropriate generator implementation. +/// Not all subnets need generators - nodes can verify without generating. +#[derive(Clone)] +pub struct GeneratorRegistry { + generators: HashMap, +} + +impl GeneratorRegistry { + /// Create a new empty generator registry + pub fn new() -> Self { + Self { + generators: HashMap::new(), + } + } + + /// Create a registry with dummy generators for specified subnets + /// This is useful for Phase 1 testing + pub fn new_with_dummy_generators(enabled_subnets: HashSet) -> Self { + let mut generators = HashMap::new(); + + for subnet_id in enabled_subnets { + generators.insert( + subnet_id, + Arc::new(DummyProofGenerator::new(subnet_id)) as DynProofGenerator, + ); + } + + Self { generators } + } + + pub fn register_generator(&mut self, generator: DynProofGenerator) { + let subnet_id = generator.subnet_id(); + self.generators.insert(subnet_id, generator); + } + + pub fn get_generator(&self, subnet_id: ExecutionProofSubnetId) -> Option { + self.generators.get(&subnet_id).cloned() + } + + /// Check if a generator is registered for a subnet + pub fn has_generator(&self, subnet_id: ExecutionProofSubnetId) -> bool { + self.generators.contains_key(&subnet_id) + } + + /// Get the number of registered generators + pub fn len(&self) -> usize { + self.generators.len() + } + + /// Check if the registry is empty + pub fn is_empty(&self) -> bool { + self.generators.is_empty() + } + + pub fn subnet_ids(&self) -> Vec { + 
self.generators.keys().copied().collect() + } +} + +impl Default for GeneratorRegistry { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_dummy_generators_registry() { + let mut enabled_subnets = HashSet::new(); + enabled_subnets.insert(ExecutionProofSubnetId::new(0).unwrap()); + enabled_subnets.insert(ExecutionProofSubnetId::new(1).unwrap()); + + let registry = GeneratorRegistry::new_with_dummy_generators(enabled_subnets); + assert!(!registry.is_empty()); + assert_eq!(registry.len(), 2); + + assert!(registry.has_generator(ExecutionProofSubnetId::new(0).unwrap())); + assert!(registry.has_generator(ExecutionProofSubnetId::new(1).unwrap())); + assert!(!registry.has_generator(ExecutionProofSubnetId::new(2).unwrap())); + } + + #[test] + fn test_register_generator() { + let mut registry = GeneratorRegistry::new(); + let subnet_id = ExecutionProofSubnetId::new(0).unwrap(); + let generator = Arc::new(DummyProofGenerator::new(subnet_id)); + + registry.register_generator(generator); + + assert_eq!(registry.len(), 1); + assert!(registry.has_generator(subnet_id)); + } + + #[test] + fn test_get_generator() { + let mut enabled_subnets = HashSet::new(); + enabled_subnets.insert(ExecutionProofSubnetId::new(3).unwrap()); + + let registry = GeneratorRegistry::new_with_dummy_generators(enabled_subnets); + let subnet_id = ExecutionProofSubnetId::new(3).unwrap(); + + let generator = registry.get_generator(subnet_id); + assert!(generator.is_some()); + assert_eq!(generator.unwrap().subnet_id(), subnet_id); + } + + #[test] + fn test_subnet_ids() { + let mut enabled_subnets = HashSet::new(); + enabled_subnets.insert(ExecutionProofSubnetId::new(0).unwrap()); + enabled_subnets.insert(ExecutionProofSubnetId::new(5).unwrap()); + + let registry = GeneratorRegistry::new_with_dummy_generators(enabled_subnets.clone()); + let subnet_ids = registry.subnet_ids(); + + assert_eq!(subnet_ids.len(), 2); + for subnet_id in enabled_subnets { 
+ assert!(subnet_ids.contains(&subnet_id)); + } + } +} diff --git a/zkvm_execution_layer/src/registry_proof_verification.rs b/zkvm_execution_layer/src/registry_proof_verification.rs new file mode 100644 index 00000000000..4017e44e8f6 --- /dev/null +++ b/zkvm_execution_layer/src/registry_proof_verification.rs @@ -0,0 +1,138 @@ +use crate::dummy_proof_verifier::DummyVerifier; +use crate::proof_verification::DynProofVerifier; +use hashbrown::HashMap; +use std::sync::Arc; +use types::ExecutionProofSubnetId; + +/// Registry mapping subnet IDs to proof verifiers +/// +/// Each subnet can have a different zkVM/proof system, and this registry +/// maintains the mapping from subnet ID to the appropriate verifier implementation. +#[derive(Clone)] +pub struct VerifierRegistry { + verifiers: HashMap, +} + +impl VerifierRegistry { + /// Create a new empty verifier registry + pub fn new() -> Self { + Self { + verifiers: HashMap::new(), + } + } + + /// Create a registry with dummy verifiers for all subnets + /// This is useful for Phase 1 testing + pub fn new_with_dummy_verifiers() -> Self { + let mut verifiers = HashMap::new(); + + // Register dummy verifiers for all 8 subnets + for id in 0..types::EXECUTION_PROOF_SUBNET_COUNT { + if let Ok(subnet_id) = ExecutionProofSubnetId::new(id) { + verifiers.insert( + subnet_id, + Arc::new(DummyVerifier::new(subnet_id)) as DynProofVerifier, + ); + } + } + + Self { verifiers } + } + + /// Register a verifier for a specific subnet + pub fn register_verifier(&mut self, verifier: DynProofVerifier) { + let subnet_id = verifier.subnet_id(); + self.verifiers.insert(subnet_id, verifier); + } + + /// Get a verifier for a specific subnet + pub fn get_verifier(&self, subnet_id: ExecutionProofSubnetId) -> Option { + self.verifiers.get(&subnet_id).cloned() + } + + /// Check if a verifier is registered for a subnet + pub fn has_verifier(&self, subnet_id: ExecutionProofSubnetId) -> bool { + self.verifiers.contains_key(&subnet_id) + } + + /// Get the 
number of registered verifiers + pub fn len(&self) -> usize { + self.verifiers.len() + } + + /// Check if the registry is empty + pub fn is_empty(&self) -> bool { + self.verifiers.is_empty() + } + + /// Get all registered subnet IDs + pub fn subnet_ids(&self) -> Vec { + self.verifiers.keys().copied().collect() + } +} + +impl Default for VerifierRegistry { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_empty_registry() { + let registry = VerifierRegistry::new(); + assert!(registry.is_empty()); + assert_eq!(registry.len(), 0); + } + + #[test] + fn test_dummy_verifiers_registry() { + let registry = VerifierRegistry::new_with_dummy_verifiers(); + assert!(!registry.is_empty()); + assert_eq!(registry.len(), 8); // All 8 subnets + + // Check all subnets are registered + for id in 0..8 { + let subnet_id = ExecutionProofSubnetId::new(id).unwrap(); + assert!(registry.has_verifier(subnet_id)); + assert!(registry.get_verifier(subnet_id).is_some()); + } + } + + #[test] + fn test_register_verifier() { + let mut registry = VerifierRegistry::new(); + let subnet_id = ExecutionProofSubnetId::new(0).unwrap(); + let verifier = Arc::new(DummyVerifier::new(subnet_id)); + + registry.register_verifier(verifier); + + assert_eq!(registry.len(), 1); + assert!(registry.has_verifier(subnet_id)); + } + + #[test] + fn test_get_verifier() { + let registry = VerifierRegistry::new_with_dummy_verifiers(); + let subnet_id = ExecutionProofSubnetId::new(3).unwrap(); + + let verifier = registry.get_verifier(subnet_id); + assert!(verifier.is_some()); + assert_eq!(verifier.unwrap().subnet_id(), subnet_id); + } + + #[test] + fn test_subnet_ids() { + let registry = VerifierRegistry::new_with_dummy_verifiers(); + let subnet_ids = registry.subnet_ids(); + + assert_eq!(subnet_ids.len(), 8); + for id in 0..8 { + let subnet_id = ExecutionProofSubnetId::new(id).unwrap(); + assert!(subnet_ids.contains(&subnet_id)); + } + } +} From 
ba14574a38ab36033c84cf41fb2118500eb04171 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 20 Oct 2025 01:37:52 +0100 Subject: [PATCH 07/67] add execution proofs to da_checker --- .../src/data_availability_checker.rs | 14 ++ .../overflow_lru_cache.rs | 127 +++++++++++++++++- 2 files changed, 139 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index c937c32c68e..9c783c8f234 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -339,6 +339,20 @@ impl DataAvailabilityChecker { .put_kzg_verified_data_columns(block_root, custody_columns) } + /// Put execution proofs into the availability cache as pending components. + /// + /// Returns `Availability` which has information about whether all components have been + /// received or more are required. + #[instrument(skip_all, level = "trace")] + pub fn put_verified_execution_proofs>( + &self, + block_root: Hash256, + execution_proofs: I, + ) -> Result, AvailabilityCheckError> { + self.availability_cache + .put_verified_execution_proofs(block_root, execution_proofs) + } + /// Check if we have all the blobs for a block. Returns `Availability` which has information /// about whether all components have been received or more are required. 
pub fn put_executed_block( diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 42f6dbd8567..3a3f030f0d2 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -74,6 +74,7 @@ pub struct PendingComponents { pub block_root: Hash256, pub verified_blobs: RuntimeFixedVector>>, pub verified_data_columns: Vec>, + pub verified_execution_proofs: Vec, pub block: Option>, pub reconstruction_started: bool, span: Span, @@ -199,6 +200,50 @@ impl PendingComponents { Ok(()) } + /// Returns an immutable reference to the cached execution proofs. + pub fn get_cached_execution_proofs(&self) -> &[types::ExecutionProof] { + &self.verified_execution_proofs + } + + /// Check if we have a proof from a specific subnet + pub fn has_proof_from_subnet(&self, subnet_id: types::ExecutionProofSubnetId) -> bool { + self.verified_execution_proofs + .iter() + .any(|proof| proof.subnet_id == subnet_id) + } + + /// Get the number of unique subnet proofs we have + pub fn execution_proof_subnet_count(&self) -> usize { + self.verified_execution_proofs.len() + } + + /// Merges a single execution proof into the cache. + /// + /// Proofs are only inserted if: + /// 1. We don't already have a proof from this subnet for this block + /// 2. The proof's block_hash matches the cached block_root (if block exists) + pub fn merge_execution_proof(&mut self, proof: types::ExecutionProof) { + // Verify the proof is for the correct block + // ExecutionBlockHash is a wrapper around Hash256, so we need to convert + + // Don't insert duplicate proofs from the same subnet + if self.has_proof_from_subnet(proof.subnet_id) { + return; + } + + self.verified_execution_proofs.push(proof); + } + + /// Merges a given set of execution proofs into the cache. 
+ pub fn merge_execution_proofs>( + &mut self, + execution_proofs: I, + ) { + for proof in execution_proofs { + self.merge_execution_proof(proof); + } + } + /// Inserts a new block and revalidates the existing blobs against it. /// /// Blobs that don't match the new block's commitments are evicted. @@ -213,10 +258,11 @@ impl PendingComponents { /// /// WARNING: This function can potentially take a lot of time if the state needs to be /// reconstructed from disk. Ensure you are not holding any write locks while calling this. - pub fn make_available( + fn make_available( &self, spec: &Arc, num_expected_columns_opt: Option, + min_execution_proofs_opt: Option, recover: R, ) -> Result>, AvailabilityCheckError> where @@ -294,6 +340,15 @@ impl PendingComponents { return Ok(None); }; + // Check execution proof availability for ZK-VM mode + if let Some(min_proofs) = min_execution_proofs_opt { + let num_proofs = self.execution_proof_subnet_count(); + if num_proofs < min_proofs { + // Not enough execution proofs yet + return Ok(None); + } + } + // Block is available, construct `AvailableExecutedBlock` let blobs_available_timestamp = match blob_data { @@ -340,6 +395,7 @@ impl PendingComponents { block_root, verified_blobs: RuntimeFixedVector::new(vec![None; max_len]), verified_data_columns: vec![], + verified_execution_proofs: vec![], block: None, reconstruction_started: false, span, @@ -372,7 +428,9 @@ impl PendingComponents { pub fn status_str(&self, num_expected_columns_opt: Option) -> String { let block_count = if self.block.is_some() { 1 } else { 0 }; - if let Some(num_expected_columns) = num_expected_columns_opt { + let proof_count = self.execution_proof_subnet_count(); + + let base_status = if let Some(num_expected_columns) = num_expected_columns_opt { format!( "block {} data_columns {}/{}", block_count, @@ -391,6 +449,13 @@ impl PendingComponents { self.verified_blobs.iter().flatten().count(), num_expected_blobs ) + }; + + // Append execution proof count if we have any 
+ if proof_count > 0 { + format!("{} proofs {}", base_status, proof_count) + } else { + base_status } } } @@ -405,6 +470,9 @@ pub struct DataAvailabilityCheckerInner { state_cache: StateLRUCache, custody_context: Arc>, spec: Arc, + /// Minimum number of execution proofs required from different subnets. + /// If None, execution proof checking is disabled (standard execution engine). + min_execution_proofs_required: Option, } // This enum is only used internally within the crate in the reconstruction function to improve @@ -428,9 +496,16 @@ impl DataAvailabilityCheckerInner { state_cache: StateLRUCache::new(beacon_store, spec.clone()), custody_context, spec, + // TODO(zkproofs): Add method to set this from ZKVM config + min_execution_proofs_required: None, }) } + /// Set the minimum number of execution proofs required for ZK-VM mode + pub fn set_min_execution_proofs_required(&mut self, min_proofs: Option) { + self.min_execution_proofs_required = min_proofs; + } + /// Returns true if the block root is known, without altering the LRU ordering pub fn get_cached_block(&self, block_root: &Hash256) -> Option> { self.critical @@ -575,6 +650,53 @@ impl DataAvailabilityCheckerInner { ) } + /// Puts execution proofs into the availability cache as pending components. 
+ pub fn put_verified_execution_proofs>( + &self, + block_root: Hash256, + execution_proofs: I, + ) -> Result, AvailabilityCheckError> { + let mut execution_proofs = execution_proofs.into_iter().peekable(); + + if execution_proofs.peek().is_none() { + // No proofs to process + return Ok(Availability::MissingComponents(block_root)); + } + + // Try to get epoch from existing pending components (if block already arrived) + // Otherwise use Epoch::new(0) as placeholder (will be corrected when block arrives) + // Also the component cannot be marked as available, if the block is missing + let epoch = self + .critical + .read() + .peek(&block_root) + .and_then(|pending| pending.epoch()) + .unwrap_or_else(|| types::Epoch::new(0)); + + let pending_components = + self.update_or_insert_pending_components(block_root, epoch, |pending_components| { + pending_components.merge_execution_proofs(execution_proofs); + Ok(()) + })?; + + let num_expected_columns_opt = self.get_num_expected_columns(epoch); + + pending_components.span.in_scope(|| { + debug!( + component = "execution_proofs", + status = pending_components.status_str(num_expected_columns_opt), + num_proofs = pending_components.execution_proof_subnet_count(), + "Component added to data availability checker" + ); + }); + + self.check_availability_and_cache_components( + block_root, + pending_components, + num_expected_columns_opt, + ) + } + fn check_availability_and_cache_components( &self, block_root: Hash256, @@ -584,6 +706,7 @@ impl DataAvailabilityCheckerInner { if let Some(available_block) = pending_components.make_available( &self.spec, num_expected_columns_opt, + self.min_execution_proofs_required, |block, span| self.state_cache.recover_pending_executed_block(block, span), )? 
{ // Explicitly drop read lock before acquiring write lock From ec773ceee0014a1190b6efd4853f8e60d6664a4b Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 21 Oct 2025 11:34:58 +0100 Subject: [PATCH 08/67] commit execution layer stub --- Cargo.lock | 1 + zkvm_execution_layer/Cargo.toml | 1 + zkvm_execution_layer/src/engine_api.rs | 68 ++++++++++++++++++++++++++ zkvm_execution_layer/src/lib.rs | 6 +++ 4 files changed, 76 insertions(+) create mode 100644 zkvm_execution_layer/src/engine_api.rs diff --git a/Cargo.lock b/Cargo.lock index 97d2c9c8e2c..9e1806ec80d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11410,6 +11410,7 @@ name = "zkvm_execution_layer" version = "0.1.0" dependencies = [ "async-trait", + "execution_layer", "hashbrown 0.15.2", "lru", "serde", diff --git a/zkvm_execution_layer/Cargo.toml b/zkvm_execution_layer/Cargo.toml index 9d7882369e4..1603cd31e88 100644 --- a/zkvm_execution_layer/Cargo.toml +++ b/zkvm_execution_layer/Cargo.toml @@ -11,6 +11,7 @@ serde = { version = "1.0", features = ["derive"] } tokio = { version = "1", features = ["full"] } thiserror = "2" types = { path = "../consensus/types" } +execution_layer = { path = "../beacon_node/execution_layer" } [dev-dependencies] diff --git a/zkvm_execution_layer/src/engine_api.rs b/zkvm_execution_layer/src/engine_api.rs new file mode 100644 index 00000000000..bb456e93a9f --- /dev/null +++ b/zkvm_execution_layer/src/engine_api.rs @@ -0,0 +1,68 @@ +/// Keeping this here for now to see if we can encapsulate any behaviour into this. module. 
+ +use crate::proof_verification::DynProofVerifier; +// use crate::proof_cache::ProofCache; +use execution_layer::{PayloadStatus, Error as ExecutionLayerError, BlockProposalContentsType}; +use std::sync::Arc; +use tokio::sync::RwLock; +use types::{EthSpec, ExecutionBlockHash, ExecPayload}; + +type PayloadId = [u8; 8]; + +pub struct ZkVmEngineApi { + /// Cache for storing and retrieving ZK proofs + // TODO(zkproofs): Using the cache in the da_checker + // proof_cache: Arc>, + + /// Verifier for ZK proofs + proof_verifier: DynProofVerifier, + + /// Track the latest validated execution block hash + /// TODO(zkproofs): I think we can get this from the beacon chain and it + /// may not need to be here + // latest_valid_hash: Arc>>, + + _phantom: std::marker::PhantomData, +} + +impl ZkVmEngineApi { + pub fn new( + // proof_cache: Arc>, + proof_verifier: DynProofVerifier, + ) -> Self { + Self { + // proof_cache, + proof_verifier, + // latest_valid_hash: Arc::new(RwLock::new(None)), + _phantom: std::marker::PhantomData, + } + } + + /// Verify a new execution payload using ZK proof + pub async fn new_payload<'a>( + &self, + _execution_payload: &'a impl ExecPayload, + ) -> Result { + // TODO(zkproofs): There are some engine_api checks that should be made, but these should be + // done when we have the proof + Ok(PayloadStatus::Syncing) + } + + /// Update fork choice state + pub async fn forkchoice_updated( + &self, + _head_block_hash: ExecutionBlockHash, + ) -> Result { + // For now, just return Valid status + Ok(PayloadStatus::Valid) + } + + /// Get a payload for block production + pub async fn get_payload( + &self, + _payload_id: PayloadId, + ) -> Result, ExecutionLayerError> { + // TODO(zkproofs): use mev-boost + Err(ExecutionLayerError::CannotProduceHeader) + } +} diff --git a/zkvm_execution_layer/src/lib.rs b/zkvm_execution_layer/src/lib.rs index 194750989e8..873bbfcbbc4 100644 --- a/zkvm_execution_layer/src/lib.rs +++ b/zkvm_execution_layer/src/lib.rs @@ -10,6 +10,12 
@@ pub mod registry_proof_verification; pub mod dummy_proof_gen; pub mod dummy_proof_verifier; +/// Engine API implementation for ZK-VM execution +pub mod engine_api; + +/// Re-export the main ZK-VM engine API +pub use engine_api::ZkVmEngineApi; + #[test] fn add() { assert!(1 + 1 == 2) From 971c74357737d74e13bb6a4072b3c3840d74dd1d Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 21 Oct 2025 11:48:58 +0100 Subject: [PATCH 09/67] add execution proof boundaries similar to columns --- beacon_node/beacon_chain/src/beacon_chain.rs | 28 +++++++++++++++ .../src/data_availability_checker.rs | 35 +++++++++++++++++++ .../overflow_lru_cache.rs | 5 +++ 3 files changed, 68 insertions(+) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 85ccb96f693..deb11421e92 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -7139,6 +7139,34 @@ impl BeaconChain { && self.spec.is_peer_das_enabled_for_epoch(block_epoch) } + /// Returns true if epoch is within the execution proof retention boundary + pub fn execution_proof_check_required_for_epoch(&self, epoch: Epoch) -> bool { + self.data_availability_checker + .execution_proof_check_required_for_epoch(epoch) + } + + /// Returns true if we should fetch execution proofs for this block + pub fn should_fetch_execution_proofs(&self, block_epoch: Epoch) -> bool { + // Check if ZK-VM mode is enabled + if self.min_execution_proofs_required().is_none() { + return false; + } + + // Only fetch proofs within retention window + self.execution_proof_check_required_for_epoch(block_epoch) + } + + /// Returns the minimum number of execution proofs required + pub fn min_execution_proofs_required(&self) -> Option { + self.data_availability_checker + .min_execution_proofs_required() + } + + /// Returns the execution proof retention boundary epoch + pub fn execution_proof_boundary(&self) -> Option { + 
self.data_availability_checker.execution_proof_boundary() + } + /// Gets the `LightClientBootstrap` object for a requested block root. /// /// Returns `None` when the state or block is not found in the database. diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 9c783c8f234..80685342f99 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -56,6 +56,10 @@ pub const OVERFLOW_LRU_CAPACITY: NonZeroUsize = new_non_zero_usize(32); pub const STATE_LRU_CAPACITY_NON_ZERO: NonZeroUsize = new_non_zero_usize(32); pub const STATE_LRU_CAPACITY: usize = STATE_LRU_CAPACITY_NON_ZERO.get(); +/// Minimum number of epochs to retain execution proofs for ZK-VM mode. +/// TODO(zkproofs): Consider making this a spec parameter like MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS +pub const MIN_EPOCHS_FOR_PROOF_RETENTION: u64 = 2; + /// Cache to hold fully valid data that can't be imported to fork-choice yet. After Dencun hard-fork /// blocks have a sidecar of data that is received separately from the network. We call the concept /// of a block "becoming available" when all of its import dependencies are inserted into this @@ -581,6 +585,37 @@ impl DataAvailabilityChecker { }) } + /// The epoch at which we expect execution proofs in block processing. + /// + /// Note: For optional proofs, we specify that proofs only need to be available for 2 epochs + /// ie not past finalization + /// + /// Returns `None` if ZK-VM mode is disabled. + pub fn execution_proof_boundary(&self) -> Option { + // Only enable if min_execution_proofs_required is set + if self.availability_cache.min_execution_proofs_required().is_none() { + return None; + } + + // TODO(zkproofs): Add zkvm_fork_epoch to ChainSpec once ZK-VM fork is defined + // This would be when proofs are mandatory. 
+ // For now, calculate boundary based on current epoch + let current_epoch = self.slot_clock.now()?.epoch(T::EthSpec::slots_per_epoch()); + let retention_boundary = current_epoch.saturating_sub(MIN_EPOCHS_FOR_PROOF_RETENTION); + Some(retention_boundary) + } + + /// Returns true if the given epoch lies within the proof retention boundary. + pub fn execution_proof_check_required_for_epoch(&self, block_epoch: Epoch) -> bool { + self.execution_proof_boundary() + .is_some_and(|boundary_epoch| block_epoch >= boundary_epoch) + } + + /// Returns the minimum number of execution proofs required for ZK-VM mode. + pub fn min_execution_proofs_required(&self) -> Option { + self.availability_cache.min_execution_proofs_required() + } + /// Collects metrics from the data availability checker. pub fn metrics(&self) -> DataAvailabilityCheckerMetrics { DataAvailabilityCheckerMetrics { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 3a3f030f0d2..9332e3f4344 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -506,6 +506,11 @@ impl DataAvailabilityCheckerInner { self.min_execution_proofs_required = min_proofs; } + /// Returns the minimum number of execution proofs required (if ZK-VM mode enabled) + pub fn min_execution_proofs_required(&self) -> Option { + self.min_execution_proofs_required + } + /// Returns true if the block root is known, without altering the LRU ordering pub fn get_cached_block(&self, block_root: &Hash256) -> Option> { self.critical From 86d0ffc9fc6877a4e0926b5a434133bcb8fd4fb1 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 21 Oct 2025 12:11:24 +0100 Subject: [PATCH 10/67] set min_execution_proof via the BeaconChain --- beacon_node/beacon_chain/src/builder.rs | 16 ++++++++++++++++ 
.../src/data_availability_checker.rs | 3 +++ .../overflow_lru_cache.rs | 10 +++------- 3 files changed, 22 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 5564c7916fa..a8672983014 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -102,6 +102,12 @@ pub struct BeaconChainBuilder { validator_monitor_config: Option, import_all_data_columns: bool, rng: Option>, + /// Minimum number of execution proofs required for ZK-VM mode. + /// + /// TODO(zkproofs): When min_proofs is Some(_), the traditional ExecutionLayer should + /// be replaced with ZkVmEngineApi from zkvm_execution_layer. This would allow the + /// --execution-endpoint CLI flag to be optional when running in ZK-VM mode. + min_execution_proofs_required: Option, } impl @@ -141,6 +147,7 @@ where validator_monitor_config: None, import_all_data_columns: false, rng: None, + min_execution_proofs_required: None, } } @@ -646,6 +653,14 @@ where self } + /// Sets the minimum number of execution proofs required for ZK-VM mode. + /// If set to Some(n), the beacon chain will require `n` proofs from different subnets + /// before marking an execution payload as valid. + pub fn min_execution_proofs_required(mut self, min_proofs: Option) -> Self { + self.min_execution_proofs_required = min_proofs; + self + } + /// Sets the `BeaconChain` event handler backend. /// /// For example, provide `ServerSentEventHandler` as a `handler`. 
@@ -1019,6 +1034,7 @@ where store, custody_context, self.spec, + self.min_execution_proofs_required, ) .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 80685342f99..b423d54cfa0 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -130,12 +130,14 @@ impl DataAvailabilityChecker { store: BeaconStore, custody_context: Arc>, spec: Arc, + min_execution_proofs_required: Option, ) -> Result { let inner = DataAvailabilityCheckerInner::new( OVERFLOW_LRU_CAPACITY, store, custody_context.clone(), spec.clone(), + min_execution_proofs_required, )?; Ok(Self { complete_blob_backfill, @@ -1259,6 +1261,7 @@ mod test { store, custody_context, spec, + None, ) .expect("should initialise data availability checker") } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 9332e3f4344..71e86fcafe4 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -490,22 +490,17 @@ impl DataAvailabilityCheckerInner { beacon_store: BeaconStore, custody_context: Arc>, spec: Arc, + min_execution_proofs_required: Option, ) -> Result { Ok(Self { critical: RwLock::new(LruCache::new(capacity)), state_cache: StateLRUCache::new(beacon_store, spec.clone()), custody_context, spec, - // TODO(zkproofs): Add method to set this from ZKVM config - min_execution_proofs_required: None, + min_execution_proofs_required, }) } - /// Set the minimum number of execution proofs required for ZK-VM mode - pub fn set_min_execution_proofs_required(&mut self, min_proofs: Option) { - self.min_execution_proofs_required = min_proofs; - } - /// 
Returns the minimum number of execution proofs required (if ZK-VM mode enabled) pub fn min_execution_proofs_required(&self) -> Option { self.min_execution_proofs_required @@ -1156,6 +1151,7 @@ mod test { test_store, custody_context, spec.clone(), + None, ) .expect("should create cache"), ); From 26e13a1b307a4886b0685e9f2189ec886dff1c15 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 21 Oct 2025 12:43:42 +0100 Subject: [PATCH 11/67] set the zkvm config values from the cli --- Cargo.lock | 3 ++ beacon_node/Cargo.toml | 2 + beacon_node/beacon_chain/Cargo.toml | 2 + beacon_node/beacon_chain/src/builder.rs | 28 +++++++----- beacon_node/client/Cargo.toml | 2 + beacon_node/client/src/builder.rs | 1 + beacon_node/client/src/config.rs | 2 + beacon_node/src/cli.rs | 32 +++++++++++++ beacon_node/src/config.rs | 61 ++++++++++++++++++++++++- zkvm_execution_layer/src/lib.rs | 3 +- 10 files changed, 123 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9e1806ec80d..e1158daa351 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -913,6 +913,7 @@ dependencies = [ "tree_hash", "tree_hash_derive", "types", + "zkvm_execution_layer", "zstd 0.13.3", ] @@ -946,6 +947,7 @@ dependencies = [ "task_executor", "tracing", "types", + "zkvm_execution_layer", ] [[package]] @@ -1571,6 +1573,7 @@ dependencies = [ "tracing", "tracing-subscriber", "types", + "zkvm_execution_layer", ] [[package]] diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 8e2c598fd47..45feaa5b5ac 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -27,6 +27,8 @@ dirs = { workspace = true } environment = { workspace = true } eth2_config = { workspace = true } execution_layer = { workspace = true } +# TODO(zkproofs): add as a workspace dependency +zkvm_execution_layer = { path = "../zkvm_execution_layer" } genesis = { workspace = true } hex = { workspace = true } http_api = { workspace = true } diff --git a/beacon_node/beacon_chain/Cargo.toml 
b/beacon_node/beacon_chain/Cargo.toml index dca351cbac6..bbc88516a5a 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -26,6 +26,8 @@ ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } execution_layer = { workspace = true } +# TODO(zkproofs): add as a workspace dependency +zkvm_execution_layer = { path = "../../zkvm_execution_layer" } fork_choice = { workspace = true } futures = { workspace = true } genesis = { workspace = true } diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index a8672983014..7523b3a4bff 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -102,12 +102,12 @@ pub struct BeaconChainBuilder { validator_monitor_config: Option, import_all_data_columns: bool, rng: Option>, - /// Minimum number of execution proofs required for ZK-VM mode. - /// - /// TODO(zkproofs): When min_proofs is Some(_), the traditional ExecutionLayer should + /// ZK-VM execution layer configuration. + /// + /// TODO(zkproofs): When this is Some(_), the traditional ExecutionLayer should /// be replaced with ZkVmEngineApi from zkvm_execution_layer. This would allow the /// --execution-endpoint CLI flag to be optional when running in ZK-VM mode. - min_execution_proofs_required: Option, + zkvm_execution_layer_config: Option, } impl @@ -147,7 +147,7 @@ where validator_monitor_config: None, import_all_data_columns: false, rng: None, - min_execution_proofs_required: None, + zkvm_execution_layer_config: None, } } @@ -653,11 +653,13 @@ where self } - /// Sets the minimum number of execution proofs required for ZK-VM mode. - /// If set to Some(n), the beacon chain will require `n` proofs from different subnets - /// before marking an execution payload as valid. 
- pub fn min_execution_proofs_required(mut self, min_proofs: Option) -> Self { - self.min_execution_proofs_required = min_proofs; + /// Sets the ZK-VM execution layer configuration. + /// When set, enables ZK-VM execution proof verification mode. + pub fn zkvm_execution_layer_config( + mut self, + config: Option, + ) -> Self { + self.zkvm_execution_layer_config = config; self } @@ -1034,7 +1036,11 @@ where store, custody_context, self.spec, - self.min_execution_proofs_required, + // Note(zkproofs): We don't pass the entire config to the da_checker + // because currently only the `min_proofs_required` setting is needed. + self.zkvm_execution_layer_config + .as_ref() + .map(|cfg| cfg.min_proofs_required), ) .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 3c4b2572c9a..6b8b79c8efb 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -14,6 +14,8 @@ eth2 = { workspace = true } eth2_config = { workspace = true } ethereum_ssz = { workspace = true } execution_layer = { workspace = true } +# TODO(zkproofs): add as a workspace dependency +zkvm_execution_layer = { path = "../../zkvm_execution_layer" } futures = { workspace = true } genesis = { workspace = true } http_api = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 02c042bf282..de185547a01 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -203,6 +203,7 @@ where .event_handler(event_handler) .execution_layer(execution_layer) .import_all_data_columns(config.network.subscribe_all_data_column_subnets) + .zkvm_execution_layer_config(config.zkvm_execution_layer.clone()) .validator_monitor_config(config.validator_monitor.clone()) .rng(Box::new( StdRng::try_from_rng(&mut OsRng) diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index aeaa196df86..c62e3afb2e2 
100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -69,6 +69,7 @@ pub struct Config { pub network: network::NetworkConfig, pub chain: beacon_chain::ChainConfig, pub execution_layer: Option, + pub zkvm_execution_layer: Option, pub trusted_setup: Vec, pub http_api: http_api::Config, pub http_metrics: http_metrics::Config, @@ -94,6 +95,7 @@ impl Default for Config { network: NetworkConfig::default(), chain: <_>::default(), execution_layer: None, + zkvm_execution_layer: None, trusted_setup: get_trusted_setup(), beacon_graffiti: GraffitiOrigin::default(), http_api: <_>::default(), diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 2e3b3fde4b0..0e641aee4b7 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -921,6 +921,38 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) + /* ZK-VM Execution Layer settings */ + .arg( + Arg::new("zkvm-min-proofs") + .long("zkvm-min-proofs") + .value_name("NUM") + .help("Minimum number of execution proofs required from different subnets \ + before marking an execution payload as valid in ZK-VM mode. \ + When set, enables ZK-VM execution proof verification.") + .value_parser(clap::value_parser!(usize)) + .action(ArgAction::Set) + .display_order(0) + ) + .arg( + Arg::new("zkvm-subscribed-subnets") + .long("zkvm-subscribed-subnets") + .value_name("SUBNET_IDS") + .help("Comma-separated list of execution proof subnet IDs to subscribe to \ + (e.g., '0,1,2'). Required when --zkvm-min-proofs is set.") + .requires("zkvm-min-proofs") + .action(ArgAction::Set) + .display_order(0) + ) + .arg( + Arg::new("zkvm-generation-subnets") + .long("zkvm-generation-subnets") + .value_name("SUBNET_IDS") + .help("Comma-separated list of execution proof subnet IDs to generate proofs for \ + (e.g., '0,1'). 
Must be a subset of --zkvm-subscribed-subnets.") + .requires("zkvm-subscribed-subnets") + .action(ArgAction::Set) + .display_order(0) + ) /* Deneb settings */ .arg( Arg::new("trusted-setup-file-override") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c2599ec0cd9..f99072697ad 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -28,7 +28,8 @@ use std::str::FromStr; use std::time::Duration; use tracing::{error, info, warn}; use types::graffiti::GraffitiString; -use types::{Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes}; +use types::{Checkpoint, Epoch, EthSpec, ExecutionProofSubnetId, Hash256, PublicKeyBytes}; +use zkvm_execution_layer::ZKVMExecutionLayerConfig; const PURGE_DB_CONFIRMATION: &str = "confirm"; @@ -323,6 +324,64 @@ pub fn get_config( // Store the EL config in the client config. client_config.execution_layer = Some(el_config); + // Parse ZK-VM execution layer config if provided + if let Some(min_proofs) = clap_utils::parse_optional::(cli_args, "zkvm-min-proofs")? { + // Parse subscribed subnets (required when min-proofs is set) + let subscribed_subnets_str: String = + clap_utils::parse_required(cli_args, "zkvm-subscribed-subnets")?; + + let subscribed_subnets = subscribed_subnets_str + .split(',') + .map(|s| s.trim().parse::()) + .collect::, _>>() + .map_err(|e| format!("Invalid subnet ID in --zkvm-subscribed-subnets: {}", e))? + .into_iter() + .map(|id| ExecutionProofSubnetId::new(id)) + .collect::, _>>() + .map_err(|e| format!("Invalid subnet ID: {}", e))?; + + // Parse proof generation subnets (optional) + // + // TODO(zkproofs): Since min-proofs required means no EL is required, and we can only set + // proof-gen here, then it means that even a proof generating validator will not directly have a + // EL attached, so they need to call out to a different node for making EL proofs. This sounds safer. 
+ let generation_subnets = if let Some(gen_subnets_str) = + clap_utils::parse_optional::(cli_args, "zkvm-generation-subnets")? + { + gen_subnets_str + .split(',') + .map(|s| s.trim().parse::()) + .collect::, _>>() + .map_err(|e| format!("Invalid subnet ID in --zkvm-generation-subnets: {}", e))? + .into_iter() + .map(|id| ExecutionProofSubnetId::new(id)) + .collect::, _>>() + .map_err(|e| format!("Invalid subnet ID: {}", e))? + } else { + HashSet::new() + }; + + // Build and validate the config + let zkvm_config = ZKVMExecutionLayerConfig::builder() + .subscribed_subnets(subscribed_subnets) + .min_proofs_required(min_proofs) + .generation_subnets(generation_subnets) + .build() + .map_err(|e| format!("Invalid ZK-VM configuration: {}", e))?; + + client_config.zkvm_execution_layer = Some(zkvm_config); + + info!( + "ZK-VM mode enabled with min_proofs_required={}, subscribed_subnets={:?}", + min_proofs, + client_config + .zkvm_execution_layer + .as_ref() + .unwrap() + .subscribed_subnets + ); + } + // Override default trusted setup file if required if let Some(trusted_setup_file_path) = cli_args.get_one::("trusted-setup-file-override") { diff --git a/zkvm_execution_layer/src/lib.rs b/zkvm_execution_layer/src/lib.rs index 873bbfcbbc4..df6510c96ad 100644 --- a/zkvm_execution_layer/src/lib.rs +++ b/zkvm_execution_layer/src/lib.rs @@ -13,8 +13,9 @@ pub mod dummy_proof_verifier; /// Engine API implementation for ZK-VM execution pub mod engine_api; -/// Re-export the main ZK-VM engine API +/// Re-export the main ZK-VM engine API and config pub use engine_api::ZkVmEngineApi; +pub use config::ZKVMExecutionLayerConfig; #[test] fn add() { From c5afb78e3ace5031dbfdcca8982c63678138505b Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 27 Oct 2025 13:43:44 +0000 Subject: [PATCH 12/67] Use ExecutionProofId since we no longer have multiple subnets --- ...oof_subnet_id.rs => execution_proof_id.rs} | 61 +++++++++---------- 1 file changed, 28 insertions(+), 33 deletions(-) 
rename consensus/types/src/{execution_proof_subnet_id.rs => execution_proof_id.rs} (57%) diff --git a/consensus/types/src/execution_proof_subnet_id.rs b/consensus/types/src/execution_proof_id.rs similarity index 57% rename from consensus/types/src/execution_proof_subnet_id.rs rename to consensus/types/src/execution_proof_id.rs index ad1e612ed18..4122b54b055 100644 --- a/consensus/types/src/execution_proof_subnet_id.rs +++ b/consensus/types/src/execution_proof_id.rs @@ -3,19 +3,17 @@ use ssz::{Decode, DecodeError, Encode}; use std::fmt::{self, Display}; use tree_hash::TreeHash; -/// Number of execution proof subnets -/// Each subnet represents a different zkVM+EL combination +/// Number of execution proofs +/// Each proof represents a different zkVM+EL combination /// /// TODO(zkproofs): The number 8 is a parameter that we will want to configure in the future -pub const EXECUTION_PROOF_SUBNET_COUNT: u8 = 8; +pub const EXECUTION_PROOF_TYPE_COUNT: u8 = 8; -/// ExecutionProofSubnetId identifies which zkVM/proof system subnet a proof belongs to. -/// -/// Note: There is a 1-1 mapping between subnet ID and a unique proof. +/// ExecutionProofId identifies which zkVM/proof system a proof belongs to. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)] -pub struct ExecutionProofSubnetId(u8); +pub struct ExecutionProofId(u8); -impl Encode for ExecutionProofSubnetId { +impl Encode for ExecutionProofId { fn is_ssz_fixed_len() -> bool { ::is_ssz_fixed_len() } @@ -37,7 +35,7 @@ impl Encode for ExecutionProofSubnetId { } } -impl Decode for ExecutionProofSubnetId { +impl Decode for ExecutionProofId { fn is_ssz_fixed_len() -> bool { ::is_ssz_fixed_len() } @@ -52,7 +50,7 @@ impl Decode for ExecutionProofSubnetId { } } -impl TreeHash for ExecutionProofSubnetId { +impl TreeHash for ExecutionProofId { fn tree_hash_type() -> tree_hash::TreeHashType { ::tree_hash_type() } @@ -70,18 +68,15 @@ impl TreeHash for ExecutionProofSubnetId { } } -impl ExecutionProofSubnetId { - /// Creates a new ExecutionProofSubnetId if the value is valid +impl ExecutionProofId { + /// Creates a new ExecutionProofId if the value is valid pub fn new(id: u8) -> Result { - // TODO(zkproofs): Do we need this check or can we - // get the subnet ID from the subnet we received the proof from - // making id always < the maximum amount of subnets. 
- if id < EXECUTION_PROOF_SUBNET_COUNT { + if id < EXECUTION_PROOF_TYPE_COUNT { Ok(Self(id)) } else { Err(format!( - "Invalid ExecutionProofSubnetId: {}, must be < {}", - id, EXECUTION_PROOF_SUBNET_COUNT + "Invalid ExecutionProofId: {}, must be < {}", + id, EXECUTION_PROOF_TYPE_COUNT )) } } @@ -98,23 +93,23 @@ impl ExecutionProofSubnetId { /// Returns all valid subnet IDs pub fn all() -> Vec { - (0..EXECUTION_PROOF_SUBNET_COUNT).map(Self).collect() + (0..EXECUTION_PROOF_TYPE_COUNT).map(Self).collect() } } -impl Display for ExecutionProofSubnetId { +impl Display for ExecutionProofId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.0) } } -impl From for u8 { - fn from(subnet_id: ExecutionProofSubnetId) -> u8 { +impl From for u8 { + fn from(subnet_id: ExecutionProofId) -> u8 { subnet_id.0 } } -impl TryFrom for ExecutionProofSubnetId { +impl TryFrom for ExecutionProofId { type Error = String; fn try_from(value: u8) -> Result { @@ -127,23 +122,23 @@ mod tests { use super::*; #[test] - fn test_valid_subnet_ids() { - for id in 0..EXECUTION_PROOF_SUBNET_COUNT { - assert!(ExecutionProofSubnetId::new(id).is_ok()); + fn test_valid_proof_ids() { + for id in 0..EXECUTION_PROOF_TYPE_COUNT { + assert!(ExecutionProofId::new(id).is_ok()); } } #[test] - fn test_invalid_subnet_ids() { - assert!(ExecutionProofSubnetId::new(EXECUTION_PROOF_SUBNET_COUNT).is_err()); + fn test_invalid_proof_ids() { + assert!(ExecutionProofId::new(EXECUTION_PROOF_TYPE_COUNT).is_err()); } #[test] - fn test_all_subnet_ids() { - let all = ExecutionProofSubnetId::all(); - assert_eq!(all.len(), EXECUTION_PROOF_SUBNET_COUNT as usize); - for (idx, subnet_id) in all.iter().enumerate() { - assert_eq!(subnet_id.as_usize(), idx); + fn test_all_proof_ids() { + let all = ExecutionProofId::all(); + assert_eq!(all.len(), EXECUTION_PROOF_TYPE_COUNT as usize); + for (idx, proof_id) in all.iter().enumerate() { + assert_eq!(proof_id.as_usize(), idx); } } } \ No newline at end of file From 
984a1e0dfcaa6c4b0eae3ca79fcfa6fd2bb6bdfd Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 27 Oct 2025 13:46:06 +0000 Subject: [PATCH 13/67] add slot to ExecutionProof and refactor to remove ExecutionProofSubnetId --- consensus/types/src/execution_proof.rs | 66 ++++++++++++++++++-------- 1 file changed, 46 insertions(+), 20 deletions(-) diff --git a/consensus/types/src/execution_proof.rs b/consensus/types/src/execution_proof.rs index ce12730a515..e4a885bd6a3 100644 --- a/consensus/types/src/execution_proof.rs +++ b/consensus/types/src/execution_proof.rs @@ -1,36 +1,51 @@ -use crate::{ExecutionBlockHash, Hash256, VariableList}; +use crate::{ExecutionBlockHash, Hash256, Slot, VariableList}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::typenum; use std::fmt::{self, Debug}; use tree_hash_derive::TreeHash; -use super::ExecutionProofSubnetId; +use super::ExecutionProofId; /// Maximum size of proof data in bytes -/// +/// /// Note: Most proofs will fit within 300KB. Some zkVMs have 1MB proofs (currently) /// and so this number was set to accommodate for the most zkVMs. pub const MAX_PROOF_DATA_BYTES: usize = 1_048_576; +/// Minimum number of execution proofs required from different proof types +/// before marking an execution payload as available in ZK-VM mode. +/// +/// This provides client diversity - nodes wait for proofs from K different +/// zkVM+EL combinations before considering an execution payload available. +pub const DEFAULT_MIN_PROOFS_REQUIRED: usize = 2; + +/// Maximum number of execution proofs that can be requested or stored. +/// This corresponds to the maximum number of proof types (zkVM+EL combinations) +/// that can be supported, which is currently 8 (ExecutionProofId is 0-7). +pub const MAX_PROOFS: usize = 8; + type ProofData = VariableList; /// ExecutionProof represents a cryptographic `proof of execution` that -/// an execution payload is valid. -/// +/// an execution payload is valid. 
+/// /// In short, it is proof that if we were to run a particular execution layer client /// with the given execution payload, they would return the output values that are attached /// to the proof. /// -/// Each proof is associated with a specific subnet_id, which identifies the +/// Each proof is associated with a specific proof_id, which identifies the /// zkVM and EL combination used to generate it. Multiple proofs from different -/// subnets can exist for the same execution payload, providing both client and EL diversity. +/// proof IDs can exist for the same execution payload, providing both zkVM and EL diversity. #[derive(Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq, Eq)] pub struct ExecutionProof { - /// Which subnet/zkVM this proof belongs to - /// TODO(zkproofs): The node should provide this in themselves since they - /// know what subnet the proof came from. - pub subnet_id: ExecutionProofSubnetId, + /// Which proof type (zkVM+EL combination) this proof belongs to + /// Examples: 0=SP1+Reth, 1=Risc0+Geth, 2=SP1+Geth, etc. 
+ /// TODO(zkproofs): change this from subnet_id from proof_id + pub subnet_id: ExecutionProofId, + + /// The slot of the beacon block this proof validates + pub slot: Slot, /// The block hash of the execution payload this proof validates pub block_hash: ExecutionBlockHash, @@ -45,7 +60,8 @@ pub struct ExecutionProof { impl ExecutionProof { pub fn new( - subnet_id: ExecutionProofSubnetId, + proof_id: ExecutionProofId, + slot: Slot, block_hash: ExecutionBlockHash, block_root: Hash256, proof_data: Vec, @@ -54,7 +70,8 @@ impl ExecutionProof { .map_err(|e| format!("Failed to create proof data: {:?}", e))?; Ok(Self { - subnet_id, + subnet_id: proof_id, + slot, block_hash, block_root, proof_data, @@ -76,9 +93,14 @@ impl ExecutionProof { &self.block_hash == block_hash } - /// Check if this proof is from a specific subnet - pub fn is_from_subnet(&self, subnet_id: ExecutionProofSubnetId) -> bool { - self.subnet_id == subnet_id + /// Check if this proof is from a specific proof type + pub fn is_from_proof_type(&self, proof_id: ExecutionProofId) -> bool { + self.subnet_id == proof_id + } + + /// Get the proof type ID + pub fn proof_id(&self) -> ExecutionProofId { + self.subnet_id } } @@ -86,6 +108,7 @@ impl Debug for ExecutionProof { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ExecutionProof") .field("subnet_id", &self.subnet_id) + .field("slot", &self.slot) .field("block_hash", &self.block_hash) .field("block_root", &self.block_root) .field("proof_data_size", &self.proof_data.len()) @@ -96,27 +119,30 @@ impl Debug for ExecutionProof { #[cfg(test)] mod tests { use super::*; + use bls::FixedBytesExtended; #[test] fn test_execution_proof_too_large() { - let subnet_id = ExecutionProofSubnetId::new(0).unwrap(); + let subnet_id = ExecutionProofId::new(0).unwrap(); + let slot = Slot::new(100); let block_hash = ExecutionBlockHash::zero(); let block_root = Hash256::zero(); let proof_data = vec![0u8; MAX_PROOF_DATA_BYTES + 1]; - let result = 
ExecutionProof::new(subnet_id, block_hash, block_root, proof_data); + let result = ExecutionProof::new(subnet_id, slot, block_hash, block_root, proof_data); assert!(result.is_err()); assert!(result.unwrap_err().contains("Proof data too large")); } #[test] fn test_execution_proof_max_size() { - let subnet_id = ExecutionProofSubnetId::new(0).unwrap(); + let subnet_id = ExecutionProofId::new(0).unwrap(); + let slot = Slot::new(100); let block_hash = ExecutionBlockHash::zero(); let block_root = Hash256::zero(); let proof_data = vec![0u8; MAX_PROOF_DATA_BYTES]; - let result = ExecutionProof::new(subnet_id, block_hash, block_root, proof_data); + let result = ExecutionProof::new(subnet_id, slot, block_hash, block_root, proof_data); assert!(result.is_ok()); } From 6dfa83bc8b3845131c007a6334bc309c07e1836e Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 27 Oct 2025 14:29:33 +0000 Subject: [PATCH 14/67] refactor --- consensus/types/src/execution_proof.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/consensus/types/src/execution_proof.rs b/consensus/types/src/execution_proof.rs index e4a885bd6a3..eca33f63685 100644 --- a/consensus/types/src/execution_proof.rs +++ b/consensus/types/src/execution_proof.rs @@ -41,8 +41,7 @@ type ProofData = VariableList; pub struct ExecutionProof { /// Which proof type (zkVM+EL combination) this proof belongs to /// Examples: 0=SP1+Reth, 1=Risc0+Geth, 2=SP1+Geth, etc. 
- /// TODO(zkproofs): change this from subnet_id from proof_id - pub subnet_id: ExecutionProofId, + pub proof_id: ExecutionProofId, /// The slot of the beacon block this proof validates pub slot: Slot, @@ -70,7 +69,7 @@ impl ExecutionProof { .map_err(|e| format!("Failed to create proof data: {:?}", e))?; Ok(Self { - subnet_id: proof_id, + proof_id, slot, block_hash, block_root, @@ -95,19 +94,19 @@ impl ExecutionProof { /// Check if this proof is from a specific proof type pub fn is_from_proof_type(&self, proof_id: ExecutionProofId) -> bool { - self.subnet_id == proof_id + self.proof_id == proof_id } /// Get the proof type ID pub fn proof_id(&self) -> ExecutionProofId { - self.subnet_id + self.proof_id } } impl Debug for ExecutionProof { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ExecutionProof") - .field("subnet_id", &self.subnet_id) + .field("proof_id", &self.proof_id) .field("slot", &self.slot) .field("block_hash", &self.block_hash) .field("block_root", &self.block_root) From 6d6b9947c9bfbd82e735668f80d82b0351f44364 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 27 Oct 2025 14:34:00 +0000 Subject: [PATCH 15/67] modify chain_spec --- consensus/types/src/chain_spec.rs | 91 +++++++++++++++++++++++++++++++ consensus/types/src/lib.rs | 4 +- 2 files changed, 93 insertions(+), 2 deletions(-) diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 50a2f268e00..7e2b2ff625e 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -215,6 +215,17 @@ pub struct ChainSpec { /// The Gloas fork epoch is optional, with `None` representing "Gloas never happens". pub gloas_fork_epoch: Option, + /* + * zkVM execution proof params + */ + /// Whether zkVM mode is enabled via CLI flag --activate-zkvm. + /// When true, the node will subscribe to execution proof gossip, verify proofs, + /// and optionally generate proofs. zkVM activates at the Fulu fork. 
+ /// Unlike other forks, this is not a network-wide activation but a per-node opt-in. + pub zkvm_enabled: bool, + /// Minimum number of execution proofs required from different subnets. + /// Only used when zkvm_enabled is true. + pub zkvm_min_proofs_required: usize, /* * Networking */ @@ -257,6 +268,11 @@ pub struct ChainSpec { pub(crate) blob_schedule: BlobSchedule, min_epochs_for_data_column_sidecars_requests: u64, + /* + * Networking zkvm + */ + pub min_epochs_for_execution_proof_requests: u64, + /* * Networking Gloas */ @@ -473,6 +489,44 @@ impl ChainSpec { .is_some_and(|gloas_fork_epoch| gloas_fork_epoch != self.far_future_epoch) } + /// Returns true if zkVM mode is enabled via CLI flag. + /// Unlike other forks, this is set via CLI and indicates per-node opt-in. + pub fn is_zkvm_enabled(&self) -> bool { + self.zkvm_enabled + } + + /// Returns the epoch at which zkVM activates. + /// Currently uses Fulu fork epoch. + /// Returns None if zkVM is disabled or Fulu is not scheduled. + pub fn zkvm_fork_epoch(&self) -> Option { + if self.zkvm_enabled { + self.fulu_fork_epoch + } else { + None + } + } + + /// Returns true if zkVM mode is enabled for the given epoch. + pub fn is_zkvm_enabled_for_epoch(&self, epoch: Epoch) -> bool { + self.zkvm_fork_epoch() + .is_some_and(|zkvm_fork_epoch| epoch >= zkvm_fork_epoch) + } + + /// Returns true if zkVM mode can be used at the given fork. + pub fn is_zkvm_enabled_for_fork(&self, fork_name: ForkName) -> bool { + self.is_zkvm_enabled() && fork_name.fulu_enabled() + } + + /// Returns the minimum number of execution proofs required. + /// Only meaningful when zkVM is enabled. + pub fn zkvm_min_proofs_required(&self) -> Option { + if self.is_zkvm_enabled() { + Some(self.zkvm_min_proofs_required) + } else { + None + } + } + /// Returns a full `Fork` struct for a given epoch. 
pub fn fork_at_epoch(&self, epoch: Epoch) -> Fork { let current_fork_name = self.fork_name_at_epoch(epoch); @@ -1104,6 +1158,12 @@ impl ChainSpec { gloas_fork_version: [0x07, 0x00, 0x00, 0x00], gloas_fork_epoch: None, + /* + * zkVM execution proof params + */ + zkvm_enabled: false, + zkvm_min_proofs_required: default_zkvm_min_proofs_required(), + /* * Network specific */ @@ -1155,6 +1215,11 @@ impl ChainSpec { default_min_epochs_for_data_column_sidecars_requests(), max_data_columns_by_root_request: default_data_columns_by_root_request(), + /* + * Networking zkvm specific + */ + min_epochs_for_execution_proof_requests: default_min_epochs_for_execution_proof_requests(), + /* * Application specific */ @@ -1230,6 +1295,10 @@ impl ChainSpec { // Gloas gloas_fork_version: [0x07, 0x00, 0x00, 0x00], gloas_fork_epoch: None, + // zkVM + zkvm_enabled: false, + zkvm_min_proofs_required: 0, + min_epochs_for_execution_proof_requests: 2, // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, @@ -1450,6 +1519,12 @@ impl ChainSpec { gloas_fork_version: [0x07, 0x00, 0x00, 0x64], gloas_fork_epoch: None, + /* + * zkVM execution proof params + */ + zkvm_enabled: false, + zkvm_min_proofs_required: default_zkvm_min_proofs_required(), + /* * Network specific */ @@ -1501,6 +1576,11 @@ impl ChainSpec { default_min_epochs_for_data_column_sidecars_requests(), max_data_columns_by_root_request: default_data_columns_by_root_request(), + /* + * Networking zkvm specific + */ + min_epochs_for_execution_proof_requests: default_min_epochs_for_execution_proof_requests(), + /* * Application specific */ @@ -1961,6 +2041,11 @@ const fn default_min_epochs_for_blob_sidecars_requests() -> u64 { 4096 } +const fn default_min_epochs_for_execution_proof_requests() -> u64 { + // TODO(zkproofs): add into specs with rationale + 2 +} + const fn default_blob_sidecar_subnet_count() -> u64 { 6 } @@ -1991,6 +2076,12 @@ const fn default_max_blobs_per_block_electra() -> u64 { 9 } +/// Minimum number
of execution proofs required from different subnets +/// before marking an execution payload as available in ZK-VM mode. +pub const fn default_zkvm_min_proofs_required() -> usize { + crate::execution_proof::DEFAULT_MIN_PROOFS_REQUIRED +} + const fn default_attestation_propagation_slot_range() -> u64 { 32 } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 64202f99eb7..5aba30246fa 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -43,7 +43,7 @@ pub mod execution_block_hash; pub mod execution_payload; pub mod execution_payload_header; pub mod execution_proof; -pub mod execution_proof_subnet_id; +pub mod execution_proof_id; pub mod fork; pub mod fork_data; pub mod fork_name; @@ -180,7 +180,7 @@ pub use crate::execution_payload_header::{ ExecutionPayloadHeaderGloas, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, }; pub use crate::execution_proof::{ExecutionProof, MAX_PROOF_DATA_BYTES}; -pub use crate::execution_proof_subnet_id::{EXECUTION_PROOF_SUBNET_COUNT, ExecutionProofSubnetId}; +pub use crate::execution_proof_id::{EXECUTION_PROOF_TYPE_COUNT, ExecutionProofId}; pub use crate::execution_requests::{ExecutionRequests, RequestType}; pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; From 94027ce0125073571303578d5312749b6572753d Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 27 Oct 2025 14:46:24 +0000 Subject: [PATCH 16/67] refactor for ExecutionProof --- .../src/registry_proof_verification.rs | 56 +++++++++---------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/zkvm_execution_layer/src/registry_proof_verification.rs b/zkvm_execution_layer/src/registry_proof_verification.rs index 4017e44e8f6..fa836ca6582 100644 --- a/zkvm_execution_layer/src/registry_proof_verification.rs +++ b/zkvm_execution_layer/src/registry_proof_verification.rs @@ -2,7 +2,7 @@ use crate::dummy_proof_verifier::DummyVerifier; use crate::proof_verification::DynProofVerifier; use 
hashbrown::HashMap; use std::sync::Arc; -use types::ExecutionProofSubnetId; +use types::ExecutionProofId; /// Registry mapping subnet IDs to proof verifiers /// @@ -10,7 +10,7 @@ use types::ExecutionProofSubnetId; /// maintains the mapping from subnet ID to the appropriate verifier implementation. #[derive(Clone)] pub struct VerifierRegistry { - verifiers: HashMap, + verifiers: HashMap, } impl VerifierRegistry { @@ -27,11 +27,11 @@ impl VerifierRegistry { let mut verifiers = HashMap::new(); // Register dummy verifiers for all 8 subnets - for id in 0..types::EXECUTION_PROOF_SUBNET_COUNT { - if let Ok(subnet_id) = ExecutionProofSubnetId::new(id) { + for id in 0..types::EXECUTION_PROOF_TYPE_COUNT { + if let Ok(proof_id) = ExecutionProofId::new(id) { verifiers.insert( - subnet_id, - Arc::new(DummyVerifier::new(subnet_id)) as DynProofVerifier, + proof_id, + Arc::new(DummyVerifier::new(proof_id)) as DynProofVerifier, ); } } @@ -45,14 +45,14 @@ impl VerifierRegistry { self.verifiers.insert(subnet_id, verifier); } - /// Get a verifier for a specific subnet - pub fn get_verifier(&self, subnet_id: ExecutionProofSubnetId) -> Option { - self.verifiers.get(&subnet_id).cloned() + /// Get a verifier for a specific proof ID + pub fn get_verifier(&self, proof_id: ExecutionProofId) -> Option { + self.verifiers.get(&proof_id).cloned() } - /// Check if a verifier is registered for a subnet - pub fn has_verifier(&self, subnet_id: ExecutionProofSubnetId) -> bool { - self.verifiers.contains_key(&subnet_id) + /// Check if a verifier is registered for a proof ID + pub fn has_verifier(&self, proof_id: ExecutionProofId) -> bool { + self.verifiers.contains_key(&proof_id) } /// Get the number of registered verifiers @@ -66,7 +66,7 @@ impl VerifierRegistry { } /// Get all registered subnet IDs - pub fn subnet_ids(&self) -> Vec { + pub fn proof_ids(&self) -> Vec { self.verifiers.keys().copied().collect() } } @@ -94,45 +94,45 @@ mod tests { assert!(!registry.is_empty()); 
assert_eq!(registry.len(), 8); // All 8 subnets - // Check all subnets are registered + // Check all proof IDs are registered for id in 0..8 { - let subnet_id = ExecutionProofSubnetId::new(id).unwrap(); - assert!(registry.has_verifier(subnet_id)); - assert!(registry.get_verifier(subnet_id).is_some()); + let proof_id = ExecutionProofId::new(id).unwrap(); + assert!(registry.has_verifier(proof_id)); + assert!(registry.get_verifier(proof_id).is_some()); } } #[test] fn test_register_verifier() { let mut registry = VerifierRegistry::new(); - let subnet_id = ExecutionProofSubnetId::new(0).unwrap(); - let verifier = Arc::new(DummyVerifier::new(subnet_id)); + let proof_id = ExecutionProofId::new(0).unwrap(); + let verifier = Arc::new(DummyVerifier::new(proof_id)); registry.register_verifier(verifier); assert_eq!(registry.len(), 1); - assert!(registry.has_verifier(subnet_id)); + assert!(registry.has_verifier(proof_id)); } #[test] fn test_get_verifier() { let registry = VerifierRegistry::new_with_dummy_verifiers(); - let subnet_id = ExecutionProofSubnetId::new(3).unwrap(); + let proof_id = ExecutionProofId::new(3).unwrap(); - let verifier = registry.get_verifier(subnet_id); + let verifier = registry.get_verifier(proof_id); assert!(verifier.is_some()); - assert_eq!(verifier.unwrap().subnet_id(), subnet_id); + assert_eq!(verifier.unwrap().subnet_id(), proof_id); } #[test] - fn test_subnet_ids() { + fn test_proof_ids() { let registry = VerifierRegistry::new_with_dummy_verifiers(); - let subnet_ids = registry.subnet_ids(); + let proof_ids = registry.proof_ids(); - assert_eq!(subnet_ids.len(), 8); + assert_eq!(proof_ids.len(), 8); for id in 0..8 { - let subnet_id = ExecutionProofSubnetId::new(id).unwrap(); - assert!(subnet_ids.contains(&subnet_id)); + let proof_id = ExecutionProofId::new(id).unwrap(); + assert!(proof_ids.contains(&proof_id)); } } } From 2a187842080b0ac07f1c383a74ea31b8433e7056 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 27 Oct 2025 19:42:11 
+0000 Subject: [PATCH 17/67] add registry_proof_gen --- .../src/registry_proof_gen.rs | 50 +++++++++---------- 1 file changed, 24 insertions(+), 26 deletions(-) diff --git a/zkvm_execution_layer/src/registry_proof_gen.rs b/zkvm_execution_layer/src/registry_proof_gen.rs index 90f768c9207..a94abd1412e 100644 --- a/zkvm_execution_layer/src/registry_proof_gen.rs +++ b/zkvm_execution_layer/src/registry_proof_gen.rs @@ -3,16 +3,15 @@ use crate::proof_generation::DynProofGenerator; use hashbrown::HashMap; use std::collections::HashSet; use std::sync::Arc; -use types::ExecutionProofSubnetId; +use types::ExecutionProofId; -/// Registry mapping subnet IDs to proof generators +/// Registry mapping proof IDs to proof generators /// -/// Each subnet can have a different zkVM/proof system, and this registry -/// maintains the mapping from subnet ID to the appropriate generator implementation. -/// Not all subnets need generators - nodes can verify without generating. +/// Each proof ID represents a different zkVM/proof system, and this registry +/// maintains the mapping from proof ID to the appropriate generator implementation. 
#[derive(Clone)] pub struct GeneratorRegistry { - generators: HashMap, + generators: HashMap, } impl GeneratorRegistry { @@ -23,9 +22,8 @@ impl GeneratorRegistry { } } - /// Create a registry with dummy generators for specified subnets - /// This is useful for Phase 1 testing - pub fn new_with_dummy_generators(enabled_subnets: HashSet) -> Self { + /// Create a registry with dummy generators for specified proof IDs + pub fn new_with_dummy_generators(enabled_subnets: HashSet) -> Self { let mut generators = HashMap::new(); for subnet_id in enabled_subnets { @@ -43,13 +41,13 @@ impl GeneratorRegistry { self.generators.insert(subnet_id, generator); } - pub fn get_generator(&self, subnet_id: ExecutionProofSubnetId) -> Option { - self.generators.get(&subnet_id).cloned() + pub fn get_generator(&self, proof_id: ExecutionProofId) -> Option { + self.generators.get(&proof_id).cloned() } - /// Check if a generator is registered for a subnet - pub fn has_generator(&self, subnet_id: ExecutionProofSubnetId) -> bool { - self.generators.contains_key(&subnet_id) + /// Check if a generator is registered for a proof ID + pub fn has_generator(&self, proof_id: ExecutionProofId) -> bool { + self.generators.contains_key(&proof_id) } /// Get the number of registered generators @@ -62,7 +60,7 @@ impl GeneratorRegistry { self.generators.is_empty() } - pub fn subnet_ids(&self) -> Vec { + pub fn proof_ids(&self) -> Vec { self.generators.keys().copied().collect() } } @@ -80,22 +78,22 @@ mod tests { #[test] fn test_dummy_generators_registry() { let mut enabled_subnets = HashSet::new(); - enabled_subnets.insert(ExecutionProofSubnetId::new(0).unwrap()); - enabled_subnets.insert(ExecutionProofSubnetId::new(1).unwrap()); + enabled_subnets.insert(ExecutionProofId::new(0).unwrap()); + enabled_subnets.insert(ExecutionProofId::new(1).unwrap()); let registry = GeneratorRegistry::new_with_dummy_generators(enabled_subnets); assert!(!registry.is_empty()); assert_eq!(registry.len(), 2); - 
assert!(registry.has_generator(ExecutionProofSubnetId::new(0).unwrap())); - assert!(registry.has_generator(ExecutionProofSubnetId::new(1).unwrap())); - assert!(!registry.has_generator(ExecutionProofSubnetId::new(2).unwrap())); + assert!(registry.has_generator(ExecutionProofId::new(0).unwrap())); + assert!(registry.has_generator(ExecutionProofId::new(1).unwrap())); + assert!(!registry.has_generator(ExecutionProofId::new(2).unwrap())); } #[test] fn test_register_generator() { let mut registry = GeneratorRegistry::new(); - let subnet_id = ExecutionProofSubnetId::new(0).unwrap(); + let subnet_id = ExecutionProofId::new(0).unwrap(); let generator = Arc::new(DummyProofGenerator::new(subnet_id)); registry.register_generator(generator); @@ -107,10 +105,10 @@ mod tests { #[test] fn test_get_generator() { let mut enabled_subnets = HashSet::new(); - enabled_subnets.insert(ExecutionProofSubnetId::new(3).unwrap()); + enabled_subnets.insert(ExecutionProofId::new(3).unwrap()); let registry = GeneratorRegistry::new_with_dummy_generators(enabled_subnets); - let subnet_id = ExecutionProofSubnetId::new(3).unwrap(); + let subnet_id = ExecutionProofId::new(3).unwrap(); let generator = registry.get_generator(subnet_id); assert!(generator.is_some()); @@ -120,11 +118,11 @@ mod tests { #[test] fn test_subnet_ids() { let mut enabled_subnets = HashSet::new(); - enabled_subnets.insert(ExecutionProofSubnetId::new(0).unwrap()); - enabled_subnets.insert(ExecutionProofSubnetId::new(5).unwrap()); + enabled_subnets.insert(ExecutionProofId::new(0).unwrap()); + enabled_subnets.insert(ExecutionProofId::new(5).unwrap()); let registry = GeneratorRegistry::new_with_dummy_generators(enabled_subnets.clone()); - let subnet_ids = registry.subnet_ids(); + let subnet_ids = registry.proof_ids(); assert_eq!(subnet_ids.len(), 2); for subnet_id in enabled_subnets { From d50c679753dc71f83d97038a1dc523028689830c Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 27 Oct 2025 19:59:40 +0000 Subject: 
[PATCH 18/67] refactor proof_gen --- zkvm_execution_layer/src/proof_generation.rs | 14 ++++++-------- .../src/registry_proof_verification.rs | 4 ++-- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/zkvm_execution_layer/src/proof_generation.rs b/zkvm_execution_layer/src/proof_generation.rs index 62c3361d90b..25836d4e81e 100644 --- a/zkvm_execution_layer/src/proof_generation.rs +++ b/zkvm_execution_layer/src/proof_generation.rs @@ -1,7 +1,7 @@ use async_trait::async_trait; use std::sync::Arc; use thiserror::Error; -use types::{ExecutionProof, ExecutionProofSubnetId}; +use types::{ExecutionProof, ExecutionProofId}; /// Result type for proof generation operations pub type ProofGenerationResult = Result; @@ -28,24 +28,22 @@ pub enum ProofGenerationError { Internal(String), } -/// Trait for proof generation (one implementation per zkVM) +/// Trait for proof generation (one implementation per zkVM+EL combo) /// -/// Each proof system (RISC Zero, SP1, etc.) implements this trait +/// Each proof system (RISC Zero, SP1, etc.) + zkVM combination implements this trait /// to generate proofs for execution payloads from their subnet. #[async_trait] pub trait ProofGenerator: Send + Sync { /// Generate a proof for the given execution payload - /// - /// Note: This is a computationally expensive operation and should be run - /// in a background task. 
async fn generate( &self, + slot: types::Slot, payload_hash: &types::ExecutionBlockHash, block_root: &types::Hash256, ) -> ProofGenerationResult; - /// Get the subnet ID this generator produces proofs for - fn subnet_id(&self) -> ExecutionProofSubnetId; + /// Get the proof ID this generator produces proofs for + fn subnet_id(&self) -> ExecutionProofId; } /// Type-erased proof generator mainly for convenience diff --git a/zkvm_execution_layer/src/registry_proof_verification.rs b/zkvm_execution_layer/src/registry_proof_verification.rs index fa836ca6582..e2f914e1965 100644 --- a/zkvm_execution_layer/src/registry_proof_verification.rs +++ b/zkvm_execution_layer/src/registry_proof_verification.rs @@ -41,7 +41,7 @@ impl VerifierRegistry { /// Register a verifier for a specific subnet pub fn register_verifier(&mut self, verifier: DynProofVerifier) { - let subnet_id = verifier.subnet_id(); + let subnet_id = verifier.proof_id(); self.verifiers.insert(subnet_id, verifier); } @@ -121,7 +121,7 @@ mod tests { let verifier = registry.get_verifier(proof_id); assert!(verifier.is_some()); - assert_eq!(verifier.unwrap().subnet_id(), proof_id); + assert_eq!(verifier.unwrap().proof_id(), proof_id); } #[test] From adb890d86b5c78567af49666a9e2fc9e1368a5fc Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 27 Oct 2025 20:00:03 +0000 Subject: [PATCH 19/67] refactor proof verification --- .../src/proof_verification.rs | 23 ++++++++----------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/zkvm_execution_layer/src/proof_verification.rs b/zkvm_execution_layer/src/proof_verification.rs index dc768e13c5f..56b484320fd 100644 --- a/zkvm_execution_layer/src/proof_verification.rs +++ b/zkvm_execution_layer/src/proof_verification.rs @@ -1,6 +1,6 @@ use std::sync::Arc; use thiserror::Error; -use types::{ExecutionProof, ExecutionProofSubnetId}; +use types::{ExecutionProof, ExecutionProofId}; /// Result type for proof verification operations pub type 
ProofVerificationResult = Result; @@ -14,8 +14,8 @@ pub enum VerificationError { #[error("Invalid proof format: {0}")] InvalidProofFormat(String), - #[error("Unsupported subnet: {0}")] - UnsupportedSubnet(ExecutionProofSubnetId), + #[error("Unsupported proof ID: {0}")] + UnsupportedProofID(ExecutionProofId), #[error("Proof size mismatch: expected {expected}, got {actual}")] ProofSizeMismatch { expected: usize, actual: usize }, @@ -26,20 +26,17 @@ pub enum VerificationError { /// Trait for proof verification (one implementation per zkVM+EL combination) pub trait ProofVerifier: Send + Sync { - /// Verify that the proof is valid for the given execution payload + /// Verify that the proof is valid. /// - /// Returns : + /// TODO(zkproofs): we can probably collapse Ok(false) and Err or make Ok(false) an enum variant + /// + /// Returns: /// - Ok(true) if valid, /// - Ok(false) if invalid (but well-formed) /// - Err if the proof is malformed or verification cannot be performed. - /// TODO(zkproofs): Maybe make Ok(false) an enum variant - fn verify( - &self, - payload_hash: &types::ExecutionBlockHash, - proof: &ExecutionProof, - ) -> ProofVerificationResult; - - fn subnet_id(&self) -> ExecutionProofSubnetId; + fn verify(&self, proof: &ExecutionProof) -> ProofVerificationResult; + + fn proof_id(&self) -> ExecutionProofId; } /// Type-erased proof verifier From 958d360dc02f79b880d581c1d690b11bd627396b Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 27 Oct 2025 20:01:35 +0000 Subject: [PATCH 20/67] commit proof_cache changes -- todo:remove this --- zkvm_execution_layer/src/proof_cache.rs | 65 ++++++++++++++----------- 1 file changed, 36 insertions(+), 29 deletions(-) diff --git a/zkvm_execution_layer/src/proof_cache.rs b/zkvm_execution_layer/src/proof_cache.rs index 573261548d5..5f204a35140 100644 --- a/zkvm_execution_layer/src/proof_cache.rs +++ b/zkvm_execution_layer/src/proof_cache.rs @@ -2,7 +2,7 @@ use lru::LruCache; use std::num::NonZeroUsize; use 
std::sync::Arc; use tokio::sync::RwLock; -use types::{ExecutionBlockHash, ExecutionProof, ExecutionProofSubnetId}; +use types::{ExecutionBlockHash, ExecutionProof, ExecutionProofId}; /// Thread-safe LRU cache for execution proofs /// @@ -32,7 +32,7 @@ impl ProofCache { cache .get_or_insert_mut(block_hash, Vec::new) // TODO(zkproofs): can replace this with a HashSet so we don't need this - .retain(|p| p.subnet_id != proof.subnet_id); + .retain(|p| p.proof_id != proof.proof_id); cache.get_mut(&block_hash).unwrap().push(proof); } @@ -49,7 +49,7 @@ impl ProofCache { pub async fn get_from_subnets( &self, block_hash: &ExecutionBlockHash, - subnet_ids: &[ExecutionProofSubnetId], + proof_ids: &[ExecutionProofId], ) -> Vec { let cache = self.cache.read().await; @@ -58,7 +58,7 @@ impl ProofCache { .map(|proofs| { proofs .iter() - .filter(|p| subnet_ids.contains(&p.subnet_id)) + .filter(|p| proof_ids.contains(&p.proof_id)) .cloned() .collect() }) @@ -79,8 +79,8 @@ impl ProofCache { .unwrap_or(false) } - /// Get the number of unique subnets/proofs we have for a particular execution payload - pub async fn subnet_count(&self, block_hash: &ExecutionBlockHash) -> usize { + /// Get the number of unique proofs we have for a particular execution payload + pub async fn proof_count(&self, block_hash: &ExecutionBlockHash) -> usize { let cache = self.cache.read().await; cache @@ -93,13 +93,13 @@ impl ProofCache { pub async fn has_proof_from_subnet( &self, block_hash: &ExecutionBlockHash, - subnet_id: ExecutionProofSubnetId, + proof_id: ExecutionProofId, ) -> bool { let cache = self.cache.read().await; cache .peek(block_hash) - .map(|proofs| proofs.iter().any(|p| p.subnet_id == subnet_id)) + .map(|proofs| proofs.iter().any(|p| p.proof_id == proof_id)) .unwrap_or(false) } @@ -142,17 +142,24 @@ mod tests { use types::Hash256; fn create_test_proof( - subnet_id: ExecutionProofSubnetId, + proof_id: ExecutionProofId, block_hash: ExecutionBlockHash, ) -> ExecutionProof { - use 
types::FixedBytesExtended; - ExecutionProof::new(subnet_id, block_hash, Hash256::zero(), vec![1, 2, 3]).unwrap() + use types::{FixedBytesExtended, Slot}; + ExecutionProof::new( + proof_id, + Slot::new(100), + block_hash, + Hash256::zero(), + vec![1, 2, 3], + ) + .unwrap() } #[tokio::test] async fn test_cache_insert_and_get() { let cache = ProofCache::new(10); - let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + let subnet_0 = ExecutionProofId::new(0).unwrap(); let block_hash = ExecutionBlockHash::repeat_byte(1); let proof = create_test_proof(subnet_0, block_hash); @@ -166,8 +173,8 @@ mod tests { #[tokio::test] async fn test_cache_multiple_subnets() { let cache = ProofCache::new(10); - let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); - let subnet_1 = ExecutionProofSubnetId::new(1).unwrap(); + let subnet_0 = ExecutionProofId::new(0).unwrap(); + let subnet_1 = ExecutionProofId::new(1).unwrap(); let block_hash = ExecutionBlockHash::repeat_byte(1); let proof_0 = create_test_proof(subnet_0, block_hash); @@ -178,13 +185,13 @@ mod tests { let proofs = cache.get(&block_hash).await.unwrap(); assert_eq!(proofs.len(), 2); - assert_eq!(cache.subnet_count(&block_hash).await, 2); + assert_eq!(cache.proof_count(&block_hash).await, 2); } #[tokio::test] async fn test_cache_replace_same_subnet() { let cache = ProofCache::new(10); - let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + let subnet_0 = ExecutionProofId::new(0).unwrap(); let block_hash = ExecutionBlockHash::repeat_byte(1); let mut proof_1 = create_test_proof(subnet_0, block_hash); @@ -203,8 +210,8 @@ mod tests { #[tokio::test] async fn test_has_required_proofs() { let cache = ProofCache::new(10); - let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); - let subnet_1 = ExecutionProofSubnetId::new(1).unwrap(); + let subnet_0 = ExecutionProofId::new(0).unwrap(); + let subnet_1 = ExecutionProofId::new(1).unwrap(); let block_hash = ExecutionBlockHash::repeat_byte(1); 
assert!(!cache.has_required_proofs(&block_hash, 2).await); @@ -219,8 +226,8 @@ mod tests { #[tokio::test] async fn test_has_proof_from_subnet() { let cache = ProofCache::new(10); - let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); - let subnet_1 = ExecutionProofSubnetId::new(1).unwrap(); + let subnet_0 = ExecutionProofId::new(0).unwrap(); + let subnet_1 = ExecutionProofId::new(1).unwrap(); let block_hash = ExecutionBlockHash::repeat_byte(1); assert!(!cache.has_proof_from_subnet(&block_hash, subnet_0).await); @@ -234,9 +241,9 @@ mod tests { #[tokio::test] async fn test_get_from_subnets() { let cache = ProofCache::new(10); - let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); - let subnet_1 = ExecutionProofSubnetId::new(1).unwrap(); - let subnet_2 = ExecutionProofSubnetId::new(2).unwrap(); + let subnet_0 = ExecutionProofId::new(0).unwrap(); + let subnet_1 = ExecutionProofId::new(1).unwrap(); + let subnet_2 = ExecutionProofId::new(2).unwrap(); let block_hash = ExecutionBlockHash::repeat_byte(1); cache.insert(create_test_proof(subnet_0, block_hash)).await; @@ -247,15 +254,15 @@ mod tests { .get_from_subnets(&block_hash, &[subnet_0, subnet_2]) .await; assert_eq!(proofs.len(), 2); - assert!(proofs.iter().any(|p| p.subnet_id == subnet_0)); - assert!(proofs.iter().any(|p| p.subnet_id == subnet_2)); - assert!(!proofs.iter().any(|p| p.subnet_id == subnet_1)); + assert!(proofs.iter().any(|p| p.proof_id == subnet_0)); + assert!(proofs.iter().any(|p| p.proof_id == subnet_2)); + assert!(!proofs.iter().any(|p| p.proof_id == subnet_1)); } #[tokio::test] async fn test_cache_remove() { let cache = ProofCache::new(10); - let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + let subnet_0 = ExecutionProofId::new(0).unwrap(); let block_hash = ExecutionBlockHash::repeat_byte(1); cache.insert(create_test_proof(subnet_0, block_hash)).await; @@ -269,7 +276,7 @@ mod tests { #[tokio::test] async fn test_cache_clear() { let cache = ProofCache::new(10); - let subnet_0 = 
ExecutionProofSubnetId::new(0).unwrap(); + let subnet_0 = ExecutionProofId::new(0).unwrap(); let block_hash_1 = ExecutionBlockHash::repeat_byte(1); let block_hash_2 = ExecutionBlockHash::repeat_byte(2); @@ -291,7 +298,7 @@ mod tests { #[tokio::test] async fn test_cache_lru_eviction() { let cache = ProofCache::new(2); - let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + let subnet_0 = ExecutionProofId::new(0).unwrap(); let block_hash_1 = ExecutionBlockHash::repeat_byte(1); let block_hash_2 = ExecutionBlockHash::repeat_byte(2); let block_hash_3 = ExecutionBlockHash::repeat_byte(3); From b7c3538b192408fd254e78ae43dea07b1b742450 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 27 Oct 2025 20:02:50 +0000 Subject: [PATCH 21/67] simplify engine_api struct --- zkvm_execution_layer/src/engine_api.rs | 34 ++++++-------------------- 1 file changed, 8 insertions(+), 26 deletions(-) diff --git a/zkvm_execution_layer/src/engine_api.rs b/zkvm_execution_layer/src/engine_api.rs index bb456e93a9f..fced776da0f 100644 --- a/zkvm_execution_layer/src/engine_api.rs +++ b/zkvm_execution_layer/src/engine_api.rs @@ -1,39 +1,21 @@ -/// Keeping this here for now to see if we can encapsulate any behaviour into this. module. 
- -use crate::proof_verification::DynProofVerifier; -// use crate::proof_cache::ProofCache; use execution_layer::{PayloadStatus, Error as ExecutionLayerError, BlockProposalContentsType}; -use std::sync::Arc; -use tokio::sync::RwLock; use types::{EthSpec, ExecutionBlockHash, ExecPayload}; type PayloadId = [u8; 8]; pub struct ZkVmEngineApi { - /// Cache for storing and retrieving ZK proofs - // TODO(zkproofs): Using the cache in the da_checker - // proof_cache: Arc>, - - /// Verifier for ZK proofs - proof_verifier: DynProofVerifier, - - /// Track the latest validated execution block hash - /// TODO(zkproofs): I think we can get this from the beacon chain and it - /// may not need to be here - // latest_valid_hash: Arc>>, - _phantom: std::marker::PhantomData, } +impl Default for ZkVmEngineApi { + fn default() -> Self { + Self::new() + } +} + impl ZkVmEngineApi { - pub fn new( - // proof_cache: Arc>, - proof_verifier: DynProofVerifier, - ) -> Self { + pub fn new() -> Self { Self { - // proof_cache, - proof_verifier, - // latest_valid_hash: Arc::new(RwLock::new(None)), _phantom: std::marker::PhantomData, } } @@ -44,7 +26,7 @@ impl ZkVmEngineApi { _execution_payload: &'a impl ExecPayload, ) -> Result { // TODO(zkproofs): There are some engine_api checks that should be made, but these should be - // done when we have the proof + // done when we have the proof, check the EL newPayload method to see what these are Ok(PayloadStatus::Syncing) } From e1a09052596a0fdb3c5e3e1a776e8b2ef62ed672 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 27 Oct 2025 20:16:30 +0000 Subject: [PATCH 22/67] fixup renaming changes --- zkvm_execution_layer/src/config.rs | 190 ++++++------------ zkvm_execution_layer/src/dummy_proof_gen.rs | 49 ++--- .../src/dummy_proof_verifier.rs | 82 +++----- zkvm_execution_layer/src/lib.rs | 1 + zkvm_execution_layer/src/proof_generation.rs | 2 +- .../src/registry_proof_gen.rs | 6 +- 6 files changed, 121 insertions(+), 209 deletions(-) diff --git 
a/zkvm_execution_layer/src/config.rs b/zkvm_execution_layer/src/config.rs index dee8cdb5aa9..f7ab2ab6f1e 100644 --- a/zkvm_execution_layer/src/config.rs +++ b/zkvm_execution_layer/src/config.rs @@ -1,56 +1,34 @@ use serde::{Deserialize, Serialize}; use std::collections::HashSet; -use std::time::Duration; -use types::ExecutionProofSubnetId; +use types::{execution_proof::DEFAULT_MIN_PROOFS_REQUIRED, ExecutionProofId}; -const DEFAULT_PROOF_REQUEST_TIMEOUT: Duration = Duration::from_secs(5); -const DEFAULT_GOSSIP_GRACE_PERIOD: Duration = Duration::from_millis(4000); - -/// Configuration for the zkVM Execution Layer #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ZKVMExecutionLayerConfig { - /// Which subnets/proofs that we are subscribed to and therefore need to - /// know how to verify - pub subscribed_subnets: HashSet, - - /// Minimum number of proofs required from _different_ subnets + /// Minimum number of proofs required from _different_ proof types (proof_ids) /// in order for the node to mark an execution payload as VALID. + /// + /// Note: All nodes receive ALL proof types via the single execution_proof gossip topic. 
pub min_proofs_required: usize, - /// Which subnets to generate proofs for (empty if not generating proofs) - pub generation_subnets: HashSet, + /// Which proof types to generate (empty if not generating proofs) + /// The proof ID identifies the zkVM+EL combination (e.g., 0=SP1+Reth, 1=Risc0+Geth) + pub generation_proof_types: HashSet, /// Proof cache size (number of execution block hashes to cache proofs for) + /// TODO(zkproofs): remove since we use da_checker for proof caches pub proof_cache_size: usize, - - /// Timeout for proof requests via RPC - /// - /// Note: This is needed for the case that we need to request proofs via - /// RPC because we didn't receive them via gossip within `gossip_grace_period` - pub proof_request_timeout: Duration, - - /// Delay before falling back to RPC (gossip grace period) - /// During this time, we wait for `min_proofs_required` proofs to arrive via gossip - /// - /// TODO(zkproofs): This starts counting down from when the user receives the execution payload - pub gossip_grace_period: Duration, } impl Default for ZKVMExecutionLayerConfig { fn default() -> Self { Self { - subscribed_subnets: HashSet::new(), - min_proofs_required: 1, - generation_subnets: HashSet::new(), + min_proofs_required: DEFAULT_MIN_PROOFS_REQUIRED, + generation_proof_types: HashSet::new(), // TODO(zkproofs): This is somewhat arbitrary. 
The number was computed // by NUMBER_OF_BLOCKS_BEFORE_FINALIZATION * NUM_PROOFS_PER_BLOCK = 64 * 8 // We can change it to be more rigorous/scientific proof_cache_size: 64 * 8, - // TODO(zkproofs): Also arbitrary - proof_request_timeout: DEFAULT_PROOF_REQUEST_TIMEOUT, - // TODO(zkproofs): Also arbitrary - gossip_grace_period: DEFAULT_GOSSIP_GRACE_PERIOD, } } } @@ -65,67 +43,40 @@ impl ZKVMExecutionLayerConfig { return Err("proof_cache_size must be at least 1".to_string()); } - // Ensure we subscribe to enough subnets to meet min_proofs_required - if self.subscribed_subnets.len() < self.min_proofs_required { - return Err(format!( - "subscribed_subnets ({}) must be >= min_proofs_required ({})", - self.subscribed_subnets.len(), - self.min_proofs_required - )); - } - - // Node can only generate proofs for subnets they are subscribed to - for subnet in &self.generation_subnets { - if !self.subscribed_subnets.contains(subnet) { - return Err(format!( - "generation_subnets must be a subset of subscribed_subnets (subnet {} not subscribed)", - subnet - )); - } - } + // Note: We do NOT validate that generation_proof_types.len() >= min_proofs_required + // because proof-generating nodes validate via their execution layer, not via proofs. + // Only lightweight verifier nodes (without EL) need to wait for min_proofs_required. 
Ok(()) } - /// Create a builder for the config (mostly for convenience, we can remove) - pub fn builder() -> StatelessExecutionLayerConfigBuilder { - StatelessExecutionLayerConfigBuilder::default() + /// Create a builder for the config + /// TODO(zkproofs): I think we can remove this + pub fn builder() -> ZKVMExecutionLayerConfigBuilder { + ZKVMExecutionLayerConfigBuilder::default() } } #[derive(Default)] -pub struct StatelessExecutionLayerConfigBuilder { - subscribed_subnets: HashSet, +pub struct ZKVMExecutionLayerConfigBuilder { min_proofs_required: Option, - generation_subnets: HashSet, + generation_proof_types: HashSet, proof_cache_size: Option, - proof_request_timeout: Option, - gossip_grace_period: Option, } -impl StatelessExecutionLayerConfigBuilder { - pub fn subscribed_subnets(mut self, subnets: HashSet) -> Self { - self.subscribed_subnets = subnets; - self - } - - pub fn add_subscribed_subnet(mut self, subnet: ExecutionProofSubnetId) -> Self { - self.subscribed_subnets.insert(subnet); - self - } - +impl ZKVMExecutionLayerConfigBuilder { pub fn min_proofs_required(mut self, min: usize) -> Self { self.min_proofs_required = Some(min); self } - pub fn generation_subnets(mut self, subnets: HashSet) -> Self { - self.generation_subnets = subnets; + pub fn generation_proof_types(mut self, proof_types: HashSet) -> Self { + self.generation_proof_types = proof_types; self } - pub fn add_generation_subnet(mut self, subnet: ExecutionProofSubnetId) -> Self { - self.generation_subnets.insert(subnet); + pub fn add_generation_proof_type(mut self, proof_type: ExecutionProofId) -> Self { + self.generation_proof_types.insert(proof_type); self } @@ -134,29 +85,12 @@ impl StatelessExecutionLayerConfigBuilder { self } - pub fn proof_request_timeout(mut self, timeout: Duration) -> Self { - self.proof_request_timeout = Some(timeout); - self - } - - pub fn gossip_grace_period(mut self, period: Duration) -> Self { - self.gossip_grace_period = Some(period); - self - } - /// Build 
the configuration pub fn build(self) -> Result { let config = ZKVMExecutionLayerConfig { - subscribed_subnets: self.subscribed_subnets, - min_proofs_required: self.min_proofs_required.unwrap_or(1), - generation_subnets: self.generation_subnets, + min_proofs_required: self.min_proofs_required.unwrap_or(DEFAULT_MIN_PROOFS_REQUIRED), + generation_proof_types: self.generation_proof_types, proof_cache_size: self.proof_cache_size.unwrap_or(1024), - proof_request_timeout: self - .proof_request_timeout - .unwrap_or_else(|| DEFAULT_PROOF_REQUEST_TIMEOUT), - gossip_grace_period: self - .gossip_grace_period - .unwrap_or_else(|| DEFAULT_GOSSIP_GRACE_PERIOD), }; config.validate()?; @@ -168,21 +102,14 @@ impl StatelessExecutionLayerConfigBuilder { mod tests { use super::*; - #[test] - fn test_default_config_validation() { - let config = ZKVMExecutionLayerConfig::default(); - // Default config should fail validation due to no subnets subscribed to - assert!(config.validate().is_err()); - } - #[test] fn test_valid_config() { - let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); - let subnet_1 = ExecutionProofSubnetId::new(1).unwrap(); + let proof_type_0 = ExecutionProofId::new(0).unwrap(); + let proof_type_1 = ExecutionProofId::new(1).unwrap(); let config = ZKVMExecutionLayerConfig::builder() - .add_subscribed_subnet(subnet_0) - .add_subscribed_subnet(subnet_1) + .add_generation_proof_type(proof_type_0) + .add_generation_proof_type(proof_type_1) .min_proofs_required(2) .build(); @@ -190,60 +117,59 @@ mod tests { } #[test] - fn test_min_proofs_too_high() { - let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + fn test_valid_config_with_generation() { + let proof_type_0 = ExecutionProofId::new(0).unwrap(); + let proof_type_1 = ExecutionProofId::new(1).unwrap(); let config = ZKVMExecutionLayerConfig::builder() - .add_subscribed_subnet(subnet_0) - .min_proofs_required(2) // Requires 2 but only subscribed to 1 subnet + .add_generation_proof_type(proof_type_0) + 
.add_generation_proof_type(proof_type_1) + .min_proofs_required(1) + .proof_cache_size(512) .build(); - assert!(config.is_err()); + assert!(config.is_ok()); + let config = config.unwrap(); + assert_eq!(config.generation_proof_types.len(), 2); + assert_eq!(config.min_proofs_required, 1); + assert_eq!(config.proof_cache_size, 512); } #[test] - fn test_generation_subnet_not_subscribed() { - let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); - let subnet_1 = ExecutionProofSubnetId::new(1).unwrap(); - + fn test_min_proofs_required_zero() { let config = ZKVMExecutionLayerConfig::builder() - .add_subscribed_subnet(subnet_0) - .add_generation_subnet(subnet_1) // Generate for subnet 1 but not subscribed + .min_proofs_required(0) // Invalid: must be > 0 .build(); assert!(config.is_err()); } #[test] - fn test_valid_config_with_generation() { - let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); - let subnet_1 = ExecutionProofSubnetId::new(1).unwrap(); - + fn test_no_generation_proof_types() { + // Node can receive and verify proofs without generating any let config = ZKVMExecutionLayerConfig::builder() - .add_subscribed_subnet(subnet_0) - .add_subscribed_subnet(subnet_1) - .add_generation_subnet(subnet_0) - .min_proofs_required(1) - .proof_cache_size(512) + .min_proofs_required(2) .build(); assert!(config.is_ok()); let config = config.unwrap(); - assert_eq!(config.subscribed_subnets.len(), 2); - assert_eq!(config.generation_subnets.len(), 1); - assert_eq!(config.min_proofs_required, 1); - assert_eq!(config.proof_cache_size, 512); + assert!(config.generation_proof_types.is_empty()); } #[test] - fn test_min_proofs_required_zero() { - let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); + fn test_generation_proof_types_less_than_min() { + // Proof-generating nodes validate via EL, not proofs + // They can generate any number of proof types regardless of min_proofs_required + let proof_type_0 = ExecutionProofId::new(0).unwrap(); let config = 
ZKVMExecutionLayerConfig::builder() - .add_subscribed_subnet(subnet_0) - .min_proofs_required(0) // Invalid: must be > 0 + .add_generation_proof_type(proof_type_0) + .min_proofs_required(2) .build(); - assert!(config.is_err()); + assert!(config.is_ok()); + let config = config.unwrap(); + assert_eq!(config.generation_proof_types.len(), 1); + assert_eq!(config.min_proofs_required, 2); } } diff --git a/zkvm_execution_layer/src/dummy_proof_gen.rs b/zkvm_execution_layer/src/dummy_proof_gen.rs index 5e4437c1485..bfbbb8d8e5c 100644 --- a/zkvm_execution_layer/src/dummy_proof_gen.rs +++ b/zkvm_execution_layer/src/dummy_proof_gen.rs @@ -2,30 +2,30 @@ use crate::proof_generation::{ProofGenerationError, ProofGenerationResult, Proof use async_trait::async_trait; use std::time::Duration; use tokio::time::sleep; -use types::{ExecutionBlockHash, ExecutionProof, ExecutionProofSubnetId, Hash256}; +use types::{ExecutionBlockHash, ExecutionProof, ExecutionProofId, Hash256, Slot}; /// Dummy proof generator for testing /// /// This generator simulates the proof generation process with a configurable delay /// and creates dummy proofs. 
pub struct DummyProofGenerator { - subnet_id: ExecutionProofSubnetId, + proof_id: ExecutionProofId, generation_delay: Duration, } impl DummyProofGenerator { - /// Create a new dummy generator for the specified subnet - pub fn new(subnet_id: ExecutionProofSubnetId) -> Self { + /// Create a new dummy generator for the specified proof ID + pub fn new(proof_id: ExecutionProofId) -> Self { Self { - subnet_id, - generation_delay: Duration::from_millis(50), // Simulate some work + proof_id, + generation_delay: Duration::from_millis(50), } } /// Create a new dummy generator with custom generation delay - pub fn with_delay(subnet_id: ExecutionProofSubnetId, delay: Duration) -> Self { + pub fn with_delay(proof_id: ExecutionProofId, delay: Duration) -> Self { Self { - subnet_id, + proof_id, generation_delay: delay, } } @@ -35,6 +35,7 @@ impl DummyProofGenerator { impl ProofGenerator for DummyProofGenerator { async fn generate( &self, + slot: Slot, payload_hash: &ExecutionBlockHash, block_root: &Hash256, ) -> ProofGenerationResult { @@ -43,23 +44,21 @@ impl ProofGenerator for DummyProofGenerator { sleep(self.generation_delay).await; } - // Create a dummy proof with some deterministic data let proof_data = vec![ - 0xFF, // Magic byte for dummy proof - self.subnet_id.as_u8(), - // Include some payload hash bytes + 0xFF, + self.proof_id.as_u8(), payload_hash.0[0], payload_hash.0[1], payload_hash.0[2], payload_hash.0[3], ]; - ExecutionProof::new(self.subnet_id, *payload_hash, *block_root, proof_data) + ExecutionProof::new(self.proof_id, slot, *payload_hash, *block_root, proof_data) .map_err(ProofGenerationError::ProofGenerationFailed) } - fn subnet_id(&self) -> ExecutionProofSubnetId { - self.subnet_id + fn proof_id(&self) -> ExecutionProofId { + self.proof_id } } @@ -69,16 +68,18 @@ mod tests { #[tokio::test] async fn test_dummy_generator_success() { - let subnet = ExecutionProofSubnetId::new(0).unwrap(); + let subnet = ExecutionProofId::new(0).unwrap(); let generator = 
DummyProofGenerator::new(subnet); + let slot = Slot::new(100); let block_hash = ExecutionBlockHash::repeat_byte(1); let block_root = Hash256::repeat_byte(2); - let result = generator.generate(&block_hash, &block_root).await; + let result = generator.generate(slot, &block_hash, &block_root).await; assert!(result.is_ok()); let proof = result.unwrap(); - assert_eq!(proof.subnet_id, subnet); + assert_eq!(proof.proof_id, subnet); + assert_eq!(proof.slot, slot); assert_eq!(proof.block_hash, block_hash); assert_eq!(proof.block_root, block_root); assert!(proof.proof_data_size() > 0); @@ -86,14 +87,15 @@ mod tests { #[tokio::test] async fn test_dummy_generator_deterministic() { - let subnet = ExecutionProofSubnetId::new(1).unwrap(); + let subnet = ExecutionProofId::new(1).unwrap(); let generator = DummyProofGenerator::new(subnet); + let slot = Slot::new(200); let block_hash = ExecutionBlockHash::repeat_byte(42); let block_root = Hash256::repeat_byte(99); // Generate twice - let proof1 = generator.generate(&block_hash, &block_root).await.unwrap(); - let proof2 = generator.generate(&block_hash, &block_root).await.unwrap(); + let proof1 = generator.generate(slot, &block_hash, &block_root).await.unwrap(); + let proof2 = generator.generate(slot, &block_hash, &block_root).await.unwrap(); // Should be identical assert_eq!(proof1.proof_data_slice(), proof2.proof_data_slice()); @@ -102,14 +104,15 @@ mod tests { #[tokio::test] async fn test_dummy_generator_custom_delay() { // TODO(zkproofs): Maybe remove, mainly need it as a temp check - let subnet = ExecutionProofSubnetId::new(0).unwrap(); + let subnet = ExecutionProofId::new(0).unwrap(); let delay = Duration::from_millis(1); let generator = DummyProofGenerator::with_delay(subnet, delay); + let slot = Slot::new(100); let block_hash = ExecutionBlockHash::repeat_byte(1); let block_root = Hash256::repeat_byte(2); let start = tokio::time::Instant::now(); - let result = generator.generate(&block_hash, &block_root).await; + let result = 
generator.generate(slot, &block_hash, &block_root).await; let elapsed = start.elapsed(); assert!(result.is_ok()); diff --git a/zkvm_execution_layer/src/dummy_proof_verifier.rs b/zkvm_execution_layer/src/dummy_proof_verifier.rs index d5dae89e4ba..b7cc6178f78 100644 --- a/zkvm_execution_layer/src/dummy_proof_verifier.rs +++ b/zkvm_execution_layer/src/dummy_proof_verifier.rs @@ -1,51 +1,39 @@ use crate::proof_verification::{ProofVerificationResult, ProofVerifier, VerificationError}; use std::time::Duration; -use types::{ExecutionBlockHash, ExecutionProof, ExecutionProofSubnetId}; +use types::{ExecutionProof, ExecutionProofId}; /// Dummy proof verifier for testing /// /// This verifier simulates the verification process with a configurable delay /// and always returns successful verification. pub struct DummyVerifier { - subnet_id: ExecutionProofSubnetId, + proof_id: ExecutionProofId, verification_delay: Duration, } impl DummyVerifier { - /// Create a new dummy verifier for the specified subnet - pub fn new(subnet_id: ExecutionProofSubnetId) -> Self { + /// Create a new dummy verifier for the specified proof ID + pub fn new(proof_id: ExecutionProofId) -> Self { Self { - subnet_id, + proof_id, verification_delay: Duration::from_millis(10), } } /// Create a new dummy verifier with custom verification delay - pub fn with_delay(subnet_id: ExecutionProofSubnetId, delay: Duration) -> Self { + pub fn with_delay(proof_id: ExecutionProofId, delay: Duration) -> Self { Self { - subnet_id, + proof_id, verification_delay: delay, } } } impl ProofVerifier for DummyVerifier { - fn verify( - &self, - payload_hash: &ExecutionBlockHash, - proof: &ExecutionProof, - ) -> ProofVerificationResult { + fn verify(&self, proof: &ExecutionProof) -> ProofVerificationResult { // Check that the proof is for the correct subnet - if proof.subnet_id != self.subnet_id { - return Err(VerificationError::UnsupportedSubnet(proof.subnet_id)); - } - - // Check that the proof is for the correct payload - if 
&proof.block_hash != payload_hash { - return Err(VerificationError::VerificationFailed(format!( - "Proof block hash mismatch: expected {}, got {}", - payload_hash, proof.block_hash - ))); + if proof.proof_id != self.proof_id { + return Err(VerificationError::UnsupportedProofID(proof.proof_id)); } // Simulate verification work @@ -54,67 +42,61 @@ impl ProofVerifier for DummyVerifier { } // Dummy verifier always succeeds + // In a real implementation, this would cryptographically verify that + // proof.proof_data is a valid zkVM proof for proof.block_hash Ok(true) } - fn subnet_id(&self) -> ExecutionProofSubnetId { - self.subnet_id + fn proof_id(&self) -> ExecutionProofId { + self.proof_id } } #[cfg(test)] mod tests { use super::*; - use types::{FixedBytesExtended, Hash256}; + use types::{ExecutionBlockHash, FixedBytesExtended}; fn create_test_proof( - subnet_id: ExecutionProofSubnetId, - block_hash: ExecutionBlockHash, + subnet_id: ExecutionProofId, + block_hash: types::ExecutionBlockHash, ) -> ExecutionProof { - ExecutionProof::new(subnet_id, block_hash, Hash256::zero(), vec![1, 2, 3, 4]).unwrap() + use types::{Hash256, Slot}; + ExecutionProof::new( + subnet_id, + Slot::new(100), + block_hash, + Hash256::zero(), + vec![1, 2, 3, 4], + ) + .unwrap() } #[tokio::test] async fn test_dummy_verifier_success() { - let subnet = ExecutionProofSubnetId::new(0).unwrap(); + let subnet = ExecutionProofId::new(0).unwrap(); let verifier = DummyVerifier::new(subnet); let block_hash = ExecutionBlockHash::zero(); let proof = create_test_proof(subnet, block_hash); - let result = verifier.verify(&block_hash, &proof); + let result = verifier.verify(&proof); assert!(result.is_ok()); assert_eq!(result.unwrap(), true); } #[tokio::test] async fn test_dummy_verifier_wrong_subnet() { - let subnet_0 = ExecutionProofSubnetId::new(0).unwrap(); - let subnet_1 = ExecutionProofSubnetId::new(1).unwrap(); + let subnet_0 = ExecutionProofId::new(0).unwrap(); + let subnet_1 = 
ExecutionProofId::new(1).unwrap(); let verifier = DummyVerifier::new(subnet_0); let block_hash = ExecutionBlockHash::zero(); let proof = create_test_proof(subnet_1, block_hash); - let result = verifier.verify(&block_hash, &proof); - assert!(result.is_err()); - assert!(matches!( - result.unwrap_err(), - VerificationError::UnsupportedSubnet(_) - )); - } - - #[tokio::test] - async fn test_dummy_verifier_wrong_block_hash() { - let subnet = ExecutionProofSubnetId::new(0).unwrap(); - let verifier = DummyVerifier::new(subnet); - let block_hash_1 = ExecutionBlockHash::repeat_byte(1); - let block_hash_2 = ExecutionBlockHash::repeat_byte(2); - let proof = create_test_proof(subnet, block_hash_1); - - let result = verifier.verify(&block_hash_2, &proof); + let result = verifier.verify(&proof); assert!(result.is_err()); assert!(matches!( result.unwrap_err(), - VerificationError::VerificationFailed(_) + VerificationError::UnsupportedProofID(_) )); } } diff --git a/zkvm_execution_layer/src/lib.rs b/zkvm_execution_layer/src/lib.rs index df6510c96ad..2633158da6b 100644 --- a/zkvm_execution_layer/src/lib.rs +++ b/zkvm_execution_layer/src/lib.rs @@ -16,6 +16,7 @@ pub mod engine_api; /// Re-export the main ZK-VM engine API and config pub use engine_api::ZkVmEngineApi; pub use config::ZKVMExecutionLayerConfig; +pub use registry_proof_gen::GeneratorRegistry; #[test] fn add() { diff --git a/zkvm_execution_layer/src/proof_generation.rs b/zkvm_execution_layer/src/proof_generation.rs index 25836d4e81e..9254d5fe560 100644 --- a/zkvm_execution_layer/src/proof_generation.rs +++ b/zkvm_execution_layer/src/proof_generation.rs @@ -43,7 +43,7 @@ pub trait ProofGenerator: Send + Sync { ) -> ProofGenerationResult; /// Get the proof ID this generator produces proofs for - fn subnet_id(&self) -> ExecutionProofId; + fn proof_id(&self) -> ExecutionProofId; } /// Type-erased proof generator mainly for convenience diff --git a/zkvm_execution_layer/src/registry_proof_gen.rs 
b/zkvm_execution_layer/src/registry_proof_gen.rs index a94abd1412e..01ded0af454 100644 --- a/zkvm_execution_layer/src/registry_proof_gen.rs +++ b/zkvm_execution_layer/src/registry_proof_gen.rs @@ -37,8 +37,8 @@ impl GeneratorRegistry { } pub fn register_generator(&mut self, generator: DynProofGenerator) { - let subnet_id = generator.subnet_id(); - self.generators.insert(subnet_id, generator); + let proof_id = generator.proof_id(); + self.generators.insert(proof_id, generator); } pub fn get_generator(&self, proof_id: ExecutionProofId) -> Option { @@ -112,7 +112,7 @@ mod tests { let generator = registry.get_generator(subnet_id); assert!(generator.is_some()); - assert_eq!(generator.unwrap().subnet_id(), subnet_id); + assert_eq!(generator.unwrap().proof_id(), subnet_id); } #[test] From 23a8a592a540a369f1a403a8b86f1d958b74c3da Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 27 Oct 2025 23:12:44 +0000 Subject: [PATCH 23/67] add proof generation service --- Cargo.lock | 15 + Cargo.toml | 1 + .../proof_generation_service/Cargo.toml | 16 + .../proof_generation_service/src/lib.rs | 374 ++++++++++++++++++ 4 files changed, 406 insertions(+) create mode 100644 beacon_node/proof_generation_service/Cargo.toml create mode 100644 beacon_node/proof_generation_service/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index e1158daa351..6d91efd8353 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1556,6 +1556,7 @@ dependencies = [ "monitoring_api", "network", "operation_pool", + "proof_generation_service", "rand 0.9.0", "sensitive_url", "serde", @@ -7341,6 +7342,20 @@ dependencies = [ "syn 2.0.100", ] +[[package]] +name = "proof_generation_service" +version = "0.1.0" +dependencies = [ + "beacon_chain", + "lighthouse_network", + "logging", + "network", + "tokio", + "tracing", + "types", + "zkvm_execution_layer", +] + [[package]] name = "proptest" version = "1.6.0" diff --git a/Cargo.toml b/Cargo.toml index 6ef5455eb62..e528a6985d0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ 
-14,6 +14,7 @@ members = [ "beacon_node/lighthouse_tracing", "beacon_node/network", "beacon_node/operation_pool", + "beacon_node/proof_generation_service", "beacon_node/store", "beacon_node/timer", "boot_node", diff --git a/beacon_node/proof_generation_service/Cargo.toml b/beacon_node/proof_generation_service/Cargo.toml new file mode 100644 index 00000000000..bbd043e0fdd --- /dev/null +++ b/beacon_node/proof_generation_service/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "proof_generation_service" +version = "0.1.0" +edition = "2021" + +[dependencies] +beacon_chain = { path = "../beacon_chain" } +lighthouse_network = { workspace = true } +network = { workspace = true } +types = { path = "../../consensus/types" } +zkvm_execution_layer = { path = "../../zkvm_execution_layer" } +tokio = { workspace = true } +logging = { workspace = true } +tracing = { workspace = true } + +[dev-dependencies] diff --git a/beacon_node/proof_generation_service/src/lib.rs b/beacon_node/proof_generation_service/src/lib.rs new file mode 100644 index 00000000000..e674b63d830 --- /dev/null +++ b/beacon_node/proof_generation_service/src/lib.rs @@ -0,0 +1,374 @@ +use beacon_chain::{BeaconChain, BeaconChainTypes, ProofGenerationEvent}; +use lighthouse_network::PubsubMessage; +use network::NetworkMessage; +use std::sync::Arc; +use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; +use tracing::{debug, error, info}; +use types::{EthSpec, ExecPayload, ExecutionProofId, Hash256, SignedBeaconBlock, Slot}; + +/// Service responsible for "altruistic" proof generation +/// +/// This service receives notifications about newly imported blocks and generates +/// execution proofs for blocks that don't have proofs yet. This allows any node +/// (not just the block proposer) to generate and publish proofs. +/// +/// Note: While proofs are optional, we don't have the proposer making proofs +/// for their own block. The proposer should insert the block into their own +/// chain, so this should trigger. 
+pub struct ProofGenerationService { + /// Reference to the beacon chain + chain: Arc>, + /// Receiver for proof generation events + event_rx: UnboundedReceiver>, + /// Sender to publish proofs to the network + network_tx: UnboundedSender>, +} + +impl ProofGenerationService { + pub fn new( + chain: Arc>, + event_rx: UnboundedReceiver>, + network_tx: UnboundedSender>, + ) -> Self { + Self { + chain, + event_rx, + network_tx, + } + } + + /// Run the service event loop + pub async fn run(mut self) { + info!("Proof generation service started"); + + while let Some(event) = self.event_rx.recv().await { + let (block_root, slot, block) = event; + + debug!( + slot = ?slot, + block_root = ?block_root, + "Received block import notification" + ); + + // Handle the event + self.handle_block_import(block_root, slot, block).await; + } + + info!("Proof generation service stopped"); + } + + /// Handle a block import event + async fn handle_block_import( + &self, + block_root: Hash256, + slot: Slot, + block: Arc>, + ) { + // Check if proofs are required for this epoch + // TODO(zkproofs): alternative is to only enable this when + // the zkvm fork is enabled. 
Check if this is possible + let block_epoch = slot.epoch(T::EthSpec::slots_per_epoch()); + if !self + .chain + .data_availability_checker + .execution_proof_check_required_for_epoch(block_epoch) + { + debug!( + slot = ?slot, + epoch = ?block_epoch, + "Proofs not required for this epoch, skipping proof generation" + ); + return; + } + + // Check if we have a proof generator registry + let registry = match &self.chain.zkvm_generator_registry { + Some(registry) => registry.clone(), + None => { + debug!( + slot = ?slot, + "No generator registry configured, skipping proof generation" + ); + return; + } + }; + + // Get the list of proof types we should generate + let proof_types = registry.proof_ids(); + + if proof_types.is_empty() { + debug!( + slot = ?slot, + "No proof generators registered" + ); + return; + } + + debug!( + slot = ?slot, + block_root = ?block_root, + proof_types = proof_types.len(), + "Checking for locally missing proofs" + ); + + // Check which proofs are missing/we haven't received yet + for proof_id in proof_types { + // Check if we already have this proof + let has_proof = self.check_if_proof_exists(slot, block_root, proof_id); + + if has_proof { + debug!( + slot = ?slot, + proof_id = ?proof_id, + "Proof already exists, skipping" + ); + continue; + } + + self.spawn_proof_generation( + block_root, + slot, + block.clone(), + proof_id, + registry.clone(), + self.network_tx.clone(), + ); + } + } + + /// Check if a proof already exists for this block + fn check_if_proof_exists(&self, slot: Slot, block_root: Hash256, proof_id: ExecutionProofId) -> bool { + let observed = self.chain.observed_execution_proofs.read(); + observed.is_known(slot, block_root, proof_id).unwrap_or(false) + } + + /// Spawn a task to generate a proof + fn spawn_proof_generation( + &self, + block_root: Hash256, + slot: Slot, + block: Arc>, + proof_id: ExecutionProofId, + registry: Arc, + network_tx: UnboundedSender>, + ) { + let chain = self.chain.clone(); + + // Get the generator 
for this proof type + let generator = match registry.get_generator(proof_id) { + Some(gen) => gen, + None => { + debug!( + slot = ?slot, + proof_id = ?proof_id, + "No generator found for proof type" + ); + return; + } + }; + + // Spawn the generation task (async because generator.generate() is async) + self.chain.task_executor.spawn( + async move { + info!( + slot = ?slot, + block_root = ?block_root, + proof_id = ?proof_id, + "Generating execution proof" + ); + + // Extract execution payload hash from the block + let block_hash = match block.message().execution_payload() { + Ok(payload) => payload.block_hash(), + Err(e) => { + debug!( + slot = ?slot, + block_root = ?block_root, + error = ?e, + "Block has no execution payload, skipping proof generation" + ); + return; + } + }; + + // Generate the proof using the generator + let proof_result = generator.generate(slot, &block_hash, &block_root).await; + + match proof_result { + Ok(proof) => { + info!( + slot = ?slot, + proof_id = ?proof_id, + "Successfully generated proof" + ); + + // Double-check that proof didn't arrive via gossip while we were generating + let observed = chain.observed_execution_proofs.read(); + if observed.is_known(slot, block_root, proof_id).unwrap_or(false) { + info!( + slot = ?slot, + proof_id = ?proof_id, + "Proof arrived via gossip while generating, discarding our copy" + ); + return; + } + drop(observed); + + // Note: We don't store the proof in the data availability checker because: + // 1. The block has already been imported and is no longer in the availability cache + // 2. This is altruistic proof generation - we're generating proofs for OTHER nodes + // 3. 
We already have the block, so we don't need the proof for ourselves + + // Publish the proof to the network + let pubsub_message = PubsubMessage::ExecutionProof( + Box::new((proof_id, Arc::new(proof))) + ); + + let network_message = NetworkMessage::Publish { + messages: vec![pubsub_message], + }; + + if let Err(e) = network_tx.send(network_message) { + error!( + slot = ?slot, + proof_id = ?proof_id, + error = ?e, + "Failed to send proof to network service" + ); + } else { + info!( + slot = ?slot, + proof_id = ?proof_id, + "Proof successfully published to network" + ); + + // Mark the proof as observed so we don't regenerate it + if let Err(e) = chain + .observed_execution_proofs + .write() + .observe_proof(slot, block_root, proof_id) + { + error!( + slot = ?slot, + proof_id = ?proof_id, + error = ?e, + "Failed to mark proof as observed" + ); + } + } + } + Err(e) => { + error!( + slot = ?slot, + proof_id = ?proof_id, + error = %e, + "Failed to generate proof" + ); + } + } + }, + "proof_generation", + ); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use beacon_chain::test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, + }; + use tokio::sync::mpsc; + use types::MinimalEthSpec as E; + + type TestHarness = BeaconChainHarness>; + + /// Create a test harness with minimal setup + fn build_test_harness(validator_count: usize) -> TestHarness { + let harness = BeaconChainHarness::builder(E::default()) + .default_spec() + .deterministic_keypairs(validator_count) + .fresh_ephemeral_store() + .build(); + harness + } + + #[tokio::test] + async fn test_check_if_proof_exists_returns_false_for_new_proof() { + let harness = build_test_harness(8); + let chain = harness.chain.clone(); + + let (_event_tx, event_rx) = mpsc::unbounded_channel(); + let (network_tx, _network_rx) = mpsc::unbounded_channel(); + + let service = ProofGenerationService::new(chain, event_rx, network_tx); + + let block_root = Hash256::random(); + let slot = 
types::Slot::new(1); + let proof_id = ExecutionProofId::new(0).unwrap(); + + // Should return false for a proof that hasn't been observed + assert_eq!(service.check_if_proof_exists(slot, block_root, proof_id), false); + } + + #[tokio::test] + async fn test_check_if_proof_exists_returns_true_after_observation() { + let harness = build_test_harness(8); + let chain = harness.chain.clone(); + + let (_event_tx, event_rx) = mpsc::unbounded_channel(); + let (network_tx, _network_rx) = mpsc::unbounded_channel(); + + let service = ProofGenerationService::new(chain.clone(), event_rx, network_tx); + + let block_root = Hash256::random(); + let slot = types::Slot::new(1); + let proof_id = ExecutionProofId::new(0).unwrap(); + + // Mark the proof as observed + chain + .observed_execution_proofs + .write() + .observe_proof(slot, block_root, proof_id) + .unwrap(); + + // Should return true for an observed proof + assert_eq!(service.check_if_proof_exists(slot, block_root, proof_id), true); + } + + #[tokio::test] + async fn test_handle_block_import_skips_when_epoch_not_required() { + let harness = build_test_harness(8); + let chain = harness.chain.clone(); + + // Note: zkVM is NOT enabled in this harness + // TODO(zkproofs): can we make a harness with zkVM enabled to test this functionality in a unit test + + let (_event_tx, event_rx) = mpsc::unbounded_channel(); + let (network_tx, mut network_rx) = mpsc::unbounded_channel(); + + let service = ProofGenerationService::new(chain.clone(), event_rx, network_tx); + + harness.advance_slot(); + + harness.extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ).await; + + let block = harness.chain.head_snapshot().beacon_block.clone(); + let block_root = block.canonical_root(); + let slot = block.slot(); + + service.handle_block_import(block_root, slot, block).await; + + // Give async tasks time to complete + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + // Should not have 
published any proofs because epoch doesn't require them + assert!(network_rx.try_recv().is_err(), "Should not publish proofs when epoch doesn't require them"); + } + + +} From fda47957ed7d68c757231c6438ab41c5cd15cfd5 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 01:07:59 +0000 Subject: [PATCH 24/67] Add execution proof observation and verification --- .../beacon_chain/src/canonical_head.rs | 7 + beacon_node/beacon_chain/src/errors.rs | 1 + .../src/execution_proof_verification.rs | 620 ++++++++++++++++++ beacon_node/beacon_chain/src/lib.rs | 4 +- .../src/observed_execution_proofs.rs | 396 +++++++++++ 5 files changed, 1027 insertions(+), 1 deletion(-) create mode 100644 beacon_node/beacon_chain/src/execution_proof_verification.rs create mode 100644 beacon_node/beacon_chain/src/observed_execution_proofs.rs diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 7dd4c88c513..228e5eb2d27 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -951,6 +951,13 @@ impl BeaconChain { .start_slot(T::EthSpec::slots_per_epoch()), ); + self.observed_execution_proofs.write().prune( + new_view + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + ); + self.observed_slashable.write().prune( new_view .finalized_checkpoint diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 7b04a36faec..70410fa9bd3 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -98,6 +98,7 @@ pub enum BeaconChainError { ObservedAttestersError(ObservedAttestersError), ObservedBlockProducersError(ObservedBlockProducersError), ObservedDataSidecarsError(ObservedDataSidecarsError), + ObservedExecutionProofError(String), AttesterCacheError(AttesterCacheError), PruningError(PruningError), ArithError(ArithError), diff --git 
a/beacon_node/beacon_chain/src/execution_proof_verification.rs b/beacon_node/beacon_chain/src/execution_proof_verification.rs new file mode 100644 index 00000000000..79a22b7cc15 --- /dev/null +++ b/beacon_node/beacon_chain/src/execution_proof_verification.rs @@ -0,0 +1,620 @@ +use crate::observed_data_sidecars::{ObservationStrategy, Observe}; +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use slot_clock::SlotClock; +use std::marker::PhantomData; +use std::sync::Arc; +use tracing::{debug, error, warn}; +use types::{ChainSpec, EthSpec, ExecutionProof, ExecutionProofId, Hash256, Slot}; + +/// An error occurred while validating a gossip execution proof. +#[derive(Debug)] +pub enum GossipExecutionProofError { + /// There was an error whilst processing the execution proof. It is not known if it is + /// valid or invalid. + /// + /// ## Peer scoring + /// + /// We were unable to process this proof due to an internal error. It's unclear if the proof + /// is valid. + BeaconChainError(Box), + + /// The execution proof is from a slot that is later than the current slot (with respect to + /// the gossip clock disparity). + /// + /// ## Peer scoring + /// + /// Assuming the local clock is correct, the peer has sent an invalid message. + FutureSlot { + message_slot: Slot, + latest_permissible_slot: Slot, + }, + + /// The proof corresponds to a slot older than the finalized head slot. + /// + /// ## Peer scoring + /// + /// It's unclear if this proof is valid, but this proof is for a finalized slot and is + /// therefore useless to us. + PastFinalizedSlot { + proof_slot: Slot, + finalized_slot: Slot, + }, + + /// The proof's parent block is unknown. + /// + /// ## Peer scoring + /// + /// We cannot process the proof without validating its parent, the peer isn't necessarily + /// faulty. + ParentUnknown { parent_root: Hash256 }, + + /// The proof conflicts with finalization, no need to propagate. 
+ /// + /// ## Peer scoring + /// + /// It's unclear if this proof is valid, but it conflicts with finality and shouldn't be + /// imported. + NotFinalizedDescendant { block_parent_root: Hash256 }, + + /// An execution proof has already been seen for the given `(proof.block_root, + /// proof_id)` tuple over gossip or non-gossip sources. + /// + /// ## Peer scoring + /// + /// The peer isn't faulty, but we do not forward it over gossip. + PriorKnown { + slot: Slot, + block_root: Hash256, + proof_id: ExecutionProofId, + }, + + /// An execution proof has already been processed from non-gossip source and has not yet been + /// seen on the gossip network. This proof should be accepted and forwarded over gossip. + PriorKnownUnpublished, + + /// The proof verification failed (invalid zkVM proof). + /// + /// ## Peer scoring + /// + /// The proof is invalid and the peer is faulty. + ProofVerificationFailed(String), + + /// The proof size exceeds the maximum allowed size. + /// + /// ## Peer scoring + /// + /// The proof is invalid and the peer is faulty. + ProofTooLarge { size: usize, max_size: usize }, + + /// The block for this proof is not yet available. + /// + /// ## Peer scoring + /// + /// The peer may have sent a proof before we've seen the block. Not necessarily faulty. + BlockNotAvailable { block_root: Hash256 }, +} + +impl From for GossipExecutionProofError { + fn from(e: BeaconChainError) -> Self { + GossipExecutionProofError::BeaconChainError(Box::new(e)) + } +} + +/// A wrapper around an `ExecutionProof` that has been verified for propagation on the gossip +/// network. 
+pub struct GossipVerifiedExecutionProof { + block_root: Hash256, + execution_proof: Arc, + _phantom: PhantomData<(T, O)>, +} + +impl std::fmt::Debug for GossipVerifiedExecutionProof { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("GossipVerifiedExecutionProof") + .field("block_root", &self.block_root) + .field("execution_proof", &self.execution_proof) + .finish() + } +} + +impl Clone for GossipVerifiedExecutionProof { + fn clone(&self) -> Self { + Self { + block_root: self.block_root, + execution_proof: self.execution_proof.clone(), + _phantom: PhantomData, + } + } +} + +impl GossipVerifiedExecutionProof { + pub fn new( + execution_proof: Arc, + chain: &BeaconChain, + ) -> Result { + validate_execution_proof_for_gossip::(execution_proof, chain) + } + + pub fn slot(&self) -> Slot { + self.execution_proof.slot + } + + pub fn block_root(&self) -> Hash256 { + self.block_root + } + + pub fn execution_proof(&self) -> &Arc { + &self.execution_proof + } + + pub fn subnet_id(&self) -> ExecutionProofId { + self.execution_proof.proof_id + } + + /// Get the block root for this proof. + pub fn into_inner(self) -> Arc { + self.execution_proof + } +} + +/// Validate an execution proof for gossip +pub fn validate_execution_proof_for_gossip( + execution_proof: Arc, + chain: &BeaconChain, +) -> Result, GossipExecutionProofError> { + let block_root = execution_proof.block_root; + let proof_slot = execution_proof.slot; + + // 1. Verify proof is not from the future + verify_proof_not_from_future_slot(chain, proof_slot)?; + + // 2. Verify proof slot is greater than finalized slot + verify_slot_greater_than_latest_finalized_slot(chain, proof_slot)?; + + // 3. Check if proof is already known via gossip + verify_is_unknown_execution_proof(chain, &execution_proof)?; + + // 4. Check if the proof is already in the DA checker cache + // If it exists in the cache, we know it has already passed validation. 
+ if chain + .data_availability_checker + .is_execution_proof_cached(&block_root, &execution_proof) + { + if O::observe() { + observe_gossip_execution_proof(&execution_proof, chain)?; + } + return Err(GossipExecutionProofError::PriorKnownUnpublished); + } + + // 5. Verify proof size limits + verify_proof_size(&execution_proof, &chain.spec)?; + + // Note: We intentionally do NOT verify the block exists yet + // Execution proofs can arrive via gossip before their corresponding blocks, + // so we cache them in the DA checker and match them up when the block arrives. + // This is kind of similar to how blob sidecars work. + + // 6. Run zkVM proof verification + verify_zkvm_proof(&execution_proof, chain)?; + + // 7. Observe the proof to prevent reprocessing + if O::observe() { + observe_gossip_execution_proof(&execution_proof, chain)?; + } + + Ok(GossipVerifiedExecutionProof { + block_root, + execution_proof, + _phantom: PhantomData, + }) +} + +/// Verify that this execution proof has not been seen before via gossip +fn verify_is_unknown_execution_proof( + chain: &BeaconChain, + execution_proof: &ExecutionProof, +) -> Result<(), GossipExecutionProofError> { + let block_root = execution_proof.block_root; + let proof_id = execution_proof.proof_id; + let slot = execution_proof.slot; + + if chain + .observed_execution_proofs + .read() + .is_known(slot, block_root, proof_id) + .map_err(|e| { + GossipExecutionProofError::BeaconChainError(Box::new( + BeaconChainError::ObservedExecutionProofError(format!("{:?}", e)), + )) + })? + { + return Err(GossipExecutionProofError::PriorKnown { + slot, + block_root, + proof_id, + }); + } + + Ok(()) +} + +/// Verify that the proof size is within acceptable limits. 
+fn verify_proof_size( + execution_proof: &ExecutionProof, + _spec: &ChainSpec, +) -> Result<(), GossipExecutionProofError> { + use types::MAX_PROOF_DATA_BYTES; + + let proof_size = execution_proof.proof_data.len(); + if proof_size > MAX_PROOF_DATA_BYTES { + return Err(GossipExecutionProofError::ProofTooLarge { + size: proof_size, + max_size: MAX_PROOF_DATA_BYTES, + }); + } + + Ok(()) +} + +/// Mark this execution proof as observed in gossip, to prevent reprocessing +fn observe_gossip_execution_proof( + execution_proof: &ExecutionProof, + chain: &BeaconChain, +) -> Result<(), GossipExecutionProofError> { + let block_root = execution_proof.block_root; + let proof_id = execution_proof.proof_id; + let slot = execution_proof.slot; + + chain + .observed_execution_proofs + .write() + .observe_proof(slot, block_root, proof_id) + .map_err(|e| { + GossipExecutionProofError::BeaconChainError(Box::new( + BeaconChainError::ObservedExecutionProofError(format!("{:?}", e)), + )) + })?; + + debug!( + %block_root, + %proof_id, + %slot, + "Marked execution proof as observed" + ); + + Ok(()) +} + +/// Verify that the execution proof is not from a future slot. +fn verify_proof_not_from_future_slot( + chain: &BeaconChain, + proof_slot: Slot, +) -> Result<(), GossipExecutionProofError> { + let latest_permissible_slot = chain + .slot_clock + .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) + .ok_or(BeaconChainError::UnableToReadSlot)?; + + if proof_slot > latest_permissible_slot { + return Err(GossipExecutionProofError::FutureSlot { + message_slot: proof_slot, + latest_permissible_slot, + }); + } + + Ok(()) +} + +/// Verify that the execution proof slot is greater than the latest finalized slot. 
+fn verify_slot_greater_than_latest_finalized_slot( + chain: &BeaconChain, + proof_slot: Slot, +) -> Result<(), GossipExecutionProofError> { + let latest_finalized_slot = chain + .head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + + if proof_slot <= latest_finalized_slot { + return Err(GossipExecutionProofError::PastFinalizedSlot { + proof_slot, + finalized_slot: latest_finalized_slot, + }); + } + + Ok(()) +} + +/// Verify the zkVM proof. +/// +/// Note: This is expensive +fn verify_zkvm_proof( + execution_proof: &ExecutionProof, + chain: &BeaconChain, +) -> Result<(), GossipExecutionProofError> { + let block_root = execution_proof.block_root; + let subnet_id = execution_proof.proof_id; + + match chain + .data_availability_checker + .verify_execution_proof_for_gossip(execution_proof) + { + Ok(true) => { + debug!(%block_root, %subnet_id, "Proof verification succeeded"); + Ok(()) + } + Ok(false) => { + warn!(%block_root, %subnet_id, "Proof verification failed: proof is invalid"); + Err(GossipExecutionProofError::ProofVerificationFailed(format!( + "zkVM proof verification failed for block_root={}, subnet_id={}", + block_root, subnet_id + ))) + } + Err(e) => { + error!(%block_root, %subnet_id, ?e, "Proof verification error"); + Err(GossipExecutionProofError::BeaconChainError(Box::new( + e.into(), + ))) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; + use types::{ExecutionBlockHash, ForkName, MainnetEthSpec}; + + type E = MainnetEthSpec; + + /// Helper to create a test execution proof + fn create_test_execution_proof( + subnet_id: ExecutionProofId, + slot: Slot, + block_root: Hash256, + ) -> ExecutionProof { + let block_hash = ExecutionBlockHash::zero(); + let proof_data = vec![0u8; 32]; // Dummy proof data + ExecutionProof::new(subnet_id, slot, block_hash, block_root, proof_data) + .expect("Valid test proof") + } + + #[tokio::test] + async 
fn test_reject_future_slot() { + let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.into()) + .deterministic_keypairs(64) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + let current_slot = harness.get_current_slot(); + let future_slot = current_slot + 100; + let proof_id = ExecutionProofId::new(0).expect("Valid proof id"); + let proof = create_test_execution_proof(proof_id, future_slot, Hash256::random()); + + let result = validate_execution_proof_for_gossip::<_, Observe>( + Arc::new(proof), + &harness.chain, + ); + + assert!(matches!( + result.err(), + Some(GossipExecutionProofError::FutureSlot { .. }) + )); + } + + #[tokio::test] + async fn test_reject_past_finalized_slot() { + let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.into()) + .deterministic_keypairs(64) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + // Advance to slot 1 first + harness.advance_slot(); + + // Advance chain to create finalized slot + harness.extend_chain( + 32, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ).await; + + let finalized_slot = harness.finalized_checkpoint().epoch.start_slot(E::slots_per_epoch()); + // Create proof for slot before finalized + let old_slot = finalized_slot.saturating_sub(1u64); + let proof_id = ExecutionProofId::new(0).expect("Valid proof id"); + let proof = create_test_execution_proof(proof_id, old_slot, Hash256::random()); + + let result = validate_execution_proof_for_gossip::<_, Observe>( + Arc::new(proof), + &harness.chain, + ); + + assert!(matches!( + result.err(), + Some(GossipExecutionProofError::PastFinalizedSlot { .. 
}) + )); + } + + #[tokio::test] + async fn test_successful_validation() { + let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.into()) + .deterministic_keypairs(64) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + harness.advance_slot(); + let current_slot = harness.get_current_slot(); + let proof_id = ExecutionProofId::new(0).expect("Valid subnet id"); + + // Use a realistic block root from the chain + let block_root = harness.chain.head_beacon_block_root(); + let proof = create_test_execution_proof(proof_id, current_slot, block_root); + + let result = validate_execution_proof_for_gossip::<_, Observe>( + Arc::new(proof), + &harness.chain, + ); + + match result { + Ok(_) => {} + Err(GossipExecutionProofError::FutureSlot { .. }) + | Err(GossipExecutionProofError::PastFinalizedSlot { .. }) => { + panic!("Should not fail basic validation checks"); + } + Err(_) => {} + } + } + + /// This test verifies that: + /// 1. First gossip proof is accepted and marked as observed + /// 2. Duplicate gossip proof is rejected with PriorKnown + /// 3. 
DoS protection: Expensive verification only happens once + #[tokio::test] + async fn test_gossip_duplicate_proof_rejected() { + let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.into()) + .deterministic_keypairs(64) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + harness.advance_slot(); + let current_slot = harness.get_current_slot(); + let proof_id = ExecutionProofId::new(0).expect("Valid proof id"); + let block_root = Hash256::random(); + let proof = Arc::new(create_test_execution_proof(proof_id, current_slot, block_root)); + + let result1 = validate_execution_proof_for_gossip::<_, Observe>( + proof.clone(), + &harness.chain, + ); + assert!(result1.is_ok()); + + // Should now be rejected as duplicate + let result2 = validate_execution_proof_for_gossip::<_, Observe>( + proof.clone(), + &harness.chain, + ); + + assert!( + matches!( + result2.err(), + Some(GossipExecutionProofError::PriorKnown { slot, block_root: br, proof_id: sid }) + if slot == current_slot && br == block_root && sid == proof_id + ), + "Duplicate proof must be rejected with PriorKnown error" + ); + + assert!( + harness + .chain + .observed_execution_proofs + .read() + .is_known(current_slot, block_root, proof_id) + .unwrap(), + "Proof should be marked as observed" + ); + } + + /// Test that proofs in the DA checker cache are detected and marked as observed. + /// + /// When a proof arrives via gossip but is already in the DA checker cache (from RPC), + /// we should: + /// 1. Accept it for gossip propagation + /// 2. Mark it as observed to prevent reprocessing + /// 3. 
Return PriorKnownUnpublished + #[tokio::test] + async fn test_da_cached_proof_accepted_and_observed() { + let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.into()) + .deterministic_keypairs(64) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + harness.advance_slot(); + let subnet_id = ExecutionProofId::new(0).expect("Valid subnet id"); + let current_slot = harness.get_current_slot(); + let block_root = Hash256::random(); + + let proof = Arc::new(create_test_execution_proof(subnet_id, current_slot, block_root)); + + // Put the proof directly into the DA checker cache (this can happen if it arritves via RPC) + harness + .chain + .data_availability_checker + .put_execution_proofs(block_root, vec![proof.clone()]) + .expect("Should put proof in DA cache"); + + // Verify it's in the cache + assert!( + harness + .chain + .data_availability_checker + .is_execution_proof_cached(&block_root, &proof), + "Proof should be in DA cache" + ); + + // Verify it's NOT in observed cache yet + assert!( + !harness + .chain + .observed_execution_proofs + .read() + .is_known(current_slot, block_root, subnet_id) + .unwrap(), + "Proof should not be in observed cache initially" + ); + + // Now it arrives via gossip + let result = validate_execution_proof_for_gossip::<_, Observe>( + proof.clone(), + &harness.chain, + ); + + // Should be rejected with PriorKnownUnpublished (safe to propagate) + assert!( + matches!( + result.as_ref().err(), + Some(GossipExecutionProofError::PriorKnownUnpublished) + ), + "DA cached proof should return PriorKnownUnpublished, got: {:?}", + result + ); + + // Should now be marked as observed + assert!( + harness + .chain + .observed_execution_proofs + .read() + .is_known(current_slot, block_root, subnet_id) + .unwrap(), + "Proof should be marked as observed after DA cache check" + ); + + // Second gossip attempt should be rejected as PriorKnown (not 
PriorKnownUnpublished) + let result2 = validate_execution_proof_for_gossip::<_, Observe>( + proof.clone(), + &harness.chain, + ); + + assert!( + matches!(result2.err(), Some(GossipExecutionProofError::PriorKnown { .. })), + "Second gossip should be rejected as PriorKnown (already observed)" + ); + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 9d8c3dba38f..5c42e374a28 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -19,6 +19,7 @@ pub mod canonical_head; pub mod chain_config; pub mod data_availability_checker; pub mod data_column_verification; +pub mod execution_proof_verification; mod early_attester_cache; mod errors; pub mod events; @@ -39,6 +40,7 @@ pub mod observed_aggregates; mod observed_attesters; pub mod observed_block_producers; pub mod observed_data_sidecars; +pub mod observed_execution_proofs; pub mod observed_operations; mod observed_slashable; pub mod persisted_beacon_chain; @@ -63,7 +65,7 @@ pub use self::beacon_chain::{ BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, BlockProcessStatus, ChainSegmentResult, ForkChoiceError, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, LightClientProducerEvent, OverrideForkchoiceUpdate, - ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, + ProduceBlockVerification, ProofGenerationEvent, StateSkipConfig, WhenSlotSkipped, }; pub use self::beacon_snapshot::BeaconSnapshot; pub use self::chain_config::ChainConfig; diff --git a/beacon_node/beacon_chain/src/observed_execution_proofs.rs b/beacon_node/beacon_chain/src/observed_execution_proofs.rs new file mode 100644 index 00000000000..154d7f32150 --- /dev/null +++ b/beacon_node/beacon_chain/src/observed_execution_proofs.rs @@ -0,0 +1,396 @@ +//! Provides the `ObservedExecutionProofs` struct which allows for rejecting execution proofs +//! that we have already seen over the gossip network. +//! 
+//! This cache prevents DoS attacks where an attacker repeatedly gossips the same execution proof, +//! forcing expensive zkVM verification operations. Only proofs that have passed basic gossip +//! validation and proof verification should be added to this cache. +//! +//! TODO(zkproofs): we want the proofs to be signed and then we can just add them to the cache +//! once the signature has been verified like `observed_data_sidecars` + +use std::collections::{HashMap, HashSet}; +use types::{ExecutionProofId, Hash256, Slot}; + +#[derive(Debug, PartialEq)] +pub enum Error { + /// The slot of the provided execution proof is prior to finalization. + FinalizedExecutionProof { slot: Slot, finalized_slot: Slot }, +} + +/// Key for tracking observed execution proofs. +/// We track by (slot, block_root) to efficiently prune old entries. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +struct ProofKey { + slot: Slot, + block_root: Hash256, +} + +impl ProofKey { + fn new(slot: Slot, block_root: Hash256) -> Self { + Self { slot, block_root } + } +} + +/// Maintains a cache of seen execution proofs that were received over gossip. +/// +/// The cache tracks (slot, block_root, proof_id) tuples and prunes entries from finalized slots. +/// +/// ## DoS Resistance +/// +/// This cache is critical for preventing DoS attacks where an attacker repeatedly gossips +/// the same execution proof. zkVM verification is expensive (50-100ms), so we must avoid +/// re-verifying proofs we've already seen. +/// +/// ## Pruning +/// +/// Call `prune` on finalization to remove entries from finalized slots. This basically matches the +/// pattern used for observed blobs and data columns. +pub struct ObservedExecutionProofs { + /// The finalized slot. Proofs at or below this slot are rejected. + finalized_slot: Slot, + /// Map from (slot, block_root) to the set of subnet IDs we've seen for that block. 
+ items: HashMap>, +} + +impl ObservedExecutionProofs { + /// Create a new cache with the given finalized slot. + /// + /// Proofs at or below `finalized_slot` will be rejected. + pub fn new(finalized_slot: Slot) -> Self { + Self { + finalized_slot, + items: HashMap::new(), + } + } + + /// Observe an execution proof from gossip. + /// + /// Returns `true` if the proof was already observed (duplicate), `false` if it's new. + /// + /// Returns an error if the proof's slot is at or below the finalized slot. + /// Note: This shouldn't happen because it means we've received a proof for + /// a finalized block + pub fn observe_proof( + &mut self, + slot: Slot, + block_root: Hash256, + proof_id: ExecutionProofId, + ) -> Result { + // Reject finalized proofs + if self.finalized_slot > 0 && slot <= self.finalized_slot { + return Err(Error::FinalizedExecutionProof { + slot, + finalized_slot: self.finalized_slot, + }); + } + + let key = ProofKey::new(slot, block_root); + let proof_ids = self.items.entry(key).or_insert_with(HashSet::new); + + let was_duplicate = !proof_ids.insert(proof_id); + + Ok(was_duplicate) + } + + /// Check if we have already observed this proof. + /// + /// Returns `true` if the proof has been seen, `false` if it's new. + /// + /// Returns an error if the proof's slot is at or below the finalized slot. + pub fn is_known( + &self, + slot: Slot, + block_root: Hash256, + proof_id: ExecutionProofId, + ) -> Result { + // Reject finalized proofs + if self.finalized_slot > 0 && slot <= self.finalized_slot { + return Err(Error::FinalizedExecutionProof { + slot, + finalized_slot: self.finalized_slot, + }); + } + + let key = ProofKey::new(slot, block_root); + let is_known = self + .items + .get(&key) + .is_some_and(|proof_ids| proof_ids.contains(&proof_id)); + + Ok(is_known) + } + + /// Prune execution proof observations for slots less than or equal to the given slot. + /// + /// This matches the pruning behavior of observed blobs and data columns. 
+ pub fn prune(&mut self, finalized_slot: Slot) { + if finalized_slot == 0 { + return; + } + + self.finalized_slot = finalized_slot; + self.items.retain(|key, _| key.slot > finalized_slot); + } + + /// Get the current finalized slot boundary. + /// + /// Proofs at or below this slot will be rejected. + pub fn finalized_slot(&self) -> Slot { + self.finalized_slot + } + + /// Get the number of unique (slot, block_root) keys being tracked. + pub fn len(&self) -> usize { + self.items.len() + } + + /// Check if the cache is empty. + pub fn is_empty(&self) -> bool { + self.items.is_empty() + } + + /// Clear all entries from the cache. + #[cfg(test)] + pub fn clear(&mut self) { + self.items.clear(); + } +} + +impl Default for ObservedExecutionProofs { + fn default() -> Self { + Self::new(Slot::new(0)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use types::FixedBytesExtended; + + fn test_proof_key(slot: u64) -> (Slot, Hash256, ExecutionProofId) { + ( + Slot::new(slot), + Hash256::from_low_u64_be(slot), + ExecutionProofId::new(0).unwrap(), + ) + } + + #[test] + fn test_observe_new_proof() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + let (slot, block_root, subnet_id) = test_proof_key(10); + + // First observation should return false (not a duplicate) + assert_eq!( + cache.observe_proof(slot, block_root, subnet_id), + Ok(false), + "first observation should not be duplicate" + ); + + // Second observation should return true (is a duplicate) + assert_eq!( + cache.observe_proof(slot, block_root, subnet_id), + Ok(true), + "second observation should be duplicate" + ); + } + + #[test] + fn test_observe_different_subnets() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + let slot = Slot::new(10); + let block_root = Hash256::from_low_u64_be(10); + let proof_0 = ExecutionProofId::new(0).unwrap(); + let proof_1 = ExecutionProofId::new(1).unwrap(); + + assert_eq!( + cache.observe_proof(slot, block_root, proof_0), + Ok(false), + "proof 0 is 
new" + ); + + // Observe proof from subnet 1 (same block, different proofID) + assert_eq!( + cache.observe_proof(slot, block_root, proof_1), + Ok(false), + "proof 1 is new" + ); + + // Re-observe proof 0 + assert_eq!( + cache.observe_proof(slot, block_root, proof_0), + Ok(true), + "proof 0 is duplicate" + ); + + assert!(cache.is_known(slot, block_root, proof_0).unwrap()); + assert!(cache.is_known(slot, block_root, proof_1).unwrap()); + } + + #[test] + fn test_is_known() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + let (slot, block_root, proof_id) = test_proof_key(10); + + // Before observation + assert_eq!( + cache.is_known(slot, block_root, proof_id), + Ok(false), + "not yet observed" + ); + + // After observation + cache.observe_proof(slot, block_root, proof_id).unwrap(); + assert_eq!( + cache.is_known(slot, block_root, proof_id), + Ok(true), + "now observed" + ); + } + + #[test] + fn test_reject_finalized_proofs() { + let finalized_slot = Slot::new(100); + let mut cache = ObservedExecutionProofs::new(finalized_slot); + + let old_slot = Slot::new(100); + let block_root = Hash256::from_low_u64_be(100); + let proof_id = ExecutionProofId::new(0).unwrap(); + + // Observing finalized proof should error + assert_eq!( + cache.observe_proof(old_slot, block_root, proof_id), + Err(Error::FinalizedExecutionProof { + slot: old_slot, + finalized_slot, + }), + "finalized proofs should be rejected" + ); + + // Checking finalized proof should error + assert_eq!( + cache.is_known(old_slot, block_root, proof_id), + Err(Error::FinalizedExecutionProof { + slot: old_slot, + finalized_slot, + }), + "finalized proofs should be rejected in is_known" + ); + } + + #[test] + fn test_pruning() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + + // Add proofs at different slots + for slot in 0..100 { + let (s, br, pid) = test_proof_key(slot); + cache.observe_proof(s, br, pid).unwrap(); + } + + assert_eq!(cache.len(), 100, "should have 100 entries"); + + 
// Prune at finalized_slot = 50 + // Should remove slots <= 50, keep slots > 50 + let finalized_slot = Slot::new(50); + cache.prune(finalized_slot); + + assert_eq!( + cache.finalized_slot(), + finalized_slot, + "finalized slot should be updated" + ); + + // Check that finalized entries were removed + let old_slot = Slot::new(50); + let old_block_root = Hash256::from_low_u64_be(50); + let proof_id = ExecutionProofId::new(0).unwrap(); + + assert!( + cache.is_known(old_slot, old_block_root, proof_id).is_err(), + "finalized entries should be rejected after pruning" + ); + + // Check that non-finalized entries are still present + let recent_slot = Slot::new(51); + let recent_block_root = Hash256::from_low_u64_be(51); + assert!( + cache + .is_known(recent_slot, recent_block_root, proof_id) + .unwrap(), + "non-finalized entries should still be present" + ); + } + + #[test] + fn test_prune_removes_exact_boundary() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + + // Add proofs at slots 50, 51, 52 + for slot in 50..=52 { + let (s, br, pid) = test_proof_key(slot); + cache.observe_proof(s, br, pid).unwrap(); + } + + // Prune at finalized_slot = 50 + // Should remove slots <= 50, keep slots > 50 + cache.prune(Slot::new(50)); + + assert_eq!(cache.finalized_slot(), Slot::new(50)); + + let proof_id = ExecutionProofId::new(0).unwrap(); + + // Slot 50 should be rejected (finalized) + assert!(cache + .is_known(Slot::new(50), Hash256::from_low_u64_be(50), proof_id) + .is_err()); + + // Slot 51 should still be present (> finalized) + assert!(cache + .is_known(Slot::new(51), Hash256::from_low_u64_be(51), proof_id) + .unwrap()); + + // Slot 52 should still be present + assert!(cache + .is_known(Slot::new(52), Hash256::from_low_u64_be(52), proof_id) + .unwrap()); + } + + #[test] + fn test_different_blocks_same_slot() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + let slot = Slot::new(10); + let block_root_a = Hash256::from_low_u64_be(100); + let 
block_root_b = Hash256::from_low_u64_be(200); + let proof_id = ExecutionProofId::new(0).unwrap(); + + // Observe proof for block A + cache.observe_proof(slot, block_root_a, proof_id).unwrap(); + + // Proof for block B should be new (different block_root) + assert_eq!( + cache.observe_proof(slot, block_root_b, proof_id), + Ok(false), + "different block_root should not be duplicate" + ); + + assert!(cache.is_known(slot, block_root_a, proof_id).unwrap()); + assert!(cache.is_known(slot, block_root_b, proof_id).unwrap()); + } + + #[test] + fn test_len_counts_blocks_not_subnets() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + let slot = Slot::new(10); + let block_root = Hash256::from_low_u64_be(10); + + // Add multiple proof IDs for same block + for i in 0..8 { + let proof_id = ExecutionProofId::new(i).unwrap(); + cache.observe_proof(slot, block_root, proof_id).unwrap(); + } + + // Length should be 1 (one unique (slot, block_root) key) + assert_eq!(cache.len(), 1, "len counts unique keys, not proofIDs"); + } +} From b0f81ed96ba8bf7411f9456e07ce24d775b0dc38 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 11:00:47 +0000 Subject: [PATCH 25/67] Add ExecutionProofByRoots RPC layer --- .../lighthouse_network/src/rpc/codec.rs | 54 ++- .../lighthouse_network/src/rpc/config.rs | 11 + .../lighthouse_network/src/rpc/methods.rs | 89 ++++- .../lighthouse_network/src/rpc/protocol.rs | 42 +- .../src/rpc/rate_limiter.rs | 16 + .../lighthouse_network/tests/rpc_tests.rs | 376 +++++++++++++++++- .../requests/execution_proofs_by_root.rs | 72 ++++ 7 files changed, 641 insertions(+), 19 deletions(-) create mode 100644 beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index acb01884564..4f31a77ea8e 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ 
b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -16,12 +16,13 @@ use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; use types::{ - BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EthSpec, ForkContext, - ForkName, Hash256, LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, - SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, - SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, - SignedBeaconBlockFulu, SignedBeaconBlockGloas, + BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EthSpec, + ExecutionProof, ForkContext, ForkName, Hash256, + LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, + LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockAltair, + SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, + SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, + SignedBeaconBlockGloas, }; use unsigned_varint::codec::Uvi; @@ -80,6 +81,7 @@ impl SSZSnappyInboundCodec { RpcSuccessResponse::BlobsByRoot(res) => res.as_ssz_bytes(), RpcSuccessResponse::DataColumnsByRoot(res) => res.as_ssz_bytes(), RpcSuccessResponse::DataColumnsByRange(res) => res.as_ssz_bytes(), + RpcSuccessResponse::ExecutionProofsByRoot(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientOptimisticUpdate(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientFinalityUpdate(res) => res.as_ssz_bytes(), @@ -360,6 +362,7 @@ impl Encoder> for SSZSnappyOutboundCodec { RequestType::BlobsByRoot(req) => req.blob_ids.as_ssz_bytes(), RequestType::DataColumnsByRange(req) => req.as_ssz_bytes(), RequestType::DataColumnsByRoot(req) => req.data_column_ids.as_ssz_bytes(), + RequestType::ExecutionProofsByRoot(req) => 
req.as_ssz_bytes(), RequestType::Ping(req) => req.as_ssz_bytes(), RequestType::LightClientBootstrap(req) => req.as_ssz_bytes(), RequestType::LightClientUpdatesByRange(req) => req.as_ssz_bytes(), @@ -568,6 +571,15 @@ fn handle_rpc_request( )?, }, ))), + SupportedProtocol::ExecutionProofsByRootV1 => { + let request = ExecutionProofsByRootRequest::from_ssz_bytes(decoded_buffer) + .map_err(RPCError::SSZDecodeError)?; + + request.validate(spec) + .map_err(|e| RPCError::InvalidData(e))?; + + Ok(Some(RequestType::ExecutionProofsByRoot(request))) + } SupportedProtocol::PingV1 => Ok(Some(RequestType::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -731,6 +743,11 @@ fn handle_rpc_response( ), )), }, + SupportedProtocol::ExecutionProofsByRootV1 => { + Ok(Some(RpcSuccessResponse::ExecutionProofsByRoot(Arc::new( + ExecutionProof::from_ssz_bytes(decoded_buffer)?, + )))) + } SupportedProtocol::PingV1 => Ok(Some(RpcSuccessResponse::Pong(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -910,9 +927,9 @@ mod tests { use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, - DataColumnsByRootIdentifier, EmptyBlock, Epoch, FixedBytesExtended, FullPayload, - KzgCommitment, KzgProof, Signature, SignedBeaconBlockHeader, Slot, - blob_sidecar::BlobIdentifier, data_column_sidecar::Cell, + DataColumnsByRootIdentifier, EmptyBlock, Epoch, ExecutionProofId, + FixedBytesExtended, FullPayload, KzgCommitment, KzgProof, Signature, + SignedBeaconBlockHeader, Slot, blob_sidecar::BlobIdentifier, data_column_sidecar::Cell, }; type Spec = types::MainnetEthSpec; @@ -1107,6 +1124,18 @@ mod tests { .unwrap() } + fn execution_proofs_by_root_request( + _fork_name: ForkName, + _spec: &ChainSpec, + ) -> ExecutionProofsByRootRequest { + ExecutionProofsByRootRequest::new( + Hash256::zero(), + vec![ExecutionProofId::new(0).unwrap()], + 2, + ) + .unwrap() + } + fn 
ping_message() -> Ping { Ping { data: 1 } } @@ -1261,6 +1290,9 @@ mod tests { RequestType::DataColumnsByRange(dcbrange) => { assert_eq!(decoded, RequestType::DataColumnsByRange(dcbrange)) } + RequestType::ExecutionProofsByRoot(exec_proofs) => { + assert_eq!(decoded, RequestType::ExecutionProofsByRoot(exec_proofs)) + } RequestType::Ping(ping) => { assert_eq!(decoded, RequestType::Ping(ping)) } @@ -2002,6 +2034,10 @@ mod tests { RequestType::BlocksByRoot(bbroot_request_v1(fork_name, &chain_spec)), RequestType::BlocksByRoot(bbroot_request_v2(fork_name, &chain_spec)), RequestType::DataColumnsByRoot(dcbroot_request(fork_name, &chain_spec)), + RequestType::ExecutionProofsByRoot(execution_proofs_by_root_request( + fork_name, + &chain_spec, + )), ] }; for fork_name in ForkName::list_all() { diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index b0ee6fea64b..d23c16f8fa1 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -93,6 +93,7 @@ pub struct RateLimiterConfig { pub(super) blobs_by_root_quota: Quota, pub(super) data_columns_by_root_quota: Quota, pub(super) data_columns_by_range_quota: Quota, + pub(super) execution_proofs_by_root_quota: Quota, pub(super) light_client_bootstrap_quota: Quota, pub(super) light_client_optimistic_update_quota: Quota, pub(super) light_client_finality_update_quota: Quota, @@ -122,6 +123,9 @@ impl RateLimiterConfig { Quota::n_every(NonZeroU64::new(16384).unwrap(), 10); pub const DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA: Quota = Quota::n_every(NonZeroU64::new(16384).unwrap(), 10); + // TODO(zkproofs): Configure this to be less arbitrary + pub const DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(128).unwrap(), 10); pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA: Quota = Quota::one_every(10); pub const 
DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA: Quota = Quota::one_every(10); @@ -141,6 +145,7 @@ impl Default for RateLimiterConfig { blobs_by_root_quota: Self::DEFAULT_BLOBS_BY_ROOT_QUOTA, data_columns_by_root_quota: Self::DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA, data_columns_by_range_quota: Self::DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA, + execution_proofs_by_root_quota: Self::DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA, light_client_bootstrap_quota: Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA, light_client_optimistic_update_quota: Self::DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA, @@ -201,6 +206,7 @@ impl FromStr for RateLimiterConfig { let mut blobs_by_root_quota = None; let mut data_columns_by_root_quota = None; let mut data_columns_by_range_quota = None; + let mut execution_proofs_by_root_quota = None; let mut light_client_bootstrap_quota = None; let mut light_client_optimistic_update_quota = None; let mut light_client_finality_update_quota = None; @@ -222,6 +228,9 @@ impl FromStr for RateLimiterConfig { Protocol::DataColumnsByRange => { data_columns_by_range_quota = data_columns_by_range_quota.or(quota) } + Protocol::ExecutionProofsByRoot => { + execution_proofs_by_root_quota = execution_proofs_by_root_quota.or(quota) + } Protocol::Ping => ping_quota = ping_quota.or(quota), Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota), Protocol::LightClientBootstrap => { @@ -257,6 +266,8 @@ impl FromStr for RateLimiterConfig { .unwrap_or(Self::DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA), data_columns_by_range_quota: data_columns_by_range_quota .unwrap_or(Self::DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA), + execution_proofs_by_root_quota: execution_proofs_by_root_quota + .unwrap_or(Self::DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA), light_client_bootstrap_quota: light_client_bootstrap_quota .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA), light_client_optimistic_update_quota: light_client_optimistic_update_quota diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs 
b/beacon_node/lighthouse_network/src/rpc/methods.rs index 9319973e597..dc7de931b36 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -16,9 +16,9 @@ use types::blob_sidecar::BlobIdentifier; use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnsByRootIdentifier, Epoch, EthSpec, - ForkContext, Hash256, LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, - blob_sidecar::BlobSidecar, + ExecutionProof, ExecutionProofId, ForkContext, Hash256, LightClientBootstrap, + LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, + RuntimeVariableList, SignedBeaconBlock, Slot, blob_sidecar::BlobSidecar, }; /// Maximum length of error message. @@ -540,6 +540,63 @@ impl DataColumnsByRootRequest { } } +/// Request execution proofs by block root and proof IDs. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct ExecutionProofsByRootRequest { + /// The block root we need proofs for + pub block_root: Hash256, + /// How many additional proofs we need + /// TODO(zkproofs): Remove. 
This can be inferred since `MIN_PROOFS_REQUIRED` + /// is a global value + pub count_needed: u64, + /// Proof IDs we already have (responder should exclude these) + pub already_have: Vec, +} + +impl ExecutionProofsByRootRequest { + pub fn new( + block_root: Hash256, + already_have: Vec, + count_needed: usize, + ) -> Result { + if already_have.len() > types::execution_proof::MAX_PROOFS { + return Err("Too many proof IDs in already_have"); + } + if count_needed == 0 { + return Err("count_needed must be > 0"); + } + if count_needed > types::execution_proof::MAX_PROOFS { + return Err("count_needed too large"); + } + Ok(Self { + block_root, + count_needed: count_needed as u64, + already_have, + }) + } + + pub fn validate(&self, _spec: &ChainSpec) -> Result<(), String> { + if self.already_have.len() > types::execution_proof::MAX_PROOFS { + return Err("Too many proof IDs in already_have".to_string()); + } + if self.count_needed == 0 { + return Err("count_needed must be > 0".to_string()); + } + if self.count_needed > types::execution_proof::MAX_PROOFS as u64 { + return Err(format!( + "count_needed too large: {} > {}", + self.count_needed, + types::execution_proof::MAX_PROOFS + )); + } + Ok(()) + } + + pub fn max_requested(&self) -> usize { + self.count_needed as usize + } +} + /// Request a number of beacon data columns from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] pub struct LightClientUpdatesByRangeRequest { @@ -607,6 +664,9 @@ pub enum RpcSuccessResponse { /// A response to a get DATA_COLUMN_SIDECARS_BY_RANGE request. DataColumnsByRange(Arc>), + /// A response to a get EXECUTION_PROOFS_BY_ROOT request. + ExecutionProofsByRoot(Arc), + /// A PONG response to a PING request. Pong(Ping), @@ -635,6 +695,9 @@ pub enum ResponseTermination { /// Data column sidecars by range stream termination. DataColumnsByRange, + /// Execution proofs by root stream termination. + ExecutionProofsByRoot, + /// Light client updates by range stream termination. 
LightClientUpdatesByRange, } @@ -648,6 +711,7 @@ impl ResponseTermination { ResponseTermination::BlobsByRoot => Protocol::BlobsByRoot, ResponseTermination::DataColumnsByRoot => Protocol::DataColumnsByRoot, ResponseTermination::DataColumnsByRange => Protocol::DataColumnsByRange, + ResponseTermination::ExecutionProofsByRoot => Protocol::ExecutionProofsByRoot, ResponseTermination::LightClientUpdatesByRange => Protocol::LightClientUpdatesByRange, } } @@ -743,6 +807,7 @@ impl RpcSuccessResponse { RpcSuccessResponse::BlobsByRoot(_) => Protocol::BlobsByRoot, RpcSuccessResponse::DataColumnsByRoot(_) => Protocol::DataColumnsByRoot, RpcSuccessResponse::DataColumnsByRange(_) => Protocol::DataColumnsByRange, + RpcSuccessResponse::ExecutionProofsByRoot(_) => Protocol::ExecutionProofsByRoot, RpcSuccessResponse::Pong(_) => Protocol::Ping, RpcSuccessResponse::MetaData(_) => Protocol::MetaData, RpcSuccessResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, @@ -767,7 +832,8 @@ impl RpcSuccessResponse { Self::LightClientFinalityUpdate(r) => Some(r.get_attested_header_slot()), Self::LightClientOptimisticUpdate(r) => Some(r.get_slot()), Self::LightClientUpdatesByRange(r) => Some(r.attested_header_slot()), - Self::MetaData(_) | Self::Status(_) | Self::Pong(_) => None, + // TODO(zkproofs): Change this when we add Slot to ExecutionProof + Self::ExecutionProofsByRoot(_) | Self::MetaData(_) | Self::Status(_) | Self::Pong(_) => None, } } } @@ -827,6 +893,9 @@ impl std::fmt::Display for RpcSuccessResponse { sidecar.slot() ) } + RpcSuccessResponse::ExecutionProofsByRoot(proof) => { + write!(f, "ExecutionProofsByRoot: Block root: {}", proof.block_root) + } RpcSuccessResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RpcSuccessResponse::MetaData(metadata) => { write!(f, "Metadata: {}", metadata.seq_number()) @@ -937,3 +1006,15 @@ impl std::fmt::Display for DataColumnsByRootRequest { ) } } + +impl std::fmt::Display for ExecutionProofsByRootRequest { + fn fmt(&self, f: &mut 
std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Request: ExecutionProofsByRoot: Block Root: {}, Already Have: {}, Count Needed: {}", + self.block_root, + self.already_have.len(), + self.count_needed + ) + } +} \ No newline at end of file diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 228a74f08cc..4a08d2859e1 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -17,10 +17,11 @@ use tokio_util::{ }; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BlobSidecar, ChainSpec, DataColumnSidecar, - EmptyBlock, Epoch, EthSpec, EthSpecId, ForkContext, ForkName, LightClientBootstrap, - LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, - LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientUpdate, - MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock, + EmptyBlock, Epoch, EthSpec, EthSpecId, ExecutionProof, ForkContext, ForkName, + LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, + LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, + LightClientOptimisticUpdateAltair, LightClientUpdate, MainnetEthSpec, MinimalEthSpec, + Signature, SignedBeaconBlock, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is @@ -247,6 +248,9 @@ pub enum Protocol { /// The `DataColumnSidecarsByRange` protocol name. #[strum(serialize = "data_column_sidecars_by_range")] DataColumnsByRange, + /// The `ExecutionProofsByRoot` protocol name. + #[strum(serialize = "execution_proofs_by_root")] + ExecutionProofsByRoot, /// The `Ping` protocol name. Ping, /// The `MetaData` protocol name. 
@@ -277,6 +281,7 @@ impl Protocol { Protocol::BlobsByRoot => Some(ResponseTermination::BlobsByRoot), Protocol::DataColumnsByRoot => Some(ResponseTermination::DataColumnsByRoot), Protocol::DataColumnsByRange => Some(ResponseTermination::DataColumnsByRange), + Protocol::ExecutionProofsByRoot => Some(ResponseTermination::ExecutionProofsByRoot), Protocol::Ping => None, Protocol::MetaData => None, Protocol::LightClientBootstrap => None, @@ -307,6 +312,7 @@ pub enum SupportedProtocol { BlobsByRootV1, DataColumnsByRootV1, DataColumnsByRangeV1, + ExecutionProofsByRootV1, PingV1, MetaDataV1, MetaDataV2, @@ -331,6 +337,7 @@ impl SupportedProtocol { SupportedProtocol::BlobsByRootV1 => "1", SupportedProtocol::DataColumnsByRootV1 => "1", SupportedProtocol::DataColumnsByRangeV1 => "1", + SupportedProtocol::ExecutionProofsByRootV1 => "1", SupportedProtocol::PingV1 => "1", SupportedProtocol::MetaDataV1 => "1", SupportedProtocol::MetaDataV2 => "2", @@ -355,6 +362,7 @@ impl SupportedProtocol { SupportedProtocol::BlobsByRootV1 => Protocol::BlobsByRoot, SupportedProtocol::DataColumnsByRootV1 => Protocol::DataColumnsByRoot, SupportedProtocol::DataColumnsByRangeV1 => Protocol::DataColumnsByRange, + SupportedProtocol::ExecutionProofsByRootV1 => Protocol::ExecutionProofsByRoot, SupportedProtocol::PingV1 => Protocol::Ping, SupportedProtocol::MetaDataV1 => Protocol::MetaData, SupportedProtocol::MetaDataV2 => Protocol::MetaData, @@ -405,6 +413,9 @@ impl SupportedProtocol { ProtocolId::new(SupportedProtocol::DataColumnsByRangeV1, Encoding::SSZSnappy), ]); } + if fork_context.spec.is_zkvm_enabled() { + supported.push(ProtocolId::new(SupportedProtocol::ExecutionProofsByRootV1, Encoding::SSZSnappy)); + } supported } } @@ -517,6 +528,7 @@ impl ProtocolId { DataColumnsByRangeRequest::ssz_min_len(), DataColumnsByRangeRequest::ssz_max_len::(), ), + Protocol::ExecutionProofsByRoot => RpcLimits::new(0, spec.max_blocks_by_root_request), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), 
::ssz_fixed_len(), @@ -553,6 +565,7 @@ impl ProtocolId { Protocol::DataColumnsByRange => { rpc_data_column_limits::(fork_context.current_fork_epoch(), &fork_context.spec) } + Protocol::ExecutionProofsByRoot => rpc_execution_proof_limits(), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -594,6 +607,7 @@ impl ProtocolId { | SupportedProtocol::StatusV2 | SupportedProtocol::BlocksByRootV1 | SupportedProtocol::BlocksByRangeV1 + | SupportedProtocol::ExecutionProofsByRootV1 | SupportedProtocol::PingV1 | SupportedProtocol::MetaDataV1 | SupportedProtocol::MetaDataV2 @@ -643,6 +657,14 @@ pub fn rpc_data_column_limits( ) } +pub fn rpc_execution_proof_limits() -> RpcLimits { + // TODO(zkproofs): Can max proof size change over hardforks? + RpcLimits::new( + ExecutionProof::min_size(), + ExecutionProof::max_size(), + ) +} + /* Inbound upgrade */ // The inbound protocol reads the request, decodes it and returns the stream to the protocol @@ -722,6 +744,7 @@ pub enum RequestType { BlobsByRoot(BlobsByRootRequest), DataColumnsByRoot(DataColumnsByRootRequest), DataColumnsByRange(DataColumnsByRangeRequest), + ExecutionProofsByRoot(ExecutionProofsByRootRequest), LightClientBootstrap(LightClientBootstrapRequest), LightClientOptimisticUpdate, LightClientFinalityUpdate, @@ -745,6 +768,7 @@ impl RequestType { RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64, RequestType::DataColumnsByRoot(req) => req.max_requested() as u64, RequestType::DataColumnsByRange(req) => req.max_requested::(), + RequestType::ExecutionProofsByRoot(req) => req.max_requested() as u64, RequestType::Ping(_) => 1, RequestType::MetaData(_) => 1, RequestType::LightClientBootstrap(_) => 1, @@ -774,6 +798,7 @@ impl RequestType { RequestType::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, RequestType::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, RequestType::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, + RequestType::ExecutionProofsByRoot(_) 
=> SupportedProtocol::ExecutionProofsByRootV1, RequestType::Ping(_) => SupportedProtocol::PingV1, RequestType::MetaData(req) => match req { MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, @@ -805,6 +830,7 @@ impl RequestType { RequestType::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, RequestType::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, RequestType::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, + RequestType::ExecutionProofsByRoot(_) => ResponseTermination::ExecutionProofsByRoot, RequestType::Status(_) => unreachable!(), RequestType::Goodbye(_) => unreachable!(), RequestType::Ping(_) => unreachable!(), @@ -851,6 +877,10 @@ impl RequestType { SupportedProtocol::DataColumnsByRangeV1, Encoding::SSZSnappy, )], + RequestType::ExecutionProofsByRoot(_) => vec![ProtocolId::new( + SupportedProtocol::ExecutionProofsByRootV1, + Encoding::SSZSnappy, + )], RequestType::Ping(_) => vec![ProtocolId::new( SupportedProtocol::PingV1, Encoding::SSZSnappy, @@ -889,6 +919,7 @@ impl RequestType { RequestType::BlobsByRoot(_) => false, RequestType::DataColumnsByRoot(_) => false, RequestType::DataColumnsByRange(_) => false, + RequestType::ExecutionProofsByRoot(_) => false, RequestType::Ping(_) => true, RequestType::MetaData(_) => true, RequestType::LightClientBootstrap(_) => true, @@ -1002,6 +1033,9 @@ impl std::fmt::Display for RequestType { RequestType::DataColumnsByRange(req) => { write!(f, "Data columns by range: {:?}", req) } + RequestType::ExecutionProofsByRoot(req) => { + write!(f, "Execution proofs by root: {:?}", req) + } RequestType::Ping(ping) => write!(f, "Ping: {}", ping.data), RequestType::MetaData(_) => write!(f, "MetaData request"), RequestType::LightClientBootstrap(bootstrap) => { diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 8b364f506cc..46a26c43630 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ 
b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -105,6 +105,8 @@ pub struct RPCRateLimiter { dcbroot_rl: Limiter, /// DataColumnsByRange rate limiter. dcbrange_rl: Limiter, + /// ExecutionProofsByRoot rate limiter. + execution_proofs_by_root_rl: Limiter, /// LightClientBootstrap rate limiter. lc_bootstrap_rl: Limiter, /// LightClientOptimisticUpdate rate limiter. @@ -148,6 +150,8 @@ pub struct RPCRateLimiterBuilder { dcbroot_quota: Option, /// Quota for the DataColumnsByRange protocol. dcbrange_quota: Option, + /// Quota for the ExecutionProofsByRoot protocol. + execution_proofs_by_root_quota: Option, /// Quota for the LightClientBootstrap protocol. lcbootstrap_quota: Option, /// Quota for the LightClientOptimisticUpdate protocol. @@ -173,6 +177,7 @@ impl RPCRateLimiterBuilder { Protocol::BlobsByRoot => self.blbroot_quota = q, Protocol::DataColumnsByRoot => self.dcbroot_quota = q, Protocol::DataColumnsByRange => self.dcbrange_quota = q, + Protocol::ExecutionProofsByRoot => self.execution_proofs_by_root_quota = q, Protocol::LightClientBootstrap => self.lcbootstrap_quota = q, Protocol::LightClientOptimisticUpdate => self.lc_optimistic_update_quota = q, Protocol::LightClientFinalityUpdate => self.lc_finality_update_quota = q, @@ -221,6 +226,10 @@ impl RPCRateLimiterBuilder { .dcbrange_quota .ok_or("DataColumnsByRange quota not specified")?; + let execution_proofs_by_root_quota = self + .execution_proofs_by_root_quota + .ok_or("ExecutionProofsByRoot quota not specified")?; + // create the rate limiters let ping_rl = Limiter::from_quota(ping_quota)?; let metadata_rl = Limiter::from_quota(metadata_quota)?; @@ -232,6 +241,7 @@ impl RPCRateLimiterBuilder { let blbroot_rl = Limiter::from_quota(blbroots_quota)?; let dcbroot_rl = Limiter::from_quota(dcbroot_quota)?; let dcbrange_rl = Limiter::from_quota(dcbrange_quota)?; + let execution_proofs_by_root_rl = Limiter::from_quota(execution_proofs_by_root_quota)?; let lc_bootstrap_rl = 
Limiter::from_quota(lc_bootstrap_quota)?; let lc_optimistic_update_rl = Limiter::from_quota(lc_optimistic_update_quota)?; let lc_finality_update_rl = Limiter::from_quota(lc_finality_update_quota)?; @@ -255,6 +265,7 @@ impl RPCRateLimiterBuilder { blbroot_rl, dcbroot_rl, dcbrange_rl, + execution_proofs_by_root_rl, lc_bootstrap_rl, lc_optimistic_update_rl, lc_finality_update_rl, @@ -308,6 +319,7 @@ impl RPCRateLimiter { blobs_by_root_quota, data_columns_by_root_quota, data_columns_by_range_quota, + execution_proofs_by_root_quota, light_client_bootstrap_quota, light_client_optimistic_update_quota, light_client_finality_update_quota, @@ -325,6 +337,7 @@ impl RPCRateLimiter { .set_quota(Protocol::BlobsByRoot, blobs_by_root_quota) .set_quota(Protocol::DataColumnsByRoot, data_columns_by_root_quota) .set_quota(Protocol::DataColumnsByRange, data_columns_by_range_quota) + .set_quota(Protocol::ExecutionProofsByRoot, execution_proofs_by_root_quota) .set_quota(Protocol::LightClientBootstrap, light_client_bootstrap_quota) .set_quota( Protocol::LightClientOptimisticUpdate, @@ -372,6 +385,7 @@ impl RPCRateLimiter { Protocol::BlobsByRoot => &mut self.blbroot_rl, Protocol::DataColumnsByRoot => &mut self.dcbroot_rl, Protocol::DataColumnsByRange => &mut self.dcbrange_rl, + Protocol::ExecutionProofsByRoot => &mut self.execution_proofs_by_root_rl, Protocol::LightClientBootstrap => &mut self.lc_bootstrap_rl, Protocol::LightClientOptimisticUpdate => &mut self.lc_optimistic_update_rl, Protocol::LightClientFinalityUpdate => &mut self.lc_finality_update_rl, @@ -396,6 +410,7 @@ impl RPCRateLimiter { blbroot_rl, dcbroot_rl, dcbrange_rl, + execution_proofs_by_root_rl, lc_bootstrap_rl, lc_optimistic_update_rl, lc_finality_update_rl, @@ -413,6 +428,7 @@ impl RPCRateLimiter { blbroot_rl.prune(time_since_start); dcbrange_rl.prune(time_since_start); dcbroot_rl.prune(time_since_start); + execution_proofs_by_root_rl.prune(time_since_start); lc_bootstrap_rl.prune(time_since_start); 
lc_optimistic_update_rl.prune(time_since_start); lc_finality_update_rl.prune(time_since_start); diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index e37f4131a76..4bf70542b6b 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -17,8 +17,9 @@ use tracing::{Instrument, debug, error, info_span, warn}; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EmptyBlock, Epoch, - EthSpec, FixedBytesExtended, ForkName, Hash256, KzgCommitment, KzgProof, MinimalEthSpec, - RuntimeVariableList, Signature, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + EthSpec, ExecutionBlockHash, ExecutionProof, ExecutionProofId, FixedBytesExtended, + ForkName, Hash256, KzgCommitment, KzgProof, MinimalEthSpec, RuntimeVariableList, Signature, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; type E = MinimalEthSpec; @@ -1732,3 +1733,374 @@ fn test_active_requests() { } }) } + +// Tests ExecutionProofsByRoot RPC - basic single proof request +#[test] +#[allow(clippy::single_match)] +fn test_tcp_execution_proofs_by_root_single() { + // Set up the logging. 
+ let log_level = "debug"; + let enable_logging = true; + let _subscriber = build_tracing_subscriber(log_level, enable_logging); + + let spec = Arc::new(spec_with_all_forks_enabled()); + let current_fork_name = ForkName::Fulu; + + let rt = Arc::new(Runtime::new().unwrap()); + rt.block_on(async { + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + current_fork_name, + spec.clone(), + Protocol::Tcp, + false, + None, + ) + .await; + + let block_root = Hash256::random(); + let block_hash = ExecutionBlockHash::from_root(Hash256::random()); + let subnet_id = ExecutionProofId::new(0).unwrap(); + + // ExecutionProofsByRoot Request + let rpc_request = RequestType::ExecutionProofsByRoot( + ExecutionProofsByRootRequest::new( + block_root, + vec![], // No proofs already have + 2, // Request 2 proofs + ) + .unwrap(), + ); + + // ExecutionProofsByRoot Response + let proof = Arc::new( + ExecutionProof::new( + subnet_id, + Slot::new(100), + block_hash, + block_root, + vec![1, 2, 3, 4], + ) + .unwrap(), + ); + let rpc_response = Response::ExecutionProofsByRoot(Some(proof.clone())); + + // Build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { + debug!("Sending RPC"); + sender + .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) + .unwrap(); + } + NetworkEvent::ResponseReceived { + peer_id: _, + app_request_id: AppRequestId::Router, + response, + } => match response { + Response::ExecutionProofsByRoot(Some(received_proof)) => { + debug!("Proof received"); + assert_eq!(received_proof.block_root, block_root); + assert_eq!(received_proof.block_hash, block_hash); + assert_eq!(received_proof.proof_id, subnet_id); + } + Response::ExecutionProofsByRoot(None) => { + debug!("Stream terminated"); + return; + } + _ => {} + }, + _ => {} + } + } + } + .instrument(info_span!("Sender")); + + // Build the receiver future + let receiver_future = async { + 
loop { + match receiver.next_event().await { + NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + } => { + if request_type == rpc_request { + debug!("Receiver got request"); + // Send the proof + receiver.send_response(peer_id, inbound_request_id, rpc_response.clone()); + // Send stream termination + receiver.send_response( + peer_id, + inbound_request_id, + Response::ExecutionProofsByRoot(None), + ); + debug!("Sent proof and termination"); + } + } + _ => {} + } + } + } + .instrument(info_span!("Receiver")); + + tokio::select! { + _ = sender_future => {} + _ = receiver_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); + } + } + }) +} + +// Tests ExecutionProofsByRoot RPC - multiple proofs chunked response +#[test] +#[allow(clippy::single_match)] +fn test_tcp_execution_proofs_by_root_chunked() { + // Set up the logging. + let log_level = "debug"; + let enable_logging = true; + let _subscriber = build_tracing_subscriber(log_level, enable_logging); + + let spec = Arc::new(spec_with_all_forks_enabled()); + let current_fork_name = ForkName::Deneb; + + let messages_to_send = 3; + + let rt = Arc::new(Runtime::new().unwrap()); + rt.block_on(async { + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + current_fork_name, + spec.clone(), + Protocol::Tcp, + false, + None, + ) + .await; + + let block_root = Hash256::random(); + let block_hash = ExecutionBlockHash::from_root(Hash256::random()); + let proof_ids = vec![ + ExecutionProofId::new(0).unwrap(), + ExecutionProofId::new(1).unwrap(), + ExecutionProofId::new(2).unwrap(), + ]; + assert_eq!(proof_ids.len(), messages_to_send); + + // ExecutionProofsByRoot Request for multiple proofs + let rpc_request = RequestType::ExecutionProofsByRoot( + ExecutionProofsByRootRequest::new( + block_root, + vec![], + proof_ids.len(), + ) + .unwrap(), + ); + + // Create proofs for each proof ID + let proofs: Vec> = proof_ids + .iter() + 
.map(|subnet_id| { + Arc::new( + ExecutionProof::new( + *subnet_id, + Slot::new(100), + block_hash, + block_root, + vec![1, 2, 3, 4], + ) + .unwrap(), + ) + }) + .collect(); + + let mut messages_received = 0; + + // Build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { + debug!("Sending RPC"); + sender + .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) + .unwrap(); + } + NetworkEvent::ResponseReceived { + peer_id: _, + app_request_id: AppRequestId::Router, + response, + } => match response { + Response::ExecutionProofsByRoot(Some(received_proof)) => { + debug!("Chunk received"); + assert_eq!(received_proof.block_root, block_root); + assert_eq!(received_proof.block_hash, block_hash); + messages_received += 1; + } + Response::ExecutionProofsByRoot(None) => { + debug!("Stream terminated"); + assert_eq!(messages_received, messages_to_send); + return; + } + _ => {} + }, + _ => {} + } + } + } + .instrument(info_span!("Sender")); + + // Build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + } => { + if request_type == rpc_request { + debug!("Receiver got request"); + // Send all proofs + for proof in &proofs { + receiver.send_response( + peer_id, + inbound_request_id, + Response::ExecutionProofsByRoot(Some(proof.clone())), + ); + debug!("Sent proof chunk"); + } + // Send stream termination + receiver.send_response( + peer_id, + inbound_request_id, + Response::ExecutionProofsByRoot(None), + ); + debug!("Sent termination"); + } + } + _ => {} + } + } + } + .instrument(info_span!("Receiver")); + + tokio::select! 
{ + _ = sender_future => {} + _ = receiver_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); + } + } + }) +} + +// Tests ExecutionProofsByRoot RPC - empty response (peer has no proofs) +#[test] +#[allow(clippy::single_match)] +fn test_tcp_execution_proofs_by_root_empty_response() { + // Set up the logging. + let log_level = "debug"; + let enable_logging = true; + let _subscriber = build_tracing_subscriber(log_level, enable_logging); + + let spec = Arc::new(spec_with_all_forks_enabled()); + let current_fork_name = ForkName::Fulu; + + let rt = Arc::new(Runtime::new().unwrap()); + rt.block_on(async { + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + current_fork_name, + spec.clone(), + Protocol::Tcp, + false, + None, + ) + .await; + + let block_root = Hash256::random(); + + let rpc_request = RequestType::ExecutionProofsByRoot( + ExecutionProofsByRootRequest::new( + block_root, + vec![], + 2, + ) + .unwrap(), + ); + + let mut received_termination = false; + + // Build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { + debug!("Sending RPC"); + sender + .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) + .unwrap(); + } + NetworkEvent::ResponseReceived { + peer_id: _, + app_request_id: AppRequestId::Router, + response, + } => match response { + Response::ExecutionProofsByRoot(Some(_)) => { + panic!("Should not receive any proofs in empty response test"); + } + Response::ExecutionProofsByRoot(None) => { + debug!("Stream terminated (empty response)"); + received_termination = true; + return; + } + _ => {} + }, + _ => {} + } + } + } + .instrument(info_span!("Sender")); + + // Build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + } => { + if request_type == 
rpc_request { + debug!("Receiver got request"); + // Send only stream termination (no proofs) + receiver.send_response( + peer_id, + inbound_request_id, + Response::ExecutionProofsByRoot(None), + ); + debug!("Sent empty response (termination only)"); + } + } + _ => {} + } + } + } + .instrument(info_span!("Receiver")); + + tokio::select! { + _ = sender_future => { + assert!(received_termination, "Should have received stream termination"); + } + _ = receiver_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); + } + } + }) +} diff --git a/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs b/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs new file mode 100644 index 00000000000..c3fad2cb0ac --- /dev/null +++ b/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs @@ -0,0 +1,72 @@ +use lighthouse_network::rpc::methods::ExecutionProofsByRootRequest; +use std::sync::Arc; +use types::{EthSpec, ExecutionProof, ExecutionProofId, Hash256}; + +use super::{ActiveRequestItems, LookupVerifyError}; + +#[derive(Debug, Clone)] +pub struct ExecutionProofsByRootSingleBlockRequest { + pub block_root: Hash256, + pub already_have: Vec, + pub count_needed: usize, +} + +impl ExecutionProofsByRootSingleBlockRequest { + pub fn into_request(self) -> Result { + ExecutionProofsByRootRequest::new( + self.block_root, + self.already_have, + self.count_needed, + ) + .map_err(|e| e.to_string()) + } +} + +pub struct ExecutionProofsByRootRequestItems { + request: ExecutionProofsByRootSingleBlockRequest, + items: Vec>, + _phantom: std::marker::PhantomData, +} + +impl ExecutionProofsByRootRequestItems { + pub fn new(request: ExecutionProofsByRootSingleBlockRequest) -> Self { + Self { + request, + items: vec![], + _phantom: std::marker::PhantomData, + } + } +} + +impl ActiveRequestItems for ExecutionProofsByRootRequestItems { + type Item = Arc; + + /// Appends a proof to this 
multi-item request. + /// Note: This is very similar to `DataColumnsByRootSingleBlockRequest` + fn add(&mut self, proof: Self::Item) -> Result { + let block_root = proof.block_root; + if self.request.block_root != block_root { + return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); + } + + // Verify proof is not in the already_have list + // We should not receive proofs we already have + if self.request.already_have.contains(&proof.proof_id) { + return Err(LookupVerifyError::UnrequestedProof(proof.proof_id)); + } + + // Check for duplicate proof IDs + if self.items.iter().any(|p| p.proof_id == proof.proof_id) { + return Err(LookupVerifyError::DuplicatedProofIDs(proof.proof_id)); + } + + self.items.push(proof); + + // We've received all requested proofs when we have count_needed proofs + Ok(self.items.len() >= self.request.count_needed) + } + + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) + } +} \ No newline at end of file From 665b485b1af46a1de92207b48a62d0cabdee5acd Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 11:02:18 +0000 Subject: [PATCH 26/67] commit debug tests --- .../requests/execution_proofs_by_root.rs | 143 ++++++++++++++++++ 1 file changed, 143 insertions(+) diff --git a/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs b/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs index c3fad2cb0ac..f7b2cfc56a9 100644 --- a/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs @@ -69,4 +69,147 @@ impl ActiveRequestItems for ExecutionProofsByRootRequestItems { fn consume(&mut self) -> Vec { std::mem::take(&mut self.items) } +} + + +#[cfg(test)] +mod tests { + use super::*; + use types::{ExecutionBlockHash, Hash256, MinimalEthSpec as E}; + + fn make_proof( + block_root: Hash256, + subnet_id: u8, + block_hash: ExecutionBlockHash, 
+ ) -> Arc { + Arc::new( + ExecutionProof::new( + ExecutionProofId::new(subnet_id).unwrap(), + types::Slot::new(0), + block_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap(), + ) + } + + #[test] + fn test_add_proof_success() { + let block_root = Hash256::random(); + let request = ExecutionProofsByRootSingleBlockRequest { + block_root, + already_have: vec![], + count_needed: 2, + }; + + let mut items = ExecutionProofsByRootRequestItems::::new(request); + let proof = make_proof(block_root, 0, ExecutionBlockHash::zero()); + + let result = items.add(proof); + assert!(result.is_ok()); + assert!(!result.unwrap()); // Not complete yet (need 2) + } + + #[test] + fn test_add_proof_wrong_block_root() { + let block_root = Hash256::random(); + let wrong_root = Hash256::random(); + let request = ExecutionProofsByRootSingleBlockRequest { + block_root, + already_have: vec![], + count_needed: 1, + }; + + let mut items = ExecutionProofsByRootRequestItems::::new(request); + let proof = make_proof(wrong_root, 0, ExecutionBlockHash::zero()); + + let result = items.add(proof); + assert!(matches!( + result, + Err(LookupVerifyError::UnrequestedBlockRoot(_)) + )); + } + + #[test] + fn test_add_proof_already_have() { + let block_root = Hash256::random(); + let request = ExecutionProofsByRootSingleBlockRequest { + block_root, + already_have: vec![ExecutionProofId::new(0).unwrap()], + count_needed: 2, + }; + + let mut items = ExecutionProofsByRootRequestItems::::new(request); + let proof = make_proof(block_root, 0, ExecutionBlockHash::zero()); // proof 0 in already_have + + let result = items.add(proof); + assert!(matches!( + result, + Err(LookupVerifyError::UnrequestedProof(_)) + )); + } + + #[test] + fn test_add_duplicate_subnet() { + let block_root = Hash256::random(); + let request = ExecutionProofsByRootSingleBlockRequest { + block_root, + already_have: vec![], + count_needed: 1, + }; + + let mut items = ExecutionProofsByRootRequestItems::::new(request); + let proof1 = 
make_proof(block_root, 0, ExecutionBlockHash::zero()); + let proof2 = make_proof(block_root, 0, ExecutionBlockHash::zero()); + + assert!(items.add(proof1).is_ok()); + let result = items.add(proof2); + assert!(matches!( + result, + Err(LookupVerifyError::DuplicatedProofIDs(_)) + )); + } + + #[test] + fn test_complete_when_count_reached() { + let block_root = Hash256::random(); + let request = ExecutionProofsByRootSingleBlockRequest { + block_root, + already_have: vec![], + count_needed: 2, + }; + + let mut items = ExecutionProofsByRootRequestItems::::new(request); + let proof1 = make_proof(block_root, 0, ExecutionBlockHash::zero()); + let proof2 = make_proof(block_root, 1, ExecutionBlockHash::zero()); + + assert!(!items.add(proof1).unwrap()); // Not complete + assert!(items.add(proof2).unwrap()); // Complete! + + let received = items.consume(); + assert_eq!(received.len(), 2); + } + + #[test] + fn test_already_have_excludes_proofs() { + let block_root = Hash256::random(); + let request = ExecutionProofsByRootSingleBlockRequest { + block_root, + already_have: vec![ExecutionProofId::new(0).unwrap(), ExecutionProofId::new(1).unwrap()], + count_needed: 2, + }; + + let mut items = ExecutionProofsByRootRequestItems::::new(request); + + // Should accept proofs not in already_have + let proof2 = make_proof(block_root, 2, ExecutionBlockHash::zero()); + let proof3 = make_proof(block_root, 3, ExecutionBlockHash::zero()); + + assert!(!items.add(proof2).unwrap()); // Not complete + assert!(items.add(proof3).unwrap()); // Complete! 
+ + let received = items.consume(); + assert_eq!(received.len(), 2); + } } \ No newline at end of file From 9d5aaccd38d13d4f0b7dddce6cac6e8d8e21c1d8 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 11:02:47 +0000 Subject: [PATCH 27/67] remove debug tests --- .../requests/execution_proofs_by_root.rs | 143 ------------------ 1 file changed, 143 deletions(-) diff --git a/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs b/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs index f7b2cfc56a9..c3fad2cb0ac 100644 --- a/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs @@ -69,147 +69,4 @@ impl ActiveRequestItems for ExecutionProofsByRootRequestItems { fn consume(&mut self) -> Vec { std::mem::take(&mut self.items) } -} - - -#[cfg(test)] -mod tests { - use super::*; - use types::{ExecutionBlockHash, Hash256, MinimalEthSpec as E}; - - fn make_proof( - block_root: Hash256, - subnet_id: u8, - block_hash: ExecutionBlockHash, - ) -> Arc { - Arc::new( - ExecutionProof::new( - ExecutionProofId::new(subnet_id).unwrap(), - types::Slot::new(0), - block_hash, - block_root, - vec![1, 2, 3], - ) - .unwrap(), - ) - } - - #[test] - fn test_add_proof_success() { - let block_root = Hash256::random(); - let request = ExecutionProofsByRootSingleBlockRequest { - block_root, - already_have: vec![], - count_needed: 2, - }; - - let mut items = ExecutionProofsByRootRequestItems::::new(request); - let proof = make_proof(block_root, 0, ExecutionBlockHash::zero()); - - let result = items.add(proof); - assert!(result.is_ok()); - assert!(!result.unwrap()); // Not complete yet (need 2) - } - - #[test] - fn test_add_proof_wrong_block_root() { - let block_root = Hash256::random(); - let wrong_root = Hash256::random(); - let request = ExecutionProofsByRootSingleBlockRequest { - block_root, - 
already_have: vec![], - count_needed: 1, - }; - - let mut items = ExecutionProofsByRootRequestItems::::new(request); - let proof = make_proof(wrong_root, 0, ExecutionBlockHash::zero()); - - let result = items.add(proof); - assert!(matches!( - result, - Err(LookupVerifyError::UnrequestedBlockRoot(_)) - )); - } - - #[test] - fn test_add_proof_already_have() { - let block_root = Hash256::random(); - let request = ExecutionProofsByRootSingleBlockRequest { - block_root, - already_have: vec![ExecutionProofId::new(0).unwrap()], - count_needed: 2, - }; - - let mut items = ExecutionProofsByRootRequestItems::::new(request); - let proof = make_proof(block_root, 0, ExecutionBlockHash::zero()); // proof 0 in already_have - - let result = items.add(proof); - assert!(matches!( - result, - Err(LookupVerifyError::UnrequestedProof(_)) - )); - } - - #[test] - fn test_add_duplicate_subnet() { - let block_root = Hash256::random(); - let request = ExecutionProofsByRootSingleBlockRequest { - block_root, - already_have: vec![], - count_needed: 1, - }; - - let mut items = ExecutionProofsByRootRequestItems::::new(request); - let proof1 = make_proof(block_root, 0, ExecutionBlockHash::zero()); - let proof2 = make_proof(block_root, 0, ExecutionBlockHash::zero()); - - assert!(items.add(proof1).is_ok()); - let result = items.add(proof2); - assert!(matches!( - result, - Err(LookupVerifyError::DuplicatedProofIDs(_)) - )); - } - - #[test] - fn test_complete_when_count_reached() { - let block_root = Hash256::random(); - let request = ExecutionProofsByRootSingleBlockRequest { - block_root, - already_have: vec![], - count_needed: 2, - }; - - let mut items = ExecutionProofsByRootRequestItems::::new(request); - let proof1 = make_proof(block_root, 0, ExecutionBlockHash::zero()); - let proof2 = make_proof(block_root, 1, ExecutionBlockHash::zero()); - - assert!(!items.add(proof1).unwrap()); // Not complete - assert!(items.add(proof2).unwrap()); // Complete! 
- - let received = items.consume(); - assert_eq!(received.len(), 2); - } - - #[test] - fn test_already_have_excludes_proofs() { - let block_root = Hash256::random(); - let request = ExecutionProofsByRootSingleBlockRequest { - block_root, - already_have: vec![ExecutionProofId::new(0).unwrap(), ExecutionProofId::new(1).unwrap()], - count_needed: 2, - }; - - let mut items = ExecutionProofsByRootRequestItems::::new(request); - - // Should accept proofs not in already_have - let proof2 = make_proof(block_root, 2, ExecutionBlockHash::zero()); - let proof3 = make_proof(block_root, 3, ExecutionBlockHash::zero()); - - assert!(!items.add(proof2).unwrap()); // Not complete - assert!(items.add(proof3).unwrap()); // Complete! - - let received = items.consume(); - assert_eq!(received.len(), 2); - } } \ No newline at end of file From 4b9802e99c55f02a3b1f0d73f13e725406717c65 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 12:22:20 +0000 Subject: [PATCH 28/67] commit GossipTopics/Discovery/PeerManagement --- .../lighthouse_network/src/discovery/enr.rs | 21 ++++++- .../lighthouse_network/src/discovery/mod.rs | 12 +++- .../src/discovery/subnet_predicate.rs | 4 ++ .../src/peer_manager/config.rs | 3 + .../src/peer_manager/mod.rs | 56 ++++++++++++++++++- .../src/peer_manager/peerdb/peer_info.rs | 13 +++++ .../src/service/api_types.rs | 10 +++- .../src/service/gossip_cache.rs | 13 +++++ .../lighthouse_network/src/service/mod.rs | 21 +++++++ .../lighthouse_network/src/service/utils.rs | 2 + .../lighthouse_network/src/types/pubsub.rs | 29 +++++++++- .../lighthouse_network/src/types/subnet.rs | 7 +++ .../lighthouse_network/src/types/topics.rs | 18 +++++- .../lighthouse_network/tests/common.rs | 2 + 14 files changed, 204 insertions(+), 7 deletions(-) diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index bb9ff299c5d..2b6a519c2b9 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs 
+++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -29,6 +29,8 @@ pub const ATTESTATION_BITFIELD_ENR_KEY: &str = "attnets"; pub const SYNC_COMMITTEE_BITFIELD_ENR_KEY: &str = "syncnets"; /// The ENR field specifying the peerdas custody group count. pub const PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY: &str = "cgc"; +/// The ENR field specifying whether zkVM execution proofs are enabled. +pub const ZKVM_ENABLED_ENR_KEY: &str = "zkvm"; /// Extension trait for ENR's within Eth2. pub trait Eth2Enr { @@ -43,6 +45,9 @@ pub trait Eth2Enr { /// The peerdas custody group count associated with the ENR. fn custody_group_count(&self, spec: &ChainSpec) -> Result; + /// Whether zkVM execution proofs are enabled for this node. + fn zkvm_enabled(&self) -> bool; + /// The next fork digest associated with the ENR. fn next_fork_digest(&self) -> Result<[u8; 4], &'static str>; @@ -85,6 +90,13 @@ impl Eth2Enr for Enr { } } + fn zkvm_enabled(&self) -> bool { + // If the key exists and is true, zkVM is enabled, otherwise false + self.get_decodable::(ZKVM_ENABLED_ENR_KEY) + .and_then(|result| result.ok()) + .unwrap_or(false) + } + fn next_fork_digest(&self) -> Result<[u8; 4], &'static str> { self.get_decodable::<[u8; 4]>(NEXT_FORK_DIGEST_ENR_KEY) .ok_or("ENR next fork digest non-existent")? 
@@ -278,6 +290,10 @@ pub fn build_enr( &bitfield.as_ssz_bytes().into(), ); + if spec.is_zkvm_enabled() { + builder.add_value(ZKVM_ENABLED_ENR_KEY, &true); + } + // only set `cgc` and `nfd` if PeerDAS fork (Fulu) epoch has been scheduled if spec.is_peer_das_scheduled() { let custody_group_count = if let Some(cgc) = custody_group_count { @@ -317,11 +333,12 @@ fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool { && (local_enr.udp4().is_none() || local_enr.udp4() == disk_enr.udp4()) && (local_enr.udp6().is_none() || local_enr.udp6() == disk_enr.udp6()) // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY and - // PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY key to match, otherwise we use a new ENR. This will - // likely only be true for non-validating nodes. + // PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY and ZKVM_ENABLED_ENR_KEY key to match, + // otherwise we use a new ENR. This will likely only be true for non-validating nodes. && local_enr.get_decodable::(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get_decodable(ATTESTATION_BITFIELD_ENR_KEY) && local_enr.get_decodable::(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get_decodable(SYNC_COMMITTEE_BITFIELD_ENR_KEY) && local_enr.get_decodable::(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY) == disk_enr.get_decodable(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY) + && local_enr.get_decodable::(ZKVM_ENABLED_ENR_KEY) == disk_enr.get_decodable(ZKVM_ENABLED_ENR_KEY) } /// Loads enr from the given directory diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 49de62546df..f5662ca0a34 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -15,7 +15,10 @@ pub use libp2p::identity::{Keypair, PublicKey}; use network_utils::enr_ext::{CombinedKeyExt, EnrExt, peer_id_to_node_id}; use alloy_rlp::bytes::Bytes; -use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY}; 
+use enr::{ + ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, + SYNC_COMMITTEE_BITFIELD_ENR_KEY, ZKVM_ENABLED_ENR_KEY, +}; use futures::prelude::*; use futures::stream::FuturesUnordered; use libp2p::core::transport::PortUse; @@ -560,6 +563,12 @@ impl Discovery { } // Data column subnets are computed from node ID. No subnet bitfield in the ENR. Subnet::DataColumn(_) => return Ok(()), + // Execution proof uses a simple boolean flag in the ENR + Subnet::ExecutionProof => { + self.discv5 + .enr_insert(ZKVM_ENABLED_ENR_KEY, &value) + .map_err(|e| format!("{:?}", e))?; + } } // replace the global version @@ -904,6 +913,7 @@ impl Discovery { Subnet::Attestation(_) => "attestation", Subnet::SyncCommittee(_) => "sync_committee", Subnet::DataColumn(_) => "data_column", + Subnet::ExecutionProof => "execution_proof", }; if let Some(v) = metrics::get_int_counter( diff --git a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs index 6e841c25a50..dc1ac54e97b 100644 --- a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs +++ b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs @@ -41,6 +41,10 @@ where false } } + Subnet::ExecutionProof => { + // Check if ENR advertises zkVM support + enr.zkvm_enabled() + } }); if !predicate { diff --git a/beacon_node/lighthouse_network/src/peer_manager/config.rs b/beacon_node/lighthouse_network/src/peer_manager/config.rs index b2ed6524861..fc77171cee7 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/config.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/config.rs @@ -20,6 +20,8 @@ pub struct Config { pub metrics_enabled: bool, /// Whether quic is enabled. pub quic_enabled: bool, + /// Whether execution proofs are enabled. + pub execution_proof_enabled : bool, /// Target number of peers to connect to. 
pub target_peer_count: usize, @@ -40,6 +42,7 @@ impl Default for Config { discovery_enabled: true, metrics_enabled: false, quic_enabled: true, + execution_proof_enabled: false, target_peer_count: DEFAULT_TARGET_PEERS, status_interval: DEFAULT_STATUS_INTERVAL, ping_interval_inbound: DEFAULT_PING_INTERVAL_INBOUND, diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index ad16bb0421c..ddcf27b35db 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -1,7 +1,7 @@ //! Implementation of Lighthouse's peer management system. use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RpcErrorResponse}; -use crate::service::TARGET_SUBNET_PEERS; +use crate::service::{TARGET_EXECUTION_PROOF_PEERS, TARGET_SUBNET_PEERS}; use crate::{Gossipsub, NetworkGlobals, PeerId, Subnet, SubnetDiscovery, metrics}; use delay_map::HashSetDelay; use discv5::Enr; @@ -113,6 +113,8 @@ pub struct PeerManager { /// discovery queries for subnet peers if we disconnect from existing sync /// committee subnet peers. sync_committee_subnets: HashMap, + /// Keeps track of whether this node has zkVM execution proof support enabled. + execution_proof_enabled: bool, /// A mapping of all custody groups to column subnets to avoid re-computation. subnets_by_custody_group: HashMap>, /// The heartbeat interval to perform routine maintenance. 
@@ -162,6 +164,7 @@ impl PeerManager { let config::Config { discovery_enabled, metrics_enabled, + execution_proof_enabled, target_peer_count, status_interval, ping_interval_inbound, @@ -199,6 +202,7 @@ impl PeerManager { target_peers: target_peer_count, temporary_banned_peers: LRUTimeCache::new(PEER_RECONNECTION_TIMEOUT), sync_committee_subnets: Default::default(), + execution_proof_enabled, subnets_by_custody_group, heartbeat, discovery_enabled, @@ -601,6 +605,7 @@ impl PeerManager { Protocol::BlobsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRange => PeerAction::MidToleranceError, + Protocol::ExecutionProofsByRoot => PeerAction::MidToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, @@ -621,6 +626,7 @@ impl PeerManager { Protocol::BlobsByRoot => return, Protocol::DataColumnsByRoot => return, Protocol::DataColumnsByRange => return, + Protocol::ExecutionProofsByRoot => return, Protocol::Goodbye => return, Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, @@ -644,6 +650,7 @@ impl PeerManager { Protocol::BlobsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRange => PeerAction::MidToleranceError, + Protocol::ExecutionProofsByRoot => PeerAction::MidToleranceError, Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, @@ -1004,6 +1011,46 @@ impl PeerManager { } } + /// Run discovery query for zkVM-enabled peers if we fall below `TARGET_EXECUTION_PROOF_PEERS`. 
+ fn maintain_execution_proof_peers(&mut self) { + // Only maintain peers if zkVM is enabled + if !self.execution_proof_enabled { + return; + } + + // Check if we have enough zkVM-enabled peers + // Count peers subscribed to the execution_proof gossip topic + // TODO(zkproofs): Note that since peers do not advertise whether + // they are proof generating, we cannot favour them. This is + // fine for optional proofs and mandatory proofs will imply + // that the builder who is well connected will propagate it + // to most of the network. + let zkvm_peer_count = self + .network_globals + .peers + .read() + .connected_peers() + .filter(|(_, info)| { + // Check if peer is subscribed to ExecutionProof gossip topic + info.on_subnet_gossipsub(&Subnet::ExecutionProof) + }) + .count(); + + if zkvm_peer_count < TARGET_EXECUTION_PROOF_PEERS { + debug!( + current_peers = zkvm_peer_count, + target = TARGET_EXECUTION_PROOF_PEERS, + "Making discovery query for zkVM-enabled peers" + ); + self.events.push(PeerManagerEvent::DiscoverSubnetPeers(vec![ + SubnetDiscovery { + subnet: Subnet::ExecutionProof, + min_ttl: None, + }, + ])); + } + } + fn maintain_trusted_peers(&mut self) { let trusted_peers = self.trusted_peers.clone(); for trusted_peer in trusted_peers { @@ -1081,6 +1128,10 @@ impl PeerManager { Subnet::DataColumn(id) => { peer_info.custody_subnets.insert(id); } + Subnet::ExecutionProof => { + // ExecutionProof uses a single topic, not subnet-based + // So there is no subnet assignment to track + } } } @@ -1449,6 +1500,9 @@ impl PeerManager { // Maintain minimum count for sync committee peers. self.maintain_sync_committee_peers(); + // Maintain minimum count for zkVM-enabled peers (if zkVM is enabled). + self.maintain_execution_proof_peers(); + // Prune any excess peers back to our target in such a way that incentivises good scores and // a uniform distribution of subnets. 
self.prune_excess_peers(); diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index c289cb9a69c..f47a34f069b 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -105,6 +105,14 @@ impl PeerInfo { Subnet::DataColumn(subnet_id) => { return self.is_assigned_to_custody_subnet(subnet_id); } + Subnet::ExecutionProof => { + // ExecutionProof capability is advertised via ENR zkvm flag, not metadata + // A node cannot dynamically change what the support. + if let Some(enr) = self.enr.as_ref() { + return enr.zkvm_enabled(); + } + return false; + } } } false @@ -272,6 +280,11 @@ impl PeerInfo { return true; } + // Check if the peer has zkVM enabled (execution proof support) + if let Some(enr) = self.enr.as_ref() { + return enr.zkvm_enabled() + } + false } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 0f5fd99c279..1f403dec9ab 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -3,7 +3,7 @@ use libp2p::PeerId; use std::fmt::{Display, Formatter}; use std::sync::Arc; use types::{ - BlobSidecar, DataColumnSidecar, Epoch, EthSpec, LightClientBootstrap, + BlobSidecar, DataColumnSidecar, Epoch, EthSpec, ExecutionProof, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, }; @@ -22,6 +22,8 @@ pub enum SyncRequestId { SingleBlock { id: SingleLookupReqId }, /// Request searching for a set of blobs given a hash. SingleBlob { id: SingleLookupReqId }, + /// Request searching for execution proofs given a block hash and proof IDs. 
+ SingleExecutionProof { id: SingleLookupReqId }, /// Request searching for a set of data columns given a hash and list of column indices. DataColumnsByRoot(DataColumnsByRootRequestId), /// Blocks by range request @@ -140,6 +142,8 @@ pub enum Response { BlobsByRoot(Option>>), /// A response to a get DATA_COLUMN_SIDECARS_BY_ROOT request. DataColumnsByRoot(Option>>), + /// A response to a get EXECUTION_PROOFS_BY_ROOT request. + ExecutionProofsByRoot(Option>), /// A response to a LightClientUpdate request. LightClientBootstrap(Arc>), /// A response to a LightClientOptimisticUpdate request. @@ -177,6 +181,10 @@ impl std::convert::From> for RpcResponse { Some(d) => RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange(d)), None => RpcResponse::StreamTermination(ResponseTermination::DataColumnsByRange), }, + Response::ExecutionProofsByRoot(r) => match r { + Some(p) => RpcResponse::Success(RpcSuccessResponse::ExecutionProofsByRoot(p)), + None => RpcResponse::StreamTermination(ResponseTermination::ExecutionProofsByRoot), + }, Response::Status(s) => RpcResponse::Success(RpcSuccessResponse::Status(s)), Response::LightClientBootstrap(b) => { RpcResponse::Success(RpcSuccessResponse::LightClientBootstrap(b)) diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index 120b9e6c245..227317f79ea 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -44,6 +44,8 @@ pub struct GossipCache { light_client_finality_update: Option, /// Timeout for light client optimistic updates. light_client_optimistic_update: Option, + /// Timeout for execution proofs. + execution_proof: Option, } #[derive(Default)] @@ -75,6 +77,8 @@ pub struct GossipCacheBuilder { light_client_finality_update: Option, /// Timeout for light client optimistic updates. light_client_optimistic_update: Option, + /// Timeout for execution proofs. 
+ execution_proof: Option, } #[allow(dead_code)] @@ -151,6 +155,12 @@ impl GossipCacheBuilder { self } + /// Timeout for execution proof messages. + pub fn execution_proof_timeout(mut self, timeout: Duration) -> Self { + self.execution_proof = Some(timeout); + self + } + pub fn build(self) -> GossipCache { let GossipCacheBuilder { default_timeout, @@ -167,6 +177,7 @@ impl GossipCacheBuilder { bls_to_execution_change, light_client_finality_update, light_client_optimistic_update, + execution_proof, } = self; GossipCache { expirations: DelayQueue::default(), @@ -184,6 +195,7 @@ impl GossipCacheBuilder { bls_to_execution_change: bls_to_execution_change.or(default_timeout), light_client_finality_update: light_client_finality_update.or(default_timeout), light_client_optimistic_update: light_client_optimistic_update.or(default_timeout), + execution_proof: execution_proof.or(default_timeout), } } } @@ -211,6 +223,7 @@ impl GossipCache { GossipKind::BlsToExecutionChange => self.bls_to_execution_change, GossipKind::LightClientFinalityUpdate => self.light_client_finality_update, GossipKind::LightClientOptimisticUpdate => self.light_client_optimistic_update, + GossipKind::ExecutionProof => self.execution_proof, }; let Some(expire_timeout) = expire_timeout else { return; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index ea2c53a07fe..b8fd30f66fa 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -52,6 +52,10 @@ pub mod utils; /// The number of peers we target per subnet for discovery queries. pub const TARGET_SUBNET_PEERS: usize = 3; +/// The number of peers we target for execution proof peer discovery. +/// Set to 1 since we don't expect many nodes to run it +pub const TARGET_EXECUTION_PROOF_PEERS: usize = 1; + const MAX_IDENTIFY_ADDRESSES: usize = 10; /// The types of events than can be obtained from polling the behaviour. 
@@ -255,6 +259,7 @@ impl Network { // .signed_contribution_and_proof_timeout(timeout) // Do not retry // .sync_committee_message_timeout(timeout) // Do not retry .bls_to_execution_change_timeout(half_epoch * 2) + .execution_proof_timeout(slot_duration) .build() }; @@ -411,6 +416,7 @@ impl Network { quic_enabled: !config.disable_quic_support, metrics_enabled: config.metrics_enabled, target_peer_count: config.target_peers, + execution_proof_enabled: ctx.chain_spec.is_zkvm_enabled(), ..Default::default() }; PeerManager::new(peer_manager_cfg, network_globals.clone())? @@ -1563,6 +1569,17 @@ impl Network { request_type, }) } + RequestType::ExecutionProofsByRoot(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["execution_proofs_by_root"], + ); + Some(NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + }) + } RequestType::LightClientBootstrap(_) => { metrics::inc_counter_vec( &metrics::TOTAL_RPC_REQUESTS, @@ -1648,6 +1665,9 @@ impl Network { RpcSuccessResponse::DataColumnsByRange(resp) => { self.build_response(id, peer_id, Response::DataColumnsByRange(Some(resp))) } + RpcSuccessResponse::ExecutionProofsByRoot(resp) => { + self.build_response(id, peer_id, Response::ExecutionProofsByRoot(Some(resp))) + } // Should never be reached RpcSuccessResponse::LightClientBootstrap(bootstrap) => { self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap)) @@ -1677,6 +1697,7 @@ impl Network { ResponseTermination::BlobsByRoot => Response::BlobsByRoot(None), ResponseTermination::DataColumnsByRoot => Response::DataColumnsByRoot(None), ResponseTermination::DataColumnsByRange => Response::DataColumnsByRange(None), + ResponseTermination::ExecutionProofsByRoot => Response::ExecutionProofsByRoot(None), ResponseTermination::LightClientUpdatesByRange => { Response::LightClientUpdatesByRange(None) } diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 
a0026837e37..8851fb39153 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -288,6 +288,8 @@ pub(crate) fn create_whitelist_filter( for id in 0..spec.data_column_sidecar_subnet_count { add(DataColumnSidecar(DataColumnSubnetId::new(id))); } + // Add ExecutionProof topic + add(ExecutionProof); } gossipsub::WhitelistSubscriptionFilter(possible_hashes) } diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 72f2873def9..1cd46a2a723 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -8,7 +8,7 @@ use std::io::{Error, ErrorKind}; use std::sync::Arc; use types::{ AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, BlobSidecar, - DataColumnSidecar, DataColumnSubnetId, EthSpec, ForkContext, ForkName, + DataColumnSidecar, DataColumnSubnetId, EthSpec, ExecutionProof, ForkContext, ForkName, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, @@ -26,6 +26,8 @@ pub enum PubsubMessage { BlobSidecar(Box<(u64, Arc>)>), /// Gossipsub message providing notification of a [`DataColumnSidecar`] along with the subnet id where it was received. DataColumnSidecar(Box<(DataColumnSubnetId, Arc>)>), + /// Gossipsub message providing notification of an [`ExecutionProof`]. + ExecutionProof(Arc), /// Gossipsub message providing notification of a Aggregate attestation and associated proof. AggregateAndProofAttestation(Box>), /// Gossipsub message providing notification of a `SingleAttestation` with its subnet id. 
@@ -135,6 +137,7 @@ impl PubsubMessage { PubsubMessage::DataColumnSidecar(column_sidecar_data) => { GossipKind::DataColumnSidecar(column_sidecar_data.0) } + PubsubMessage::ExecutionProof(_) => GossipKind::ExecutionProof, PubsubMessage::AggregateAndProofAttestation(_) => GossipKind::BeaconAggregateAndProof, PubsubMessage::Attestation(attestation_data) => { GossipKind::Attestation(attestation_data.0) @@ -290,6 +293,23 @@ impl PubsubMessage { )), } } + GossipKind::ExecutionProof => { + match fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest) { + // TODO(zkproofs): we don't have the ChainSpec here, so if we change this to + // be for gloas, then we should change it here too + Some(fork) if fork.fulu_enabled() => { + let execution_proof = Arc::new( + ExecutionProof::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ); + Ok(PubsubMessage::ExecutionProof(execution_proof)) + } + Some(_) | None => Err(format!( + "execution_proof topic invalid for given fork digest {:?}", + gossip_topic.fork_digest + )), + } + } GossipKind::VoluntaryExit => { let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?; @@ -403,6 +423,7 @@ impl PubsubMessage { PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(), PubsubMessage::BlobSidecar(data) => data.1.as_ssz_bytes(), PubsubMessage::DataColumnSidecar(data) => data.1.as_ssz_bytes(), + PubsubMessage::ExecutionProof(data) => data.as_ssz_bytes(), PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(), PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(), PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(), @@ -438,6 +459,12 @@ impl std::fmt::Display for PubsubMessage { data.1.slot(), data.1.index, ), + PubsubMessage::ExecutionProof(data) => write!( + f, + "ExecutionProof: block_root: {}, proof_id: {}", + data.block_root, + data.proof_id.as_u8(), + ), PubsubMessage::AggregateAndProofAttestation(att) => write!( f, "Aggregate and Proof: slot: {}, 
index: {:?}, aggregator_index: {}", diff --git a/beacon_node/lighthouse_network/src/types/subnet.rs b/beacon_node/lighthouse_network/src/types/subnet.rs index 1892dcc83af..43b59b12273 100644 --- a/beacon_node/lighthouse_network/src/types/subnet.rs +++ b/beacon_node/lighthouse_network/src/types/subnet.rs @@ -14,6 +14,13 @@ pub enum Subnet { SyncCommittee(SyncSubnetId), /// Represents a gossipsub data column subnet. DataColumn(DataColumnSubnetId), + /// Represents execution proof support. + // + /// Note: ExecutionProof uses a single gossip topic (not multiple topics), + /// but we track it here for ENR-based peer discovery to find zkVM-enabled peers. + /// TODO(zkproofs): Is there a way to have peer discovery without adding the global topic + /// into Subnet? + ExecutionProof, } /// A subnet to discover peers on along with the instant after which it's no longer useful. diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index b22adfbc487..8230f91c0b1 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -2,7 +2,9 @@ use gossipsub::{IdentTopic as Topic, TopicHash}; use serde::{Deserialize, Serialize}; use std::collections::HashSet; use strum::AsRefStr; -use types::{ChainSpec, DataColumnSubnetId, EthSpec, ForkName, SubnetId, SyncSubnetId, Unsigned}; +use types::{ + ChainSpec, DataColumnSubnetId, EthSpec, ForkName, SubnetId, SyncSubnetId, Unsigned, +}; use crate::Subnet; @@ -16,6 +18,7 @@ pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof"; pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_"; pub const BLOB_SIDECAR_PREFIX: &str = "blob_sidecar_"; pub const DATA_COLUMN_SIDECAR_PREFIX: &str = "data_column_sidecar_"; +pub const EXECUTION_PROOF_TOPIC: &str = "execution_proof"; pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; pub 
const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; @@ -91,6 +94,14 @@ pub fn core_topics_to_subscribe( } } + // Subscribe to execution proof topic if zkVM mode is enabled for this fork. + // TODO(zkproofs): this looks different than the other checks because + // there is no official zkvm_fork and we enable this alongside a current fork + let zkvm_check = spec.is_zkvm_enabled_for_fork(fork_name); + if zkvm_check { + topics.push(GossipKind::ExecutionProof); + } + topics } @@ -109,6 +120,7 @@ pub fn is_fork_non_core_topic(topic: &GossipTopic, _fork_name: ForkName) -> bool | GossipKind::BeaconAggregateAndProof | GossipKind::BlobSidecar(_) | GossipKind::DataColumnSidecar(_) + | GossipKind::ExecutionProof | GossipKind::VoluntaryExit | GossipKind::ProposerSlashing | GossipKind::AttesterSlashing @@ -156,6 +168,8 @@ pub enum GossipKind { BlobSidecar(u64), /// Topic for publishing DataColumnSidecars. DataColumnSidecar(DataColumnSubnetId), + /// Topic for publishing ExecutionProofs + ExecutionProof, /// Topic for publishing raw attestations on a particular subnet. 
#[strum(serialize = "beacon_attestation")] Attestation(SubnetId), @@ -320,6 +334,7 @@ impl std::fmt::Display for GossipTopic { GossipKind::DataColumnSidecar(column_subnet_id) => { format!("{}{}", DATA_COLUMN_SIDECAR_PREFIX, *column_subnet_id) } + GossipKind::ExecutionProof => EXECUTION_PROOF_TOPIC.into(), GossipKind::BlsToExecutionChange => BLS_TO_EXECUTION_CHANGE_TOPIC.into(), GossipKind::LightClientFinalityUpdate => LIGHT_CLIENT_FINALITY_UPDATE.into(), GossipKind::LightClientOptimisticUpdate => LIGHT_CLIENT_OPTIMISTIC_UPDATE.into(), @@ -341,6 +356,7 @@ impl From for GossipKind { Subnet::Attestation(s) => GossipKind::Attestation(s), Subnet::SyncCommittee(s) => GossipKind::SyncCommitteeMessage(s), Subnet::DataColumn(s) => GossipKind::DataColumnSidecar(s), + Subnet::ExecutionProof => GossipKind::ExecutionProof, } } } diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 8a3047692f3..50ecc7d7d99 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -29,6 +29,8 @@ pub fn spec_with_all_forks_enabled() -> ChainSpec { chain_spec.electra_fork_epoch = Some(Epoch::new(5)); chain_spec.fulu_fork_epoch = Some(Epoch::new(6)); chain_spec.gloas_fork_epoch = Some(Epoch::new(7)); + // Enable zkVM + chain_spec.zkvm_enabled = true; // check that we have all forks covered assert!(chain_spec.fork_epoch(ForkName::latest()).is_some()); From 1bcb67418f4d27b43dd5a9a7e4c8c8b9f6013fc9 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 15:05:42 +0000 Subject: [PATCH 29/67] Add proof available checking logic and has_execution_layer_and_proof_gen flag --- beacon_node/beacon_chain/src/builder.rs | 43 ++- .../src/data_availability_checker.rs | 272 ++++++++++++++++-- .../src/data_availability_checker/error.rs | 32 ++- .../overflow_lru_cache.rs | 54 ++-- 4 files changed, 353 insertions(+), 48 deletions(-) diff --git 
a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 7523b3a4bff..05a476cdbd9 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -2,6 +2,7 @@ use crate::ChainConfig; use crate::CustodyContext; use crate::beacon_chain::{ BEACON_CHAIN_DB_KEY, CanonicalHead, LightClientProducerEvent, OP_POOL_DB_KEY, + ProofGenerationEvent, }; use crate::beacon_proposer_cache::BeaconProposerCache; use crate::data_availability_checker::DataAvailabilityChecker; @@ -12,6 +13,7 @@ use crate::kzg_utils::build_data_column_sidecars; use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; use crate::observed_data_sidecars::ObservedDataSidecars; +use crate::observed_execution_proofs::ObservedExecutionProofs; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::persisted_custody::load_custody_context; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; @@ -38,6 +40,7 @@ use std::sync::Arc; use std::time::Duration; use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; +use tokio::sync::mpsc::UnboundedSender; use tracing::{debug, error, info}; use types::{ BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, @@ -108,6 +111,10 @@ pub struct BeaconChainBuilder { /// be replaced with ZkVmEngineApi from zkvm_execution_layer. This would allow the /// --execution-endpoint CLI flag to be optional when running in ZK-VM mode. 
zkvm_execution_layer_config: Option, + /// Registry of zkVM proof generators for currently altruistic proof generation + zkvm_generator_registry: Option>, + /// Sender to notify proof generation service of blocks needing proofs + proof_generation_tx: Option>>, } impl @@ -148,6 +155,8 @@ where import_all_data_columns: false, rng: None, zkvm_execution_layer_config: None, + zkvm_generator_registry: None, + proof_generation_tx: None, } } @@ -698,6 +707,24 @@ where self } + /// Sets the zkVM generator registry for altruistic proof generation. + pub fn zkvm_generator_registry( + mut self, + registry: Arc, + ) -> Self { + self.zkvm_generator_registry = Some(registry); + self + } + + /// Sets a `Sender` to notify the proof generation service of new blocks. + pub fn proof_generation_tx( + mut self, + sender: UnboundedSender>, + ) -> Self { + self.proof_generation_tx = Some(sender); + self + } + /// Creates a new, empty operation pool. fn empty_op_pool(mut self) -> Self { self.op_pool = Some(OperationPool::new()); @@ -959,6 +986,9 @@ where }; debug!(?custody_context, "Loading persisted custody context"); + let has_execution_layer_and_proof_gen = self.execution_layer.is_some() + && self.zkvm_generator_registry.is_some(); + let beacon_chain = BeaconChain { spec: self.spec.clone(), config: self.chain_config, @@ -991,6 +1021,7 @@ where observed_block_producers: <_>::default(), observed_column_sidecars: RwLock::new(ObservedDataSidecars::new(self.spec.clone())), observed_blob_sidecars: RwLock::new(ObservedDataSidecars::new(self.spec.clone())), + observed_execution_proofs: RwLock::new(ObservedExecutionProofs::default()), observed_slashable: <_>::default(), observed_voluntary_exits: <_>::default(), observed_proposer_slashings: <_>::default(), @@ -1036,16 +1067,22 @@ where store, custody_context, self.spec, - // Note(zkproofs): We don't pass the entire config to the da_checker - // because currently only the `min_proofs_required` setting is needed. 
+ // Create verifier registry if zkvm mode is enabled + // For now, we use dummy verifiers for all subnets self.zkvm_execution_layer_config .as_ref() - .map(|cfg| cfg.min_proofs_required), + .map(|_| Arc::new(zkvm_execution_layer::registry_proof_verification::VerifierRegistry::new_with_dummy_verifiers())), + // Pass whether this node has an execution layer AND generates proofs + // Nodes with EL+proof-gen validate via traditional execution + // Nodes with EL but no proof-gen wait for proofs (lightweight verifier) + has_execution_layer_and_proof_gen, ) .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), kzg: self.kzg.clone(), rng: Arc::new(Mutex::new(rng)), + zkvm_generator_registry: self.zkvm_generator_registry, + proof_generation_tx: self.proof_generation_tx, }; let head = beacon_chain.head_snapshot(); diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index b423d54cfa0..69b5d99a4c2 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -18,12 +18,13 @@ use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; -use tracing::{debug, error, instrument}; +use tracing::{debug, error, instrument, warn}; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{ BlobSidecarList, BlockImportSource, ChainSpec, DataColumnSidecar, DataColumnSidecarList, Epoch, - EthSpec, Hash256, SignedBeaconBlock, Slot, + EthSpec, ExecutionProof, ExecutionProofId, Hash256, SignedBeaconBlock, Slot, }; +use zkvm_execution_layer::registry_proof_verification::VerifierRegistry; mod error; mod overflow_lru_cache; @@ -56,10 +57,6 @@ pub const OVERFLOW_LRU_CAPACITY: NonZeroUsize = new_non_zero_usize(32); pub const STATE_LRU_CAPACITY_NON_ZERO: NonZeroUsize = new_non_zero_usize(32); pub const STATE_LRU_CAPACITY: usize = 
STATE_LRU_CAPACITY_NON_ZERO.get(); -/// Minimum number of epochs to retain execution proofs for ZK-VM mode. -/// TODO(zkproofs): Consider making this a spec parameter like MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS -pub const MIN_EPOCHS_FOR_PROOF_RETENTION: u64 = 2; - /// Cache to hold fully valid data that can't be imported to fork-choice yet. After Dencun hard-fork /// blocks have a sidecar of data that is received separately from the network. We call the concept /// of a block "becoming available" when all of its import dependencies are inserted into this @@ -91,6 +88,8 @@ pub struct DataAvailabilityChecker { kzg: Arc, custody_context: Arc>, spec: Arc, + /// Registry of proof verifiers for different zkVM proof IDs. + verifier_registry: Option>, } pub type AvailabilityAndReconstructedColumns = (Availability, DataColumnSidecarList); @@ -130,14 +129,15 @@ impl DataAvailabilityChecker { store: BeaconStore, custody_context: Arc>, spec: Arc, - min_execution_proofs_required: Option, + verifier_registry: Option>, + has_execution_layer_and_proof_gen: bool, ) -> Result { let inner = DataAvailabilityCheckerInner::new( OVERFLOW_LRU_CAPACITY, store, custody_context.clone(), spec.clone(), - min_execution_proofs_required, + has_execution_layer_and_proof_gen, )?; Ok(Self { complete_blob_backfill, @@ -146,6 +146,7 @@ impl DataAvailabilityChecker { kzg, custody_context, spec, + verifier_registry, }) } @@ -176,6 +177,57 @@ impl DataAvailabilityChecker { }) } + /// Return the set of cached execution proof IDs for `block_root`. Returns None if there is + /// no block component for `block_root`. + pub fn cached_execution_proof_subnet_ids( + &self, + block_root: &Hash256, + ) -> Option> { + self.availability_cache + .peek_pending_components(block_root, |components| { + components.map(|components| { + components + .get_cached_execution_proofs() + .iter() + .map(|proof| proof.proof_id) + .collect::>() + }) + }) + } + + /// Get proof IDs we already have for a block. 
+ /// Used when creating RPC requests to tell peers what we don't need. + pub fn get_existing_proof_ids(&self, block_root: &Hash256) -> Option> { + self.availability_cache + .peek_pending_components(block_root, |components| { + components.map(|components| { + components + .get_cached_execution_proofs() + .iter() + .map(|proof| proof.proof_id) + .collect::>() + }) + }) + } + + /// Get all execution proofs we have for a block. + /// Used when responding to RPC requests. + pub fn get_execution_proofs( + &self, + block_root: &Hash256, + ) -> Option>> { + self.availability_cache + .peek_pending_components(block_root, |components| { + components.map(|components| { + components + .get_cached_execution_proofs() + .iter() + .map(|proof| Arc::new(proof.clone())) + .collect::>() + }) + }) + } + + /// Return the set of cached custody column indexes for `block_root`. Returns None if there is + /// no block component for `block_root`. pub fn cached_data_column_indexes(&self, block_root: &Hash256) -> Option> { @@ -200,6 +252,64 @@ impl DataAvailabilityChecker { }) } + /// Check if an execution proof is already cached in the availability cache. + /// + /// We usually call this method if the proof was made available via RPC, and we later receive it via Gossip. + /// If it exists in the cache, we know it has already passed validation, + /// even though this particular instance may not have been seen/published on gossip yet. + pub fn is_execution_proof_cached( + &self, + block_root: &Hash256, + execution_proof: &ExecutionProof, + ) -> bool { + self.availability_cache + .peek_pending_components(block_root, |components| { + components.is_some_and(|components| { + components + .get_cached_execution_proofs() + .iter() + .any(|cached| cached == execution_proof) + }) + }) + } + + /// Verify a single execution proof for gossip. + /// + /// This performs cryptographic verification of the proof without requiring the full block.
+ /// + /// Returns: + /// - Ok(true) if proof is valid + /// - Ok(false) if proof is invalid + /// - Err if no verifier is configured or verification fails + pub fn verify_execution_proof_for_gossip( + &self, + proof: &ExecutionProof, + ) -> Result { + let Some(verifier_registry) = &self.verifier_registry else { + // No verifier configured but receiving proofs - this is a configuration error. + // If the chain spec enables zkVM, the node must have --activate-zkvm flag set. + return Err(AvailabilityCheckError::ProofVerificationError( + "Node is receiving execution proofs but zkVM verification is not enabled. \ + Use --activate-zkvm flag to enable proof verification.".to_string() + )); + }; + + let subnet_id = proof.proof_id; + let verifier = verifier_registry + .get_verifier(subnet_id) + .ok_or_else(|| { + warn!(?subnet_id, "No verifier registered for subnet"); + AvailabilityCheckError::UnsupportedProofSubnet(subnet_id) + })?; + + verifier.verify(proof).map_err(|e| { + AvailabilityCheckError::ProofVerificationError(format!( + "Proof verification failed: {:?}", + e + )) + }) + } + /// Get a blob from the availability cache. pub fn get_blob( &self, @@ -276,6 +386,116 @@ impl DataAvailabilityChecker { .put_kzg_verified_data_columns(block_root, verified_custody_columns) } + /// Put a list of execution proofs received via RPC into the availability cache. + /// This performs cryptographic verification on the proofs. 
+ #[instrument(skip_all, level = "trace")] + pub fn put_rpc_execution_proofs( + &self, + block_root: Hash256, + proofs: Vec>, + ) -> Result, AvailabilityCheckError> { + debug!( + ?block_root, + num_proofs = proofs.len(), + "Verifying and storing execution proofs in DA checker" + ); + + // If no verifier registry is configured, skip verification + let Some(verifier_registry) = &self.verifier_registry else { + debug!( + ?block_root, + "No verifier registry configured, storing proofs without verification" + ); + let owned_proofs = proofs.iter().map(|p| (**p).clone()); + return self + .availability_cache + .put_verified_execution_proofs(block_root, owned_proofs); + }; + + // Get the execution payload hash from the block + let execution_payload_hash = self + .availability_cache + .peek_pending_components(&block_root, |components| { + components.and_then(|c| c.block.as_ref().and_then(|b| b.execution_payload_hash())) + }) + .ok_or_else(|| { + warn!(?block_root, "Cannot verify proofs: block not in cache or has no execution payload"); + AvailabilityCheckError::MissingExecutionPayload + })?; + + debug!( + ?block_root, + ?execution_payload_hash, + "Got execution payload hash for proof verification" + ); + + let mut verified_proofs = Vec::new(); + for proof in proofs { + let proof_id = proof.proof_id; + + // Check that the proof's block_hash matches the execution payload hash + if proof.block_hash != execution_payload_hash { + warn!( + ?block_root, + ?proof_id, + proof_hash = ?proof.block_hash, + ?execution_payload_hash, + "Proof execution payload hash mismatch" + ); + return Err(AvailabilityCheckError::ExecutionPayloadHashMismatch { + proof_hash: proof.block_hash, + block_hash: execution_payload_hash, + }); + } + + let verifier = verifier_registry + .get_verifier(proof_id) + .ok_or_else(|| { + warn!(?proof_id, "No verifier registered for proof ID"); + AvailabilityCheckError::UnsupportedProofSubnet(proof_id) + })?; + + // Verify the proof (proof contains block_hash internally) 
+ match verifier.verify(&proof) { + Ok(true) => { + debug!(?proof_id, ?block_root, "Proof verification succeeded"); + verified_proofs.push((*proof).clone()); + } + Ok(false) => { + warn!( + ?proof_id, + ?block_root, + "Proof verification failed: proof is invalid" + ); + return Err(AvailabilityCheckError::InvalidProof { + proof_id: proof_id, + reason: "Proof verification returns false".to_string(), + }); + } + Err(e) => { + warn!( + ?proof_id, + ?block_root, + error = ?e, + "Proof verification error" + ); + return Err(AvailabilityCheckError::ProofVerificationError( + e.to_string(), + )); + } + } + } + + debug!( + ?block_root, + verified_count = verified_proofs.len(), + "All proofs verified successfully" + ); + + self.availability_cache + .put_verified_execution_proofs(block_root, verified_proofs) + } + /// Check if we've cached other blobs for this block. If it completes a set and we also /// have a block cached, return the `Availability` variant triggering block import. /// Otherwise cache the blob sidecar. @@ -587,24 +807,31 @@ impl DataAvailabilityChecker { }) } - /// The epoch at which we expect execution proofs in block processing. - /// - /// Note: For optional proofs, we specify that proofs only need to be available for 2 epochs - /// ie not past finalization + /// The epoch at which we require execution proofs for block processing. + /// + /// Note: This follows the same pattern as blob retention: proofs are required starting from + /// the zkvm_fork epoch, but only retained for a configured number of epochs. /// + /// TODO(zkproofs): We don't store proofs forever and we also don't store + /// blobs forever, perhaps we should because when the blob disappears, we may not + /// be able to remake the proof when we put blobs in blocks. + /// We don't for now because proofs are quite large at the moment. + /// /// Returns `None` if ZK-VM mode is disabled. 
pub fn execution_proof_boundary(&self) -> Option { - // Only enable if min_execution_proofs_required is set - if self.availability_cache.min_execution_proofs_required().is_none() { - return None; - } + let zkvm_fork_epoch = self.spec.zkvm_fork_epoch()?; - // TODO(zkproofs): Add zkvm_fork_epoch to ChainSpec once ZK-VM fork is defined - // This would be when proofs are mandatory. - // For now, calculate boundary based on current epoch let current_epoch = self.slot_clock.now()?.epoch(T::EthSpec::slots_per_epoch()); - let retention_boundary = current_epoch.saturating_sub(MIN_EPOCHS_FOR_PROOF_RETENTION); - Some(retention_boundary) + + // Calculate retention boundary + let proof_retention_epoch = + current_epoch.saturating_sub(self.spec.min_epochs_for_execution_proof_requests); + + // Return max of fork epoch and retention boundary + // This ensures: + // 1. Proofs are never required before the zkvm fork + // 2. Proofs are only retained for the configured number of epochs + Some(std::cmp::max(zkvm_fork_epoch, proof_retention_epoch)) } /// Returns true if the given epoch lies within the proof retention boundary. @@ -615,7 +842,7 @@ impl DataAvailabilityChecker { /// Returns the minimum number of execution proofs required for ZK-VM mode. pub fn min_execution_proofs_required(&self) -> Option { - self.availability_cache.min_execution_proofs_required() + self.spec.zkvm_min_proofs_required() } /// Collects metrics from the data availability checker. 
@@ -1262,6 +1489,7 @@ mod test { custody_context, spec, None, + false, ) .expect("should initialise data availability checker") } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs index c9efb7a4149..e5158827479 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/error.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -1,5 +1,5 @@ use kzg::{Error as KzgError, KzgCommitment}; -use types::{BeaconStateError, ColumnIndex, Hash256}; +use types::{BeaconStateError, ColumnIndex, ExecutionProofId, Hash256}; #[derive(Debug)] pub enum Error { @@ -22,6 +22,27 @@ pub enum Error { BlockReplayError(state_processing::BlockReplayError), RebuildingStateCaches(BeaconStateError), SlotClockError, + /// Execution proof verification failed - proof is invalid. + /// Penalize peer, a peer should not forward invalid proofs + InvalidProof { + proof_id: ExecutionProofId, + reason: String, + }, + /// No verifier registered for this proof ID. + /// Internal error; no peer penalization. + UnsupportedProofID(ExecutionProofId), + /// Error during proof verification process. + /// Internal error; no peer penalization. + ProofVerificationError(String), + /// Could not extract execution payload from block. + /// Internal error; no peer penalization. + MissingExecutionPayload, + /// Execution payload hash mismatch between proof and block. + /// Penalize peer, similar to an invalid proof. 
+ ExecutionPayloadHashMismatch { + proof_hash: types::ExecutionBlockHash, + block_hash: types::ExecutionBlockHash, + }, } #[derive(PartialEq, Eq)] @@ -44,13 +65,18 @@ impl Error { | Error::ParentStateMissing(_) | Error::BlockReplayError(_) | Error::RebuildingStateCaches(_) - | Error::SlotClockError => ErrorCategory::Internal, + | Error::SlotClockError + | Error::UnsupportedProofID(_) + | Error::ProofVerificationError(_) + | Error::MissingExecutionPayload => ErrorCategory::Internal, Error::InvalidBlobs { .. } | Error::InvalidColumn { .. } | Error::ReconstructColumnsError { .. } | Error::BlobIndexInvalid(_) | Error::DataColumnIndexInvalid(_) - | Error::KzgCommitmentMismatch { .. } => ErrorCategory::Malicious, + | Error::KzgCommitmentMismatch { .. } + | Error::InvalidProof { .. } + | Error::ExecutionPayloadHashMismatch { .. } => ErrorCategory::Malicious, } } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 71e86fcafe4..1c4096cdc22 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -55,6 +55,16 @@ impl CachedBlock { .blob_kzg_commitments() .map_or(0, |commitments| commitments.len()) } + + /// Get the execution payload hash if this block has an execution payload + pub fn execution_payload_hash(&self) -> Option { + self.as_block() + .message() + .body() + .execution_payload() + .ok() + .map(|payload| payload.execution_payload_ref().block_hash()) + } } /// This represents the components of a partially available block @@ -205,11 +215,11 @@ impl PendingComponents { &self.verified_execution_proofs } - /// Check if we have a proof from a specific subnet - pub fn has_proof_from_subnet(&self, subnet_id: types::ExecutionProofSubnetId) -> bool { + /// Check if we have a specific proof + pub fn has_proof_with_id(&self, proof_id: 
types::ExecutionProofId) -> bool { self.verified_execution_proofs .iter() - .any(|proof| proof.subnet_id == subnet_id) + .any(|proof| proof.proof_id == proof_id) } /// Get the number of unique subnet proofs we have @@ -226,8 +236,8 @@ impl PendingComponents { // Verify the proof is for the correct block // ExecutionBlockHash is a wrapper around Hash256, so we need to convert - // Don't insert duplicate proofs from the same subnet - if self.has_proof_from_subnet(proof.subnet_id) { + // Don't insert duplicate proofs + if self.has_proof_with_id(proof.proof_id) { return; } @@ -262,7 +272,7 @@ impl PendingComponents { &self, spec: &Arc, num_expected_columns_opt: Option, - min_execution_proofs_opt: Option, + has_execution_layer_and_proof_gen: bool, recover: R, ) -> Result>, AvailabilityCheckError> where @@ -340,8 +350,16 @@ impl PendingComponents { return Ok(None); }; - // Check execution proof availability for ZK-VM mode - if let Some(min_proofs) = min_execution_proofs_opt { + // Check if this node needs execution proofs to validate blocks. + // Nodes that have EL and generate proofs validate via EL execution. + // Nodes that have EL but DON'T generate proofs are lightweight verifiers and wait for proofs. + // TODO(zkproofs): This is a technicality mainly because we cannot remove the EL on kurtosis + // ie each CL is coupled with an EL + let needs_execution_proofs = spec.zkvm_min_proofs_required().is_some() + && !has_execution_layer_and_proof_gen; + + if needs_execution_proofs { + let min_proofs = spec.zkvm_min_proofs_required().unwrap(); let num_proofs = self.execution_proof_subnet_count(); if num_proofs < min_proofs { // Not enough execution proofs yet @@ -470,9 +488,10 @@ pub struct DataAvailabilityCheckerInner { state_cache: StateLRUCache, custody_context: Arc>, spec: Arc, - /// Minimum number of execution proofs required from different subnets. - /// If None, execution proof checking is disabled (standard execution engine). 
- min_execution_proofs_required: Option, + /// Whether this node has an execution layer AND generates proofs. + /// - true: Node has EL and generates proofs → validates via EL execution + /// - false: Node either has no EL, or has EL but doesn't generate → waits for proofs (lightweight verifier) + has_execution_layer_and_proof_gen: bool, } // This enum is only used internally within the crate in the reconstruction function to improve @@ -490,22 +509,17 @@ impl DataAvailabilityCheckerInner { beacon_store: BeaconStore, custody_context: Arc>, spec: Arc, - min_execution_proofs_required: Option, + has_execution_layer_and_proof_gen: bool, ) -> Result { Ok(Self { critical: RwLock::new(LruCache::new(capacity)), state_cache: StateLRUCache::new(beacon_store, spec.clone()), custody_context, spec, - min_execution_proofs_required, + has_execution_layer_and_proof_gen, }) } - /// Returns the minimum number of execution proofs required (if ZK-VM mode enabled) - pub fn min_execution_proofs_required(&self) -> Option { - self.min_execution_proofs_required - } - /// Returns true if the block root is known, without altering the LRU ordering pub fn get_cached_block(&self, block_root: &Hash256) -> Option> { self.critical @@ -706,7 +720,7 @@ impl DataAvailabilityCheckerInner { if let Some(available_block) = pending_components.make_available( &self.spec, num_expected_columns_opt, - self.min_execution_proofs_required, + self.has_execution_layer_and_proof_gen, |block, span| self.state_cache.recover_pending_executed_block(block, span), )? 
{ // Explicitly drop read lock before acquiring write lock @@ -1151,7 +1165,7 @@ mod test { test_store, custody_context, spec.clone(), - None, + false, ) .expect("should create cache"), ); From 1db670cc658b534cd6dd9c8967db4c065da354a7 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 15:20:42 +0000 Subject: [PATCH 30/67] fix missing topic --- beacon_node/lighthouse_network/src/types/topics.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 8230f91c0b1..29cdad2db2d 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -270,6 +270,7 @@ impl GossipTopic { PROPOSER_SLASHING_TOPIC => GossipKind::ProposerSlashing, ATTESTER_SLASHING_TOPIC => GossipKind::AttesterSlashing, BLS_TO_EXECUTION_CHANGE_TOPIC => GossipKind::BlsToExecutionChange, + EXECUTION_PROOF_TOPIC => GossipKind::ExecutionProof, LIGHT_CLIENT_FINALITY_UPDATE => GossipKind::LightClientFinalityUpdate, LIGHT_CLIENT_OPTIMISTIC_UPDATE => GossipKind::LightClientOptimisticUpdate, topic => match subnet_topic_index(topic) { From 2af140622465291611f56c88b102bd9d03437bc3 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 16:28:41 +0000 Subject: [PATCH 31/67] grep rename --- beacon_node/beacon_chain/src/data_availability_checker.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 69b5d99a4c2..afe4dd66168 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -299,7 +299,7 @@ impl DataAvailabilityChecker { .get_verifier(subnet_id) .ok_or_else(|| { warn!(?subnet_id, "No verifier registered for subnet"); - AvailabilityCheckError::UnsupportedProofSubnet(subnet_id) + 
AvailabilityCheckError::UnsupportedProofID(subnet_id) })?; verifier.verify(proof).map_err(|e| { @@ -452,7 +452,7 @@ impl DataAvailabilityChecker { .get_verifier(proof_id) .ok_or_else(|| { warn!(?proof_id, "No verifier registered for proof ID"); - AvailabilityCheckError::UnsupportedProofSubnet(proof_id) + AvailabilityCheckError::UnsupportedProofID(proof_id) })?; // Verify the proof (proof contains block_hash internally) From 643719ef6e4e5012ecd577ff6a11c4b97c88725d Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 16:33:52 +0000 Subject: [PATCH 32/67] Add verify_execution_proof_for_gossip --- beacon_node/beacon_chain/src/beacon_chain.rs | 205 +++++++++++++++++++ 1 file changed, 205 insertions(+) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index deb11421e92..80e7c8a0257 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -26,6 +26,9 @@ use crate::data_availability_checker::{ DataAvailabilityChecker, DataColumnReconstructionResult, }; use crate::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; +use crate::execution_proof_verification::{ + GossipExecutionProofError, GossipVerifiedExecutionProof, +}; use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::events::ServerSentEventHandler; @@ -54,6 +57,7 @@ use crate::observed_attesters::{ }; use crate::observed_block_producers::ObservedBlockProducers; use crate::observed_data_sidecars::ObservedDataSidecars; +use crate::observed_execution_proofs::ObservedExecutionProofs; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::observed_slashable::ObservedSlashable; use crate::persisted_beacon_chain::PersistedBeaconChain; @@ -126,8 +130,10 @@ use store::{ KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, }; use task_executor::{RayonPoolType, 
ShutdownReason, TaskExecutor}; +use tokio::sync::mpsc::UnboundedSender; use tokio_stream::Stream; use tracing::{Span, debug, debug_span, error, info, info_span, instrument, trace, warn}; +use zkvm_execution_layer::GeneratorRegistry; use tree_hash::TreeHash; use types::blob_sidecar::FixedBlobSidecarList; use types::data_column_sidecar::ColumnIndex; @@ -343,6 +349,8 @@ pub enum BlockProcessStatus { pub type LightClientProducerEvent = (Hash256, Slot, SyncAggregate); +pub type ProofGenerationEvent = (Hash256, Slot, Arc>); + pub type BeaconForkChoice = ForkChoice< BeaconForkChoiceStore< ::EthSpec, @@ -414,6 +422,8 @@ pub struct BeaconChain { pub observed_blob_sidecars: RwLock>>, /// Maintains a record of column sidecars seen over the gossip network. pub observed_column_sidecars: RwLock>>, + /// Maintains a record of execution proofs seen over the gossip network. + pub observed_execution_proofs: RwLock, /// Maintains a record of slashable message seen over the gossip network or RPC. pub observed_slashable: RwLock>, /// Maintains a record of which validators have submitted voluntary exits. @@ -482,6 +492,10 @@ pub struct BeaconChain { pub kzg: Arc, /// RNG instance used by the chain. Currently used for shuffling column sidecars in block publishing. 
pub rng: Arc>>, + /// Registry of zkVM proof generators for altruistic proof generation + pub zkvm_generator_registry: Option>, + /// Sender to notify proof generation service of blocks needing proofs + pub proof_generation_tx: Option>>, } pub enum BeaconBlockResponseWrapper { @@ -2202,6 +2216,15 @@ impl BeaconChain { }) } + #[instrument(skip_all, level = "trace")] + pub fn verify_execution_proof_for_gossip( + self: &Arc, + execution_proof: Arc, + ) -> Result, GossipExecutionProofError> { + // TODO(zkproofs): Add metrics + GossipVerifiedExecutionProof::new(execution_proof, self) + } + #[instrument(skip_all, level = "trace")] pub fn verify_blob_sidecar_for_gossip( self: &Arc, @@ -3039,6 +3062,36 @@ impl BeaconChain { self.check_gossip_blob_availability_and_import(blob).await } + /// Process a gossip-verified execution proof by storing it in the DA checker. + /// + /// This method takes an execution proof that has already been validated via gossip + /// and stores it in the DataAvailabilityChecker. If all components for a block are + /// now available, the block will be imported to fork choice. + #[instrument(skip_all, level = "debug")] + pub async fn process_gossip_execution_proof( + self: &Arc, + execution_proof: GossipVerifiedExecutionProof, + publish_fn: impl FnOnce() -> Result<(), BlockError>, + ) -> Result { + let block_root = execution_proof.block_root(); + + // If this block has already been imported to forkchoice it must have been available, so + // we don't need to process its execution proofs again. + if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Err(BlockError::DuplicateFullyImported(block_root)); + } + + self.check_gossip_execution_proof_availability_and_import( + execution_proof, + publish_fn, + ) + .await + } + /// Cache the data columns in the processing cache, process it, then evict it from the cache if it was /// imported or errors. 
#[instrument(skip_all, level = "debug")] @@ -3122,6 +3175,45 @@ impl BeaconChain { .await } + /// Process execution proofs retrieved via RPC and returns the `AvailabilityProcessingStatus`. + /// + /// This method handles execution proofs received from peers during block sync. The proofs + /// are verified and stored in the data availability checker. If all required components + /// (block, blobs/columns, and proofs) are available, the block is imported into fork choice. + pub async fn process_rpc_execution_proofs( + self: &Arc, + slot: Slot, + block_root: Hash256, + execution_proofs: Vec>, + ) -> Result { + // If this block has already been imported to forkchoice it must have been available, so + // we don't need to process its execution proofs again. + if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Err(BlockError::DuplicateFullyImported(block_root)); + } + + // Validate that all proofs are for the expected block_root + for proof in &execution_proofs { + if proof.block_root != block_root { + return Err(BlockError::AvailabilityCheck( + AvailabilityCheckError::Unexpected(format!( + "Proof block_root mismatch: expected {}, got {}", + block_root, proof.block_root + )), + )); + } + } + + // TODO(zkproofs): We can emit SSE events for execution proofs yet + + self.check_rpc_execution_proof_availability_and_import(slot, block_root, execution_proofs) + .await + } + /// Process blobs retrieved from the EL and returns the `AvailabilityProcessingStatus`. pub async fn process_engine_blobs( self: &Arc, @@ -3564,6 +3656,30 @@ impl BeaconChain { .await } + /// Checks if the provided execution proof can make any cached blocks available, and imports + /// immediately if so, otherwise caches the proof in the data availability checker. 
+ async fn check_gossip_execution_proof_availability_and_import( + self: &Arc, + execution_proof: GossipVerifiedExecutionProof, + publish_fn: impl FnOnce() -> Result<(), BlockError>, + ) -> Result { + let block_root = execution_proof.block_root(); + let slot = execution_proof.slot(); + + // TODO(zkproofs): Can we avoid the clone + let proof_arc = execution_proof.into_inner(); + let proof = (*proof_arc).clone(); + + // Store the proof in the DA checker + let availability = self + .data_availability_checker + .put_verified_execution_proofs(block_root, std::iter::once(proof)) + .map_err(|e| BlockError::AvailabilityCheck(e))?; + + self.process_availability(slot, availability, publish_fn) + .await + } + fn check_blobs_for_slashability<'a>( self: &Arc, block_root: Hash256, @@ -3659,6 +3775,28 @@ impl BeaconChain { .await } + /// Checks if the provided execution proofs can make any cached blocks available, and imports + /// immediately if so, otherwise caches the proofs in the data availability checker. + async fn check_rpc_execution_proof_availability_and_import( + self: &Arc, + slot: Slot, + block_root: Hash256, + execution_proofs: Vec>, + ) -> Result { + // TODO(zkproofs): For optional proofs, they are currently not signed + // so we can't add any slashability checks here. We also don't want this + // because it could cause issues where we slash a validator for giving us bad + // proofs, but for nodes that don't need proofs (most of the network), they will + // not see this slashing or care about. 
+ + let availability = self + .data_availability_checker + .put_rpc_execution_proofs(block_root, execution_proofs)?; + + self.process_availability(slot, availability, || Ok(())) + .await + } + fn check_columns_for_slashability<'a>( self: &Arc, block_root: Hash256, @@ -4035,6 +4173,20 @@ impl BeaconChain { current_slot, ); + // Notify proof generation service for altruistic proof generation + if let Some(ref proof_gen_tx) = self.proof_generation_tx { + let slot = signed_block.slot(); + let event = (block_root, slot, signed_block.clone()); + + if let Err(e) = proof_gen_tx.send(event) { + debug!( + error = ?e, + ?block_root, + "Failed to send proof generation event" + ); + } + } + Ok(block_root) } @@ -7266,6 +7418,59 @@ impl BeaconChain { .custody_context() .custody_columns_for_epoch(epoch_opt, &self.spec) } + + /// Returns a deterministic list of execution proof subnet IDs to request for a block in the given epoch. + /// + /// The selection is deterministic based on the epoch, ensuring all nodes request the same + /// subnets for blocks in the same epoch. Different epochs will result in different subnet + /// selections, providing rotation over time. + /// + /// # Arguments + /// * `epoch` - The epoch of the block + /// * `count` - Number of subnets to select (typically min_execution_proofs_required) + /// + /// # Returns + /// A vector of `count` subnet IDs, deterministically selected based on the epoch. 
+ pub fn execution_proof_subnets_for_epoch( + &self, + epoch: Epoch, + count: usize, + ) -> Vec { + use types::EXECUTION_PROOF_TYPE_COUNT; + + let total_subnets = EXECUTION_PROOF_TYPE_COUNT as usize; + let count = std::cmp::min(count, total_subnets); + + if count == 0 { + return vec![]; + } + + // Use epoch as a deterministic seed + // Hash the epoch to get a pseudo-random but deterministic ordering + let mut hasher = std::collections::hash_map::DefaultHasher::new(); + use std::hash::{Hash, Hasher}; + epoch.hash(&mut hasher); + let seed = hasher.finish(); + + // Create a deterministic permutation of subnet IDs based on the seed + let mut subnet_ids: Vec = (0..EXECUTION_PROOF_TYPE_COUNT).collect(); + + // Simple deterministic shuffle using the seed + // This is a Fisher-Yates shuffle variant using deterministic randomness + for i in (1..subnet_ids.len()).rev() { + // Use seed + i for deterministic pseudo-random index + let j = ((seed.wrapping_add(i as u64).wrapping_mul(2654435761)) % ((i + 1) as u64)) + as usize; + subnet_ids.swap(i, j); + } + + // Take the first `count` subnet IDs and convert to ExecutionProofId + subnet_ids + .into_iter() + .take(count) + .filter_map(|id| types::ExecutionProofId::new(id).ok()) + .collect() + } } impl Drop for BeaconChain { From 9e6ed12d917ee58c3463694e0af28ea5c2b6bb75 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 17:49:31 +0000 Subject: [PATCH 33/67] Handle incoming gossip and RPC messages --- beacon_node/beacon_processor/src/lib.rs | 33 +- .../gossip_methods.rs | 290 +++++++++++++++++- .../src/network_beacon_processor/mod.rs | 70 ++++- .../network_beacon_processor/rpc_methods.rs | 94 ++++++ .../network_beacon_processor/sync_methods.rs | 90 ++++++ beacon_node/network/src/router.rs | 59 +++- 6 files changed, 628 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 28ed0cca913..3ae37af529a 100644 --- 
a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -121,6 +121,7 @@ pub struct BeaconProcessorQueueLengths { gossip_block_queue: usize, gossip_blob_queue: usize, gossip_data_column_queue: usize, + gossip_execution_proof_queue: usize, delayed_block_queue: usize, status_queue: usize, bbrange_queue: usize, @@ -187,6 +188,7 @@ impl BeaconProcessorQueueLengths { gossip_block_queue: 1024, gossip_blob_queue: 1024, gossip_data_column_queue: 1024, + gossip_execution_proof_queue: 1024, delayed_block_queue: 1024, status_queue: 1024, bbrange_queue: 1024, @@ -579,6 +581,7 @@ pub enum Work { GossipBlock(AsyncFn), GossipBlobSidecar(AsyncFn), GossipDataColumnSidecar(AsyncFn), + GossipExecutionProof(AsyncFn), DelayedImportBlock { beacon_block_slot: Slot, beacon_block_root: Hash256, @@ -597,6 +600,9 @@ pub enum Work { RpcBlobs { process_fn: AsyncFn, }, + RpcExecutionProofs { + process_fn: AsyncFn, + }, RpcCustodyColumn(AsyncFn), ColumnReconstruction(AsyncFn), IgnoredRpcBlock { @@ -609,6 +615,7 @@ pub enum Work { BlocksByRootsRequest(AsyncFn), BlobsByRangeRequest(BlockingFn), BlobsByRootsRequest(BlockingFn), + ExecutionProofsByRootsRequest(BlockingFn), DataColumnsByRootsRequest(BlockingFn), DataColumnsByRangeRequest(BlockingFn), GossipBlsToExecutionChange(BlockingFn), @@ -641,6 +648,7 @@ pub enum WorkType { GossipBlock, GossipBlobSidecar, GossipDataColumnSidecar, + GossipExecutionProof, DelayedImportBlock, GossipVoluntaryExit, GossipProposerSlashing, @@ -651,6 +659,7 @@ pub enum WorkType { GossipLightClientOptimisticUpdate, RpcBlock, RpcBlobs, + RpcExecutionProofs, RpcCustodyColumn, ColumnReconstruction, IgnoredRpcBlock, @@ -661,6 +670,7 @@ pub enum WorkType { BlocksByRootsRequest, BlobsByRangeRequest, BlobsByRootsRequest, + ExecutionProofsByRootsRequest, DataColumnsByRootsRequest, DataColumnsByRangeRequest, GossipBlsToExecutionChange, @@ -688,6 +698,7 @@ impl Work { Work::GossipBlock(_) => WorkType::GossipBlock, Work::GossipBlobSidecar(_) => 
WorkType::GossipBlobSidecar, Work::GossipDataColumnSidecar(_) => WorkType::GossipDataColumnSidecar, + Work::GossipExecutionProof(_) => WorkType::GossipExecutionProof, Work::DelayedImportBlock { .. } => WorkType::DelayedImportBlock, Work::GossipVoluntaryExit(_) => WorkType::GossipVoluntaryExit, Work::GossipProposerSlashing(_) => WorkType::GossipProposerSlashing, @@ -701,6 +712,7 @@ impl Work { Work::GossipBlsToExecutionChange(_) => WorkType::GossipBlsToExecutionChange, Work::RpcBlock { .. } => WorkType::RpcBlock, Work::RpcBlobs { .. } => WorkType::RpcBlobs, + Work::RpcExecutionProofs { .. } => WorkType::RpcExecutionProofs, Work::RpcCustodyColumn { .. } => WorkType::RpcCustodyColumn, Work::ColumnReconstruction(_) => WorkType::ColumnReconstruction, Work::IgnoredRpcBlock { .. } => WorkType::IgnoredRpcBlock, @@ -711,6 +723,7 @@ impl Work { Work::BlocksByRootsRequest(_) => WorkType::BlocksByRootsRequest, Work::BlobsByRangeRequest(_) => WorkType::BlobsByRangeRequest, Work::BlobsByRootsRequest(_) => WorkType::BlobsByRootsRequest, + Work::ExecutionProofsByRootsRequest(_) => WorkType::ExecutionProofsByRootsRequest, Work::DataColumnsByRootsRequest(_) => WorkType::DataColumnsByRootsRequest, Work::DataColumnsByRangeRequest(_) => WorkType::DataColumnsByRangeRequest, Work::LightClientBootstrapRequest(_) => WorkType::LightClientBootstrapRequest, @@ -873,6 +886,7 @@ impl BeaconProcessor { let mut gossip_block_queue = FifoQueue::new(queue_lengths.gossip_block_queue); let mut gossip_blob_queue = FifoQueue::new(queue_lengths.gossip_blob_queue); let mut gossip_data_column_queue = FifoQueue::new(queue_lengths.gossip_data_column_queue); + let mut gossip_execution_proof_queue = FifoQueue::new(queue_lengths.gossip_execution_proof_queue); let mut delayed_block_queue = FifoQueue::new(queue_lengths.delayed_block_queue); let mut status_queue = FifoQueue::new(queue_lengths.status_queue); @@ -1055,6 +1069,8 @@ impl BeaconProcessor { Some(item) } else if let Some(item) = 
gossip_data_column_queue.pop() { Some(item) + } else if let Some(item) = gossip_execution_proof_queue.pop() { + Some(item) } else if let Some(item) = column_reconstruction_queue.pop() { Some(item) // Check the priority 0 API requests after blocks and blobs, but before attestations. @@ -1325,6 +1341,9 @@ impl BeaconProcessor { Work::GossipDataColumnSidecar { .. } => { gossip_data_column_queue.push(work, work_id) } + Work::GossipExecutionProof { .. } => { + gossip_execution_proof_queue.push(work, work_id) + } Work::DelayedImportBlock { .. } => { delayed_block_queue.push(work, work_id) } @@ -1351,6 +1370,8 @@ impl BeaconProcessor { rpc_block_queue.push(work, work_id) } Work::RpcBlobs { .. } => rpc_blob_queue.push(work, work_id), + // TODO(zkproofs): Making a note that we are reusing the blob_queue + Work::RpcExecutionProofs { .. } => rpc_blob_queue.push(work, work_id), Work::RpcCustodyColumn { .. } => { rpc_custody_column_queue.push(work, work_id) } @@ -1385,6 +1406,7 @@ impl BeaconProcessor { gossip_bls_to_execution_change_queue.push(work, work_id) } Work::BlobsByRootsRequest { .. } => blbroots_queue.push(work, work_id), + Work::ExecutionProofsByRootsRequest { .. } => blbroots_queue.push(work, work_id), Work::DataColumnsByRootsRequest { .. 
} => { dcbroots_queue.push(work, work_id) } @@ -1416,6 +1438,7 @@ impl BeaconProcessor { WorkType::GossipBlock => gossip_block_queue.len(), WorkType::GossipBlobSidecar => gossip_blob_queue.len(), WorkType::GossipDataColumnSidecar => gossip_data_column_queue.len(), + WorkType::GossipExecutionProof => gossip_execution_proof_queue.len(), WorkType::DelayedImportBlock => delayed_block_queue.len(), WorkType::GossipVoluntaryExit => gossip_voluntary_exit_queue.len(), WorkType::GossipProposerSlashing => gossip_proposer_slashing_queue.len(), @@ -1429,7 +1452,9 @@ impl BeaconProcessor { lc_gossip_optimistic_update_queue.len() } WorkType::RpcBlock => rpc_block_queue.len(), - WorkType::RpcBlobs | WorkType::IgnoredRpcBlock => rpc_blob_queue.len(), + WorkType::RpcBlobs + | WorkType::RpcExecutionProofs + | WorkType::IgnoredRpcBlock => rpc_blob_queue.len(), WorkType::RpcCustodyColumn => rpc_custody_column_queue.len(), WorkType::ColumnReconstruction => column_reconstruction_queue.len(), WorkType::ChainSegment => chain_segment_queue.len(), @@ -1439,6 +1464,7 @@ impl BeaconProcessor { WorkType::BlocksByRootsRequest => blbroots_queue.len(), WorkType::BlobsByRangeRequest => bbrange_queue.len(), WorkType::BlobsByRootsRequest => bbroots_queue.len(), + WorkType::ExecutionProofsByRootsRequest => bbroots_queue.len(), WorkType::DataColumnsByRootsRequest => dcbroots_queue.len(), WorkType::DataColumnsByRangeRequest => dcbrange_queue.len(), WorkType::GossipBlsToExecutionChange => { @@ -1586,16 +1612,19 @@ impl BeaconProcessor { } => task_spawner.spawn_async(process_fn), Work::RpcBlock { process_fn } | Work::RpcBlobs { process_fn } + | Work::RpcExecutionProofs { process_fn } | Work::RpcCustodyColumn(process_fn) | Work::ColumnReconstruction(process_fn) => task_spawner.spawn_async(process_fn), Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn), Work::GossipBlock(work) | Work::GossipBlobSidecar(work) - | Work::GossipDataColumnSidecar(work) => 
task_spawner.spawn_async(async move { + | Work::GossipDataColumnSidecar(work) + | Work::GossipExecutionProof(work) => task_spawner.spawn_async(async move { work.await; }), Work::BlobsByRangeRequest(process_fn) | Work::BlobsByRootsRequest(process_fn) + | Work::ExecutionProofsByRootsRequest(process_fn) | Work::DataColumnsByRootsRequest(process_fn) | Work::DataColumnsByRangeRequest(process_fn) => { task_spawner.spawn_blocking(process_fn) diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index fa6b5fd2434..3340d065488 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -7,6 +7,9 @@ use crate::{ use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; +use beacon_chain::execution_proof_verification::{ + GossipExecutionProofError, GossipVerifiedExecutionProof, +}; use beacon_chain::store::Error; use beacon_chain::{ AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, @@ -37,10 +40,10 @@ use store::hot_cold_store::HotColdDBError; use tracing::{Instrument, Span, debug, error, info, instrument, trace, warn}; use types::{ Attestation, AttestationData, AttestationRef, AttesterSlashing, BlobSidecar, DataColumnSidecar, - DataColumnSubnetId, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, - LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, - SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SingleAttestation, - Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, beacon_block::BlockImportSource, + DataColumnSubnetId, EthSpec, ExecutionProof, Hash256, IndexedAttestation, + 
LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, + SignedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, + SingleAttestation, Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, beacon_block::BlockImportSource, }; use beacon_processor::work_reprocessing_queue::QueuedColumnReconstruction; @@ -768,6 +771,211 @@ impl NetworkBeaconProcessor { } } + /// Process a gossip execution proof. + /// + /// Validates the execution proof according to the gossip spec and processes it + /// through the DataAvailabilityChecker if valid. + pub async fn process_gossip_execution_proof( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + execution_proof: Arc, + _seen_timestamp: Duration, + ) { + let block_root = execution_proof.block_root; + let proof_id = execution_proof.proof_id; + + debug!( + %peer_id, + %proof_id, + %block_root, + "Received execution proof via gossip" + ); + + // Verify the execution proof for gossip + match self + .chain + .verify_execution_proof_for_gossip(execution_proof.clone()) + { + Ok(gossip_verified_proof) => { + debug!( + %block_root, + subnet_id = %gossip_verified_proof.subnet_id(), + "Successfully verified gossip execution proof" + ); + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + + // Process the verified proof through DA checker + self.process_gossip_verified_execution_proof( + peer_id, + gossip_verified_proof, + _seen_timestamp, + ) + .await + } + Err(err) => { + match err { + GossipExecutionProofError::PriorKnownUnpublished => { + debug!( + %block_root, + %proof_id, + "Gossip execution proof already processed via the EL. Accepting the proof without re-processing." + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Accept, + ); + } + GossipExecutionProofError::PriorKnown { block_root, proof_id, .. } => { + // Proof already known via gossip. 
No penalty, gossip filter should + // filter duplicates. + debug!( + %block_root, + %proof_id, + "Received already known execution proof. Ignoring the proof" + ); + } + GossipExecutionProofError::ParentUnknown { parent_root } => { + debug!( + action = "requesting parent", + %block_root, + %parent_root, + "Unknown parent hash for execution proof" + ); + // TODO(zkproofs): Implement parent lookup for execution proofs + // This might require creating a new SyncMessage variant + // For now, we just ignore the proof + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + GossipExecutionProofError::BeaconChainError(_) => { + crit!( + error = ?err, + "Internal error when verifying execution proof" + ) + } + GossipExecutionProofError::ProofVerificationFailed(ref reason) => { + warn!( + error = ?err, + %block_root, + %proof_id, + %reason, + "Execution proof verification failed. Rejecting the proof" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_execution_proof_verification_failed", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Reject, + ); + } + GossipExecutionProofError::ProofTooLarge { size, max_size } => { + warn!( + error = ?err, + %block_root, + %proof_id, + %size, + %max_size, + "Execution proof exceeds maximum size. Rejecting the proof" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_execution_proof_too_large", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Reject, + ); + } + GossipExecutionProofError::BlockNotAvailable { block_root } => { + debug!( + error = ?err, + %block_root, + %proof_id, + "Block for execution proof not yet available. 
Ignoring the proof" + ); + // Block might arrive later, so don't penalize heavily + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + GossipExecutionProofError::NotFinalizedDescendant { block_parent_root } => { + debug!( + error = ?err, + %block_root, + %block_parent_root, + %proof_id, + "Execution proof conflicts with finality. Rejecting the proof" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_execution_proof_not_finalized_descendant", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Reject, + ); + } + GossipExecutionProofError::FutureSlot { message_slot, latest_permissible_slot } => { + debug!( + error = ?err, + %block_root, + %proof_id, + %message_slot, + %latest_permissible_slot, + "Execution proof from future slot. Ignoring the proof" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "gossip_execution_proof_future_slot", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + GossipExecutionProofError::PastFinalizedSlot { proof_slot, finalized_slot } => { + debug!( + error = ?err, + %block_root, + %proof_id, + %proof_slot, + %finalized_slot, + "Execution proof from past finalized slot. 
Ignoring the proof" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_execution_proof_past_finalized", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + } + } + } + } + #[allow(clippy::too_many_arguments)] #[instrument( name = SPAN_PROCESS_GOSSIP_BLOB, @@ -1127,6 +1335,80 @@ impl NetworkBeaconProcessor { } } + async fn process_gossip_verified_execution_proof( + self: &Arc, + peer_id: PeerId, + verified_proof: GossipVerifiedExecutionProof, + _seen_duration: Duration, + ) { + let processing_start_time = Instant::now(); + let block_root = verified_proof.block_root(); + let proof_slot = verified_proof.slot(); + let subnet_id = verified_proof.subnet_id(); + + let result = self.chain.process_gossip_execution_proof(verified_proof, || Ok(())).await; + register_process_result_metrics(&result, metrics::BlockSource::Gossip, "execution_proof"); + + match &result { + Ok(availability) => match availability { + AvailabilityProcessingStatus::Imported(block_root) => { + info!( + %block_root, + %subnet_id, + "Gossipsub execution proof processed, imported fully available block" + ); + self.chain.recompute_head_at_current_slot().await; + + debug!( + processing_time_ms = processing_start_time.elapsed().as_millis(), + "Execution proof full verification complete" + ); + } + AvailabilityProcessingStatus::MissingComponents(slot, block_root) => { + trace!( + %slot, + %subnet_id, + "Execution proof cached, block still needs more components" + ); + debug!( + %block_root, + %proof_slot, + %subnet_id, + "Execution proof cached for pending block" + ); + } + }, + Err(BlockError::DuplicateFullyImported(_)) => { + debug!( + ?block_root, + %subnet_id, + "Ignoring gossip execution proof for already imported block" + ); + } + Err(err) => { + debug!( + outcome = ?err, + ?block_root, + block_slot = %proof_slot, + %subnet_id, + "Invalid gossip execution proof" + ); + self.gossip_penalize_peer( + peer_id, + 
PeerAction::MidToleranceError, + "bad_gossip_execution_proof", + ); + } + } + + if matches!(result, Ok(AvailabilityProcessingStatus::Imported(_))) { + self.send_sync_message(SyncMessage::GossipBlockProcessResult { + block_root, + imported: true, + }); + } + } + /// Process the beacon block received from the gossip network and: /// /// - If it passes gossip propagation criteria, tell the network thread to forward it. diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 85ccde1d591..6a9e28f204a 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -14,7 +14,7 @@ use beacon_processor::{ use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, - LightClientUpdatesByRangeRequest, + ExecutionProofsByRootRequest, LightClientUpdatesByRangeRequest, }; use lighthouse_network::{ Client, MessageId, NetworkGlobals, PeerId, PubsubMessage, @@ -248,6 +248,32 @@ impl NetworkBeaconProcessor { }) } + /// Create a new `Work` event for some execution proof. + pub fn send_gossip_execution_proof( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + execution_proof: Arc, + seen_timestamp: Duration, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = async move { + processor + .process_gossip_execution_proof( + message_id, + peer_id, + execution_proof, + seen_timestamp, + ) + .await + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipExecutionProof(Box::pin(process_fn)), + }) + } + /// Create a new `Work` event for some sync committee signature. pub fn send_gossip_sync_signature( self: &Arc, @@ -468,6 +494,30 @@ impl NetworkBeaconProcessor { }) } + /// Create a new `Work` event for some execution proofs. 
`process_rpc_execution_proofs` reports + /// the result back to sync. + pub fn send_rpc_execution_proofs( + self: &Arc, + block_root: Hash256, + proofs: Vec>, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> Result<(), Error> { + if proofs.is_empty() { + return Ok(()); + } + let process_fn = self.clone().generate_rpc_execution_proofs_process_fn( + block_root, + proofs, + seen_timestamp, + process_type, + ); + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::RpcExecutionProofs { process_fn }, + }) + } + /// Create a new `Work` event for some custody columns. `process_rpc_custody_columns` reports /// the result back to sync. pub fn send_rpc_custody_columns( @@ -612,6 +662,24 @@ impl NetworkBeaconProcessor { }) } + /// Create a new work event to process `ExecutionProofsByRootRequest`s from the RPC network. + pub fn send_execution_proofs_by_roots_request( + self: &Arc, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + request: ExecutionProofsByRootRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.handle_execution_proofs_by_root_request(peer_id, inbound_request_id, request) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::ExecutionProofsByRootsRequest(Box::new(process_fn)), + }) + } + /// Create a new work event to process `DataColumnsByRootRequest`s from the RPC network. 
pub fn send_data_columns_by_roots_request( self: &Arc, diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 58e02ffe007..c63acdae618 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -7,6 +7,7 @@ use beacon_chain::{BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use itertools::{Itertools, process_results}; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, + ExecutionProofsByRootRequest, }; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, ReportSource, Response, SyncInfo}; @@ -14,6 +15,7 @@ use lighthouse_tracing::{ SPAN_HANDLE_BLOBS_BY_RANGE_REQUEST, SPAN_HANDLE_BLOBS_BY_ROOT_REQUEST, SPAN_HANDLE_BLOCKS_BY_RANGE_REQUEST, SPAN_HANDLE_BLOCKS_BY_ROOT_REQUEST, SPAN_HANDLE_DATA_COLUMNS_BY_RANGE_REQUEST, SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST, + SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST, SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP, SPAN_HANDLE_LIGHT_CLIENT_FINALITY_UPDATE, SPAN_HANDLE_LIGHT_CLIENT_OPTIMISTIC_UPDATE, SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE, }; @@ -365,6 +367,98 @@ impl NetworkBeaconProcessor { Ok(()) } + /// Handle an `ExecutionProofsByRoot` request from the peer. 
+ #[instrument( + name = SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST, + parent = None, + level = "debug", + skip_all, + fields( + peer_id = %peer_id, + client = tracing::field::Empty, + ) + )] + pub fn handle_execution_proofs_by_root_request( + self: Arc, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + request: ExecutionProofsByRootRequest, + ) { + let client = self.network_globals.client(&peer_id); + Span::current().record("client", field::display(client.kind)); + + self.terminate_response_stream( + peer_id, + inbound_request_id, + self.handle_execution_proofs_by_root_request_inner(peer_id, inbound_request_id, request), + Response::ExecutionProofsByRoot, + ); + } + + /// Handle an `ExecutionProofsByRoot` request from the peer. + fn handle_execution_proofs_by_root_request_inner( + &self, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + request: ExecutionProofsByRootRequest, + ) -> Result<(), (RpcErrorResponse, &'static str)> { + let block_root = request.block_root; + let already_have_set: std::collections::HashSet<_> = request.already_have.iter().copied().collect(); + let count_needed = request.count_needed as usize; + + // Get all execution proofs we have for this block from the DA checker + let available_proofs = match self + .chain + .data_availability_checker + .get_execution_proofs(&block_root) + { + Some(proofs) => proofs, + None => { + // No proofs available for this block + debug!( + %peer_id, + %block_root, + "No execution proofs available for peer" + ); + return Ok(()); + } + }; + + // Filter out proofs the peer already has and send up to count_needed + let mut sent_count = 0; + for proof in available_proofs { + // Skip proofs the peer already has + if already_have_set.contains(&proof.proof_id) { + continue; + } + + // Send the proof + self.send_response( + peer_id, + inbound_request_id, + Response::ExecutionProofsByRoot(Some(proof)), + ); + + sent_count += 1; + + // Stop when we've sent the requested count + if sent_count >= 
count_needed { + break; + } + } + + debug!( + %peer_id, + %block_root, + requested = count_needed, + already_have = already_have_set.len(), + sent = sent_count, + "ExecutionProofsByRoot outgoing response processed" + ); + + Ok(()) + } + /// Handle a `DataColumnsByRoot` request from the peer. #[instrument( name = SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST, diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 1d99540c299..3e02799ca58 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -255,6 +255,21 @@ impl NetworkBeaconProcessor { Box::pin(process_fn) } + pub fn generate_rpc_execution_proofs_process_fn( + self: Arc, + block_root: Hash256, + proofs: Vec>, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> AsyncFn { + let process_fn = async move { + self.clone() + .process_rpc_execution_proofs(block_root, proofs, seen_timestamp, process_type) + .await; + }; + Box::pin(process_fn) + } + /// Attempt to process a list of blobs received from a direct RPC request. #[instrument( name = SPAN_PROCESS_RPC_BLOBS, @@ -884,4 +899,79 @@ impl NetworkBeaconProcessor { } } } + + /// Process execution proofs received via RPC. + pub async fn process_rpc_execution_proofs( + self: Arc>, + block_root: Hash256, + proofs: Vec>, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) { + // Get slot directly from the first proof. All proofs should be for the same block. 
+ let slot = match proofs.first() { + Some(proof) => proof.slot, + None => { + debug!(?block_root, "No execution proofs to process"); + return; + } + }; + + let proof_ids: Vec<_> = proofs.iter().map(|p| p.proof_id).collect(); + + debug!( + ?proof_ids, + %block_root, + %slot, + proof_count = proofs.len(), + "RPC execution proofs received" + ); + + if let Ok(current_slot) = self.chain.slot() + && current_slot == slot + { + // let delay = get_slot_delay_ms(seen_timestamp, slot, &self.chain.slot_clock); + // TODO(zkproofs): Add dedicated metrics for execution proofs + } + + let result = self + .chain + .process_rpc_execution_proofs(slot, block_root, proofs) + .await; + + // TODO(zkproofs): Add dedicated metrics for execution proof processing + // register_process_result_metrics(&result, metrics::BlockSource::Rpc, "execution_proofs"); + + match &result { + Ok(AvailabilityProcessingStatus::Imported(hash)) => { + debug!( + result = "imported block with execution proofs", + %slot, + block_hash = %hash, + "Block components retrieved" + ); + self.chain.recompute_head_at_current_slot().await; + } + Ok(AvailabilityProcessingStatus::MissingComponents(_, _)) => { + debug!( + block_hash = %block_root, + %slot, + "Missing components over rpc (still need more proofs or other components)" + ); + } + Err(BlockError::DuplicateFullyImported(_)) => { + debug!( + block_hash = %block_root, + %slot, + "Execution proofs have already been imported" + ); + } + Err(_) => {} + } + + self.send_sync_message(SyncMessage::BlockComponentProcessed { + process_type, + result: result.into(), + }); + } } diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 60fe094bb7c..1cb0e113db7 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -24,7 +24,9 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, error, trace, warn}; -use 
types::{BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, SignedBeaconBlock}; +use types::{ + BlobSidecar, DataColumnSidecar, EthSpec, ExecutionProof, ForkContext, SignedBeaconBlock, +}; /// Handles messages from the network and routes them to the appropriate service to be handled. pub struct Router { @@ -272,6 +274,10 @@ impl Router { request, ), ), + RequestType::ExecutionProofsByRoot(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_execution_proofs_by_roots_request(peer_id, inbound_request_id, request), + ), _ => {} } } @@ -309,6 +315,13 @@ impl Router { Response::DataColumnsByRange(data_column) => { self.on_data_columns_by_range_response(peer_id, app_request_id, data_column); } + Response::ExecutionProofsByRoot(execution_proof) => { + self.on_execution_proofs_by_root_response( + peer_id, + app_request_id, + execution_proof, + ); + } // Light client responses should not be received Response::LightClientBootstrap(_) | Response::LightClientOptimisticUpdate(_) @@ -384,6 +397,16 @@ impl Router { ), ) } + PubsubMessage::ExecutionProof(execution_proof) => { + self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_gossip_execution_proof( + message_id, + peer_id, + execution_proof, + timestamp_now(), + ), + ) + } PubsubMessage::VoluntaryExit(exit) => { debug!(%peer_id, "Received a voluntary exit"); self.handle_beacon_processor_send_result( @@ -670,6 +693,40 @@ impl Router { }); } + /// Handle an `ExecutionProofsByRoot` response from the peer. + pub fn on_execution_proofs_by_root_response( + &mut self, + peer_id: PeerId, + app_request_id: AppRequestId, + execution_proof: Option>, + ) { + let sync_request_id = match app_request_id { + AppRequestId::Sync(sync_id) => match sync_id { + id @ SyncRequestId::SingleExecutionProof { .. 
} => id, + other => { + crit!(request = ?other, "ExecutionProofsByRoot response on incorrect request"); + return; + } + }, + AppRequestId::Router => { + crit!(%peer_id, "All ExecutionProofsByRoot requests belong to sync"); + return; + } + AppRequestId::Internal => unreachable!("Handled internally"), + }; + + trace!( + %peer_id, + "Received ExecutionProofsByRoot Response" + ); + self.send_to_sync(SyncMessage::RpcExecutionProof { + sync_request_id, + peer_id, + execution_proof, + seen_timestamp: timestamp_now(), + }); + } + /// Handle a `DataColumnsByRoot` response from the peer. pub fn on_data_columns_by_root_response( &mut self, From 1965ac7e254ac50ae9d93156cde03ccf099a1f8a Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 19:55:53 +0000 Subject: [PATCH 34/67] integrating proof fetching into block sync --- .../network/src/sync/block_lookups/common.rs | 53 +- .../network/src/sync/block_lookups/mod.rs | 8 +- .../sync/block_lookups/single_block_lookup.rs | 104 +++- beacon_node/network/src/sync/manager.rs | 70 ++- .../network/src/sync/network_context.rs | 153 +++++- .../src/sync/network_context/requests.rs | 8 +- .../src/sync/tests/execution_proof_tests.rs | 512 ++++++++++++++++++ beacon_node/network/src/sync/tests/lookups.rs | 91 +++- beacon_node/network/src/sync/tests/mod.rs | 1 + 9 files changed, 962 insertions(+), 38 deletions(-) create mode 100644 beacon_node/network/src/sync/tests/execution_proof_tests.rs diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index c6b05190871..3a9da8b66ea 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -2,7 +2,7 @@ use crate::sync::block_lookups::single_block_lookup::{ LookupRequestError, SingleBlockLookup, SingleLookupRequestState, }; use crate::sync::block_lookups::{ - BlobRequestState, BlockRequestState, CustodyRequestState, PeerId, + BlobRequestState, 
BlockRequestState, CustodyRequestState, ProofRequestState, PeerId, }; use crate::sync::manager::BlockProcessType; use crate::sync::network_context::{LookupRequestResult, SyncNetworkContext}; @@ -12,7 +12,7 @@ use parking_lot::RwLock; use std::collections::HashSet; use std::sync::Arc; use types::blob_sidecar::FixedBlobSidecarList; -use types::{DataColumnSidecarList, SignedBeaconBlock}; +use types::{DataColumnSidecarList, ExecutionProof, SignedBeaconBlock}; use super::SingleLookupId; use super::single_block_lookup::{ComponentRequests, DownloadResult}; @@ -22,6 +22,7 @@ pub enum ResponseType { Block, Blob, CustodyColumn, + ExecutionProof, } /// This trait unifies common single block lookup functionality across blocks and blobs. This @@ -215,3 +216,51 @@ impl RequestState for CustodyRequestState { &mut self.state } } + +impl RequestState for ProofRequestState { + type VerifiedResponseType = Vec>; + + fn make_request( + &self, + id: Id, + lookup_peers: Arc>>, + _min_proofs: usize, + cx: &mut SyncNetworkContext, + ) -> Result { + cx.execution_proof_lookup_request(id, lookup_peers, self.block_root, self.min_proofs_required) + .map_err(LookupRequestError::SendFailedNetwork) + } + + fn send_for_processing( + id: Id, + download_result: DownloadResult, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + let DownloadResult { + value, + block_root, + seen_timestamp, + .. 
+ } = download_result; + cx.send_execution_proofs_for_processing(id, block_root, value, seen_timestamp) + .map_err(LookupRequestError::SendFailedProcessor) + } + + fn response_type() -> ResponseType { + ResponseType::ExecutionProof + } + + fn request_state_mut(request: &mut SingleBlockLookup) -> Result<&mut Self, &'static str> { + request.proof_request + .as_mut() + .ok_or("no active proof request") + } + + fn get_state(&self) -> &SingleLookupRequestState { + &self.state + } + + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { + &mut self.state + } +} diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index f8ffd298caf..36e002f078a 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -39,7 +39,7 @@ use fnv::FnvHashMap; use lighthouse_network::service::api_types::SingleLookupReqId; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; -pub use single_block_lookup::{BlobRequestState, BlockRequestState, CustodyRequestState}; +pub use single_block_lookup::{BlobRequestState, BlockRequestState, CustodyRequestState, ProofRequestState}; use std::collections::hash_map::Entry; use std::sync::Arc; use std::time::Duration; @@ -532,6 +532,9 @@ impl BlockLookups { BlockProcessType::SingleCustodyColumn(id) => { self.on_processing_result_inner::>(id, result, cx) } + BlockProcessType::SingleExecutionProof { id } => { + self.on_processing_result_inner::(id, result, cx) + } }; self.on_lookup_result(process_type.id(), lookup_result, "processing_result", cx); } @@ -673,6 +676,9 @@ impl BlockLookups { ResponseType::CustodyColumn => { "lookup_custody_column_processing_failure" } + ResponseType::ExecutionProof => { + "lookup_execution_proof_processing_failure" + } }, ); } diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 
8fb3248a871..2f78fab99f6 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -17,7 +17,10 @@ use store::Hash256; use strum::IntoStaticStr; use tracing::{Span, debug_span}; use types::blob_sidecar::FixedBlobSidecarList; -use types::{DataColumnSidecarList, EthSpec, SignedBeaconBlock, Slot}; +use types::{ + DataColumnSidecarList, EthSpec, ExecutionProof, SignedBeaconBlock, + Slot, +}; // Dedicated enum for LookupResult to force its usage #[must_use = "LookupResult must be handled with on_lookup_result"] @@ -63,6 +66,7 @@ pub struct SingleBlockLookup { pub id: Id, pub block_request_state: BlockRequestState, pub component_requests: ComponentRequests, + pub proof_request: Option, /// Peers that claim to have imported this set of block components. This state is shared with /// the custody request to have an updated view of the peers that claim to have imported the /// block associated with this lookup. The peer set of a lookup can change rapidly, and faster @@ -102,6 +106,7 @@ impl SingleBlockLookup { id, block_request_state: BlockRequestState::new(requested_block_root), component_requests: ComponentRequests::WaitingForBlock, + proof_request: None, peers: Arc::new(RwLock::new(HashSet::from_iter(peers.iter().copied()))), block_root: requested_block_root, awaiting_parent, @@ -168,32 +173,53 @@ impl SingleBlockLookup { /// Returns true if the block has already been downloaded. pub fn all_components_processed(&self) -> bool { - self.block_request_state.state.is_processed() - && match &self.component_requests { - ComponentRequests::WaitingForBlock => false, - ComponentRequests::ActiveBlobRequest(request, _) => request.state.is_processed(), - ComponentRequests::ActiveCustodyRequest(request) => request.state.is_processed(), - ComponentRequests::NotNeeded { .. 
} => true, - } + let block_processed = self.block_request_state.state.is_processed(); + + let da_component_processed = match &self.component_requests { + ComponentRequests::WaitingForBlock => false, + ComponentRequests::ActiveBlobRequest(request, _) => request.state.is_processed(), + ComponentRequests::ActiveCustodyRequest(request) => request.state.is_processed(), + ComponentRequests::NotNeeded { .. } => true, + }; + + let proof_processed = self.proof_request + .as_ref() + .map(|request| request.state.is_processed()) + .unwrap_or(true); // If no proof request, consider it processed + + block_processed && da_component_processed && proof_processed } /// Returns true if this request is expecting some event to make progress pub fn is_awaiting_event(&self) -> bool { - self.awaiting_parent.is_some() - || self.block_request_state.state.is_awaiting_event() - || match &self.component_requests { - // If components are waiting for the block request to complete, here we should - // check if the`block_request_state.state.is_awaiting_event(). However we already - // checked that above, so `WaitingForBlock => false` is equivalent. - ComponentRequests::WaitingForBlock => false, - ComponentRequests::ActiveBlobRequest(request, _) => { - request.state.is_awaiting_event() - } - ComponentRequests::ActiveCustodyRequest(request) => { - request.state.is_awaiting_event() - } - ComponentRequests::NotNeeded { .. } => false, + if self.awaiting_parent.is_some() { + return true; + } + + if self.block_request_state.state.is_awaiting_event() { + return true; + } + + let da_awaiting = match &self.component_requests { + // If components are waiting for the block request to complete, here we should + // check if the`block_request_state.state.is_awaiting_event(). However we already + // checked that above, so `WaitingForBlock => false` is equivalent. 
+ ComponentRequests::WaitingForBlock => false, + ComponentRequests::ActiveBlobRequest(request, _) => { + request.state.is_awaiting_event() + } + ComponentRequests::ActiveCustodyRequest(request) => { + request.state.is_awaiting_event() } + ComponentRequests::NotNeeded { .. } => false, + }; + + let proof_awaiting = self.proof_request + .as_ref() + .map(|request| request.state.is_awaiting_event()) + .unwrap_or(false); + + da_awaiting || proof_awaiting } /// Makes progress on all requests of this lookup. Any error is not recoverable and must result @@ -239,6 +265,14 @@ impl SingleBlockLookup { } else { self.component_requests = ComponentRequests::NotNeeded("outside da window"); } + + if cx.chain.should_fetch_execution_proofs(block_epoch) { + if let Some(min_proofs) = cx.chain.min_execution_proofs_required() { + self.proof_request = Some( + ProofRequestState::new(self.block_root, min_proofs) + ); + } + } } else { // Wait to download the block before downloading blobs. Then we can be sure that the // block has data, so there's no need to do "blind" requests for all possible blobs and @@ -253,6 +287,7 @@ impl SingleBlockLookup { } } + // Progress DA component requests match &self.component_requests { ComponentRequests::WaitingForBlock => {} // do nothing ComponentRequests::ActiveBlobRequest(_, expected_blobs) => { @@ -264,6 +299,11 @@ impl SingleBlockLookup { ComponentRequests::NotNeeded { .. } => {} // do nothing } + // Progress proof request (separate from DA components) + if let Some(request) = &self.proof_request { + self.continue_request::(cx, request.min_proofs_required)?; + } + // If all components of this lookup are already processed, there will be no future events // that can make progress so it must be dropped. Consider the lookup completed. // This case can happen if we receive the components from gossip during a retry. @@ -404,6 +444,26 @@ impl CustodyRequestState { } } +/// The state of the execution proof request component of a `SingleBlockLookup`. 
+#[derive(Derivative)] +#[derivative(Debug)] +pub struct ProofRequestState { + #[derivative(Debug = "ignore")] + pub block_root: Hash256, + pub state: SingleLookupRequestState>>, + pub min_proofs_required: usize, +} + +impl ProofRequestState { + pub fn new(block_root: Hash256, min_proofs_required: usize) -> Self { + Self { + block_root, + state: SingleLookupRequestState::new(), + min_proofs_required, + } + } +} + /// The state of the block request component of a `SingleBlockLookup`. #[derive(Derivative)] #[derivative(Debug)] diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index d7ba0280542..8756482d63e 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -45,6 +45,7 @@ use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::block_lookups::{ BlobRequestState, BlockComponent, BlockRequestState, CustodyRequestState, DownloadResult, + ProofRequestState, }; use crate::sync::network_context::PeerGroup; use beacon_chain::block_verification_types::AsBlock; @@ -70,7 +71,8 @@ use std::time::Duration; use tokio::sync::mpsc; use tracing::{debug, error, info, trace}; use types::{ - BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, Hash256, SignedBeaconBlock, Slot, + BlobSidecar, DataColumnSidecar, EthSpec, ExecutionProof, ForkContext, Hash256, + SignedBeaconBlock, Slot, }; /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync @@ -129,6 +131,14 @@ pub enum SyncMessage { seen_timestamp: Duration, }, + /// An execution proof has been received from the RPC + RpcExecutionProof { + sync_request_id: SyncRequestId, + peer_id: PeerId, + execution_proof: Option>, + seen_timestamp: Duration, + }, + /// A block with an unknown parent has been received. 
UnknownParentBlock(PeerId, Arc>, Hash256), @@ -174,6 +184,7 @@ pub enum BlockProcessType { SingleBlock { id: Id }, SingleBlob { id: Id }, SingleCustodyColumn(Id), + SingleExecutionProof { id: Id }, } impl BlockProcessType { @@ -181,7 +192,8 @@ impl BlockProcessType { match self { BlockProcessType::SingleBlock { id } | BlockProcessType::SingleBlob { id } - | BlockProcessType::SingleCustodyColumn(id) => *id, + | BlockProcessType::SingleCustodyColumn(id) + | BlockProcessType::SingleExecutionProof { id } => *id, } } } @@ -465,6 +477,9 @@ impl SyncManager { SyncRequestId::SingleBlob { id } => { self.on_single_blob_response(id, peer_id, RpcEvent::RPCError(error)) } + SyncRequestId::SingleExecutionProof { id } => { + self.on_single_execution_proof_response(id, peer_id, RpcEvent::RPCError(error)) + } SyncRequestId::DataColumnsByRoot(req_id) => { self.on_data_columns_by_root_response(req_id, peer_id, RpcEvent::RPCError(error)) } @@ -756,6 +771,17 @@ impl SyncManager { } => { self.rpc_data_column_received(sync_request_id, peer_id, data_column, seen_timestamp) } + SyncMessage::RpcExecutionProof { + sync_request_id, + peer_id, + execution_proof, + seen_timestamp, + } => self.rpc_execution_proof_received( + sync_request_id, + peer_id, + execution_proof, + seen_timestamp, + ), SyncMessage::UnknownParentBlock(peer_id, block, block_root) => { let block_slot = block.slot(); let parent_root = block.parent_root(); @@ -1092,6 +1118,25 @@ impl SyncManager { } } + fn rpc_execution_proof_received( + &mut self, + sync_request_id: SyncRequestId, + peer_id: PeerId, + execution_proof: Option>, + seen_timestamp: Duration, + ) { + match sync_request_id { + SyncRequestId::SingleExecutionProof { id } => self.on_single_execution_proof_response( + id, + peer_id, + RpcEvent::from_chunk(execution_proof, seen_timestamp), + ), + _ => { + crit!(%peer_id, "bad request id for execution_proof"); + } + } + } + fn on_single_blob_response( &mut self, id: SingleLookupReqId, @@ -1110,6 +1155,27 @@ impl 
SyncManager { } } + fn on_single_execution_proof_response( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + execution_proof: RpcEvent>, + ) { + if let Some(resp) = self + .network + .on_single_execution_proof_response(id, peer_id, execution_proof) + { + self.block_lookups + .on_download_response::( + id, + resp.map(|(value, seen_timestamp)| { + (value, PeerGroup::from_single(peer_id), seen_timestamp) + }), + &mut self.network, + ) + } + } + fn on_data_columns_by_root_response( &mut self, req_id: DataColumnsByRootRequestId, diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index ac2991c1474..8a01012bf54 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -35,6 +35,7 @@ pub use requests::LookupVerifyError; use requests::{ ActiveRequests, BlobsByRangeRequestItems, BlobsByRootRequestItems, BlocksByRangeRequestItems, BlocksByRootRequestItems, DataColumnsByRangeRequestItems, DataColumnsByRootRequestItems, + ExecutionProofsByRootRequestItems, ExecutionProofsByRootSingleBlockRequest, }; #[cfg(test)] use slot_clock::SlotClock; @@ -50,7 +51,7 @@ use tracing::{Span, debug, debug_span, error, warn}; use types::blob_sidecar::FixedBlobSidecarList; use types::{ BlobSidecar, BlockImportSource, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, - ForkContext, Hash256, SignedBeaconBlock, Slot, + ExecutionProof, ForkContext, Hash256, SignedBeaconBlock, Slot, }; pub mod custody; @@ -202,6 +203,9 @@ pub struct SyncNetworkContext { /// A mapping of active DataColumnsByRoot requests data_columns_by_root_requests: ActiveRequests>, + /// A mapping of active ExecutionProofsByRoot requests + execution_proofs_by_root_requests: + ActiveRequests>, /// A mapping of active BlocksByRange requests blocks_by_range_requests: ActiveRequests>, @@ -290,6 +294,7 @@ impl SyncNetworkContext { blocks_by_root_requests: ActiveRequests::new("blocks_by_root"), 
blobs_by_root_requests: ActiveRequests::new("blobs_by_root"), data_columns_by_root_requests: ActiveRequests::new("data_columns_by_root"), + execution_proofs_by_root_requests: ActiveRequests::new("execution_proofs_by_root"), blocks_by_range_requests: ActiveRequests::new("blocks_by_range"), blobs_by_range_requests: ActiveRequests::new("blobs_by_range"), data_columns_by_range_requests: ActiveRequests::new("data_columns_by_range"), @@ -317,6 +322,7 @@ impl SyncNetworkContext { blocks_by_root_requests, blobs_by_root_requests, data_columns_by_root_requests, + execution_proofs_by_root_requests, blocks_by_range_requests, blobs_by_range_requests, data_columns_by_range_requests, @@ -342,6 +348,10 @@ impl SyncNetworkContext { .active_requests_of_peer(peer_id) .into_iter() .map(|req_id| SyncRequestId::DataColumnsByRoot(*req_id)); + let execution_proofs_by_root_ids = execution_proofs_by_root_requests + .active_requests_of_peer(peer_id) + .into_iter() + .map(|id| SyncRequestId::SingleExecutionProof { id: *id }); let blocks_by_range_ids = blocks_by_range_requests .active_requests_of_peer(peer_id) .into_iter() @@ -358,6 +368,7 @@ impl SyncNetworkContext { blocks_by_root_ids .chain(blobs_by_root_ids) .chain(data_column_by_root_ids) + .chain(execution_proofs_by_root_ids) .chain(blocks_by_range_ids) .chain(blobs_by_range_ids) .chain(data_column_by_range_ids) @@ -414,6 +425,7 @@ impl SyncNetworkContext { blocks_by_root_requests, blobs_by_root_requests, data_columns_by_root_requests, + execution_proofs_by_root_requests, blocks_by_range_requests, blobs_by_range_requests, data_columns_by_range_requests, @@ -435,6 +447,7 @@ impl SyncNetworkContext { .iter_request_peers() .chain(blobs_by_root_requests.iter_request_peers()) .chain(data_columns_by_root_requests.iter_request_peers()) + .chain(execution_proofs_by_root_requests.iter_request_peers()) .chain(blocks_by_range_requests.iter_request_peers()) .chain(blobs_by_range_requests.iter_request_peers()) 
.chain(data_columns_by_range_requests.iter_request_peers()) @@ -1018,6 +1031,100 @@ impl SyncNetworkContext { Ok(LookupRequestResult::RequestSent(id.req_id)) } + /// Request execution proofs for `block_root` + pub fn execution_proof_lookup_request( + &mut self, + lookup_id: SingleLookupId, + lookup_peers: Arc>>, + block_root: Hash256, + min_proofs_required: usize, + ) -> Result { + let active_request_count_by_peer = self.active_request_count_by_peer(); + let Some(peer_id) = lookup_peers + .read() + .iter() + .map(|peer| { + ( + // Prefer peers with less overall requests + active_request_count_by_peer.get(peer).copied().unwrap_or(0), + // Random factor to break ties, otherwise the PeerID breaks ties + rand::random::(), + peer, + ) + }) + .min() + .map(|(_, _, peer)| *peer) + else { + return Ok(LookupRequestResult::Pending("no peers")); + }; + + // Query DA checker for proofs we already have + let already_have = self + .chain + .data_availability_checker + .get_existing_proof_ids(&block_root) + .unwrap_or_default(); + + let current_count = already_have.len(); + + // Calculate how many more proofs we need + if current_count >= min_proofs_required { + // Already have enough proofs, no request needed + return Ok(LookupRequestResult::NoRequestNeeded( + "already have minimum proofs", + )); + } + + let count_needed = min_proofs_required - current_count; + + let id = SingleLookupReqId { + lookup_id, + req_id: self.next_id(), + }; + + let request = ExecutionProofsByRootSingleBlockRequest { + block_root, + already_have: already_have.clone(), + count_needed, + }; + + let network_request = RequestType::ExecutionProofsByRoot( + request + .clone() + .into_request() + .map_err(RpcRequestSendError::InternalError)?, + ); + + self.network_send + .send(NetworkMessage::SendRequest { + peer_id, + request: network_request, + app_request_id: AppRequestId::Sync(SyncRequestId::SingleExecutionProof { id }), + }) + .map_err(|_| RpcRequestSendError::InternalError("network send 
error".to_owned()))?; + + debug!( + method = "ExecutionProofsByRoot", + ?block_root, + already_have_count = already_have.len(), + count_needed, + peer = %peer_id, + %id, + "Sync RPC request sent" + ); + + self.execution_proofs_by_root_requests.insert( + id, + peer_id, + // Don't expect max responses since peer might not have all the proofs we need + false, + ExecutionProofsByRootRequestItems::new(request), + Span::none(), + ); + + Ok(LookupRequestResult::RequestSent(id.req_id)) + } + /// Request to send a single `data_columns_by_root` request to the network. pub fn data_column_lookup_request( &mut self, @@ -1452,6 +1559,20 @@ impl SyncNetworkContext { self.on_rpc_response_result(id, "BlobsByRoot", resp, peer_id, |_| 1) } + pub(crate) fn on_single_execution_proof_response( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + rpc_event: RpcEvent>, + ) -> Option>>> { + let resp = self + .execution_proofs_by_root_requests + .on_response(id, rpc_event); + self.on_rpc_response_result(id, "ExecutionProofsByRoot", resp, peer_id, |proofs| { + proofs.len() + }) + } + #[allow(clippy::type_complexity)] pub(crate) fn on_data_columns_by_root_response( &mut self, @@ -1649,6 +1770,36 @@ impl SyncNetworkContext { }) } + pub fn send_execution_proofs_for_processing( + &self, + id: Id, + block_root: Hash256, + proofs: Vec>, + seen_timestamp: Duration, + ) -> Result<(), SendErrorProcessor> { + let beacon_processor = self + .beacon_processor_if_enabled() + .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; + + debug!(?block_root, ?id, "Sending execution proofs for processing"); + // Lookup sync event safety: If `beacon_processor.send_rpc_execution_proofs` returns Ok() sync + // must receive a single `SyncMessage::BlockComponentProcessed` event with this process type + beacon_processor + .send_rpc_execution_proofs( + block_root, + proofs, + seen_timestamp, + BlockProcessType::SingleExecutionProof { id }, + ) + .map_err(|e| { + error!( + error = ?e, + "Failed to send sync execution 
proofs to processor" + ); + SendErrorProcessor::SendError + }) + } + pub fn send_custody_columns_for_processing( &self, _id: Id, diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 3183c06d762..63249ed2a4b 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -5,7 +5,7 @@ use fnv::FnvHashMap; use lighthouse_network::PeerId; use strum::IntoStaticStr; use tracing::Span; -use types::{Hash256, Slot}; +use types::{ExecutionProofId, Hash256, Slot}; pub use blobs_by_range::BlobsByRangeRequestItems; pub use blobs_by_root::{BlobsByRootRequestItems, BlobsByRootSingleBlockRequest}; @@ -15,6 +15,9 @@ pub use data_columns_by_range::DataColumnsByRangeRequestItems; pub use data_columns_by_root::{ DataColumnsByRootRequestItems, DataColumnsByRootSingleBlockRequest, }; +pub use execution_proofs_by_root::{ + ExecutionProofsByRootRequestItems, ExecutionProofsByRootSingleBlockRequest, +}; use crate::metrics; @@ -26,6 +29,7 @@ mod blocks_by_range; mod blocks_by_root; mod data_columns_by_range; mod data_columns_by_root; +mod execution_proofs_by_root; #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupVerifyError { @@ -34,8 +38,10 @@ pub enum LookupVerifyError { UnrequestedBlockRoot(Hash256), UnrequestedIndex(u64), UnrequestedSlot(Slot), + UnrequestedProof(ExecutionProofId), InvalidInclusionProof, DuplicatedData(Slot, u64), + DuplicatedProofIDs(ExecutionProofId), InternalError(String), } diff --git a/beacon_node/network/src/sync/tests/execution_proof_tests.rs b/beacon_node/network/src/sync/tests/execution_proof_tests.rs new file mode 100644 index 00000000000..c726839a179 --- /dev/null +++ b/beacon_node/network/src/sync/tests/execution_proof_tests.rs @@ -0,0 +1,512 @@ +use super::*; +use crate::sync::block_lookups::common::ResponseType; +use lighthouse_network::service::api_types::SyncRequestId; +use 
lighthouse_network::rpc::{RPCError, RpcErrorResponse}; +use types::{ExecutionBlockHash, ExecutionProof, ExecutionProofId, Hash256, Slot}; + +/// Test successful execution proof fetch and verification +#[test] +fn test_proof_lookup_happy_path() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_peer(); + + // Get execution payload hash from the block + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + + // Trigger the unknown block (which should trigger proof request) + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + + // Expect block request + let block_id = rig.expect_block_lookup_request(block_root); + + // Send the block + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + // Now expect proof request + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Send all requested proofs + // TODO(zkproofs): We should use min_required instead of hardcoding 2 proofs here + let proof_ids = vec![ + ExecutionProofId::new(0).unwrap(), + ExecutionProofId::new(1).unwrap(), + ]; + rig.complete_single_lookup_proof_download(proof_id, peer_id, block_root, block_hash, proof_ids); + + // Proofs should be processed + rig.expect_block_process(ResponseType::ExecutionProof); + + // Block should be imported + rig.proof_component_processed_imported(block_root); + rig.expect_empty_network(); + rig.expect_no_active_lookups(); +} + +/// Test that empty proof response results in peer penalization +#[test] +fn test_proof_lookup_empty_response() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = 
rig.new_connected_peer(); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Peer sends stream terminator with no proofs + rig.single_lookup_proof_response(proof_id, peer_id, None); + + // Peer should be penalized for not providing proofs + rig.expect_penalty(peer_id, "NotEnoughResponsesReturned"); + + // Should retry with different peer + let _new_peer = rig.new_connected_peer(); + rig.expect_proof_lookup_request(block_root); +} + +/// Test partial proof response (peer doesn't have all requested proofs) +#[test] +fn test_proof_lookup_partial_response() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_peer(); + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Requested 2 proofs but peer only sends 1 + let proof_0 = Arc::new( + ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(0), + block_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap(), + ); + + rig.single_lookup_proof_response(proof_id, peer_id, Some(proof_0)); + rig.single_lookup_proof_response(proof_id, peer_id, None); // End stream early + + // Should penalize peer for not providing all requested 
proofs + rig.expect_penalty(peer_id, "NotEnoughResponsesReturned"); + + // Should retry with another peer + let new_peer = rig.new_connected_peer(); + let retry_proof_id = rig.expect_proof_lookup_request(block_root); + + // Complete with all proofs + rig.complete_single_lookup_proof_download( + retry_proof_id, + new_peer, + block_root, + block_hash, + vec![ + ExecutionProofId::new(0).unwrap(), + ExecutionProofId::new(1).unwrap(), + ], + ); + + rig.expect_block_process(ResponseType::ExecutionProof); + rig.proof_component_processed_imported(block_root); + rig.expect_no_active_lookups(); +} + +/// Test unrequested proof triggers penalization +#[test] +fn test_proof_lookup_unrequested_proof() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_peer(); + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Requested proofs 0, 1 but peer sends proofs 5 (unrequested) + let unrequested_proof = Arc::new( + ExecutionProof::new( + ExecutionProofId::new(5).unwrap(), + Slot::new(0), + block_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap(), + ); + + rig.single_lookup_proof_response(proof_id, peer_id, Some(unrequested_proof)); + + // Should penalize peer for sending unrequested data + rig.expect_penalty(peer_id, "UnrequestedProof"); + + // Should retry + let _new_peer = rig.new_connected_peer(); + rig.expect_proof_lookup_request(block_root); +} + +/// Test duplicate proofs triggers 
penalization +#[test] +fn test_proof_lookup_duplicate_proof() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_peer(); + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Send proof 0 twice + let proof_0_a = Arc::new( + ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(0), + block_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap(), + ); + // TODO(zkproofs): In this case we have the same proofID but different proof_data + // zkVMs should be deterministic, so if this happens there is likely an issue somewhere + let proof_0_b = Arc::new( + ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(0), + block_hash, + block_root, + vec![4, 5, 6], // Different data + ) + .unwrap(), + ); + + rig.single_lookup_proof_response(proof_id, peer_id, Some(proof_0_a)); + rig.single_lookup_proof_response(proof_id, peer_id, Some(proof_0_b)); + + // Should penalize peer for duplicate proof + rig.expect_penalty(peer_id, "DuplicatedProof"); + + // Should retry + let _new_peer = rig.new_connected_peer(); + rig.expect_proof_lookup_request(block_root); +} + +/// Test wrong block root in proof triggers penalization +#[test] +fn test_proof_lookup_wrong_block_root() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let wrong_root = Hash256::random(); + 
let peer_id = rig.new_connected_peer(); + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Send proof with wrong block_root + let wrong_proof = Arc::new( + ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(0), + block_hash, + wrong_root, + vec![1, 2, 3], + ) + .unwrap(), + ); + + rig.single_lookup_proof_response(proof_id, peer_id, Some(wrong_proof)); + + // Should penalize peer + rig.expect_penalty(peer_id, "UnrequestedBlockRoot"); + + // Should retry + let _new_peer = rig.new_connected_peer(); + rig.expect_proof_lookup_request(block_root); +} + +/// Test proof request timeout +#[test] +fn test_proof_lookup_timeout() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_peer(); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Simulate timeout by sending error + rig.send_sync_message(SyncMessage::RpcError { + sync_request_id: SyncRequestId::SingleExecutionProof { id: proof_id }, + peer_id, + error: RPCError::ErrorResponse( + RpcErrorResponse::ServerError, + "timeout".to_string(), + ), + }); + + // Should penalize peer for timeout + rig.expect_penalty(peer_id, 
"rpc_error"); + + // Should retry with different peer + let _new_peer = rig.new_connected_peer(); + rig.expect_proof_lookup_request(block_root); +} + +/// Test peer disconnection during proof request +#[test] +fn test_proof_lookup_peer_disconnected() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_peer(); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Peer disconnects + rig.send_sync_message(SyncMessage::RpcError { + sync_request_id: SyncRequestId::SingleExecutionProof { id: proof_id }, + peer_id, + error: RPCError::Disconnected, + }); + + // Should retry with different peer (no penalty for disconnect) + let _new_peer = rig.new_connected_peer(); + rig.expect_proof_lookup_request(block_root); +} + +/// Test multiple retries on failure +#[test] +fn test_proof_lookup_multiple_retries() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + + let peer_id = rig.new_connected_peer(); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + // First attempt - empty response + let proof_id_1 = rig.expect_proof_lookup_request(block_root); + 
rig.single_lookup_proof_response(proof_id_1, peer_id, None); + rig.expect_penalty(peer_id, "NotEnoughResponsesReturned"); + + // Second attempt - different peer, also fails + let peer_id_2 = rig.new_connected_peer(); + let proof_id_2 = rig.expect_proof_lookup_request(block_root); + rig.single_lookup_proof_response(proof_id_2, peer_id_2, None); + rig.expect_penalty(peer_id_2, "NotEnoughResponsesReturned"); + + // Third attempt - succeeds + let peer_id_3 = rig.new_connected_peer(); + let proof_id_3 = rig.expect_proof_lookup_request(block_root); + rig.complete_single_lookup_proof_download( + proof_id_3, + peer_id_3, + block_root, + block_hash, + vec![ + ExecutionProofId::new(0).unwrap(), + ExecutionProofId::new(1).unwrap(), + ], + ); + + rig.expect_block_process(ResponseType::ExecutionProof); + rig.proof_component_processed_imported(block_root); + rig.expect_no_active_lookups(); +} + +/// Test proof lookup with no peers available +#[test] +fn test_proof_lookup_no_peers() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_peer(); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Peer fails and disconnects + rig.send_sync_message(SyncMessage::RpcError { + sync_request_id: SyncRequestId::SingleExecutionProof { id: proof_id }, + peer_id, + error: RPCError::Disconnected, + }); + + // Disconnect the peer + rig.peer_disconnected(peer_id); + + // Should not be able to find another peer immediately + // The lookup should remain active waiting for peers + assert_eq!(rig.active_single_lookups_count(), 1); +} + +/// Test successful proof verification 
after block already has blobs +#[test] +fn test_proof_lookup_with_existing_blobs() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + let peer_id = rig.new_connected_peer(); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + + // Get block + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.clone().into())); + rig.expect_block_process(ResponseType::Block); + + // Block might still be missing proofs even if blobs present + // Proofs are an additional requirement + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Send proofs + rig.complete_single_lookup_proof_download( + proof_id, + peer_id, + block_root, + block_hash, + vec![ + ExecutionProofId::new(0).unwrap(), + ExecutionProofId::new(1).unwrap(), + ], + ); + + rig.expect_block_process(ResponseType::ExecutionProof); + rig.proof_component_processed_imported(block_root); + rig.expect_no_active_lookups(); +} diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index fc641861754..4a6c5e60589 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -42,8 +42,8 @@ use tokio::sync::mpsc; use tracing::info; use types::{ BeaconState, BeaconStateBase, BlobSidecar, BlockImportSource, DataColumnSidecar, EthSpec, - ForkContext, ForkName, Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot, - data_column_sidecar::ColumnIndex, + ExecutionBlockHash, ExecutionProof, ExecutionProofId, ForkContext, ForkName, Hash256, + MinimalEthSpec as E, SignedBeaconBlock, Slot, data_column_sidecar::ColumnIndex, 
test_utils::{SeedableRng, TestRandom, XorShiftRng}, }; @@ -171,7 +171,7 @@ impl TestRig { self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob.into())); } - fn trigger_unknown_block_from_attestation(&mut self, block_root: Hash256, peer_id: PeerId) { + pub(super) fn trigger_unknown_block_from_attestation(&mut self, block_root: Hash256, peer_id: PeerId) { self.send_sync_message(SyncMessage::UnknownBlockHashFromAttestation( peer_id, block_root, )); @@ -184,7 +184,7 @@ impl TestRig { } } - fn rand_block(&mut self) -> SignedBeaconBlock { + pub(super) fn rand_block(&mut self) -> SignedBeaconBlock { self.rand_block_and_blobs(NumBlobs::None).0 } @@ -228,7 +228,7 @@ impl TestRig { self.sync_manager.active_single_lookups() } - fn active_single_lookups_count(&self) -> usize { + pub(super) fn active_single_lookups_count(&self) -> usize { self.sync_manager.active_single_lookups().len() } @@ -321,7 +321,7 @@ impl TestRig { } #[track_caller] - fn expect_no_active_lookups(&self) { + pub(super) fn expect_no_active_lookups(&self) { self.expect_no_active_single_lookups(); } @@ -445,7 +445,7 @@ impl TestRig { }); } - fn single_lookup_block_response( + pub(super) fn single_lookup_block_response( &mut self, id: SingleLookupReqId, peer_id: PeerId, @@ -527,6 +527,74 @@ impl TestRig { ); } + /// Send a single execution proof response + pub(super) fn single_lookup_proof_response( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + proof: Option>, + ) { + self.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::SingleExecutionProof { id }, + peer_id, + execution_proof: proof, + seen_timestamp: D, + }); + } + + /// Complete execution proof download by sending all requested proofs + pub(super) fn complete_single_lookup_proof_download( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + block_root: Hash256, + block_hash: ExecutionBlockHash, + subnet_ids: Vec, + ) { + for subnet_id in subnet_ids { + let proof = Arc::new( + 
ExecutionProof::new(subnet_id, types::Slot::new(0), block_hash, block_root, vec![1, 2, 3, 4]) + .unwrap(), + ); + self.single_lookup_proof_response(id, peer_id, Some(proof)); + } + // Send stream terminator + self.single_lookup_proof_response(id, peer_id, None); + } + + /// Expect an execution proof request for a specific block + pub(super) fn expect_proof_lookup_request(&mut self, block_root: Hash256) -> SingleLookupReqId { + self.pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + request, + app_request_id: AppRequestId::Sync(SyncRequestId::SingleExecutionProof { id }), + .. + } => match request { + RequestType::ExecutionProofsByRoot(req) => { + if req.block_root == block_root { + Some(*id) + } else { + None + } + } + _ => None, + }, + _ => None, + }) + .unwrap_or_else(|_| panic!("Expected proof request for {block_root}")) + } + + /// Send a processing result indicating proofs were processed and block imported + pub(super) fn proof_component_processed_imported(&mut self, block_root: Hash256) { + let id = self.find_single_lookup_for(block_root); + self.send_sync_message(SyncMessage::BlockComponentProcessed { + process_type: BlockProcessType::SingleBlock { id }, + result: BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported( + block_root, + )), + }); + } + fn complete_lookup_block_download(&mut self, block: SignedBeaconBlock) { let block_root = block.canonical_root(); let id = self.expect_block_lookup_request(block_root); @@ -786,7 +854,7 @@ impl TestRig { } #[track_caller] - fn expect_block_lookup_request(&mut self, for_block: Hash256) -> SingleLookupReqId { + pub(super) fn expect_block_lookup_request(&mut self, for_block: Hash256) -> SingleLookupReqId { self.find_block_lookup_request(for_block) .unwrap_or_else(|e| panic!("Expected block request for {for_block:?}: {e}")) } @@ -910,7 +978,7 @@ impl TestRig { } #[track_caller] - fn expect_block_process(&mut self, response_type: ResponseType) { + pub(super) fn 
expect_block_process(&mut self, response_type: ResponseType) { match response_type { ResponseType::Block => self .pop_received_processor_event(|ev| { @@ -927,6 +995,11 @@ impl TestRig { (ev.work_type() == beacon_processor::WorkType::RpcCustodyColumn).then_some(()) }) .unwrap_or_else(|e| panic!("Expected column work event: {e}")), + ResponseType::ExecutionProof => self + .pop_received_processor_event(|ev| { + (ev.work_type() == beacon_processor::WorkType::RpcExecutionProofs).then_some(()) + }) + .unwrap_or_else(|e| panic!("Expected execution proofs work event: {e}")), } } diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs index 23c14ff63ef..9b82f830bcb 100644 --- a/beacon_node/network/src/sync/tests/mod.rs +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -18,6 +18,7 @@ use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; use types::{ChainSpec, ForkName, MinimalEthSpec as E}; +mod execution_proof_tests; mod lookups; mod range; From 0e2a7400080c673b9b7ed7542c6d5eef181573d9 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 19:56:58 +0000 Subject: [PATCH 35/67] fix test --- .../src/execution_proof_verification.rs | 3 ++- beacon_node/beacon_chain/src/test_utils.rs | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/execution_proof_verification.rs b/beacon_node/beacon_chain/src/execution_proof_verification.rs index 79a22b7cc15..92272644d21 100644 --- a/beacon_node/beacon_chain/src/execution_proof_verification.rs +++ b/beacon_node/beacon_chain/src/execution_proof_verification.rs @@ -488,6 +488,7 @@ mod tests { .deterministic_keypairs(64) .fresh_ephemeral_store() .mock_execution_layer() + .zkvm_with_dummy_verifiers() .build(); harness.advance_slot(); @@ -556,7 +557,7 @@ mod tests { harness .chain .data_availability_checker - .put_execution_proofs(block_root, vec![proof.clone()]) + 
.put_rpc_execution_proofs(block_root, vec![proof.clone()]) .expect("Should put proof in DA cache"); // Verify it's in the cache diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 1d575501563..0bf8fd479b4 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -211,6 +211,7 @@ pub struct Builder { validator_monitor_config: Option, genesis_state_builder: Option>, import_all_data_columns: bool, + zkvm_execution_layer_config: Option, runtime: TestRuntime, } @@ -357,6 +358,7 @@ where validator_monitor_config: None, genesis_state_builder: None, import_all_data_columns: false, + zkvm_execution_layer_config: None, runtime, } } @@ -530,6 +532,13 @@ where self } + /// Enable zkVM execution proof verification with dummy verifiers for testing. + pub fn zkvm_with_dummy_verifiers(mut self) -> Self { + self.zkvm_execution_layer_config = + Some(zkvm_execution_layer::ZKVMExecutionLayerConfig::default()); + self + } + pub fn with_genesis_state_builder( mut self, f: impl FnOnce(InteropGenesisBuilder) -> InteropGenesisBuilder, @@ -570,6 +579,12 @@ where .validator_monitor_config(validator_monitor_config) .rng(Box::new(StdRng::seed_from_u64(42))); + builder = if let Some(zkvm_config) = self.zkvm_execution_layer_config { + builder.zkvm_execution_layer_config(Some(zkvm_config)) + } else { + builder + }; + builder = if let Some(mutator) = self.initial_mutator { mutator(builder) } else { From ae89e1fd48debd8b8fa88fd3232923788be846ab Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 19:59:01 +0000 Subject: [PATCH 36/67] rename --- zkvm_execution_layer/src/engine_api.rs | 6 +++--- zkvm_execution_layer/src/lib.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/zkvm_execution_layer/src/engine_api.rs b/zkvm_execution_layer/src/engine_api.rs index fced776da0f..89c98d2352e 100644 --- a/zkvm_execution_layer/src/engine_api.rs +++ 
b/zkvm_execution_layer/src/engine_api.rs @@ -3,17 +3,17 @@ use types::{EthSpec, ExecutionBlockHash, ExecPayload}; type PayloadId = [u8; 8]; -pub struct ZkVmEngineApi { +pub struct ZKVMEngineApi { _phantom: std::marker::PhantomData, } -impl Default for ZkVmEngineApi { +impl Default for ZKVMEngineApi { fn default() -> Self { Self::new() } } -impl ZkVmEngineApi { +impl ZKVMEngineApi { pub fn new() -> Self { Self { _phantom: std::marker::PhantomData, diff --git a/zkvm_execution_layer/src/lib.rs b/zkvm_execution_layer/src/lib.rs index 2633158da6b..36353288d67 100644 --- a/zkvm_execution_layer/src/lib.rs +++ b/zkvm_execution_layer/src/lib.rs @@ -14,7 +14,7 @@ pub mod dummy_proof_verifier; pub mod engine_api; /// Re-export the main ZK-VM engine API and config -pub use engine_api::ZkVmEngineApi; +pub use engine_api::ZKVMEngineApi; pub use config::ZKVMExecutionLayerConfig; pub use registry_proof_gen::GeneratorRegistry; From f2c7f1f35d2a5de0a1d82a0cc3dc791483e6fc63 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 20:05:34 +0000 Subject: [PATCH 37/67] commit top level configurations and wiring --- beacon_node/client/Cargo.toml | 1 + beacon_node/client/src/builder.rs | 75 +++++++++++++++++++++++ beacon_node/lighthouse_tracing/src/lib.rs | 2 + beacon_node/src/cli.rs | 39 +++++------- beacon_node/src/config.rs | 42 +++---------- 5 files changed, 103 insertions(+), 56 deletions(-) diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 6b8b79c8efb..c5de5a4f839 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -16,6 +16,7 @@ ethereum_ssz = { workspace = true } execution_layer = { workspace = true } # TODO(zkproofs): add as a workspace dependency zkvm_execution_layer = { path = "../../zkvm_execution_layer" } +proof_generation_service = { path = "../proof_generation_service" } futures = { workspace = true } genesis = { workspace = true } http_api = { workspace = true } diff --git 
a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index de185547a01..0797994bb8f 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -9,6 +9,7 @@ use beacon_chain::data_availability_checker::start_availability_cache_maintenanc use beacon_chain::graffiti_calculator::start_engine_version_cache_refresh_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; +use beacon_chain::ProofGenerationEvent; use beacon_chain::{ BeaconChain, BeaconChainTypes, MigratorConfig, ServerSentEventHandler, builder::{BeaconChainBuilder, Witness}, @@ -20,6 +21,8 @@ use beacon_chain::{Kzg, LightClientProducerEvent}; use beacon_processor::{BeaconProcessor, BeaconProcessorChannels}; use beacon_processor::{BeaconProcessorConfig, BeaconProcessorQueueLengths}; use environment::RuntimeContext; +use proof_generation_service; +use zkvm_execution_layer; use eth2::{ BeaconNodeHttpClient, Error as ApiError, Timeouts, types::{BlockId, StateId}, @@ -88,6 +91,7 @@ pub struct ClientBuilder { beacon_processor_config: Option, beacon_processor_channels: Option>, light_client_server_rv: Option>>, + proof_generation_rx: Option>>, eth_spec_instance: T::EthSpec, } @@ -122,6 +126,7 @@ where beacon_processor_config: None, beacon_processor_channels: None, light_client_server_rv: None, + proof_generation_rx: None, } } @@ -191,6 +196,18 @@ where Kzg::new_from_trusted_setup_no_precomp(&config.trusted_setup).map_err(kzg_err_msg)? 
}; + // Modify spec if zkvm mode is enabled via CLI + let spec = if let Some(zkvm_config) = &config.zkvm_execution_layer { + let mut modified_spec = (*spec).clone(); + + modified_spec.zkvm_enabled = true; + modified_spec.zkvm_min_proofs_required = zkvm_config.min_proofs_required; + + Arc::new(modified_spec) + } else { + spec + }; + let builder = BeaconChainBuilder::new(eth_spec_instance, Arc::new(kzg)) .store(store) .task_executor(context.executor.clone()) @@ -226,6 +243,44 @@ where builder }; + // Set up proof generation service if zkVM is configured with generation proof types + let builder = if let Some(ref zkvm_config) = config.zkvm_execution_layer { + if !zkvm_config.generation_proof_types.is_empty() { + // Validate that proof generation requires an execution layer + // Proof-generating nodes will validate blocks via EL execution, not proofs + if config.execution_layer.is_none() { + return Err( + "Proof generation requires an EL. \ + Nodes generating proofs must validate blocks via an execution layer. \ + To run a lightweight verifier node (without EL), omit --zkvm-generation-proof-types." 
+ .into(), + ); + } + + // Create channel for proof generation events + let (proof_gen_tx, proof_gen_rx) = + tokio::sync::mpsc::unbounded_channel::>(); + + // Create generator registry with enabled proof types + let registry = Arc::new( + zkvm_execution_layer::GeneratorRegistry::new_with_dummy_generators( + zkvm_config.generation_proof_types.clone(), + ), + ); + + // Store receiver for later when we spawn the service + self.proof_generation_rx = Some(proof_gen_rx); + + builder + .zkvm_generator_registry(registry) + .proof_generation_tx(proof_gen_tx) + } else { + builder + } + } else { + builder + }; + let chain_exists = builder.store_contains_beacon_chain().unwrap_or(false); // If the client is expect to resume but there's no beacon chain in the database, @@ -778,6 +833,26 @@ where beacon_chain.task_executor.clone(), beacon_chain.clone(), ); + + // Start proof generation service if configured + if let Some(proof_gen_rx) = self.proof_generation_rx { + let network_tx = self + .network_senders + .as_ref() + .ok_or("proof_generation_service requires network_senders")? 
+ .network_send(); + + let service = proof_generation_service::ProofGenerationService::new( + beacon_chain.clone(), + proof_gen_rx, + network_tx, + ); + + runtime_context.executor.spawn( + async move { service.run().await }, + "proof_generation_service", + ); + } } Ok(Client { diff --git a/beacon_node/lighthouse_tracing/src/lib.rs b/beacon_node/lighthouse_tracing/src/lib.rs index 18a9874252a..7b156445db7 100644 --- a/beacon_node/lighthouse_tracing/src/lib.rs +++ b/beacon_node/lighthouse_tracing/src/lib.rs @@ -37,6 +37,7 @@ pub const SPAN_HANDLE_BLOBS_BY_RANGE_REQUEST: &str = "handle_blobs_by_range_requ pub const SPAN_HANDLE_DATA_COLUMNS_BY_RANGE_REQUEST: &str = "handle_data_columns_by_range_request"; pub const SPAN_HANDLE_BLOCKS_BY_ROOT_REQUEST: &str = "handle_blocks_by_root_request"; pub const SPAN_HANDLE_BLOBS_BY_ROOT_REQUEST: &str = "handle_blobs_by_root_request"; +pub const SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST: &str = "handle_execution_proofs_by_root_request"; pub const SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST: &str = "handle_data_columns_by_root_request"; pub const SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE: &str = "handle_light_client_updates_by_range"; pub const SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP: &str = "handle_light_client_bootstrap"; @@ -68,6 +69,7 @@ pub const LH_BN_ROOT_SPAN_NAMES: &[&str] = &[ SPAN_HANDLE_DATA_COLUMNS_BY_RANGE_REQUEST, SPAN_HANDLE_BLOCKS_BY_ROOT_REQUEST, SPAN_HANDLE_BLOBS_BY_ROOT_REQUEST, + SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST, SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST, SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE, SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP, diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 0e641aee4b7..6f55a01a128 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -923,33 +923,24 @@ pub fn cli_app() -> Command { ) /* ZK-VM Execution Layer settings */ .arg( - Arg::new("zkvm-min-proofs") - .long("zkvm-min-proofs") - .value_name("NUM") - .help("Minimum number of execution proofs 
required from different subnets \ - before marking an execution payload as valid in ZK-VM mode. \ - When set, enables ZK-VM execution proof verification.") - .value_parser(clap::value_parser!(usize)) - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("zkvm-subscribed-subnets") - .long("zkvm-subscribed-subnets") - .value_name("SUBNET_IDS") - .help("Comma-separated list of execution proof subnet IDs to subscribe to \ - (e.g., '0,1,2'). Required when --zkvm-min-proofs is set.") - .requires("zkvm-min-proofs") - .action(ArgAction::Set) + Arg::new("activate-zkvm") + .long("activate-zkvm") + .help("Activates ZKVM execution proof mode. Enables the node to subscribe to the \ + execution_proof gossip topic, receive and verify execution proofs from peers, \ + and advertise zkVM support in its ENR for peer discovery. \ + Use --zkvm-generation-proof-types to specify which proof types this node \ + should generate (optional - nodes can verify without generating).") + .action(ArgAction::SetTrue) .display_order(0) ) .arg( - Arg::new("zkvm-generation-subnets") - .long("zkvm-generation-subnets") - .value_name("SUBNET_IDS") - .help("Comma-separated list of execution proof subnet IDs to generate proofs for \ - (e.g., '0,1'). Must be a subset of --zkvm-subscribed-subnets.") - .requires("zkvm-subscribed-subnets") + Arg::new("zkvm-generation-proof-types") + .long("zkvm-generation-proof-types") + .value_name("PROOF_TYPE_IDS") + .help("Comma-separated list of proof type IDs to generate \ + (e.g., '0,1' where 0=SP1+Reth, 1=Risc0+Geth). 
\ + Optional - nodes can verify proofs without generating them.") + .requires("activate-zkvm") .action(ArgAction::Set) .display_order(0) ) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index f99072697ad..aaed70595b1 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -28,7 +28,7 @@ use std::str::FromStr; use std::time::Duration; use tracing::{error, info, warn}; use types::graffiti::GraffitiString; -use types::{Checkpoint, Epoch, EthSpec, ExecutionProofSubnetId, Hash256, PublicKeyBytes}; +use types::{Checkpoint, Epoch, EthSpec, ExecutionProofId, Hash256, PublicKeyBytes}; use zkvm_execution_layer::ZKVMExecutionLayerConfig; const PURGE_DB_CONFIRMATION: &str = "confirm"; @@ -325,36 +325,17 @@ pub fn get_config( client_config.execution_layer = Some(el_config); // Parse ZK-VM execution layer config if provided - if let Some(min_proofs) = clap_utils::parse_optional::(cli_args, "zkvm-min-proofs")? { - // Parse subscribed subnets (required when min-proofs is set) - let subscribed_subnets_str: String = - clap_utils::parse_required(cli_args, "zkvm-subscribed-subnets")?; - - let subscribed_subnets = subscribed_subnets_str - .split(',') - .map(|s| s.trim().parse::()) - .collect::, _>>() - .map_err(|e| format!("Invalid subnet ID in --zkvm-subscribed-subnets: {}", e))? - .into_iter() - .map(|id| ExecutionProofSubnetId::new(id)) - .collect::, _>>() - .map_err(|e| format!("Invalid subnet ID: {}", e))?; - - // Parse proof generation subnets (optional) - // - // TODO(zkproofs): Since min-proofs required means no EL is required, and we can only set - // proof-gen here, then it means that even a proof generating validator will not directly have a - // EL attached, so they need to call out to a different node for making EL proofs. This sounds safer. - let generation_subnets = if let Some(gen_subnets_str) = - clap_utils::parse_optional::(cli_args, "zkvm-generation-subnets")? 
+ if cli_args.get_flag("activate-zkvm") { + let generation_proof_types = if let Some(gen_types_str) = + clap_utils::parse_optional::(cli_args, "zkvm-generation-proof-types")? { - gen_subnets_str + gen_types_str .split(',') .map(|s| s.trim().parse::()) .collect::, _>>() - .map_err(|e| format!("Invalid subnet ID in --zkvm-generation-subnets: {}", e))? + .map_err(|e| format!("Invalid proof type ID in --zkvm-generation-proof-types: {}", e))? .into_iter() - .map(|id| ExecutionProofSubnetId::new(id)) + .map(|id| ExecutionProofId::new(id)) .collect::, _>>() .map_err(|e| format!("Invalid subnet ID: {}", e))? } else { @@ -363,22 +344,19 @@ pub fn get_config( // Build and validate the config let zkvm_config = ZKVMExecutionLayerConfig::builder() - .subscribed_subnets(subscribed_subnets) - .min_proofs_required(min_proofs) - .generation_subnets(generation_subnets) + .generation_proof_types(generation_proof_types) .build() .map_err(|e| format!("Invalid ZK-VM configuration: {}", e))?; client_config.zkvm_execution_layer = Some(zkvm_config); info!( - "ZK-VM mode enabled with min_proofs_required={}, subscribed_subnets={:?}", - min_proofs, + "ZKVM mode activated with generation_proof_types={:?}", client_config .zkvm_execution_layer .as_ref() .unwrap() - .subscribed_subnets + .generation_proof_types ); } From e32c6f6eb683b449bfeb482524e79d1f9eb35553 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 20:05:48 +0000 Subject: [PATCH 38/67] nit --- consensus/types/src/chain_spec.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 7e2b2ff625e..7a4ea7cdb6a 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -2042,7 +2042,7 @@ const fn default_min_epochs_for_blob_sidecars_requests() -> u64 { } const fn default_min_epochs_for_execution_proof_requests() -> u64 { - // TODO(zkproofs): add into specs with rational + // TODO(zkproofs): add into 
consensus-specs with rational 2 } From e7e535b405e1a3b4b2ed55fa02ae13c79c1752d7 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 20:08:33 +0000 Subject: [PATCH 39/67] commit min and max ssz sizes for execution_proof --- .../proof_generation_service/src/lib.rs | 4 +-- consensus/types/src/execution_proof.rs | 36 +++++++++++++++++-- 2 files changed, 34 insertions(+), 6 deletions(-) diff --git a/beacon_node/proof_generation_service/src/lib.rs b/beacon_node/proof_generation_service/src/lib.rs index e674b63d830..1b020cedb17 100644 --- a/beacon_node/proof_generation_service/src/lib.rs +++ b/beacon_node/proof_generation_service/src/lib.rs @@ -220,9 +220,7 @@ impl ProofGenerationService { // 3. We already have the block, so we don't need the proof for ourselves // Publish the proof to the network - let pubsub_message = PubsubMessage::ExecutionProof( - Box::new((proof_id, Arc::new(proof))) - ); + let pubsub_message = PubsubMessage::ExecutionProof(Arc::new(proof)); let network_message = NetworkMessage::Publish { messages: vec![pubsub_message], diff --git a/consensus/types/src/execution_proof.rs b/consensus/types/src/execution_proof.rs index eca33f63685..3b4e193d20a 100644 --- a/consensus/types/src/execution_proof.rs +++ b/consensus/types/src/execution_proof.rs @@ -1,6 +1,7 @@ use crate::{ExecutionBlockHash, Hash256, Slot, VariableList}; use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; +use ssz::Encode; +use ssz_derive::{Decode, Encode as DeriveEncode}; use ssz_types::typenum; use std::fmt::{self, Debug}; use tree_hash_derive::TreeHash; @@ -37,7 +38,7 @@ type ProofData = VariableList; /// Each proof is associated with a specific proof_id, which identifies the /// zkVM and EL combination used to generate it. Multiple proofs from different /// proof IDs can exist for the same execution payload, providing both zkVM and EL diversity. 
-#[derive(Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq, Eq)] +#[derive(Clone, Serialize, Deserialize, DeriveEncode, Decode, TreeHash, PartialEq, Eq)] pub struct ExecutionProof { /// Which proof type (zkVM+EL combination) this proof belongs to /// Examples: 0=SP1+Reth, 1=Risc0+Geth, 2=SP1+Geth, etc. @@ -101,6 +102,35 @@ impl ExecutionProof { pub fn proof_id(&self) -> ExecutionProofId { self.proof_id } + + /// Minimum size of an ExecutionProof in SSZ bytes (with empty proof_data) + /// TODO(zkproofs): If the proof_data is empty, then that is an invalid proof + pub fn min_size() -> usize { + use bls::FixedBytesExtended; + Self { + proof_id: ExecutionProofId::new(0).unwrap(), + slot: Slot::new(0), + block_hash: ExecutionBlockHash::zero(), + block_root: Hash256::zero(), + proof_data: ProofData::new(vec![]).unwrap(), + } + .as_ssz_bytes() + .len() + } + + /// Maximum size of an ExecutionProof in SSZ bytes (with max proof_data) + pub fn max_size() -> usize { + use bls::FixedBytesExtended; + Self { + proof_id: ExecutionProofId::new(0).unwrap(), + slot: Slot::new(0), + block_hash: ExecutionBlockHash::zero(), + block_root: Hash256::zero(), + proof_data: ProofData::new(vec![0u8; MAX_PROOF_DATA_BYTES]).unwrap(), + } + .as_ssz_bytes() + .len() + } } impl Debug for ExecutionProof { @@ -130,7 +160,7 @@ mod tests { let result = ExecutionProof::new(subnet_id, slot, block_hash, block_root, proof_data); assert!(result.is_err()); - assert!(result.unwrap_err().contains("Proof data too large")); + assert!(result.unwrap_err().contains("Failed to create proof data")); } #[test] From 6f5d3f123a1f30dd4f62ab9e7cec479ee4abb3e5 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 20:09:57 +0000 Subject: [PATCH 40/67] add .yml config --- ...network_params_mixed_proof_gen_verify.yaml | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml diff --git 
a/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml b/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml new file mode 100644 index 00000000000..edc59f78e5c --- /dev/null +++ b/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml @@ -0,0 +1,32 @@ +# 3 nodes generate proofs, 1 node only verifies +participants: + # Proof generating nodes (nodes 1-3) + - el_type: geth + el_image: ethereum/client-go:latest + cl_type: lighthouse + cl_image: lighthouse:local + cl_extra_params: + - --activate-zkvm + - --zkvm-generation-proof-types=0,1 + - --target-peers=3 + count: 3 + # Proof verifying only node (node 4) + # TODO(zkproofs): Currently there is no way to add no client here + # We likely want to use our dummy zkvm EL here + - el_type: geth + el_image: ethereum/client-go:latest + cl_type: lighthouse + cl_image: lighthouse:local + cl_extra_params: + - --activate-zkvm + - --target-peers=3 + count: 1 +network_params: + electra_fork_epoch: 0 + fulu_fork_epoch: 1 + seconds_per_slot: 2 +global_log_level: debug +snooper_enabled: false +additional_services: + - dora + - prometheus_grafana From 0a8eb903bcb06246014a45a97430fb75f81f1ba0 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 20:21:23 +0000 Subject: [PATCH 41/67] cargo fmt --- beacon_node/beacon_chain/src/beacon_chain.rs | 15 ++-- beacon_node/beacon_chain/src/builder.rs | 9 +- .../src/data_availability_checker.rs | 37 ++++---- .../overflow_lru_cache.rs | 4 +- .../src/execution_proof_verification.rs | 84 ++++++++++--------- beacon_node/beacon_chain/src/lib.rs | 2 +- .../src/observed_execution_proofs.rs | 28 ++++--- beacon_node/beacon_processor/src/lib.rs | 7 +- beacon_node/client/src/builder.rs | 9 +- .../lighthouse_network/src/discovery/mod.rs | 4 +- .../src/peer_manager/config.rs | 2 +- .../src/peer_manager/mod.rs | 2 +- .../src/peer_manager/peerdb/peer_info.rs | 2 +- .../lighthouse_network/src/rpc/codec.rs | 22 ++--- .../lighthouse_network/src/rpc/methods.rs 
| 11 ++- .../lighthouse_network/src/rpc/protocol.rs | 10 +-- .../src/rpc/rate_limiter.rs | 5 +- .../lighthouse_network/src/service/mod.rs | 12 ++- .../lighthouse_network/src/types/subnet.rs | 2 +- .../lighthouse_network/src/types/topics.rs | 4 +- .../lighthouse_network/tests/rpc_tests.rs | 28 +++---- beacon_node/lighthouse_tracing/src/lib.rs | 3 +- .../gossip_methods.rs | 28 +++++-- .../network_beacon_processor/rpc_methods.rs | 15 ++-- beacon_node/network/src/router.rs | 26 +++--- .../network/src/sync/block_lookups/common.rs | 14 +++- .../network/src/sync/block_lookups/mod.rs | 4 +- .../sync/block_lookups/single_block_lookup.rs | 26 +++--- beacon_node/network/src/sync/manager.rs | 6 +- .../requests/execution_proofs_by_root.rs | 10 +-- .../src/sync/tests/execution_proof_tests.rs | 7 +- beacon_node/network/src/sync/tests/lookups.rs | 23 +++-- .../proof_generation_service/src/lib.rs | 51 +++++++---- beacon_node/src/config.rs | 7 +- consensus/types/src/chain_spec.rs | 6 +- consensus/types/src/execution_proof.rs | 3 +- consensus/types/src/execution_proof_id.rs | 4 +- zkvm_execution_layer/src/config.rs | 7 +- zkvm_execution_layer/src/dummy_proof_gen.rs | 10 ++- zkvm_execution_layer/src/engine_api.rs | 4 +- zkvm_execution_layer/src/lib.rs | 2 +- .../src/proof_verification.rs | 2 +- 42 files changed, 310 insertions(+), 247 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 669e55cba6b..a5033bb469a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -27,13 +27,13 @@ use crate::data_availability_checker::{ DataAvailabilityChecker, DataColumnReconstructionResult, }; use crate::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; -use crate::execution_proof_verification::{ - GossipExecutionProofError, GossipVerifiedExecutionProof, -}; use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError 
as Error, BlockProductionError}; use crate::events::ServerSentEventHandler; use crate::execution_payload::{NotifyExecutionLayer, PreparePayloadHandle, get_execution_payload}; +use crate::execution_proof_verification::{ + GossipExecutionProofError, GossipVerifiedExecutionProof, +}; use crate::fetch_blobs::EngineGetBlobsOutput; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::graffiti_calculator::GraffitiCalculator; @@ -133,12 +133,12 @@ use task_executor::{RayonPoolType, ShutdownReason, TaskExecutor}; use tokio::sync::mpsc::UnboundedSender; use tokio_stream::Stream; use tracing::{Span, debug, debug_span, error, info, info_span, instrument, trace, warn}; -use zkvm_execution_layer::GeneratorRegistry; use tree_hash::TreeHash; use types::blob_sidecar::FixedBlobSidecarList; use types::data_column_sidecar::ColumnIndex; use types::payload::BlockProductionVersion; use types::*; +use zkvm_execution_layer::GeneratorRegistry; pub type ForkChoiceError = fork_choice::Error; @@ -3085,11 +3085,8 @@ impl BeaconChain { return Err(BlockError::DuplicateFullyImported(block_root)); } - self.check_gossip_execution_proof_availability_and_import( - execution_proof, - publish_fn, - ) - .await + self.check_gossip_execution_proof_availability_and_import(execution_proof, publish_fn) + .await } /// Cache the data columns in the processing cache, process it, then evict it from the cache if it was diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index a0074f61f4d..522cf18c63a 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -718,10 +718,7 @@ where } /// Sets a `Sender` to notify the proof generation service of new blocks. 
- pub fn proof_generation_tx( - mut self, - sender: UnboundedSender>, - ) -> Self { + pub fn proof_generation_tx(mut self, sender: UnboundedSender>) -> Self { self.proof_generation_tx = Some(sender); self } @@ -996,8 +993,8 @@ where }; debug!(?custody_context, "Loaded persisted custody context"); - let has_execution_layer_and_proof_gen = self.execution_layer.is_some() - && self.zkvm_generator_registry.is_some(); + let has_execution_layer_and_proof_gen = + self.execution_layer.is_some() && self.zkvm_generator_registry.is_some(); let beacon_chain = BeaconChain { spec: self.spec.clone(), diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index d92874fc335..80bfc5e9e96 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -211,10 +211,7 @@ impl DataAvailabilityChecker { /// Get all execution proofs we have for a block. /// Used when responding to RPC requests. - pub fn get_execution_proofs( - &self, - block_root: &Hash256, - ) -> Option>> { + pub fn get_execution_proofs(&self, block_root: &Hash256) -> Option>> { self.availability_cache .peek_pending_components(block_root, |components| { components.map(|components| { @@ -289,17 +286,16 @@ impl DataAvailabilityChecker { // If the chain spec enables zkVM, the node must have --activate-zkvm flag set. return Err(AvailabilityCheckError::ProofVerificationError( "Node is receiving execution proofs but zkVM verification is not enabled. \ - Use --activate-zkvm flag to enable proof verification.".to_string() + Use --activate-zkvm flag to enable proof verification." 
+ .to_string(), )); }; let subnet_id = proof.proof_id; - let verifier = verifier_registry - .get_verifier(subnet_id) - .ok_or_else(|| { - warn!(?subnet_id, "No verifier registered for subnet"); - AvailabilityCheckError::UnsupportedProofID(subnet_id) - })?; + let verifier = verifier_registry.get_verifier(subnet_id).ok_or_else(|| { + warn!(?subnet_id, "No verifier registered for subnet"); + AvailabilityCheckError::UnsupportedProofID(subnet_id) + })?; verifier.verify(proof).map_err(|e| { AvailabilityCheckError::ProofVerificationError(format!( @@ -418,7 +414,10 @@ impl DataAvailabilityChecker { components.and_then(|c| c.block.as_ref().and_then(|b| b.execution_payload_hash())) }) .ok_or_else(|| { - warn!(?block_root, "Cannot verify proofs: block not in cache or has no execution payload"); + warn!( + ?block_root, + "Cannot verify proofs: block not in cache or has no execution payload" + ); AvailabilityCheckError::MissingExecutionPayload })?; @@ -447,12 +446,10 @@ impl DataAvailabilityChecker { }); } - let verifier = verifier_registry - .get_verifier(proof_id) - .ok_or_else(|| { - warn!(?proof_id, "No verifier registered for proof ID"); - AvailabilityCheckError::UnsupportedProofID(proof_id) - })?; + let verifier = verifier_registry.get_verifier(proof_id).ok_or_else(|| { + warn!(?proof_id, "No verifier registered for proof ID"); + AvailabilityCheckError::UnsupportedProofID(proof_id) + })?; // Verify the proof (proof contains block_hash internally) match verifier.verify(&proof) { @@ -810,9 +807,9 @@ impl DataAvailabilityChecker { /// /// Note: This follows the same pattern as blob retention: proofs are required starting from /// the zkvm_fork epoch, but only retained for a configured number of epochs. 
- /// + /// /// TODO(zkproofs): We don't store proofs forever and we also don't store - /// blobs forever, perhaps we should because when the blob disappears, we may not + /// blobs forever, perhaps we should because when the blob disappears, we may not /// be able to remake the proof when we put blobs in blocks. /// We don't for now because proofs are quite large at the moment. /// diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 33963661a20..ee6d8372953 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -355,8 +355,8 @@ impl PendingComponents { // Nodes that have EL but DON'T generate proofs are lightweight verifiers and wait for proofs. // TODO(zkproofs): This is a technicality mainly because we cannot remove the EL on kurtosis // ie each CL is coupled with an EL - let needs_execution_proofs = spec.zkvm_min_proofs_required().is_some() - && !has_execution_layer_and_proof_gen; + let needs_execution_proofs = + spec.zkvm_min_proofs_required().is_some() && !has_execution_layer_and_proof_gen; if needs_execution_proofs { let min_proofs = spec.zkvm_min_proofs_required().unwrap(); diff --git a/beacon_node/beacon_chain/src/execution_proof_verification.rs b/beacon_node/beacon_chain/src/execution_proof_verification.rs index 92272644d21..f20d0494dda 100644 --- a/beacon_node/beacon_chain/src/execution_proof_verification.rs +++ b/beacon_node/beacon_chain/src/execution_proof_verification.rs @@ -108,7 +108,9 @@ pub struct GossipVerifiedExecutionProof, } -impl std::fmt::Debug for GossipVerifiedExecutionProof { +impl std::fmt::Debug + for GossipVerifiedExecutionProof +{ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("GossipVerifiedExecutionProof") .field("block_root", &self.block_root) @@ -327,7 
+329,7 @@ fn verify_slot_greater_than_latest_finalized_slot( } /// Verify the zkVM proof. -/// +/// /// Note: This is expensive fn verify_zkvm_proof( execution_proof: &ExecutionProof, @@ -395,10 +397,8 @@ mod tests { let proof_id = ExecutionProofId::new(0).expect("Valid proof id"); let proof = create_test_execution_proof(proof_id, future_slot, Hash256::random()); - let result = validate_execution_proof_for_gossip::<_, Observe>( - Arc::new(proof), - &harness.chain, - ); + let result = + validate_execution_proof_for_gossip::<_, Observe>(Arc::new(proof), &harness.chain); assert!(matches!( result.err(), @@ -420,22 +420,25 @@ mod tests { harness.advance_slot(); // Advance chain to create finalized slot - harness.extend_chain( - 32, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ).await; - - let finalized_slot = harness.finalized_checkpoint().epoch.start_slot(E::slots_per_epoch()); + harness + .extend_chain( + 32, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let finalized_slot = harness + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()); // Create proof for slot before finalized let old_slot = finalized_slot.saturating_sub(1u64); let proof_id = ExecutionProofId::new(0).expect("Valid proof id"); let proof = create_test_execution_proof(proof_id, old_slot, Hash256::random()); - let result = validate_execution_proof_for_gossip::<_, Observe>( - Arc::new(proof), - &harness.chain, - ); + let result = + validate_execution_proof_for_gossip::<_, Observe>(Arc::new(proof), &harness.chain); assert!(matches!( result.err(), @@ -461,10 +464,8 @@ mod tests { let block_root = harness.chain.head_beacon_block_root(); let proof = create_test_execution_proof(proof_id, current_slot, block_root); - let result = validate_execution_proof_for_gossip::<_, Observe>( - Arc::new(proof), - &harness.chain, - ); + let result = + validate_execution_proof_for_gossip::<_, Observe>(Arc::new(proof), &harness.chain); 
match result { Ok(_) => {} @@ -495,19 +496,19 @@ mod tests { let current_slot = harness.get_current_slot(); let proof_id = ExecutionProofId::new(0).expect("Valid proof id"); let block_root = Hash256::random(); - let proof = Arc::new(create_test_execution_proof(proof_id, current_slot, block_root)); + let proof = Arc::new(create_test_execution_proof( + proof_id, + current_slot, + block_root, + )); - let result1 = validate_execution_proof_for_gossip::<_, Observe>( - proof.clone(), - &harness.chain, - ); + let result1 = + validate_execution_proof_for_gossip::<_, Observe>(proof.clone(), &harness.chain); assert!(result1.is_ok()); // Should now be rejected as duplicate - let result2 = validate_execution_proof_for_gossip::<_, Observe>( - proof.clone(), - &harness.chain, - ); + let result2 = + validate_execution_proof_for_gossip::<_, Observe>(proof.clone(), &harness.chain); assert!( matches!( @@ -551,7 +552,11 @@ mod tests { let current_slot = harness.get_current_slot(); let block_root = Hash256::random(); - let proof = Arc::new(create_test_execution_proof(subnet_id, current_slot, block_root)); + let proof = Arc::new(create_test_execution_proof( + subnet_id, + current_slot, + block_root, + )); // Put the proof directly into the DA checker cache (this can happen if it arritves via RPC) harness @@ -581,10 +586,8 @@ mod tests { ); // Now it arrives via gossip - let result = validate_execution_proof_for_gossip::<_, Observe>( - proof.clone(), - &harness.chain, - ); + let result = + validate_execution_proof_for_gossip::<_, Observe>(proof.clone(), &harness.chain); // Should be rejected with PriorKnownUnpublished (safe to propagate) assert!( @@ -608,13 +611,14 @@ mod tests { ); // Second gossip attempt should be rejected as PriorKnown (not PriorKnownUnpublished) - let result2 = validate_execution_proof_for_gossip::<_, Observe>( - proof.clone(), - &harness.chain, - ); + let result2 = + validate_execution_proof_for_gossip::<_, Observe>(proof.clone(), &harness.chain); assert!( - 
matches!(result2.err(), Some(GossipExecutionProofError::PriorKnown { .. })), + matches!( + result2.err(), + Some(GossipExecutionProofError::PriorKnown { .. }) + ), "Second gossip should be rejected as PriorKnown (already observed)" ); } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 7b7a3d0bb8f..686b56e63eb 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -20,11 +20,11 @@ pub mod chain_config; pub mod custody_context; pub mod data_availability_checker; pub mod data_column_verification; -pub mod execution_proof_verification; mod early_attester_cache; mod errors; pub mod events; pub mod execution_payload; +pub mod execution_proof_verification; pub mod fetch_blobs; pub mod fork_choice_signal; pub mod fork_revert; diff --git a/beacon_node/beacon_chain/src/observed_execution_proofs.rs b/beacon_node/beacon_chain/src/observed_execution_proofs.rs index 154d7f32150..3851cfa261d 100644 --- a/beacon_node/beacon_chain/src/observed_execution_proofs.rs +++ b/beacon_node/beacon_chain/src/observed_execution_proofs.rs @@ -4,7 +4,7 @@ //! This cache prevents DoS attacks where an attacker repeatedly gossips the same execution proof, //! forcing expensive zkVM verification operations. Only proofs that have passed basic gossip //! validation and proof verification should be added to this cache. -//! +//! //! TODO(zkproofs): we want the proofs to be signed and then we can just add them to the cache //! once the signature has been verified like `observed_data_sidecars` @@ -68,7 +68,7 @@ impl ObservedExecutionProofs { /// Returns `true` if the proof was already observed (duplicate), `false` if it's new. /// /// Returns an error if the proof's slot is at or below the finalized slot. 
- /// Note: This shouldn't happen because it means we've received a proof for + /// Note: This shouldn't happen because it means we've received a proof for /// a finalized block pub fn observe_proof( &mut self, @@ -341,19 +341,25 @@ mod tests { let proof_id = ExecutionProofId::new(0).unwrap(); // Slot 50 should be rejected (finalized) - assert!(cache - .is_known(Slot::new(50), Hash256::from_low_u64_be(50), proof_id) - .is_err()); + assert!( + cache + .is_known(Slot::new(50), Hash256::from_low_u64_be(50), proof_id) + .is_err() + ); // Slot 51 should still be present (> finalized) - assert!(cache - .is_known(Slot::new(51), Hash256::from_low_u64_be(51), proof_id) - .unwrap()); + assert!( + cache + .is_known(Slot::new(51), Hash256::from_low_u64_be(51), proof_id) + .unwrap() + ); // Slot 52 should still be present - assert!(cache - .is_known(Slot::new(52), Hash256::from_low_u64_be(52), proof_id) - .unwrap()); + assert!( + cache + .is_known(Slot::new(52), Hash256::from_low_u64_be(52), proof_id) + .unwrap() + ); } #[test] diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 3ae37af529a..7d21d4ec641 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -886,7 +886,8 @@ impl BeaconProcessor { let mut gossip_block_queue = FifoQueue::new(queue_lengths.gossip_block_queue); let mut gossip_blob_queue = FifoQueue::new(queue_lengths.gossip_blob_queue); let mut gossip_data_column_queue = FifoQueue::new(queue_lengths.gossip_data_column_queue); - let mut gossip_execution_proof_queue = FifoQueue::new(queue_lengths.gossip_execution_proof_queue); + let mut gossip_execution_proof_queue = + FifoQueue::new(queue_lengths.gossip_execution_proof_queue); let mut delayed_block_queue = FifoQueue::new(queue_lengths.delayed_block_queue); let mut status_queue = FifoQueue::new(queue_lengths.status_queue); @@ -1406,7 +1407,9 @@ impl BeaconProcessor { gossip_bls_to_execution_change_queue.push(work, work_id) 
} Work::BlobsByRootsRequest { .. } => blbroots_queue.push(work, work_id), - Work::ExecutionProofsByRootsRequest { .. } => blbroots_queue.push(work, work_id), + Work::ExecutionProofsByRootsRequest { .. } => { + blbroots_queue.push(work, work_id) + } Work::DataColumnsByRootsRequest { .. } => { dcbroots_queue.push(work, work_id) } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index c324be30a3f..b5fd4e6621e 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -4,12 +4,12 @@ use crate::compute_light_client_updates::{ }; use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::notifier::spawn_notifier; +use beacon_chain::ProofGenerationEvent; use beacon_chain::attestation_simulator::start_attestation_simulator_service; use beacon_chain::data_availability_checker::start_availability_cache_maintenance_service; use beacon_chain::graffiti_calculator::start_engine_version_cache_refresh_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; -use beacon_chain::ProofGenerationEvent; use beacon_chain::{ BeaconChain, BeaconChainTypes, MigratorConfig, ServerSentEventHandler, builder::{BeaconChainBuilder, Witness}, @@ -21,8 +21,6 @@ use beacon_chain::{Kzg, LightClientProducerEvent}; use beacon_processor::{BeaconProcessor, BeaconProcessorChannels}; use beacon_processor::{BeaconProcessorConfig, BeaconProcessorQueueLengths}; use environment::RuntimeContext; -use proof_generation_service; -use zkvm_execution_layer; use eth2::{ BeaconNodeHttpClient, Error as ApiError, Timeouts, types::{BlockId, StateId}, @@ -34,6 +32,7 @@ use genesis::{DEFAULT_ETH1_BLOCK_HASH, interop_genesis_state}; use lighthouse_network::{NetworkGlobals, prometheus_client::registry::Registry}; use monitoring_api::{MonitoringHttpClient, ProcessType}; use network::{NetworkConfig, NetworkSenders, NetworkService}; +use proof_generation_service; use 
rand::SeedableRng; use rand::rngs::{OsRng, StdRng}; use slasher::Slasher; @@ -50,6 +49,7 @@ use types::{ BeaconState, BlobSidecarList, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, SignedBeaconBlock, test_utils::generate_deterministic_keypairs, }; +use zkvm_execution_layer; /// Interval between polling the eth1 node for genesis information. pub const ETH1_GENESIS_UPDATE_INTERVAL_MILLIS: u64 = 7_000; @@ -91,7 +91,8 @@ pub struct ClientBuilder { beacon_processor_config: Option, beacon_processor_channels: Option>, light_client_server_rv: Option>>, - proof_generation_rx: Option>>, + proof_generation_rx: + Option>>, eth_spec_instance: T::EthSpec, } diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index f5662ca0a34..d89c953ae1e 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -16,8 +16,8 @@ use network_utils::enr_ext::{CombinedKeyExt, EnrExt, peer_id_to_node_id}; use alloy_rlp::bytes::Bytes; use enr::{ - ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, - SYNC_COMMITTEE_BITFIELD_ENR_KEY, ZKVM_ENABLED_ENR_KEY, + ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY, + ZKVM_ENABLED_ENR_KEY, }; use futures::prelude::*; use futures::stream::FuturesUnordered; diff --git a/beacon_node/lighthouse_network/src/peer_manager/config.rs b/beacon_node/lighthouse_network/src/peer_manager/config.rs index fc77171cee7..57a5fa68a23 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/config.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/config.rs @@ -21,7 +21,7 @@ pub struct Config { /// Whether quic is enabled. pub quic_enabled: bool, /// Whether execution proofs are enabled. - pub execution_proof_enabled : bool, + pub execution_proof_enabled: bool, /// Target number of peers to connect to. 
pub target_peer_count: usize, diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index ddcf27b35db..453224cbe22 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -1021,7 +1021,7 @@ impl PeerManager { // Check if we have enough zkVM-enabled peers // Count peers subscribed to the execution_proof gossip topic // TODO(zkproofs): Note that since peers do not advertise whether - // they are proof generating, we cannot favour them. This is + // they are proof generating, we cannot favour them. This is // fine for optional proofs and mandatory proofs will imply // that the builder who is well connected will propagate it // to most of the network. diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index f47a34f069b..483da11be0b 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -282,7 +282,7 @@ impl PeerInfo { // Check if the peer has zkVM enabled (execution proof support) if let Some(enr) = self.enr.as_ref() { - return enr.zkvm_enabled() + return enr.zkvm_enabled(); } false diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index a603b774d23..34c42fdd041 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -17,12 +17,11 @@ use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; use types::{ BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EthSpec, - ExecutionProof, ForkContext, ForkName, Hash256, - LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, - LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockAltair, - 
SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, - SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, - SignedBeaconBlockGloas, + ExecutionProof, ForkContext, ForkName, Hash256, LightClientBootstrap, + LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, + SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, + SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, + SignedBeaconBlockFulu, SignedBeaconBlockGloas, }; use unsigned_varint::codec::Uvi; @@ -575,8 +574,9 @@ fn handle_rpc_request( let request = ExecutionProofsByRootRequest::from_ssz_bytes(decoded_buffer) .map_err(RPCError::SSZDecodeError)?; - request.validate(spec) - .map_err(|e| RPCError::InvalidData(e))?; + request + .validate(spec) + .map_err(RPCError::InvalidData)?; Ok(Some(RequestType::ExecutionProofsByRoot(request))) } @@ -927,9 +927,9 @@ mod tests { use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, - DataColumnsByRootIdentifier, EmptyBlock, Epoch, ExecutionProofId, - FixedBytesExtended, FullPayload, KzgCommitment, KzgProof, Signature, - SignedBeaconBlockHeader, Slot, blob_sidecar::BlobIdentifier, data_column_sidecar::Cell, + DataColumnsByRootIdentifier, EmptyBlock, Epoch, ExecutionProofId, FixedBytesExtended, + FullPayload, KzgCommitment, KzgProof, Signature, SignedBeaconBlockHeader, Slot, + blob_sidecar::BlobIdentifier, data_column_sidecar::Cell, }; type Spec = types::MainnetEthSpec; diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index b165f06f768..b297ce8f08f 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -17,8 +17,8 @@ use 
types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnsByRootIdentifier, Epoch, EthSpec, ExecutionProof, ExecutionProofId, ForkContext, Hash256, LightClientBootstrap, - LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, - RuntimeVariableList, SignedBeaconBlock, Slot, blob_sidecar::BlobSidecar, + LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, + SignedBeaconBlock, Slot, blob_sidecar::BlobSidecar, }; /// Maximum length of error message. @@ -839,7 +839,10 @@ impl RpcSuccessResponse { Self::LightClientOptimisticUpdate(r) => Some(r.get_slot()), Self::LightClientUpdatesByRange(r) => Some(r.attested_header_slot()), // TODO(zkproofs): Change this when we add Slot to ExecutionProof - Self::ExecutionProofsByRoot(_) | Self::MetaData(_) | Self::Status(_) | Self::Pong(_) => None, + Self::ExecutionProofsByRoot(_) + | Self::MetaData(_) + | Self::Status(_) + | Self::Pong(_) => None, } } } @@ -1023,4 +1026,4 @@ impl std::fmt::Display for ExecutionProofsByRootRequest { self.count_needed ) } -} \ No newline at end of file +} diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 108b73f326d..0428f8787a3 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -416,7 +416,10 @@ impl SupportedProtocol { ]); } if fork_context.spec.is_zkvm_enabled() { - supported.push(ProtocolId::new(SupportedProtocol::ExecutionProofsByRootV1, Encoding::SSZSnappy)); + supported.push(ProtocolId::new( + SupportedProtocol::ExecutionProofsByRootV1, + Encoding::SSZSnappy, + )); } supported } @@ -661,10 +664,7 @@ pub fn rpc_data_column_limits( pub fn rpc_execution_proof_limits() -> RpcLimits { // TODO(zkproofs): Can max proof size change over hardforks? 
- RpcLimits::new( - ExecutionProof::min_size(), - ExecutionProof::max_size(), - ) + RpcLimits::new(ExecutionProof::min_size(), ExecutionProof::max_size()) } /* Inbound upgrade */ diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 46a26c43630..f70b29cfe45 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -337,7 +337,10 @@ impl RPCRateLimiter { .set_quota(Protocol::BlobsByRoot, blobs_by_root_quota) .set_quota(Protocol::DataColumnsByRoot, data_columns_by_root_quota) .set_quota(Protocol::DataColumnsByRange, data_columns_by_range_quota) - .set_quota(Protocol::ExecutionProofsByRoot, execution_proofs_by_root_quota) + .set_quota( + Protocol::ExecutionProofsByRoot, + execution_proofs_by_root_quota, + ) .set_quota(Protocol::LightClientBootstrap, light_client_bootstrap_quota) .set_quota( Protocol::LightClientOptimisticUpdate, diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index b8fd30f66fa..4002f8fd44e 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1665,9 +1665,11 @@ impl Network { RpcSuccessResponse::DataColumnsByRange(resp) => { self.build_response(id, peer_id, Response::DataColumnsByRange(Some(resp))) } - RpcSuccessResponse::ExecutionProofsByRoot(resp) => { - self.build_response(id, peer_id, Response::ExecutionProofsByRoot(Some(resp))) - } + RpcSuccessResponse::ExecutionProofsByRoot(resp) => self.build_response( + id, + peer_id, + Response::ExecutionProofsByRoot(Some(resp)), + ), // Should never be reached RpcSuccessResponse::LightClientBootstrap(bootstrap) => { self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap)) @@ -1697,7 +1699,9 @@ impl Network { ResponseTermination::BlobsByRoot => Response::BlobsByRoot(None), 
ResponseTermination::DataColumnsByRoot => Response::DataColumnsByRoot(None), ResponseTermination::DataColumnsByRange => Response::DataColumnsByRange(None), - ResponseTermination::ExecutionProofsByRoot => Response::ExecutionProofsByRoot(None), + ResponseTermination::ExecutionProofsByRoot => { + Response::ExecutionProofsByRoot(None) + } ResponseTermination::LightClientUpdatesByRange => { Response::LightClientUpdatesByRange(None) } diff --git a/beacon_node/lighthouse_network/src/types/subnet.rs b/beacon_node/lighthouse_network/src/types/subnet.rs index 43b59b12273..2d5ca95bf50 100644 --- a/beacon_node/lighthouse_network/src/types/subnet.rs +++ b/beacon_node/lighthouse_network/src/types/subnet.rs @@ -14,7 +14,7 @@ pub enum Subnet { SyncCommittee(SyncSubnetId), /// Represents a gossipsub data column subnet. DataColumn(DataColumnSubnetId), - /// Represents execution proof support. + /// Represents execution proof support. // /// Note: ExecutionProof uses a single gossip topic (not multiple topics), /// but we track it here for ENR-based peer discovery to find zkVM-enabled peers. 
diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 29cdad2db2d..901e2f1c5aa 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -2,9 +2,7 @@ use gossipsub::{IdentTopic as Topic, TopicHash}; use serde::{Deserialize, Serialize}; use std::collections::HashSet; use strum::AsRefStr; -use types::{ - ChainSpec, DataColumnSubnetId, EthSpec, ForkName, SubnetId, SyncSubnetId, Unsigned, -}; +use types::{ChainSpec, DataColumnSubnetId, EthSpec, ForkName, SubnetId, SyncSubnetId, Unsigned}; use crate::Subnet; diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 76318b3e851..d4ae1dadb31 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -17,8 +17,8 @@ use tracing::{Instrument, debug, error, info_span, warn}; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EmptyBlock, Epoch, - EthSpec, ExecutionBlockHash, ExecutionProof, ExecutionProofId, FixedBytesExtended, - ForkName, Hash256, KzgCommitment, KzgProof, MinimalEthSpec, RuntimeVariableList, Signature, + EthSpec, ExecutionBlockHash, ExecutionProof, ExecutionProofId, FixedBytesExtended, ForkName, + Hash256, KzgCommitment, KzgProof, MinimalEthSpec, RuntimeVariableList, Signature, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; @@ -1772,8 +1772,8 @@ fn test_tcp_execution_proofs_by_root_single() { let rpc_request = RequestType::ExecutionProofsByRoot( ExecutionProofsByRootRequest::new( block_root, - vec![], // No proofs already have - 2, // Request 2 proofs + vec![], // No proofs already have + 2, // Request 2 proofs ) .unwrap(), ); @@ -1836,7 +1836,11 @@ fn test_tcp_execution_proofs_by_root_single() { if request_type == 
rpc_request { debug!("Receiver got request"); // Send the proof - receiver.send_response(peer_id, inbound_request_id, rpc_response.clone()); + receiver.send_response( + peer_id, + inbound_request_id, + rpc_response.clone(), + ); // Send stream termination receiver.send_response( peer_id, @@ -1899,12 +1903,7 @@ fn test_tcp_execution_proofs_by_root_chunked() { // ExecutionProofsByRoot Request for multiple proofs let rpc_request = RequestType::ExecutionProofsByRoot( - ExecutionProofsByRootRequest::new( - block_root, - vec![], - proof_ids.len(), - ) - .unwrap(), + ExecutionProofsByRootRequest::new(block_root, vec![], proof_ids.len()).unwrap(), ); // Create proofs for each proof ID @@ -2032,12 +2031,7 @@ fn test_tcp_execution_proofs_by_root_empty_response() { let block_root = Hash256::random(); let rpc_request = RequestType::ExecutionProofsByRoot( - ExecutionProofsByRootRequest::new( - block_root, - vec![], - 2, - ) - .unwrap(), + ExecutionProofsByRootRequest::new(block_root, vec![], 2).unwrap(), ); let mut received_termination = false; diff --git a/beacon_node/lighthouse_tracing/src/lib.rs b/beacon_node/lighthouse_tracing/src/lib.rs index e62ba0f5428..dd9e9f1ebb2 100644 --- a/beacon_node/lighthouse_tracing/src/lib.rs +++ b/beacon_node/lighthouse_tracing/src/lib.rs @@ -39,7 +39,8 @@ pub const SPAN_HANDLE_BLOBS_BY_RANGE_REQUEST: &str = "handle_blobs_by_range_requ pub const SPAN_HANDLE_DATA_COLUMNS_BY_RANGE_REQUEST: &str = "handle_data_columns_by_range_request"; pub const SPAN_HANDLE_BLOCKS_BY_ROOT_REQUEST: &str = "handle_blocks_by_root_request"; pub const SPAN_HANDLE_BLOBS_BY_ROOT_REQUEST: &str = "handle_blobs_by_root_request"; -pub const SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST: &str = "handle_execution_proofs_by_root_request"; +pub const SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST: &str = + "handle_execution_proofs_by_root_request"; pub const SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST: &str = "handle_data_columns_by_root_request"; pub const 
SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE: &str = "handle_light_client_updates_by_range"; pub const SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP: &str = "handle_light_client_bootstrap"; diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 240cc43352d..bdc0581ce98 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -41,9 +41,10 @@ use tracing::{Instrument, Span, debug, error, info, instrument, trace, warn}; use types::{ Attestation, AttestationData, AttestationRef, AttesterSlashing, BlobSidecar, DataColumnSidecar, DataColumnSubnetId, EthSpec, ExecutionProof, Hash256, IndexedAttestation, - LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, - SingleAttestation, Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, beacon_block::BlockImportSource, + LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedVoluntaryExit, SingleAttestation, Slot, SubnetId, + SyncCommitteeMessage, SyncSubnetId, beacon_block::BlockImportSource, }; use beacon_processor::work_reprocessing_queue::QueuedColumnReconstruction; @@ -828,7 +829,11 @@ impl NetworkBeaconProcessor { MessageAcceptance::Accept, ); } - GossipExecutionProofError::PriorKnown { block_root, proof_id, .. } => { + GossipExecutionProofError::PriorKnown { + block_root, + proof_id, + .. + } => { // Proof already known via gossip. No penalty, gossip filter should // filter duplicates. 
debug!( @@ -931,7 +936,10 @@ impl NetworkBeaconProcessor { MessageAcceptance::Reject, ); } - GossipExecutionProofError::FutureSlot { message_slot, latest_permissible_slot } => { + GossipExecutionProofError::FutureSlot { + message_slot, + latest_permissible_slot, + } => { debug!( error = ?err, %block_root, @@ -951,7 +959,10 @@ impl NetworkBeaconProcessor { MessageAcceptance::Ignore, ); } - GossipExecutionProofError::PastFinalizedSlot { proof_slot, finalized_slot } => { + GossipExecutionProofError::PastFinalizedSlot { + proof_slot, + finalized_slot, + } => { debug!( error = ?err, %block_root, @@ -1346,7 +1357,10 @@ impl NetworkBeaconProcessor { let proof_slot = verified_proof.slot(); let subnet_id = verified_proof.subnet_id(); - let result = self.chain.process_gossip_execution_proof(verified_proof, || Ok(())).await; + let result = self + .chain + .process_gossip_execution_proof(verified_proof, || Ok(())) + .await; register_process_result_metrics(&result, metrics::BlockSource::Gossip, "execution_proof"); match &result { diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 396431ec1aa..e617a2b5b29 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -15,9 +15,9 @@ use lighthouse_tracing::{ SPAN_HANDLE_BLOBS_BY_RANGE_REQUEST, SPAN_HANDLE_BLOBS_BY_ROOT_REQUEST, SPAN_HANDLE_BLOCKS_BY_RANGE_REQUEST, SPAN_HANDLE_BLOCKS_BY_ROOT_REQUEST, SPAN_HANDLE_DATA_COLUMNS_BY_RANGE_REQUEST, SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST, - SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST, - SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP, SPAN_HANDLE_LIGHT_CLIENT_FINALITY_UPDATE, - SPAN_HANDLE_LIGHT_CLIENT_OPTIMISTIC_UPDATE, SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE, + SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST, SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP, + SPAN_HANDLE_LIGHT_CLIENT_FINALITY_UPDATE, 
SPAN_HANDLE_LIGHT_CLIENT_OPTIMISTIC_UPDATE, + SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE, }; use methods::LightClientUpdatesByRangeRequest; use slot_clock::SlotClock; @@ -415,7 +415,11 @@ impl NetworkBeaconProcessor { self.terminate_response_stream( peer_id, inbound_request_id, - self.handle_execution_proofs_by_root_request_inner(peer_id, inbound_request_id, request), + self.handle_execution_proofs_by_root_request_inner( + peer_id, + inbound_request_id, + request, + ), Response::ExecutionProofsByRoot, ); } @@ -428,7 +432,8 @@ impl NetworkBeaconProcessor { request: ExecutionProofsByRootRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { let block_root = request.block_root; - let already_have_set: std::collections::HashSet<_> = request.already_have.iter().copied().collect(); + let already_have_set: std::collections::HashSet<_> = + request.already_have.iter().copied().collect(); let count_needed = request.count_needed as usize; // Get all execution proofs we have for this block from the DA checker diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 1cb0e113db7..eb02ddad921 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -274,10 +274,15 @@ impl Router { request, ), ), - RequestType::ExecutionProofsByRoot(request) => self.handle_beacon_processor_send_result( - self.network_beacon_processor - .send_execution_proofs_by_roots_request(peer_id, inbound_request_id, request), - ), + RequestType::ExecutionProofsByRoot(request) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor + .send_execution_proofs_by_roots_request( + peer_id, + inbound_request_id, + request, + ), + ), _ => {} } } @@ -316,11 +321,7 @@ impl Router { self.on_data_columns_by_range_response(peer_id, app_request_id, data_column); } Response::ExecutionProofsByRoot(execution_proof) => { - self.on_execution_proofs_by_root_response( - peer_id, - app_request_id, - execution_proof, - ); + 
self.on_execution_proofs_by_root_response(peer_id, app_request_id, execution_proof); } // Light client responses should not be received Response::LightClientBootstrap(_) @@ -397,16 +398,15 @@ impl Router { ), ) } - PubsubMessage::ExecutionProof(execution_proof) => { - self.handle_beacon_processor_send_result( + PubsubMessage::ExecutionProof(execution_proof) => self + .handle_beacon_processor_send_result( self.network_beacon_processor.send_gossip_execution_proof( message_id, peer_id, execution_proof, timestamp_now(), ), - ) - } + ), PubsubMessage::VoluntaryExit(exit) => { debug!(%peer_id, "Received a voluntary exit"); self.handle_beacon_processor_send_result( diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index 3a9da8b66ea..64da1ae61fc 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -2,7 +2,7 @@ use crate::sync::block_lookups::single_block_lookup::{ LookupRequestError, SingleBlockLookup, SingleLookupRequestState, }; use crate::sync::block_lookups::{ - BlobRequestState, BlockRequestState, CustodyRequestState, ProofRequestState, PeerId, + BlobRequestState, BlockRequestState, CustodyRequestState, PeerId, ProofRequestState, }; use crate::sync::manager::BlockProcessType; use crate::sync::network_context::{LookupRequestResult, SyncNetworkContext}; @@ -227,8 +227,13 @@ impl RequestState for ProofRequestState { _min_proofs: usize, cx: &mut SyncNetworkContext, ) -> Result { - cx.execution_proof_lookup_request(id, lookup_peers, self.block_root, self.min_proofs_required) - .map_err(LookupRequestError::SendFailedNetwork) + cx.execution_proof_lookup_request( + id, + lookup_peers, + self.block_root, + self.min_proofs_required, + ) + .map_err(LookupRequestError::SendFailedNetwork) } fn send_for_processing( @@ -251,7 +256,8 @@ impl RequestState for ProofRequestState { } fn request_state_mut(request: &mut SingleBlockLookup) -> 
Result<&mut Self, &'static str> { - request.proof_request + request + .proof_request .as_mut() .ok_or("no active proof request") } diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 36e002f078a..6212c63a119 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -39,7 +39,9 @@ use fnv::FnvHashMap; use lighthouse_network::service::api_types::SingleLookupReqId; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; -pub use single_block_lookup::{BlobRequestState, BlockRequestState, CustodyRequestState, ProofRequestState}; +pub use single_block_lookup::{ + BlobRequestState, BlockRequestState, CustodyRequestState, ProofRequestState, +}; use std::collections::hash_map::Entry; use std::sync::Arc; use std::time::Duration; diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 2f78fab99f6..5f4869c0f7e 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -17,10 +17,7 @@ use store::Hash256; use strum::IntoStaticStr; use tracing::{Span, debug_span}; use types::blob_sidecar::FixedBlobSidecarList; -use types::{ - DataColumnSidecarList, EthSpec, ExecutionProof, SignedBeaconBlock, - Slot, -}; +use types::{DataColumnSidecarList, EthSpec, ExecutionProof, SignedBeaconBlock, Slot}; // Dedicated enum for LookupResult to force its usage #[must_use = "LookupResult must be handled with on_lookup_result"] @@ -182,10 +179,11 @@ impl SingleBlockLookup { ComponentRequests::NotNeeded { .. 
} => true, }; - let proof_processed = self.proof_request + let proof_processed = self + .proof_request .as_ref() .map(|request| request.state.is_processed()) - .unwrap_or(true); // If no proof request, consider it processed + .unwrap_or(true); // If no proof request, consider it processed block_processed && da_component_processed && proof_processed } @@ -205,16 +203,13 @@ impl SingleBlockLookup { // check if the`block_request_state.state.is_awaiting_event(). However we already // checked that above, so `WaitingForBlock => false` is equivalent. ComponentRequests::WaitingForBlock => false, - ComponentRequests::ActiveBlobRequest(request, _) => { - request.state.is_awaiting_event() - } - ComponentRequests::ActiveCustodyRequest(request) => { - request.state.is_awaiting_event() - } + ComponentRequests::ActiveBlobRequest(request, _) => request.state.is_awaiting_event(), + ComponentRequests::ActiveCustodyRequest(request) => request.state.is_awaiting_event(), ComponentRequests::NotNeeded { .. } => false, }; - let proof_awaiting = self.proof_request + let proof_awaiting = self + .proof_request .as_ref() .map(|request| request.state.is_awaiting_event()) .unwrap_or(false); @@ -268,9 +263,8 @@ impl SingleBlockLookup { if cx.chain.should_fetch_execution_proofs(block_epoch) { if let Some(min_proofs) = cx.chain.min_execution_proofs_required() { - self.proof_request = Some( - ProofRequestState::new(self.block_root, min_proofs) - ); + self.proof_request = + Some(ProofRequestState::new(self.block_root, min_proofs)); } } } else { diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 72a228b7c27..c0af69d7a40 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -1255,9 +1255,9 @@ impl SyncManager { peer_id: PeerId, execution_proof: RpcEvent>, ) { - if let Some(resp) = self - .network - .on_single_execution_proof_response(id, peer_id, execution_proof) + if let Some(resp) = + self.network + 
.on_single_execution_proof_response(id, peer_id, execution_proof) { self.block_lookups .on_download_response::( diff --git a/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs b/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs index c3fad2cb0ac..257d6e1a311 100644 --- a/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs @@ -13,12 +13,8 @@ pub struct ExecutionProofsByRootSingleBlockRequest { impl ExecutionProofsByRootSingleBlockRequest { pub fn into_request(self) -> Result { - ExecutionProofsByRootRequest::new( - self.block_root, - self.already_have, - self.count_needed, - ) - .map_err(|e| e.to_string()) + ExecutionProofsByRootRequest::new(self.block_root, self.already_have, self.count_needed) + .map_err(|e| e.to_string()) } } @@ -69,4 +65,4 @@ impl ActiveRequestItems for ExecutionProofsByRootRequestItems { fn consume(&mut self) -> Vec { std::mem::take(&mut self.items) } -} \ No newline at end of file +} diff --git a/beacon_node/network/src/sync/tests/execution_proof_tests.rs b/beacon_node/network/src/sync/tests/execution_proof_tests.rs index c726839a179..ce006172187 100644 --- a/beacon_node/network/src/sync/tests/execution_proof_tests.rs +++ b/beacon_node/network/src/sync/tests/execution_proof_tests.rs @@ -1,7 +1,7 @@ use super::*; use crate::sync::block_lookups::common::ResponseType; -use lighthouse_network::service::api_types::SyncRequestId; use lighthouse_network::rpc::{RPCError, RpcErrorResponse}; +use lighthouse_network::service::api_types::SyncRequestId; use types::{ExecutionBlockHash, ExecutionProof, ExecutionProofId, Hash256, Slot}; /// Test successful execution proof fetch and verification @@ -330,10 +330,7 @@ fn test_proof_lookup_timeout() { rig.send_sync_message(SyncMessage::RpcError { sync_request_id: SyncRequestId::SingleExecutionProof { id: proof_id }, peer_id, - error: 
RPCError::ErrorResponse( - RpcErrorResponse::ServerError, - "timeout".to_string(), - ), + error: RPCError::ErrorResponse(RpcErrorResponse::ServerError, "timeout".to_string()), }); // Should penalize peer for timeout diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 4a6c5e60589..08812602466 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -43,7 +43,8 @@ use tracing::info; use types::{ BeaconState, BeaconStateBase, BlobSidecar, BlockImportSource, DataColumnSidecar, EthSpec, ExecutionBlockHash, ExecutionProof, ExecutionProofId, ForkContext, ForkName, Hash256, - MinimalEthSpec as E, SignedBeaconBlock, Slot, data_column_sidecar::ColumnIndex, + MinimalEthSpec as E, SignedBeaconBlock, Slot, + data_column_sidecar::ColumnIndex, test_utils::{SeedableRng, TestRandom, XorShiftRng}, }; @@ -171,7 +172,11 @@ impl TestRig { self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob.into())); } - pub(super) fn trigger_unknown_block_from_attestation(&mut self, block_root: Hash256, peer_id: PeerId) { + pub(super) fn trigger_unknown_block_from_attestation( + &mut self, + block_root: Hash256, + peer_id: PeerId, + ) { self.send_sync_message(SyncMessage::UnknownBlockHashFromAttestation( peer_id, block_root, )); @@ -553,8 +558,14 @@ impl TestRig { ) { for subnet_id in subnet_ids { let proof = Arc::new( - ExecutionProof::new(subnet_id, types::Slot::new(0), block_hash, block_root, vec![1, 2, 3, 4]) - .unwrap(), + ExecutionProof::new( + subnet_id, + types::Slot::new(0), + block_hash, + block_root, + vec![1, 2, 3, 4], + ) + .unwrap(), ); self.single_lookup_proof_response(id, peer_id, Some(proof)); } @@ -589,9 +600,7 @@ impl TestRig { let id = self.find_single_lookup_for(block_root); self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type: BlockProcessType::SingleBlock { id }, - result: 
BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported( - block_root, - )), + result: BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)), }); } diff --git a/beacon_node/proof_generation_service/src/lib.rs b/beacon_node/proof_generation_service/src/lib.rs index 1b020cedb17..728d2eb2d23 100644 --- a/beacon_node/proof_generation_service/src/lib.rs +++ b/beacon_node/proof_generation_service/src/lib.rs @@ -11,7 +11,7 @@ use types::{EthSpec, ExecPayload, ExecutionProofId, Hash256, SignedBeaconBlock, /// This service receives notifications about newly imported blocks and generates /// execution proofs for blocks that don't have proofs yet. This allows any node /// (not just the block proposer) to generate and publish proofs. -/// +/// /// Note: While proofs are optional, we don't have the proposer making proofs /// for their own block. The proposer should insert the block into their own /// chain, so this should trigger. @@ -65,8 +65,8 @@ impl ProofGenerationService { block: Arc>, ) { // Check if proofs are required for this epoch - // TODO(zkproofs): alternative is to only enable this when - // the zkvm fork is enabled. Check if this is possible + // TODO(zkproofs): alternative is to only enable this when + // the zkvm fork is enabled. 
Check if this is possible let block_epoch = slot.epoch(T::EthSpec::slots_per_epoch()); if !self .chain @@ -137,9 +137,16 @@ impl ProofGenerationService { } /// Check if a proof already exists for this block - fn check_if_proof_exists(&self, slot: Slot, block_root: Hash256, proof_id: ExecutionProofId) -> bool { + fn check_if_proof_exists( + &self, + slot: Slot, + block_root: Hash256, + proof_id: ExecutionProofId, + ) -> bool { let observed = self.chain.observed_execution_proofs.read(); - observed.is_known(slot, block_root, proof_id).unwrap_or(false) + observed + .is_known(slot, block_root, proof_id) + .unwrap_or(false) } /// Spawn a task to generate a proof @@ -204,7 +211,10 @@ impl ProofGenerationService { // Double-check that proof didn't arrive via gossip while we were generating let observed = chain.observed_execution_proofs.read(); - if observed.is_known(slot, block_root, proof_id).unwrap_or(false) { + if observed + .is_known(slot, block_root, proof_id) + .unwrap_or(false) + { info!( slot = ?slot, proof_id = ?proof_id, @@ -306,7 +316,10 @@ mod tests { let proof_id = ExecutionProofId::new(0).unwrap(); // Should return false for a proof that hasn't been observed - assert_eq!(service.check_if_proof_exists(slot, block_root, proof_id), false); + assert_eq!( + service.check_if_proof_exists(slot, block_root, proof_id), + false + ); } #[tokio::test] @@ -331,7 +344,10 @@ mod tests { .unwrap(); // Should return true for an observed proof - assert_eq!(service.check_if_proof_exists(slot, block_root, proof_id), true); + assert_eq!( + service.check_if_proof_exists(slot, block_root, proof_id), + true + ); } #[tokio::test] @@ -349,11 +365,13 @@ mod tests { harness.advance_slot(); - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ).await; + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; let block = harness.chain.head_snapshot().beacon_block.clone(); let 
block_root = block.canonical_root(); @@ -365,8 +383,9 @@ mod tests { tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; // Should not have published any proofs because epoch doesn't require them - assert!(network_rx.try_recv().is_err(), "Should not publish proofs when epoch doesn't require them"); + assert!( + network_rx.try_recv().is_err(), + "Should not publish proofs when epoch doesn't require them" + ); } - - } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 62ed66945fc..ca3c67c4f77 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -347,7 +347,12 @@ pub fn get_config( .split(',') .map(|s| s.trim().parse::()) .collect::, _>>() - .map_err(|e| format!("Invalid proof type ID in --zkvm-generation-proof-types: {}", e))? + .map_err(|e| { + format!( + "Invalid proof type ID in --zkvm-generation-proof-types: {}", + e + ) + })? .into_iter() .map(|id| ExecutionProofId::new(id)) .collect::, _>>() diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 92b4831ec09..7a96fa09fe5 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1226,7 +1226,8 @@ impl ChainSpec { /* * Networking zkvm specific */ - min_epochs_for_execution_proof_requests: default_min_epochs_for_execution_proof_requests(), + min_epochs_for_execution_proof_requests: + default_min_epochs_for_execution_proof_requests(), /* * Application specific @@ -1587,7 +1588,8 @@ impl ChainSpec { /* * Networking zkvm specific */ - min_epochs_for_execution_proof_requests: default_min_epochs_for_execution_proof_requests(), + min_epochs_for_execution_proof_requests: + default_min_epochs_for_execution_proof_requests(), /* * Application specific diff --git a/consensus/types/src/execution_proof.rs b/consensus/types/src/execution_proof.rs index 3b4e193d20a..0b74d6286ea 100644 --- a/consensus/types/src/execution_proof.rs +++ b/consensus/types/src/execution_proof.rs @@ -174,5 +174,4 @@ mod tests { 
let result = ExecutionProof::new(subnet_id, slot, block_hash, block_root, proof_data); assert!(result.is_ok()); } - -} \ No newline at end of file +} diff --git a/consensus/types/src/execution_proof_id.rs b/consensus/types/src/execution_proof_id.rs index 4122b54b055..c180f5e0412 100644 --- a/consensus/types/src/execution_proof_id.rs +++ b/consensus/types/src/execution_proof_id.rs @@ -5,7 +5,7 @@ use tree_hash::TreeHash; /// Number of execution proofs /// Each proof represents a different zkVM+EL combination -/// +/// /// TODO(zkproofs): The number 8 is a parameter that we will want to configure in the future pub const EXECUTION_PROOF_TYPE_COUNT: u8 = 8; @@ -141,4 +141,4 @@ mod tests { assert_eq!(proof_id.as_usize(), idx); } } -} \ No newline at end of file +} diff --git a/zkvm_execution_layer/src/config.rs b/zkvm_execution_layer/src/config.rs index f7ab2ab6f1e..7ed6f6125c1 100644 --- a/zkvm_execution_layer/src/config.rs +++ b/zkvm_execution_layer/src/config.rs @@ -2,7 +2,6 @@ use serde::{Deserialize, Serialize}; use std::collections::HashSet; use types::{execution_proof::DEFAULT_MIN_PROOFS_REQUIRED, ExecutionProofId}; - #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ZKVMExecutionLayerConfig { /// Minimum number of proofs required from _different_ proof types (proof_ids) @@ -88,7 +87,9 @@ impl ZKVMExecutionLayerConfigBuilder { /// Build the configuration pub fn build(self) -> Result { let config = ZKVMExecutionLayerConfig { - min_proofs_required: self.min_proofs_required.unwrap_or(DEFAULT_MIN_PROOFS_REQUIRED), + min_proofs_required: self + .min_proofs_required + .unwrap_or(DEFAULT_MIN_PROOFS_REQUIRED), generation_proof_types: self.generation_proof_types, proof_cache_size: self.proof_cache_size.unwrap_or(1024), }; @@ -164,7 +165,7 @@ mod tests { let config = ZKVMExecutionLayerConfig::builder() .add_generation_proof_type(proof_type_0) - .min_proofs_required(2) + .min_proofs_required(2) .build(); assert!(config.is_ok()); diff --git 
a/zkvm_execution_layer/src/dummy_proof_gen.rs b/zkvm_execution_layer/src/dummy_proof_gen.rs index bfbbb8d8e5c..596dd90f99d 100644 --- a/zkvm_execution_layer/src/dummy_proof_gen.rs +++ b/zkvm_execution_layer/src/dummy_proof_gen.rs @@ -94,8 +94,14 @@ mod tests { let block_root = Hash256::repeat_byte(99); // Generate twice - let proof1 = generator.generate(slot, &block_hash, &block_root).await.unwrap(); - let proof2 = generator.generate(slot, &block_hash, &block_root).await.unwrap(); + let proof1 = generator + .generate(slot, &block_hash, &block_root) + .await + .unwrap(); + let proof2 = generator + .generate(slot, &block_hash, &block_root) + .await + .unwrap(); // Should be identical assert_eq!(proof1.proof_data_slice(), proof2.proof_data_slice()); diff --git a/zkvm_execution_layer/src/engine_api.rs b/zkvm_execution_layer/src/engine_api.rs index 89c98d2352e..8b266a7e50f 100644 --- a/zkvm_execution_layer/src/engine_api.rs +++ b/zkvm_execution_layer/src/engine_api.rs @@ -1,5 +1,5 @@ -use execution_layer::{PayloadStatus, Error as ExecutionLayerError, BlockProposalContentsType}; -use types::{EthSpec, ExecutionBlockHash, ExecPayload}; +use execution_layer::{BlockProposalContentsType, Error as ExecutionLayerError, PayloadStatus}; +use types::{EthSpec, ExecPayload, ExecutionBlockHash}; type PayloadId = [u8; 8]; diff --git a/zkvm_execution_layer/src/lib.rs b/zkvm_execution_layer/src/lib.rs index 36353288d67..6f3adc71f5e 100644 --- a/zkvm_execution_layer/src/lib.rs +++ b/zkvm_execution_layer/src/lib.rs @@ -13,9 +13,9 @@ pub mod dummy_proof_verifier; /// Engine API implementation for ZK-VM execution pub mod engine_api; +pub use config::ZKVMExecutionLayerConfig; /// Re-export the main ZK-VM engine API and config pub use engine_api::ZKVMEngineApi; -pub use config::ZKVMExecutionLayerConfig; pub use registry_proof_gen::GeneratorRegistry; #[test] diff --git a/zkvm_execution_layer/src/proof_verification.rs b/zkvm_execution_layer/src/proof_verification.rs index 
56b484320fd..164f56bd1ef 100644 --- a/zkvm_execution_layer/src/proof_verification.rs +++ b/zkvm_execution_layer/src/proof_verification.rs @@ -29,7 +29,7 @@ pub trait ProofVerifier: Send + Sync { /// Verify that the proof is valid. /// /// TODO(zkproofs): we can probably collapse Ok(false) and Err or make Ok(false) an enum variant - /// + /// /// Returns: /// - Ok(true) if valid, /// - Ok(false) if invalid (but well-formed) From aff99acb067a9e533aa4bd5e13d290d02e285e37 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Tue, 28 Oct 2025 20:21:38 +0000 Subject: [PATCH 42/67] delete proof cache --- zkvm_execution_layer/src/lib.rs | 1 - zkvm_execution_layer/src/proof_cache.rs | 324 ------------------------ 2 files changed, 325 deletions(-) delete mode 100644 zkvm_execution_layer/src/proof_cache.rs diff --git a/zkvm_execution_layer/src/lib.rs b/zkvm_execution_layer/src/lib.rs index 6f3adc71f5e..6acf2e7a7b8 100644 --- a/zkvm_execution_layer/src/lib.rs +++ b/zkvm_execution_layer/src/lib.rs @@ -1,5 +1,4 @@ pub mod config; -pub mod proof_cache; pub mod proof_generation; pub mod proof_verification; diff --git a/zkvm_execution_layer/src/proof_cache.rs b/zkvm_execution_layer/src/proof_cache.rs deleted file mode 100644 index 5f204a35140..00000000000 --- a/zkvm_execution_layer/src/proof_cache.rs +++ /dev/null @@ -1,324 +0,0 @@ -use lru::LruCache; -use std::num::NonZeroUsize; -use std::sync::Arc; -use tokio::sync::RwLock; -use types::{ExecutionBlockHash, ExecutionProof, ExecutionProofId}; - -/// Thread-safe LRU cache for execution proofs -/// -/// Stores proofs indexed by execution block hash. -/// -/// Note: Multiple proofs from different subnets can exist for the same block hash. 
-pub struct ProofCache { - cache: Arc>>>, -} - -impl ProofCache { - /// Create a new proof cache with the specified capacity - pub fn new(capacity: usize) -> Self { - let capacity = NonZeroUsize::new(capacity).expect("Cache capacity must be > 0"); - Self { - cache: Arc::new(RwLock::new(LruCache::new(capacity))), - } - } - - /// Insert a proof into the cache - /// - /// TODO(zkproofs): Add more docs - pub async fn insert(&self, proof: ExecutionProof) { - let block_hash = proof.block_hash; - let mut cache = self.cache.write().await; - - cache - .get_or_insert_mut(block_hash, Vec::new) - // TODO(zkproofs): can replace this with a HashSet so we don't need this - .retain(|p| p.proof_id != proof.proof_id); - - cache.get_mut(&block_hash).unwrap().push(proof); - } - - /// Get all proofs for a specific block hash - pub async fn get(&self, block_hash: &ExecutionBlockHash) -> Option> { - let cache = self.cache.read().await; - cache.peek(block_hash).cloned() - } - - /// Get proofs for a specific block hash from specific subnets - /// - /// TODO(zkproofs): This is cloning proofs, so can be expensive - pub async fn get_from_subnets( - &self, - block_hash: &ExecutionBlockHash, - proof_ids: &[ExecutionProofId], - ) -> Vec { - let cache = self.cache.read().await; - - cache - .peek(block_hash) - .map(|proofs| { - proofs - .iter() - .filter(|p| proof_ids.contains(&p.proof_id)) - .cloned() - .collect() - }) - .unwrap_or_default() - } - - /// Check if we have the minimum required number of proofs from _different_ subnets - pub async fn has_required_proofs( - &self, - block_hash: &ExecutionBlockHash, - min_required: usize, - ) -> bool { - let cache = self.cache.read().await; - - cache - .peek(block_hash) - .map(|proofs| proofs.len() >= min_required) - .unwrap_or(false) - } - - /// Get the number of unique proofs we have for a particular execution payload - pub async fn proof_count(&self, block_hash: &ExecutionBlockHash) -> usize { - let cache = self.cache.read().await; - - cache - 
.peek(block_hash) - .map(|proofs| proofs.len()) - .unwrap_or(0) - } - - /// Check if a proof exists from a specific subnet for a block - pub async fn has_proof_from_subnet( - &self, - block_hash: &ExecutionBlockHash, - proof_id: ExecutionProofId, - ) -> bool { - let cache = self.cache.read().await; - - cache - .peek(block_hash) - .map(|proofs| proofs.iter().any(|p| p.proof_id == proof_id)) - .unwrap_or(false) - } - - /// Remove all proofs for a specific block hash - pub async fn remove(&self, block_hash: &ExecutionBlockHash) -> Option> { - let mut cache = self.cache.write().await; - cache.pop(block_hash) - } - - /// Clear all cached proofs - pub async fn clear(&self) { - let mut cache = self.cache.write().await; - cache.clear(); - } - - /// Get the current number of entries in the cache - pub async fn len(&self) -> usize { - let cache = self.cache.read().await; - cache.len() - } - - /// Check if the cache is empty - pub async fn is_empty(&self) -> bool { - let cache = self.cache.read().await; - cache.is_empty() - } -} - -impl Clone for ProofCache { - fn clone(&self) -> Self { - Self { - cache: Arc::clone(&self.cache), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use types::Hash256; - - fn create_test_proof( - proof_id: ExecutionProofId, - block_hash: ExecutionBlockHash, - ) -> ExecutionProof { - use types::{FixedBytesExtended, Slot}; - ExecutionProof::new( - proof_id, - Slot::new(100), - block_hash, - Hash256::zero(), - vec![1, 2, 3], - ) - .unwrap() - } - - #[tokio::test] - async fn test_cache_insert_and_get() { - let cache = ProofCache::new(10); - let subnet_0 = ExecutionProofId::new(0).unwrap(); - let block_hash = ExecutionBlockHash::repeat_byte(1); - let proof = create_test_proof(subnet_0, block_hash); - - cache.insert(proof.clone()).await; - - let retrieved = cache.get(&block_hash).await; - assert!(retrieved.is_some()); - assert_eq!(retrieved.unwrap().len(), 1); - } - - #[tokio::test] - async fn test_cache_multiple_subnets() { - let cache = 
ProofCache::new(10); - let subnet_0 = ExecutionProofId::new(0).unwrap(); - let subnet_1 = ExecutionProofId::new(1).unwrap(); - let block_hash = ExecutionBlockHash::repeat_byte(1); - - let proof_0 = create_test_proof(subnet_0, block_hash); - let proof_1 = create_test_proof(subnet_1, block_hash); - - cache.insert(proof_0).await; - cache.insert(proof_1).await; - - let proofs = cache.get(&block_hash).await.unwrap(); - assert_eq!(proofs.len(), 2); - assert_eq!(cache.proof_count(&block_hash).await, 2); - } - - #[tokio::test] - async fn test_cache_replace_same_subnet() { - let cache = ProofCache::new(10); - let subnet_0 = ExecutionProofId::new(0).unwrap(); - let block_hash = ExecutionBlockHash::repeat_byte(1); - - let mut proof_1 = create_test_proof(subnet_0, block_hash); - proof_1.proof_data = vec![1].into(); // modify proof_data, so its a different execution proof - let proof_2 = create_test_proof(subnet_0, block_hash); - - cache.insert(proof_1).await; - cache.insert(proof_2.clone()).await; - - let proofs = cache.get(&block_hash).await.unwrap(); - assert_eq!(proofs.len(), 1); // Should only have one proof from subnet 0 - - assert_eq!(proofs[0], proof_2); // proof_2 should replace proof_1, since they are for the same subnet and blockhash - } - - #[tokio::test] - async fn test_has_required_proofs() { - let cache = ProofCache::new(10); - let subnet_0 = ExecutionProofId::new(0).unwrap(); - let subnet_1 = ExecutionProofId::new(1).unwrap(); - let block_hash = ExecutionBlockHash::repeat_byte(1); - - assert!(!cache.has_required_proofs(&block_hash, 2).await); - - cache.insert(create_test_proof(subnet_0, block_hash)).await; - assert!(!cache.has_required_proofs(&block_hash, 2).await); - - cache.insert(create_test_proof(subnet_1, block_hash)).await; - assert!(cache.has_required_proofs(&block_hash, 2).await); - } - - #[tokio::test] - async fn test_has_proof_from_subnet() { - let cache = ProofCache::new(10); - let subnet_0 = ExecutionProofId::new(0).unwrap(); - let subnet_1 = 
ExecutionProofId::new(1).unwrap(); - let block_hash = ExecutionBlockHash::repeat_byte(1); - - assert!(!cache.has_proof_from_subnet(&block_hash, subnet_0).await); - - cache.insert(create_test_proof(subnet_0, block_hash)).await; - - assert!(cache.has_proof_from_subnet(&block_hash, subnet_0).await); - assert!(!cache.has_proof_from_subnet(&block_hash, subnet_1).await); - } - - #[tokio::test] - async fn test_get_from_subnets() { - let cache = ProofCache::new(10); - let subnet_0 = ExecutionProofId::new(0).unwrap(); - let subnet_1 = ExecutionProofId::new(1).unwrap(); - let subnet_2 = ExecutionProofId::new(2).unwrap(); - let block_hash = ExecutionBlockHash::repeat_byte(1); - - cache.insert(create_test_proof(subnet_0, block_hash)).await; - cache.insert(create_test_proof(subnet_1, block_hash)).await; - cache.insert(create_test_proof(subnet_2, block_hash)).await; - - let proofs = cache - .get_from_subnets(&block_hash, &[subnet_0, subnet_2]) - .await; - assert_eq!(proofs.len(), 2); - assert!(proofs.iter().any(|p| p.proof_id == subnet_0)); - assert!(proofs.iter().any(|p| p.proof_id == subnet_2)); - assert!(!proofs.iter().any(|p| p.proof_id == subnet_1)); - } - - #[tokio::test] - async fn test_cache_remove() { - let cache = ProofCache::new(10); - let subnet_0 = ExecutionProofId::new(0).unwrap(); - let block_hash = ExecutionBlockHash::repeat_byte(1); - - cache.insert(create_test_proof(subnet_0, block_hash)).await; - assert!(cache.get(&block_hash).await.is_some()); - - let removed = cache.remove(&block_hash).await; - assert!(removed.is_some()); - assert!(cache.get(&block_hash).await.is_none()); - } - - #[tokio::test] - async fn test_cache_clear() { - let cache = ProofCache::new(10); - let subnet_0 = ExecutionProofId::new(0).unwrap(); - let block_hash_1 = ExecutionBlockHash::repeat_byte(1); - let block_hash_2 = ExecutionBlockHash::repeat_byte(2); - - cache - .insert(create_test_proof(subnet_0, block_hash_1)) - .await; - cache - .insert(create_test_proof(subnet_0, block_hash_2)) - 
.await; - - assert_eq!(cache.len().await, 2); - - cache.clear().await; - - assert_eq!(cache.len().await, 0); - assert!(cache.is_empty().await); - } - - #[tokio::test] - async fn test_cache_lru_eviction() { - let cache = ProofCache::new(2); - let subnet_0 = ExecutionProofId::new(0).unwrap(); - let block_hash_1 = ExecutionBlockHash::repeat_byte(1); - let block_hash_2 = ExecutionBlockHash::repeat_byte(2); - let block_hash_3 = ExecutionBlockHash::repeat_byte(3); - - cache - .insert(create_test_proof(subnet_0, block_hash_1)) - .await; - cache - .insert(create_test_proof(subnet_0, block_hash_2)) - .await; - cache - .insert(create_test_proof(subnet_0, block_hash_3)) - .await; - - // Cache should only hold 2 entries - assert_eq!(cache.len().await, 2); - - // block_hash_1 should be evicted (last recently used) - assert!(cache.get(&block_hash_1).await.is_none()); - assert!(cache.get(&block_hash_2).await.is_some()); - assert!(cache.get(&block_hash_3).await.is_some()); - } -} From ea49ea784535fb63f786a97b13f3b10c13363200 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Wed, 29 Oct 2025 00:42:55 +0000 Subject: [PATCH 43/67] clippy --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- .../src/data_availability_checker.rs | 3 +- .../src/observed_execution_proofs.rs | 2 +- .../lighthouse_network/tests/rpc_tests.rs | 2 +- .../network_beacon_processor/rpc_methods.rs | 21 ++++++------ .../network_beacon_processor/sync_methods.rs | 2 +- .../sync/block_lookups/single_block_lookup.rs | 6 ++-- beacon_node/network/src/sync/tests/lookups.rs | 13 ++------ .../proof_generation_service/src/lib.rs | 32 ++++++++----------- beacon_node/src/config.rs | 2 +- .../src/dummy_proof_verifier.rs | 2 +- zkvm_execution_layer/src/engine_api.rs | 4 +-- zkvm_execution_layer/src/lib.rs | 5 --- 13 files changed, 36 insertions(+), 60 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index a5033bb469a..b9a4f8ab2e7 100644 --- 
a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3671,7 +3671,7 @@ impl BeaconChain { let availability = self .data_availability_checker .put_verified_execution_proofs(block_root, std::iter::once(proof)) - .map_err(|e| BlockError::AvailabilityCheck(e))?; + .map_err(BlockError::AvailabilityCheck)?; self.process_availability(slot, availability, publish_fn) .await diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 80bfc5e9e96..8b0b714c71f 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -121,6 +121,7 @@ impl Debug for Availability { } impl DataAvailabilityChecker { + #[allow(clippy::too_many_arguments)] pub fn new( complete_blob_backfill: bool, slot_clock: T::SlotClock, @@ -464,7 +465,7 @@ impl DataAvailabilityChecker { "Proof verification failed: proof is invalid" ); return Err(AvailabilityCheckError::InvalidProof { - proof_id: proof_id, + proof_id, reason: "Proof verification returns false".to_string(), }); } diff --git a/beacon_node/beacon_chain/src/observed_execution_proofs.rs b/beacon_node/beacon_chain/src/observed_execution_proofs.rs index 3851cfa261d..e927ecad68f 100644 --- a/beacon_node/beacon_chain/src/observed_execution_proofs.rs +++ b/beacon_node/beacon_chain/src/observed_execution_proofs.rs @@ -85,7 +85,7 @@ impl ObservedExecutionProofs { } let key = ProofKey::new(slot, block_root); - let proof_ids = self.items.entry(key).or_insert_with(HashSet::new); + let proof_ids = self.items.entry(key).or_default(); let was_duplicate = !proof_ids.insert(proof_id); diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index d4ae1dadb31..d55a3c307ed 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -1894,7 +1894,7 
@@ fn test_tcp_execution_proofs_by_root_chunked() { let block_root = Hash256::random(); let block_hash = ExecutionBlockHash::from_root(Hash256::random()); - let proof_ids = vec![ + let proof_ids = [ ExecutionProofId::new(0).unwrap(), ExecutionProofId::new(1).unwrap(), ExecutionProofId::new(2).unwrap(), diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index e617a2b5b29..f063d7e8380 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -437,21 +437,18 @@ impl NetworkBeaconProcessor { let count_needed = request.count_needed as usize; // Get all execution proofs we have for this block from the DA checker - let available_proofs = match self + let Some(available_proofs) = self .chain .data_availability_checker .get_execution_proofs(&block_root) - { - Some(proofs) => proofs, - None => { - // No proofs available for this block - debug!( - %peer_id, - %block_root, - "No execution proofs available for peer" - ); - return Ok(()); - } + else { + // No proofs available for this block + debug!( + %peer_id, + %block_root, + "No execution proofs available for peer" + ); + return Ok(()); }; // Filter out proofs the peer already has and send up to count_needed diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index ecae5f44ab1..0ec9de668ed 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -1006,7 +1006,7 @@ impl NetworkBeaconProcessor { self: Arc>, block_root: Hash256, proofs: Vec>, - seen_timestamp: Duration, + _seen_timestamp: Duration, process_type: BlockProcessType, ) { // Get slot directly from the first proof. All proofs should be for the same block. 
diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 5f4869c0f7e..590ebc4763b 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -262,10 +262,8 @@ impl SingleBlockLookup { } if cx.chain.should_fetch_execution_proofs(block_epoch) { - if let Some(min_proofs) = cx.chain.min_execution_proofs_required() { - self.proof_request = - Some(ProofRequestState::new(self.block_root, min_proofs)); - } + self.proof_request = cx.chain.min_execution_proofs_required() + .map(|min_proofs| ProofRequestState::new(self.block_root, min_proofs)); } } else { // Wait to download the block before downloading blobs. Then we can be sure that the diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 08812602466..684cfcc3116 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -577,19 +577,10 @@ impl TestRig { pub(super) fn expect_proof_lookup_request(&mut self, block_root: Hash256) -> SingleLookupReqId { self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { - request, + request: RequestType::ExecutionProofsByRoot(req), app_request_id: AppRequestId::Sync(SyncRequestId::SingleExecutionProof { id }), .. 
- } => match request { - RequestType::ExecutionProofsByRoot(req) => { - if req.block_root == block_root { - Some(*id) - } else { - None - } - } - _ => None, - }, + } if req.block_root == block_root => Some(*id), _ => None, }) .unwrap_or_else(|_| panic!("Expected proof request for {block_root}")) diff --git a/beacon_node/proof_generation_service/src/lib.rs b/beacon_node/proof_generation_service/src/lib.rs index 728d2eb2d23..80c2a83e98c 100644 --- a/beacon_node/proof_generation_service/src/lib.rs +++ b/beacon_node/proof_generation_service/src/lib.rs @@ -162,16 +162,13 @@ impl ProofGenerationService { let chain = self.chain.clone(); // Get the generator for this proof type - let generator = match registry.get_generator(proof_id) { - Some(gen) => gen, - None => { - debug!( - slot = ?slot, - proof_id = ?proof_id, - "No generator found for proof type" - ); - return; - } + let Some(generator) = registry.get_generator(proof_id) else { + debug!( + slot = ?slot, + proof_id = ?proof_id, + "No generator found for proof type" + ); + return; }; // Spawn the generation task (async because generator.generate() is async) @@ -293,12 +290,11 @@ mod tests { /// Create a test harness with minimal setup fn build_test_harness(validator_count: usize) -> TestHarness { - let harness = BeaconChainHarness::builder(E::default()) + BeaconChainHarness::builder(E) .default_spec() .deterministic_keypairs(validator_count) .fresh_ephemeral_store() - .build(); - harness + .build() } #[tokio::test] @@ -316,9 +312,8 @@ mod tests { let proof_id = ExecutionProofId::new(0).unwrap(); // Should return false for a proof that hasn't been observed - assert_eq!( - service.check_if_proof_exists(slot, block_root, proof_id), - false + assert!( + !service.check_if_proof_exists(slot, block_root, proof_id) ); } @@ -344,9 +339,8 @@ mod tests { .unwrap(); // Should return true for an observed proof - assert_eq!( - service.check_if_proof_exists(slot, block_root, proof_id), - true + assert!( + 
service.check_if_proof_exists(slot, block_root, proof_id) ); } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index ca3c67c4f77..2697061d891 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -354,7 +354,7 @@ pub fn get_config( ) })? .into_iter() - .map(|id| ExecutionProofId::new(id)) + .map(ExecutionProofId::new) .collect::, _>>() .map_err(|e| format!("Invalid subnet ID: {}", e))? } else { diff --git a/zkvm_execution_layer/src/dummy_proof_verifier.rs b/zkvm_execution_layer/src/dummy_proof_verifier.rs index b7cc6178f78..b7d06a852c5 100644 --- a/zkvm_execution_layer/src/dummy_proof_verifier.rs +++ b/zkvm_execution_layer/src/dummy_proof_verifier.rs @@ -81,7 +81,7 @@ mod tests { let result = verifier.verify(&proof); assert!(result.is_ok()); - assert_eq!(result.unwrap(), true); + assert!(result.unwrap()); } #[tokio::test] diff --git a/zkvm_execution_layer/src/engine_api.rs b/zkvm_execution_layer/src/engine_api.rs index 8b266a7e50f..c0f7c4ebde2 100644 --- a/zkvm_execution_layer/src/engine_api.rs +++ b/zkvm_execution_layer/src/engine_api.rs @@ -21,9 +21,9 @@ impl ZKVMEngineApi { } /// Verify a new execution payload using ZK proof - pub async fn new_payload<'a>( + pub async fn new_payload( &self, - _execution_payload: &'a impl ExecPayload, + _execution_payload: &impl ExecPayload, ) -> Result { // TODO(zkproofs): There are some engine_api checks that should be made, but these should be // done when we have the proof, check the EL newPayload method to see what these are diff --git a/zkvm_execution_layer/src/lib.rs b/zkvm_execution_layer/src/lib.rs index 6acf2e7a7b8..d8a6ec74af7 100644 --- a/zkvm_execution_layer/src/lib.rs +++ b/zkvm_execution_layer/src/lib.rs @@ -16,8 +16,3 @@ pub use config::ZKVMExecutionLayerConfig; /// Re-export the main ZK-VM engine API and config pub use engine_api::ZKVMEngineApi; pub use registry_proof_gen::GeneratorRegistry; - -#[test] -fn add() { - assert!(1 + 1 == 2) -} From 
3e256d008b915136285c6a79235eb7edf3349afc Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Fri, 31 Oct 2025 22:52:10 +0000 Subject: [PATCH 44/67] proof_gen only with bash script --- .../network_params_proof_gen_only.sh | 155 ++++++++++++++++++ .../network_params_proof_gen_only.yaml | 21 +++ 2 files changed, 176 insertions(+) create mode 100755 scripts/local_testnet/network_params_proof_gen_only.sh create mode 100644 scripts/local_testnet/network_params_proof_gen_only.yaml diff --git a/scripts/local_testnet/network_params_proof_gen_only.sh b/scripts/local_testnet/network_params_proof_gen_only.sh new file mode 100755 index 00000000000..70c2c8f5c69 --- /dev/null +++ b/scripts/local_testnet/network_params_proof_gen_only.sh @@ -0,0 +1,155 @@ +#!/bin/bash + +# Helper script for monitoring execution proof generation and gossip +# Usage: ./network_params_proof_gen_only.sh [command] +# ENCLAVE=my-testnet ./network_params_proof_gen_only.sh [command] +# +# Set ENCLAVE environment variable to use a different testnet. 
+# Default: local-testnet + +ENCLAVE="${ENCLAVE:-local-testnet}" + +# Color output +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +RED='\033[0;31m' +NC='\033[0m' # No Color + +case "${1:-help}" in + generation) + echo -e "${GREEN}=== Proof Generation and Publishing ===${NC}" + for i in 1 2 3 4; do + echo -e "\n${YELLOW}--- Node $i ---${NC}" + kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep -E "(Generating execution proof|Proof successfully published)" | tail -5 + done + ;; + + gossip-subscribe) + echo -e "${GREEN}=== ExecutionProof Topic Subscriptions ===${NC}" + for i in 1 2 3 4; do + echo -e "\n${YELLOW}--- Node $i ---${NC}" + kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Subscribed to topic.*execution_proof" + done + ;; + + gossip-receive) + echo -e "${GREEN}=== Received Execution Proofs via Gossip ===${NC}" + for i in 1 2 3 4; do + count=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Received execution proof via gossip" | wc -l) + echo -e "${YELLOW}Node $i:${NC} $count proofs received" + done + ;; + + gossip-verified) + echo -e "${GREEN}=== Verified Execution Proofs ===${NC}" + for i in 1 2 3 4; do + count=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Successfully verified gossip execution proof" | wc -l) + echo -e "${YELLOW}Node $i:${NC} $count proofs verified" + done + ;; + + errors) + echo -e "${GREEN}=== Checking for Errors ===${NC}" + for i in 1 2 3 4; do + echo -e "\n${YELLOW}--- Node $i ---${NC}" + no_peers=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "NoPeersSubscribedToTopic.*execution_proof" | wc -l) + failed_sub=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Failed to subscribe.*execution_proof" | wc -l) + + if [ "$no_peers" -gt 0 ]; then + echo -e "${RED}NoPeersSubscribedToTopic errors: $no_peers${NC}" + else + echo -e "${GREEN}NoPeersSubscribedToTopic errors: 0${NC}" + fi + + if [ "$failed_sub" -gt 0 ]; then + echo 
-e "${RED}Failed subscription errors: $failed_sub${NC}" + else + echo -e "${GREEN}Failed subscription errors: 0${NC}" + fi + done + ;; + + zkvm-logs) + echo -e "${GREEN}=== ZKVM Debug Logs ===${NC}" + for i in 1 2 3 4; do + echo -e "\n${YELLOW}--- Node $i ---${NC}" + kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "ZKVM:" | head -5 + done + ;; + + fork-transition) + echo -e "${GREEN}=== Fork Transition Logs ===${NC}" + for i in 1 2 3 4; do + echo -e "\n${YELLOW}--- Node $i ---${NC}" + kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep -E "(Subscribing to new fork|subscribe_new_fork_topics called)" + done + ;; + + stats) + echo -e "${GREEN}=== Execution Proof Statistics ===${NC}" + for i in 1 2 3 4; do + generated=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Generating execution proof" | wc -l) + published=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Proof successfully published" | wc -l) + received=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Received execution proof via gossip" | wc -l) + verified=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Successfully verified gossip execution proof" | wc -l) + + echo -e "${YELLOW}Node $i:${NC}" + echo -e " Generated: $generated" + echo -e " Published: $published" + echo -e " Received: $received" + echo -e " Verified: $verified" + done + ;; + + follow) + NODE="${2:-1}" + echo -e "${GREEN}=== Following Execution Proof Logs for Node $NODE ===${NC}" + echo -e "${YELLOW}Press Ctrl+C to stop${NC}" + kurtosis service logs $ENCLAVE cl-$NODE-lighthouse-geth -f | grep --line-buffered -E "(Generating execution proof|Proof successfully published|Received execution proof via gossip|Successfully verified gossip execution proof)" + ;; + + all) + echo -e "${GREEN}=== Complete Execution Proof Report ===${NC}\n" + $0 zkvm-logs + echo -e "\n" + $0 fork-transition + echo -e "\n" + $0 gossip-subscribe + echo 
-e "\n" + $0 stats + echo -e "\n" + $0 errors + ;; + + help|*) + echo "Helper script for monitoring execution proof generation and gossip" + echo "" + echo "Usage: $0 [command]" + echo " ENCLAVE=name $0 [command]" + echo "" + echo "Environment Variables:" + echo " ENCLAVE - Testnet enclave name (default: local-testnet)" + echo "" + echo "Commands:" + echo " generation - Show proof generation and publishing logs" + echo " gossip-subscribe - Show ExecutionProof topic subscriptions" + echo " gossip-receive - Count received proofs on each node" + echo " gossip-verified - Count verified proofs on each node" + echo " errors - Check for gossip errors" + echo " zkvm-logs - Show ZKVM debug logs" + echo " fork-transition - Show fork transition logs" + echo " stats - Show proof statistics for all nodes" + echo " follow [node] - Follow proof logs in real-time (default: node 1)" + echo " all - Show complete report" + echo " help - Show this help message" + echo "" + echo "Examples:" + echo " # Use default testnet (local-testnet)" + echo " $0 stats" + echo " $0 follow 2" + echo " $0 all" + echo "" + echo " # Use custom testnet enclave" + echo " ENCLAVE=my-testnet $0 stats" + ;; +esac diff --git a/scripts/local_testnet/network_params_proof_gen_only.yaml b/scripts/local_testnet/network_params_proof_gen_only.yaml new file mode 100644 index 00000000000..aea91efb92b --- /dev/null +++ b/scripts/local_testnet/network_params_proof_gen_only.yaml @@ -0,0 +1,21 @@ +# Network configuration for testing execution proof generation +# All nodes have execution layers and are configured to generate proofs +participants: + - el_type: geth + el_image: ethereum/client-go:latest + cl_type: lighthouse + cl_image: lighthouse:local + cl_extra_params: + - --activate-zkvm + - --zkvm-generation-proof-types=0,1 + - --target-peers=3 + count: 4 +network_params: + electra_fork_epoch: 0 + fulu_fork_epoch: 1 + seconds_per_slot: 2 +global_log_level: debug +snooper_enabled: false +additional_services: + - dora + - 
prometheus_grafana \ No newline at end of file From 5a873bef942331d0e1cd94a837f76b46903ea2b0 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 3 Nov 2025 14:56:56 +0000 Subject: [PATCH 45/67] add dummy_el --- Cargo.lock | 34 ++ Cargo.toml | 1 + dummy_el/Cargo.toml | 20 + dummy_el/Dockerfile | 32 ++ dummy_el/README.md | 24 + dummy_el/geth-wrapper.sh | 29 ++ dummy_el/src/main.rs | 445 ++++++++++++++++++ ...network_params_mixed_proof_gen_verify.yaml | 2 +- 8 files changed, 586 insertions(+), 1 deletion(-) create mode 100644 dummy_el/Cargo.toml create mode 100644 dummy_el/Dockerfile create mode 100644 dummy_el/README.md create mode 100644 dummy_el/geth-wrapper.sh create mode 100644 dummy_el/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 3adac6c382a..52c80166149 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -741,6 +741,8 @@ dependencies = [ "http 1.3.0", "http-body 1.0.1", "http-body-util", + "hyper 1.6.0", + "hyper-util", "itoa", "matchit", "memchr", @@ -749,10 +751,15 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", "sync_wrapper 1.0.2", + "tokio", "tower 0.5.2", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -773,6 +780,7 @@ dependencies = [ "sync_wrapper 1.0.2", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -2476,6 +2484,22 @@ version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6add3b8cff394282be81f3fc1a0605db594ed69890078ca6e2cab1c408bcf04" +[[package]] +name = "dummy_el" +version = "0.1.0" +dependencies = [ + "anyhow", + "axum", + "clap", + "hex", + "jsonwebtoken", + "serde", + "serde_json", + "tokio", + "tracing", + "tracing-subscriber", +] + [[package]] name = "ecdsa" version = "0.14.8" @@ -8590,6 +8614,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" +dependencies = [ + "itoa", + "serde", +] + [[package]] name = "serde_repr" version = "0.1.20" diff --git a/Cargo.toml b/Cargo.toml index c277b738ebd..50faebe429c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -65,6 +65,7 @@ members = [ "crypto/eth2_wallet", "crypto/kzg", "database_manager", + "dummy_el", "lcli", "lighthouse", "lighthouse/environment", diff --git a/dummy_el/Cargo.toml b/dummy_el/Cargo.toml new file mode 100644 index 00000000000..7b25c4a679d --- /dev/null +++ b/dummy_el/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "dummy_el" +version = "0.1.0" +edition = "2024" + +[dependencies] +axum = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +tokio = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true, features = ["env-filter", "json"] } +clap = { workspace = true } +anyhow = { workspace = true } +jsonwebtoken = "9" +hex = { workspace = true } + +[[bin]] +name = "dummy_el" +path = "src/main.rs" diff --git a/dummy_el/Dockerfile b/dummy_el/Dockerfile new file mode 100644 index 00000000000..1ece25c7225 --- /dev/null +++ b/dummy_el/Dockerfile @@ -0,0 +1,32 @@ +# Multi-stage build for dummy_el +FROM rust:1.88.0-bullseye AS builder + +WORKDIR /build + +# Copy the entire workspace (needed for workspace structure) +COPY . . 
+ +# Build only dummy_el in release mode +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/build/target \ + cargo build --release -p dummy_el && \ + cp target/release/dummy_el /dummy_el + +# Runtime stage with minimal Ubuntu image +FROM ubuntu:22.04 + +RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \ + ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# Copy the binary from builder +COPY --from=builder /dummy_el /usr/local/bin/dummy_el + +# Create a fake 'geth' binary that runs dummy_el instead +# Kurtosis will call "geth init ..." and "geth --..." but we'll run dummy_el +COPY --from=builder /build/dummy_el/geth-wrapper.sh /usr/local/bin/geth +RUN chmod +x /usr/local/bin/geth + +# Expose default Engine API port +EXPOSE 8551 diff --git a/dummy_el/README.md b/dummy_el/README.md new file mode 100644 index 00000000000..0c3361a4a94 --- /dev/null +++ b/dummy_el/README.md @@ -0,0 +1,24 @@ +# Using Dummy EL + +This is a dummy EL that can be used with proof verification nodes. These nodes do not require an EL to function since they just take in proofs. + +## Quick Start + +### 1. Build the Docker Image + +From the lighthouse repository root: + +```bash +docker build -f dummy_el/Dockerfile -t dummy_el:local . +``` + +### 2. Adding to Kurtosis + +In Kurtosis, you can add the following: + +```yaml + - el_type: geth + el_image: dummy_el:local +``` + +Note that we need to use el_type `geth` as kurtosis will be looking for a binary named geth. We wrap calls to the Geth binary so that they are processed by our dummy_el. \ No newline at end of file diff --git a/dummy_el/geth-wrapper.sh b/dummy_el/geth-wrapper.sh new file mode 100644 index 00000000000..8112bb44e9c --- /dev/null +++ b/dummy_el/geth-wrapper.sh @@ -0,0 +1,29 @@ +#!/bin/sh +set -e + +# This is a wrapper that pretends to be geth but actually runs dummy_el +# Kurtosis calls: geth init ... && geth --authrpc.port=8551 ... 
+# We ignore the init, and when we see the actual geth command with authrpc.port, we start dummy_el + +echo "[dummy_el geth-wrapper] Called with: $@" + +# Check if this is the "geth init" command and ignore it +if echo "$@" | grep -q "init"; then + echo "[dummy_el geth-wrapper] Ignoring 'geth init' command" + exit 0 +fi + +# If we're here, it's the actual geth run command +# Kurtosis mounts JWT secret at /jwt/jwtsecret +JWT_PATH="/jwt/jwtsecret" + +echo "[dummy_el geth-wrapper] Starting dummy_el instead of geth" + +# Run dummy_el with JWT if available, otherwise without +if [ -f "$JWT_PATH" ]; then + echo "[dummy_el geth-wrapper] Using JWT from $JWT_PATH" + exec /usr/local/bin/dummy_el --host 0.0.0.0 --port 8551 --jwt-secret "$JWT_PATH" +else + echo "[dummy_el geth-wrapper] WARNING: No JWT file found at $JWT_PATH" + exec /usr/local/bin/dummy_el --host 0.0.0.0 --port 8551 +fi diff --git a/dummy_el/src/main.rs b/dummy_el/src/main.rs new file mode 100644 index 00000000000..fd0aa4520a7 --- /dev/null +++ b/dummy_el/src/main.rs @@ -0,0 +1,445 @@ +use axum::{ + extract::State, + http::{Request, StatusCode}, + middleware::{self, Next}, + response::Response, + routing::post, + Json, Router, +}; +use clap::Parser; +use jsonwebtoken::{Algorithm, DecodingKey, Validation}; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value as JsonValue}; +use std::net::SocketAddr; +use std::path::PathBuf; +use std::sync::Arc; +use tracing::{debug, error, info, warn}; + +const JSONRPC_VERSION: &str = "2.0"; +const JWT_SECRET_LENGTH: usize = 32; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + #[arg(long, default_value = "8551", help = "Engine API port")] + port: u16, + + #[arg(long, default_value = "127.0.0.1")] + host: String, + + #[arg(long, help = "Path to JWT secret file (hex encoded)")] + jwt_secret: Option, + + #[arg(long, default_value = "8545", help = "HTTP RPC port")] + rpc_port: u16, + + #[arg(long, default_value = 
"8546", help = "WebSocket port")] + ws_port: u16, + + #[arg(long, default_value = "9001", help = "Metrics port")] + metrics_port: u16, + + #[arg(long, default_value = "30303", help = "P2P discovery port (TCP/UDP)")] + p2p_port: u16, +} + +#[derive(Debug, Clone)] +struct AppState { + jwt_secret: Option>, +} + +#[derive(Debug, Serialize, Deserialize)] +struct JwtClaims { + iat: u64, + #[serde(skip_serializing_if = "Option::is_none")] + id: Option, + #[serde(skip_serializing_if = "Option::is_none")] + clv: Option, +} + +#[derive(Debug, Serialize, Deserialize)] +struct JsonRpcRequest { + jsonrpc: String, + method: String, + params: JsonValue, + id: JsonValue, +} + +#[derive(Debug, Serialize, Deserialize)] +struct JsonRpcResponse { + jsonrpc: String, + #[serde(skip_serializing_if = "Option::is_none")] + result: Option, + #[serde(skip_serializing_if = "Option::is_none")] + error: Option, + id: JsonValue, +} + +#[derive(Debug, Serialize, Deserialize)] +struct JsonRpcError { + code: i64, + message: String, +} + +async fn auth_middleware( + State(state): State>, + request: Request, + next: Next, +) -> Result { + // If no JWT secret is configured, skip auth + if state.jwt_secret.is_none() { + return Ok(next.run(request).await); + } + + let jwt_secret = state.jwt_secret.as_ref().unwrap(); + + // Check for Authorization header + let auth_header = request + .headers() + .get("Authorization") + .and_then(|h| h.to_str().ok()); + + match auth_header { + Some(auth) if auth.starts_with("Bearer ") => { + let token = &auth[7..]; // Skip "Bearer " + + // Validate JWT token + let mut validation = Validation::new(Algorithm::HS256); + validation.validate_exp = false; + validation.required_spec_claims.remove("exp"); + + match jsonwebtoken::decode::( + token, + &DecodingKey::from_secret(jwt_secret), + &validation, + ) { + Ok(_) => { + debug!("JWT authentication successful"); + Ok(next.run(request).await) + } + Err(e) => { + warn!("JWT validation failed: {:?}", e); + 
Err((StatusCode::UNAUTHORIZED, "Invalid JWT token".to_string())) + } + } + } + Some(_) => { + warn!("Authorization header present but not in Bearer format"); + Err(( + StatusCode::UNAUTHORIZED, + "Authorization header must be in format: Bearer ".to_string(), + )) + } + None => { + warn!("Missing Authorization header"); + Err(( + StatusCode::UNAUTHORIZED, + "Missing Authorization header".to_string(), + )) + } + } +} + +async fn handle_rpc( + State(_state): State>, + Json(request): Json, +) -> (StatusCode, Json) { + info!( + method = %request.method, + params = ?request.params, + "Received RPC request" + ); + + let result = match request.method.as_str() { + "eth_syncing" => { + debug!("eth_syncing: returning false (not syncing)"); + Ok(json!(false)) + } + "eth_getBlockByNumber" => { + debug!("eth_getBlockByNumber: returning null"); + Ok(json!(null)) + } + "eth_getBlockByHash" => { + debug!("eth_getBlockByHash: returning null"); + Ok(json!(null)) + } + "engine_newPayloadV1" | "engine_newPayloadV2" | "engine_newPayloadV3" | "engine_newPayloadV4" => { + debug!("{}: returning SYNCING status", request.method); + Ok(json!({ + "status": "SYNCING", + "latestValidHash": null, + "validationError": null + })) + } + "engine_forkchoiceUpdatedV1" | "engine_forkchoiceUpdatedV2" | "engine_forkchoiceUpdatedV3" => { + debug!("{}: returning SYNCING status", request.method); + Ok(json!({ + "payloadStatus": { + "status": "SYNCING", + "latestValidHash": null, + "validationError": null + }, + "payloadId": null + })) + } + "engine_getPayloadV1" | "engine_getPayloadV2" | "engine_getPayloadV3" | "engine_getPayloadV4" | "engine_getPayloadV5" => { + debug!("{}: returning error (payload not available)", request.method); + Err(JsonRpcError { + code: -38001, + message: "Unknown payload".to_string(), + }) + } + "engine_getPayloadBodiesByHashV1" => { + debug!("engine_getPayloadBodiesByHashV1: returning empty array"); + Ok(json!([])) + } + "engine_getPayloadBodiesByRangeV1" => { + 
debug!("engine_getPayloadBodiesByRangeV1: returning empty array"); + Ok(json!([])) + } + "engine_exchangeCapabilities" => { + let capabilities = vec![ + "engine_newPayloadV1", + "engine_newPayloadV2", + "engine_newPayloadV3", + "engine_newPayloadV4", + "engine_getPayloadV1", + "engine_getPayloadV2", + "engine_getPayloadV3", + "engine_getPayloadV4", + "engine_getPayloadV5", + "engine_forkchoiceUpdatedV1", + "engine_forkchoiceUpdatedV2", + "engine_forkchoiceUpdatedV3", + "engine_getPayloadBodiesByHashV1", + "engine_getPayloadBodiesByRangeV1", + "engine_getClientVersionV1", + "engine_getBlobsV1", + "engine_getBlobsV2", + ]; + debug!("engine_exchangeCapabilities: returning {} capabilities", capabilities.len()); + Ok(json!(capabilities)) + } + "engine_getClientVersionV1" => { + debug!("engine_getClientVersionV1: returning client info"); + Ok(json!([{ + "code": "DM", + "name": "Dummy-EL", + "version": "v0.1.0", + "commit": "00000000" + }])) + } + "engine_getBlobsV1" | "engine_getBlobsV2" => { + debug!("{}: returning empty array", request.method); + Ok(json!([])) + } + _ => { + info!(method = %request.method, "Method not found"); + Err(JsonRpcError { + code: -32601, + message: format!("Method not found: {}", request.method), + }) + } + }; + + let response = match result { + Ok(result) => JsonRpcResponse { + jsonrpc: JSONRPC_VERSION.to_string(), + result: Some(result), + error: None, + id: request.id, + }, + Err(error) => JsonRpcResponse { + jsonrpc: JSONRPC_VERSION.to_string(), + result: None, + error: Some(error), + id: request.id, + }, + }; + + info!(method = %request.method, success = response.error.is_none(), "RPC response sent"); + (StatusCode::OK, Json(response)) +} + +// Simple RPC handler without JWT auth for non-Engine API ports +async fn handle_simple_rpc(Json(request): Json) -> (StatusCode, Json) { + debug!(method = %request.method, "Received simple RPC request"); + + let result: Result = match request.method.as_str() { + "admin_nodeInfo" => { + Ok(json!({ + 
"id": "0ecd4a2c5f7c2a304e3acbec67efea275510d31c304fe47f4e626a2ebd5fb101", + "name": "Dummy-EL/v0.1.0", + "enode": "enode://dummy@127.0.0.1:30303", + "enr": "enr:-Iq4QDummy0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", + "ip": "127.0.0.1", + "ports": { + "discovery": 30303, + "listener": 30303 + } + })) + } + _ => { + // For any other method, just return a success response + Ok(json!(null)) + } + }; + + let response = JsonRpcResponse { + jsonrpc: JSONRPC_VERSION.to_string(), + result: Some(result.unwrap_or(json!(null))), + error: None, + id: request.id, + }; + + (StatusCode::OK, Json(response)) +} + +fn strip_prefix(s: &str) -> &str { + s.strip_prefix("0x").unwrap_or(s) +} + +fn read_jwt_secret(path: &PathBuf) -> anyhow::Result> { + let contents = std::fs::read_to_string(path)?; + let hex_str = strip_prefix(contents.trim()); + let bytes = hex::decode(hex_str)?; + + if bytes.len() != JWT_SECRET_LENGTH { + anyhow::bail!( + "Invalid JWT secret length. 
Expected {} bytes, got {}", + JWT_SECRET_LENGTH, + bytes.len() + ); + } + + Ok(bytes) +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + tracing_subscriber::fmt() + .with_env_filter( + tracing_subscriber::EnvFilter::try_from_default_env() + .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")), + ) + .init(); + + let args = Args::parse(); + + // Read JWT secret if provided + let jwt_secret = match &args.jwt_secret { + Some(path) => { + match read_jwt_secret(path) { + Ok(secret) => { + info!("JWT secret loaded from {:?}", path); + Some(secret) + } + Err(e) => { + error!("Failed to read JWT secret from {:?}: {}", path, e); + return Err(e); + } + } + } + None => { + warn!("No JWT secret provided - authentication disabled!"); + warn!("This is insecure and should only be used for testing"); + None + } + }; + + info!( + host = %args.host, + engine_port = args.port, + rpc_port = args.rpc_port, + ws_port = args.ws_port, + metrics_port = args.metrics_port, + p2p_port = args.p2p_port, + jwt_auth = jwt_secret.is_some(), + "Starting Dummy Execution Layer" + ); + + let state = Arc::new(AppState { jwt_secret }); + + // Engine API server (port 8551) with JWT auth + let engine_app = Router::new() + .route("/", post(handle_rpc)) + .layer(middleware::from_fn_with_state(state.clone(), auth_middleware)) + .with_state(state.clone()); + + let engine_addr = format!("{}:{}", args.host, args.port) + .parse::() + .expect("Invalid engine address"); + + info!("Engine API listening on http://{}", engine_addr); + + // Simple RPC server for HTTP RPC (port 8545) - no JWT auth + let rpc_app = Router::new().route("/", post(handle_simple_rpc)); + let rpc_addr = format!("{}:{}", args.host, args.rpc_port) + .parse::() + .expect("Invalid RPC address"); + info!("HTTP RPC listening on http://{}", rpc_addr); + + // Simple RPC server for WebSocket (port 8546) - no JWT auth + let ws_app = Router::new().route("/", post(handle_simple_rpc)); + let ws_addr = format!("{}:{}", args.host, 
args.ws_port) + .parse::() + .expect("Invalid WebSocket address"); + info!("WebSocket RPC listening on http://{}", ws_addr); + + // Simple server for metrics (port 9001) + let metrics_app = Router::new().route("/", post(handle_simple_rpc)); + let metrics_addr = format!("{}:{}", args.host, args.metrics_port) + .parse::() + .expect("Invalid metrics address"); + info!("Metrics listening on http://{}", metrics_addr); + + // Bind P2P discovery ports (TCP and UDP) - just to satisfy Kurtosis port checks + let p2p_tcp_addr = format!("{}:{}", args.host, args.p2p_port) + .parse::() + .expect("Invalid P2P TCP address"); + let p2p_udp_addr = format!("{}:{}", args.host, args.p2p_port) + .parse::() + .expect("Invalid P2P UDP address"); + + // Spawn P2P TCP listener in a task to keep it alive + let p2p_tcp_listener = tokio::net::TcpListener::bind(p2p_tcp_addr).await?; + info!("P2P TCP listening on {}", p2p_tcp_addr); + let p2p_tcp_task = tokio::spawn(async move { + loop { + // Accept connections but do nothing with them + if let Ok((_socket, _addr)) = p2p_tcp_listener.accept().await { + // Connection accepted, just drop it + } + } + }); + + // Spawn P2P UDP listener in a task to keep it alive + let p2p_udp_socket = tokio::net::UdpSocket::bind(p2p_udp_addr).await?; + info!("P2P UDP listening on {}", p2p_udp_addr); + let p2p_udp_task = tokio::spawn(async move { + let mut buf = [0u8; 1024]; + loop { + // Receive packets but do nothing with them + let _ = p2p_udp_socket.recv(&mut buf).await; + } + }); + + info!("Ready to accept requests on all ports"); + + // Spawn all servers concurrently + let engine_listener = tokio::net::TcpListener::bind(engine_addr).await?; + let rpc_listener = tokio::net::TcpListener::bind(rpc_addr).await?; + let ws_listener = tokio::net::TcpListener::bind(ws_addr).await?; + let metrics_listener = tokio::net::TcpListener::bind(metrics_addr).await?; + + tokio::select! 
{ + result = axum::serve(engine_listener, engine_app) => result?, + result = axum::serve(rpc_listener, rpc_app) => result?, + result = axum::serve(ws_listener, ws_app) => result?, + result = axum::serve(metrics_listener, metrics_app) => result?, + _ = p2p_tcp_task => {}, + _ = p2p_udp_task => {}, + } + + Ok(()) +} diff --git a/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml b/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml index edc59f78e5c..f0d0967a166 100644 --- a/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml +++ b/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml @@ -14,7 +14,7 @@ participants: # TODO(zkproofs): Currently there is no way to add no client here # We likely want to use our dummy zkvm EL here - el_type: geth - el_image: ethereum/client-go:latest + el_image: dummy_el:local cl_type: lighthouse cl_image: lighthouse:local cl_extra_params: From 64e5a45a974fb2bd6d72f2475d05040a6728be3e Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Mon, 3 Nov 2025 22:41:22 +0000 Subject: [PATCH 46/67] update Dockerfile to use caches --- Dockerfile | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index f925836e48e..50bf1e5898e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,13 +1,20 @@ FROM rust:1.88.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev -COPY . lighthouse +WORKDIR /lighthouse + ARG FEATURES ARG PROFILE=release ARG CARGO_USE_GIT_CLI=true ENV FEATURES=$FEATURES ENV PROFILE=$PROFILE ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_USE_GIT_CLI -RUN cd lighthouse && make +ENV CARGO_INCREMENTAL=1 + +COPY . . +# Persist the registry and target file across builds. 
See: https://docs.docker.com/build/cache/optimize/#use-cache-mounts +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/lighthouse/target \ + make FROM ubuntu:22.04 RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \ @@ -15,4 +22,4 @@ RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-reco ca-certificates \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse +COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse \ No newline at end of file From 0fe4341975dac9e3be9f85fa318fcd878d428aca Mon Sep 17 00:00:00 2001 From: kevaundray Date: Sat, 13 Dec 2025 15:35:56 +0000 Subject: [PATCH 47/67] Merge unstable into optional-proofs (#2) * Update local testnet scripts for the fulu fork (#8489) * Remove `fulu-devnet-3` testing on CI * Delete `scripts/local_testnet/network_params_das.yaml` and consolidate it into the main `network_params.yaml` file we use on CI * Delete enclave before building image, so it doesn't cause slow image building. Co-Authored-By: Jimmy Chen * Fix data columns sorting when reconstructing blobs (#8510) Closes https://github.com/sigp/lighthouse/issues/8509 Co-Authored-By: Antoine James * Instrument attestation signing. (#8508) We noticed attestation signing taking 2+ seconds on some of our hoodi nodes and the current traces doesn't provide enough details. This PR adds a few more spans to the `attestation_duty_cycle` code path in the VC. Before: image After: image Co-Authored-By: Jimmy Chen * Always use committee index 0 when getting attestation data (#8171) * #8046 Split the function `publish_attestations_and_aggregates` into `publish_attestations` and `handle_aggregates`, so that for attestations, only 1 task is spawned. 
Co-Authored-By: Tan Chee Keong Co-Authored-By: chonghe <44791194+chong-he@users.noreply.github.com> Co-Authored-By: Michael Sproul Co-Authored-By: Michael Sproul * Move deposit contract artifacts to /target (#8518) Alternative to: - https://github.com/sigp/lighthouse/pull/8488 Refactors deposit_contract crate to comply with Rust build conventions by placing generated artifacts in the build output directory. Co-Authored-By: Michael Sproul * Move beacon state endpoints to a separate module. (#8529) Part of the http api refactor to move endpoint handlers to separate modules. This should improve code maintainability, incremental compilation time and rust analyzer performance. Co-Authored-By: Jimmy Chen * Refactor `consensus/types` (#7827) Organize and categorize `consensus/types` into modules based on their relation to key consensus structures/concepts. This is a precursor to a sensible public interface. While this refactor is very opinionated, I am open to suggestions on module names, or type groupings if my current ones are inappropriate. Co-Authored-By: Mac L * Move validator http endpoints to a separate module (#8536) Continuation of: * #8529 Moving `/validator` endpoints out of `http_api` to a separation module. This should improve code maintainability, incremental compilation time and rust analyzer performance. This is a tedious but straight forward change, so we're going with a pair & insta-merge approach to avoid painful & slow async review. @michaelsproul and I paired on the first commit - I believe we are almost done, will pair with @pawanjay176 tomorrow to wrap it up and merge tomorrow. (cc @macladson ) Co-Authored-By: Jimmy Chen * Move beacon pool http api to its own separate module (#8543) Continuation of: * #8536 Moving `/beacon/pool` endpoints out of `http_api` to a separation module. This should improve code maintainability, incremental compilation time and rust analyzer performance. 
This is a tedious but straight forward change, so we're going with a pair & insta-merge approach to avoid painful & slow async review Co-Authored-By: Jimmy Chen * Reduce `eth2` dependency space (#8524) Remove certain dependencies from `eth2`, and feature-gate others which are only used by certain endpoints. | Removed | Optional | Dev only | | -------- | -------- | -------- | | `either` `enr` `libp2p-identity` `multiaddr` | `protoarray` `eth2_keystore` `eip_3076` `zeroize` `reqwest-eventsource` `futures` `futures-util` | `rand` `test_random_derive` | This is done by adding an `events` feature which enables the events endpoint and its associated dependencies. The `lighthouse` feature also enables its associated dependencies making them optional. The networking-adjacent dependencies were removed by just having certain fields use a `String` instead of an explicit network type. This means the user should handle conversion at the call site instead. This is a bit spicy, but I believe `PeerId`, `Enr` and `Multiaddr` are easily converted to and from `String`s so I think it's fine and reduces our dependency space by a lot. The alternative is to feature gate these types behind a `network` feature instead. Co-Authored-By: Mac L * Clarify `alloy` dependencies (#8550) Previously, we had a pinned version of `alloy` to fix some crate compatibility issues we encountered during the migration away from `ethers`. Now that the migration is complete we should remove the pin. This also updates alloy crates to their latest versions. Co-Authored-By: Mac L * Remove `consensus/types` re-exports (#8540) There are certain crates which we re-export within `types` which creates a fragmented DevEx, where there are various ways to import the same crates. 
```rust // consensus/types/src/lib.rs pub use bls::{ AggregatePublicKey, AggregateSignature, Error as BlsError, Keypair, PUBLIC_KEY_BYTES_LEN, PublicKey, PublicKeyBytes, SIGNATURE_BYTES_LEN, SecretKey, Signature, SignatureBytes, get_withdrawal_credentials, }; pub use context_deserialize::{ContextDeserialize, context_deserialize}; pub use fixed_bytes::FixedBytesExtended; pub use milhouse::{self, List, Vector}; pub use ssz_types::{BitList, BitVector, FixedVector, VariableList, typenum, typenum::Unsigned}; pub use superstruct::superstruct; ``` This PR removes these re-exports and makes it explicit that these types are imported from a non-`consensus/types` crate. Co-Authored-By: Mac L * Fix testnet script (#8557) Fix an issue where a kurtosis testnet script was failing because no supernodes were provided ``` There was an error interpreting Starlark code Evaluation error: fail: Fulu fork is enabled (epoch: 0) but no supernodes are configured, no nodes have 128 or more validators, and perfect_peerdas_enabled is not enabled. Either configure a supernode, ensure at least one node has 128+ validators, or enable perfect_peerdas_enabled in network_params with 16 participants. 
at [github.com/ethpandaops/ethereum-package/main.star:83:57]: run at [github.com/ethpandaops/ethereum-package/src/package_io/input_parser.star:377:17]: input_parser at [0:0]: fail ``` Co-Authored-By: Eitan Seri-Levi Co-Authored-By: Pawan Dhananjay * Do not request attestation data when attestation duty is empty (#8559) Co-Authored-By: Tan Chee Keong * Rust 1.92 lints (#8567) Co-Authored-By: Eitan Seri-Levi --------- Co-authored-by: Jimmy Chen Co-authored-by: Jimmy Chen Co-authored-by: 0xMushow <105550256+0xMushow@users.noreply.github.com> Co-authored-by: Antoine James Co-authored-by: chonghe <44791194+chong-he@users.noreply.github.com> Co-authored-by: Michael Sproul Co-authored-by: Michael Sproul Co-authored-by: Mac L Co-authored-by: Eitan Seri-Levi Co-authored-by: Pawan Dhananjay Co-authored-by: Tan Chee Keong --- .github/workflows/local-testnet.yml | 2 +- Cargo.lock | 138 +- Cargo.toml | 16 +- .../src/validator/slashing_protection.rs | 3 +- beacon_node/Cargo.toml | 1 + beacon_node/beacon_chain/Cargo.toml | 5 +- .../beacon_chain/src/attester_cache.rs | 7 +- .../beacon_chain/src/beacon_block_streamer.rs | 6 +- beacon_node/beacon_chain/src/beacon_chain.rs | 5 +- .../src/beacon_fork_choice_store.rs | 3 +- .../beacon_chain/src/beacon_proposer_cache.rs | 5 +- .../beacon_chain/src/block_times_cache.rs | 2 +- .../beacon_chain/src/block_verification.rs | 3 +- beacon_node/beacon_chain/src/builder.rs | 4 +- .../overflow_lru_cache.rs | 9 +- beacon_node/beacon_chain/src/errors.rs | 3 +- .../beacon_chain/src/graffiti_calculator.rs | 3 +- .../beacon_chain/src/historical_blocks.rs | 3 +- beacon_node/beacon_chain/src/kzg_utils.rs | 36 +- .../src/naive_aggregation_pool.rs | 7 +- .../beacon_chain/src/observed_aggregates.rs | 3 +- .../beacon_chain/src/observed_attesters.rs | 5 +- .../src/observed_block_producers.rs | 3 +- .../beacon_chain/src/observed_slashable.rs | 3 +- .../beacon_chain/src/shuffling_cache.rs | 1 + .../beacon_chain/src/single_attestation.rs | 4 +- 
.../src/sync_committee_verification.rs | 5 +- beacon_node/beacon_chain/src/test_utils.rs | 11 +- .../beacon_chain/src/validator_monitor.rs | 6 +- .../src/validator_pubkey_cache.rs | 7 +- .../tests/attestation_production.rs | 5 +- .../tests/attestation_verification.rs | 8 +- .../beacon_chain/tests/blob_verification.rs | 1 + .../beacon_chain/tests/block_verification.rs | 2 + .../beacon_chain/tests/column_verification.rs | 1 + .../beacon_chain/tests/op_verification.rs | 1 + beacon_node/beacon_chain/tests/rewards.rs | 3 +- .../beacon_chain/tests/schema_stability.rs | 3 +- beacon_node/beacon_chain/tests/store_tests.rs | 5 +- .../tests/sync_committee_verification.rs | 7 +- beacon_node/beacon_chain/tests/tests.rs | 5 +- .../beacon_chain/tests/validator_monitor.rs | 3 +- beacon_node/builder_client/Cargo.toml | 2 + beacon_node/builder_client/src/lib.rs | 9 +- beacon_node/execution_layer/Cargo.toml | 4 +- beacon_node/execution_layer/src/engine_api.rs | 6 +- .../execution_layer/src/engine_api/http.rs | 8 +- .../src/engine_api/json_structures.rs | 4 +- beacon_node/execution_layer/src/lib.rs | 3 +- .../test_utils/execution_block_generator.rs | 7 +- .../src/test_utils/mock_builder.rs | 12 +- .../src/test_utils/mock_execution_layer.rs | 3 +- .../execution_layer/src/versioned_hashes.rs | 3 +- beacon_node/genesis/Cargo.toml | 1 + beacon_node/genesis/src/interop.rs | 6 +- beacon_node/http_api/Cargo.toml | 5 +- beacon_node/http_api/src/beacon/mod.rs | 2 + beacon_node/http_api/src/beacon/pool.rs | 522 ++ beacon_node/http_api/src/beacon/states.rs | 787 +++ beacon_node/http_api/src/block_id.rs | 9 +- beacon_node/http_api/src/lib.rs | 5002 ++++++----------- beacon_node/http_api/src/light_client.rs | 3 +- beacon_node/http_api/src/produce_block.rs | 1 + beacon_node/http_api/src/publish_blocks.rs | 11 +- beacon_node/http_api/src/sync_committees.rs | 2 +- beacon_node/http_api/src/utils.rs | 90 + beacon_node/http_api/src/validator.rs | 22 - beacon_node/http_api/src/validator/mod.rs | 972 ++++ 
beacon_node/http_api/src/version.rs | 12 +- .../tests/broadcast_validation_tests.rs | 5 +- beacon_node/http_api/tests/fork_tests.rs | 6 +- .../http_api/tests/interactive_tests.rs | 5 +- beacon_node/http_api/tests/tests.rs | 26 +- beacon_node/lighthouse_network/Cargo.toml | 5 +- .../lighthouse_network/src/discovery/mod.rs | 3 +- .../src/peer_manager/mod.rs | 3 +- .../lighthouse_network/src/rpc/codec.rs | 12 +- .../lighthouse_network/src/rpc/methods.rs | 4 +- .../lighthouse_network/src/rpc/protocol.rs | 3 +- .../lighthouse_network/src/types/mod.rs | 3 +- .../lighthouse_network/src/types/topics.rs | 3 +- .../lighthouse_network/tests/common.rs | 6 +- .../lighthouse_network/tests/rpc_tests.rs | 9 +- beacon_node/network/Cargo.toml | 2 + .../src/network_beacon_processor/tests.rs | 5 +- beacon_node/network/src/service.rs | 3 +- beacon_node/network/src/status.rs | 3 +- .../src/sync/block_lookups/parent_chain.rs | 3 +- .../src/sync/block_sidecar_coupling.rs | 3 +- beacon_node/network/src/sync/tests/lookups.rs | 2 +- beacon_node/operation_pool/Cargo.toml | 4 + beacon_node/operation_pool/src/attestation.rs | 3 +- .../operation_pool/src/attestation_storage.rs | 8 +- beacon_node/operation_pool/src/lib.rs | 4 +- beacon_node/operation_pool/src/persistence.rs | 1 + .../operation_pool/src/reward_cache.rs | 5 +- beacon_node/src/config.rs | 1 + beacon_node/store/Cargo.toml | 3 + beacon_node/store/src/chunked_vector.rs | 3 + beacon_node/store/src/config.rs | 2 +- .../store/src/database/leveldb_impl.rs | 3 +- beacon_node/store/src/errors.rs | 2 +- beacon_node/store/src/hdiff.rs | 3 +- beacon_node/store/src/hot_cold_store.rs | 2 + beacon_node/store/src/iter.rs | 4 +- beacon_node/store/src/partial_beacon_state.rs | 5 +- common/account_utils/Cargo.toml | 1 + .../src/validator_definitions.rs | 3 +- common/deposit_contract/Cargo.toml | 5 +- common/deposit_contract/build.rs | 9 +- common/deposit_contract/src/lib.rs | 31 +- common/eip_3076/Cargo.toml | 2 + common/eip_3076/src/lib.rs | 5 +- 
common/eth2/Cargo.toml | 30 +- .../eth2}/src/beacon_response.rs | 8 +- common/eth2/src/error.rs | 2 + common/eth2/src/lib.rs | 25 +- common/eth2/src/lighthouse_vc/http_client.rs | 1 + common/eth2/src/lighthouse_vc/std_types.rs | 3 +- common/eth2/src/lighthouse_vc/types.rs | 2 +- common/eth2/src/types.rs | 27 +- common/eth2_network_config/Cargo.toml | 1 + common/eth2_network_config/src/lib.rs | 3 +- common/health_metrics/Cargo.toml | 2 +- common/monitoring_api/Cargo.toml | 2 +- common/validator_dir/src/builder.rs | 4 +- common/validator_dir/src/validator_dir.rs | 3 +- common/validator_dir/tests/tests.rs | 3 +- consensus/fork_choice/Cargo.toml | 1 + consensus/fork_choice/src/fork_choice.rs | 3 +- consensus/fork_choice/tests/tests.rs | 7 +- consensus/proto_array/Cargo.toml | 1 + .../src/fork_choice_test_definition.rs | 5 +- .../fork_choice_test_definition/no_votes.rs | 2 +- consensus/proto_array/src/proto_array.rs | 5 +- .../src/proto_array_fork_choice.rs | 8 +- consensus/state_processing/Cargo.toml | 3 + .../src/common/get_attesting_indices.rs | 5 +- .../src/common/slash_validator.rs | 1 + consensus/state_processing/src/epoch_cache.rs | 5 +- consensus/state_processing/src/genesis.rs | 1 + .../src/per_block_processing.rs | 1 + .../altair/sync_committee.rs | 6 +- .../process_operations.rs | 3 +- .../per_block_processing/signature_sets.rs | 15 +- .../src/per_block_processing/tests.rs | 3 + .../per_block_processing/verify_deposit.rs | 1 + .../altair/participation_flag_updates.rs | 2 +- .../epoch_processing_summary.rs | 5 +- .../src/per_epoch_processing/errors.rs | 3 +- .../historical_roots_update.rs | 2 +- .../justification_and_finalization_state.rs | 3 +- .../src/per_epoch_processing/resets.rs | 3 +- .../src/per_epoch_processing/single_pass.rs | 7 +- .../src/per_epoch_processing/slashings.rs | 3 +- .../src/per_slot_processing.rs | 1 + .../state_processing/src/state_advance.rs | 3 +- .../state_processing/src/upgrade/altair.rs | 3 +- 
.../state_processing/src/upgrade/capella.rs | 3 +- .../state_processing/src/upgrade/fulu.rs | 5 +- consensus/types/Cargo.toml | 4 + consensus/types/benches/benches.rs | 3 +- .../{ => attestation}/aggregate_and_proof.rs | 19 +- .../src/{ => attestation}/attestation.rs | 29 +- .../src/{ => attestation}/attestation_data.rs | 11 +- .../src/{ => attestation}/attestation_duty.rs | 3 +- .../src/{ => attestation}/beacon_committee.rs | 2 +- .../types/src/{ => attestation}/checkpoint.rs | 8 +- .../{ => attestation}/indexed_attestation.rs | 21 +- consensus/types/src/attestation/mod.rs | 39 + .../{ => attestation}/participation_flags.rs | 6 +- .../{ => attestation}/pending_attestation.rs | 7 +- .../src/{ => attestation}/selection_proof.rs | 12 +- .../src/{ => attestation}/shuffling_id.rs | 9 +- .../signed_aggregate_and_proof.rs | 21 +- .../types/src/{ => attestation}/subnet_id.rs | 14 +- .../types/src/{ => block}/beacon_block.rs | 43 +- .../src/{ => block}/beacon_block_body.rs | 95 +- .../src/{ => block}/beacon_block_header.rs | 11 +- consensus/types/src/block/mod.rs | 26 + .../src/{ => block}/signed_beacon_block.rs | 38 +- .../{ => block}/signed_beacon_block_header.rs | 14 +- .../types/src/{ => builder}/builder_bid.rs | 22 +- consensus/types/src/builder/mod.rs | 6 + .../consolidation_request.rs | 10 +- consensus/types/src/consolidation/mod.rs | 5 + .../pending_consolidation.rs | 6 +- .../src/{ => core}/application_domain.rs | 0 consensus/types/src/{ => core}/chain_spec.rs | 23 +- .../types/src/{ => core}/config_and_preset.rs | 15 +- consensus/types/src/{ => core}/consts.rs | 2 +- consensus/types/src/{ => core}/enr_fork_id.rs | 5 +- consensus/types/src/{ => core}/eth_spec.rs | 24 +- consensus/types/src/{ => core}/graffiti.rs | 11 +- consensus/types/src/core/mod.rs | 44 + .../types/src/{ => core}/non_zero_usize.rs | 0 consensus/types/src/{ => core}/preset.rs | 4 +- .../types/src/{ => core}/relative_epoch.rs | 3 +- .../types/src/{ => core}/signing_data.rs | 7 +- 
consensus/types/src/{ => core}/slot_data.rs | 2 +- consensus/types/src/{ => core}/slot_epoch.rs | 10 +- .../types/src/{ => core}/slot_epoch_macros.rs | 0 consensus/types/src/{ => core}/sqlite.rs | 3 +- .../types/src/{ => data}/blob_sidecar.rs | 28 +- .../{ => data}/data_column_custody_group.rs | 10 +- .../src/{ => data}/data_column_sidecar.rs | 21 +- .../src/{ => data}/data_column_subnet_id.rs | 25 +- consensus/types/src/data/mod.rs | 23 + consensus/types/src/{ => deposit}/deposit.rs | 9 +- .../types/src/{ => deposit}/deposit_data.rs | 11 +- .../src/{ => deposit}/deposit_message.rs | 11 +- .../src/{ => deposit}/deposit_request.rs | 8 +- .../{ => deposit}/deposit_tree_snapshot.rs | 5 +- consensus/types/src/deposit/mod.rs | 13 + .../src/{ => deposit}/pending_deposit.rs | 10 +- .../bls_to_execution_change.rs | 11 +- .../types/src/{ => execution}/eth1_data.rs | 7 +- .../{ => execution}/execution_block_hash.rs | 9 +- .../{ => execution}/execution_block_header.rs | 7 +- .../src/{ => execution}/execution_payload.rs | 26 +- .../execution_payload_header.rs | 27 +- .../src/{ => execution}/execution_requests.rs | 13 +- consensus/types/src/execution/mod.rs | 36 + .../types/src/{ => execution}/payload.rs | 122 +- .../signed_bls_to_execution_change.rs | 6 +- consensus/types/src/exit/mod.rs | 5 + .../src/{ => exit}/signed_voluntary_exit.rs | 6 +- .../types/src/{ => exit}/voluntary_exit.rs | 15 +- consensus/types/src/{ => fork}/fork.rs | 5 +- .../types/src/{ => fork}/fork_context.rs | 14 +- consensus/types/src/{ => fork}/fork_data.rs | 9 +- consensus/types/src/fork/fork_macros.rs | 60 + consensus/types/src/{ => fork}/fork_name.rs | 71 +- .../types/src/fork/fork_version_decode.rs | 6 + consensus/types/src/fork/mod.rs | 15 + consensus/types/src/kzg_ext/consts.rs | 3 + consensus/types/src/kzg_ext/mod.rs | 27 + consensus/types/src/lib.rs | 421 +- consensus/types/src/light_client/consts.rs | 21 + consensus/types/src/light_client/error.rs | 41 + .../light_client_bootstrap.rs | 63 +- 
.../light_client_finality_update.rs | 54 +- .../{ => light_client}/light_client_header.rs | 55 +- .../light_client_optimistic_update.rs | 31 +- .../{ => light_client}/light_client_update.rs | 156 +- consensus/types/src/light_client/mod.rs | 37 + consensus/types/src/runtime_fixed_vector.rs | 90 - consensus/types/src/runtime_var_list.rs | 387 -- .../src/{ => slashing}/attester_slashing.rs | 14 +- consensus/types/src/slashing/mod.rs | 8 + .../src/{ => slashing}/proposer_slashing.rs | 7 +- .../types/src/{ => state}/activation_queue.rs | 6 +- .../src/{beacon_state => state}/balance.rs | 0 .../types/src/{ => state}/beacon_state.rs | 531 +- .../committee_cache.rs | 38 +- .../types/src/{ => state}/epoch_cache.rs | 9 +- .../src/{beacon_state => state}/exit_cache.rs | 10 +- .../types/src/{ => state}/historical_batch.rs | 12 +- .../src/{ => state}/historical_summary.rs | 11 +- .../types/src/{beacon_state => state}/iter.rs | 8 +- consensus/types/src/state/mod.rs | 35 + .../progressive_balances_cache.rs | 14 +- .../{beacon_state => state}/pubkey_cache.rs | 2 +- .../slashings_cache.rs | 3 +- .../contribution_and_proof.rs | 15 +- consensus/types/src/sync_committee/mod.rs | 25 + .../signed_contribution_and_proof.rs | 15 +- .../{ => sync_committee}/sync_aggregate.rs | 14 +- .../sync_aggregator_selection_data.rs | 10 +- .../{ => sync_committee}/sync_committee.rs | 10 +- .../sync_committee_contribution.rs | 14 +- .../sync_committee_message.rs | 14 +- .../sync_committee_subscription.rs | 3 +- .../src/{ => sync_committee}/sync_duty.rs | 9 +- .../sync_selection_proof.rs | 25 +- .../{ => sync_committee}/sync_subnet_id.rs | 17 +- .../generate_deterministic_keypairs.rs | 5 +- .../generate_random_block_and_blobs.rs | 18 +- consensus/types/src/test_utils/mod.rs | 24 +- .../src/test_utils/test_random/address.rs | 4 +- .../test_random/aggregate_signature.rs | 6 +- .../src/test_utils/test_random/bitfield.rs | 9 +- .../src/test_utils/test_random/hash256.rs | 4 +- 
.../test_utils/test_random/kzg_commitment.rs | 4 +- .../src/test_utils/test_random/kzg_proof.rs | 7 +- .../types/src/test_utils/test_random/mod.rs | 15 + .../src/test_utils/test_random/public_key.rs | 6 +- .../test_random/public_key_bytes.rs | 6 +- .../src/test_utils/test_random/secret_key.rs | 6 +- .../src/test_utils/test_random/signature.rs | 6 +- .../test_utils/test_random/signature_bytes.rs | 6 +- .../{ => test_random}/test_random.rs | 23 +- .../src/test_utils/test_random/uint256.rs | 4 +- consensus/types/src/validator/mod.rs | 9 + .../proposer_preparation_data.rs | 3 +- .../types/src/{ => validator}/validator.rs | 16 +- .../validator_registration_data.rs | 4 +- .../{ => validator}/validator_subscription.rs | 3 +- consensus/types/src/withdrawal/mod.rs | 9 + .../pending_partial_withdrawal.rs | 6 +- .../types/src/{ => withdrawal}/withdrawal.rs | 12 +- .../withdrawal_credentials.rs | 7 +- .../{ => withdrawal}/withdrawal_request.rs | 7 +- .../tests.rs => tests/committee_cache.rs} | 11 +- .../beacon_state/tests.rs => tests/state.rs} | 18 +- lcli/Cargo.toml | 1 + lcli/src/generate_bootnode_enr.rs | 3 +- lcli/src/http_sync.rs | 1 - lighthouse/tests/account_manager.rs | 2 +- lighthouse/tests/validator_manager.rs | 1 + scripts/local_testnet/README.md | 2 +- scripts/local_testnet/network_params.yaml | 30 +- scripts/local_testnet/network_params_das.yaml | 41 - scripts/local_testnet/start_local_testnet.sh | 10 +- .../tests/checkpoint-sync-config-devnet.yaml | 24 - .../tests/genesis-sync-config-electra.yaml | 3 +- scripts/tests/genesis-sync-config-fulu.yaml | 3 +- scripts/tests/network_params.yaml | 3 +- slasher/Cargo.toml | 3 + slasher/src/attester_record.rs | 4 +- slasher/src/database.rs | 9 +- slasher/src/test_utils.rs | 9 +- testing/ef_tests/Cargo.toml | 3 + .../src/cases/merkle_proof_validity.rs | 5 +- testing/ef_tests/src/cases/ssz_generic.rs | 6 +- testing/ef_tests/tests/tests.rs | 1 + .../execution_engine_integration/Cargo.toml | 9 +- .../src/test_rig.rs | 5 +- 
.../src/transactions.rs | 4 +- testing/simulator/Cargo.toml | 1 + testing/simulator/src/checks.rs | 3 +- testing/state_transition_vectors/Cargo.toml | 2 + testing/state_transition_vectors/src/main.rs | 8 +- testing/web3signer_tests/Cargo.toml | 3 + testing/web3signer_tests/src/lib.rs | 3 + .../beacon_node_fallback/Cargo.toml | 1 + .../beacon_node_fallback/src/lib.rs | 3 +- .../doppelganger_service/Cargo.toml | 1 + .../doppelganger_service/src/lib.rs | 3 +- validator_client/http_api/Cargo.toml | 5 +- validator_client/http_api/src/keystores.rs | 3 +- validator_client/http_api/src/lib.rs | 5 +- validator_client/http_api/src/remotekeys.rs | 3 +- validator_client/http_api/src/test_utils.rs | 1 + validator_client/http_api/src/tests.rs | 1 + .../http_api/src/tests/keystores.rs | 4 + .../initialized_validators/src/lib.rs | 3 +- .../lighthouse_validator_store/Cargo.toml | 1 + .../lighthouse_validator_store/src/lib.rs | 11 +- validator_client/signing_method/Cargo.toml | 2 + validator_client/signing_method/src/lib.rs | 3 + .../signing_method/src/web3signer.rs | 1 + .../slashing_protection/Cargo.toml | 2 + .../src/attestation_tests.rs | 3 +- .../src/bin/test_generator.rs | 3 +- .../slashing_protection/src/block_tests.rs | 3 +- .../src/extra_interchange_tests.rs | 2 +- .../src/interchange_test.rs | 4 +- .../slashing_protection/src/lib.rs | 5 +- .../src/slashing_database.rs | 5 +- .../slashing_protection/tests/migration.rs | 3 +- .../src/attestation_service.rs | 373 +- .../validator_services/src/block_service.rs | 3 +- .../validator_services/src/duties_service.rs | 3 +- .../validator_services/src/sync.rs | 3 +- .../src/sync_committee_service.rs | 5 +- validator_client/validator_store/Cargo.toml | 1 + validator_client/validator_store/src/lib.rs | 7 +- validator_manager/Cargo.toml | 1 + validator_manager/src/common.rs | 1 + validator_manager/src/create_validators.rs | 2 + validator_manager/src/delete_validators.rs | 2 +- validator_manager/src/exit_validators.rs | 3 +- 
validator_manager/src/list_validators.rs | 3 +- validator_manager/src/move_validators.rs | 3 +- 375 files changed, 7074 insertions(+), 5742 deletions(-) create mode 100644 beacon_node/http_api/src/beacon/mod.rs create mode 100644 beacon_node/http_api/src/beacon/pool.rs create mode 100644 beacon_node/http_api/src/beacon/states.rs create mode 100644 beacon_node/http_api/src/utils.rs delete mode 100644 beacon_node/http_api/src/validator.rs create mode 100644 beacon_node/http_api/src/validator/mod.rs rename {consensus/types => common/eth2}/src/beacon_response.rs (97%) rename consensus/types/src/{ => attestation}/aggregate_and_proof.rs (93%) rename consensus/types/src/{ => attestation}/attestation.rs (97%) rename consensus/types/src/{ => attestation}/attestation_data.rs (87%) rename consensus/types/src/{ => attestation}/attestation_duty.rs (92%) rename consensus/types/src/{ => attestation}/beacon_committee.rs (92%) rename consensus/types/src/{ => attestation}/checkpoint.rs (88%) rename consensus/types/src/{ => attestation}/indexed_attestation.rs (96%) create mode 100644 consensus/types/src/attestation/mod.rs rename consensus/types/src/{ => attestation}/participation_flags.rs (96%) rename consensus/types/src/{ => attestation}/pending_attestation.rs (84%) rename consensus/types/src/{ => attestation}/selection_proof.rs (95%) rename consensus/types/src/{ => attestation}/shuffling_id.rs (93%) rename consensus/types/src/{ => attestation}/signed_aggregate_and_proof.rs (90%) rename consensus/types/src/{ => attestation}/subnet_id.rs (97%) rename consensus/types/src/{ => block}/beacon_block.rs (96%) rename consensus/types/src/{ => block}/beacon_block_body.rs (93%) rename consensus/types/src/{ => block}/beacon_block_header.rs (90%) create mode 100644 consensus/types/src/block/mod.rs rename consensus/types/src/{ => block}/signed_beacon_block.rs (95%) rename consensus/types/src/{ => block}/signed_beacon_block_header.rs (84%) rename consensus/types/src/{ => builder}/builder_bid.rs 
(93%) create mode 100644 consensus/types/src/builder/mod.rs rename consensus/types/src/{ => consolidation}/consolidation_request.rs (84%) create mode 100644 consensus/types/src/consolidation/mod.rs rename consensus/types/src/{ => consolidation}/pending_consolidation.rs (86%) rename consensus/types/src/{ => core}/application_domain.rs (100%) rename consensus/types/src/{ => core}/chain_spec.rs (99%) rename consensus/types/src/{ => core}/config_and_preset.rs (95%) rename consensus/types/src/{ => core}/consts.rs (94%) rename consensus/types/src/{ => core}/enr_fork_id.rs (95%) rename consensus/types/src/{ => core}/eth_spec.rs (98%) rename consensus/types/src/{ => core}/graffiti.rs (98%) create mode 100644 consensus/types/src/core/mod.rs rename consensus/types/src/{ => core}/non_zero_usize.rs (100%) rename consensus/types/src/{ => core}/preset.rs (99%) rename consensus/types/src/{ => core}/relative_epoch.rs (99%) rename consensus/types/src/{ => core}/signing_data.rs (85%) rename consensus/types/src/{ => core}/slot_data.rs (92%) rename consensus/types/src/{ => core}/slot_epoch.rs (98%) rename consensus/types/src/{ => core}/slot_epoch_macros.rs (100%) rename consensus/types/src/{ => core}/sqlite.rs (96%) rename consensus/types/src/{ => data}/blob_sidecar.rs (94%) rename consensus/types/src/{ => data}/data_column_custody_group.rs (98%) rename consensus/types/src/{ => data}/data_column_sidecar.rs (94%) rename consensus/types/src/{ => data}/data_column_subnet_id.rs (80%) create mode 100644 consensus/types/src/data/mod.rs rename consensus/types/src/{ => deposit}/deposit.rs (78%) rename consensus/types/src/{ => deposit}/deposit_data.rs (86%) rename consensus/types/src/{ => deposit}/deposit_message.rs (81%) rename consensus/types/src/{ => deposit}/deposit_request.rs (86%) rename consensus/types/src/{ => deposit}/deposit_tree_snapshot.rs (95%) create mode 100644 consensus/types/src/deposit/mod.rs rename consensus/types/src/{ => deposit}/pending_deposit.rs (78%) rename 
consensus/types/src/{ => execution}/bls_to_execution_change.rs (83%) rename consensus/types/src/{ => execution}/eth1_data.rs (86%) rename consensus/types/src/{ => execution}/execution_block_hash.rs (96%) rename consensus/types/src/{ => execution}/execution_block_header.rs (98%) rename consensus/types/src/{ => execution}/execution_payload.rs (92%) rename consensus/types/src/{ => execution}/execution_payload_header.rs (96%) rename consensus/types/src/{ => execution}/execution_requests.rs (93%) create mode 100644 consensus/types/src/execution/mod.rs rename consensus/types/src/{ => execution}/payload.rs (91%) rename consensus/types/src/{ => execution}/signed_bls_to_execution_change.rs (78%) create mode 100644 consensus/types/src/exit/mod.rs rename consensus/types/src/{ => exit}/signed_voluntary_exit.rs (84%) rename consensus/types/src/{ => exit}/voluntary_exit.rs (90%) rename consensus/types/src/{ => fork}/fork.rs (96%) rename consensus/types/src/{ => fork}/fork_context.rs (97%) rename consensus/types/src/{ => fork}/fork_data.rs (88%) create mode 100644 consensus/types/src/fork/fork_macros.rs rename consensus/types/src/{ => fork}/fork_name.rs (84%) create mode 100644 consensus/types/src/fork/fork_version_decode.rs create mode 100644 consensus/types/src/fork/mod.rs create mode 100644 consensus/types/src/kzg_ext/consts.rs create mode 100644 consensus/types/src/kzg_ext/mod.rs create mode 100644 consensus/types/src/light_client/consts.rs create mode 100644 consensus/types/src/light_client/error.rs rename consensus/types/src/{ => light_client}/light_client_bootstrap.rs (88%) rename consensus/types/src/{ => light_client}/light_client_finality_update.rs (89%) rename consensus/types/src/{ => light_client}/light_client_header.rs (91%) rename consensus/types/src/{ => light_client}/light_client_optimistic_update.rs (94%) rename consensus/types/src/{ => light_client}/light_client_update.rs (86%) create mode 100644 consensus/types/src/light_client/mod.rs delete mode 100644 
consensus/types/src/runtime_fixed_vector.rs delete mode 100644 consensus/types/src/runtime_var_list.rs rename consensus/types/src/{ => slashing}/attester_slashing.rs (96%) create mode 100644 consensus/types/src/slashing/mod.rs rename consensus/types/src/{ => slashing}/proposer_slashing.rs (86%) rename consensus/types/src/{ => state}/activation_queue.rs (95%) rename consensus/types/src/{beacon_state => state}/balance.rs (100%) rename consensus/types/src/{ => state}/beacon_state.rs (88%) rename consensus/types/src/{beacon_state => state}/committee_cache.rs (93%) rename consensus/types/src/{ => state}/epoch_cache.rs (97%) rename consensus/types/src/{beacon_state => state}/exit_cache.rs (97%) rename consensus/types/src/{ => state}/historical_batch.rs (81%) rename consensus/types/src/{ => state}/historical_summary.rs (87%) rename consensus/types/src/{beacon_state => state}/iter.rs (95%) create mode 100644 consensus/types/src/state/mod.rs rename consensus/types/src/{beacon_state => state}/progressive_balances_cache.rs (98%) rename consensus/types/src/{beacon_state => state}/pubkey_cache.rs (98%) rename consensus/types/src/{beacon_state => state}/slashings_cache.rs (96%) rename consensus/types/src/{ => sync_committee}/contribution_and_proof.rs (88%) create mode 100644 consensus/types/src/sync_committee/mod.rs rename consensus/types/src/{ => sync_committee}/signed_contribution_and_proof.rs (87%) rename consensus/types/src/{ => sync_committee}/sync_aggregate.rs (91%) rename consensus/types/src/{ => sync_committee}/sync_aggregator_selection_data.rs (82%) rename consensus/types/src/{ => sync_committee}/sync_committee.rs (95%) rename consensus/types/src/{ => sync_committee}/sync_committee_contribution.rs (93%) rename consensus/types/src/{ => sync_committee}/sync_committee_message.rs (88%) rename consensus/types/src/{ => sync_committee}/sync_committee_subscription.rs (96%) rename consensus/types/src/{ => sync_committee}/sync_duty.rs (96%) rename consensus/types/src/{ => 
sync_committee}/sync_selection_proof.rs (90%) rename consensus/types/src/{ => sync_committee}/sync_subnet_id.rs (90%) create mode 100644 consensus/types/src/test_utils/test_random/mod.rs rename consensus/types/src/test_utils/{ => test_random}/test_random.rs (90%) create mode 100644 consensus/types/src/validator/mod.rs rename consensus/types/src/{ => validator}/proposer_preparation_data.rs (95%) rename consensus/types/src/{ => validator}/validator.rs (97%) rename consensus/types/src/{ => validator}/validator_registration_data.rs (93%) rename consensus/types/src/{ => validator}/validator_subscription.rs (93%) create mode 100644 consensus/types/src/withdrawal/mod.rs rename consensus/types/src/{ => withdrawal}/pending_partial_withdrawal.rs (85%) rename consensus/types/src/{ => withdrawal}/withdrawal.rs (73%) rename consensus/types/src/{ => withdrawal}/withdrawal_credentials.rs (91%) rename consensus/types/src/{ => withdrawal}/withdrawal_request.rs (87%) rename consensus/types/{src/beacon_state/committee_cache/tests.rs => tests/committee_cache.rs} (97%) rename consensus/types/{src/beacon_state/tests.rs => tests/state.rs} (97%) delete mode 100644 scripts/local_testnet/network_params_das.yaml delete mode 100644 scripts/tests/checkpoint-sync-config-devnet.yaml diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index c129c0ec95c..9992273e0a7 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -179,7 +179,7 @@ jobs: continue-on-error: true strategy: matrix: - network: [sepolia, devnet] + network: [sepolia] steps: - uses: actions/checkout@v5 diff --git a/Cargo.lock b/Cargo.lock index b7b148b7b20..dc7031d7640 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -34,6 +34,7 @@ dependencies = [ name = "account_utils" version = "0.1.0" dependencies = [ + "bls", "eth2_keystore", "eth2_wallet", "filesystem", @@ -142,9 +143,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.42" +version = 
"1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3abecb92ba478a285fbf5689100dbafe4003ded4a09bf4b5ef62cca87cd4f79e" +checksum = "2e318e25fb719e747a7e8db1654170fc185024f3ed5b10f86c08d448a912f6e2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -153,6 +154,7 @@ dependencies = [ "alloy-trie", "alloy-tx-macros", "auto_impl", + "borsh", "c-kzg", "derive_more 2.0.1", "either", @@ -168,9 +170,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.0.42" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e864d4f11d1fb8d3ac2fd8f3a15f1ee46d55ec6d116b342ed1b2cb737f25894" +checksum = "364380a845193a317bcb7a5398fc86cdb66c47ebe010771dde05f6869bf9e64a" dependencies = [ "alloy-consensus", "alloy-eips", @@ -191,8 +193,6 @@ dependencies = [ "alloy-sol-type-parser", "alloy-sol-types", "itoa", - "serde", - "serde_json", "winnow", ] @@ -236,9 +236,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e867b5fd52ed0372a95016f3a37cbff95a9d5409230fbaef2d8ea00e8618098" +checksum = "a4c4d7c5839d9f3a467900c625416b24328450c65702eb3d8caff8813e4d1d33" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -271,9 +271,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcab4c51fb1273e3b0f59078e0cdf8aa99f697925b09f0d2055c18be46b4d48c" +checksum = "f72cf87cda808e593381fb9f005ffa4d2475552b7a6c5ac33d087bf77d82abd0" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -286,9 +286,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.0.42" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5d6ed73d440bae8f27771b7cd507fa8f10f19ddf0b8f67e7622a52e0dbf798e" +checksum = 
"12aeb37b6f2e61b93b1c3d34d01ee720207c76fe447e2a2c217e433ac75b17f5" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -312,9 +312,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.42" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219dccd2cf753a43bd9b0fbb7771a16927ffdb56e43e3a15755bef1a74d614aa" +checksum = "abd29ace62872083e30929cd9b282d82723196d196db589f3ceda67edcc05552" dependencies = [ "alloy-consensus", "alloy-eips", @@ -355,9 +355,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.42" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0ef8cbc2b68e2512acf04b2d296c05c98a661bc460462add6414528f4ff3d9b" +checksum = "9b710636d7126e08003b8217e24c09f0cca0b46d62f650a841736891b1ed1fc1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -416,9 +416,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7c2630fde9ff6033a780635e1af6ef40e92d74a9cacb8af3defc1b15cfebca5" +checksum = "d0882e72d2c1c0c79dcf4ab60a67472d3f009a949f774d4c17d0bdb669cfde05" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -439,9 +439,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.0.42" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "425e14ee32eb8b7edd6a2247fe0ed640785e6eba75af27db27f1e6220c15ef0d" +checksum = "6a63fb40ed24e4c92505f488f9dd256e2afaed17faa1b7a221086ebba74f4122" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -450,9 +450,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.42" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0185f68a0f8391ab996d335a887087d7ccdbc97952efab3516f6307d456ba2cd" +checksum = 
"9eae0c7c40da20684548cbc8577b6b7447f7bf4ddbac363df95e3da220e41e72" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -471,9 +471,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01e856112bfa0d9adc85bd7c13db03fad0e71d1d6fb4c2010e475b6718108236" +checksum = "c0df1987ed0ff2d0159d76b52e7ddfc4e4fbddacc54d2fbee765e0d14d7c01b5" dependencies = [ "alloy-primitives", "serde", @@ -482,9 +482,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a4f629da632d5279bbc5731634f0f5c9484ad9c4cad0cd974d9669dc1f46d6" +checksum = "6ff69deedee7232d7ce5330259025b868c5e6a52fa8dffda2c861fb3a5889b24" dependencies = [ "alloy-primitives", "async-trait", @@ -497,9 +497,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.0.42" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "590dcaeb290cdce23155e68af4791d093afc3754b1a331198a25d2d44c5456e8" +checksum = "72cfe0be3ec5a8c1a46b2e5a7047ed41121d360d97f4405bb7c1c784880c86cb" dependencies = [ "alloy-consensus", "alloy-network", @@ -583,9 +583,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe215a2f9b51d5f1aa5c8cf22c8be8cdb354934de09c9a4e37aefb79b77552fd" +checksum = "be98b07210d24acf5b793c99b759e9a696e4a2e67593aec0487ae3b3e1a2478c" dependencies = [ "alloy-json-rpc", "auto_impl", @@ -606,9 +606,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc1b37b1a30d23deb3a8746e882c70b384c574d355bc2bbea9ea918b0c31366e" +checksum = 
"4198a1ee82e562cab85e7f3d5921aab725d9bd154b6ad5017f82df1695877c97" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -637,9 +637,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ccf423f6de62e8ce1d6c7a11fb7508ae3536d02e0d68aaeb05c8669337d0937" +checksum = "333544408503f42d7d3792bfc0f7218b643d968a03d2c0ed383ae558fb4a76d0" dependencies = [ "darling 0.21.3", "proc-macro2", @@ -1246,6 +1246,7 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "execution_layer", + "fixed_bytes", "fork_choice", "futures", "genesis", @@ -1260,6 +1261,7 @@ dependencies = [ "maplit", "merkle_proof", "metrics", + "milhouse", "mockall", "mockall_double", "once_cell", @@ -1288,6 +1290,7 @@ dependencies = [ "tracing", "tree_hash", "tree_hash_derive", + "typenum", "types", "zkvm_execution_layer", "zstd 0.13.3", @@ -1299,6 +1302,7 @@ version = "8.0.1" dependencies = [ "account_utils", "beacon_chain", + "bls", "clap", "clap_utils", "client", @@ -1330,6 +1334,7 @@ dependencies = [ name = "beacon_node_fallback" version = "0.1.0" dependencies = [ + "bls", "clap", "eth2", "futures", @@ -1602,6 +1607,8 @@ dependencies = [ name = "builder_client" version = "0.1.0" dependencies = [ + "bls", + "context_deserialize", "eth2", "ethereum_ssz", "lighthouse_version", @@ -2558,6 +2565,7 @@ dependencies = [ "alloy-dyn-abi", "alloy-json-abi", "alloy-primitives", + "bls", "ethereum_ssz", "hex", "reqwest", @@ -2768,6 +2776,7 @@ name = "doppelganger_service" version = "0.1.0" dependencies = [ "beacon_node_fallback", + "bls", "environment", "eth2", "futures", @@ -2892,16 +2901,19 @@ dependencies = [ "hex", "kzg", "logging", + "milhouse", "rayon", "serde", "serde_json", "serde_repr", "serde_yaml", "snap", + "ssz_types", "state_processing", "swap_or_not_shuffle", "tree_hash", "tree_hash_derive", + "typenum", "types", ] @@ -2929,7 +2941,9 @@ name = "eip_3076" version = "0.1.0" 
dependencies = [ "arbitrary", + "bls", "ethereum_serde_utils", + "fixed_bytes", "serde", "serde_json", "tempfile", @@ -3153,19 +3167,17 @@ dependencies = [ name = "eth2" version = "0.1.0" dependencies = [ + "bls", + "context_deserialize", "educe", "eip_3076", - "either", - "enr", "eth2_keystore", "ethereum_serde_utils", "ethereum_ssz", "ethereum_ssz_derive", "futures", "futures-util", - "libp2p-identity", "mediatype", - "multiaddr", "pretty_reqwest_error", "proto_array", "rand 0.9.2", @@ -3175,6 +3187,7 @@ dependencies = [ "serde", "serde_json", "ssz_types", + "superstruct", "test_random_derive", "tokio", "types", @@ -3244,6 +3257,7 @@ dependencies = [ "discv5", "eth2_config", "ethereum_ssz", + "fixed_bytes", "kzg", "pretty_reqwest_error", "reqwest", @@ -3384,8 +3398,10 @@ dependencies = [ "alloy-rpc-types-eth", "alloy-signer-local", "async-channel 1.9.0", + "bls", "deposit_contract", "execution_layer", + "fixed_bytes", "fork_choice", "futures", "hex", @@ -3397,6 +3413,7 @@ dependencies = [ "task_executor", "tempfile", "tokio", + "typenum", "types", ] @@ -3409,6 +3426,7 @@ dependencies = [ "alloy-rlp", "alloy-rpc-types-eth", "arc-swap", + "bls", "builder_client", "bytes", "eth2", @@ -3447,6 +3465,7 @@ dependencies = [ "tree_hash", "tree_hash_derive", "triehash", + "typenum", "types", "warp", "zeroize", @@ -3620,6 +3639,7 @@ dependencies = [ "beacon_chain", "ethereum_ssz", "ethereum_ssz_derive", + "fixed_bytes", "logging", "metrics", "proto_array", @@ -3810,6 +3830,7 @@ dependencies = [ name = "genesis" version = "0.2.0" dependencies = [ + "bls", "ethereum_hashing", "ethereum_ssz", "int_to_bytes", @@ -4255,14 +4276,17 @@ version = "0.1.0" dependencies = [ "beacon_chain", "beacon_processor", + "bls", "bs58 0.4.0", "bytes", + "context_deserialize", "directory", "either", "eth2", "ethereum_serde_utils", "ethereum_ssz", "execution_layer", + "fixed_bytes", "futures", "genesis", "health_metrics", @@ -4969,6 +4993,7 @@ dependencies = [ "ethereum_hashing", "ethereum_ssz", 
"execution_layer", + "fixed_bytes", "hex", "lighthouse_network", "lighthouse_version", @@ -5518,6 +5543,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "async-channel 1.9.0", + "bls", "bytes", "delay_map", "directory", @@ -5527,6 +5553,7 @@ dependencies = [ "eth2", "ethereum_ssz", "ethereum_ssz_derive", + "fixed_bytes", "fnv", "futures", "hex", @@ -5559,6 +5586,7 @@ dependencies = [ "tokio-util", "tracing", "tracing-subscriber", + "typenum", "types", "unsigned-varint 0.8.0", ] @@ -5573,6 +5601,7 @@ version = "0.1.0" dependencies = [ "account_utils", "beacon_node_fallback", + "bls", "doppelganger_service", "either", "environment", @@ -6240,6 +6269,7 @@ dependencies = [ "eth2_network_config", "ethereum_ssz", "execution_layer", + "fixed_bytes", "fnv", "futures", "genesis", @@ -6272,6 +6302,7 @@ dependencies = [ "tokio-stream", "tracing", "tracing-subscriber", + "typenum", "types", ] @@ -6676,9 +6707,11 @@ version = "0.2.0" dependencies = [ "beacon_chain", "bitvec", + "bls", "educe", "ethereum_ssz", "ethereum_ssz_derive", + "fixed_bytes", "itertools 0.10.5", "maplit", "metrics", @@ -6688,7 +6721,9 @@ dependencies = [ "serde", "state_processing", "store", + "superstruct", "tokio", + "typenum", "types", ] @@ -7200,6 +7235,7 @@ version = "0.2.0" dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", + "fixed_bytes", "safe_arith", "serde", "serde_yaml", @@ -8381,6 +8417,7 @@ dependencies = [ name = "signing_method" version = "0.1.0" dependencies = [ + "bls", "eth2_keystore", "ethereum_serde_utils", "lockfile", @@ -8388,6 +8425,7 @@ dependencies = [ "reqwest", "serde", "task_executor", + "tracing", "types", "url", "validator_metrics", @@ -8435,6 +8473,7 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", + "typenum", "types", ] @@ -8449,11 +8488,13 @@ name = "slasher" version = "0.1.0" dependencies = [ "bincode", + "bls", "byteorder", "educe", "ethereum_ssz", "ethereum_ssz_derive", "filesystem", + "fixed_bytes", "flate2", "libmdbx", "lmdb-rkv", @@ -8473,6 
+8514,7 @@ dependencies = [ "tracing", "tree_hash", "tree_hash_derive", + "typenum", "types", ] @@ -8498,9 +8540,11 @@ name = "slashing_protection" version = "0.1.0" dependencies = [ "arbitrary", + "bls", "eip_3076", "ethereum_serde_utils", "filesystem", + "fixed_bytes", "r2d2", "r2d2_sqlite", "rayon", @@ -8598,6 +8642,7 @@ checksum = "1fc20a89bab2dabeee65e9c9eb96892dc222c23254b401e1319b85efd852fa31" dependencies = [ "arbitrary", "context_deserialize", + "educe", "ethereum_serde_utils", "ethereum_ssz", "itertools 0.14.0", @@ -8625,11 +8670,13 @@ dependencies = [ "ethereum_hashing", "ethereum_ssz", "ethereum_ssz_derive", + "fixed_bytes", "int_to_bytes", "integer-sqrt", "itertools 0.10.5", "merkle_proof", "metrics", + "milhouse", "rand 0.9.2", "rayon", "safe_arith", @@ -8639,6 +8686,7 @@ dependencies = [ "tokio", "tracing", "tree_hash", + "typenum", "types", ] @@ -8647,7 +8695,9 @@ name = "state_transition_vectors" version = "0.1.0" dependencies = [ "beacon_chain", + "bls", "ethereum_ssz", + "fixed_bytes", "state_processing", "tokio", "types", @@ -8670,11 +8720,13 @@ dependencies = [ "directory", "ethereum_ssz", "ethereum_ssz_derive", + "fixed_bytes", "itertools 0.10.5", "leveldb", "logging", "lru 0.12.5", "metrics", + "milhouse", "parking_lot", "rand 0.9.2", "redb", @@ -8688,6 +8740,7 @@ dependencies = [ "tempfile", "tracing", "tracing-subscriber", + "typenum", "types", "xdelta3", "zstd 0.13.3", @@ -9600,6 +9653,7 @@ dependencies = [ "tracing", "tree_hash", "tree_hash_derive", + "typenum", ] [[package]] @@ -9823,6 +9877,7 @@ dependencies = [ "eth2_keystore", "ethereum_serde_utils", "filesystem", + "fixed_bytes", "futures", "graffiti_file", "health_metrics", @@ -9839,6 +9894,7 @@ dependencies = [ "signing_method", "slashing_protection", "slot_clock", + "ssz_types", "sysinfo", "system_health", "task_executor", @@ -9846,6 +9902,7 @@ dependencies = [ "tokio", "tokio-stream", "tracing", + "typenum", "types", "url", "validator_dir", @@ -9883,6 +9940,7 @@ version = "0.1.0" 
dependencies = [ "account_utils", "beacon_chain", + "bls", "clap", "clap_utils", "educe", @@ -9939,6 +9997,7 @@ dependencies = [ name = "validator_store" version = "0.1.0" dependencies = [ + "bls", "eth2", "slashing_protection", "types", @@ -10180,10 +10239,12 @@ version = "0.1.0" dependencies = [ "account_utils", "async-channel 1.9.0", + "bls", "environment", "eth2", "eth2_keystore", "eth2_network_config", + "fixed_bytes", "futures", "initialized_validators", "lighthouse_validator_store", @@ -10195,6 +10256,7 @@ dependencies = [ "serde_yaml", "slashing_protection", "slot_clock", + "ssz_types", "task_executor", "tempfile", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 03754a95866..cd5f82b1788 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -98,10 +98,15 @@ version = "8.0.1" [workspace.dependencies] account_utils = { path = "common/account_utils" } -alloy-consensus = { version = "=1.0.42", default-features = false } -alloy-primitives = { version = "=1.4.1", default-features = false, features = ["rlp", "getrandom"] } -alloy-rlp = { version = "=0.3.12", default-features = false } -alloy-rpc-types-eth = { version = "=1.0.42", default-features = false, features = ["serde"] } +alloy-consensus = { version = "1", default-features = false } +alloy-dyn-abi = { version = "1", default-features = false } +alloy-json-abi = { version = "1", default-features = false } +alloy-network = { version = "1", default-features = false } +alloy-primitives = { version = "1", default-features = false, features = ["rlp", "getrandom"] } +alloy-provider = { version = "1", default-features = false, features = ["reqwest"] } +alloy-rlp = { version = "0.3", default-features = false } +alloy-rpc-types-eth = { version = "1", default-features = false, features = ["serde"] } +alloy-signer-local = { version = "1", default-features = false } anyhow = "1" arbitrary = { version = "1", features = ["derive"] } async-channel = "1.9.0" @@ -230,7 +235,7 @@ slashing_protection = { path = 
"validator_client/slashing_protection" } slot_clock = { path = "common/slot_clock" } smallvec = { version = "1.11.2", features = ["arbitrary"] } snap = "1" -ssz_types = { version = "0.14.0", features = ["context_deserialize"] } +ssz_types = { version = "0.14.0", features = ["context_deserialize", "runtime_types"] } state_processing = { path = "consensus/state_processing" } store = { path = "beacon_node/store" } strum = { version = "0.24", features = ["derive"] } @@ -257,6 +262,7 @@ tracing-opentelemetry = "0.31.0" tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] } tree_hash = "0.12.0" tree_hash_derive = "0.12.0" +typenum = "1" types = { path = "consensus/types" } url = "2" uuid = { version = "0.8", features = ["serde", "v4"] } diff --git a/account_manager/src/validator/slashing_protection.rs b/account_manager/src/validator/slashing_protection.rs index 18064b990f3..96098ccbbd1 100644 --- a/account_manager/src/validator/slashing_protection.rs +++ b/account_manager/src/validator/slashing_protection.rs @@ -1,3 +1,4 @@ +use bls::PublicKeyBytes; use clap::{Arg, ArgAction, ArgMatches, Command}; use environment::Environment; use slashing_protection::{ @@ -7,7 +8,7 @@ use slashing_protection::{ use std::fs::File; use std::path::PathBuf; use std::str::FromStr; -use types::{Epoch, EthSpec, PublicKeyBytes, Slot}; +use types::{Epoch, EthSpec, Slot}; pub const CMD: &str = "slashing-protection"; pub const IMPORT_CMD: &str = "import"; diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 1e02db1e979..41a0c7e706c 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -20,6 +20,7 @@ testing = [] # Enables testing-only CLI flags [dependencies] account_utils = { workspace = true } beacon_chain = { workspace = true } +bls = { workspace = true } clap = { workspace = true } clap_utils = { workspace = true } client = { path = "client" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 
7d7332da575..485aff2e076 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -19,13 +19,14 @@ alloy-primitives = { workspace = true } bitvec = { workspace = true } bls = { workspace = true } educe = { workspace = true } -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["lighthouse"] } eth2_network_config = { workspace = true } ethereum_hashing = { workspace = true } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } execution_layer = { workspace = true } +fixed_bytes = { workspace = true } # TODO(zkproofs): add as a workspace dependency zkvm_execution_layer = { path = "../../zkvm_execution_layer" } fork_choice = { workspace = true } @@ -41,6 +42,7 @@ logging = { workspace = true } lru = { workspace = true } merkle_proof = { workspace = true } metrics = { workspace = true } +milhouse = { workspace = true } once_cell = { workspace = true } oneshot_broadcast = { path = "../../common/oneshot_broadcast/" } operation_pool = { workspace = true } @@ -67,6 +69,7 @@ tokio-stream = { workspace = true } tracing = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } +typenum = { workspace = true } types = { workspace = true } zstd = { workspace = true } diff --git a/beacon_node/beacon_chain/src/attester_cache.rs b/beacon_node/beacon_chain/src/attester_cache.rs index f879adfb498..26a33898129 100644 --- a/beacon_node/beacon_chain/src/attester_cache.rs +++ b/beacon_node/beacon_chain/src/attester_cache.rs @@ -10,14 +10,15 @@ //! and penalties can be computed and the `state.current_justified_checkpoint` can be updated. 
use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use fixed_bytes::FixedBytesExtended; use parking_lot::RwLock; use state_processing::state_advance::{Error as StateAdvanceError, partial_state_advance}; use std::collections::HashMap; use std::ops::Range; use types::{ - BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, - Hash256, RelativeEpoch, Slot, - attestation::Error as AttestationError, + BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, RelativeEpoch, + Slot, + attestation::AttestationError, beacon_state::{ compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count, }, diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index c816a0b29f3..7b3bb03e568 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -685,13 +685,13 @@ impl From for BeaconChainError { mod tests { use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckCaches}; use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType, test_spec}; + use bls::Keypair; use execution_layer::test_utils::Block; + use fixed_bytes::FixedBytesExtended; use std::sync::Arc; use std::sync::LazyLock; use tokio::sync::mpsc; - use types::{ - ChainSpec, Epoch, EthSpec, FixedBytesExtended, Hash256, Keypair, MinimalEthSpec, Slot, - }; + use types::{ChainSpec, Epoch, EthSpec, Hash256, MinimalEthSpec, Slot}; const VALIDATOR_COUNT: usize = 48; diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f6c26bb72c3..b314faa6d7b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -78,6 +78,8 @@ use crate::{ AvailabilityPendingExecutedBlock, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, CachedHead, metrics, }; +use bls::{PublicKey, 
PublicKeyBytes, Signature}; +use eth2::beacon_response::ForkVersionedResponse; use eth2::types::{ EventKind, SseBlobSidecar, SseBlock, SseDataColumnSidecar, SseExtendedPayloadAttributes, }; @@ -85,6 +87,7 @@ use execution_layer::{ BlockProposalContents, BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, PayloadAttributes, PayloadStatus, }; +use fixed_bytes::FixedBytesExtended; use fork_choice::{ AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses, @@ -1262,7 +1265,7 @@ impl BeaconChain { let num_required_columns = T::EthSpec::number_of_columns() / 2; let reconstruction_possible = columns.len() >= num_required_columns; if reconstruction_possible { - reconstruct_blobs(&self.kzg, &columns, None, &block, &self.spec) + reconstruct_blobs(&self.kzg, columns, None, &block, &self.spec) .map(Some) .map_err(Error::FailedToReconstructBlobs) } else { diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 0c203009bbe..60487f9c469 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -6,6 +6,7 @@ use crate::{BeaconSnapshot, metrics}; use educe::Educe; +use fixed_bytes::FixedBytesExtended; use fork_choice::ForkChoiceStore; use proto_array::JustifiedBalances; use safe_arith::ArithError; @@ -17,7 +18,7 @@ use store::{Error as StoreError, HotColdDB, ItemStore}; use superstruct::superstruct; use types::{ AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, - FixedBytesExtended, Hash256, Slot, + Hash256, Slot, }; #[derive(Debug)] diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index bd6460eba7d..a923d657a86 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ 
b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -18,10 +18,9 @@ use state_processing::state_advance::partial_state_advance; use std::num::NonZeroUsize; use std::sync::Arc; use tracing::instrument; +use typenum::Unsigned; use types::non_zero_usize::new_non_zero_usize; -use types::{ - BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, Hash256, Slot, Unsigned, -}; +use types::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, Hash256, Slot}; /// The number of sets of proposer indices that should be cached. const CACHE_SIZE: NonZeroUsize = new_non_zero_usize(16); diff --git a/beacon_node/beacon_chain/src/block_times_cache.rs b/beacon_node/beacon_chain/src/block_times_cache.rs index bd1adb7e407..e8d4c75dcee 100644 --- a/beacon_node/beacon_chain/src/block_times_cache.rs +++ b/beacon_node/beacon_chain/src/block_times_cache.rs @@ -294,7 +294,7 @@ impl BlockTimesCache { #[cfg(test)] mod test { use super::*; - use types::FixedBytesExtended; + use fixed_bytes::FixedBytesExtended; #[test] fn observed_time_uses_minimum() { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 374f1e2b360..bca8d2bc57b 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -66,6 +66,7 @@ use crate::{ beacon_chain::{BeaconForkChoice, ForkChoiceError}, metrics, }; +use bls::{PublicKey, PublicKeyBytes}; use educe::Educe; use eth2::types::{BlockGossip, EventKind}; use execution_layer::PayloadStatus; @@ -97,7 +98,7 @@ use tracing::{Instrument, Span, debug, debug_span, error, info_span, instrument} use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecutionBlockHash, FullPayload, Hash256, InconsistentFork, KzgProofs, - PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + RelativeEpoch, SignedBeaconBlock, 
SignedBeaconBlockHeader, Slot, data_column_sidecar::DataColumnSidecarError, }; diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index c176c47f4dd..de5d7b20966 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -23,7 +23,9 @@ use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, BeaconSnapshot, ServerSentEventHandler, }; +use bls::Signature; use execution_layer::ExecutionLayer; +use fixed_bytes::FixedBytesExtended; use fork_choice::{ForkChoice, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; use kzg::Kzg; @@ -46,7 +48,7 @@ use tracing::{debug, error, info}; use types::data_column_custody_group::CustodyIndex; use types::{ BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList, - Epoch, EthSpec, FixedBytesExtended, Hash256, Signature, SignedBeaconBlock, Slot, + Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. 
It has no user-facing diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index ca32744bafb..4f8b8e32dc7 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -12,6 +12,7 @@ use crate::{BeaconChainTypes, BlockProcessStatus}; use lighthouse_tracing::SPAN_PENDING_COMPONENTS; use lru::LruCache; use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; +use ssz_types::{RuntimeFixedVector, RuntimeVariableList}; use std::cmp::Ordering; use std::num::NonZeroUsize; use std::sync::Arc; @@ -20,8 +21,7 @@ use types::beacon_block_body::KzgCommitments; use types::blob_sidecar::BlobIdentifier; use types::{ BlobSidecar, BlockImportSource, ChainSpec, ColumnIndex, DataColumnSidecar, - DataColumnSidecarList, Epoch, EthSpec, Hash256, RuntimeFixedVector, RuntimeVariableList, - SignedBeaconBlock, + DataColumnSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, }; #[derive(Clone)] @@ -1399,15 +1399,14 @@ mod pending_components_tests { use crate::PayloadVerificationOutcome; use crate::block_verification_types::BlockImportData; use crate::test_utils::{NumBlobs, generate_rand_block_and_blobs, test_spec}; + use fixed_bytes::FixedBytesExtended; use fork_choice::PayloadVerificationStatus; use kzg::KzgCommitment; use rand::SeedableRng; use rand::rngs::StdRng; use state_processing::ConsensusContext; use types::test_utils::TestRandom; - use types::{ - BeaconState, FixedBytesExtended, ForkName, MainnetEthSpec, SignedBeaconBlock, Slot, - }; + use types::{BeaconState, ForkName, MainnetEthSpec, SignedBeaconBlock, Slot}; type E = MainnetEthSpec; diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 0effe0ec9d2..4a0595929f2 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ 
b/beacon_node/beacon_chain/src/errors.rs @@ -9,9 +9,11 @@ use crate::observed_aggregates::Error as ObservedAttestationsError; use crate::observed_attesters::Error as ObservedAttestersError; use crate::observed_block_producers::Error as ObservedBlockProducersError; use crate::observed_data_sidecars::Error as ObservedDataSidecarsError; +use bls::PublicKeyBytes; use execution_layer::PayloadStatus; use fork_choice::ExecutionStatus; use futures::channel::mpsc::TrySendError; +use milhouse::Error as MilhouseError; use operation_pool::OpPoolError; use safe_arith::ArithError; use ssz_types::Error as SszTypesError; @@ -28,7 +30,6 @@ use state_processing::{ }; use task_executor::ShutdownReason; use tokio::task::JoinError; -use types::milhouse::Error as MilhouseError; use types::*; macro_rules! easy_from_to { diff --git a/beacon_node/beacon_chain/src/graffiti_calculator.rs b/beacon_node/beacon_chain/src/graffiti_calculator.rs index e8110d14cdc..56808e0e67e 100644 --- a/beacon_node/beacon_chain/src/graffiti_calculator.rs +++ b/beacon_node/beacon_chain/src/graffiti_calculator.rs @@ -225,13 +225,14 @@ async fn engine_version_cache_refresh_service( mod tests { use crate::ChainConfig; use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType, test_spec}; + use bls::Keypair; use execution_layer::EngineCapabilities; use execution_layer::test_utils::{DEFAULT_CLIENT_VERSION, DEFAULT_ENGINE_CAPABILITIES}; use std::sync::Arc; use std::sync::LazyLock; use std::time::Duration; use tracing::info; - use types::{ChainSpec, GRAFFITI_BYTES_LEN, Graffiti, Keypair, MinimalEthSpec}; + use types::{ChainSpec, GRAFFITI_BYTES_LEN, Graffiti, MinimalEthSpec}; const VALIDATOR_COUNT: usize = 48; /// A cached set of keys. 
diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index e4040eea6b0..91b0f12cbb3 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -1,5 +1,6 @@ use crate::data_availability_checker::{AvailableBlock, AvailableBlockData}; use crate::{BeaconChain, BeaconChainTypes, WhenSlotSkipped, metrics}; +use fixed_bytes::FixedBytesExtended; use itertools::Itertools; use state_processing::{ per_block_processing::ParallelSignatureSets, @@ -12,7 +13,7 @@ use store::metadata::DataColumnInfo; use store::{AnchorInfo, BlobInfo, DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp}; use strum::IntoStaticStr; use tracing::{debug, instrument}; -use types::{FixedBytesExtended, Hash256, Slot}; +use types::{Hash256, Slot}; /// Use a longer timeout on the pubkey cache. /// diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 200774ebe46..334124419b9 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -308,12 +308,14 @@ pub(crate) fn build_data_column_sidecars( /// and it will be slow if the node needs to reconstruct the blobs pub fn reconstruct_blobs( kzg: &Kzg, - data_columns: &[Arc>], + mut data_columns: Vec>>, blob_indices_opt: Option>, signed_block: &SignedBlindedBeaconBlock, spec: &ChainSpec, ) -> Result, String> { - // The data columns are from the database, so we assume their correctness. 
+ // Sort data columns by index to ensure ascending order for KZG operations + data_columns.sort_unstable_by_key(|dc| dc.index); + let first_data_column = data_columns .first() .ok_or("data_columns should have at least one element".to_string())?; @@ -331,7 +333,7 @@ pub fn reconstruct_blobs( .map(|row_index| { let mut cells: Vec = vec![]; let mut cell_ids: Vec = vec![]; - for data_column in data_columns { + for data_column in &data_columns { let cell = data_column .column .get(row_index) @@ -463,6 +465,7 @@ mod test { test_reconstruct_data_columns(&kzg, &spec); test_reconstruct_data_columns_unordered(&kzg, &spec); test_reconstruct_blobs_from_data_columns(&kzg, &spec); + test_reconstruct_blobs_from_data_columns_unordered(&kzg, &spec); test_validate_data_columns(&kzg, &spec); } @@ -595,7 +598,7 @@ mod test { let blob_indices = vec![1, 2]; let reconstructed_blobs = reconstruct_blobs( kzg, - &column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2], + column_sidecars[0..column_sidecars.len() / 2].to_vec(), Some(blob_indices.clone()), &signed_blinded_block, spec, @@ -613,6 +616,31 @@ mod test { } } + #[track_caller] + fn test_reconstruct_blobs_from_data_columns_unordered(kzg: &Kzg, spec: &ChainSpec) { + let num_of_blobs = 2; + let (signed_block, blobs, proofs) = + create_test_fulu_block_and_blobs::(num_of_blobs, spec); + let blob_refs = blobs.iter().collect::>(); + let column_sidecars = + blobs_to_data_column_sidecars(&blob_refs, proofs.to_vec(), &signed_block, kzg, spec) + .unwrap(); + + // Test reconstruction with columns in reverse order (non-ascending) + let mut subset_columns: Vec<_> = + column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2].to_vec(); + subset_columns.reverse(); // This would fail without proper sorting in reconstruct_blobs + + let signed_blinded_block = signed_block.into(); + let reconstructed_blobs = + reconstruct_blobs(kzg, subset_columns, None, &signed_blinded_block, spec).unwrap(); + + for (i, original_blob) in 
blobs.iter().enumerate() { + let reconstructed_blob = &reconstructed_blobs.get(i).unwrap().blob; + assert_eq!(reconstructed_blob, original_blob, "{i}"); + } + } + fn get_kzg() -> Kzg { Kzg::new_from_trusted_setup(&get_trusted_setup()).expect("should create kzg") } diff --git a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs index 4c4478d17e6..beefc2d678b 100644 --- a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs +++ b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs @@ -577,12 +577,11 @@ where #[cfg(test)] mod tests { use super::*; - use ssz_types::BitList; - use store::BitVector; + use fixed_bytes::FixedBytesExtended; + use ssz_types::{BitList, BitVector}; use tree_hash::TreeHash; use types::{ - Attestation, AttestationBase, AttestationElectra, FixedBytesExtended, Fork, Hash256, - SyncCommitteeMessage, + Attestation, AttestationBase, AttestationElectra, Fork, Hash256, SyncCommitteeMessage, test_utils::{generate_deterministic_keypair, test_random_instance}, }; diff --git a/beacon_node/beacon_chain/src/observed_aggregates.rs b/beacon_node/beacon_chain/src/observed_aggregates.rs index f6f62e1b73b..b2c5cb4b38a 100644 --- a/beacon_node/beacon_chain/src/observed_aggregates.rs +++ b/beacon_node/beacon_chain/src/observed_aggregates.rs @@ -473,7 +473,8 @@ where #[cfg(not(debug_assertions))] mod tests { use super::*; - use types::{AttestationBase, FixedBytesExtended, Hash256, test_utils::test_random_instance}; + use fixed_bytes::FixedBytesExtended; + use types::{AttestationBase, Hash256, test_utils::test_random_instance}; type E = types::MainnetEthSpec; diff --git a/beacon_node/beacon_chain/src/observed_attesters.rs b/beacon_node/beacon_chain/src/observed_attesters.rs index 34d68fe3ac0..d5433f49d1b 100644 --- a/beacon_node/beacon_chain/src/observed_attesters.rs +++ b/beacon_node/beacon_chain/src/observed_attesters.rs @@ -19,8 +19,9 @@ use bitvec::vec::BitVec; use std::collections::{HashMap, 
HashSet}; use std::hash::Hash; use std::marker::PhantomData; +use typenum::Unsigned; use types::slot_data::SlotData; -use types::{Epoch, EthSpec, Hash256, Slot, Unsigned}; +use types::{Epoch, EthSpec, Hash256, Slot}; /// The maximum capacity of the `AutoPruningEpochContainer`. /// @@ -619,7 +620,7 @@ impl SlotSubcommitteeIndex { #[cfg(test)] mod tests { use super::*; - use types::FixedBytesExtended; + use fixed_bytes::FixedBytesExtended; type E = types::MainnetEthSpec; diff --git a/beacon_node/beacon_chain/src/observed_block_producers.rs b/beacon_node/beacon_chain/src/observed_block_producers.rs index 096c8bff77d..b740735ac41 100644 --- a/beacon_node/beacon_chain/src/observed_block_producers.rs +++ b/beacon_node/beacon_chain/src/observed_block_producers.rs @@ -4,7 +4,8 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; -use types::{BeaconBlockRef, Epoch, EthSpec, Hash256, Slot, Unsigned}; +use typenum::Unsigned; +use types::{BeaconBlockRef, Epoch, EthSpec, Hash256, Slot}; #[derive(Debug, PartialEq)] pub enum Error { diff --git a/beacon_node/beacon_chain/src/observed_slashable.rs b/beacon_node/beacon_chain/src/observed_slashable.rs index 001a0d4a867..704d605436b 100644 --- a/beacon_node/beacon_chain/src/observed_slashable.rs +++ b/beacon_node/beacon_chain/src/observed_slashable.rs @@ -5,7 +5,8 @@ use crate::observed_block_producers::Error; use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; -use types::{EthSpec, Hash256, Slot, Unsigned}; +use typenum::Unsigned; +use types::{EthSpec, Hash256, Slot}; #[derive(Eq, Hash, PartialEq, Debug, Default)] pub struct ProposalKey { diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index 22921147a68..618d459754d 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -290,6 +290,7 @@ impl 
BlockShufflingIds { #[cfg(not(debug_assertions))] #[cfg(test)] mod test { + use fixed_bytes::FixedBytesExtended; use types::*; use crate::test_utils::EphemeralHarnessType; diff --git a/beacon_node/beacon_chain/src/single_attestation.rs b/beacon_node/beacon_chain/src/single_attestation.rs index 33a093687e5..955eb98e92a 100644 --- a/beacon_node/beacon_chain/src/single_attestation.rs +++ b/beacon_node/beacon_chain/src/single_attestation.rs @@ -1,7 +1,7 @@ use crate::attestation_verification::Error; +use ssz_types::{BitList, BitVector}; use types::{ - Attestation, AttestationBase, AttestationElectra, BitList, BitVector, EthSpec, ForkName, - SingleAttestation, + Attestation, AttestationBase, AttestationElectra, EthSpec, ForkName, SingleAttestation, }; pub fn single_attestation_to_attestation( diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index e72e9a6b21f..e74e284e583 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -30,6 +30,7 @@ use crate::observed_attesters::SlotSubcommitteeIndex; use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, metrics, observed_aggregates::ObserveOutcome, }; +use bls::AggregateSignature; use bls::{PublicKeyBytes, verify_signature_sets}; use educe::Educe; use safe_arith::ArithError; @@ -49,9 +50,9 @@ use tree_hash_derive::TreeHash; use types::ChainSpec; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::slot_data::SlotData; -use types::sync_committee::Error as SyncCommitteeError; +use types::sync_committee::SyncCommitteeError; use types::{ - AggregateSignature, BeaconStateError, EthSpec, Hash256, SignedContributionAndProof, Slot, + BeaconStateError, EthSpec, Hash256, SignedContributionAndProof, Slot, SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, sync_committee_contribution::Error as ContributionError, }; 
diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 52f486e1105..b2488f0b639 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -20,6 +20,9 @@ pub use crate::{ validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}, }; use bls::get_withdrawal_credentials; +use bls::{ + AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, Signature, SignatureBytes, +}; use eth2::types::SignedBlockContentsTuple; use execution_layer::test_utils::generate_genesis_header; use execution_layer::{ @@ -30,6 +33,7 @@ use execution_layer::{ MockExecutionLayer, }, }; +use fixed_bytes::FixedBytesExtended; use futures::channel::mpsc::Receiver; pub use genesis::{DEFAULT_ETH1_BLOCK_HASH, InteropGenesisBuilder}; use int_to_bytes::int_to_bytes32; @@ -46,6 +50,7 @@ use rand::seq::SliceRandom; use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slot_clock::{SlotClock, TestingSlotClock}; +use ssz_types::{RuntimeVariableList, VariableList}; use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; @@ -60,12 +65,13 @@ use store::{HotColdDB, ItemStore, MemoryStore, config::StoreConfig}; use task_executor::TaskExecutor; use task_executor::{ShutdownReason, test_utils::TestRuntime}; use tree_hash::TreeHash; +use typenum::U4294967296; use types::data_column_custody_group::CustodyIndex; use types::indexed_attestation::IndexedAttestationBase; use types::payload::BlockProductionVersion; use types::test_utils::TestRandom; pub use types::test_utils::generate_deterministic_keypairs; -use types::{typenum::U4294967296, *}; +use types::*; // 4th September 2019 pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690; @@ -2932,7 +2938,6 @@ where let chain_dump = self.chain.chain_dump().unwrap(); chain_dump .iter() - .cloned() .map(|checkpoint| 
checkpoint.beacon_state.finalized_checkpoint().root) .filter(|block_hash| *block_hash != Hash256::zero()) .map(|hash| hash.into()) @@ -3290,7 +3295,7 @@ pub fn generate_rand_block_and_blobs( ) -> (SignedBeaconBlock>, Vec>) { let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng)); - let mut block = SignedBeaconBlock::from_block(inner, types::Signature::random_for_test(rng)); + let mut block = SignedBeaconBlock::from_block(inner, Signature::random_for_test(rng)); let mut blob_sidecars = vec![]; let bundle = match block { diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index ba06d5da4ec..2a76d65d328 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -4,6 +4,7 @@ use crate::beacon_proposer_cache::{BeaconProposerCache, TYPICAL_SLOTS_PER_EPOCH}; use crate::metrics; +use bls::PublicKeyBytes; use itertools::Itertools; use logging::crit; use parking_lot::{Mutex, RwLock}; @@ -28,9 +29,10 @@ use types::consts::altair::{ use types::{ Attestation, AttestationData, AttesterSlashingRef, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, - IndexedAttestationRef, ProposerSlashing, PublicKeyBytes, SignedAggregateAndProof, - SignedContributionAndProof, Slot, SyncCommitteeMessage, VoluntaryExit, + IndexedAttestationRef, ProposerSlashing, SignedAggregateAndProof, SignedContributionAndProof, + Slot, SyncCommitteeMessage, VoluntaryExit, }; + /// Used for Prometheus labels. 
/// /// We've used `total` for this value to align with Nimbus, as per: diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index a346a649f02..26ac02d91b4 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -1,6 +1,8 @@ use crate::errors::BeaconChainError; use crate::{BeaconChainTypes, BeaconStore}; use bls::PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN; +use bls::{PublicKey, PublicKeyBytes}; +use fixed_bytes::FixedBytesExtended; use rayon::prelude::*; use smallvec::SmallVec; use ssz::{Decode, Encode}; @@ -9,7 +11,7 @@ use std::collections::HashMap; use std::marker::PhantomData; use store::{DBColumn, Error as StoreError, StoreItem, StoreOp}; use tracing::instrument; -use types::{BeaconState, FixedBytesExtended, Hash256, PublicKey, PublicKeyBytes}; +use types::{BeaconState, Hash256}; /// Provides a mapping of `validator_index -> validator_publickey`. 
/// @@ -244,10 +246,11 @@ impl DatabasePubkey { mod test { use super::*; use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType}; + use bls::Keypair; use logging::create_test_tracing_subscriber; use std::sync::Arc; use store::HotColdDB; - use types::{EthSpec, Keypair, MainnetEthSpec}; + use types::{EthSpec, MainnetEthSpec}; type E = MainnetEthSpec; type T = EphemeralHarnessType; diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 0acb23d5126..017c249d10b 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -4,11 +4,10 @@ use beacon_chain::attestation_simulator::produce_unaggregated_attestation; use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; use beacon_chain::validator_monitor::UNAGGREGATED_ATTESTATION_LAG_SLOTS; use beacon_chain::{StateSkipConfig, WhenSlotSkipped, metrics}; +use bls::{AggregateSignature, Keypair}; use std::sync::{Arc, LazyLock}; use tree_hash::TreeHash; -use types::{ - AggregateSignature, Attestation, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot, -}; +use types::{Attestation, EthSpec, MainnetEthSpec, RelativeEpoch, Slot}; pub const VALIDATOR_COUNT: usize = 16; diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 706ffad3c1a..7984ea47081 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -13,15 +13,17 @@ use beacon_chain::{ single_attestation_to_attestation, test_spec, }, }; +use bls::{AggregateSignature, Keypair, SecretKey}; +use fixed_bytes::FixedBytesExtended; use genesis::{DEFAULT_ETH1_BLOCK_HASH, interop_genesis_state}; use int_to_bytes::int_to_bytes32; use state_processing::per_slot_processing; use std::sync::{Arc, LazyLock}; use tree_hash::TreeHash; 
+use typenum::Unsigned; use types::{ - Address, AggregateSignature, Attestation, AttestationRef, ChainSpec, Epoch, EthSpec, - FixedBytesExtended, ForkName, Hash256, Keypair, MainnetEthSpec, SecretKey, SelectionProof, - SignedAggregateAndProof, SingleAttestation, Slot, SubnetId, Unsigned, + Address, Attestation, AttestationRef, ChainSpec, Epoch, EthSpec, ForkName, Hash256, + MainnetEthSpec, SelectionProof, SignedAggregateAndProof, SingleAttestation, Slot, SubnetId, signed_aggregate_and_proof::SignedAggregateAndProofRefMut, test_utils::generate_deterministic_keypair, }; diff --git a/beacon_node/beacon_chain/tests/blob_verification.rs b/beacon_node/beacon_chain/tests/blob_verification.rs index c42a2828c01..d1a0d87adf1 100644 --- a/beacon_node/beacon_chain/tests/blob_verification.rs +++ b/beacon_node/beacon_chain/tests/blob_verification.rs @@ -7,6 +7,7 @@ use beacon_chain::{ AvailabilityProcessingStatus, BlockError, ChainConfig, InvalidSignature, NotifyExecutionLayer, block_verification_types::AsBlock, }; +use bls::{Keypair, Signature}; use logging::create_test_tracing_subscriber; use std::sync::{Arc, LazyLock}; use types::{blob_sidecar::FixedBlobSidecarList, *}; diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 881885cef23..2644b74b28e 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -13,6 +13,8 @@ use beacon_chain::{ BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock, InvalidSignature, NotifyExecutionLayer, }; +use bls::{AggregateSignature, Keypair, Signature}; +use fixed_bytes::FixedBytesExtended; use logging::create_test_tracing_subscriber; use slasher::{Config as SlasherConfig, Slasher}; use state_processing::{ diff --git a/beacon_node/beacon_chain/tests/column_verification.rs b/beacon_node/beacon_chain/tests/column_verification.rs index 229ae1e1998..be9b3b2fa12 100644 --- 
a/beacon_node/beacon_chain/tests/column_verification.rs +++ b/beacon_node/beacon_chain/tests/column_verification.rs @@ -9,6 +9,7 @@ use beacon_chain::{ AvailabilityProcessingStatus, BlockError, ChainConfig, InvalidSignature, NotifyExecutionLayer, block_verification_types::AsBlock, }; +use bls::{Keypair, Signature}; use logging::create_test_tracing_subscriber; use std::sync::{Arc, LazyLock}; use types::*; diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index c18af0bde70..2f97f10745e 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -9,6 +9,7 @@ use beacon_chain::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, test_spec, }, }; +use bls::Keypair; use state_processing::per_block_processing::errors::{ AttesterSlashingInvalid, BlockOperationError, ExitInvalid, ProposerSlashingInvalid, }; diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index 0a5881e486b..ee9cf511ea5 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -7,8 +7,9 @@ use beacon_chain::test_utils::{ use beacon_chain::{ BlockError, ChainConfig, StateSkipConfig, WhenSlotSkipped, test_utils::{AttestationStrategy, BlockStrategy, RelativeSyncCommittee}, - types::{Epoch, EthSpec, Keypair, MinimalEthSpec}, + types::{Epoch, EthSpec, MinimalEthSpec}, }; +use bls::Keypair; use eth2::types::{StandardAttestationRewards, TotalAttestationRewards, ValidatorId}; use state_processing::{BlockReplayError, BlockReplayer}; use std::array::IntoIter; diff --git a/beacon_node/beacon_chain/tests/schema_stability.rs b/beacon_node/beacon_chain/tests/schema_stability.rs index 3b09921c15c..db7f7dbdbbd 100644 --- a/beacon_node/beacon_chain/tests/schema_stability.rs +++ b/beacon_node/beacon_chain/tests/schema_stability.rs @@ -4,6 +4,7 @@ use beacon_chain::{ 
persisted_custody::PersistedCustody, test_utils::{BeaconChainHarness, DiskHarnessType, test_spec}, }; +use bls::Keypair; use logging::create_test_tracing_subscriber; use operation_pool::PersistedOperationPool; use ssz::Encode; @@ -16,7 +17,7 @@ use store::{ }; use strum::IntoEnumIterator; use tempfile::{TempDir, tempdir}; -use types::{ChainSpec, Hash256, Keypair, MainnetEthSpec, Slot}; +use types::{ChainSpec, Hash256, MainnetEthSpec, Slot}; type E = MainnetEthSpec; type Store = Arc, BeaconNodeBackend>>; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 0733d901fc3..8de96adb2d4 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -25,11 +25,14 @@ use beacon_chain::{ historical_blocks::HistoricalBlockError, migrate::MigratorConfig, }; +use bls::{Keypair, Signature, SignatureBytes}; +use fixed_bytes::FixedBytesExtended; use logging::create_test_tracing_subscriber; use maplit::hashset; use rand::Rng; use rand::rngs::StdRng; use slot_clock::{SlotClock, TestingSlotClock}; +use ssz_types::VariableList; use state_processing::{BlockReplayer, state_advance::complete_state_advance}; use std::collections::HashMap; use std::collections::HashSet; @@ -5525,7 +5528,6 @@ fn get_finalized_epoch_boundary_blocks( dump: &[BeaconSnapshot>], ) -> HashSet { dump.iter() - .cloned() .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root.into()) .collect() } @@ -5534,7 +5536,6 @@ fn get_blocks( dump: &[BeaconSnapshot>], ) -> HashSet { dump.iter() - .cloned() .map(|checkpoint| checkpoint.beacon_block_root.into()) .collect() } diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 9dd12410fbb..d2124c66415 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -2,6 +2,8 @@ use 
beacon_chain::sync_committee_verification::{Error as SyncCommitteeError, SyncCommitteeData}; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee}; +use bls::{AggregateSignature, Keypair, SecretKey}; +use fixed_bytes::FixedBytesExtended; use int_to_bytes::int_to_bytes32; use safe_arith::SafeArith; use state_processing::{ @@ -11,10 +13,11 @@ use state_processing::{ use std::sync::LazyLock; use store::{SignedContributionAndProof, SyncCommitteeMessage}; use tree_hash::TreeHash; +use typenum::Unsigned; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::{ - AggregateSignature, Epoch, EthSpec, FixedBytesExtended, Hash256, Keypair, MainnetEthSpec, - SecretKey, Slot, SyncContributionData, SyncSelectionProof, SyncSubnetId, Unsigned, + Epoch, EthSpec, Hash256, MainnetEthSpec, Slot, SyncContributionData, SyncSelectionProof, + SyncSubnetId, }; pub type E = MainnetEthSpec; diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index ec0e607d00a..17d9c5f697f 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -8,13 +8,14 @@ use beacon_chain::{ OP_POOL_DB_KEY, }, }; +use bls::Keypair; use operation_pool::PersistedOperationPool; use state_processing::EpochProcessingError; use state_processing::{per_slot_processing, per_slot_processing::Error as SlotProcessingError}; use std::sync::LazyLock; use types::{ - BeaconState, BeaconStateError, BlockImportSource, Checkpoint, EthSpec, Hash256, Keypair, - MinimalEthSpec, RelativeEpoch, Slot, + BeaconState, BeaconStateError, BlockImportSource, Checkpoint, EthSpec, Hash256, MinimalEthSpec, + RelativeEpoch, Slot, }; type E = MinimalEthSpec; diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index 95732abeb5d..521fc4ac975 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ 
b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -2,8 +2,9 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::validator_monitor::{MISSED_BLOCK_LAG_SLOTS, ValidatorMonitorConfig}; +use bls::{Keypair, PublicKeyBytes}; use std::sync::LazyLock; -use types::{Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; +use types::{Epoch, EthSpec, Hash256, MainnetEthSpec, Slot}; // Should ideally be divisible by 3. pub const VALIDATOR_COUNT: usize = 48; diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml index 9b1f86360df..09bf3f48b4e 100644 --- a/beacon_node/builder_client/Cargo.toml +++ b/beacon_node/builder_client/Cargo.toml @@ -5,6 +5,8 @@ edition = { workspace = true } authors = ["Sean Anderson "] [dependencies] +bls = { workspace = true } +context_deserialize = { workspace = true } eth2 = { workspace = true } ethereum_ssz = { workspace = true } lighthouse_version = { workspace = true } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index b486e77083a..4fc6b3a379b 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -1,9 +1,11 @@ +use bls::PublicKeyBytes; +use context_deserialize::ContextDeserialize; pub use eth2::Error; use eth2::types::beacon_response::EmptyMetadata; use eth2::types::builder_bid::SignedBuilderBid; use eth2::types::{ - ContentType, ContextDeserialize, EthSpec, ExecutionBlockHash, ForkName, ForkVersionDecode, - ForkVersionedResponse, PublicKeyBytes, SignedValidatorRegistrationData, Slot, + ContentType, EthSpec, ExecutionBlockHash, ForkName, ForkVersionDecode, ForkVersionedResponse, + SignedValidatorRegistrationData, Slot, }; use eth2::types::{FullPayloadContents, SignedBlindedBeaconBlock}; use eth2::{ @@ -538,9 +540,10 @@ impl BuilderHttpClient { #[cfg(test)] mod tests { use super::*; + use bls::Signature; + use 
eth2::types::MainnetEthSpec; use eth2::types::builder_bid::{BuilderBid, BuilderBidFulu}; use eth2::types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use eth2::types::{MainnetEthSpec, Signature}; use mockito::{Matcher, Server, ServerGuard}; type E = MainnetEthSpec; diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 43b2e1dd751..c443e945743 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -10,9 +10,10 @@ alloy-primitives = { workspace = true } alloy-rlp = { workspace = true } alloy-rpc-types-eth = { workspace = true } arc-swap = "1.6.0" +bls = { workspace = true } builder_client = { path = "../builder_client" } bytes = { workspace = true } -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["events", "lighthouse"] } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } fixed_bytes = { workspace = true } @@ -48,6 +49,7 @@ tracing = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } triehash = "0.8.4" +typenum = { workspace = true } types = { workspace = true } warp = { workspace = true } zeroize = { workspace = true } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 98da7dbf2c7..b0cc4dd8241 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -20,8 +20,8 @@ use strum::IntoStaticStr; use superstruct::superstruct; pub use types::{ Address, BeaconBlockRef, ConsolidationRequest, EthSpec, ExecutionBlockHash, ExecutionPayload, - ExecutionPayloadHeader, ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, - Uint256, VariableList, Withdrawal, Withdrawals, + ExecutionPayloadHeader, ExecutionPayloadRef, ForkName, Hash256, Transactions, Uint256, + Withdrawal, Withdrawals, }; use types::{ ExecutionPayloadBellatrix, ExecutionPayloadCapella, 
ExecutionPayloadDeneb, @@ -770,7 +770,7 @@ impl ClientVersionV1 { self.commit .0 .get(..4) - .map_or_else(|| self.commit.0.as_str(), |s| s) + .unwrap_or(self.commit.0.as_str()) .to_lowercase(), lighthouse_commit_prefix .0 diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 8f7564ace6b..c421491f808 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -103,9 +103,10 @@ pub static LIGHTHOUSE_JSON_CLIENT_VERSION: LazyLock = /// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object. pub mod deposit_log { + use bls::{PublicKeyBytes, SignatureBytes}; use ssz::Decode; use state_processing::per_block_processing::signature_sets::deposit_pubkey_signature_message; - use types::{ChainSpec, DepositData, Hash256, PublicKeyBytes, SignatureBytes}; + use types::{ChainSpec, DepositData, Hash256}; pub use eth2::lighthouse::DepositLog; @@ -1466,10 +1467,13 @@ mod test { use super::auth::JwtKey; use super::*; use crate::test_utils::{DEFAULT_JWT_SECRET, MockServer}; + use fixed_bytes::FixedBytesExtended; + use ssz_types::VariableList; use std::future::Future; use std::str::FromStr; use std::sync::Arc; - use types::{FixedBytesExtended, MainnetEthSpec, Unsigned}; + use typenum::Unsigned; + use types::MainnetEthSpec; struct Tester { server: MockServer, diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index cc46070325d..fc8eae015b9 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1101,10 +1101,10 @@ impl TryFrom for ClientVersionV1 { #[cfg(test)] mod tests { + use bls::{PublicKeyBytes, SignatureBytes}; use ssz::Encode; use types::{ - ConsolidationRequest, DepositRequest, MainnetEthSpec, PublicKeyBytes, RequestType, - SignatureBytes, 
WithdrawalRequest, + ConsolidationRequest, DepositRequest, MainnetEthSpec, RequestType, WithdrawalRequest, }; use super::*; diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index c2a31c2699b..34b1832894e 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -9,6 +9,7 @@ use crate::payload_cache::PayloadCache; use arc_swap::ArcSwapOption; use auth::{Auth, JwtKey, strip_prefix}; pub use block_hash::calculate_execution_block_hash; +use bls::{PublicKeyBytes, Signature}; use builder_client::BuilderHttpClient; pub use engine_api::EngineCapabilities; use engine_api::Error as ApiError; @@ -55,7 +56,7 @@ use types::{ use types::{ BeaconStateError, BlindedPayload, ChainSpec, Epoch, ExecPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadGloas, - FullPayload, ProposerPreparationData, PublicKeyBytes, Signature, Slot, + FullPayload, ProposerPreparationData, Slot, }; mod block_hash; diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 7e0033d732c..89d2994ce28 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -8,6 +8,7 @@ use crate::engines::ForkchoiceState; use alloy_consensus::TxEnvelope; use alloy_rpc_types_eth::Transaction as AlloyTransaction; use eth2::types::BlobsBundle; +use fixed_bytes::FixedBytesExtended; use kzg::{Kzg, KzgCommitment, KzgProof}; use parking_lot::Mutex; use rand::{Rng, SeedableRng, rngs::StdRng}; @@ -22,8 +23,8 @@ use tree_hash_derive::TreeHash; use types::{ Blob, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, - ExecutionPayloadGloas, 
ExecutionPayloadHeader, FixedBytesExtended, ForkName, Hash256, - KzgProofs, Transaction, Transactions, Uint256, + ExecutionPayloadGloas, ExecutionPayloadHeader, ForkName, Hash256, KzgProofs, Transaction, + Transactions, Uint256, }; use super::DEFAULT_TERMINAL_BLOCK; @@ -41,7 +42,7 @@ pub enum Block { PoS(ExecutionPayload), } -pub fn mock_el_extra_data() -> types::VariableList { +pub fn mock_el_extra_data() -> VariableList { "block gen was here".as_bytes().to_vec().try_into().unwrap() } diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 9add1369194..1d4f36b62c5 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,6 +1,8 @@ use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET}; use crate::{Config, ExecutionLayer, PayloadAttributes, PayloadParameters}; +use bls::{PublicKeyBytes, SecretKey, Signature}; use bytes::Bytes; +use eth2::beacon_response::ForkVersionedResponse; use eth2::types::PublishBlockRequest; use eth2::types::{ BlobsBundle, BlockId, BroadcastValidation, EndpointVersion, EventKind, EventTopic, @@ -14,6 +16,7 @@ use fork_choice::ForkchoiceUpdateParameters; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; use ssz::Encode; +use ssz_types::VariableList; use std::collections::HashMap; use std::fmt::Debug; use std::future::Future; @@ -25,17 +28,16 @@ use tempfile::NamedTempFile; use tokio_stream::StreamExt; use tracing::{debug, error, info, warn}; use tree_hash::TreeHash; +use types::ExecutionBlockHash; use types::builder_bid::{ BuilderBid, BuilderBidBellatrix, BuilderBidCapella, BuilderBidDeneb, BuilderBidElectra, BuilderBidFulu, BuilderBidGloas, SignedBuilderBid, }; use types::{ Address, BeaconState, ChainSpec, Epoch, EthSpec, ExecPayload, ExecutionPayload, - ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, ForkVersionDecode, - 
ForkVersionedResponse, Hash256, PublicKeyBytes, Signature, SignedBlindedBeaconBlock, - SignedRoot, SignedValidatorRegistrationData, Slot, Uint256, + ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, ForkVersionDecode, Hash256, + SignedBlindedBeaconBlock, SignedRoot, SignedValidatorRegistrationData, Slot, Uint256, }; -use types::{ExecutionBlockHash, SecretKey}; use warp::reply::{self, Reply}; use warp::{Filter, Rejection}; @@ -71,7 +73,7 @@ impl Operation { } } -pub fn mock_builder_extra_data() -> types::VariableList { +pub fn mock_builder_extra_data() -> VariableList { "mock_builder".as_bytes().to_vec().try_into().unwrap() } diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 73c998956ca..c69edb8f397 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -5,9 +5,10 @@ use crate::{ *, }; use alloy_primitives::B256 as H256; +use fixed_bytes::FixedBytesExtended; use kzg::Kzg; use tempfile::NamedTempFile; -use types::{FixedBytesExtended, MainnetEthSpec}; +use types::MainnetEthSpec; pub struct MockExecutionLayer { pub server: MockServer, diff --git a/beacon_node/execution_layer/src/versioned_hashes.rs b/beacon_node/execution_layer/src/versioned_hashes.rs index 97c3100de99..21cfd5a3223 100644 --- a/beacon_node/execution_layer/src/versioned_hashes.rs +++ b/beacon_node/execution_layer/src/versioned_hashes.rs @@ -1,6 +1,7 @@ use alloy_consensus::TxEnvelope; use alloy_rlp::Decodable; -use types::{EthSpec, ExecutionPayloadRef, Hash256, Unsigned, VersionedHash}; +use typenum::Unsigned; +use types::{EthSpec, ExecutionPayloadRef, Hash256, VersionedHash}; #[derive(Debug)] pub enum Error { diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index 8f6f3516fc5..124231a57e5 100644 --- a/beacon_node/genesis/Cargo.toml +++ 
b/beacon_node/genesis/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] +bls = { workspace = true } ethereum_hashing = { workspace = true } ethereum_ssz = { workspace = true } int_to_bytes = { workspace = true } diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index dfa4daab9ae..349b8f19c8b 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -1,12 +1,10 @@ use crate::common::genesis_deposits; +use bls::{Keypair, PublicKey, Signature}; use ethereum_hashing::hash; use rayon::prelude::*; use ssz::Encode; use state_processing::initialize_beacon_state_from_eth1; -use types::{ - BeaconState, ChainSpec, DepositData, EthSpec, ExecutionPayloadHeader, Hash256, Keypair, - PublicKey, Signature, -}; +use types::{BeaconState, ChainSpec, DepositData, EthSpec, ExecutionPayloadHeader, Hash256}; pub const DEFAULT_ETH1_BLOCK_HASH: &[u8] = &[0x42; 32]; diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 7dd0d0223f4..571dab10273 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -8,14 +8,17 @@ autotests = false # using a single test binary com [dependencies] beacon_chain = { workspace = true } beacon_processor = { workspace = true } +bls = { workspace = true } bs58 = "0.4.0" bytes = { workspace = true } +context_deserialize = { workspace = true } directory = { workspace = true } either = { workspace = true } -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["lighthouse"] } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } execution_layer = { workspace = true } +fixed_bytes = { workspace = true } futures = { workspace = true } health_metrics = { workspace = true } hex = { workspace = true } diff --git a/beacon_node/http_api/src/beacon/mod.rs b/beacon_node/http_api/src/beacon/mod.rs new file mode 100644 index 00000000000..df5e6eee5cb --- /dev/null 
+++ b/beacon_node/http_api/src/beacon/mod.rs @@ -0,0 +1,2 @@ +pub mod pool; +pub mod states; diff --git a/beacon_node/http_api/src/beacon/pool.rs b/beacon_node/http_api/src/beacon/pool.rs new file mode 100644 index 00000000000..059573c3175 --- /dev/null +++ b/beacon_node/http_api/src/beacon/pool.rs @@ -0,0 +1,522 @@ +use crate::task_spawner::{Priority, TaskSpawner}; +use crate::utils::{NetworkTxFilter, OptionalConsensusVersionHeaderFilter, ResponseFilter}; +use crate::version::{ + ResponseIncludesVersion, V1, V2, add_consensus_version_header, beacon_response, + unsupported_version_rejection, +}; +use crate::{sync_committees, utils}; +use beacon_chain::observed_operations::ObservationOutcome; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::types::{AttestationPoolQuery, EndpointVersion, Failure, GenericResponse}; +use lighthouse_network::PubsubMessage; +use network::NetworkMessage; +use operation_pool::ReceivedPreCapella; +use slot_clock::SlotClock; +use std::collections::HashSet; +use std::sync::Arc; +use tokio::sync::mpsc::UnboundedSender; +use tracing::{debug, info, warn}; +use types::{ + Attestation, AttestationData, AttesterSlashing, ForkName, ProposerSlashing, + SignedBlsToExecutionChange, SignedVoluntaryExit, SingleAttestation, SyncCommitteeMessage, +}; +use warp::filters::BoxedFilter; +use warp::{Filter, Reply}; +use warp_utils::reject::convert_rejection; + +pub type BeaconPoolPathFilter = BoxedFilter<( + TaskSpawner<::EthSpec>, + Arc>, +)>; +pub type BeaconPoolPathV2Filter = BoxedFilter<( + TaskSpawner<::EthSpec>, + Arc>, +)>; +pub type BeaconPoolPathAnyFilter = BoxedFilter<( + EndpointVersion, + TaskSpawner<::EthSpec>, + Arc>, +)>; + +/// POST beacon/pool/bls_to_execution_changes +pub fn post_beacon_pool_bls_to_execution_changes( + network_tx_filter: &NetworkTxFilter, + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("bls_to_execution_changes")) + .and(warp::path::end()) + 
.and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + address_changes: Vec, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let mut failures = vec![]; + + for (index, address_change) in address_changes.into_iter().enumerate() { + let validator_index = address_change.message.validator_index; + + match chain.verify_bls_to_execution_change_for_http_api(address_change) { + Ok(ObservationOutcome::New(verified_address_change)) => { + let validator_index = + verified_address_change.as_inner().message.validator_index; + let address = verified_address_change + .as_inner() + .message + .to_execution_address; + + // New to P2P *and* op pool, gossip immediately if post-Capella. + let received_pre_capella = + if chain.current_slot_is_post_capella().unwrap_or(false) { + ReceivedPreCapella::No + } else { + ReceivedPreCapella::Yes + }; + if matches!(received_pre_capella, ReceivedPreCapella::No) { + utils::publish_pubsub_message( + &network_tx, + PubsubMessage::BlsToExecutionChange(Box::new( + verified_address_change.as_inner().clone(), + )), + )?; + } + + // Import to op pool (may return `false` if there's a race). 
+ let imported = chain.import_bls_to_execution_change( + verified_address_change, + received_pre_capella, + ); + + info!( + %validator_index, + ?address, + published = + matches!(received_pre_capella, ReceivedPreCapella::No), + imported, + "Processed BLS to execution change" + ); + } + Ok(ObservationOutcome::AlreadyKnown) => { + debug!(%validator_index, "BLS to execution change already known"); + } + Err(e) => { + warn!( + validator_index, + reason = ?e, + source = "HTTP", + "Invalid BLS to execution change" + ); + failures.push(Failure::new(index, format!("invalid: {e:?}"))); + } + } + } + + if failures.is_empty() { + Ok(()) + } else { + Err(warp_utils::reject::indexed_bad_request( + "some BLS to execution changes failed to verify".into(), + failures, + )) + } + }) + }, + ) + .boxed() +} + +/// GET beacon/pool/bls_to_execution_changes +pub fn get_beacon_pool_bls_to_execution_changes( + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("bls_to_execution_changes")) + .and(warp::path::end()) + .then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let address_changes = chain.op_pool.get_all_bls_to_execution_changes(); + Ok(GenericResponse::from(address_changes)) + }) + }, + ) + .boxed() +} + +/// POST beacon/pool/sync_committees +pub fn post_beacon_pool_sync_committees( + network_tx_filter: &NetworkTxFilter, + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("sync_committees")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + signatures: Vec, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + sync_committees::process_sync_committee_signatures( + signatures, network_tx, &chain, + )?; + Ok(GenericResponse::from(())) + }) + }, + ) + .boxed() +} + +/// 
GET beacon/pool/voluntary_exits +pub fn get_beacon_pool_voluntary_exits( + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("voluntary_exits")) + .and(warp::path::end()) + .then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let attestations = chain.op_pool.get_all_voluntary_exits(); + Ok(GenericResponse::from(attestations)) + }) + }, + ) + .boxed() +} + +/// POST beacon/pool/voluntary_exits +pub fn post_beacon_pool_voluntary_exits( + network_tx_filter: &NetworkTxFilter, + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("voluntary_exits")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + exit: SignedVoluntaryExit, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let outcome = chain + .verify_voluntary_exit_for_gossip(exit.clone()) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + // Notify the validator monitor. 
+ chain + .validator_monitor + .read() + .register_api_voluntary_exit(&exit.message); + + if let ObservationOutcome::New(exit) = outcome { + utils::publish_pubsub_message( + &network_tx, + PubsubMessage::VoluntaryExit(Box::new(exit.clone().into_inner())), + )?; + + chain.import_voluntary_exit(exit); + } + + Ok(()) + }) + }, + ) + .boxed() +} + +/// GET beacon/pool/proposer_slashings +pub fn get_beacon_pool_proposer_slashings( + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("proposer_slashings")) + .and(warp::path::end()) + .then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let attestations = chain.op_pool.get_all_proposer_slashings(); + Ok(GenericResponse::from(attestations)) + }) + }, + ) + .boxed() +} + +/// POST beacon/pool/proposer_slashings +pub fn post_beacon_pool_proposer_slashings( + network_tx_filter: &NetworkTxFilter, + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("proposer_slashings")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + slashing: ProposerSlashing, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let outcome = chain + .verify_proposer_slashing_for_gossip(slashing.clone()) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + // Notify the validator monitor. 
+ chain + .validator_monitor + .read() + .register_api_proposer_slashing(&slashing); + + if let ObservationOutcome::New(slashing) = outcome { + utils::publish_pubsub_message( + &network_tx, + PubsubMessage::ProposerSlashing(Box::new( + slashing.clone().into_inner(), + )), + )?; + + chain.import_proposer_slashing(slashing); + } + + Ok(()) + }) + }, + ) + .boxed() +} + +/// GET beacon/pool/attester_slashings +pub fn get_beacon_pool_attester_slashings( + beacon_pool_path_any: &BeaconPoolPathAnyFilter, +) -> ResponseFilter { + beacon_pool_path_any + .clone() + .and(warp::path("attester_slashings")) + .and(warp::path::end()) + .then( + |endpoint_version: EndpointVersion, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P1, move || { + let slashings = chain.op_pool.get_all_attester_slashings(); + + // Use the current slot to find the fork version, and convert all messages to the + // current fork's format. This is to ensure consistent message types matching + // `Eth-Consensus-Version`. 
+ let current_slot = + chain + .slot_clock + .now() + .ok_or(warp_utils::reject::custom_server_error( + "unable to read slot clock".to_string(), + ))?; + let fork_name = chain.spec.fork_name_at_slot::(current_slot); + let slashings = slashings + .into_iter() + .filter(|slashing| { + (fork_name.electra_enabled() + && matches!(slashing, AttesterSlashing::Electra(_))) + || (!fork_name.electra_enabled() + && matches!(slashing, AttesterSlashing::Base(_))) + }) + .collect::>(); + + let require_version = match endpoint_version { + V1 => ResponseIncludesVersion::No, + V2 => ResponseIncludesVersion::Yes(fork_name), + _ => return Err(unsupported_version_rejection(endpoint_version)), + }; + + let res = beacon_response(require_version, &slashings); + Ok(add_consensus_version_header( + warp::reply::json(&res).into_response(), + fork_name, + )) + }) + }, + ) + .boxed() +} + +// POST beacon/pool/attester_slashings +pub fn post_beacon_pool_attester_slashings( + network_tx_filter: &NetworkTxFilter, + beacon_pool_path_any: &BeaconPoolPathAnyFilter, +) -> ResponseFilter { + beacon_pool_path_any + .clone() + .and(warp::path("attester_slashings")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + // V1 and V2 are identical except V2 has a consensus version header in the request. + // We only require this header for SSZ deserialization, which isn't supported for + // this endpoint presently. + |_endpoint_version: EndpointVersion, + task_spawner: TaskSpawner, + chain: Arc>, + slashing: AttesterSlashing, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let outcome = chain + .verify_attester_slashing_for_gossip(slashing.clone()) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + // Notify the validator monitor. 
+ chain + .validator_monitor + .read() + .register_api_attester_slashing(slashing.to_ref()); + + if let ObservationOutcome::New(slashing) = outcome { + utils::publish_pubsub_message( + &network_tx, + PubsubMessage::AttesterSlashing(Box::new( + slashing.clone().into_inner(), + )), + )?; + + chain.import_attester_slashing(slashing); + } + + Ok(()) + }) + }, + ) + .boxed() +} + +/// GET beacon/pool/attestations?committee_index,slot +pub fn get_beacon_pool_attestations( + beacon_pool_path_any: &BeaconPoolPathAnyFilter, +) -> ResponseFilter { + beacon_pool_path_any + .clone() + .and(warp::path("attestations")) + .and(warp::path::end()) + .and(warp::query::()) + .then( + |endpoint_version: EndpointVersion, + task_spawner: TaskSpawner, + chain: Arc>, + query: AttestationPoolQuery| { + task_spawner.blocking_response_task(Priority::P1, move || { + let query_filter = |data: &AttestationData, committee_indices: HashSet| { + query.slot.is_none_or(|slot| slot == data.slot) + && query + .committee_index + .is_none_or(|index| committee_indices.contains(&index)) + }; + + let mut attestations = chain.op_pool.get_filtered_attestations(query_filter); + attestations.extend( + chain + .naive_aggregation_pool + .read() + .iter() + .filter(|&att| { + query_filter(att.data(), att.get_committee_indices_map()) + }) + .cloned(), + ); + // Use the current slot to find the fork version, and convert all messages to the + // current fork's format. This is to ensure consistent message types matching + // `Eth-Consensus-Version`. 
+ let current_slot = + chain + .slot_clock + .now() + .ok_or(warp_utils::reject::custom_server_error( + "unable to read slot clock".to_string(), + ))?; + let fork_name = chain.spec.fork_name_at_slot::(current_slot); + let attestations = attestations + .into_iter() + .filter(|att| { + (fork_name.electra_enabled() && matches!(att, Attestation::Electra(_))) + || (!fork_name.electra_enabled() + && matches!(att, Attestation::Base(_))) + }) + .collect::>(); + + let require_version = match endpoint_version { + V1 => ResponseIncludesVersion::No, + V2 => ResponseIncludesVersion::Yes(fork_name), + _ => return Err(unsupported_version_rejection(endpoint_version)), + }; + + let res = beacon_response(require_version, &attestations); + Ok(add_consensus_version_header( + warp::reply::json(&res).into_response(), + fork_name, + )) + }) + }, + ) + .boxed() +} + +pub fn post_beacon_pool_attestations_v2( + network_tx_filter: &NetworkTxFilter, + optional_consensus_version_header_filter: OptionalConsensusVersionHeaderFilter, + beacon_pool_path_v2: &BeaconPoolPathV2Filter, +) -> ResponseFilter { + beacon_pool_path_v2 + .clone() + .and(warp::path("attestations")) + .and(warp::path::end()) + .and(warp_utils::json::json::>()) + .and(optional_consensus_version_header_filter) + .and(network_tx_filter.clone()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + attestations: Vec, + _fork_name: Option, + network_tx: UnboundedSender>| async move { + let result = crate::publish_attestations::publish_attestations( + task_spawner, + chain, + attestations, + network_tx, + true, + ) + .await + .map(|()| warp::reply::json(&())); + convert_rejection(result).await + }, + ) + .boxed() +} diff --git a/beacon_node/http_api/src/beacon/states.rs b/beacon_node/http_api/src/beacon/states.rs new file mode 100644 index 00000000000..6d06bcc77d6 --- /dev/null +++ b/beacon_node/http_api/src/beacon/states.rs @@ -0,0 +1,787 @@ +use crate::StateId; +use crate::task_spawner::{Priority, TaskSpawner}; +use 
crate::utils::ResponseFilter; +use crate::validator::pubkey_to_validator_index; +use crate::version::{ + ResponseIncludesVersion, add_consensus_version_header, + execution_optimistic_finalized_beacon_response, +}; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; +use eth2::types::{ + ValidatorBalancesRequestBody, ValidatorId, ValidatorIdentitiesRequestBody, + ValidatorsRequestBody, +}; +use std::sync::Arc; +use types::{ + AttestationShufflingId, CommitteeCache, Error as BeaconStateError, EthSpec, RelativeEpoch, +}; +use warp::filters::BoxedFilter; +use warp::{Filter, Reply}; +use warp_utils::query::multi_key_query; + +type BeaconStatesPath = BoxedFilter<( + StateId, + TaskSpawner<::EthSpec>, + Arc>, +)>; + +// GET beacon/states/{state_id}/pending_consolidations +pub fn get_beacon_state_pending_consolidations( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("pending_consolidations")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (data, execution_optimistic, finalized, fork_name) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(consolidations) = state.pending_consolidations() else { + return Err(warp_utils::reject::custom_bad_request( + "Pending consolidations not found".to_string(), + )); + }; + + Ok(( + consolidations.clone(), + execution_optimistic, + finalized, + state.fork_name_unchecked(), + )) + }, + )?; + + execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + data, + ) + .map(|res| warp::reply::json(&res).into_response()) + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/pending_partial_withdrawals +pub fn 
get_beacon_state_pending_partial_withdrawals( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("pending_partial_withdrawals")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (data, execution_optimistic, finalized, fork_name) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(withdrawals) = state.pending_partial_withdrawals() else { + return Err(warp_utils::reject::custom_bad_request( + "Pending withdrawals not found".to_string(), + )); + }; + + Ok(( + withdrawals.clone(), + execution_optimistic, + finalized, + state.fork_name_unchecked(), + )) + }, + )?; + + execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + data, + ) + .map(|res| warp::reply::json(&res).into_response()) + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/pending_deposits +pub fn get_beacon_state_pending_deposits( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("pending_deposits")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (data, execution_optimistic, finalized, fork_name) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(deposits) = state.pending_deposits() else { + return Err(warp_utils::reject::custom_bad_request( + "Pending deposits not found".to_string(), + )); + }; + + Ok(( + deposits.clone(), + execution_optimistic, + finalized, + state.fork_name_unchecked(), + )) + }, + )?; + + 
execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + data, + ) + .map(|res| warp::reply::json(&res).into_response()) + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/randao?epoch +pub fn get_beacon_state_randao( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("randao")) + .and(warp::query::()) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: eth2::types::RandaoQuery| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (randao, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let epoch = query.epoch.unwrap_or_else(|| state.current_epoch()); + let randao = *state.get_randao_mix(epoch).map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "epoch out of range: {e:?}" + )) + })?; + Ok((randao, execution_optimistic, finalized)) + }, + )?; + + Ok( + eth2::types::GenericResponse::from(eth2::types::RandaoMix { randao }) + .add_execution_optimistic_finalized(execution_optimistic, finalized), + ) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/sync_committees?epoch +pub fn get_beacon_state_sync_committees( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("sync_committees")) + .and(warp::query::()) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: eth2::types::SyncCommitteesQuery| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (sync_committee, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let current_epoch = 
state.current_epoch(); + let epoch = query.epoch.unwrap_or(current_epoch); + Ok(( + state + .get_built_sync_committee(epoch, &chain.spec) + .cloned() + .map_err(|e| match e { + BeaconStateError::SyncCommitteeNotKnown { .. } => { + warp_utils::reject::custom_bad_request(format!( + "state at epoch {} has no \ + sync committee for epoch {}", + current_epoch, epoch + )) + } + BeaconStateError::IncorrectStateVariant => { + warp_utils::reject::custom_bad_request(format!( + "state at epoch {} is not activated for Altair", + current_epoch, + )) + } + e => warp_utils::reject::beacon_state_error(e), + })?, + execution_optimistic, + finalized, + )) + }, + )?; + + let validators = chain + .validator_indices(sync_committee.pubkeys.iter()) + .map_err(warp_utils::reject::unhandled_error)?; + + let validator_aggregates = validators + .chunks_exact(T::EthSpec::sync_subcommittee_size()) + .map(|indices| eth2::types::SyncSubcommittee { + indices: indices.to_vec(), + }) + .collect(); + + let response = eth2::types::SyncCommitteeByValidatorIndices { + validators, + validator_aggregates, + }; + + Ok(eth2::types::GenericResponse::from(response) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/committees?slot,index,epoch +pub fn get_beacon_state_committees( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("committees")) + .and(warp::query::()) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: eth2::types::CommitteesQuery| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let current_epoch = state.current_epoch(); + let epoch = query.epoch.unwrap_or(current_epoch); + + // Attempt to obtain the committee_cache 
from the beacon chain + let decision_slot = (epoch.saturating_sub(2u64)) + .end_slot(T::EthSpec::slots_per_epoch()); + // Find the decision block and skip to another method on any kind + // of failure + let shuffling_id = if let Ok(Some(shuffling_decision_block)) = + chain.block_root_at_slot(decision_slot, WhenSlotSkipped::Prev) + { + Some(AttestationShufflingId { + shuffling_epoch: epoch, + shuffling_decision_block, + }) + } else { + None + }; + + // Attempt to read from the chain cache if there exists a + // shuffling_id + let maybe_cached_shuffling = if let Some(shuffling_id) = + shuffling_id.as_ref() + { + chain + .shuffling_cache + .try_write_for(std::time::Duration::from_secs(1)) + .and_then(|mut cache_write| cache_write.get(shuffling_id)) + .and_then(|cache_item| cache_item.wait().ok()) + } else { + None + }; + + let committee_cache = + if let Some(shuffling) = maybe_cached_shuffling { + shuffling + } else { + let possibly_built_cache = + match RelativeEpoch::from_epoch(current_epoch, epoch) { + Ok(relative_epoch) + if state.committee_cache_is_initialized( + relative_epoch, + ) => + { + state.committee_cache(relative_epoch).cloned() + } + _ => CommitteeCache::initialized( + state, + epoch, + &chain.spec, + ), + } + .map_err( + |e| match e { + BeaconStateError::EpochOutOfBounds => { + let max_sprp = + T::EthSpec::slots_per_historical_root() + as u64; + let first_subsequent_restore_point_slot = + ((epoch.start_slot( + T::EthSpec::slots_per_epoch(), + ) / max_sprp) + + 1) + * max_sprp; + if epoch < current_epoch { + warp_utils::reject::custom_bad_request( + format!( + "epoch out of bounds, \ + try state at slot {}", + first_subsequent_restore_point_slot, + ), + ) + } else { + warp_utils::reject::custom_bad_request( + "epoch out of bounds, \ + too far in future" + .into(), + ) + } + } + _ => warp_utils::reject::unhandled_error( + BeaconChainError::from(e), + ), + }, + )?; + + // Attempt to write to the beacon cache (only if the cache + // size is not the default 
value). + if chain.config.shuffling_cache_size + != beacon_chain::shuffling_cache::DEFAULT_CACHE_SIZE + && let Some(shuffling_id) = shuffling_id + && let Some(mut cache_write) = chain + .shuffling_cache + .try_write_for(std::time::Duration::from_secs(1)) + { + cache_write.insert_committee_cache( + shuffling_id, + &possibly_built_cache, + ); + } + + possibly_built_cache + }; + + // Use either the supplied slot or all slots in the epoch. + let slots = + query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { + epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect() + }); + + // Use either the supplied committee index or all available indices. + let indices = + query.index.map(|index| vec![index]).unwrap_or_else(|| { + (0..committee_cache.committees_per_slot()).collect() + }); + + let mut response = Vec::with_capacity(slots.len() * indices.len()); + + for slot in slots { + // It is not acceptable to query with a slot that is not within the + // specified epoch. + if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch { + return Err(warp_utils::reject::custom_bad_request( + format!("{} is not in epoch {}", slot, epoch), + )); + } + + for &index in &indices { + let committee = committee_cache + .get_beacon_committee(slot, index) + .ok_or_else(|| { + warp_utils::reject::custom_bad_request(format!( + "committee index {} does not exist in epoch {}", + index, epoch + )) + })?; + + response.push(eth2::types::CommitteeData { + index, + slot, + validators: committee + .committee + .iter() + .map(|i| *i as u64) + .collect(), + }); + } + } + + Ok((response, execution_optimistic, finalized)) + }, + )?; + Ok(eth2::types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/validators/{validator_id} +pub fn get_beacon_state_validators_id( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + 
.and(warp::path("validators")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid validator ID".to_string(), + )) + })) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + validator_id: ValidatorId| { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. + let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let index_opt = match &validator_id { + ValidatorId::PublicKey(pubkey) => pubkey_to_validator_index( + &chain, state, pubkey, + ) + .map_err(|e| { + warp_utils::reject::custom_not_found(format!( + "unable to access pubkey cache: {e:?}", + )) + })?, + ValidatorId::Index(index) => Some(*index as usize), + }; + + Ok(( + index_opt + .and_then(|index| { + let validator = state.validators().get(index)?; + let balance = *state.balances().get(index)?; + let epoch = state.current_epoch(); + let far_future_epoch = chain.spec.far_future_epoch; + + Some(eth2::types::ValidatorData { + index: index as u64, + balance, + status: + eth2::types::ValidatorStatus::from_validator( + validator, + epoch, + far_future_epoch, + ), + validator: validator.clone(), + }) + }) + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "unknown validator: {}", + validator_id + )) + })?, + execution_optimistic, + finalized, + )) + }, + )?; + + Ok(eth2::types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ) + .boxed() +} + +// POST beacon/states/{state_id}/validators +pub fn post_beacon_state_validators( + 
beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("validators")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: ValidatorsRequestBody| { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. + let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + crate::validators::get_beacon_state_validators( + state_id, + chain, + &query.ids, + &query.statuses, + ) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/validators?id,status +pub fn get_beacon_state_validators( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("validators")) + .and(warp::path::end()) + .and(multi_key_query::()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query_res: Result| { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. 
+ let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + let query = query_res?; + crate::validators::get_beacon_state_validators( + state_id, + chain, + &query.id, + &query.status, + ) + }) + }, + ) + .boxed() +} + +// POST beacon/states/{state_id}/validator_identities +pub fn post_beacon_state_validator_identities( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("validator_identities")) + .and(warp::path::end()) + .and(warp_utils::json::json_no_body()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: ValidatorIdentitiesRequestBody| { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. + let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + crate::validators::get_beacon_state_validator_identities( + state_id, + chain, + Some(&query.ids), + ) + }) + }, + ) + .boxed() +} + +// POST beacon/states/{state_id}/validator_balances +pub fn post_beacon_state_validator_balances( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("validator_balances")) + .and(warp::path::end()) + .and(warp_utils::json::json_no_body()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: ValidatorBalancesRequestBody| { + task_spawner.blocking_json_task(Priority::P1, move || { + crate::validators::get_beacon_state_validator_balances( + state_id, + chain, + Some(&query.ids), + ) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/validator_balances?id +pub fn get_beacon_state_validator_balances( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + 
.clone() + .and(warp::path("validator_balances")) + .and(warp::path::end()) + .and(multi_key_query::()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query_res: Result| { + task_spawner.blocking_json_task(Priority::P1, move || { + let query = query_res?; + crate::validators::get_beacon_state_validator_balances( + state_id, + chain, + query.id.as_deref(), + ) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/finality_checkpoints +pub fn get_beacon_state_finality_checkpoints( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("finality_checkpoints")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + Ok(( + eth2::types::FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), + }, + execution_optimistic, + finalized, + )) + }, + )?; + + Ok(eth2::types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/fork +pub fn get_beacon_state_fork( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("fork")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (fork, execution_optimistic, finalized) = + state_id.fork_and_execution_optimistic_and_finalized(&chain)?; + Ok(eth2::types::ExecutionOptimisticFinalizedResponse { + data: fork, + execution_optimistic: 
Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/root +pub fn get_beacon_state_root( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .and(warp::path("root")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (root, execution_optimistic, finalized) = state_id.root(&chain)?; + Ok(eth2::types::GenericResponse::from( + eth2::types::RootData::from(root), + )) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) + }) + }, + ) + .boxed() +} diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 778067c32bb..ea8b47f91ef 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -2,16 +2,17 @@ use crate::version::inconsistent_fork_rejection; use crate::{ExecutionOptimistic, state_id::checkpoint_slot_and_execution_optimistic}; use beacon_chain::kzg_utils::reconstruct_blobs; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; +use eth2::beacon_response::{ExecutionOptimisticFinalizedMetadata, UnversionedResponse}; use eth2::types::BlockId as CoreBlockId; use eth2::types::DataColumnIndicesQuery; use eth2::types::{BlobIndicesQuery, BlobWrapper, BlobsVersionedHashesQuery}; +use fixed_bytes::FixedBytesExtended; use std::fmt; use std::str::FromStr; use std::sync::Arc; use types::{ - BlobSidecarList, DataColumnSidecarList, EthSpec, FixedBytesExtended, ForkName, Hash256, - SignedBeaconBlock, SignedBlindedBeaconBlock, Slot, UnversionedResponse, - beacon_response::ExecutionOptimisticFinalizedMetadata, + BlobSidecarList, DataColumnSidecarList, EthSpec, ForkName, Hash256, SignedBeaconBlock, + SignedBlindedBeaconBlock, Slot, }; use warp::Rejection; @@ -474,7 +475,7 @@ impl BlockId { ) .collect::, _>>()?; - 
reconstruct_blobs(&chain.kzg, &data_columns, blob_indices, block, &chain.spec).map_err( + reconstruct_blobs(&chain.kzg, data_columns, blob_indices, block, &chain.spec).map_err( |e| { warp_utils::reject::custom_server_error(format!( "Error reconstructing data columns: {e:?}" diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 6389b34961a..58cd2a3bdbc 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -8,6 +8,7 @@ mod aggregate_attestation; mod attestation_performance; mod attester_duties; +mod beacon; mod block_id; mod block_packing_efficiency; mod block_rewards; @@ -29,39 +30,41 @@ mod sync_committees; mod task_spawner; pub mod test_utils; mod ui; +mod utils; mod validator; mod validator_inclusion; mod validators; mod version; + +use crate::beacon::pool::*; use crate::light_client::{get_light_client_bootstrap, get_light_client_updates}; -use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; +use crate::utils::{AnyVersionFilter, EthV1Filter}; +use crate::validator::post_validator_liveness_epoch; +use crate::validator::*; use crate::version::beacon_response; -use beacon_chain::{ - AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, - WhenSlotSkipped, attestation_verification::VerifiedAttestation, - observed_operations::ObservationOutcome, validator_monitor::timestamp_now, -}; +use beacon::states; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use beacon_processor::BeaconProcessorSend; pub use block_id::BlockId; use builder_states::get_next_withdrawals; use bytes::Bytes; +use context_deserialize::ContextDeserialize; use directory::DEFAULT_ROOT_DIR; use eth2::StatusCode; +use eth2::lighthouse::sync_state::SyncState; use eth2::types::{ - self as api_types, BroadcastValidation, ContextDeserialize, EndpointVersion, ForkChoice, - ForkChoiceExtraData, ForkChoiceNode, LightClientUpdatesQuery, 
PublishBlockRequest, - StateId as CoreStateId, ValidatorBalancesRequestBody, ValidatorId, - ValidatorIdentitiesRequestBody, ValidatorStatus, ValidatorsRequestBody, + self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceExtraData, + ForkChoiceNode, LightClientUpdatesQuery, PublishBlockRequest, ValidatorId, }; use eth2::{CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; use health_metrics::observe::Observe; -use lighthouse_network::rpc::methods::MetaData; -use lighthouse_network::{Enr, NetworkGlobals, PeerId, PubsubMessage, types::SyncState}; +use lighthouse_network::Enr; +use lighthouse_network::NetworkGlobals; +use lighthouse_network::PeerId; use lighthouse_version::version_with_platform; use logging::{SSELoggingComponents, crit}; -use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; +use network::{NetworkMessage, NetworkSenders}; use network_utils::enr_ext::EnrExt; -use operation_pool::ReceivedPreCapella; use parking_lot::RwLock; pub use publish_blocks::{ ProvenancedBlock, publish_blinded_block, publish_block, reconstruct_block, @@ -70,7 +73,6 @@ use serde::{Deserialize, Serialize}; use slot_clock::SlotClock; use ssz::Encode; pub use state_id::StateId; -use std::collections::HashSet; use std::future::Future; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; @@ -80,26 +82,18 @@ use std::sync::Arc; use sysinfo::{System, SystemExt}; use system_health::{observe_nat, observe_system_health_bn}; use task_spawner::{Priority, TaskSpawner}; -use tokio::sync::{ - mpsc::{Sender, UnboundedSender}, - oneshot, -}; +use tokio::sync::mpsc::UnboundedSender; use tokio_stream::{ StreamExt, wrappers::{BroadcastStream, errors::BroadcastStreamRecvError}, }; -use tracing::{debug, error, info, warn}; +use tracing::{debug, info, warn}; use types::{ - Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError, - ChainSpec, Checkpoint, CommitteeCache, ConfigAndPreset, 
Epoch, EthSpec, ForkName, Hash256, - ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, - SignedBlindedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, - SignedValidatorRegistrationData, SignedVoluntaryExit, SingleAttestation, Slot, - SyncCommitteeMessage, SyncContributionData, + BeaconStateError, Checkpoint, ConfigAndPreset, Epoch, EthSpec, ForkName, Hash256, + SignedBlindedBeaconBlock, Slot, }; -use validator::pubkey_to_validator_index; use version::{ - ResponseIncludesVersion, V1, V2, V3, add_consensus_version_header, add_ssz_content_type_header, + ResponseIncludesVersion, V1, V2, add_consensus_version_header, add_ssz_content_type_header, execution_optimistic_finalized_beacon_response, inconsistent_fork_rejection, unsupported_version_rejection, }; @@ -107,7 +101,7 @@ use warp::Reply; use warp::hyper::Body; use warp::sse::Event; use warp::{Filter, Rejection, http::Response}; -use warp_utils::{query::multi_key_query, reject::convert_rejection, uor::UnifyingOrFilter}; +use warp_utils::{query::multi_key_query, uor::UnifyingOrFilter}; const API_PREFIX: &str = "eth"; @@ -359,16 +353,18 @@ pub fn serve( } // Create a filter that extracts the endpoint version. - let any_version = warp::path(API_PREFIX).and(warp::path::param::().or_else( - |_| async move { - Err(warp_utils::reject::custom_bad_request( - "Invalid version identifier".to_string(), - )) - }, - )); + let any_version = warp::path(API_PREFIX) + .and( + warp::path::param::().or_else(|_| async move { + Err(warp_utils::reject::custom_bad_request( + "Invalid version identifier".to_string(), + )) + }), + ) + .boxed(); // Filter that enforces a single endpoint version and then discards the `EndpointVersion`. 
- let single_version = |reqd: EndpointVersion| { + fn single_version(any_version: AnyVersionFilter, reqd: EndpointVersion) -> EthV1Filter { any_version .and_then(move |version| async move { if version == reqd { @@ -378,10 +374,11 @@ pub fn serve( } }) .untuple_one() - }; + .boxed() + } - let eth_v1 = single_version(V1); - let eth_v2 = single_version(V2); + let eth_v1 = single_version(any_version.clone(), V1); + let eth_v2 = single_version(any_version.clone(), V2); // Create a `warp` filter that provides access to the network globals. let inner_network_globals = ctx.network_globals.clone(); @@ -402,34 +399,34 @@ pub fn serve( // Create a `warp` filter that provides access to the beacon chain. let inner_ctx = ctx.clone(); - let chain_filter = - warp::any() - .map(move || inner_ctx.chain.clone()) - .and_then(|chain| async move { - match chain { - Some(chain) => Ok(chain), - None => Err(warp_utils::reject::custom_not_found( - "Beacon chain genesis has not yet been observed.".to_string(), - )), - } - }); + let chain_filter = warp::any() + .map(move || inner_ctx.chain.clone()) + .and_then(|chain| async move { + match chain { + Some(chain) => Ok(chain), + None => Err(warp_utils::reject::custom_not_found( + "Beacon chain genesis has not yet been observed.".to_string(), + )), + } + }) + .boxed(); // Create a `warp` filter that provides access to the network sender channel. 
let network_tx = ctx .network_senders .as_ref() .map(|senders| senders.network_send()); - let network_tx_filter = - warp::any() - .map(move || network_tx.clone()) - .and_then(|network_tx| async move { - match network_tx { - Some(network_tx) => Ok(network_tx), - None => Err(warp_utils::reject::custom_not_found( - "The networking stack has not yet started (network_tx).".to_string(), - )), - } - }); + let network_tx_filter = warp::any() + .map(move || network_tx.clone()) + .and_then(|network_tx| async move { + match network_tx { + Some(network_tx) => Ok(network_tx), + None => Err(warp_utils::reject::custom_not_found( + "The networking stack has not yet started (network_tx).".to_string(), + )), + } + }) + .boxed(); // Create a `warp` filter that provides access to the network attestation subscription channel. let validator_subscriptions_tx = ctx @@ -446,7 +443,8 @@ pub fn serve( .to_string(), )), } - }); + }) + .boxed(); // Create a `warp` filter that rejects requests whilst the node is syncing. let not_while_syncing_filter = @@ -486,7 +484,8 @@ pub fn serve( SyncState::Stalled => Ok(()), } }, - ); + ) + .boxed(); // Create a `warp` filter that returns 404s if the light client server is disabled. 
let light_client_server_filter = @@ -539,8 +538,9 @@ pub fn serve( .beacon_processor_send .clone() .filter(|_| config.enable_beacon_processor); - let task_spawner_filter = - warp::any().map(move || TaskSpawner::new(beacon_processor_send.clone())); + let task_spawner_filter = warp::any() + .map(move || TaskSpawner::new(beacon_processor_send.clone())) + .boxed(); let duplicate_block_status_code = ctx.config.duplicate_block_status_code; @@ -552,6 +552,7 @@ pub fn serve( // GET beacon/genesis let get_beacon_genesis = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("genesis")) .and(warp::path::end()) @@ -575,6 +576,7 @@ pub fn serve( */ let beacon_states_path = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("states")) .and(warp::path::param::().or_else(|_| async { @@ -583,1190 +585,1082 @@ pub fn serve( )) })) .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); + .and(chain_filter.clone()) + .boxed(); // GET beacon/states/{state_id}/root - let get_beacon_state_root = beacon_states_path - .clone() - .and(warp::path("root")) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (root, execution_optimistic, finalized) = state_id.root(&chain)?; - Ok(api_types::GenericResponse::from(api_types::RootData::from( - root, - ))) - .map(|resp| { - resp.add_execution_optimistic_finalized(execution_optimistic, finalized) - }) - }) - }, - ); + let get_beacon_state_root = states::get_beacon_state_root(beacon_states_path.clone()); // GET beacon/states/{state_id}/fork - let get_beacon_state_fork = beacon_states_path + let get_beacon_state_fork = states::get_beacon_state_fork(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/finality_checkpoints + let get_beacon_state_finality_checkpoints = + states::get_beacon_state_finality_checkpoints(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/validator_balances?id + 
let get_beacon_state_validator_balances = + states::get_beacon_state_validator_balances(beacon_states_path.clone()); + + // POST beacon/states/{state_id}/validator_balances + let post_beacon_state_validator_balances = + states::post_beacon_state_validator_balances(beacon_states_path.clone()); + + // POST beacon/states/{state_id}/validator_identities + let post_beacon_state_validator_identities = + states::post_beacon_state_validator_identities(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/validators?id,status + let get_beacon_state_validators = + states::get_beacon_state_validators(beacon_states_path.clone()); + + // POST beacon/states/{state_id}/validators + let post_beacon_state_validators = + states::post_beacon_state_validators(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/validators/{validator_id} + let get_beacon_state_validators_id = + states::get_beacon_state_validators_id(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/committees?slot,index,epoch + let get_beacon_state_committees = + states::get_beacon_state_committees(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/sync_committees?epoch + let get_beacon_state_sync_committees = + states::get_beacon_state_sync_committees(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/randao?epoch + let get_beacon_state_randao = states::get_beacon_state_randao(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/pending_deposits + let get_beacon_state_pending_deposits = + states::get_beacon_state_pending_deposits(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/pending_partial_withdrawals + let get_beacon_state_pending_partial_withdrawals = + states::get_beacon_state_pending_partial_withdrawals(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/pending_consolidations + let get_beacon_state_pending_consolidations = + states::get_beacon_state_pending_consolidations(beacon_states_path.clone()); + + 
// GET beacon/headers + // + // Note: this endpoint only returns information about blocks in the canonical chain. Given that + // there's a `canonical` flag on the response, I assume it should also return non-canonical + // things. Returning non-canonical things is hard for us since we don't already have a + // mechanism for arbitrary forwards block iteration, we only support iterating forwards along + // the canonical chain. + let get_beacon_headers = eth_v1 .clone() - .and(warp::path("fork")) + .and(warp::path("beacon")) + .and(warp::path("headers")) + .and(warp::query::()) .and(warp::path::end()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) .then( - |state_id: StateId, + |query: api_types::HeadersQuery, task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { - let (fork, execution_optimistic, finalized) = - state_id.fork_and_execution_optimistic_and_finalized(&chain)?; - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data: fork, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) + let (root, block, execution_optimistic, finalized) = + match (query.slot, query.parent_root) { + // No query parameters, return the canonical head block. + (None, None) => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::unhandled_error)?; + ( + cached_head.head_block_root(), + cached_head.snapshot.beacon_block.clone_as_blinded(), + execution_status.is_optimistic_or_invalid(), + false, + ) + } + // Only the parent root parameter, do a forwards-iterator lookup. + (None, Some(parent_root)) => { + let (parent, execution_optimistic, _parent_finalized) = + BlockId::from_root(parent_root).blinded_block(&chain)?; + let (root, _slot) = chain + .forwards_iter_block_roots(parent.slot()) + .map_err(warp_utils::reject::unhandled_error)? + // Ignore any skip-slots immediately following the parent. 
+ .find(|res| { + res.as_ref().is_ok_and(|(root, _)| *root != parent_root) + }) + .transpose() + .map_err(warp_utils::reject::unhandled_error)? + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "child of block with root {}", + parent_root + )) + })?; + + BlockId::from_root(root) + .blinded_block(&chain) + // Ignore this `execution_optimistic` since the first value has + // more information about the original request. + .map(|(block, _execution_optimistic, finalized)| { + (root, block, execution_optimistic, finalized) + })? + } + // Slot is supplied, search by slot and optionally filter by + // parent root. + (Some(slot), parent_root_opt) => { + let (root, execution_optimistic, finalized) = + BlockId::from_slot(slot).root(&chain)?; + // Ignore the second `execution_optimistic`, the first one is the + // most relevant since it knows that we queried by slot. + let (block, _execution_optimistic, _finalized) = + BlockId::from_root(root).blinded_block(&chain)?; + + // If the parent root was supplied, check that it matches the block + // obtained via a slot lookup. 
+ if let Some(parent_root) = parent_root_opt + && block.parent_root() != parent_root + { + return Err(warp_utils::reject::custom_not_found(format!( + "no canonical block at slot {} with parent root {}", + slot, parent_root + ))); + } + + (root, block, execution_optimistic, finalized) + } + }; + + let data = api_types::BlockHeaderData { + root, + canonical: true, + header: api_types::BlockHeaderAndSignature { + message: block.message().block_header(), + signature: block.signature().clone().into(), + }, + }; + + Ok(api_types::GenericResponse::from(vec![data]) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) }) }, ); - // GET beacon/states/{state_id}/finality_checkpoints - let get_beacon_state_finality_checkpoints = beacon_states_path + // GET beacon/headers/{block_id} + let get_beacon_headers_block_id = eth_v1 .clone() - .and(warp::path("finality_checkpoints")) + .and(warp::path("beacon")) + .and(warp::path("headers")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid block ID".to_string(), + )) + })) .and(warp::path::end()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) .then( - |state_id: StateId, + |block_id: BlockId, task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { - let (data, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - Ok(( - api_types::FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), - finalized: state.finalized_checkpoint(), - }, - execution_optimistic, - finalized, - )) - }, - )?; + let (root, execution_optimistic, finalized) = block_id.root(&chain)?; + // Ignore the second `execution_optimistic` since the first one has more + // information about the original request. 
+ let (block, _execution_optimistic, _finalized) = + BlockId::from_root(root).blinded_block(&chain)?; + + let canonical = chain + .block_root_at_slot(block.slot(), WhenSlotSkipped::None) + .map_err(warp_utils::reject::unhandled_error)? + .is_some_and(|canonical| root == canonical); + + let data = api_types::BlockHeaderData { + root, + canonical, + header: api_types::BlockHeaderAndSignature { + message: block.message().block_header(), + signature: block.signature().clone().into(), + }, + }; Ok(api_types::ExecutionOptimisticFinalizedResponse { - data, execution_optimistic: Some(execution_optimistic), finalized: Some(finalized), + data, }) }) }, ); - // GET beacon/states/{state_id}/validator_balances?id - let get_beacon_state_validator_balances = beacon_states_path + /* + * beacon/blocks + */ + let consensus_version_header_filter = + warp::header::header::(CONSENSUS_VERSION_HEADER).boxed(); + + let optional_consensus_version_header_filter = + warp::header::optional::(CONSENSUS_VERSION_HEADER).boxed(); + + // POST beacon/blocks + let post_beacon_blocks = eth_v1 .clone() - .and(warp::path("validator_balances")) + .and(warp::path("beacon")) + .and(warp::path("blocks")) .and(warp::path::end()) - .and(multi_key_query::()) + .and(warp::body::json()) + .and(consensus_version_header_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query_res: Result| { - task_spawner.blocking_json_task(Priority::P1, move || { - let query = query_res?; - crate::validators::get_beacon_state_validator_balances( - state_id, + move |value: serde_json::Value, + consensus_version: ForkName, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let request = PublishBlockRequest::::context_deserialize( + &value, + consensus_version, + ) + .map_err(|e| { + 
warp_utils::reject::custom_bad_request(format!("invalid JSON: {e:?}")) + })?; + publish_blocks::publish_block( + None, + ProvenancedBlock::local_from_publish_request(request), chain, - query.id.as_deref(), + &network_tx, + BroadcastValidation::default(), + duplicate_block_status_code, ) + .await }) }, ); - // POST beacon/states/{state_id}/validator_balances - let post_beacon_state_validator_balances = beacon_states_path + let post_beacon_blocks_ssz = eth_v1 .clone() - .and(warp::path("validator_balances")) + .and(warp::path("beacon")) + .and(warp::path("blocks")) .and(warp::path::end()) - .and(warp_utils::json::json_no_body()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: ValidatorBalancesRequestBody| { - task_spawner.blocking_json_task(Priority::P1, move || { - crate::validators::get_beacon_state_validator_balances( - state_id, + .and(warp::body::bytes()) + .and(consensus_version_header_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .then( + move |block_bytes: Bytes, + consensus_version: ForkName, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let block_contents = PublishBlockRequest::::from_ssz_bytes( + &block_bytes, + consensus_version, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; + publish_blocks::publish_block( + None, + ProvenancedBlock::local_from_publish_request(block_contents), chain, - Some(&query.ids), + &network_tx, + BroadcastValidation::default(), + duplicate_block_status_code, ) + .await }) }, ); - // POST beacon/states/{state_id}/validator_identities - let post_beacon_state_validator_identities = beacon_states_path + let post_beacon_blocks_v2 = eth_v2 .clone() - .and(warp::path("validator_identities")) + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(warp::query::()) 
.and(warp::path::end()) - .and(warp_utils::json::json_no_body()) + .and(warp::body::json()) + .and(consensus_version_header_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: ValidatorIdentitiesRequestBody| { - // Prioritise requests for validators at the head. These should be fast to service - // and could be required by the validator client. - let priority = if let StateId(eth2::types::StateId::Head) = state_id { - Priority::P0 - } else { - Priority::P1 - }; - task_spawner.blocking_json_task(priority, move || { - crate::validators::get_beacon_state_validator_identities( - state_id, + move |validation_level: api_types::BroadcastValidationQuery, + value: serde_json::Value, + consensus_version: ForkName, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let request = PublishBlockRequest::::context_deserialize( + &value, + consensus_version, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid JSON: {e:?}")) + })?; + + publish_blocks::publish_block( + None, + ProvenancedBlock::local_from_publish_request(request), chain, - Some(&query.ids), + &network_tx, + validation_level.broadcast_validation, + duplicate_block_status_code, ) + .await }) }, ); - // GET beacon/states/{state_id}/validators?id,status - let get_beacon_state_validators = beacon_states_path + let post_beacon_blocks_v2_ssz = eth_v2 .clone() - .and(warp::path("validators")) + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(warp::query::()) .and(warp::path::end()) - .and(multi_key_query::()) + .and(warp::body::bytes()) + .and(consensus_version_header_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) .then( - |state_id: StateId, - task_spawner: TaskSpawner, - 
chain: Arc>, - query_res: Result| { - // Prioritise requests for validators at the head. These should be fast to service - // and could be required by the validator client. - let priority = if let StateId(eth2::types::StateId::Head) = state_id { - Priority::P0 - } else { - Priority::P1 - }; - task_spawner.blocking_json_task(priority, move || { - let query = query_res?; - crate::validators::get_beacon_state_validators( - state_id, + move |validation_level: api_types::BroadcastValidationQuery, + block_bytes: Bytes, + consensus_version: ForkName, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let block_contents = PublishBlockRequest::::from_ssz_bytes( + &block_bytes, + consensus_version, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; + publish_blocks::publish_block( + None, + ProvenancedBlock::local_from_publish_request(block_contents), chain, - &query.id, - &query.status, + &network_tx, + validation_level.broadcast_validation, + duplicate_block_status_code, ) + .await }) }, ); - // POST beacon/states/{state_id}/validators - let post_beacon_state_validators = beacon_states_path + /* + * beacon/blinded_blocks + */ + + // POST beacon/blinded_blocks + let post_beacon_blinded_blocks = eth_v1 .clone() - .and(warp::path("validators")) + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) .and(warp::path::end()) .and(warp_utils::json::json()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: ValidatorsRequestBody| { - // Prioritise requests for validators at the head. These should be fast to service - // and could be required by the validator client. 
- let priority = if let StateId(eth2::types::StateId::Head) = state_id { - Priority::P0 - } else { - Priority::P1 - }; - task_spawner.blocking_json_task(priority, move || { - crate::validators::get_beacon_state_validators( - state_id, + move |block_contents: Arc>, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + publish_blocks::publish_blinded_block( + block_contents, chain, - &query.ids, - &query.statuses, + &network_tx, + BroadcastValidation::default(), + duplicate_block_status_code, ) + .await }) }, ); - // GET beacon/states/{state_id}/validators/{validator_id} - let get_beacon_state_validators_id = beacon_states_path + // POST beacon/blocks + let post_beacon_blinded_blocks_ssz = eth_v1 .clone() - .and(warp::path("validators")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid validator ID".to_string(), - )) - })) + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) .and(warp::path::end()) + .and(warp::body::bytes()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - validator_id: ValidatorId| { - // Prioritise requests for validators at the head. These should be fast to service - // and could be required by the validator client. 
- let priority = if let StateId(eth2::types::StateId::Head) = state_id { - Priority::P0 - } else { - Priority::P1 - }; - task_spawner.blocking_json_task(priority, move || { - let (data, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let index_opt = match &validator_id { - ValidatorId::PublicKey(pubkey) => pubkey_to_validator_index( - &chain, state, pubkey, - ) - .map_err(|e| { - warp_utils::reject::custom_not_found(format!( - "unable to access pubkey cache: {e:?}", - )) - })?, - ValidatorId::Index(index) => Some(*index as usize), - }; - - Ok(( - index_opt - .and_then(|index| { - let validator = state.validators().get(index)?; - let balance = *state.balances().get(index)?; - let epoch = state.current_epoch(); - let far_future_epoch = chain.spec.far_future_epoch; - - Some(api_types::ValidatorData { - index: index as u64, - balance, - status: api_types::ValidatorStatus::from_validator( - validator, - epoch, - far_future_epoch, - ), - validator: validator.clone(), - }) - }) - .ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "unknown validator: {}", - validator_id - )) - })?, - execution_optimistic, - finalized, - )) - }, - )?; - - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) + move |block_bytes: Bytes, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let block = SignedBlindedBeaconBlock::::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) + .map(Arc::new) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; + publish_blocks::publish_blinded_block( + block, + chain, + &network_tx, + BroadcastValidation::default(), + duplicate_block_status_code, + ) + .await }) }, ); - // GET 
beacon/states/{state_id}/committees?slot,index,epoch - let get_beacon_state_committees = beacon_states_path + let post_beacon_blinded_blocks_v2 = eth_v2 .clone() - .and(warp::path("committees")) - .and(warp::query::()) + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(warp::query::()) .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(consensus_version_header_filter) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: api_types::CommitteesQuery| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (data, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let current_epoch = state.current_epoch(); - let epoch = query.epoch.unwrap_or(current_epoch); - - // Attempt to obtain the committee_cache from the beacon chain - let decision_slot = (epoch.saturating_sub(2u64)) - .end_slot(T::EthSpec::slots_per_epoch()); - // Find the decision block and skip to another method on any kind - // of failure - let shuffling_id = if let Ok(Some(shuffling_decision_block)) = - chain.block_root_at_slot(decision_slot, WhenSlotSkipped::Prev) - { - Some(AttestationShufflingId { - shuffling_epoch: epoch, - shuffling_decision_block, - }) - } else { - None - }; - - // Attempt to read from the chain cache if there exists a - // shuffling_id - let maybe_cached_shuffling = if let Some(shuffling_id) = - shuffling_id.as_ref() - { - chain - .shuffling_cache - .try_write_for(std::time::Duration::from_secs(1)) - .and_then(|mut cache_write| cache_write.get(shuffling_id)) - .and_then(|cache_item| cache_item.wait().ok()) - } else { - None - }; - - let committee_cache = - if let Some(shuffling) = maybe_cached_shuffling { - shuffling - } else { - let possibly_built_cache = - match RelativeEpoch::from_epoch(current_epoch, epoch) { - 
Ok(relative_epoch) - if state.committee_cache_is_initialized( - relative_epoch, - ) => - { - state.committee_cache(relative_epoch).cloned() - } - _ => CommitteeCache::initialized( - state, - epoch, - &chain.spec, - ), - } - .map_err( - |e| match e { - BeaconStateError::EpochOutOfBounds => { - let max_sprp = - T::EthSpec::slots_per_historical_root() - as u64; - let first_subsequent_restore_point_slot = - ((epoch.start_slot( - T::EthSpec::slots_per_epoch(), - ) / max_sprp) - + 1) - * max_sprp; - if epoch < current_epoch { - warp_utils::reject::custom_bad_request( - format!( - "epoch out of bounds, \ - try state at slot {}", - first_subsequent_restore_point_slot, - ), - ) - } else { - warp_utils::reject::custom_bad_request( - "epoch out of bounds, \ - too far in future" - .into(), - ) - } - } - _ => warp_utils::reject::unhandled_error( - BeaconChainError::from(e), - ), - }, - )?; - - // Attempt to write to the beacon cache (only if the cache - // size is not the default value). - if chain.config.shuffling_cache_size - != beacon_chain::shuffling_cache::DEFAULT_CACHE_SIZE - && let Some(shuffling_id) = shuffling_id - && let Some(mut cache_write) = chain - .shuffling_cache - .try_write_for(std::time::Duration::from_secs(1)) - { - cache_write.insert_committee_cache( - shuffling_id, - &possibly_built_cache, - ); - } - - possibly_built_cache - }; - - // Use either the supplied slot or all slots in the epoch. - let slots = - query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { - epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect() - }); - - // Use either the supplied committee index or all available indices. - let indices = - query.index.map(|index| vec![index]).unwrap_or_else(|| { - (0..committee_cache.committees_per_slot()).collect() - }); - - let mut response = Vec::with_capacity(slots.len() * indices.len()); - - for slot in slots { - // It is not acceptable to query with a slot that is not within the - // specified epoch. 
- if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch { - return Err(warp_utils::reject::custom_bad_request( - format!("{} is not in epoch {}", slot, epoch), - )); - } - - for &index in &indices { - let committee = committee_cache - .get_beacon_committee(slot, index) - .ok_or_else(|| { - warp_utils::reject::custom_bad_request(format!( - "committee index {} does not exist in epoch {}", - index, epoch - )) - })?; - - response.push(api_types::CommitteeData { - index, - slot, - validators: committee - .committee - .iter() - .map(|i| *i as u64) - .collect(), - }); - } - } + move |validation_level: api_types::BroadcastValidationQuery, + blinded_block_json: serde_json::Value, + consensus_version: ForkName, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let blinded_block = + SignedBlindedBeaconBlock::::context_deserialize( + &blinded_block_json, + consensus_version, + ) + .map(Arc::new) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid JSON: {e:?}")) + })?; + publish_blocks::publish_blinded_block( + blinded_block, + chain, + &network_tx, + validation_level.broadcast_validation, + duplicate_block_status_code, + ) + .await + }) + }, + ); - Ok((response, execution_optimistic, finalized)) - }, - )?; - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) + let post_beacon_blinded_blocks_v2_ssz = eth_v2 + .clone() + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::body::bytes()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .then( + move |validation_level: api_types::BroadcastValidationQuery, + block_bytes: Bytes, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>| { + 
task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let block = SignedBlindedBeaconBlock::::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) + .map(Arc::new) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; + publish_blocks::publish_blinded_block( + block, + chain, + &network_tx, + validation_level.broadcast_validation, + duplicate_block_status_code, + ) + .await }) }, ); - // GET beacon/states/{state_id}/sync_committees?epoch - let get_beacon_state_sync_committees = beacon_states_path + let block_id_or_err = warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid block ID".to_string(), + )) + }); + + let beacon_blocks_path_v1 = eth_v1 + .clone() + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(block_id_or_err) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()); + + let beacon_blocks_path_any = any_version + .clone() + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(block_id_or_err) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()); + + // GET beacon/blocks/{block_id} + let get_beacon_block = beacon_blocks_path_any .clone() - .and(warp::path("sync_committees")) - .and(warp::query::()) .and(warp::path::end()) + .and(warp::header::optional::("accept")) .then( - |state_id: StateId, + |endpoint_version: EndpointVersion, + block_id: BlockId, task_spawner: TaskSpawner, chain: Arc>, - query: api_types::SyncCommitteesQuery| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (sync_committee, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let current_epoch = state.current_epoch(); - let epoch = query.epoch.unwrap_or(current_epoch); - Ok(( - state - .get_built_sync_committee(epoch, &chain.spec) - .cloned() - .map_err(|e| match e { - BeaconStateError::SyncCommitteeNotKnown { .. 
} => { - warp_utils::reject::custom_bad_request(format!( - "state at epoch {} has no \ - sync committee for epoch {}", - current_epoch, epoch - )) - } - BeaconStateError::IncorrectStateVariant => { - warp_utils::reject::custom_bad_request(format!( - "state at epoch {} is not activated for Altair", - current_epoch, - )) - } - e => warp_utils::reject::beacon_state_error(e), - })?, - execution_optimistic, - finalized, - )) - }, - )?; - - let validators = chain - .validator_indices(sync_committee.pubkeys.iter()) - .map_err(warp_utils::reject::unhandled_error)?; - - let validator_aggregates = validators - .chunks_exact(T::EthSpec::sync_subcommittee_size()) - .map(|indices| api_types::SyncSubcommittee { - indices: indices.to_vec(), - }) - .collect(); + accept_header: Option| { + task_spawner.spawn_async_with_rejection(Priority::P1, async move { + let (block, execution_optimistic, finalized) = + block_id.full_block(&chain).await?; + let fork_name = block + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; - let response = api_types::SyncCommitteeByValidatorIndices { - validators, - validator_aggregates, + let require_version = match endpoint_version { + V1 => ResponseIncludesVersion::No, + V2 => ResponseIncludesVersion::Yes(fork_name), + _ => return Err(unsupported_version_rejection(endpoint_version)), }; - Ok(api_types::GenericResponse::from(response) - .add_execution_optimistic_finalized(execution_optimistic, finalized)) + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(block.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => execution_optimistic_finalized_beacon_response( + require_version, + execution_optimistic, + finalized, + block, + ) + .map(|res| warp::reply::json(&res).into_response()), + } + .map(|resp| add_consensus_version_header(resp, 
fork_name)) }) }, ); - // GET beacon/states/{state_id}/randao?epoch - let get_beacon_state_randao = beacon_states_path + // GET beacon/blocks/{block_id}/root + let get_beacon_block_root = beacon_blocks_path_v1 .clone() - .and(warp::path("randao")) - .and(warp::query::()) + .and(warp::path("root")) .and(warp::path::end()) .then( - |state_id: StateId, + |block_id: BlockId, task_spawner: TaskSpawner, - chain: Arc>, - query: api_types::RandaoQuery| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (randao, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let epoch = query.epoch.unwrap_or_else(|| state.current_epoch()); - let randao = *state.get_randao_mix(epoch).map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "epoch out of range: {e:?}" - )) - })?; - Ok((randao, execution_optimistic, finalized)) - }, - )?; - + chain: Arc>| { + // Prioritise requests for the head block root, as it is used by some VCs (including + // the Lighthouse VC) to create sync committee messages. 
+ let priority = if let BlockId(eth2::types::BlockId::Head) = block_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + let (block_root, execution_optimistic, finalized) = block_id.root(&chain)?; Ok( - api_types::GenericResponse::from(api_types::RandaoMix { randao }) + api_types::GenericResponse::from(api_types::RootData::from(block_root)) .add_execution_optimistic_finalized(execution_optimistic, finalized), ) }) }, ); - // GET beacon/states/{state_id}/pending_deposits - let get_beacon_state_pending_deposits = beacon_states_path + // GET beacon/blocks/{block_id}/attestations + let get_beacon_block_attestations = beacon_blocks_path_any .clone() - .and(warp::path("pending_deposits")) + .and(warp::path("attestations")) .and(warp::path::end()) .then( - |state_id: StateId, + |endpoint_version: EndpointVersion, + block_id: BlockId, task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_response_task(Priority::P1, move || { - let (data, execution_optimistic, finalized, fork_name) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let Ok(deposits) = state.pending_deposits() else { - return Err(warp_utils::reject::custom_bad_request( - "Pending deposits not found".to_string(), - )); - }; + let (block, execution_optimistic, finalized) = + block_id.blinded_block(&chain)?; + let fork_name = block + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + let atts = block + .message() + .body() + .attestations() + .map(|att| att.clone_as_attestation()) + .collect::>(); - Ok(( - deposits.clone(), - execution_optimistic, - finalized, - state.fork_name_unchecked(), - )) - }, - )?; + let require_version = match endpoint_version { + V1 => ResponseIncludesVersion::No, + V2 => ResponseIncludesVersion::Yes(fork_name), + _ => return Err(unsupported_version_rejection(endpoint_version)), + }; - execution_optimistic_finalized_beacon_response( - 
ResponseIncludesVersion::Yes(fork_name), + let res = execution_optimistic_finalized_beacon_response( + require_version, execution_optimistic, finalized, - data, - ) - .map(|res| warp::reply::json(&res).into_response()) - .map(|resp| add_consensus_version_header(resp, fork_name)) + &atts, + )?; + Ok(add_consensus_version_header( + warp::reply::json(&res).into_response(), + fork_name, + )) }) }, ); - // GET beacon/states/{state_id}/pending_partial_withdrawals - let get_beacon_state_pending_partial_withdrawals = beacon_states_path + // GET beacon/blinded_blocks/{block_id} + let get_beacon_blinded_block = eth_v1 .clone() - .and(warp::path("pending_partial_withdrawals")) + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(block_id_or_err) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) .and(warp::path::end()) + .and(warp::header::optional::("accept")) .then( - |state_id: StateId, + |block_id: BlockId, task_spawner: TaskSpawner, - chain: Arc>| { + chain: Arc>, + accept_header: Option| { task_spawner.blocking_response_task(Priority::P1, move || { - let (data, execution_optimistic, finalized, fork_name) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let Ok(withdrawals) = state.pending_partial_withdrawals() else { - return Err(warp_utils::reject::custom_bad_request( - "Pending withdrawals not found".to_string(), - )); - }; + let (block, execution_optimistic, finalized) = + block_id.blinded_block(&chain)?; + let fork_name = block + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; - Ok(( - withdrawals.clone(), - execution_optimistic, - finalized, - state.fork_name_unchecked(), + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(block.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create 
response: {}", + e )) - }, - )?; - - execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::Yes(fork_name), - execution_optimistic, - finalized, - data, - ) - .map(|res| warp::reply::json(&res).into_response()) + }), + _ => { + // Post as a V2 endpoint so we return the fork version. + execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + block, + ) + .map(|res| warp::reply::json(&res).into_response()) + } + } .map(|resp| add_consensus_version_header(resp, fork_name)) }) }, ); - // GET beacon/states/{state_id}/pending_consolidations - let get_beacon_state_pending_consolidations = beacon_states_path + /* + * beacon/blob_sidecars + */ + + // GET beacon/blob_sidecars/{block_id} + let get_blob_sidecars = eth_v1 .clone() - .and(warp::path("pending_consolidations")) + .and(warp::path("beacon")) + .and(warp::path("blob_sidecars")) + .and(block_id_or_err) .and(warp::path::end()) + .and(multi_key_query::()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp::header::optional::("accept")) .then( - |state_id: StateId, + |block_id: BlockId, + indices_res: Result, task_spawner: TaskSpawner, - chain: Arc>| { + chain: Arc>, + accept_header: Option| { task_spawner.blocking_response_task(Priority::P1, move || { - let (data, execution_optimistic, finalized, fork_name) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let Ok(consolidations) = state.pending_consolidations() else { - return Err(warp_utils::reject::custom_bad_request( - "Pending consolidations not found".to_string(), - )); - }; + let indices = indices_res?; + let (block, blob_sidecar_list_filtered, execution_optimistic, finalized) = + block_id.get_blinded_block_and_blob_list_filtered(indices, &chain)?; + let fork_name = block + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; - Ok(( - consolidations.clone(), - 
execution_optimistic, - finalized, - state.fork_name_unchecked(), + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(blob_sidecar_list_filtered.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e )) - }, - )?; - - execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::Yes(fork_name), - execution_optimistic, - finalized, - data, - ) - .map(|res| warp::reply::json(&res).into_response()) + }), + _ => { + // Post as a V2 endpoint so we return the fork version. + let res = execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + &blob_sidecar_list_filtered, + )?; + Ok(warp::reply::json(&res).into_response()) + } + } .map(|resp| add_consensus_version_header(resp, fork_name)) }) }, ); - // GET beacon/headers - // - // Note: this endpoint only returns information about blocks in the canonical chain. Given that - // there's a `canonical` flag on the response, I assume it should also return non-canonical - // things. Returning non-canonical things is hard for us since we don't already have a - // mechanism for arbitrary forwards block iteration, we only support iterating forwards along - // the canonical chain. 
- let get_beacon_headers = eth_v1 + // GET beacon/blobs/{block_id} + let get_blobs = eth_v1 + .clone() .and(warp::path("beacon")) - .and(warp::path("headers")) - .and(warp::query::()) + .and(warp::path("blobs")) + .and(block_id_or_err) .and(warp::path::end()) + .and(multi_key_query::()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) + .and(warp::header::optional::("accept")) .then( - |query: api_types::HeadersQuery, + |block_id: BlockId, + version_hashes_res: Result, task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (root, block, execution_optimistic, finalized) = - match (query.slot, query.parent_root) { - // No query parameters, return the canonical head block. - (None, None) => { - let (cached_head, execution_status) = chain - .canonical_head - .head_and_execution_status() - .map_err(warp_utils::reject::unhandled_error)?; - ( - cached_head.head_block_root(), - cached_head.snapshot.beacon_block.clone_as_blinded(), - execution_status.is_optimistic_or_invalid(), - false, - ) - } - // Only the parent root parameter, do a forwards-iterator lookup. - (None, Some(parent_root)) => { - let (parent, execution_optimistic, _parent_finalized) = - BlockId::from_root(parent_root).blinded_block(&chain)?; - let (root, _slot) = chain - .forwards_iter_block_roots(parent.slot()) - .map_err(warp_utils::reject::unhandled_error)? - // Ignore any skip-slots immediately following the parent. - .find(|res| { - res.as_ref().is_ok_and(|(root, _)| *root != parent_root) - }) - .transpose() - .map_err(warp_utils::reject::unhandled_error)? - .ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "child of block with root {}", - parent_root - )) - })?; - - BlockId::from_root(root) - .blinded_block(&chain) - // Ignore this `execution_optimistic` since the first value has - // more information about the original request. 
- .map(|(block, _execution_optimistic, finalized)| { - (root, block, execution_optimistic, finalized) - })? - } - // Slot is supplied, search by slot and optionally filter by - // parent root. - (Some(slot), parent_root_opt) => { - let (root, execution_optimistic, finalized) = - BlockId::from_slot(slot).root(&chain)?; - // Ignore the second `execution_optimistic`, the first one is the - // most relevant since it knows that we queried by slot. - let (block, _execution_optimistic, _finalized) = - BlockId::from_root(root).blinded_block(&chain)?; - - // If the parent root was supplied, check that it matches the block - // obtained via a slot lookup. - if let Some(parent_root) = parent_root_opt - && block.parent_root() != parent_root - { - return Err(warp_utils::reject::custom_not_found(format!( - "no canonical block at slot {} with parent root {}", - slot, parent_root - ))); - } - - (root, block, execution_optimistic, finalized) - } - }; - - let data = api_types::BlockHeaderData { - root, - canonical: true, - header: api_types::BlockHeaderAndSignature { - message: block.message().block_header(), - signature: block.signature().clone().into(), - }, - }; + chain: Arc>, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let versioned_hashes = version_hashes_res?; + let response = + block_id.get_blobs_by_versioned_hashes(versioned_hashes, &chain)?; - Ok(api_types::GenericResponse::from(vec![data]) - .add_execution_optimistic_finalized(execution_optimistic, finalized)) + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(response.data.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => { + let res = execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::No, + response.metadata.execution_optimistic.unwrap_or(false), + 
response.metadata.finalized.unwrap_or(false), + response.data, + )?; + Ok(warp::reply::json(&res).into_response()) + } + } }) }, ); - // GET beacon/headers/{block_id} - let get_beacon_headers_block_id = eth_v1 + /* + * beacon/pool + */ + + let beacon_pool_path = eth_v1 + .clone() .and(warp::path("beacon")) - .and(warp::path("headers")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid block ID".to_string(), - )) - })) - .and(warp::path::end()) + .and(warp::path("pool")) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .then( - |block_id: BlockId, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (root, execution_optimistic, finalized) = block_id.root(&chain)?; - // Ignore the second `execution_optimistic` since the first one has more - // information about the original request. - let (block, _execution_optimistic, _finalized) = - BlockId::from_root(root).blinded_block(&chain)?; + .boxed(); - let canonical = chain - .block_root_at_slot(block.slot(), WhenSlotSkipped::None) - .map_err(warp_utils::reject::unhandled_error)? 
- .is_some_and(|canonical| root == canonical); + let beacon_pool_path_v2 = eth_v2 + .clone() + .and(warp::path("beacon")) + .and(warp::path("pool")) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .boxed(); - let data = api_types::BlockHeaderData { - root, - canonical, - header: api_types::BlockHeaderAndSignature { - message: block.message().block_header(), - signature: block.signature().clone().into(), - }, - }; + let beacon_pool_path_any = any_version + .clone() + .and(warp::path("beacon")) + .and(warp::path("pool")) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .boxed(); - Ok(api_types::ExecutionOptimisticFinalizedResponse { - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - data, + let post_beacon_pool_attestations_v2 = post_beacon_pool_attestations_v2( + &network_tx_filter, + optional_consensus_version_header_filter, + &beacon_pool_path_v2, + ); + + // GET beacon/pool/attestations?committee_index,slot + let get_beacon_pool_attestations = get_beacon_pool_attestations(&beacon_pool_path_any); + + // POST beacon/pool/attester_slashings + let post_beacon_pool_attester_slashings = + post_beacon_pool_attester_slashings(&network_tx_filter, &beacon_pool_path_any); + + // GET beacon/pool/attester_slashings + let get_beacon_pool_attester_slashings = + get_beacon_pool_attester_slashings(&beacon_pool_path_any); + + // POST beacon/pool/proposer_slashings + let post_beacon_pool_proposer_slashings = + post_beacon_pool_proposer_slashings(&network_tx_filter, &beacon_pool_path); + + // GET beacon/pool/proposer_slashings + let get_beacon_pool_proposer_slashings = get_beacon_pool_proposer_slashings(&beacon_pool_path); + + // POST beacon/pool/voluntary_exits + let post_beacon_pool_voluntary_exits = + post_beacon_pool_voluntary_exits(&network_tx_filter, &beacon_pool_path); + + // GET beacon/pool/voluntary_exits + let get_beacon_pool_voluntary_exits = get_beacon_pool_voluntary_exits(&beacon_pool_path); + + // 
POST beacon/pool/sync_committees + let post_beacon_pool_sync_committees = + post_beacon_pool_sync_committees(&network_tx_filter, &beacon_pool_path); + + // GET beacon/pool/bls_to_execution_changes + let get_beacon_pool_bls_to_execution_changes = + get_beacon_pool_bls_to_execution_changes(&beacon_pool_path); + + // POST beacon/pool/bls_to_execution_changes + let post_beacon_pool_bls_to_execution_changes = + post_beacon_pool_bls_to_execution_changes(&network_tx_filter, &beacon_pool_path); + + let beacon_rewards_path = eth_v1 + .clone() + .and(warp::path("beacon")) + .and(warp::path("rewards")) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()); + + // GET beacon/rewards/blocks/{block_id} + let get_beacon_rewards_blocks = beacon_rewards_path + .clone() + .and(warp::path("blocks")) + .and(block_id_or_err) + .and(warp::path::end()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + block_id: BlockId| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (rewards, execution_optimistic, finalized) = + standard_block_rewards::compute_beacon_block_rewards(chain, block_id)?; + Ok(api_types::GenericResponse::from(rewards)).map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) }) }) }, ); /* - * beacon/blocks + * builder/states */ - let consensus_version_header_filter = - warp::header::header::(CONSENSUS_VERSION_HEADER); - let optional_consensus_version_header_filter = - warp::header::optional::(CONSENSUS_VERSION_HEADER); + let builder_states_path = eth_v1 + .clone() + .and(warp::path("builder")) + .and(warp::path("states")) + .and(chain_filter.clone()); - // POST beacon/blocks - let post_beacon_blocks = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("blocks")) - .and(warp::path::end()) - .and(warp::body::json()) - .and(consensus_version_header_filter) + // GET builder/states/{state_id}/expected_withdrawals + let get_expected_withdrawals = builder_states_path + .clone() .and(task_spawner_filter.clone()) - 
.and(chain_filter.clone()) - .and(network_tx_filter.clone()) + .and(warp::path::param::()) + .and(warp::path("expected_withdrawals")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) .then( - move |value: serde_json::Value, - consensus_version: ForkName, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let request = PublishBlockRequest::::context_deserialize( - &value, - consensus_version, - ) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid JSON: {e:?}")) - })?; - publish_blocks::publish_block( - None, - ProvenancedBlock::local_from_publish_request(request), - chain, - &network_tx, - BroadcastValidation::default(), - duplicate_block_status_code, - ) - .await + |chain: Arc>, + task_spawner: TaskSpawner, + state_id: StateId, + query: api_types::ExpectedWithdrawalsQuery, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (state, execution_optimistic, finalized) = state_id.state(&chain)?; + let proposal_slot = query.proposal_slot.unwrap_or(state.slot() + 1); + let withdrawals = + get_next_withdrawals::(&chain, state, state_id, proposal_slot)?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(withdrawals.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => Ok(warp::reply::json( + &api_types::ExecutionOptimisticFinalizedResponse { + data: withdrawals, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }, + ) + .into_response()), + } }) }, ); - let post_beacon_blocks_ssz = eth_v1 + /* + * beacon/light_client + */ + + let beacon_light_client_path = eth_v1 + .clone() .and(warp::path("beacon")) - .and(warp::path("blocks")) - 
.and(warp::path::end()) - .and(warp::body::bytes()) - .and(consensus_version_header_filter) + .and(warp::path("light_client")) + .and(light_client_server_filter) + .and(chain_filter.clone()); + + // GET beacon/light_client/bootstrap/{block_root} + let get_beacon_light_client_bootstrap = beacon_light_client_path + .clone() .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(network_tx_filter.clone()) + .and(warp::path("bootstrap")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid block root value".to_string(), + )) + })) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) .then( - move |block_bytes: Bytes, - consensus_version: ForkName, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block_contents = PublishBlockRequest::::from_ssz_bytes( - &block_bytes, - consensus_version, - ) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) - })?; - publish_blocks::publish_block( - None, - ProvenancedBlock::local_from_publish_request(block_contents), - chain, - &network_tx, - BroadcastValidation::default(), - duplicate_block_status_code, - ) - .await + |light_client_server_enabled: Result<(), Rejection>, + chain: Arc>, + task_spawner: TaskSpawner, + block_root: Hash256, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + light_client_server_enabled?; + get_light_client_bootstrap::(chain, &block_root, accept_header) }) }, ); - let post_beacon_blocks_v2 = eth_v2 - .and(warp::path("beacon")) - .and(warp::path("blocks")) - .and(warp::query::()) - .and(warp::path::end()) - .and(warp::body::json()) - .and(consensus_version_header_filter) + // GET beacon/light_client/optimistic_update + let get_beacon_light_client_optimistic_update = beacon_light_client_path + .clone() .and(task_spawner_filter.clone()) - 
.and(chain_filter.clone()) - .and(network_tx_filter.clone()) + .and(warp::path("optimistic_update")) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) .then( - move |validation_level: api_types::BroadcastValidationQuery, - value: serde_json::Value, - consensus_version: ForkName, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let request = PublishBlockRequest::::context_deserialize( - &value, - consensus_version, - ) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid JSON: {e:?}")) - })?; + |light_client_server_enabled: Result<(), Rejection>, + chain: Arc>, + task_spawner: TaskSpawner, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + light_client_server_enabled?; + let update = chain + .light_client_server_cache + .get_latest_optimistic_update() + .ok_or_else(|| { + warp_utils::reject::custom_not_found( + "No LightClientOptimisticUpdate is available".to_string(), + ) + })?; - publish_blocks::publish_block( - None, - ProvenancedBlock::local_from_publish_request(request), - chain, - &network_tx, - validation_level.broadcast_validation, - duplicate_block_status_code, - ) - .await + let fork_name = chain + .spec + .fork_name_at_slot::(update.get_slot()); + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(update.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => Ok(warp::reply::json(&beacon_response( + ResponseIncludesVersion::Yes(fork_name), + update, + )) + .into_response()), + } + .map(|resp| add_consensus_version_header(resp, fork_name)) }) }, ); - let post_beacon_blocks_v2_ssz = eth_v2 - .and(warp::path("beacon")) - .and(warp::path("blocks")) - .and(warp::query::()) - 
.and(warp::path::end()) - .and(warp::body::bytes()) - .and(consensus_version_header_filter) + // GET beacon/light_client/finality_update + let get_beacon_light_client_finality_update = beacon_light_client_path + .clone() .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(network_tx_filter.clone()) - .then( - move |validation_level: api_types::BroadcastValidationQuery, - block_bytes: Bytes, - consensus_version: ForkName, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block_contents = PublishBlockRequest::::from_ssz_bytes( - &block_bytes, - consensus_version, - ) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) - })?; - publish_blocks::publish_block( - None, - ProvenancedBlock::local_from_publish_request(block_contents), - chain, - &network_tx, - validation_level.broadcast_validation, - duplicate_block_status_code, - ) - .await - }) - }, - ); - - /* - * beacon/blinded_blocks - */ - - // POST beacon/blinded_blocks - let post_beacon_blinded_blocks = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("blinded_blocks")) + .and(warp::path("finality_update")) .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(network_tx_filter.clone()) + .and(warp::header::optional::("accept")) .then( - move |block_contents: Arc>, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - publish_blocks::publish_blinded_block( - block_contents, - chain, - &network_tx, - BroadcastValidation::default(), - duplicate_block_status_code, - ) - .await - }) - }, - ); - - // POST beacon/blocks - let post_beacon_blinded_blocks_ssz = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("blinded_blocks")) - .and(warp::path::end()) - .and(warp::body::bytes()) - 
.and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(network_tx_filter.clone()) - .then( - move |block_bytes: Bytes, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block = SignedBlindedBeaconBlock::::from_ssz_bytes( - &block_bytes, - &chain.spec, - ) - .map(Arc::new) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) - })?; - publish_blocks::publish_blinded_block( - block, - chain, - &network_tx, - BroadcastValidation::default(), - duplicate_block_status_code, - ) - .await - }) - }, - ); - - let post_beacon_blinded_blocks_v2 = eth_v2 - .and(warp::path("beacon")) - .and(warp::path("blinded_blocks")) - .and(warp::query::()) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(consensus_version_header_filter) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(network_tx_filter.clone()) - .then( - move |validation_level: api_types::BroadcastValidationQuery, - blinded_block_json: serde_json::Value, - consensus_version: ForkName, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let blinded_block = - SignedBlindedBeaconBlock::::context_deserialize( - &blinded_block_json, - consensus_version, - ) - .map(Arc::new) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid JSON: {e:?}")) - })?; - publish_blocks::publish_blinded_block( - blinded_block, - chain, - &network_tx, - validation_level.broadcast_validation, - duplicate_block_status_code, - ) - .await - }) - }, - ); - - let post_beacon_blinded_blocks_v2_ssz = eth_v2 - .and(warp::path("beacon")) - .and(warp::path("blinded_blocks")) - .and(warp::query::()) - .and(warp::path::end()) - .and(warp::body::bytes()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(network_tx_filter.clone()) - .then( 
- move |validation_level: api_types::BroadcastValidationQuery, - block_bytes: Bytes, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block = SignedBlindedBeaconBlock::::from_ssz_bytes( - &block_bytes, - &chain.spec, - ) - .map(Arc::new) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) - })?; - publish_blocks::publish_blinded_block( - block, - chain, - &network_tx, - validation_level.broadcast_validation, - duplicate_block_status_code, - ) - .await - }) - }, - ); - - let block_id_or_err = warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid block ID".to_string(), - )) - }); - - let beacon_blocks_path_v1 = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("blocks")) - .and(block_id_or_err) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); - - let beacon_blocks_path_any = any_version - .and(warp::path("beacon")) - .and(warp::path("blocks")) - .and(block_id_or_err) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); - - // GET beacon/blocks/{block_id} - let get_beacon_block = beacon_blocks_path_any - .clone() - .and(warp::path::end()) - .and(warp::header::optional::("accept")) - .then( - |endpoint_version: EndpointVersion, - block_id: BlockId, - task_spawner: TaskSpawner, + |light_client_server_enabled: Result<(), Rejection>, chain: Arc>, + task_spawner: TaskSpawner, accept_header: Option| { - task_spawner.spawn_async_with_rejection(Priority::P1, async move { - let (block, execution_optimistic, finalized) = - block_id.full_block(&chain).await?; - let fork_name = block - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - - let require_version = match endpoint_version { - V1 => ResponseIncludesVersion::No, - V2 => ResponseIncludesVersion::Yes(fork_name), - _ => return Err(unsupported_version_rejection(endpoint_version)), - }; + 
task_spawner.blocking_response_task(Priority::P1, move || { + light_client_server_enabled?; + let update = chain + .light_client_server_cache + .get_latest_finality_update() + .ok_or_else(|| { + warp_utils::reject::custom_not_found( + "No LightClientFinalityUpdate is available".to_string(), + ) + })?; + let fork_name = chain + .spec + .fork_name_at_slot::(update.signature_slot()); match accept_header { Some(api_types::Accept::Ssz) => Response::builder() .status(200) - .body(block.as_ssz_bytes().into()) + .body(update.as_ssz_bytes().into()) .map(|res: Response| add_ssz_content_type_header(res)) .map_err(|e| { warp_utils::reject::custom_server_error(format!( @@ -1774,2421 +1668,882 @@ pub fn serve( e )) }), - _ => execution_optimistic_finalized_beacon_response( - require_version, - execution_optimistic, - finalized, - block, - ) - .map(|res| warp::reply::json(&res).into_response()), + _ => Ok(warp::reply::json(&beacon_response( + ResponseIncludesVersion::Yes(fork_name), + update, + )) + .into_response()), } .map(|resp| add_consensus_version_header(resp, fork_name)) }) }, ); - // GET beacon/blocks/{block_id}/root - let get_beacon_block_root = beacon_blocks_path_v1 + // GET beacon/light_client/updates + let get_beacon_light_client_updates = beacon_light_client_path .clone() - .and(warp::path("root")) + .and(task_spawner_filter.clone()) + .and(warp::path("updates")) .and(warp::path::end()) + .and(warp::query::()) + .and(warp::header::optional::("accept")) .then( - |block_id: BlockId, + |light_client_server_enabled: Result<(), Rejection>, + chain: Arc>, task_spawner: TaskSpawner, - chain: Arc>| { - // Prioritise requests for the head block root, as it is used by some VCs (including - // the Lighthouse VC) to create sync committee messages. 
- let priority = if let BlockId(eth2::types::BlockId::Head) = block_id { - Priority::P0 - } else { - Priority::P1 - }; - task_spawner.blocking_json_task(priority, move || { - let (block_root, execution_optimistic, finalized) = block_id.root(&chain)?; - Ok( - api_types::GenericResponse::from(api_types::RootData::from(block_root)) - .add_execution_optimistic_finalized(execution_optimistic, finalized), - ) + query: LightClientUpdatesQuery, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + light_client_server_enabled?; + get_light_client_updates::(chain, query, accept_header) }) }, ); - // GET beacon/blocks/{block_id}/attestations - let get_beacon_block_attestations = beacon_blocks_path_any - .clone() - .and(warp::path("attestations")) - .and(warp::path::end()) - .then( - |endpoint_version: EndpointVersion, - block_id: BlockId, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_response_task(Priority::P1, move || { - let (block, execution_optimistic, finalized) = - block_id.blinded_block(&chain)?; - let fork_name = block - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - let atts = block - .message() - .body() - .attestations() - .map(|att| att.clone_as_attestation()) - .collect::>(); - - let require_version = match endpoint_version { - V1 => ResponseIncludesVersion::No, - V2 => ResponseIncludesVersion::Yes(fork_name), - _ => return Err(unsupported_version_rejection(endpoint_version)), - }; + /* + * beacon/rewards + */ - let res = execution_optimistic_finalized_beacon_response( - require_version, - execution_optimistic, - finalized, - &atts, - )?; - Ok(add_consensus_version_header( - warp::reply::json(&res).into_response(), - fork_name, - )) - }) - }, - ); - - // GET beacon/blinded_blocks/{block_id} - let get_beacon_blinded_block = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("blinded_blocks")) - .and(block_id_or_err) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) 
- .and(warp::path::end()) - .and(warp::header::optional::("accept")) - .then( - |block_id: BlockId, - task_spawner: TaskSpawner, - chain: Arc>, - accept_header: Option| { - task_spawner.blocking_response_task(Priority::P1, move || { - let (block, execution_optimistic, finalized) = - block_id.blinded_block(&chain)?; - let fork_name = block - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - - match accept_header { - Some(api_types::Accept::Ssz) => Response::builder() - .status(200) - .body(block.as_ssz_bytes().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }), - _ => { - // Post as a V2 endpoint so we return the fork version. - execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::Yes(fork_name), - execution_optimistic, - finalized, - block, - ) - .map(|res| warp::reply::json(&res).into_response()) - } - } - .map(|resp| add_consensus_version_header(resp, fork_name)) - }) - }, - ); - - /* - * beacon/blob_sidecars - */ - - // GET beacon/blob_sidecars/{block_id} - let get_blob_sidecars = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("blob_sidecars")) - .and(block_id_or_err) - .and(warp::path::end()) - .and(multi_key_query::()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(warp::header::optional::("accept")) - .then( - |block_id: BlockId, - indices_res: Result, - task_spawner: TaskSpawner, - chain: Arc>, - accept_header: Option| { - task_spawner.blocking_response_task(Priority::P1, move || { - let indices = indices_res?; - let (block, blob_sidecar_list_filtered, execution_optimistic, finalized) = - block_id.get_blinded_block_and_blob_list_filtered(indices, &chain)?; - let fork_name = block - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - - match accept_header { - Some(api_types::Accept::Ssz) => Response::builder() - .status(200) - 
.body(blob_sidecar_list_filtered.as_ssz_bytes().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }), - _ => { - // Post as a V2 endpoint so we return the fork version. - let res = execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::Yes(fork_name), - execution_optimistic, - finalized, - &blob_sidecar_list_filtered, - )?; - Ok(warp::reply::json(&res).into_response()) - } - } - .map(|resp| add_consensus_version_header(resp, fork_name)) - }) - }, - ); - - // GET beacon/blobs/{block_id} - let get_blobs = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("blobs")) - .and(block_id_or_err) - .and(warp::path::end()) - .and(multi_key_query::()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(warp::header::optional::("accept")) - .then( - |block_id: BlockId, - version_hashes_res: Result, - task_spawner: TaskSpawner, - chain: Arc>, - accept_header: Option| { - task_spawner.blocking_response_task(Priority::P1, move || { - let versioned_hashes = version_hashes_res?; - let response = - block_id.get_blobs_by_versioned_hashes(versioned_hashes, &chain)?; - - match accept_header { - Some(api_types::Accept::Ssz) => Response::builder() - .status(200) - .body(response.data.as_ssz_bytes().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }), - _ => { - let res = execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::No, - response.metadata.execution_optimistic.unwrap_or(false), - response.metadata.finalized.unwrap_or(false), - response.data, - )?; - Ok(warp::reply::json(&res).into_response()) - } - } - }) - }, - ); - - /* - * beacon/pool - */ - - let beacon_pool_path = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("pool")) - 
.and(task_spawner_filter.clone()) - .and(chain_filter.clone()); - - let beacon_pool_path_v2 = eth_v2 - .and(warp::path("beacon")) - .and(warp::path("pool")) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); - - let beacon_pool_path_any = any_version - .and(warp::path("beacon")) - .and(warp::path("pool")) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); - - let post_beacon_pool_attestations_v2 = beacon_pool_path_v2 - .clone() - .and(warp::path("attestations")) - .and(warp::path::end()) - .and(warp_utils::json::json::>()) - .and(optional_consensus_version_header_filter) - .and(network_tx_filter.clone()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - attestations: Vec, - _fork_name: Option, - network_tx: UnboundedSender>| async move { - let result = crate::publish_attestations::publish_attestations( - task_spawner, - chain, - attestations, - network_tx, - true, - ) - .await - .map(|()| warp::reply::json(&())); - convert_rejection(result).await - }, - ); - - // GET beacon/pool/attestations?committee_index,slot - let get_beacon_pool_attestations = beacon_pool_path_any - .clone() - .and(warp::path("attestations")) - .and(warp::path::end()) - .and(warp::query::()) - .then( - |endpoint_version: EndpointVersion, - task_spawner: TaskSpawner, - chain: Arc>, - query: api_types::AttestationPoolQuery| { - task_spawner.blocking_response_task(Priority::P1, move || { - let query_filter = |data: &AttestationData, committee_indices: HashSet| { - query.slot.is_none_or(|slot| slot == data.slot) - && query - .committee_index - .is_none_or(|index| committee_indices.contains(&index)) - }; - - let mut attestations = chain.op_pool.get_filtered_attestations(query_filter); - attestations.extend( - chain - .naive_aggregation_pool - .read() - .iter() - .filter(|&att| { - query_filter(att.data(), att.get_committee_indices_map()) - }) - .cloned(), - ); - // Use the current slot to find the fork version, and convert all messages to the - // current fork's 
format. This is to ensure consistent message types matching - // `Eth-Consensus-Version`. - let current_slot = - chain - .slot_clock - .now() - .ok_or(warp_utils::reject::custom_server_error( - "unable to read slot clock".to_string(), - ))?; - let fork_name = chain.spec.fork_name_at_slot::(current_slot); - let attestations = attestations - .into_iter() - .filter(|att| { - (fork_name.electra_enabled() && matches!(att, Attestation::Electra(_))) - || (!fork_name.electra_enabled() - && matches!(att, Attestation::Base(_))) - }) - .collect::>(); - - let require_version = match endpoint_version { - V1 => ResponseIncludesVersion::No, - V2 => ResponseIncludesVersion::Yes(fork_name), - _ => return Err(unsupported_version_rejection(endpoint_version)), - }; - - let res = beacon_response(require_version, &attestations); - Ok(add_consensus_version_header( - warp::reply::json(&res).into_response(), - fork_name, - )) - }) - }, - ); - - // POST beacon/pool/attester_slashings - let post_beacon_pool_attester_slashings = beacon_pool_path_any - .clone() - .and(warp::path("attester_slashings")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(network_tx_filter.clone()) - .then( - // V1 and V2 are identical except V2 has a consensus version header in the request. - // We only require this header for SSZ deserialization, which isn't supported for - // this endpoint presently. - |_endpoint_version: EndpointVersion, - task_spawner: TaskSpawner, - chain: Arc>, - slashing: AttesterSlashing, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - let outcome = chain - .verify_attester_slashing_for_gossip(slashing.clone()) - .map_err(|e| { - warp_utils::reject::object_invalid(format!( - "gossip verification failed: {:?}", - e - )) - })?; - - // Notify the validator monitor. 
- chain - .validator_monitor - .read() - .register_api_attester_slashing(slashing.to_ref()); - - if let ObservationOutcome::New(slashing) = outcome { - publish_pubsub_message( - &network_tx, - PubsubMessage::AttesterSlashing(Box::new( - slashing.clone().into_inner(), - )), - )?; - - chain.import_attester_slashing(slashing); - } - - Ok(()) - }) - }, - ); - - // GET beacon/pool/attester_slashings - let get_beacon_pool_attester_slashings = - beacon_pool_path_any - .clone() - .and(warp::path("attester_slashings")) - .and(warp::path::end()) - .then( - |endpoint_version: EndpointVersion, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_response_task(Priority::P1, move || { - let slashings = chain.op_pool.get_all_attester_slashings(); - - // Use the current slot to find the fork version, and convert all messages to the - // current fork's format. This is to ensure consistent message types matching - // `Eth-Consensus-Version`. - let current_slot = chain.slot_clock.now().ok_or( - warp_utils::reject::custom_server_error( - "unable to read slot clock".to_string(), - ), - )?; - let fork_name = chain.spec.fork_name_at_slot::(current_slot); - let slashings = slashings - .into_iter() - .filter(|slashing| { - (fork_name.electra_enabled() - && matches!(slashing, AttesterSlashing::Electra(_))) - || (!fork_name.electra_enabled() - && matches!(slashing, AttesterSlashing::Base(_))) - }) - .collect::>(); - - let require_version = match endpoint_version { - V1 => ResponseIncludesVersion::No, - V2 => ResponseIncludesVersion::Yes(fork_name), - _ => return Err(unsupported_version_rejection(endpoint_version)), - }; - - let res = beacon_response(require_version, &slashings); - Ok(add_consensus_version_header( - warp::reply::json(&res).into_response(), - fork_name, - )) - }) - }, - ); - - // POST beacon/pool/proposer_slashings - let post_beacon_pool_proposer_slashings = beacon_pool_path - .clone() - .and(warp::path("proposer_slashings")) - .and(warp::path::end()) - 
.and(warp_utils::json::json()) - .and(network_tx_filter.clone()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - slashing: ProposerSlashing, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - let outcome = chain - .verify_proposer_slashing_for_gossip(slashing.clone()) - .map_err(|e| { - warp_utils::reject::object_invalid(format!( - "gossip verification failed: {:?}", - e - )) - })?; - - // Notify the validator monitor. - chain - .validator_monitor - .read() - .register_api_proposer_slashing(&slashing); - - if let ObservationOutcome::New(slashing) = outcome { - publish_pubsub_message( - &network_tx, - PubsubMessage::ProposerSlashing(Box::new( - slashing.clone().into_inner(), - )), - )?; - - chain.import_proposer_slashing(slashing); - } - - Ok(()) - }) - }, - ); - - // GET beacon/pool/proposer_slashings - let get_beacon_pool_proposer_slashings = beacon_pool_path - .clone() - .and(warp::path("proposer_slashings")) - .and(warp::path::end()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let attestations = chain.op_pool.get_all_proposer_slashings(); - Ok(api_types::GenericResponse::from(attestations)) - }) - }, - ); - - // POST beacon/pool/voluntary_exits - let post_beacon_pool_voluntary_exits = beacon_pool_path - .clone() - .and(warp::path("voluntary_exits")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(network_tx_filter.clone()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - exit: SignedVoluntaryExit, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - let outcome = chain - .verify_voluntary_exit_for_gossip(exit.clone()) - .map_err(|e| { - warp_utils::reject::object_invalid(format!( - "gossip verification failed: {:?}", - e - )) - })?; - - // Notify the validator monitor. 
- chain - .validator_monitor - .read() - .register_api_voluntary_exit(&exit.message); - - if let ObservationOutcome::New(exit) = outcome { - publish_pubsub_message( - &network_tx, - PubsubMessage::VoluntaryExit(Box::new(exit.clone().into_inner())), - )?; - - chain.import_voluntary_exit(exit); - } - - Ok(()) - }) - }, - ); - - // GET beacon/pool/voluntary_exits - let get_beacon_pool_voluntary_exits = beacon_pool_path - .clone() - .and(warp::path("voluntary_exits")) - .and(warp::path::end()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let attestations = chain.op_pool.get_all_voluntary_exits(); - Ok(api_types::GenericResponse::from(attestations)) - }) - }, - ); - - // POST beacon/pool/sync_committees - let post_beacon_pool_sync_committees = beacon_pool_path - .clone() - .and(warp::path("sync_committees")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(network_tx_filter.clone()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - signatures: Vec, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - sync_committees::process_sync_committee_signatures( - signatures, network_tx, &chain, - )?; - Ok(api_types::GenericResponse::from(())) - }) - }, - ); - - // GET beacon/pool/bls_to_execution_changes - let get_beacon_pool_bls_to_execution_changes = beacon_pool_path - .clone() - .and(warp::path("bls_to_execution_changes")) - .and(warp::path::end()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let address_changes = chain.op_pool.get_all_bls_to_execution_changes(); - Ok(api_types::GenericResponse::from(address_changes)) - }) - }, - ); - - // POST beacon/pool/bls_to_execution_changes - let post_beacon_pool_bls_to_execution_changes = beacon_pool_path - .clone() - .and(warp::path("bls_to_execution_changes")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - 
.and(network_tx_filter.clone()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - address_changes: Vec, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - let mut failures = vec![]; - - for (index, address_change) in address_changes.into_iter().enumerate() { - let validator_index = address_change.message.validator_index; - - match chain.verify_bls_to_execution_change_for_http_api(address_change) { - Ok(ObservationOutcome::New(verified_address_change)) => { - let validator_index = - verified_address_change.as_inner().message.validator_index; - let address = verified_address_change - .as_inner() - .message - .to_execution_address; - - // New to P2P *and* op pool, gossip immediately if post-Capella. - let received_pre_capella = - if chain.current_slot_is_post_capella().unwrap_or(false) { - ReceivedPreCapella::No - } else { - ReceivedPreCapella::Yes - }; - if matches!(received_pre_capella, ReceivedPreCapella::No) { - publish_pubsub_message( - &network_tx, - PubsubMessage::BlsToExecutionChange(Box::new( - verified_address_change.as_inner().clone(), - )), - )?; - } - - // Import to op pool (may return `false` if there's a race). 
- let imported = chain.import_bls_to_execution_change( - verified_address_change, - received_pre_capella, - ); - - info!( - %validator_index, - ?address, - published = - matches!(received_pre_capella, ReceivedPreCapella::No), - imported, - "Processed BLS to execution change" - ); - } - Ok(ObservationOutcome::AlreadyKnown) => { - debug!(%validator_index, "BLS to execution change already known"); - } - Err(e) => { - warn!( - validator_index, - reason = ?e, - source = "HTTP", - "Invalid BLS to execution change" - ); - failures.push(api_types::Failure::new( - index, - format!("invalid: {e:?}"), - )); - } - } - } - - if failures.is_empty() { - Ok(()) - } else { - Err(warp_utils::reject::indexed_bad_request( - "some BLS to execution changes failed to verify".into(), - failures, - )) - } - }) - }, - ); - - let beacon_rewards_path = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("rewards")) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); - - // GET beacon/rewards/blocks/{block_id} - let get_beacon_rewards_blocks = beacon_rewards_path - .clone() - .and(warp::path("blocks")) - .and(block_id_or_err) - .and(warp::path::end()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - block_id: BlockId| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (rewards, execution_optimistic, finalized) = - standard_block_rewards::compute_beacon_block_rewards(chain, block_id)?; - Ok(api_types::GenericResponse::from(rewards)).map(|resp| { - resp.add_execution_optimistic_finalized(execution_optimistic, finalized) - }) - }) - }, - ); - - /* - * builder/states - */ - - let builder_states_path = eth_v1 - .and(warp::path("builder")) - .and(warp::path("states")) - .and(chain_filter.clone()); - - // GET builder/states/{state_id}/expected_withdrawals - let get_expected_withdrawals = builder_states_path - .clone() - .and(task_spawner_filter.clone()) - .and(warp::path::param::()) - .and(warp::path("expected_withdrawals")) - .and(warp::query::()) - 
.and(warp::path::end()) - .and(warp::header::optional::("accept")) - .then( - |chain: Arc>, - task_spawner: TaskSpawner, - state_id: StateId, - query: api_types::ExpectedWithdrawalsQuery, - accept_header: Option| { - task_spawner.blocking_response_task(Priority::P1, move || { - let (state, execution_optimistic, finalized) = state_id.state(&chain)?; - let proposal_slot = query.proposal_slot.unwrap_or(state.slot() + 1); - let withdrawals = - get_next_withdrawals::(&chain, state, state_id, proposal_slot)?; - - match accept_header { - Some(api_types::Accept::Ssz) => Response::builder() - .status(200) - .body(withdrawals.as_ssz_bytes().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }), - _ => Ok(warp::reply::json( - &api_types::ExecutionOptimisticFinalizedResponse { - data: withdrawals, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }, - ) - .into_response()), - } - }) - }, - ); - - /* - * beacon/light_client - */ - - let beacon_light_client_path = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("light_client")) - .and(light_client_server_filter) - .and(chain_filter.clone()); - - // GET beacon/light_client/bootstrap/{block_root} - let get_beacon_light_client_bootstrap = beacon_light_client_path - .clone() - .and(task_spawner_filter.clone()) - .and(warp::path("bootstrap")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid block root value".to_string(), - )) - })) - .and(warp::path::end()) - .and(warp::header::optional::("accept")) - .then( - |light_client_server_enabled: Result<(), Rejection>, - chain: Arc>, - task_spawner: TaskSpawner, - block_root: Hash256, - accept_header: Option| { - task_spawner.blocking_response_task(Priority::P1, move || { - light_client_server_enabled?; - get_light_client_bootstrap::(chain, &block_root, accept_header) - 
}) - }, - ); - - // GET beacon/light_client/optimistic_update - let get_beacon_light_client_optimistic_update = beacon_light_client_path - .clone() - .and(task_spawner_filter.clone()) - .and(warp::path("optimistic_update")) - .and(warp::path::end()) - .and(warp::header::optional::("accept")) - .then( - |light_client_server_enabled: Result<(), Rejection>, - chain: Arc>, - task_spawner: TaskSpawner, - accept_header: Option| { - task_spawner.blocking_response_task(Priority::P1, move || { - light_client_server_enabled?; - let update = chain - .light_client_server_cache - .get_latest_optimistic_update() - .ok_or_else(|| { - warp_utils::reject::custom_not_found( - "No LightClientOptimisticUpdate is available".to_string(), - ) - })?; - - let fork_name = chain - .spec - .fork_name_at_slot::(update.get_slot()); - match accept_header { - Some(api_types::Accept::Ssz) => Response::builder() - .status(200) - .body(update.as_ssz_bytes().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }), - _ => Ok(warp::reply::json(&beacon_response( - ResponseIncludesVersion::Yes(fork_name), - update, - )) - .into_response()), - } - .map(|resp| add_consensus_version_header(resp, fork_name)) - }) - }, - ); - - // GET beacon/light_client/finality_update - let get_beacon_light_client_finality_update = beacon_light_client_path - .clone() - .and(task_spawner_filter.clone()) - .and(warp::path("finality_update")) - .and(warp::path::end()) - .and(warp::header::optional::("accept")) - .then( - |light_client_server_enabled: Result<(), Rejection>, - chain: Arc>, - task_spawner: TaskSpawner, - accept_header: Option| { - task_spawner.blocking_response_task(Priority::P1, move || { - light_client_server_enabled?; - let update = chain - .light_client_server_cache - .get_latest_finality_update() - .ok_or_else(|| { - warp_utils::reject::custom_not_found( - "No 
LightClientFinalityUpdate is available".to_string(), - ) - })?; - - let fork_name = chain - .spec - .fork_name_at_slot::(update.signature_slot()); - match accept_header { - Some(api_types::Accept::Ssz) => Response::builder() - .status(200) - .body(update.as_ssz_bytes().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }), - _ => Ok(warp::reply::json(&beacon_response( - ResponseIncludesVersion::Yes(fork_name), - update, - )) - .into_response()), - } - .map(|resp| add_consensus_version_header(resp, fork_name)) - }) - }, - ); - - // GET beacon/light_client/updates - let get_beacon_light_client_updates = beacon_light_client_path - .clone() - .and(task_spawner_filter.clone()) - .and(warp::path("updates")) - .and(warp::path::end()) - .and(warp::query::()) - .and(warp::header::optional::("accept")) - .then( - |light_client_server_enabled: Result<(), Rejection>, - chain: Arc>, - task_spawner: TaskSpawner, - query: LightClientUpdatesQuery, - accept_header: Option| { - task_spawner.blocking_response_task(Priority::P1, move || { - light_client_server_enabled?; - get_light_client_updates::(chain, query, accept_header) - }) - }, - ); - - /* - * beacon/rewards - */ - - let beacon_rewards_path = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("rewards")) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); - - // POST beacon/rewards/attestations/{epoch} - let post_beacon_rewards_attestations = beacon_rewards_path - .clone() - .and(warp::path("attestations")) - .and(warp::path::param::()) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - epoch: Epoch, - validators: Vec| { - task_spawner.blocking_json_task(Priority::P1, move || { - let attestation_rewards = chain - .compute_attestation_rewards(epoch, validators) - .map_err(|e| match e { - 
BeaconChainError::MissingBeaconState(root) => { - warp_utils::reject::custom_not_found(format!( - "missing state {root:?}", - )) - } - BeaconChainError::NoStateForSlot(slot) => { - warp_utils::reject::custom_not_found(format!( - "missing state at slot {slot}" - )) - } - BeaconChainError::BeaconStateError( - BeaconStateError::UnknownValidator(validator_index), - ) => warp_utils::reject::custom_bad_request(format!( - "validator is unknown: {validator_index}" - )), - BeaconChainError::ValidatorPubkeyUnknown(pubkey) => { - warp_utils::reject::custom_bad_request(format!( - "validator pubkey is unknown: {pubkey:?}" - )) - } - e => warp_utils::reject::custom_server_error(format!( - "unexpected error: {:?}", - e - )), - })?; - let execution_optimistic = - chain.is_optimistic_or_invalid_head().unwrap_or_default(); - - Ok(api_types::GenericResponse::from(attestation_rewards)) - .map(|resp| resp.add_execution_optimistic(execution_optimistic)) - }) - }, - ); - - // POST beacon/rewards/sync_committee/{block_id} - let post_beacon_rewards_sync_committee = beacon_rewards_path - .clone() - .and(warp::path("sync_committee")) - .and(block_id_or_err) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - block_id: BlockId, - validators: Vec| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (rewards, execution_optimistic, finalized) = - sync_committee_rewards::compute_sync_committee_rewards( - chain, block_id, validators, - )?; - - Ok(api_types::GenericResponse::from(rewards)).map(|resp| { - resp.add_execution_optimistic_finalized(execution_optimistic, finalized) - }) - }) - }, - ); - - /* - * config - */ - - let config_path = eth_v1.and(warp::path("config")); - - // GET config/fork_schedule - let get_config_fork_schedule = config_path - .and(warp::path("fork_schedule")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |task_spawner: TaskSpawner, chain: 
Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let forks = ForkName::list_all() - .into_iter() - .filter_map(|fork_name| chain.spec.fork_for_name(fork_name)) - .collect::>(); - Ok(api_types::GenericResponse::from(forks)) - }) - }, - ); - - // GET config/spec - let get_config_spec = config_path - .and(warp::path("spec")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - move |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - let config_and_preset = - ConfigAndPreset::from_chain_spec::(&chain.spec); - Ok(api_types::GenericResponse::from(config_and_preset)) - }) - }, - ); - - // GET config/deposit_contract - let get_config_deposit_contract = config_path - .and(warp::path("deposit_contract")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - Ok(api_types::GenericResponse::from( - api_types::DepositContractData { - address: chain.spec.deposit_contract_address, - chain_id: chain.spec.deposit_chain_id, - }, - )) - }) - }, - ); - - /* - * debug - */ - - // GET debug/beacon/data_column_sidecars/{block_id} - let get_debug_data_column_sidecars = eth_v1 - .and(warp::path("debug")) - .and(warp::path("beacon")) - .and(warp::path("data_column_sidecars")) - .and(block_id_or_err) - .and(warp::path::end()) - .and(multi_key_query::()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(warp::header::optional::("accept")) - .then( - |block_id: BlockId, - indices_res: Result, - task_spawner: TaskSpawner, - chain: Arc>, - accept_header: Option| { - task_spawner.blocking_response_task(Priority::P1, move || { - let indices = indices_res?; - let (data_columns, fork_name, execution_optimistic, finalized) = - block_id.get_data_columns(indices, &chain)?; - - match accept_header { - 
Some(api_types::Accept::Ssz) => Response::builder() - .status(200) - .body(data_columns.as_ssz_bytes().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }), - _ => { - // Post as a V2 endpoint so we return the fork version. - let res = execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::Yes(fork_name), - execution_optimistic, - finalized, - &data_columns, - )?; - Ok(warp::reply::json(&res).into_response()) - } - } - .map(|resp| add_consensus_version_header(resp, fork_name)) - }) - }, - ); - - // GET debug/beacon/states/{state_id} - let get_debug_beacon_states = any_version - .and(warp::path("debug")) - .and(warp::path("beacon")) - .and(warp::path("states")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid state ID".to_string(), - )) - })) - .and(warp::path::end()) - .and(warp::header::optional::("accept")) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |_endpoint_version: EndpointVersion, - state_id: StateId, - accept_header: Option, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_response_task(Priority::P1, move || match accept_header { - Some(api_types::Accept::Ssz) => { - // We can ignore the optimistic status for the "fork" since it's a - // specification constant that doesn't change across competing heads of the - // beacon chain. 
- let t = std::time::Instant::now(); - let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?; - let fork_name = state - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - let timer = metrics::start_timer(&metrics::HTTP_API_STATE_SSZ_ENCODE_TIMES); - let response_bytes = state.as_ssz_bytes(); - drop(timer); - debug!( - total_time_ms = t.elapsed().as_millis(), - target_slot = %state.slot(), - "HTTP state load" - ); - - Response::builder() - .status(200) - .body(response_bytes.into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map(|resp: warp::reply::Response| { - add_consensus_version_header(resp, fork_name) - }) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }) - } - _ => state_id.map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let fork_name = state - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - let res = execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::Yes(fork_name), - execution_optimistic, - finalized, - &state, - )?; - Ok(add_consensus_version_header( - warp::reply::json(&res).into_response(), - fork_name, - )) - }, - ), - }) - }, - ); - - // GET debug/beacon/heads - let get_debug_beacon_heads = any_version - .and(warp::path("debug")) + let beacon_rewards_path = eth_v1 + .clone() .and(warp::path("beacon")) - .and(warp::path("heads")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |endpoint_version: EndpointVersion, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let heads = chain - .heads() - .into_iter() - .map(|(root, slot)| { - let execution_optimistic = if endpoint_version == V1 { - None - } else if endpoint_version == V2 { - chain - .canonical_head - .fork_choice_read_lock() - .is_optimistic_or_invalid_block(&root) - .ok() 
- } else { - return Err(unsupported_version_rejection(endpoint_version)); - }; - Ok(api_types::ChainHeadData { - slot, - root, - execution_optimistic, - }) - }) - .collect::, warp::Rejection>>(); - Ok(api_types::GenericResponse::from(heads?)) - }) - }, - ); - - // GET debug/fork_choice - let get_debug_fork_choice = eth_v1 - .and(warp::path("debug")) - .and(warp::path("fork_choice")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let beacon_fork_choice = chain.canonical_head.fork_choice_read_lock(); - - let proto_array = beacon_fork_choice.proto_array().core_proto_array(); - - let fork_choice_nodes = proto_array - .nodes - .iter() - .map(|node| { - let execution_status = if node.execution_status.is_execution_enabled() { - Some(node.execution_status.to_string()) - } else { - None - }; - - ForkChoiceNode { - slot: node.slot, - block_root: node.root, - parent_root: node - .parent - .and_then(|index| proto_array.nodes.get(index)) - .map(|parent| parent.root), - justified_epoch: node.justified_checkpoint.epoch, - finalized_epoch: node.finalized_checkpoint.epoch, - weight: node.weight, - validity: execution_status, - execution_block_hash: node - .execution_status - .block_hash() - .map(|block_hash| block_hash.into_root()), - extra_data: ForkChoiceExtraData { - target_root: node.target_root, - justified_root: node.justified_checkpoint.root, - finalized_root: node.finalized_checkpoint.root, - unrealized_justified_root: node - .unrealized_justified_checkpoint - .map(|checkpoint| checkpoint.root), - unrealized_finalized_root: node - .unrealized_finalized_checkpoint - .map(|checkpoint| checkpoint.root), - unrealized_justified_epoch: node - .unrealized_justified_checkpoint - .map(|checkpoint| checkpoint.epoch), - unrealized_finalized_epoch: node - .unrealized_finalized_checkpoint - .map(|checkpoint| checkpoint.epoch), - 
execution_status: node.execution_status.to_string(), - best_child: node - .best_child - .and_then(|index| proto_array.nodes.get(index)) - .map(|child| child.root), - best_descendant: node - .best_descendant - .and_then(|index| proto_array.nodes.get(index)) - .map(|descendant| descendant.root), - }, - } - }) - .collect::>(); - Ok(ForkChoice { - justified_checkpoint: beacon_fork_choice.justified_checkpoint(), - finalized_checkpoint: beacon_fork_choice.finalized_checkpoint(), - fork_choice_nodes, - }) - }) - }, - ); - - /* - * node - */ - - // GET node/identity - let get_node_identity = eth_v1 - .and(warp::path("node")) - .and(warp::path("identity")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(network_globals.clone()) - .and(chain_filter.clone()) - .then( - |task_spawner: TaskSpawner, - network_globals: Arc>, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let enr = network_globals.local_enr(); - let p2p_addresses = enr.multiaddr_p2p_tcp(); - let discovery_addresses = enr.multiaddr_p2p_udp(); - Ok(api_types::GenericResponse::from(api_types::IdentityData { - peer_id: network_globals.local_peer_id().to_base58(), - enr, - p2p_addresses, - discovery_addresses, - metadata: from_meta_data::( - &network_globals.local_metadata, - &chain.spec, - ), - })) - }) - }, - ); - - // GET node/version - let get_node_version = eth_v1 - .and(warp::path("node")) - .and(warp::path("version")) - .and(warp::path::end()) - // Bypass the `task_spawner` since this method returns a static string. 
- .then(|| async { - warp::reply::json(&api_types::GenericResponse::from(api_types::VersionData { - version: version_with_platform(), - })) - .into_response() - }); - - // GET node/syncing - let get_node_syncing = eth_v1 - .and(warp::path("node")) - .and(warp::path("syncing")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(network_globals.clone()) - .and(chain_filter.clone()) - .then( - |task_spawner: TaskSpawner, - network_globals: Arc>, - chain: Arc>| { - async move { - let el_offline = if let Some(el) = &chain.execution_layer { - el.is_offline_or_erroring().await - } else { - true - }; - - task_spawner - .blocking_json_task(Priority::P0, move || { - let (head, head_execution_status) = chain - .canonical_head - .head_and_execution_status() - .map_err(warp_utils::reject::unhandled_error)?; - let head_slot = head.head_slot(); - let current_slot = - chain.slot_clock.now_or_genesis().ok_or_else(|| { - warp_utils::reject::custom_server_error( - "Unable to read slot clock".into(), - ) - })?; - - // Taking advantage of saturating subtraction on slot. - let sync_distance = current_slot - head_slot; - - let is_optimistic = head_execution_status.is_optimistic_or_invalid(); - - // When determining sync status, make an exception for single-node - // testnets with 0 peers. 
- let sync_state = network_globals.sync_state.read(); - let is_synced = sync_state.is_synced() - || (sync_state.is_stalled() - && network_globals.config.target_peers == 0); - drop(sync_state); - - let syncing_data = api_types::SyncingData { - is_syncing: !is_synced, - is_optimistic, - el_offline, - head_slot, - sync_distance, - }; - - Ok(api_types::GenericResponse::from(syncing_data)) - }) - .await - } - }, - ); + .and(warp::path("rewards")) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()); - // GET node/health - let get_node_health = eth_v1 - .and(warp::path("node")) - .and(warp::path("health")) + // POST beacon/rewards/attestations/{epoch} + let post_beacon_rewards_attestations = beacon_rewards_path + .clone() + .and(warp::path("attestations")) + .and(warp::path::param::()) .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(network_globals.clone()) - .and(chain_filter.clone()) + .and(warp_utils::json::json()) .then( |task_spawner: TaskSpawner, - network_globals: Arc>, - chain: Arc>| { - async move { - let el_offline = if let Some(el) = &chain.execution_layer { - el.is_offline_or_erroring().await - } else { - true - }; - - task_spawner - .blocking_response_task(Priority::P0, move || { - let is_optimistic = chain - .is_optimistic_or_invalid_head() - .map_err(warp_utils::reject::unhandled_error)?; - - let is_syncing = !network_globals.sync_state.read().is_synced(); - - if el_offline { - Err(warp_utils::reject::not_synced( - "execution layer is offline".to_string(), + chain: Arc>, + epoch: Epoch, + validators: Vec| { + task_spawner.blocking_json_task(Priority::P1, move || { + let attestation_rewards = chain + .compute_attestation_rewards(epoch, validators) + .map_err(|e| match e { + BeaconChainError::MissingBeaconState(root) => { + warp_utils::reject::custom_not_found(format!( + "missing state {root:?}", )) - } else if is_syncing || is_optimistic { - Ok(warp::reply::with_status( - warp::reply(), - 
warp::http::StatusCode::PARTIAL_CONTENT, + } + BeaconChainError::NoStateForSlot(slot) => { + warp_utils::reject::custom_not_found(format!( + "missing state at slot {slot}" )) - } else { - Ok(warp::reply::with_status( - warp::reply(), - warp::http::StatusCode::OK, + } + BeaconChainError::BeaconStateError( + BeaconStateError::UnknownValidator(validator_index), + ) => warp_utils::reject::custom_bad_request(format!( + "validator is unknown: {validator_index}" + )), + BeaconChainError::ValidatorPubkeyUnknown(pubkey) => { + warp_utils::reject::custom_bad_request(format!( + "validator pubkey is unknown: {pubkey:?}" )) } - }) - .await - } + e => warp_utils::reject::custom_server_error(format!( + "unexpected error: {:?}", + e + )), + })?; + let execution_optimistic = + chain.is_optimistic_or_invalid_head().unwrap_or_default(); + + Ok(api_types::GenericResponse::from(attestation_rewards)) + .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + }) }, ); - // GET node/peers/{peer_id} - let get_node_peers_by_id = eth_v1 - .and(warp::path("node")) - .and(warp::path("peers")) - .and(warp::path::param::()) + // POST beacon/rewards/sync_committee/{block_id} + let post_beacon_rewards_sync_committee = beacon_rewards_path + .clone() + .and(warp::path("sync_committee")) + .and(block_id_or_err) .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(network_globals.clone()) + .and(warp_utils::json::json()) .then( - |requested_peer_id: String, - task_spawner: TaskSpawner, - network_globals: Arc>| { + |task_spawner: TaskSpawner, + chain: Arc>, + block_id: BlockId, + validators: Vec| { task_spawner.blocking_json_task(Priority::P1, move || { - let peer_id = PeerId::from_bytes( - &bs58::decode(requested_peer_id.as_str()) - .into_vec() - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "invalid peer id: {}", - e - )) - })?, - ) - .map_err(|_| { - warp_utils::reject::custom_bad_request("invalid peer id.".to_string()) - })?; - - if let Some(peer_info) = 
network_globals.peers.read().peer_info(&peer_id) { - let address = if let Some(multiaddr) = peer_info.seen_multiaddrs().next() { - multiaddr.to_string() - } else if let Some(addr) = peer_info.listening_addresses().first() { - addr.to_string() - } else { - String::new() - }; + let (rewards, execution_optimistic, finalized) = + sync_committee_rewards::compute_sync_committee_rewards( + chain, block_id, validators, + )?; - // the eth2 API spec implies only peers we have been connected to at some point should be included. - if let Some(&dir) = peer_info.connection_direction() { - return Ok(api_types::GenericResponse::from(api_types::PeerData { - peer_id: peer_id.to_string(), - enr: peer_info.enr().map(|enr| enr.to_base64()), - last_seen_p2p_address: address, - direction: dir.into(), - state: peer_info.connection_status().clone().into(), - })); - } - } - Err(warp_utils::reject::custom_not_found( - "peer not found.".to_string(), - )) + Ok(api_types::GenericResponse::from(rewards)).map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }, ); - // GET node/peers - let get_node_peers = eth_v1 - .and(warp::path("node")) - .and(warp::path("peers")) + /* + * config + */ + + let config_path = eth_v1.clone().and(warp::path("config")); + + // GET config/fork_schedule + let get_config_fork_schedule = config_path + .clone() + .and(warp::path("fork_schedule")) .and(warp::path::end()) - .and(multi_key_query::()) .and(task_spawner_filter.clone()) - .and(network_globals.clone()) + .and(chain_filter.clone()) .then( - |query_res: Result, - task_spawner: TaskSpawner, - network_globals: Arc>| { + |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { - let query = query_res?; - let mut peers: Vec = Vec::new(); - network_globals - .peers - .read() - .peers() - .for_each(|(peer_id, peer_info)| { - let address = - if let Some(multiaddr) = peer_info.seen_multiaddrs().next() { - multiaddr.to_string() - } else if 
let Some(addr) = peer_info.listening_addresses().first() { - addr.to_string() - } else { - String::new() - }; - - // the eth2 API spec implies only peers we have been connected to at some point should be included. - if let Some(&dir) = peer_info.connection_direction() { - let direction = dir.into(); - let state = peer_info.connection_status().clone().into(); - - let state_matches = query - .state - .as_ref() - .is_none_or(|states| states.contains(&state)); - let direction_matches = query - .direction - .as_ref() - .is_none_or(|directions| directions.contains(&direction)); - - if state_matches && direction_matches { - peers.push(api_types::PeerData { - peer_id: peer_id.to_string(), - enr: peer_info.enr().map(|enr| enr.to_base64()), - last_seen_p2p_address: address, - direction, - state, - }); - } - } - }); - Ok(api_types::PeersData { - meta: api_types::PeersMetaData { - count: peers.len() as u64, - }, - data: peers, - }) + let forks = ForkName::list_all() + .into_iter() + .filter_map(|fork_name| chain.spec.fork_for_name(fork_name)) + .collect::>(); + Ok(api_types::GenericResponse::from(forks)) }) }, ); - // GET node/peer_count - let get_node_peer_count = eth_v1 - .and(warp::path("node")) - .and(warp::path("peer_count")) + // GET config/spec + let get_config_spec = config_path + .clone() + .and(warp::path("spec")) .and(warp::path::end()) .and(task_spawner_filter.clone()) - .and(network_globals.clone()) + .and(chain_filter.clone()) .then( - |task_spawner: TaskSpawner, - network_globals: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let mut connected: u64 = 0; - let mut connecting: u64 = 0; - let mut disconnected: u64 = 0; - let mut disconnecting: u64 = 0; - - network_globals - .peers - .read() - .peers() - .for_each(|(_, peer_info)| { - let state = - api_types::PeerState::from(peer_info.connection_status().clone()); - match state { - api_types::PeerState::Connected => connected += 1, - api_types::PeerState::Connecting => connecting += 1, - 
api_types::PeerState::Disconnected => disconnected += 1, - api_types::PeerState::Disconnecting => disconnecting += 1, - } - }); - - Ok(api_types::GenericResponse::from(api_types::PeerCount { - connected, - connecting, - disconnected, - disconnecting, - })) + move |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let config_and_preset = + ConfigAndPreset::from_chain_spec::(&chain.spec); + Ok(api_types::GenericResponse::from(config_and_preset)) }) }, ); - /* - * validator - */ - // GET validator/duties/proposer/{epoch} - let get_validator_duties_proposer = eth_v1 - .and(warp::path("validator")) - .and(warp::path("duties")) - .and(warp::path("proposer")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid epoch".to_string(), - )) - })) + // GET config/deposit_contract + let get_config_deposit_contract = config_path + .and(warp::path("deposit_contract")) .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .then( - |epoch: Epoch, - not_synced_filter: Result<(), Rejection>, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - proposer_duties::proposer_duties(epoch, &chain) + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + Ok(api_types::GenericResponse::from( + api_types::DepositContractData { + address: chain.spec.deposit_contract_address, + chain_id: chain.spec.deposit_chain_id, + }, + )) }) }, ); - // GET validator/blocks/{slot} - let get_validator_blocks = any_version - .and(warp::path("validator")) - .and(warp::path("blocks")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid slot".to_string(), - )) - })) + /* + * debug + */ + + // GET debug/beacon/data_column_sidecars/{block_id} + let 
get_debug_data_column_sidecars = eth_v1 + .clone() + .and(warp::path("debug")) + .and(warp::path("beacon")) + .and(warp::path("data_column_sidecars")) + .and(block_id_or_err) .and(warp::path::end()) - .and(warp::header::optional::("accept")) - .and(not_while_syncing_filter.clone()) - .and(warp::query::()) + .and(multi_key_query::()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) + .and(warp::header::optional::("accept")) .then( - |endpoint_version: EndpointVersion, - slot: Slot, - accept_header: Option, - not_synced_filter: Result<(), Rejection>, - query: api_types::ValidatorBlocksQuery, + |block_id: BlockId, + indices_res: Result, task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - debug!(?slot, "Block production request from HTTP API"); - - not_synced_filter?; + chain: Arc>, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let indices = indices_res?; + let (data_columns, fork_name, execution_optimistic, finalized) = + block_id.get_data_columns(indices, &chain)?; - if endpoint_version == V3 { - produce_block_v3(accept_header, chain, slot, query).await - } else { - produce_block_v2(accept_header, chain, slot, query).await + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(data_columns.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => { + // Post as a V2 endpoint so we return the fork version. 
+ let res = execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + &data_columns, + )?; + Ok(warp::reply::json(&res).into_response()) + } } + .map(|resp| add_consensus_version_header(resp, fork_name)) }) }, ); - // GET validator/blinded_blocks/{slot} - let get_validator_blinded_blocks = eth_v1 - .and(warp::path("validator")) - .and(warp::path("blinded_blocks")) - .and(warp::path::param::().or_else(|_| async { + // GET debug/beacon/states/{state_id} + let get_debug_beacon_states = any_version + .clone() + .and(warp::path("debug")) + .and(warp::path("beacon")) + .and(warp::path("states")) + .and(warp::path::param::().or_else(|_| async { Err(warp_utils::reject::custom_bad_request( - "Invalid slot".to_string(), + "Invalid state ID".to_string(), )) })) .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) - .and(warp::query::()) .and(warp::header::optional::("accept")) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .then( - |slot: Slot, - not_synced_filter: Result<(), Rejection>, - query: api_types::ValidatorBlocksQuery, + |_endpoint_version: EndpointVersion, + state_id: StateId, accept_header: Option, task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - not_synced_filter?; - produce_blinded_block_v2(accept_header, chain, slot, query).await + task_spawner.blocking_response_task(Priority::P1, move || match accept_header { + Some(api_types::Accept::Ssz) => { + // We can ignore the optimistic status for the "fork" since it's a + // specification constant that doesn't change across competing heads of the + // beacon chain. 
+ let t = std::time::Instant::now(); + let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?; + let fork_name = state + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + let timer = metrics::start_timer(&metrics::HTTP_API_STATE_SSZ_ENCODE_TIMES); + let response_bytes = state.as_ssz_bytes(); + drop(timer); + debug!( + total_time_ms = t.elapsed().as_millis(), + target_slot = %state.slot(), + "HTTP state load" + ); + + Response::builder() + .status(200) + .body(response_bytes.into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map(|resp: warp::reply::Response| { + add_consensus_version_header(resp, fork_name) + }) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }) + } + _ => state_id.map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let fork_name = state + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + let res = execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + &state, + )?; + Ok(add_consensus_version_header( + warp::reply::json(&res).into_response(), + fork_name, + )) + }, + ), }) }, ); - // GET validator/attestation_data?slot,committee_index - let get_validator_attestation_data = eth_v1 - .and(warp::path("validator")) - .and(warp::path("attestation_data")) + // GET debug/beacon/heads + let get_debug_beacon_heads = any_version + .clone() + .and(warp::path("debug")) + .and(warp::path("beacon")) + .and(warp::path("heads")) .and(warp::path::end()) - .and(warp::query::()) - .and(not_while_syncing_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .then( - |query: api_types::ValidatorAttestationDataQuery, - not_synced_filter: Result<(), Rejection>, + |endpoint_version: EndpointVersion, task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, 
move || { - not_synced_filter?; - - let current_slot = chain.slot().map_err(warp_utils::reject::unhandled_error)?; - - // allow a tolerance of one slot to account for clock skew - if query.slot > current_slot + 1 { - return Err(warp_utils::reject::custom_bad_request(format!( - "request slot {} is more than one slot past the current slot {}", - query.slot, current_slot - ))); - } - - chain - .produce_unaggregated_attestation(query.slot, query.committee_index) - .map(|attestation| attestation.data().clone()) - .map(api_types::GenericResponse::from) - .map_err(warp_utils::reject::unhandled_error) + task_spawner.blocking_json_task(Priority::P1, move || { + let heads = chain + .heads() + .into_iter() + .map(|(root, slot)| { + let execution_optimistic = if endpoint_version == V1 { + None + } else if endpoint_version == V2 { + chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_or_invalid_block(&root) + .ok() + } else { + return Err(unsupported_version_rejection(endpoint_version)); + }; + Ok(api_types::ChainHeadData { + slot, + root, + execution_optimistic, + }) + }) + .collect::, warp::Rejection>>(); + Ok(api_types::GenericResponse::from(heads?)) }) }, ); - // GET validator/aggregate_attestation?attestation_data_root,slot - let get_validator_aggregate_attestation = any_version - .and(warp::path("validator")) - .and(warp::path("aggregate_attestation")) + // GET debug/fork_choice + let get_debug_fork_choice = eth_v1 + .clone() + .and(warp::path("debug")) + .and(warp::path("fork_choice")) .and(warp::path::end()) - .and(warp::query::()) - .and(not_while_syncing_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .then( - |endpoint_version: EndpointVersion, - query: api_types::ValidatorAggregateAttestationQuery, - not_synced_filter: Result<(), Rejection>, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_response_task(Priority::P0, move || { - not_synced_filter?; - 
crate::aggregate_attestation::get_aggregate_attestation( - query.slot, - &query.attestation_data_root, - query.committee_index, - endpoint_version, - chain, - ) + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let beacon_fork_choice = chain.canonical_head.fork_choice_read_lock(); + + let proto_array = beacon_fork_choice.proto_array().core_proto_array(); + + let fork_choice_nodes = proto_array + .nodes + .iter() + .map(|node| { + let execution_status = if node.execution_status.is_execution_enabled() { + Some(node.execution_status.to_string()) + } else { + None + }; + + ForkChoiceNode { + slot: node.slot, + block_root: node.root, + parent_root: node + .parent + .and_then(|index| proto_array.nodes.get(index)) + .map(|parent| parent.root), + justified_epoch: node.justified_checkpoint.epoch, + finalized_epoch: node.finalized_checkpoint.epoch, + weight: node.weight, + validity: execution_status, + execution_block_hash: node + .execution_status + .block_hash() + .map(|block_hash| block_hash.into_root()), + extra_data: ForkChoiceExtraData { + target_root: node.target_root, + justified_root: node.justified_checkpoint.root, + finalized_root: node.finalized_checkpoint.root, + unrealized_justified_root: node + .unrealized_justified_checkpoint + .map(|checkpoint| checkpoint.root), + unrealized_finalized_root: node + .unrealized_finalized_checkpoint + .map(|checkpoint| checkpoint.root), + unrealized_justified_epoch: node + .unrealized_justified_checkpoint + .map(|checkpoint| checkpoint.epoch), + unrealized_finalized_epoch: node + .unrealized_finalized_checkpoint + .map(|checkpoint| checkpoint.epoch), + execution_status: node.execution_status.to_string(), + best_child: node + .best_child + .and_then(|index| proto_array.nodes.get(index)) + .map(|child| child.root), + best_descendant: node + .best_descendant + .and_then(|index| proto_array.nodes.get(index)) + .map(|descendant| descendant.root), + }, + } + }) + .collect::>(); + 
Ok(ForkChoice { + justified_checkpoint: beacon_fork_choice.justified_checkpoint(), + finalized_checkpoint: beacon_fork_choice.finalized_checkpoint(), + fork_choice_nodes, + }) }) }, ); - // POST validator/duties/attester/{epoch} - let post_validator_duties_attester = eth_v1 - .and(warp::path("validator")) - .and(warp::path("duties")) - .and(warp::path("attester")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid epoch".to_string(), - )) - })) + /* + * node + */ + + // GET node/identity + let get_node_identity = eth_v1 + .clone() + .and(warp::path("node")) + .and(warp::path("identity")) .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) - .and(warp_utils::json::json()) .and(task_spawner_filter.clone()) + .and(network_globals.clone()) .and(chain_filter.clone()) .then( - |epoch: Epoch, - not_synced_filter: Result<(), Rejection>, - indices: api_types::ValidatorIndexData, - task_spawner: TaskSpawner, + |task_spawner: TaskSpawner, + network_globals: Arc>, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - attester_duties::attester_duties(epoch, &indices.0, &chain) + task_spawner.blocking_json_task(Priority::P1, move || { + let enr = network_globals.local_enr(); + let p2p_addresses = enr.multiaddr_p2p_tcp(); + let discovery_addresses = enr.multiaddr_p2p_udp(); + Ok(api_types::GenericResponse::from(api_types::IdentityData { + peer_id: network_globals.local_peer_id().to_base58(), + enr: enr.to_base64(), + p2p_addresses: p2p_addresses.iter().map(|a| a.to_string()).collect(), + discovery_addresses: discovery_addresses + .iter() + .map(|a| a.to_string()) + .collect(), + metadata: utils::from_meta_data::( + &network_globals.local_metadata, + &chain.spec, + ), + })) }) }, ); - // POST validator/duties/sync/{epoch} - let post_validator_duties_sync = eth_v1 - .and(warp::path("validator")) - .and(warp::path("duties")) - .and(warp::path("sync")) - 
.and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid epoch".to_string(), - )) - })) + // GET node/version + let get_node_version = eth_v1 + .clone() + .and(warp::path("node")) + .and(warp::path("version")) .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) - .and(warp_utils::json::json()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |epoch: Epoch, - not_synced_filter: Result<(), Rejection>, - indices: api_types::ValidatorIndexData, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - sync_committees::sync_committee_duties(epoch, &indices.0, &chain) - }) - }, - ); + // Bypass the `task_spawner` since this method returns a static string. + .then(|| async { + warp::reply::json(&api_types::GenericResponse::from(api_types::VersionData { + version: version_with_platform(), + })) + .into_response() + }); - // GET validator/sync_committee_contribution - let get_validator_sync_committee_contribution = eth_v1 - .and(warp::path("validator")) - .and(warp::path("sync_committee_contribution")) + // GET node/syncing + let get_node_syncing = eth_v1 + .clone() + .and(warp::path("node")) + .and(warp::path("syncing")) .and(warp::path::end()) - .and(warp::query::()) - .and(not_while_syncing_filter.clone()) .and(task_spawner_filter.clone()) + .and(network_globals.clone()) .and(chain_filter.clone()) .then( - |sync_committee_data: SyncContributionData, - not_synced_filter: Result<(), Rejection>, - task_spawner: TaskSpawner, + |task_spawner: TaskSpawner, + network_globals: Arc>, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - chain - .get_aggregated_sync_committee_contribution(&sync_committee_data) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "unable to fetch sync contribution: {:?}", - e - )) - })? 
- .map(api_types::GenericResponse::from) - .ok_or_else(|| { - warp_utils::reject::custom_not_found( - "no matching sync contribution found".to_string(), - ) + async move { + let el_offline = if let Some(el) = &chain.execution_layer { + el.is_offline_or_erroring().await + } else { + true + }; + + task_spawner + .blocking_json_task(Priority::P0, move || { + let (head, head_execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::unhandled_error)?; + let head_slot = head.head_slot(); + let current_slot = + chain.slot_clock.now_or_genesis().ok_or_else(|| { + warp_utils::reject::custom_server_error( + "Unable to read slot clock".into(), + ) + })?; + + // Taking advantage of saturating subtraction on slot. + let sync_distance = current_slot - head_slot; + + let is_optimistic = head_execution_status.is_optimistic_or_invalid(); + + // When determining sync status, make an exception for single-node + // testnets with 0 peers. + let sync_state = network_globals.sync_state.read(); + let is_synced = sync_state.is_synced() + || (sync_state.is_stalled() + && network_globals.config.target_peers == 0); + drop(sync_state); + + let syncing_data = api_types::SyncingData { + is_syncing: !is_synced, + is_optimistic, + el_offline, + head_slot, + sync_distance, + }; + + Ok(api_types::GenericResponse::from(syncing_data)) }) - }) + .await + } }, ); - // POST validator/aggregate_and_proofs - let post_validator_aggregate_and_proofs = any_version - .and(warp::path("validator")) - .and(warp::path("aggregate_and_proofs")) + // GET node/health + let get_node_health = eth_v1 + .clone() + .and(warp::path("node")) + .and(warp::path("health")) .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) .and(task_spawner_filter.clone()) + .and(network_globals.clone()) .and(chain_filter.clone()) - .and(warp_utils::json::json()) - .and(network_tx_filter.clone()) .then( - // V1 and V2 are identical except V2 has a consensus version header in the 
request. - // We only require this header for SSZ deserialization, which isn't supported for - // this endpoint presently. - |_endpoint_version: EndpointVersion, - not_synced_filter: Result<(), Rejection>, - task_spawner: TaskSpawner, - chain: Arc>, - aggregates: Vec>, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - let seen_timestamp = timestamp_now(); - let mut verified_aggregates = Vec::with_capacity(aggregates.len()); - let mut messages = Vec::with_capacity(aggregates.len()); - let mut failures = Vec::new(); - - // Verify that all messages in the post are valid before processing further - for (index, aggregate) in aggregates.iter().enumerate() { - match chain.verify_aggregated_attestation_for_gossip(aggregate) { - Ok(verified_aggregate) => { - messages.push(PubsubMessage::AggregateAndProofAttestation(Box::new( - verified_aggregate.aggregate().clone(), - ))); - - // Notify the validator monitor. - chain - .validator_monitor - .read() - .register_api_aggregated_attestation( - seen_timestamp, - verified_aggregate.aggregate(), - verified_aggregate.indexed_attestation(), - &chain.slot_clock, - ); - - verified_aggregates.push((index, verified_aggregate)); - } - // If we already know the attestation, don't broadcast it or attempt to - // further verify it. Return success. - // - // It's reasonably likely that two different validators produce - // identical aggregates, especially if they're using the same beacon - // node. - Err(AttnError::AttestationSupersetKnown(_)) => continue, - // If we've already seen this aggregator produce an aggregate, just - // skip this one. - // - // We're likely to see this with VCs that use fallback BNs. The first - // BN might time-out *after* publishing the aggregate and then the - // second BN will indicate it's already seen the aggregate. 
- // - // There's no actual error for the user or the network since the - // aggregate has been successfully published by some other node. - Err(AttnError::AggregatorAlreadyKnown(_)) => continue, - Err(e) => { - error!( - error = ?e, - request_index = index, - aggregator_index = aggregate.message().aggregator_index(), - attestation_index = aggregate.message().aggregate().committee_index(), - attestation_slot = %aggregate.message().aggregate().data().slot, - "Failure verifying aggregate and proofs" - ); - failures.push(api_types::Failure::new(index, format!("Verification: {:?}", e))); - } - } - } + |task_spawner: TaskSpawner, + network_globals: Arc>, + chain: Arc>| { + async move { + let el_offline = if let Some(el) = &chain.execution_layer { + el.is_offline_or_erroring().await + } else { + true + }; - // Publish aggregate attestations to the libp2p network - if !messages.is_empty() { - publish_network_message(&network_tx, NetworkMessage::Publish { messages })?; - } + task_spawner + .blocking_response_task(Priority::P0, move || { + let is_optimistic = chain + .is_optimistic_or_invalid_head() + .map_err(warp_utils::reject::unhandled_error)?; - // Import aggregate attestations - for (index, verified_aggregate) in verified_aggregates { - if let Err(e) = chain.apply_attestation_to_fork_choice(&verified_aggregate) { - error!( - error = ?e, - request_index = index, - aggregator_index = verified_aggregate.aggregate().message().aggregator_index(), - attestation_index = verified_aggregate.attestation().committee_index(), - attestation_slot = %verified_aggregate.attestation().data().slot, - "Failure applying verified aggregate attestation to fork choice" - ); - failures.push(api_types::Failure::new(index, format!("Fork choice: {:?}", e))); - } - if let Err(e) = chain.add_to_block_inclusion_pool(verified_aggregate) { - warn!( - error = ?e, - request_index = index, - "Could not add verified aggregate attestation to the inclusion pool" - ); - 
failures.push(api_types::Failure::new(index, format!("Op pool: {:?}", e))); - } - } + let is_syncing = !network_globals.sync_state.read().is_synced(); - if !failures.is_empty() { - Err(warp_utils::reject::indexed_bad_request("error processing aggregate and proofs".to_string(), - failures, - )) - } else { - Ok(()) - } - }) + if el_offline { + Err(warp_utils::reject::not_synced( + "execution layer is offline".to_string(), + )) + } else if is_syncing || is_optimistic { + Ok(warp::reply::with_status( + warp::reply(), + warp::http::StatusCode::PARTIAL_CONTENT, + )) + } else { + Ok(warp::reply::with_status( + warp::reply(), + warp::http::StatusCode::OK, + )) + } + }) + .await + } }, ); - let post_validator_contribution_and_proofs = eth_v1 - .and(warp::path("validator")) - .and(warp::path("contribution_and_proofs")) + // GET node/peers/{peer_id} + let get_node_peers_by_id = eth_v1 + .clone() + .and(warp::path("node")) + .and(warp::path("peers")) + .and(warp::path::param::()) .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(warp_utils::json::json()) - .and(network_tx_filter.clone()) + .and(network_globals.clone()) .then( - |not_synced_filter: Result<(), Rejection>, + |requested_peer_id: String, task_spawner: TaskSpawner, - chain: Arc>, - contributions: Vec>, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - sync_committees::process_signed_contribution_and_proofs( - contributions, - network_tx, - &chain, - )?; - Ok(api_types::GenericResponse::from(())) - }) - }, - ); + network_globals: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let peer_id = PeerId::from_bytes( + &bs58::decode(requested_peer_id.as_str()) + .into_vec() + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "invalid peer id: {}", + e + )) + })?, + ) + .map_err(|_| { + warp_utils::reject::custom_bad_request("invalid peer 
id.".to_string()) + })?; - // POST validator/beacon_committee_subscriptions - let post_validator_beacon_committee_subscriptions = eth_v1 - .and(warp::path("validator")) - .and(warp::path("beacon_committee_subscriptions")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(validator_subscription_tx_filter.clone()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |committee_subscriptions: Vec, - validator_subscription_tx: Sender, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - let subscriptions: std::collections::BTreeSet<_> = committee_subscriptions - .iter() - .map(|subscription| { - chain - .validator_monitor - .write() - .auto_register_local_validator(subscription.validator_index); - api_types::ValidatorSubscription { - attestation_committee_index: subscription.committee_index, - slot: subscription.slot, - committee_count_at_slot: subscription.committees_at_slot, - is_aggregator: subscription.is_aggregator, - } - }) - .collect(); + if let Some(peer_info) = network_globals.peers.read().peer_info(&peer_id) { + let address = if let Some(multiaddr) = peer_info.seen_multiaddrs().next() { + multiaddr.to_string() + } else if let Some(addr) = peer_info.listening_addresses().first() { + addr.to_string() + } else { + String::new() + }; - let message = - ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions }; - if let Err(e) = validator_subscription_tx.try_send(message) { - warn!( - info = "the host may be overloaded or resource-constrained", - error = ?e, - "Unable to process committee subscriptions" - ); - return Err(warp_utils::reject::custom_server_error( - "unable to queue subscription, host may be overloaded or shutting down" - .to_string(), - )); + // the eth2 API spec implies only peers we have been connected to at some point should be included. 
+ if let Some(&dir) = peer_info.connection_direction() { + return Ok(api_types::GenericResponse::from(api_types::PeerData { + peer_id: peer_id.to_string(), + enr: peer_info.enr().map(|enr| enr.to_base64()), + last_seen_p2p_address: address, + direction: dir.into(), + state: peer_info.connection_status().clone().into(), + })); + } } - Ok(()) + Err(warp_utils::reject::custom_not_found( + "peer not found.".to_string(), + )) }) }, ); - // POST validator/prepare_beacon_proposer - let post_validator_prepare_beacon_proposer = eth_v1 - .and(warp::path("validator")) - .and(warp::path("prepare_beacon_proposer")) + // GET node/peers + let get_node_peers = eth_v1 + .clone() + .and(warp::path("node")) + .and(warp::path("peers")) .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) - .and(network_tx_filter.clone()) + .and(multi_key_query::()) .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(warp_utils::json::json()) + .and(network_globals.clone()) .then( - |not_synced_filter: Result<(), Rejection>, - network_tx: UnboundedSender>, + |query_res: Result, task_spawner: TaskSpawner, - chain: Arc>, - preparation_data: Vec| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - not_synced_filter?; - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::unhandled_error)?; - - let current_slot = chain - .slot_clock - .now_or_genesis() - .ok_or(BeaconChainError::UnableToReadSlot) - .map_err(warp_utils::reject::unhandled_error)?; - let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - - debug!( - count = preparation_data.len(), - "Received proposer preparation data" - ); + network_globals: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let query = query_res?; + let mut peers: Vec = Vec::new(); + network_globals + .peers + .read() + .peers() + .for_each(|(peer_id, peer_info)| { + let address = + if let 
Some(multiaddr) = peer_info.seen_multiaddrs().next() { + multiaddr.to_string() + } else if let Some(addr) = peer_info.listening_addresses().first() { + addr.to_string() + } else { + String::new() + }; - execution_layer - .update_proposer_preparation( - current_epoch, - preparation_data.iter().map(|data| (data, &None)), - ) - .await; + // the eth2 API spec implies only peers we have been connected to at some point should be included. + if let Some(&dir) = peer_info.connection_direction() { + let direction = dir.into(); + let state = peer_info.connection_status().clone().into(); - chain - .prepare_beacon_proposer(current_slot) - .await - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "error updating proposer preparations: {:?}", - e - )) - })?; + let state_matches = query + .state + .as_ref() + .is_none_or(|states| states.contains(&state)); + let direction_matches = query + .direction + .as_ref() + .is_none_or(|directions| directions.contains(&direction)); - if chain.spec.is_peer_das_scheduled() { - let (finalized_beacon_state, _, _) = - StateId(CoreStateId::Finalized).state(&chain)?; - let validators_and_balances = preparation_data - .iter() - .filter_map(|preparation| { - if let Ok(effective_balance) = finalized_beacon_state - .get_effective_balance(preparation.validator_index as usize) - { - Some((preparation.validator_index as usize, effective_balance)) - } else { - None + if state_matches && direction_matches { + peers.push(api_types::PeerData { + peer_id: peer_id.to_string(), + enr: peer_info.enr().map(|enr| enr.to_base64()), + last_seen_p2p_address: address, + direction, + state, + }); } - }) - .collect::>(); - - let current_slot = - chain.slot().map_err(warp_utils::reject::unhandled_error)?; - if let Some(cgc_change) = chain - .data_availability_checker - .custody_context() - .register_validators(validators_and_balances, current_slot, &chain.spec) - { - chain.update_data_column_custody_info(Some( - cgc_change - .effective_epoch - 
.start_slot(T::EthSpec::slots_per_epoch()), - )); - - network_tx.send(NetworkMessage::CustodyCountChanged { - new_custody_group_count: cgc_change.new_custody_group_count, - sampling_count: cgc_change.sampling_count, - }).unwrap_or_else(|e| { - debug!(error = %e, "Could not send message to the network service. \ - Likely shutdown") - }); - } - } - - Ok::<_, warp::reject::Rejection>(warp::reply::json(&()).into_response()) + } + }); + Ok(api_types::PeersData { + meta: api_types::PeersMetaData { + count: peers.len() as u64, + }, + data: peers, + }) }) }, ); - // POST validator/register_validator - let post_validator_register_validator = eth_v1 - .and(warp::path("validator")) - .and(warp::path("register_validator")) + // GET node/peer_count + let get_node_peer_count = eth_v1 + .clone() + .and(warp::path("node")) + .and(warp::path("peer_count")) .and(warp::path::end()) .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(warp_utils::json::json()) + .and(network_globals.clone()) .then( |task_spawner: TaskSpawner, - chain: Arc>, - register_val_data: Vec| async { - let (tx, rx) = oneshot::channel(); - - let initial_result = task_spawner - .spawn_async_with_rejection_no_conversion(Priority::P0, async move { - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::unhandled_error)?; - let current_slot = chain - .slot_clock - .now_or_genesis() - .ok_or(BeaconChainError::UnableToReadSlot) - .map_err(warp_utils::reject::unhandled_error)?; - let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - - debug!( - count = register_val_data.len(), - "Received register validator request" - ); + network_globals: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let mut connected: u64 = 0; + let mut connecting: u64 = 0; + let mut disconnected: u64 = 0; + let mut disconnecting: u64 = 0; - let head_snapshot = chain.head_snapshot(); - let spec = &chain.spec; 
+ network_globals + .peers + .read() + .peers() + .for_each(|(_, peer_info)| { + let state = + api_types::PeerState::from(peer_info.connection_status().clone()); + match state { + api_types::PeerState::Connected => connected += 1, + api_types::PeerState::Connecting => connecting += 1, + api_types::PeerState::Disconnected => disconnected += 1, + api_types::PeerState::Disconnecting => disconnecting += 1, + } + }); - let (preparation_data, filtered_registration_data): ( - Vec<(ProposerPreparationData, Option)>, - Vec, - ) = register_val_data - .into_iter() - .filter_map(|register_data| { - chain - .validator_index(®ister_data.message.pubkey) - .ok() - .flatten() - .and_then(|validator_index| { - let validator = head_snapshot - .beacon_state - .get_validator(validator_index) - .ok()?; - let validator_status = ValidatorStatus::from_validator( - validator, - current_epoch, - spec.far_future_epoch, - ) - .superstatus(); - let is_active_or_pending = - matches!(validator_status, ValidatorStatus::Pending) - || matches!( - validator_status, - ValidatorStatus::Active - ); - - // Filter out validators who are not 'active' or 'pending'. - is_active_or_pending.then_some({ - ( - ( - ProposerPreparationData { - validator_index: validator_index as u64, - fee_recipient: register_data - .message - .fee_recipient, - }, - Some(register_data.message.gas_limit), - ), - register_data, - ) - }) - }) - }) - .unzip(); + Ok(api_types::GenericResponse::from(api_types::PeerCount { + connected, + connecting, + disconnected, + disconnecting, + })) + }) + }, + ); + /* + * validator + */ - // Update the prepare beacon proposer cache based on this request. - execution_layer - .update_proposer_preparation( - current_epoch, - preparation_data.iter().map(|(data, limit)| (data, limit)), - ) - .await; - - // Call prepare beacon proposer blocking with the latest update in order to make - // sure we have a local payload to fall back to in the event of the blinded block - // flow failing. 
- chain - .prepare_beacon_proposer(current_slot) - .await - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "error updating proposer preparations: {:?}", - e - )) - })?; + // GET validator/duties/proposer/{epoch} + let get_validator_duties_proposer = get_validator_duties_proposer( + eth_v1.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - info!( - count = filtered_registration_data.len(), - "Forwarding register validator request to connected builder" - ); + // GET validator/blocks/{slot} + let get_validator_blocks = get_validator_blocks( + any_version.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - // It's a waste of a `BeaconProcessor` worker to just - // wait on a response from the builder (especially since - // they have frequent timeouts). Spawn a new task and - // send the response back to our original HTTP request - // task via a channel. - let builder_future = async move { - let arc_builder = chain - .execution_layer - .as_ref() - .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::unhandled_error)? - .builder(); - let builder = arc_builder - .as_ref() - .ok_or(BeaconChainError::BuilderMissing) - .map_err(warp_utils::reject::unhandled_error)?; - builder - .post_builder_validators(&filtered_registration_data) - .await - .map(|resp| warp::reply::json(&resp).into_response()) - .map_err(|e| { - warn!( - num_registrations = filtered_registration_data.len(), - error = ?e, - "Relay error when registering validator(s)" - ); - // Forward the HTTP status code if we are able to, otherwise fall back - // to a server error. 
- if let eth2::Error::ServerMessage(message) = e { - if message.code == StatusCode::BAD_REQUEST.as_u16() { - return warp_utils::reject::custom_bad_request( - message.message, - ); - } else { - // According to the spec this response should only be a 400 or 500, - // so we fall back to a 500 here. - return warp_utils::reject::custom_server_error( - message.message, - ); - } - } - warp_utils::reject::custom_server_error(format!("{e:?}")) - }) - }; - tokio::task::spawn(async move { tx.send(builder_future.await) }); + // GET validator/blinded_blocks/{slot} + let get_validator_blinded_blocks = get_validator_blinded_blocks( + eth_v1.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - // Just send a generic 200 OK from this closure. We'll - // ignore the `Ok` variant and form a proper response - // from what is sent back down the channel. - Ok(warp::reply::reply().into_response()) - }) - .await; + // GET validator/attestation_data?slot,committee_index + let get_validator_attestation_data = get_validator_attestation_data( + eth_v1.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - if initial_result.is_err() { - return convert_rejection(initial_result).await; - } + // GET validator/aggregate_attestation?attestation_data_root,slot + let get_validator_aggregate_attestation = get_validator_aggregate_attestation( + any_version.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - // Await a response from the builder without blocking a - // `BeaconProcessor` worker. 
- convert_rejection(rx.await.unwrap_or_else(|_| { - Ok(warp::reply::with_status( - warp::reply::json(&"No response from channel"), - warp::http::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response()) - })) - .await - }, - ); - // POST validator/sync_committee_subscriptions - let post_validator_sync_committee_subscriptions = eth_v1 - .and(warp::path("validator")) - .and(warp::path("sync_committee_subscriptions")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(validator_subscription_tx_filter) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |subscriptions: Vec, - validator_subscription_tx: Sender, - task_spawner: TaskSpawner, - chain: Arc>, - | { - task_spawner.blocking_json_task(Priority::P0, move || { - for subscription in subscriptions { - chain - .validator_monitor - .write() - .auto_register_local_validator(subscription.validator_index); - - let message = ValidatorSubscriptionMessage::SyncCommitteeSubscribe { - subscriptions: vec![subscription], - }; - if let Err(e) = validator_subscription_tx.try_send(message) { - warn!( - info = "the host may be overloaded or resource-constrained", - error = ?e, - "Unable to process sync subscriptions" - ); - return Err(warp_utils::reject::custom_server_error( - "unable to queue subscription, host may be overloaded or shutting down".to_string(), - )); - } - } + // POST validator/duties/attester/{epoch} + let post_validator_duties_attester = post_validator_duties_attester( + eth_v1.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - Ok(()) - }) - }, - ); + // POST validator/duties/sync/{epoch} + let post_validator_duties_sync = post_validator_duties_sync( + eth_v1.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - // POST validator/liveness/{epoch} - let post_validator_liveness_epoch = eth_v1 - .and(warp::path("validator")) - 
.and(warp::path("liveness")) - .and(warp::path::param::()) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |epoch: Epoch, - indices: api_types::ValidatorIndexData, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - // Ensure the request is for either the current, previous or next epoch. - let current_epoch = - chain.epoch().map_err(warp_utils::reject::unhandled_error)?; - let prev_epoch = current_epoch.saturating_sub(Epoch::new(1)); - let next_epoch = current_epoch.saturating_add(Epoch::new(1)); + // GET validator/sync_committee_contribution + let get_validator_sync_committee_contribution = get_validator_sync_committee_contribution( + eth_v1.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - if epoch < prev_epoch || epoch > next_epoch { - return Err(warp_utils::reject::custom_bad_request(format!( - "request epoch {} is more than one epoch from the current epoch {}", - epoch, current_epoch - ))); - } + // POST validator/aggregate_and_proofs + let post_validator_aggregate_and_proofs = post_validator_aggregate_and_proofs( + any_version.clone().clone(), + chain_filter.clone(), + network_tx_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - let liveness: Vec = indices - .0 - .iter() - .cloned() - .map(|index| { - let is_live = chain.validator_seen_at_epoch(index as usize, epoch); - api_types::StandardLivenessResponseData { index, is_live } - }) - .collect(); + let post_validator_contribution_and_proofs = post_validator_contribution_and_proofs( + eth_v1.clone().clone(), + chain_filter.clone(), + network_tx_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - Ok(api_types::GenericResponse::from(liveness)) - }) - }, + // POST validator/beacon_committee_subscriptions + let 
post_validator_beacon_committee_subscriptions = + post_validator_beacon_committee_subscriptions( + eth_v1.clone().clone(), + chain_filter.clone(), + validator_subscription_tx_filter.clone(), + task_spawner_filter.clone(), ); + // POST validator/prepare_beacon_proposer + let post_validator_prepare_beacon_proposer = post_validator_prepare_beacon_proposer( + eth_v1.clone().clone(), + chain_filter.clone(), + network_tx_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); + + // POST validator/register_validator + let post_validator_register_validator = post_validator_register_validator( + eth_v1.clone().clone(), + chain_filter.clone(), + task_spawner_filter.clone(), + ); + // POST validator/sync_committee_subscriptions + let post_validator_sync_committee_subscriptions = post_validator_sync_committee_subscriptions( + eth_v1.clone().clone(), + chain_filter.clone(), + validator_subscription_tx_filter.clone(), + task_spawner_filter.clone(), + ); + + // POST validator/liveness/{epoch} + let post_validator_liveness_epoch = post_validator_liveness_epoch( + eth_v1.clone().clone(), + chain_filter.clone(), + task_spawner_filter.clone(), + ); + // POST lighthouse/finalize let post_lighthouse_finalize = warp::path("lighthouse") .and(warp::path("finalize")) @@ -4259,7 +2614,10 @@ pub fn serve( ); network_globals.add_trusted_peer(enr.clone()); - publish_network_message(&network_tx, NetworkMessage::ConnectTrustedPeer(enr))?; + utils::publish_network_message( + &network_tx, + NetworkMessage::ConnectTrustedPeer(enr), + )?; Ok(()) }) @@ -4290,7 +2648,7 @@ pub fn serve( ); network_globals.remove_trusted_peer(enr.clone()); - publish_network_message( + utils::publish_network_message( &network_tx, NetworkMessage::DisconnectTrustedPeer(enr), )?; @@ -4742,6 +3100,7 @@ pub fn serve( ); let get_events = eth_v1 + .clone() .and(warp::path("events")) .and(warp::path::end()) .and(multi_key_query::()) @@ -5063,70 +3422,3 @@ pub fn serve( Ok(http_server) } - -fn 
from_meta_data( - meta_data: &RwLock>, - spec: &ChainSpec, -) -> api_types::MetaData { - let meta_data = meta_data.read(); - let format_hex = |bytes: &[u8]| format!("0x{}", hex::encode(bytes)); - - let seq_number = *meta_data.seq_number(); - let attnets = format_hex(&meta_data.attnets().clone().into_bytes()); - let syncnets = format_hex( - &meta_data - .syncnets() - .cloned() - .unwrap_or_default() - .into_bytes(), - ); - - if spec.is_peer_das_scheduled() { - api_types::MetaData::V3(api_types::MetaDataV3 { - seq_number, - attnets, - syncnets, - custody_group_count: meta_data.custody_group_count().cloned().unwrap_or_default(), - }) - } else { - api_types::MetaData::V2(api_types::MetaDataV2 { - seq_number, - attnets, - syncnets, - }) - } -} - -/// Publish a message to the libp2p pubsub network. -fn publish_pubsub_message( - network_tx: &UnboundedSender>, - message: PubsubMessage, -) -> Result<(), warp::Rejection> { - publish_network_message( - network_tx, - NetworkMessage::Publish { - messages: vec![message], - }, - ) -} - -/// Publish a message to the libp2p pubsub network. -fn publish_pubsub_messages( - network_tx: &UnboundedSender>, - messages: Vec>, -) -> Result<(), warp::Rejection> { - publish_network_message(network_tx, NetworkMessage::Publish { messages }) -} - -/// Publish a message to the libp2p network. 
-fn publish_network_message( - network_tx: &UnboundedSender>, - message: NetworkMessage, -) -> Result<(), warp::Rejection> { - network_tx.send(message).map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "unable to publish to network channel: {}", - e - )) - }) -} diff --git a/beacon_node/http_api/src/light_client.rs b/beacon_node/http_api/src/light_client.rs index ca9b86990c3..86eef03218b 100644 --- a/beacon_node/http_api/src/light_client.rs +++ b/beacon_node/http_api/src/light_client.rs @@ -3,13 +3,14 @@ use crate::version::{ beacon_response, }; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::beacon_response::BeaconResponse; use eth2::types::{ self as api_types, LightClientUpdate, LightClientUpdateResponseChunk, LightClientUpdateResponseChunkInner, LightClientUpdatesQuery, }; use ssz::Encode; use std::sync::Arc; -use types::{BeaconResponse, EthSpec, ForkName, Hash256, LightClientBootstrap}; +use types::{EthSpec, ForkName, Hash256, LightClientBootstrap}; use warp::{ Rejection, hyper::{Body, Response}, diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs index 367e09969b4..472ec0b65e4 100644 --- a/beacon_node/http_api/src/produce_block.rs +++ b/beacon_node/http_api/src/produce_block.rs @@ -9,6 +9,7 @@ use crate::{ use beacon_chain::{ BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, ProduceBlockVerification, }; +use eth2::beacon_response::ForkVersionedResponse; use eth2::types::{self as api_types, ProduceBlockV3Metadata, SkipRandaoVerification}; use lighthouse_tracing::{SPAN_PRODUCE_BLOCK_V2, SPAN_PRODUCE_BLOCK_V3}; use ssz::Encode; diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 9671a72da26..b54c071eb80 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -138,9 +138,10 @@ pub async fn publish_block>( "Signed block published to network via HTTP API" ); 
- crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block.clone())).map_err( - |_| BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish)), - )?; + crate::utils::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block.clone())) + .map_err(|_| { + BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish)) + })?; Ok(()) }; @@ -492,7 +493,7 @@ fn publish_blob_sidecars( blob: &GossipVerifiedBlob, ) -> Result<(), BlockError> { let pubsub_message = PubsubMessage::BlobSidecar(Box::new((blob.index(), blob.clone_blob()))); - crate::publish_pubsub_message(sender_clone, pubsub_message) + crate::utils::publish_pubsub_message(sender_clone, pubsub_message) .map_err(|_| BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish))) } @@ -525,7 +526,7 @@ fn publish_column_sidecars( PubsubMessage::DataColumnSidecar(Box::new((subnet, data_col))) }) .collect::>(); - crate::publish_pubsub_messages(sender_clone, pubsub_messages) + crate::utils::publish_pubsub_messages(sender_clone, pubsub_messages) .map_err(|_| BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish))) } diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index edda0e60a61..b9fa24ad6a4 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -1,6 +1,6 @@ //! Handlers for sync committee endpoints. 
-use crate::publish_pubsub_message; +use crate::utils::publish_pubsub_message; use beacon_chain::sync_committee_verification::{ Error as SyncVerificationError, VerifiedSyncCommitteeMessage, }; diff --git a/beacon_node/http_api/src/utils.rs b/beacon_node/http_api/src/utils.rs new file mode 100644 index 00000000000..f2b859ebe59 --- /dev/null +++ b/beacon_node/http_api/src/utils.rs @@ -0,0 +1,90 @@ +use crate::task_spawner::TaskSpawner; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::types::EndpointVersion; +use lighthouse_network::PubsubMessage; +use lighthouse_network::rpc::methods::MetaData; +use network::{NetworkMessage, ValidatorSubscriptionMessage}; +use parking_lot::RwLock; +use std::sync::Arc; +use tokio::sync::mpsc::{Sender, UnboundedSender}; +use types::{ChainSpec, EthSpec, ForkName}; +use warp::Rejection; +use warp::filters::BoxedFilter; + +pub type ResponseFilter = BoxedFilter<(warp::reply::Response,)>; +pub type AnyVersionFilter = BoxedFilter<(EndpointVersion,)>; +pub type EthV1Filter = BoxedFilter<()>; +pub type ChainFilter = BoxedFilter<(Arc>,)>; +pub type NotWhileSyncingFilter = BoxedFilter<(Result<(), Rejection>,)>; +pub type TaskSpawnerFilter = BoxedFilter<(TaskSpawner<::EthSpec>,)>; +pub type ValidatorSubscriptionTxFilter = BoxedFilter<(Sender,)>; +pub type NetworkTxFilter = + BoxedFilter<(UnboundedSender::EthSpec>>,)>; +pub type OptionalConsensusVersionHeaderFilter = BoxedFilter<(Option,)>; + +pub fn from_meta_data( + meta_data: &RwLock>, + spec: &ChainSpec, +) -> eth2::types::MetaData { + let meta_data = meta_data.read(); + let format_hex = |bytes: &[u8]| format!("0x{}", hex::encode(bytes)); + + let seq_number = *meta_data.seq_number(); + let attnets = format_hex(&meta_data.attnets().clone().into_bytes()); + let syncnets = format_hex( + &meta_data + .syncnets() + .cloned() + .unwrap_or_default() + .into_bytes(), + ); + + if spec.is_peer_das_scheduled() { + eth2::types::MetaData::V3(eth2::types::MetaDataV3 { + seq_number, + attnets, 
+ syncnets, + custody_group_count: meta_data.custody_group_count().cloned().unwrap_or_default(), + }) + } else { + eth2::types::MetaData::V2(eth2::types::MetaDataV2 { + seq_number, + attnets, + syncnets, + }) + } +} + +/// Publish a message to the libp2p pubsub network. +pub fn publish_pubsub_message( + network_tx: &UnboundedSender>, + message: PubsubMessage, +) -> Result<(), warp::Rejection> { + publish_network_message( + network_tx, + NetworkMessage::Publish { + messages: vec![message], + }, + ) +} + +/// Publish a message to the libp2p pubsub network. +pub fn publish_pubsub_messages( + network_tx: &UnboundedSender>, + messages: Vec>, +) -> Result<(), warp::Rejection> { + publish_network_message(network_tx, NetworkMessage::Publish { messages }) +} + +/// Publish a message to the libp2p network. +pub fn publish_network_message( + network_tx: &UnboundedSender>, + message: NetworkMessage, +) -> Result<(), warp::Rejection> { + network_tx.send(message).map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "unable to publish to network channel: {}", + e + )) + }) +} diff --git a/beacon_node/http_api/src/validator.rs b/beacon_node/http_api/src/validator.rs deleted file mode 100644 index 25b0feb99e8..00000000000 --- a/beacon_node/http_api/src/validator.rs +++ /dev/null @@ -1,22 +0,0 @@ -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; -use types::{BeaconState, PublicKeyBytes}; - -/// Uses the `chain.validator_pubkey_cache` to resolve a pubkey to a validator -/// index and then ensures that the validator exists in the given `state`. -pub fn pubkey_to_validator_index( - chain: &BeaconChain, - state: &BeaconState, - pubkey: &PublicKeyBytes, -) -> Result, Box> { - chain - .validator_index(pubkey) - .map_err(Box::new)? 
- .filter(|&index| { - state - .validators() - .get(index) - .is_some_and(|v| v.pubkey == *pubkey) - }) - .map(Result::Ok) - .transpose() -} diff --git a/beacon_node/http_api/src/validator/mod.rs b/beacon_node/http_api/src/validator/mod.rs new file mode 100644 index 00000000000..8baf7c52458 --- /dev/null +++ b/beacon_node/http_api/src/validator/mod.rs @@ -0,0 +1,972 @@ +use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; +use crate::task_spawner::{Priority, TaskSpawner}; +use crate::utils::{ + AnyVersionFilter, ChainFilter, EthV1Filter, NetworkTxFilter, NotWhileSyncingFilter, + ResponseFilter, TaskSpawnerFilter, ValidatorSubscriptionTxFilter, publish_network_message, +}; +use crate::version::V3; +use crate::{StateId, attester_duties, proposer_duties, sync_committees}; +use beacon_chain::attestation_verification::VerifiedAttestation; +use beacon_chain::validator_monitor::timestamp_now; +use beacon_chain::{AttestationError, BeaconChain, BeaconChainError, BeaconChainTypes}; +use bls::PublicKeyBytes; +use eth2::StatusCode; +use eth2::types::{ + Accept, BeaconCommitteeSubscription, EndpointVersion, Failure, GenericResponse, + StandardLivenessResponseData, StateId as CoreStateId, ValidatorAggregateAttestationQuery, + ValidatorAttestationDataQuery, ValidatorBlocksQuery, ValidatorIndexData, ValidatorStatus, +}; +use lighthouse_network::PubsubMessage; +use network::{NetworkMessage, ValidatorSubscriptionMessage}; +use slot_clock::SlotClock; +use std::sync::Arc; +use tokio::sync::mpsc::{Sender, UnboundedSender}; +use tokio::sync::oneshot; +use tracing::{debug, error, info, warn}; +use types::{ + BeaconState, Epoch, EthSpec, ProposerPreparationData, SignedAggregateAndProof, + SignedContributionAndProof, SignedValidatorRegistrationData, Slot, SyncContributionData, + ValidatorSubscription, +}; +use warp::{Filter, Rejection, Reply}; +use warp_utils::reject::convert_rejection; + +/// Uses the `chain.validator_pubkey_cache` to resolve a pubkey 
to a validator +/// index and then ensures that the validator exists in the given `state`. +pub fn pubkey_to_validator_index( + chain: &BeaconChain, + state: &BeaconState, + pubkey: &PublicKeyBytes, +) -> Result, Box> { + chain + .validator_index(pubkey) + .map_err(Box::new)? + .filter(|&index| { + state + .validators() + .get(index) + .is_some_and(|v| v.pubkey == *pubkey) + }) + .map(Result::Ok) + .transpose() +} + +// GET validator/sync_committee_contribution +pub fn get_validator_sync_committee_contribution( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("sync_committee_contribution")) + .and(warp::path::end()) + .and(warp::query::()) + .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |sync_committee_data: SyncContributionData, + not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + not_synced_filter?; + chain + .get_aggregated_sync_committee_contribution(&sync_committee_data) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "unable to fetch sync contribution: {:?}", + e + )) + })? 
+ .map(GenericResponse::from) + .ok_or_else(|| { + warp_utils::reject::custom_not_found( + "no matching sync contribution found".to_string(), + ) + }) + }) + }, + ) + .boxed() +} + +// POST validator/duties/sync/{epoch} +pub fn post_validator_duties_sync( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("duties")) + .and(warp::path("sync")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid epoch".to_string(), + )) + })) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(warp_utils::json::json()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |epoch: Epoch, + not_synced_filter: Result<(), Rejection>, + indices: ValidatorIndexData, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + not_synced_filter?; + sync_committees::sync_committee_duties(epoch, &indices.0, &chain) + }) + }, + ) + .boxed() +} + +// POST validator/duties/attester/{epoch} +pub fn post_validator_duties_attester( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("duties")) + .and(warp::path("attester")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid epoch".to_string(), + )) + })) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(warp_utils::json::json()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |epoch: Epoch, + not_synced_filter: Result<(), Rejection>, + indices: ValidatorIndexData, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, 
move || { + not_synced_filter?; + attester_duties::attester_duties(epoch, &indices.0, &chain) + }) + }, + ) + .boxed() +} + +// GET validator/aggregate_attestation?attestation_data_root,slot +pub fn get_validator_aggregate_attestation( + any_version: AnyVersionFilter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + any_version + .and(warp::path("validator")) + .and(warp::path("aggregate_attestation")) + .and(warp::path::end()) + .and(warp::query::()) + .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |endpoint_version: EndpointVersion, + query: ValidatorAggregateAttestationQuery, + not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P0, move || { + not_synced_filter?; + crate::aggregate_attestation::get_aggregate_attestation( + query.slot, + &query.attestation_data_root, + query.committee_index, + endpoint_version, + chain, + ) + }) + }, + ) + .boxed() +} + +// GET validator/attestation_data?slot,committee_index +pub fn get_validator_attestation_data( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("attestation_data")) + .and(warp::path::end()) + .and(warp::query::()) + .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |query: ValidatorAttestationDataQuery, + not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + not_synced_filter?; + + let current_slot = chain.slot().map_err(warp_utils::reject::unhandled_error)?; + + // allow a tolerance of one slot to account for clock skew + if 
query.slot > current_slot + 1 { + return Err(warp_utils::reject::custom_bad_request(format!( + "request slot {} is more than one slot past the current slot {}", + query.slot, current_slot + ))); + } + + chain + .produce_unaggregated_attestation(query.slot, query.committee_index) + .map(|attestation| attestation.data().clone()) + .map(GenericResponse::from) + .map_err(warp_utils::reject::unhandled_error) + }) + }, + ) + .boxed() +} + +// GET validator/blinded_blocks/{slot} +pub fn get_validator_blinded_blocks( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("blinded_blocks")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid slot".to_string(), + )) + })) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(warp::query::()) + .and(warp::header::optional::("accept")) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |slot: Slot, + not_synced_filter: Result<(), Rejection>, + query: ValidatorBlocksQuery, + accept_header: Option, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + not_synced_filter?; + produce_blinded_block_v2(accept_header, chain, slot, query).await + }) + }, + ) + .boxed() +} + +// GET validator/blocks/{slot} +pub fn get_validator_blocks( + any_version: AnyVersionFilter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + any_version + .and(warp::path("validator")) + .and(warp::path("blocks")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid slot".to_string(), + )) + })) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + 
.and(not_while_syncing_filter) + .and(warp::query::()) + .and(task_spawner_filter) + .and(chain_filter) + .then( + |endpoint_version: EndpointVersion, + slot: Slot, + accept_header: Option, + not_synced_filter: Result<(), Rejection>, + query: ValidatorBlocksQuery, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + debug!(?slot, "Block production request from HTTP API"); + + not_synced_filter?; + + if endpoint_version == V3 { + produce_block_v3(accept_header, chain, slot, query).await + } else { + produce_block_v2(accept_header, chain, slot, query).await + } + }) + }, + ) + .boxed() +} + +// POST validator/liveness/{epoch} +pub fn post_validator_liveness_epoch( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("liveness")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |epoch: Epoch, + indices: ValidatorIndexData, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + // Ensure the request is for either the current, previous or next epoch. 
+ let current_epoch = + chain.epoch().map_err(warp_utils::reject::unhandled_error)?; + let prev_epoch = current_epoch.saturating_sub(Epoch::new(1)); + let next_epoch = current_epoch.saturating_add(Epoch::new(1)); + + if epoch < prev_epoch || epoch > next_epoch { + return Err(warp_utils::reject::custom_bad_request(format!( + "request epoch {} is more than one epoch from the current epoch {}", + epoch, current_epoch + ))); + } + + let liveness: Vec = indices + .0 + .iter() + .cloned() + .map(|index| { + let is_live = chain.validator_seen_at_epoch(index as usize, epoch); + StandardLivenessResponseData { index, is_live } + }) + .collect(); + + Ok(GenericResponse::from(liveness)) + }) + }, + ) + .boxed() +} + +// POST validator/sync_committee_subscriptions +pub fn post_validator_sync_committee_subscriptions( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + validator_subscription_tx_filter: ValidatorSubscriptionTxFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("sync_committee_subscriptions")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(validator_subscription_tx_filter) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |subscriptions: Vec, + validator_subscription_tx: Sender, + task_spawner: TaskSpawner, + chain: Arc>, + | { + task_spawner.blocking_json_task(Priority::P0, move || { + for subscription in subscriptions { + chain + .validator_monitor + .write() + .auto_register_local_validator(subscription.validator_index); + + let message = ValidatorSubscriptionMessage::SyncCommitteeSubscribe { + subscriptions: vec![subscription], + }; + if let Err(e) = validator_subscription_tx.try_send(message) { + warn!( + info = "the host may be overloaded or resource-constrained", + error = ?e, + "Unable to process sync subscriptions" + ); + return Err(warp_utils::reject::custom_server_error( + "unable to queue subscription, host may be overloaded or 
shutting down".to_string(), + )); + } + } + + Ok(()) + }) + }, + ).boxed() +} + +// POST validator/register_validator +pub fn post_validator_register_validator( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("register_validator")) + .and(warp::path::end()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp_utils::json::json()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + register_val_data: Vec| async { + let (tx, rx) = oneshot::channel(); + + let initial_result = task_spawner + .spawn_async_with_rejection_no_conversion(Priority::P0, async move { + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::unhandled_error)?; + let current_slot = chain + .slot_clock + .now_or_genesis() + .ok_or(BeaconChainError::UnableToReadSlot) + .map_err(warp_utils::reject::unhandled_error)?; + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + + debug!( + count = register_val_data.len(), + "Received register validator request" + ); + + let head_snapshot = chain.head_snapshot(); + let spec = &chain.spec; + + let (preparation_data, filtered_registration_data): ( + Vec<(ProposerPreparationData, Option)>, + Vec, + ) = register_val_data + .into_iter() + .filter_map(|register_data| { + chain + .validator_index(®ister_data.message.pubkey) + .ok() + .flatten() + .and_then(|validator_index| { + let validator = head_snapshot + .beacon_state + .get_validator(validator_index) + .ok()?; + let validator_status = ValidatorStatus::from_validator( + validator, + current_epoch, + spec.far_future_epoch, + ) + .superstatus(); + let is_active_or_pending = + matches!(validator_status, ValidatorStatus::Pending) + || matches!( + validator_status, + ValidatorStatus::Active + ); + + // Filter out validators who are not 'active' or 'pending'. 
+ is_active_or_pending.then_some({ + ( + ( + ProposerPreparationData { + validator_index: validator_index as u64, + fee_recipient: register_data + .message + .fee_recipient, + }, + Some(register_data.message.gas_limit), + ), + register_data, + ) + }) + }) + }) + .unzip(); + + // Update the prepare beacon proposer cache based on this request. + execution_layer + .update_proposer_preparation( + current_epoch, + preparation_data.iter().map(|(data, limit)| (data, limit)), + ) + .await; + + // Call prepare beacon proposer blocking with the latest update in order to make + // sure we have a local payload to fall back to in the event of the blinded block + // flow failing. + chain + .prepare_beacon_proposer(current_slot) + .await + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "error updating proposer preparations: {:?}", + e + )) + })?; + + info!( + count = filtered_registration_data.len(), + "Forwarding register validator request to connected builder" + ); + + // It's a waste of a `BeaconProcessor` worker to just + // wait on a response from the builder (especially since + // they have frequent timeouts). Spawn a new task and + // send the response back to our original HTTP request + // task via a channel. + let builder_future = async move { + let arc_builder = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::unhandled_error)? + .builder(); + let builder = arc_builder + .as_ref() + .ok_or(BeaconChainError::BuilderMissing) + .map_err(warp_utils::reject::unhandled_error)?; + builder + .post_builder_validators(&filtered_registration_data) + .await + .map(|resp| warp::reply::json(&resp).into_response()) + .map_err(|e| { + warn!( + num_registrations = filtered_registration_data.len(), + error = ?e, + "Relay error when registering validator(s)" + ); + // Forward the HTTP status code if we are able to, otherwise fall back + // to a server error. 
+ if let eth2::Error::ServerMessage(message) = e { + if message.code == StatusCode::BAD_REQUEST.as_u16() { + return warp_utils::reject::custom_bad_request( + message.message, + ); + } else { + // According to the spec this response should only be a 400 or 500, + // so we fall back to a 500 here. + return warp_utils::reject::custom_server_error( + message.message, + ); + } + } + warp_utils::reject::custom_server_error(format!("{e:?}")) + }) + }; + tokio::task::spawn(async move { tx.send(builder_future.await) }); + + // Just send a generic 200 OK from this closure. We'll + // ignore the `Ok` variant and form a proper response + // from what is sent back down the channel. + Ok(warp::reply::reply().into_response()) + }) + .await; + + if initial_result.is_err() { + return convert_rejection(initial_result).await; + } + + // Await a response from the builder without blocking a + // `BeaconProcessor` worker. + convert_rejection(rx.await.unwrap_or_else(|_| { + Ok(warp::reply::with_status( + warp::reply::json(&"No response from channel"), + warp::http::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response()) + })) + .await + }, + ) + .boxed() +} + +// POST validator/prepare_beacon_proposer +pub fn post_validator_prepare_beacon_proposer( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + network_tx_filter: NetworkTxFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("prepare_beacon_proposer")) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(network_tx_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp_utils::json::json()) + .then( + |not_synced_filter: Result<(), Rejection>, + network_tx: UnboundedSender>, + task_spawner: TaskSpawner, + chain: Arc>, + preparation_data: Vec| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + not_synced_filter?; + let 
execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::unhandled_error)?; + + let current_slot = chain + .slot_clock + .now_or_genesis() + .ok_or(BeaconChainError::UnableToReadSlot) + .map_err(warp_utils::reject::unhandled_error)?; + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + + debug!( + count = preparation_data.len(), + "Received proposer preparation data" + ); + + execution_layer + .update_proposer_preparation( + current_epoch, + preparation_data.iter().map(|data| (data, &None)), + ) + .await; + + chain + .prepare_beacon_proposer(current_slot) + .await + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "error updating proposer preparations: {:?}", + e + )) + })?; + + if chain.spec.is_peer_das_scheduled() { + let (finalized_beacon_state, _, _) = + StateId(CoreStateId::Finalized).state(&chain)?; + let validators_and_balances = preparation_data + .iter() + .filter_map(|preparation| { + if let Ok(effective_balance) = finalized_beacon_state + .get_effective_balance(preparation.validator_index as usize) + { + Some((preparation.validator_index as usize, effective_balance)) + } else { + None + } + }) + .collect::>(); + + let current_slot = + chain.slot().map_err(warp_utils::reject::unhandled_error)?; + if let Some(cgc_change) = chain + .data_availability_checker + .custody_context() + .register_validators(validators_and_balances, current_slot, &chain.spec) + { + chain.update_data_column_custody_info(Some( + cgc_change + .effective_epoch + .start_slot(T::EthSpec::slots_per_epoch()), + )); + + network_tx.send(NetworkMessage::CustodyCountChanged { + new_custody_group_count: cgc_change.new_custody_group_count, + sampling_count: cgc_change.sampling_count, + }).unwrap_or_else(|e| { + debug!(error = %e, "Could not send message to the network service. 
\ + Likely shutdown") + }); + } + } + + Ok::<_, warp::reject::Rejection>(warp::reply::json(&()).into_response()) + }) + }, + ) + .boxed() +} + +// POST validator/beacon_committee_subscriptions +pub fn post_validator_beacon_committee_subscriptions( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + validator_subscription_tx_filter: ValidatorSubscriptionTxFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("beacon_committee_subscriptions")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(validator_subscription_tx_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |committee_subscriptions: Vec, + validator_subscription_tx: Sender, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let subscriptions: std::collections::BTreeSet<_> = committee_subscriptions + .iter() + .map(|subscription| { + chain + .validator_monitor + .write() + .auto_register_local_validator(subscription.validator_index); + ValidatorSubscription { + attestation_committee_index: subscription.committee_index, + slot: subscription.slot, + committee_count_at_slot: subscription.committees_at_slot, + is_aggregator: subscription.is_aggregator, + } + }) + .collect(); + + let message = + ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions }; + if let Err(e) = validator_subscription_tx.try_send(message) { + warn!( + info = "the host may be overloaded or resource-constrained", + error = ?e, + "Unable to process committee subscriptions" + ); + return Err(warp_utils::reject::custom_server_error( + "unable to queue subscription, host may be overloaded or shutting down" + .to_string(), + )); + } + Ok(()) + }) + }, + ) + .boxed() +} + +pub fn post_validator_contribution_and_proofs( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + network_tx_filter: NetworkTxFilter, + not_while_syncing_filter: 
NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("contribution_and_proofs")) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + |not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>, + contributions: Vec>, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + not_synced_filter?; + sync_committees::process_signed_contribution_and_proofs( + contributions, + network_tx, + &chain, + )?; + Ok(GenericResponse::from(())) + }) + }, + ) + .boxed() +} + +// POST validator/aggregate_and_proofs +pub fn post_validator_aggregate_and_proofs( + any_version: AnyVersionFilter, + chain_filter: ChainFilter, + network_tx_filter: NetworkTxFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + any_version + .and(warp::path("validator")) + .and(warp::path("aggregate_and_proofs")) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + // V1 and V2 are identical except V2 has a consensus version header in the request. + // We only require this header for SSZ deserialization, which isn't supported for + // this endpoint presently. 
+ |_endpoint_version: EndpointVersion, + not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>, + aggregates: Vec>, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + not_synced_filter?; + let seen_timestamp = timestamp_now(); + let mut verified_aggregates = Vec::with_capacity(aggregates.len()); + let mut messages = Vec::with_capacity(aggregates.len()); + let mut failures = Vec::new(); + + // Verify that all messages in the post are valid before processing further + for (index, aggregate) in aggregates.iter().enumerate() { + match chain.verify_aggregated_attestation_for_gossip(aggregate) { + Ok(verified_aggregate) => { + messages.push(PubsubMessage::AggregateAndProofAttestation(Box::new( + verified_aggregate.aggregate().clone(), + ))); + + // Notify the validator monitor. + chain + .validator_monitor + .read() + .register_api_aggregated_attestation( + seen_timestamp, + verified_aggregate.aggregate(), + verified_aggregate.indexed_attestation(), + &chain.slot_clock, + ); + + verified_aggregates.push((index, verified_aggregate)); + } + // If we already know the attestation, don't broadcast it or attempt to + // further verify it. Return success. + // + // It's reasonably likely that two different validators produce + // identical aggregates, especially if they're using the same beacon + // node. + Err(AttestationError::AttestationSupersetKnown(_)) => continue, + // If we've already seen this aggregator produce an aggregate, just + // skip this one. + // + // We're likely to see this with VCs that use fallback BNs. The first + // BN might time-out *after* publishing the aggregate and then the + // second BN will indicate it's already seen the aggregate. + // + // There's no actual error for the user or the network since the + // aggregate has been successfully published by some other node. 
+ Err(AttestationError::AggregatorAlreadyKnown(_)) => continue, + Err(e) => { + error!( + error = ?e, + request_index = index, + aggregator_index = aggregate.message().aggregator_index(), + attestation_index = aggregate.message().aggregate().committee_index(), + attestation_slot = %aggregate.message().aggregate().data().slot, + "Failure verifying aggregate and proofs" + ); + failures.push(Failure::new(index, format!("Verification: {:?}", e))); + } + } + } + + // Publish aggregate attestations to the libp2p network + if !messages.is_empty() { + publish_network_message(&network_tx, NetworkMessage::Publish { messages })?; + } + + // Import aggregate attestations + for (index, verified_aggregate) in verified_aggregates { + if let Err(e) = chain.apply_attestation_to_fork_choice(&verified_aggregate) { + error!( + error = ?e, + request_index = index, + aggregator_index = verified_aggregate.aggregate().message().aggregator_index(), + attestation_index = verified_aggregate.attestation().committee_index(), + attestation_slot = %verified_aggregate.attestation().data().slot, + "Failure applying verified aggregate attestation to fork choice" + ); + failures.push(Failure::new(index, format!("Fork choice: {:?}", e))); + } + if let Err(e) = chain.add_to_block_inclusion_pool(verified_aggregate) { + warn!( + error = ?e, + request_index = index, + "Could not add verified aggregate attestation to the inclusion pool" + ); + failures.push(Failure::new(index, format!("Op pool: {:?}", e))); + } + } + + if !failures.is_empty() { + Err(warp_utils::reject::indexed_bad_request("error processing aggregate and proofs".to_string(), + failures, + )) + } else { + Ok(()) + } + }) + }, + ).boxed() +} + +// GET validator/duties/proposer/{epoch} +pub fn get_validator_duties_proposer( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + 
.and(warp::path("duties")) + .and(warp::path("proposer")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid epoch".to_string(), + )) + })) + .and(warp::path::end()) + .and(not_while_syncing_filter) + .and(task_spawner_filter) + .and(chain_filter) + .then( + |epoch: Epoch, + not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + not_synced_filter?; + proposer_duties::proposer_duties(epoch, &chain) + }) + }, + ) + .boxed() +} diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index 871a10e7d4a..371064c886b 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -1,16 +1,14 @@ use crate::api_types::EndpointVersion; +use eth2::beacon_response::{ + BeaconResponse, ExecutionOptimisticFinalizedBeaconResponse, + ExecutionOptimisticFinalizedMetadata, ForkVersionedResponse, UnversionedResponse, +}; use eth2::{ CONSENSUS_BLOCK_VALUE_HEADER, CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, EXECUTION_PAYLOAD_BLINDED_HEADER, EXECUTION_PAYLOAD_VALUE_HEADER, SSZ_CONTENT_TYPE_HEADER, }; use serde::Serialize; -use types::{ - BeaconResponse, ForkName, ForkVersionedResponse, InconsistentFork, Uint256, - UnversionedResponse, - beacon_response::{ - ExecutionOptimisticFinalizedBeaconResponse, ExecutionOptimisticFinalizedMetadata, - }, -}; +use types::{ForkName, InconsistentFork, Uint256}; use warp::reply::{self, Reply, Response}; pub const V1: EndpointVersion = EndpointVersion(1); diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 33f462fa5e2..357b78cf41c 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -6,13 +6,12 @@ use beacon_chain::{ }; use eth2::reqwest::{Response, StatusCode}; use 
eth2::types::{BroadcastValidation, PublishBlockRequest}; +use fixed_bytes::FixedBytesExtended; use http_api::test_utils::InteractiveTester; use http_api::{Config, ProvenancedBlock, publish_blinded_block, publish_block, reconstruct_block}; use std::collections::HashSet; use std::sync::Arc; -use types::{ - ColumnIndex, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, MainnetEthSpec, Slot, -}; +use types::{ColumnIndex, Epoch, EthSpec, ForkName, Hash256, MainnetEthSpec, Slot}; use warp::Rejection; use warp_utils::reject::CustomBadRequest; diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 50cf866b6a8..b96c8bd1122 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -4,13 +4,15 @@ use beacon_chain::{ StateSkipConfig, test_utils::{DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME, RelativeSyncCommittee}, }; +use bls::PublicKey; use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; use execution_layer::test_utils::generate_genesis_header; +use fixed_bytes::FixedBytesExtended; use genesis::{InteropGenesisBuilder, bls_withdrawal_credentials}; use http_api::test_utils::*; use std::collections::HashSet; use types::{ - Address, ChainSpec, Epoch, EthSpec, FixedBytesExtended, Hash256, MinimalEthSpec, Slot, + Address, ChainSpec, Epoch, EthSpec, Hash256, MinimalEthSpec, Slot, test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs}, }; @@ -392,7 +394,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { fn withdrawal_credentials_fn<'a>( index: usize, - _: &'a types::PublicKey, + _: &'a PublicKey, spec: &'a ChainSpec, ) -> Hash256 { // It is a bit inefficient to regenerate the whole keypair here, but this is a workaround. 
diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 83cb70a7a3a..0119a7645c2 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -11,6 +11,7 @@ use beacon_processor::{Work, WorkEvent, work_reprocessing_queue::ReprocessQueueM use eth2::types::ProduceBlockV3Response; use eth2::types::{DepositContractData, StateId}; use execution_layer::{ForkchoiceState, PayloadAttributes}; +use fixed_bytes::FixedBytesExtended; use http_api::test_utils::InteractiveTester; use parking_lot::Mutex; use slot_clock::SlotClock; @@ -21,8 +22,8 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use types::{ - Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, FixedBytesExtended, ForkName, - Hash256, MainnetEthSpec, MinimalEthSpec, ProposerPreparationData, Slot, Uint256, + Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec, + MinimalEthSpec, ProposerPreparationData, Slot, Uint256, }; type E = MainnetEthSpec; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 8d99e696cf7..f8eba0ee2b7 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -6,6 +6,7 @@ use beacon_chain::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, test_spec, }, }; +use bls::{AggregateSignature, Keypair, PublicKeyBytes, Signature, SignatureBytes}; use eth2::{ BeaconNodeHttpClient, Error, Error::ServerMessage, @@ -21,6 +22,7 @@ use execution_layer::test_utils::{ DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_GAS_LIMIT, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, MockBuilder, Operation, mock_builder_extra_data, mock_el_extra_data, }; +use fixed_bytes::FixedBytesExtended; use futures::FutureExt; use futures::stream::{Stream, StreamExt}; use http_api::{ @@ -34,6 +36,7 @@ use operation_pool::attestation_storage::CheckpointKey; use 
proto_array::ExecutionStatus; use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; +use ssz::BitList; use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_slot_processing; use state_processing::state_advance::partial_state_advance; @@ -43,9 +46,8 @@ use tokio::time::Duration; use tree_hash::TreeHash; use types::application_domain::ApplicationDomain; use types::{ - AggregateSignature, BitList, Domain, EthSpec, ExecutionBlockHash, Hash256, Keypair, - MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, SingleAttestation, Slot, - attestation::AttestationBase, + Domain, EthSpec, ExecutionBlockHash, Hash256, MainnetEthSpec, RelativeEpoch, SelectionProof, + SignedRoot, SingleAttestation, Slot, attestation::AttestationBase, }; type E = MainnetEthSpec; @@ -2853,9 +2855,19 @@ impl ApiTester { let expected = IdentityData { peer_id: self.local_enr.peer_id().to_string(), - enr: self.local_enr.clone(), - p2p_addresses: self.local_enr.multiaddr_p2p_tcp(), - discovery_addresses: self.local_enr.multiaddr_p2p_udp(), + enr: self.local_enr.to_base64(), + p2p_addresses: self + .local_enr + .multiaddr_p2p_tcp() + .iter() + .map(|a| a.to_string()) + .collect(), + discovery_addresses: self + .local_enr + .multiaddr_p2p_udp() + .iter() + .map(|a| a.to_string()) + .collect(), metadata: MetaData::V2(MetaDataV2 { seq_number: 0, attnets: "0x0000000000000000".to_string(), @@ -2884,7 +2896,7 @@ impl ApiTester { pub async fn test_get_node_peers_by_id(self) -> Self { let result = self .client - .get_node_peers_by_id(self.external_peer_id) + .get_node_peers_by_id(&self.external_peer_id.to_string()) .await .unwrap() .data; diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index a6dd276c197..efb6f27dc52 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -11,15 +11,17 @@ libp2p-websocket = [] [dependencies] alloy-primitives = { workspace = 
true } alloy-rlp = { workspace = true } +bls = { workspace = true } bytes = { workspace = true } delay_map = { workspace = true } directory = { workspace = true } dirs = { workspace = true } discv5 = { workspace = true } either = { workspace = true } -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["lighthouse"] } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace = true } fnv = { workspace = true } futures = { workspace = true } gossipsub = { workspace = true } @@ -49,6 +51,7 @@ tokio = { workspace = true } tokio-util = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } +typenum = { workspace = true } types = { workspace = true } unsigned-varint = { version = "0.8", features = ["codec"] } diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 018bf580504..df6e0740bb5 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1241,7 +1241,8 @@ mod tests { use super::*; use crate::rpc::methods::{MetaData, MetaDataV3}; use libp2p::identity::secp256k1; - use types::{BitVector, MinimalEthSpec, SubnetId}; + use ssz_types::BitVector; + use types::{MinimalEthSpec, SubnetId}; type E = MinimalEthSpec; diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index d8dde408846..1b280d54035 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -3032,7 +3032,8 @@ mod tests { use proptest::prelude::*; use std::collections::HashSet; use tokio::runtime::Runtime; - use types::{DataColumnSubnetId, Unsigned}; + use typenum::Unsigned; + use types::DataColumnSubnetId; use types::{EthSpec, MainnetEthSpec as E}; #[derive(Clone, Debug)] diff --git 
a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 34c42fdd041..8abec366fac 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -8,7 +8,7 @@ use libp2p::bytes::BytesMut; use snap::read::FrameDecoder; use snap::write::FrameEncoder; use ssz::{Decode, Encode}; -use ssz_types::VariableList; +use ssz_types::{RuntimeVariableList, VariableList}; use std::io::Cursor; use std::io::ErrorKind; use std::io::{Read, Write}; @@ -18,7 +18,7 @@ use tokio_util::codec::{Decoder, Encoder}; use types::{ BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EthSpec, ExecutionProof, ForkContext, ForkName, Hash256, LightClientBootstrap, - LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, + LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, SignedBeaconBlockGloas, @@ -925,11 +925,13 @@ mod tests { use super::*; use crate::rpc::protocol::*; use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; + use bls::Signature; + use fixed_bytes::FixedBytesExtended; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, - DataColumnsByRootIdentifier, EmptyBlock, Epoch, ExecutionProofId, FixedBytesExtended, - FullPayload, KzgCommitment, KzgProof, Signature, SignedBeaconBlockHeader, Slot, - blob_sidecar::BlobIdentifier, data_column_sidecar::Cell, + DataColumnsByRootIdentifier, EmptyBlock, Epoch, ExecutionProofId, FullPayload, + KzgCommitment, KzgProof, SignedBeaconBlockHeader, Slot, blob_sidecar::BlobIdentifier, + data_column_sidecar::Cell, }; type Spec = types::MainnetEthSpec; diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs 
b/beacon_node/lighthouse_network/src/rpc/methods.rs index b297ce8f08f..98cc587201b 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -5,7 +5,7 @@ use regex::bytes::Regex; use serde::Serialize; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use ssz_types::{VariableList, typenum::U256}; +use ssz_types::{RuntimeVariableList, VariableList, typenum::U256}; use std::fmt::Display; use std::marker::PhantomData; use std::ops::Deref; @@ -17,7 +17,7 @@ use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnsByRootIdentifier, Epoch, EthSpec, ExecutionProof, ExecutionProofId, ForkContext, Hash256, LightClientBootstrap, - LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, + LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, Slot, blob_sidecar::BlobSidecar, }; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 0428f8787a3..dfa44976390 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -1,5 +1,6 @@ use super::methods::*; use crate::rpc::codec::SSZSnappyInboundCodec; +use bls::Signature; use futures::future::BoxFuture; use futures::prelude::{AsyncRead, AsyncWrite}; use futures::{FutureExt, StreamExt}; @@ -21,7 +22,7 @@ use types::{ LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientUpdate, MainnetEthSpec, MinimalEthSpec, - Signature, SignedBeaconBlock, + SignedBeaconBlock, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs 
index 3f57406fc78..eea8782b2d5 100644 --- a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -3,7 +3,8 @@ mod pubsub; mod subnet; mod topics; -use types::{BitVector, EthSpec}; +use ssz_types::BitVector; +use types::EthSpec; pub type EnrAttestationBitfield = BitVector<::SubnetBitfieldLength>; pub type EnrSyncCommitteeBitfield = BitVector<::SyncCommitteeSubnetCount>; diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index cdb572ea7bf..a3524ec5763 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -2,7 +2,8 @@ use gossipsub::{IdentTopic as Topic, TopicHash}; use serde::{Deserialize, Serialize}; use std::collections::HashSet; use strum::AsRefStr; -use types::{ChainSpec, DataColumnSubnetId, EthSpec, ForkName, SubnetId, SyncSubnetId, Unsigned}; +use typenum::Unsigned; +use types::{ChainSpec, DataColumnSubnetId, EthSpec, ForkName, SubnetId, SyncSubnetId}; use crate::Subnet; diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 59f357454c4..3f53fa9c314 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -1,4 +1,5 @@ #![cfg(test)] +use fixed_bytes::FixedBytesExtended; use lighthouse_network::Enr; use lighthouse_network::Multiaddr; use lighthouse_network::service::Network as LibP2PService; @@ -9,10 +10,7 @@ use std::sync::Weak; use tokio::runtime::Runtime; use tracing::{Instrument, debug, error, info_span}; use tracing_subscriber::EnvFilter; -use types::{ - ChainSpec, EnrForkId, Epoch, EthSpec, FixedBytesExtended, ForkContext, ForkName, Hash256, - MinimalEthSpec, -}; +use types::{ChainSpec, EnrForkId, Epoch, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec}; type E = MinimalEthSpec; diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs 
b/beacon_node/lighthouse_network/tests/rpc_tests.rs index cc0a893a3e9..2327184eeea 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -3,11 +3,13 @@ use crate::common; use crate::common::spec_with_all_forks_enabled; use crate::common::{Protocol, build_tracing_subscriber}; +use bls::Signature; +use fixed_bytes::FixedBytesExtended; use lighthouse_network::rpc::{RequestType, methods::*}; use lighthouse_network::service::api_types::AppRequestId; use lighthouse_network::{NetworkEvent, ReportSource, Response}; use ssz::Encode; -use ssz_types::VariableList; +use ssz_types::{RuntimeVariableList, VariableList}; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::runtime::Runtime; @@ -16,9 +18,8 @@ use tracing::{Instrument, debug, error, info_span, warn}; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EmptyBlock, Epoch, - EthSpec, ExecutionBlockHash, ExecutionProof, ExecutionProofId, FixedBytesExtended, ForkName, - Hash256, KzgCommitment, KzgProof, MinimalEthSpec, RuntimeVariableList, Signature, - SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + EthSpec, ExecutionBlockHash, ExecutionProof, ExecutionProofId, ForkName, Hash256, + KzgCommitment, KzgProof, MinimalEthSpec, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; type E = MinimalEthSpec; diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index b60c5e6dbff..bf261965760 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -22,6 +22,7 @@ delay_map = { workspace = true } educe = { workspace = true } ethereum_ssz = { workspace = true } execution_layer = { workspace = true } +fixed_bytes = { workspace = true } fnv = { workspace = true } futures = { workspace = true } hex = { workspace = true } @@ -45,6 +46,7 @@ tokio = { workspace = true } tokio-stream = 
{ workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } +typenum = { workspace = true } types = { workspace = true } [dev-dependencies] diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index d83059ad278..841a8679cfd 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -33,6 +33,7 @@ use lighthouse_network::{ }; use matches::assert_matches; use slot_clock::SlotClock; +use ssz_types::RuntimeVariableList; use std::collections::HashSet; use std::iter::Iterator; use std::sync::Arc; @@ -42,8 +43,8 @@ use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; use types::{ AttesterSlashing, BlobSidecar, BlobSidecarList, ChainSpec, DataColumnSidecarList, DataColumnSubnetId, Epoch, EthSpec, Hash256, MainnetEthSpec, ProposerSlashing, - RuntimeVariableList, SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, - SingleAttestation, Slot, SubnetId, + SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, SingleAttestation, Slot, + SubnetId, }; type E = MainnetEthSpec; diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index a416f5cb123..0869b442aec 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -37,8 +37,9 @@ use task_executor::ShutdownReason; use tokio::sync::mpsc; use tokio::time::Sleep; use tracing::{debug, error, info, trace, warn}; +use typenum::Unsigned; use types::{ - EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, Unsigned, + EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, ValidatorSubscription, }; diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index ebf5c1829e5..c571a40485c 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -1,5 
+1,6 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; -use types::{EthSpec, FixedBytesExtended, Hash256}; +use fixed_bytes::FixedBytesExtended; +use types::{EthSpec, Hash256}; use lighthouse_network::rpc::{StatusMessage, methods::StatusMessageV2}; /// Trait to produce a `StatusMessage` representing the state of the given `beacon_chain`. diff --git a/beacon_node/network/src/sync/block_lookups/parent_chain.rs b/beacon_node/network/src/sync/block_lookups/parent_chain.rs index 551a0261f2c..5deea1dd94e 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_chain.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_chain.rs @@ -118,7 +118,8 @@ pub(crate) fn find_oldest_fork_ancestor( #[cfg(test)] mod tests { use super::{Node, compute_parent_chains, find_oldest_fork_ancestor}; - use types::{FixedBytesExtended, Hash256}; + use fixed_bytes::FixedBytesExtended; + use types::Hash256; fn h(n: u64) -> Hash256 { Hash256::from_low_u64_be(n) diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 01929cbf906..ed9a11a03de 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -7,11 +7,12 @@ use lighthouse_network::{ BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, }, }; +use ssz_types::RuntimeVariableList; use std::{collections::HashMap, sync::Arc}; use tracing::{Span, debug}; use types::{ BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, - Hash256, RuntimeVariableList, SignedBeaconBlock, + Hash256, SignedBeaconBlock, }; use crate::sync::network_context::MAX_COLUMN_RETRIES; diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 0fdc505ab98..a9c3eb231c9 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -2002,8 +2002,8 @@ mod 
deneb_only { block_verification_types::{AsBlock, RpcBlock}, data_availability_checker::AvailabilityCheckError, }; + use ssz_types::RuntimeVariableList; use std::collections::VecDeque; - use types::RuntimeVariableList; struct DenebTester { rig: TestRig, diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index eeddb53c23e..6fab7a752a4 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -9,9 +9,11 @@ portable = ["beacon_chain/portable"] [dependencies] bitvec = { workspace = true } +bls = { workspace = true } educe = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace = true } itertools = { workspace = true } metrics = { workspace = true } parking_lot = { workspace = true } @@ -20,6 +22,8 @@ rayon = { workspace = true } serde = { workspace = true } state_processing = { workspace = true } store = { workspace = true } +superstruct = { workspace = true } +typenum = { workspace = true } types = { workspace = true } [dev-dependencies] diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index f28d8f278a0..897a7e5eccc 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ b/beacon_node/operation_pool/src/attestation.rs @@ -1,12 +1,13 @@ use crate::attestation_storage::{CompactAttestationRef, CompactIndexedAttestation}; use crate::max_cover::MaxCover; use crate::reward_cache::RewardCache; +use ssz::BitList; use state_processing::common::{ attesting_indices_base::get_attesting_indices, base, get_attestation_participation_flag_indices, }; use std::collections::HashMap; use types::{ - Attestation, BeaconState, BitList, ChainSpec, EthSpec, + Attestation, BeaconState, ChainSpec, EthSpec, beacon_state::BeaconStateBase, consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, }; diff --git 
a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index 4f1b8b81fe4..9094c9cd4d4 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -1,11 +1,13 @@ use crate::AttestationStats; +use bls::AggregateSignature; use itertools::Itertools; +use ssz::{BitList, BitVector}; use std::collections::{BTreeMap, HashMap, HashSet}; +use superstruct::superstruct; +use typenum::Unsigned; use types::{ - AggregateSignature, Attestation, AttestationData, BeaconState, BitList, BitVector, Checkpoint, - Epoch, EthSpec, Hash256, Slot, Unsigned, + Attestation, AttestationData, BeaconState, Checkpoint, Epoch, EthSpec, Hash256, Slot, attestation::{AttestationBase, AttestationElectra}, - superstruct, }; #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index e92d381bacc..00361450a5b 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -35,12 +35,12 @@ use state_processing::{SigVerifiedOp, VerifyOperation}; use std::collections::{HashMap, HashSet, hash_map::Entry}; use std::marker::PhantomData; use std::ptr; +use typenum::Unsigned; use types::{ AbstractExecPayload, Attestation, AttestationData, AttesterSlashing, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ProposerSlashing, SignedBeaconBlock, SignedBlsToExecutionChange, SignedVoluntaryExit, Slot, SyncAggregate, SyncCommitteeContribution, Validator, sync_aggregate::Error as SyncAggregateError, - typenum::Unsigned, }; type SyncContributions = RwLock>>>; @@ -793,6 +793,8 @@ mod release_tests { use beacon_chain::test_utils::{ BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee, test_spec, }; + use bls::Keypair; + use fixed_bytes::FixedBytesExtended; use maplit::hashset; use state_processing::epoch_cache::initialize_epoch_cache; use 
state_processing::{VerifyOperation, common::get_attesting_indices_from_state}; diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index ee45c8dd053..241b5fec53c 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -11,6 +11,7 @@ use state_processing::SigVerifiedOp; use std::collections::HashSet; use std::mem; use store::{DBColumn, Error as StoreError, StoreItem}; +use superstruct::superstruct; use types::attestation::AttestationOnDisk; use types::*; diff --git a/beacon_node/operation_pool/src/reward_cache.rs b/beacon_node/operation_pool/src/reward_cache.rs index adedcb5e39e..1e3fc4cf2dc 100644 --- a/beacon_node/operation_pool/src/reward_cache.rs +++ b/beacon_node/operation_pool/src/reward_cache.rs @@ -1,8 +1,7 @@ use crate::OpPoolError; use bitvec::vec::BitVec; -use types::{ - BeaconState, BeaconStateError, Epoch, EthSpec, FixedBytesExtended, Hash256, ParticipationFlags, -}; +use fixed_bytes::FixedBytesExtended; +use types::{BeaconState, BeaconStateError, Epoch, EthSpec, Hash256, ParticipationFlags}; #[derive(Debug, PartialEq, Eq, Clone)] struct Initialization { diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 70dd9a12edc..c3b34247b0e 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -6,6 +6,7 @@ use beacon_chain::chain_config::{ }; use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::graffiti_calculator::GraffitiOrigin; +use bls::PublicKeyBytes; use clap::{ArgMatches, Id, parser::ValueSource}; use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; use clap_utils::{parse_flag, parse_required}; diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 61a8474a731..50028fe73ff 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -15,11 +15,13 @@ db-key = "0.0.5" directory = { workspace = true } ethereum_ssz = { workspace = true } 
ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace = true } itertools = { workspace = true } leveldb = { version = "0.8.6", optional = true, default-features = false } logging = { workspace = true } lru = { workspace = true } metrics = { workspace = true } +milhouse = { workspace = true } parking_lot = { workspace = true } redb = { version = "2.1.3", optional = true } safe_arith = { workspace = true } @@ -31,6 +33,7 @@ strum = { workspace = true } superstruct = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } +typenum = { workspace = true } types = { workspace = true } xdelta3 = { workspace = true } zstd = { workspace = true } diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index ee043c14f4e..9c8114e0c14 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -16,7 +16,9 @@ //! of elements. To find the chunk index of a vector index: `cindex = vindex / chunk_size`. use self::UpdatePattern::*; use crate::*; +use milhouse::{List, Vector}; use ssz::{Decode, Encode}; +use typenum::Unsigned; use types::historical_summary::HistoricalSummary; /// Description of how a `BeaconState` field is updated during state processing. 
@@ -784,6 +786,7 @@ impl From for ChunkError { #[cfg(test)] mod test { use super::*; + use fixed_bytes::FixedBytesExtended; use types::MainnetEthSpec as TestSpec; use types::*; diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index c0f15f2417b..05aa016ec10 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -1,5 +1,4 @@ use crate::hdiff::HierarchyConfig; -use crate::superstruct; use crate::{DBColumn, Error, StoreItem}; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; @@ -7,6 +6,7 @@ use ssz_derive::{Decode, Encode}; use std::io::{Read, Write}; use std::num::NonZeroUsize; use strum::{Display, EnumString, EnumVariantNames}; +use superstruct::superstruct; use types::EthSpec; use types::non_zero_usize::new_non_zero_usize; use zstd::{Decoder, Encoder}; diff --git a/beacon_node/store/src/database/leveldb_impl.rs b/beacon_node/store/src/database/leveldb_impl.rs index 8fdd5812eab..6b8c6156315 100644 --- a/beacon_node/store/src/database/leveldb_impl.rs +++ b/beacon_node/store/src/database/leveldb_impl.rs @@ -3,6 +3,7 @@ use crate::hot_cold_store::{BytesKey, HotColdDBError}; use crate::{ ColumnIter, ColumnKeyIter, DBColumn, Error, KeyValueStoreOp, get_key_for_col, metrics, }; +use fixed_bytes::FixedBytesExtended; use leveldb::{ compaction::Compaction, database::{ @@ -16,7 +17,7 @@ use leveldb::{ use std::collections::HashSet; use std::marker::PhantomData; use std::path::Path; -use types::{EthSpec, FixedBytesExtended, Hash256}; +use types::{EthSpec, Hash256}; use super::interface::WriteOptions; diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index f62647ae545..6da99b7bd63 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -6,7 +6,7 @@ use crate::{DBColumn, hdiff}; use leveldb::error::Error as LevelDBError; use ssz::DecodeError; use state_processing::BlockReplayError; -use types::{BeaconStateError, EpochCacheError, Hash256, 
InconsistentFork, Slot, milhouse}; +use types::{BeaconStateError, EpochCacheError, Hash256, InconsistentFork, Slot}; pub type Result = std::result::Result; diff --git a/beacon_node/store/src/hdiff.rs b/beacon_node/store/src/hdiff.rs index 3e20aab9bf0..323c87a9142 100644 --- a/beacon_node/store/src/hdiff.rs +++ b/beacon_node/store/src/hdiff.rs @@ -2,6 +2,7 @@ use crate::{DBColumn, StoreConfig, StoreItem, metrics}; use bls::PublicKeyBytes; use itertools::Itertools; +use milhouse::List; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -11,7 +12,7 @@ use std::str::FromStr; use std::sync::LazyLock; use superstruct::superstruct; use types::historical_summary::HistoricalSummary; -use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, List, Slot, Validator}; +use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot, Validator}; static EMPTY_PUBKEY: LazyLock = LazyLock::new(PublicKeyBytes::empty); diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 8f5eead8c20..c4137191744 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -18,6 +18,7 @@ use crate::{ metrics::{self, COLD_METRIC, HOT_METRIC}, parse_data_column_key, }; +use fixed_bytes::FixedBytesExtended; use itertools::{Itertools, process_results}; use lru::LruCache; use parking_lot::{Mutex, RwLock}; @@ -38,6 +39,7 @@ use std::path::Path; use std::sync::Arc; use std::time::Duration; use tracing::{debug, error, info, instrument, warn}; +use typenum::Unsigned; use types::data_column_sidecar::{ColumnIndex, DataColumnSidecar, DataColumnSidecarList}; use types::*; use zstd::{Decoder, Encoder}; diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 88d509731c8..e2b666e5973 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -2,9 +2,9 @@ use crate::errors::HandleUnavailable; use crate::{Error, HotColdDB, 
ItemStore}; use std::borrow::Cow; use std::marker::PhantomData; +use typenum::Unsigned; use types::{ BeaconState, BeaconStateError, BlindedPayload, EthSpec, Hash256, SignedBeaconBlock, Slot, - typenum::Unsigned, }; /// Implemented for types that have ancestors (e.g., blocks, states) that may be iterated over. @@ -387,8 +387,8 @@ mod test { use crate::{MemoryStore, StoreConfig as Config}; use beacon_chain::test_utils::BeaconChainHarness; use beacon_chain::types::MainnetEthSpec; + use fixed_bytes::FixedBytesExtended; use std::sync::Arc; - use types::FixedBytesExtended; fn get_state() -> BeaconState { let harness = BeaconChainHarness::builder(E::default()) diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 13b0dfab9f7..8ee37169aca 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -3,11 +3,12 @@ use crate::chunked_vector::{ load_variable_list_from_db, load_vector_from_db, }; use crate::{DBColumn, Error, KeyValueStore, KeyValueStoreOp}; -use ssz::{Decode, DecodeError, Encode}; +use milhouse::{List, Vector}; +use ssz::{BitVector, Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use std::sync::Arc; +use superstruct::superstruct; use types::historical_summary::HistoricalSummary; -use types::superstruct; use types::*; /// DEPRECATED Lightweight variant of the `BeaconState` that is stored in the database. 
diff --git a/common/account_utils/Cargo.toml b/common/account_utils/Cargo.toml index 00c74a13038..d0a3e487c43 100644 --- a/common/account_utils/Cargo.toml +++ b/common/account_utils/Cargo.toml @@ -6,6 +6,7 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +bls = { workspace = true } eth2_keystore = { workspace = true } eth2_wallet = { workspace = true } filesystem = { workspace = true } diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 596d50de420..bffdfcc38bd 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -4,6 +4,7 @@ //! attempt) to load into the `crate::intialized_validators::InitializedValidators` struct. use crate::{default_keystore_password_path, read_password_string, write_file_via_temporary}; +use bls::PublicKey; use eth2_keystore::Keystore; use regex::Regex; use serde::{Deserialize, Serialize}; @@ -12,7 +13,7 @@ use std::fs::{self, File, create_dir_all}; use std::io; use std::path::{Path, PathBuf}; use tracing::error; -use types::{Address, PublicKey, graffiti::GraffitiString}; +use types::{Address, graffiti::GraffitiString}; use validator_dir::VOTING_KEYSTORE_FILE; use zeroize::Zeroizing; diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index dfaad43719d..76c18ef2429 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -7,9 +7,10 @@ edition = { workspace = true } build = "build.rs" [dependencies] -alloy-dyn-abi = "1.4" -alloy-json-abi = "1.4" +alloy-dyn-abi = { workspace = true } +alloy-json-abi = { workspace = true } alloy-primitives = { workspace = true } +bls = { workspace = true } ethereum_ssz = { workspace = true } serde_json = { workspace = true } tree_hash = { workspace = true } diff --git a/common/deposit_contract/build.rs 
b/common/deposit_contract/build.rs index cae1d480c81..2061d13c243 100644 --- a/common/deposit_contract/build.rs +++ b/common/deposit_contract/build.rs @@ -153,14 +153,13 @@ fn verify_checksum(bytes: &[u8], expected_checksum: &str) { /// Returns the directory that will be used to store the deposit contract ABI. fn abi_dir() -> PathBuf { - let base = env::var("CARGO_MANIFEST_DIR") - .expect("should know manifest dir") + let base = env::var("OUT_DIR") + .expect("should know out dir") .parse::() - .expect("should parse manifest dir as path") - .join("contracts"); + .expect("should parse out dir as path"); std::fs::create_dir_all(base.clone()) - .expect("should be able to create abi directory in manifest"); + .expect("should be able to create abi directory in out dir"); base } diff --git a/common/deposit_contract/src/lib.rs b/common/deposit_contract/src/lib.rs index 12c3bdaa894..6200a4ca158 100644 --- a/common/deposit_contract/src/lib.rs +++ b/common/deposit_contract/src/lib.rs @@ -1,9 +1,10 @@ use alloy_dyn_abi::{DynSolValue, JsonAbiExt}; use alloy_json_abi::JsonAbi; use alloy_primitives::FixedBytes; +use bls::{PublicKeyBytes, SignatureBytes}; use ssz::{Decode, DecodeError as SszDecodeError, Encode}; use tree_hash::TreeHash; -use types::{DepositData, Hash256, PublicKeyBytes, SignatureBytes}; +use types::{DepositData, Hash256}; #[derive(Debug)] pub enum Error { @@ -44,15 +45,25 @@ impl From for Error { pub const CONTRACT_DEPLOY_GAS: usize = 4_000_000; pub const DEPOSIT_GAS: usize = 400_000; -pub const ABI: &[u8] = include_bytes!("../contracts/v0.12.1_validator_registration.json"); -pub const BYTECODE: &[u8] = include_bytes!("../contracts/v0.12.1_validator_registration.bytecode"); +pub const ABI: &[u8] = include_bytes!(concat!( + env!("OUT_DIR"), + "/v0.12.1_validator_registration.json" +)); +pub const BYTECODE: &[u8] = include_bytes!(concat!( + env!("OUT_DIR"), + "/v0.12.1_validator_registration.bytecode" +)); pub const DEPOSIT_DATA_LEN: usize = 420; // lol pub mod 
testnet { - pub const ABI: &[u8] = - include_bytes!("../contracts/v0.12.1_testnet_validator_registration.json"); - pub const BYTECODE: &[u8] = - include_bytes!("../contracts/v0.12.1_testnet_validator_registration.bytecode"); + pub const ABI: &[u8] = include_bytes!(concat!( + env!("OUT_DIR"), + "/v0.12.1_testnet_validator_registration.json" + )); + pub const BYTECODE: &[u8] = include_bytes!(concat!( + env!("OUT_DIR"), + "/v0.12.1_testnet_validator_registration.bytecode" + )); } pub fn encode_eth1_tx_data(deposit_data: &DepositData) -> Result, Error> { @@ -116,10 +127,8 @@ pub fn decode_eth1_tx_data(bytes: &[u8], amount: u64) -> Result<(DepositData, Ha #[cfg(test)] mod tests { use super::*; - use types::{ - ChainSpec, EthSpec, Keypair, MinimalEthSpec, Signature, - test_utils::generate_deterministic_keypair, - }; + use bls::{Keypair, Signature}; + use types::{ChainSpec, EthSpec, MinimalEthSpec, test_utils::generate_deterministic_keypair}; type E = MinimalEthSpec; diff --git a/common/eip_3076/Cargo.toml b/common/eip_3076/Cargo.toml index 851ef26238a..058e1fd1a0a 100644 --- a/common/eip_3076/Cargo.toml +++ b/common/eip_3076/Cargo.toml @@ -11,7 +11,9 @@ json = ["dep:serde_json"] [dependencies] arbitrary = { workspace = true, features = ["derive"], optional = true } +bls = { workspace = true } ethereum_serde_utils = { workspace = true } +fixed_bytes = { workspace = true } serde = { workspace = true } serde_json = { workspace = true, optional = true } types = { workspace = true } diff --git a/common/eip_3076/src/lib.rs b/common/eip_3076/src/lib.rs index 2d47a77de40..cdd05d7b1ed 100644 --- a/common/eip_3076/src/lib.rs +++ b/common/eip_3076/src/lib.rs @@ -1,9 +1,10 @@ +use bls::PublicKeyBytes; use serde::{Deserialize, Serialize}; use std::cmp::max; use std::collections::{HashMap, HashSet}; #[cfg(feature = "json")] use std::io; -use types::{Epoch, Hash256, PublicKeyBytes, Slot}; +use types::{Epoch, Hash256, Slot}; #[derive(Debug)] pub enum Error { @@ -170,9 +171,9 @@ impl 
Interchange { #[cfg(test)] mod tests { use super::*; + use fixed_bytes::FixedBytesExtended; use std::fs::File; use tempfile::tempdir; - use types::FixedBytesExtended; fn get_interchange() -> Interchange { Interchange { diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 7a75bdc80a1..da8aba5ded9 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -5,35 +5,35 @@ authors = ["Paul Hauner "] edition = { workspace = true } [features] -default = ["lighthouse"] -lighthouse = [] +default = [] +lighthouse = ["proto_array", "eth2_keystore", "eip_3076", "zeroize"] +events = ["reqwest-eventsource", "futures", "futures-util"] [dependencies] +bls = { workspace = true } +context_deserialize = { workspace = true } educe = { workspace = true } -eip_3076 = { workspace = true } -either = { workspace = true } -enr = { version = "0.13.0", features = ["ed25519"] } -eth2_keystore = { workspace = true } +eip_3076 = { workspace = true, optional = true } +eth2_keystore = { workspace = true, optional = true } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -futures = { workspace = true } -futures-util = "0.3.8" -libp2p-identity = { version = "0.2", features = ["peerid"] } +futures = { workspace = true, optional = true } +futures-util = { version = "0.3.8", optional = true } mediatype = "0.19.13" -multiaddr = "0.18.2" pretty_reqwest_error = { workspace = true } -proto_array = { workspace = true } -rand = { workspace = true } +proto_array = { workspace = true, optional = true } reqwest = { workspace = true } -reqwest-eventsource = "0.6.0" +reqwest-eventsource = { version = "0.6.0", optional = true } sensitive_url = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } ssz_types = { workspace = true } -test_random_derive = { path = "../../common/test_random_derive" } +superstruct = { workspace = true } types = { workspace = true } -zeroize = { workspace = true } 
+zeroize = { workspace = true, optional = true } [dev-dependencies] +rand = { workspace = true } +test_random_derive = { path = "../../common/test_random_derive" } tokio = { workspace = true } diff --git a/consensus/types/src/beacon_response.rs b/common/eth2/src/beacon_response.rs similarity index 97% rename from consensus/types/src/beacon_response.rs rename to common/eth2/src/beacon_response.rs index fc59fc94329..d58734997ce 100644 --- a/consensus/types/src/beacon_response.rs +++ b/common/eth2/src/beacon_response.rs @@ -1,12 +1,8 @@ -use crate::{ContextDeserialize, ForkName}; +use context_deserialize::ContextDeserialize; use serde::de::DeserializeOwned; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::value::Value; - -pub trait ForkVersionDecode: Sized { - /// SSZ decode with explicit fork variant. - fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result; -} +use types::ForkName; /// The metadata of type M should be set to `EmptyMetadata` if you don't care about adding fields other than /// version. If you *do* care about adding other fields you can mix in any type that implements diff --git a/common/eth2/src/error.rs b/common/eth2/src/error.rs index c1bacb4510b..1f21220b798 100644 --- a/common/eth2/src/error.rs +++ b/common/eth2/src/error.rs @@ -14,6 +14,7 @@ use std::{fmt, path::PathBuf}; pub enum Error { /// The `reqwest` client raised an error. HttpClient(PrettyReqwestError), + #[cfg(feature = "events")] /// The `reqwest_eventsource` client raised an error. SseClient(Box), /// The server returned an error message where the body was able to be parsed. 
@@ -91,6 +92,7 @@ impl Error { pub fn status(&self) -> Option { match self { Error::HttpClient(error) => error.inner().status(), + #[cfg(feature = "events")] Error::SseClient(error) => { if let reqwest_eventsource::Error::InvalidStatusCode(status, _) = error.as_ref() { Some(*status) diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index bcd979daca6..820d817d9d8 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -7,6 +7,7 @@ //! Eventually it would be ideal to publish this crate on crates.io, however we have some local //! dependencies preventing this presently. +pub mod beacon_response; pub mod error; #[cfg(feature = "lighthouse")] pub mod lighthouse; @@ -15,22 +16,31 @@ pub mod lighthouse_vc; pub mod mixin; pub mod types; +pub use beacon_response::{ + BeaconResponse, EmptyMetadata, ExecutionOptimisticFinalizedBeaconResponse, + ExecutionOptimisticFinalizedMetadata, ForkVersionedResponse, UnversionedResponse, +}; + pub use self::error::{Error, ok_or_error, success_or_error}; +pub use reqwest; +pub use reqwest::{StatusCode, Url}; +pub use sensitive_url::SensitiveUrl; + use self::mixin::{RequestAccept, ResponseOptional}; use self::types::*; -use ::types::beacon_response::ExecutionOptimisticFinalizedBeaconResponse; +use bls::SignatureBytes; +use context_deserialize::ContextDeserialize; use educe::Educe; +#[cfg(feature = "events")] use futures::Stream; +#[cfg(feature = "events")] use futures_util::StreamExt; -use libp2p_identity::PeerId; -pub use reqwest; use reqwest::{ Body, IntoUrl, RequestBuilder, Response, header::{HeaderMap, HeaderValue}, }; -pub use reqwest::{StatusCode, Url}; +#[cfg(feature = "events")] use reqwest_eventsource::{Event, EventSource}; -pub use sensitive_url::SensitiveUrl; use serde::{Serialize, de::DeserializeOwned}; use ssz::Encode; use std::fmt; @@ -1973,7 +1983,7 @@ impl BeaconNodeHttpClient { /// `GET node/peers/{peer_id}` pub async fn get_node_peers_by_id( &self, - peer_id: PeerId, + peer_id: &str, ) -> Result, 
Error> { let mut path = self.eth_path(V1)?; @@ -1981,7 +1991,7 @@ impl BeaconNodeHttpClient { .map_err(|()| Error::InvalidUrl(self.server.clone()))? .push("node") .push("peers") - .push(&peer_id.to_string()); + .push(peer_id); self.get(path).await } @@ -2756,6 +2766,7 @@ impl BeaconNodeHttpClient { } /// `GET events?topics` + #[cfg(feature = "events")] pub async fn get_events( &self, topic: &[EventTopic], diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 8c9d3397a8c..3c850fcb052 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -1,5 +1,6 @@ use super::types::*; use crate::{Error, success_or_error}; +use bls::PublicKeyBytes; use reqwest::{ IntoUrl, header::{HeaderMap, HeaderValue}, diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index 0290bdd0b79..c54252b9e33 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -1,6 +1,7 @@ +use bls::PublicKeyBytes; use eth2_keystore::Keystore; use serde::{Deserialize, Serialize}; -use types::{Address, Graffiti, PublicKeyBytes}; +use types::{Address, Graffiti}; use zeroize::Zeroizing; pub use eip_3076::Interchange; diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 4407e30e436..07f8421dc5c 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -1,8 +1,8 @@ pub use crate::lighthouse::Health; pub use crate::lighthouse_vc::std_types::*; pub use crate::types::{GenericResponse, VersionData}; +use bls::{PublicKey, PublicKeyBytes}; use eth2_keystore::Keystore; -use graffiti::GraffitiString; use serde::{Deserialize, Serialize}; use std::path::PathBuf; pub use types::*; diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 6aad00301a6..aace8f936c9 100644 --- a/common/eth2/src/types.rs +++ 
b/common/eth2/src/types.rs @@ -1,13 +1,15 @@ //! This module exposes a superset of the `types` crate. It adds additional types that are only //! required for the HTTP API. +pub use types::*; + use crate::{ CONSENSUS_BLOCK_VALUE_HEADER, CONSENSUS_VERSION_HEADER, EXECUTION_PAYLOAD_BLINDED_HEADER, EXECUTION_PAYLOAD_VALUE_HEADER, Error as ServerError, }; -use enr::{CombinedKey, Enr}; +use bls::{PublicKeyBytes, SecretKey, Signature, SignatureBytes}; +use context_deserialize::ContextDeserialize; use mediatype::{MediaType, MediaTypeList, names}; -use multiaddr::Multiaddr; use reqwest::header::HeaderMap; use serde::{Deserialize, Deserializer, Serialize}; use serde_utils::quoted_u64::Quoted; @@ -18,10 +20,18 @@ use std::fmt::{self, Display}; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; +use superstruct::superstruct; + +#[cfg(test)] use test_random_derive::TestRandom; -use types::beacon_block_body::KzgCommitments; +#[cfg(test)] use types::test_utils::TestRandom; -pub use types::*; + +// TODO(mac): Temporary module and re-export hack to expose old `consensus/types` via `eth2/types`. 
+pub use crate::beacon_response::*; +pub mod beacon_response { + pub use crate::beacon_response::*; +} #[cfg(feature = "lighthouse")] use crate::lighthouse::BlockReward; @@ -552,9 +562,9 @@ pub struct ChainHeadData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct IdentityData { pub peer_id: String, - pub enr: Enr, - pub p2p_addresses: Vec, - pub discovery_addresses: Vec, + pub enr: String, + pub p2p_addresses: Vec, + pub discovery_addresses: Vec, pub metadata: MetaData, } @@ -2203,7 +2213,8 @@ pub enum ContentType { Ssz, } -#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[cfg_attr(test, derive(TestRandom))] +#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, Encode, Decode)] #[serde(bound = "E: EthSpec")] pub struct BlobsBundle { pub commitments: KzgCommitments, diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index ec5b0cc1d71..416ffb1975a 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -10,6 +10,7 @@ build = "build.rs" bytes = { workspace = true } discv5 = { workspace = true } eth2_config = { workspace = true } +fixed_bytes = { workspace = true } kzg = { workspace = true } pretty_reqwest_error = { workspace = true } reqwest = { workspace = true } diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 12de21239a0..16ee45e524e 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -464,9 +464,10 @@ fn parse_state_download_url(url: &str) -> Result { #[cfg(test)] mod tests { use super::*; + use fixed_bytes::FixedBytesExtended; use ssz::Encode; use tempfile::Builder as TempBuilder; - use types::{Eth1Data, FixedBytesExtended, GnosisEthSpec, MainnetEthSpec}; + use types::{Eth1Data, GnosisEthSpec, MainnetEthSpec}; type E = MainnetEthSpec; diff --git a/common/health_metrics/Cargo.toml 
b/common/health_metrics/Cargo.toml index 20a8c6e4e48..816d4ec68cc 100644 --- a/common/health_metrics/Cargo.toml +++ b/common/health_metrics/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = { workspace = true } [dependencies] -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["lighthouse"] } metrics = { workspace = true } [target.'cfg(target_os = "linux")'.dependencies] diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index 9e2c36e2c76..e00b1f027b6 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -6,7 +6,7 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["lighthouse"] } health_metrics = { workspace = true } lighthouse_version = { workspace = true } metrics = { workspace = true } diff --git a/common/validator_dir/src/builder.rs b/common/validator_dir/src/builder.rs index bae36789bb5..ab495242e49 100644 --- a/common/validator_dir/src/builder.rs +++ b/common/validator_dir/src/builder.rs @@ -1,5 +1,5 @@ use crate::{Error as DirError, ValidatorDir}; -use bls::get_withdrawal_credentials; +use bls::{Keypair, Signature, get_withdrawal_credentials}; use deposit_contract::{Error as DepositError, encode_eth1_tx_data}; use eth2_keystore::{Error as KeystoreError, Keystore, KeystoreBuilder, PlainText}; use filesystem::create_with_600_perms; @@ -7,7 +7,7 @@ use rand::{Rng, distr::Alphanumeric}; use std::fs::{File, create_dir_all}; use std::io::{self, Write}; use std::path::{Path, PathBuf}; -use types::{ChainSpec, DepositData, Hash256, Keypair, Signature}; +use types::{ChainSpec, DepositData, Hash256}; /// The `Alphanumeric` crate only generates a-z, A-Z, 0-9, therefore it has a range of 62 /// characters. 
diff --git a/common/validator_dir/src/validator_dir.rs b/common/validator_dir/src/validator_dir.rs index 8b50ea66876..0799897a70f 100644 --- a/common/validator_dir/src/validator_dir.rs +++ b/common/validator_dir/src/validator_dir.rs @@ -2,6 +2,7 @@ use crate::builder::{ ETH1_DEPOSIT_AMOUNT_FILE, ETH1_DEPOSIT_DATA_FILE, VOTING_KEYSTORE_FILE, WITHDRAWAL_KEYSTORE_FILE, keystore_password_path, }; +use bls::Keypair; use deposit_contract::decode_eth1_tx_data; use educe::Educe; use eth2_keystore::{Error as KeystoreError, Keystore, PlainText}; @@ -10,7 +11,7 @@ use std::fs::{File, read, write}; use std::io; use std::path::{Path, PathBuf}; use tree_hash::TreeHash; -use types::{DepositData, Hash256, Keypair}; +use types::{DepositData, Hash256}; /// The file used to save the Eth1 transaction hash from a deposit. pub const ETH1_DEPOSIT_TX_HASH_FILE: &str = "eth1-deposit-tx-hash.txt"; diff --git a/common/validator_dir/tests/tests.rs b/common/validator_dir/tests/tests.rs index 7d9730ebd37..ede80c244ee 100644 --- a/common/validator_dir/tests/tests.rs +++ b/common/validator_dir/tests/tests.rs @@ -1,10 +1,11 @@ #![cfg(not(debug_assertions))] +use bls::Keypair; use eth2_keystore::{Keystore, KeystoreBuilder, PlainText}; use std::fs::{self, File}; use std::path::Path; use tempfile::{TempDir, tempdir}; -use types::{EthSpec, Keypair, MainnetEthSpec, test_utils::generate_deterministic_keypair}; +use types::{EthSpec, MainnetEthSpec, test_utils::generate_deterministic_keypair}; use validator_dir::{ Builder, BuilderError, ETH1_DEPOSIT_DATA_FILE, ETH1_DEPOSIT_TX_HASH_FILE, VOTING_KEYSTORE_FILE, ValidatorDir, WITHDRAWAL_KEYSTORE_FILE, diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index 0a244c2ba19..a07aa38aa5b 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -8,6 +8,7 @@ edition = { workspace = true } [dependencies] ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace 
= true } logging = { workspace = true } metrics = { workspace = true } proto_array = { workspace = true } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 6565e7cdaf6..9a8cae0c365 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,5 +1,6 @@ use crate::metrics::{self, scrape_for_metrics}; use crate::{ForkChoiceStore, InvalidationOperation}; +use fixed_bytes::FixedBytesExtended; use logging::crit; use proto_array::{ Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, JustifiedBalances, @@ -19,7 +20,7 @@ use tracing::{debug, instrument, warn}; use types::{ AbstractExecPayload, AttestationShufflingId, AttesterSlashingRef, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, - FixedBytesExtended, Hash256, IndexedAttestationRef, RelativeEpoch, SignedBeaconBlock, Slot, + Hash256, IndexedAttestationRef, RelativeEpoch, SignedBeaconBlock, Slot, consts::bellatrix::INTERVALS_PER_SLOT, }; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 67b792ef0d8..d3a84ee85be 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -7,6 +7,7 @@ use beacon_chain::{ BeaconChain, BeaconChainError, BeaconForkChoiceStore, ChainConfig, ForkChoiceError, StateSkipConfig, WhenSlotSkipped, }; +use fixed_bytes::FixedBytesExtended; use fork_choice::{ ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation, }; @@ -17,9 +18,9 @@ use std::time::Duration; use store::MemoryStore; use types::SingleAttestation; use types::{ - BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, - ForkName, Hash256, IndexedAttestation, MainnetEthSpec, RelativeEpoch, SignedBeaconBlock, Slot, - SubnetId, test_utils::generate_deterministic_keypair, + BeaconBlockRef, BeaconState, ChainSpec, 
Checkpoint, Epoch, EthSpec, ForkName, Hash256, + IndexedAttestation, MainnetEthSpec, RelativeEpoch, SignedBeaconBlock, Slot, SubnetId, + test_utils::generate_deterministic_keypair, }; pub type E = MainnetEthSpec; diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index bd6757c0fad..782610e0d35 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -11,6 +11,7 @@ path = "src/bin.rs" [dependencies] ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true } serde_yaml = { workspace = true } diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 43a7e3b77fe..e9deb6759fc 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -5,11 +5,12 @@ mod votes; use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; use crate::{InvalidationOperation, JustifiedBalances}; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use std::collections::BTreeSet; use types::{ - AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, FixedBytesExtended, - Hash256, MainnetEthSpec, Slot, + AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + MainnetEthSpec, Slot, }; pub use execution_status::*; diff --git a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs index de84fbdd128..d20eaacb99a 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs @@ -1,4 +1,4 @@ -use types::FixedBytesExtended; +use fixed_bytes::FixedBytesExtended; use super::*; diff --git 
a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 1d78ce9f443..5bfcdae463d 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1,5 +1,6 @@ use crate::error::InvalidBestNodeInfo; use crate::{Block, ExecutionStatus, JustifiedBalances, error::Error}; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz::four_byte_option_impl; @@ -7,8 +8,8 @@ use ssz_derive::{Decode, Encode}; use std::collections::{HashMap, HashSet}; use superstruct::superstruct; use types::{ - AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, - FixedBytesExtended, Hash256, Slot, + AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + Slot, }; // Define a "legacy" implementation of `Option` which uses four bytes for encoding the union diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 137471ce36d..3edf1e0644d 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -7,6 +7,7 @@ use crate::{ }, ssz_container::SszContainer, }; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -15,8 +16,8 @@ use std::{ fmt, }; use types::{ - AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, - FixedBytesExtended, Hash256, Slot, + AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + Slot, }; pub const DEFAULT_PRUNE_THRESHOLD: usize = 256; @@ -1095,7 +1096,8 @@ fn compute_deltas( #[cfg(test)] mod test_compute_deltas { use super::*; - use types::{FixedBytesExtended, MainnetEthSpec}; + use fixed_bytes::FixedBytesExtended; + use types::MainnetEthSpec; /// Gives a hash that is not the zero hash (unless i is 
`usize::MAX)`. fn hash_from_index(i: usize) -> Hash256 { diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 3821aa16891..a08035d5838 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -24,11 +24,13 @@ educe = { workspace = true } ethereum_hashing = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace = true } int_to_bytes = { workspace = true } integer-sqrt = "0.1.5" itertools = { workspace = true } merkle_proof = { workspace = true } metrics = { workspace = true } +milhouse = { workspace = true } rand = { workspace = true } rayon = { workspace = true } safe_arith = { workspace = true } @@ -37,6 +39,7 @@ ssz_types = { workspace = true } test_random_derive = { path = "../../common/test_random_derive" } tracing = { workspace = true } tree_hash = { workspace = true } +typenum = { workspace = true } types = { workspace = true } [dev-dependencies] diff --git a/consensus/state_processing/src/common/get_attesting_indices.rs b/consensus/state_processing/src/common/get_attesting_indices.rs index e4f5aa3c8bc..dc7be7c2515 100644 --- a/consensus/state_processing/src/common/get_attesting_indices.rs +++ b/consensus/state_processing/src/common/get_attesting_indices.rs @@ -2,6 +2,7 @@ use types::*; pub mod attesting_indices_base { use crate::per_block_processing::errors::{AttestationInvalid as Invalid, BlockOperationError}; + use ssz_types::{BitList, VariableList}; use types::*; /// Convert `attestation` to (almost) indexed-verifiable form. 
@@ -44,10 +45,10 @@ pub mod attesting_indices_base { } pub mod attesting_indices_electra { - use std::collections::HashSet; - use crate::per_block_processing::errors::{AttestationInvalid as Invalid, BlockOperationError}; use safe_arith::SafeArith; + use ssz_types::{BitList, BitVector, VariableList}; + use std::collections::HashSet; use types::*; /// Compute an Electra IndexedAttestation given a list of committees. diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index 52f360849e0..01c1855fb10 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -6,6 +6,7 @@ use crate::{ }; use safe_arith::SafeArith; use std::cmp; +use typenum::Unsigned; use types::{ consts::altair::{PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, *, diff --git a/consensus/state_processing/src/epoch_cache.rs b/consensus/state_processing/src/epoch_cache.rs index 86db037446b..ee03596d098 100644 --- a/consensus/state_processing/src/epoch_cache.rs +++ b/consensus/state_processing/src/epoch_cache.rs @@ -2,12 +2,11 @@ use crate::common::altair::BaseRewardPerIncrement; use crate::common::base::SqrtTotalActiveBalance; use crate::common::{altair, base}; use crate::metrics; +use fixed_bytes::FixedBytesExtended; use safe_arith::SafeArith; use tracing::instrument; use types::epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; -use types::{ - ActivationQueue, BeaconState, ChainSpec, EthSpec, FixedBytesExtended, ForkName, Hash256, -}; +use types::{ActivationQueue, BeaconState, ChainSpec, EthSpec, ForkName, Hash256}; /// Precursor to an `EpochCache`. 
pub struct PreEpochCache { diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 88ef79310dc..d00e1fcfacc 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -7,6 +7,7 @@ use crate::upgrade::{ upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, upgrade_to_fulu, upgrade_to_gloas, }; +use fixed_bytes::FixedBytesExtended; use safe_arith::{ArithError, SafeArith}; use std::sync::Arc; use tree_hash::TreeHash; diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 9e7a20040e8..f78c8c4eb38 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -5,6 +5,7 @@ use safe_arith::{ArithError, SafeArith, SafeArithIter}; use signature_sets::{block_proposal_signature_set, get_pubkey_from_state, randao_signature_set}; use std::borrow::Cow; use tree_hash::TreeHash; +use typenum::Unsigned; use types::*; pub use self::verify_attester_slashing::{ diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs index 1219c7df442..8cc9de42db0 100644 --- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs +++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs @@ -1,12 +1,12 @@ use crate::common::{altair::BaseRewardPerIncrement, decrease_balance, increase_balance}; use crate::per_block_processing::errors::{BlockProcessingError, SyncAggregateInvalid}; use crate::{VerifySignatures, signature_sets::sync_aggregate_signature_set}; +use bls::PublicKeyBytes; use safe_arith::SafeArith; use std::borrow::Cow; +use typenum::Unsigned; use types::consts::altair::{PROPOSER_WEIGHT, SYNC_REWARD_WEIGHT, WEIGHT_DENOMINATOR}; -use types::{ - BeaconState, BeaconStateError, 
ChainSpec, EthSpec, PublicKeyBytes, SyncAggregate, Unsigned, -}; +use types::{BeaconState, BeaconStateError, ChainSpec, EthSpec, SyncAggregate}; pub fn process_sync_aggregate( state: &mut BeaconState, diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 9a1c6c2f6ad..8afeeb685bc 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -5,8 +5,9 @@ use crate::common::{ slash_validator, }; use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; +use ssz_types::FixedVector; +use typenum::U33; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; -use types::typenum::U33; pub fn process_operations>( state: &mut BeaconState, diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index dafd0d79ea9..0e936007eec 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -2,17 +2,18 @@ //! validated individually, or alongside in others in a potentially cheaper bulk operation. //! //! This module exposes one function to extract each type of `SignatureSet` from a `BeaconBlock`. 
-use bls::SignatureSet; +use bls::{AggregateSignature, PublicKey, PublicKeyBytes, Signature, SignatureSet}; use ssz::DecodeError; use std::borrow::Cow; use tree_hash::TreeHash; +use typenum::Unsigned; use types::{ - AbstractExecPayload, AggregateSignature, AttesterSlashingRef, BeaconBlockRef, BeaconState, - BeaconStateError, ChainSpec, DepositData, Domain, Epoch, EthSpec, Fork, Hash256, - InconsistentFork, IndexedAttestation, IndexedAttestationRef, ProposerSlashing, PublicKey, - PublicKeyBytes, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHeader, - SignedBlsToExecutionChange, SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, - SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned, + AbstractExecPayload, AttesterSlashingRef, BeaconBlockRef, BeaconState, BeaconStateError, + ChainSpec, DepositData, Domain, Epoch, EthSpec, Fork, Hash256, InconsistentFork, + IndexedAttestation, IndexedAttestationRef, ProposerSlashing, SignedAggregateAndProof, + SignedBeaconBlock, SignedBeaconBlockHeader, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, SigningData, Slot, SyncAggregate, + SyncAggregatorSelectionData, }; pub type Result = std::result::Result; diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index c32797f77f3..739717b33ff 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -11,7 +11,10 @@ use crate::{ per_block_processing::{process_operations, verify_exit::verify_exit}, }; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use bls::{AggregateSignature, Keypair, PublicKeyBytes, Signature, SignatureBytes}; +use fixed_bytes::FixedBytesExtended; use ssz_types::Bitfield; +use ssz_types::VariableList; use std::sync::{Arc, LazyLock}; use test_utils::generate_deterministic_keypairs; 
use types::*; diff --git a/consensus/state_processing/src/per_block_processing/verify_deposit.rs b/consensus/state_processing/src/per_block_processing/verify_deposit.rs index c996e580a78..d403bfa82b6 100644 --- a/consensus/state_processing/src/per_block_processing/verify_deposit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_deposit.rs @@ -1,5 +1,6 @@ use super::errors::{BlockOperationError, DepositInvalid}; use crate::per_block_processing::signature_sets::deposit_pubkey_signature_message; +use bls::PublicKeyBytes; use merkle_proof::verify_merkle_proof; use safe_arith::SafeArith; use tree_hash::TreeHash; diff --git a/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs index 5c08406eaef..5e177c5d2b7 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs @@ -1,5 +1,5 @@ use crate::EpochProcessingError; -use types::List; +use milhouse::List; use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; use types::participation_flags::ParticipationFlags; diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index fd712cc8e50..a818e087755 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -1,9 +1,10 @@ use super::base::{TotalBalances, ValidatorStatus, validator_statuses::InclusionInfo}; use crate::metrics; +use milhouse::List; use std::sync::Arc; use types::{ - BeaconStateError, Epoch, EthSpec, List, ParticipationFlags, ProgressiveBalancesCache, - SyncCommittee, Validator, + BeaconStateError, Epoch, EthSpec, ParticipationFlags, 
ProgressiveBalancesCache, SyncCommittee, + Validator, consts::altair::{TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX}, }; diff --git a/consensus/state_processing/src/per_epoch_processing/errors.rs b/consensus/state_processing/src/per_epoch_processing/errors.rs index a5a2a69ebff..4818dcbf670 100644 --- a/consensus/state_processing/src/per_epoch_processing/errors.rs +++ b/consensus/state_processing/src/per_epoch_processing/errors.rs @@ -1,4 +1,5 @@ -use types::{BeaconStateError, EpochCacheError, InconsistentFork, milhouse}; +use milhouse; +use types::{BeaconStateError, EpochCacheError, InconsistentFork}; #[derive(Debug, PartialEq)] pub enum EpochProcessingError { diff --git a/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs b/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs index 8fcdda062c9..9172d954bc8 100644 --- a/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs +++ b/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs @@ -1,7 +1,7 @@ use super::errors::EpochProcessingError; use safe_arith::SafeArith; use tree_hash::TreeHash; -use types::Unsigned; +use typenum::Unsigned; use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; diff --git a/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs b/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs index 66d68804e1d..8d712fd19b8 100644 --- a/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs +++ b/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs @@ -1,4 +1,5 @@ -use types::{BeaconState, BeaconStateError, BitVector, Checkpoint, Epoch, EthSpec, Hash256}; +use ssz_types::BitVector; +use types::{BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, Hash256}; /// This is a subset of the `BeaconState` 
which is used to compute justification and finality /// without modifying the `BeaconState`. diff --git a/consensus/state_processing/src/per_epoch_processing/resets.rs b/consensus/state_processing/src/per_epoch_processing/resets.rs index c9f69c3c95e..e05fb30c334 100644 --- a/consensus/state_processing/src/per_epoch_processing/resets.rs +++ b/consensus/state_processing/src/per_epoch_processing/resets.rs @@ -1,8 +1,9 @@ use super::errors::EpochProcessingError; +use milhouse::List; use safe_arith::SafeArith; +use typenum::Unsigned; use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; -use types::{List, Unsigned}; pub fn process_eth1_data_reset( state: &mut BeaconState, diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index 1584e932bdf..914e025f2fe 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -8,19 +8,20 @@ use crate::{ per_epoch_processing::{Delta, Error, ParticipationEpochSummary}, }; use itertools::izip; +use milhouse::{Cow, List, Vector}; use safe_arith::{SafeArith, SafeArithIter}; use std::cmp::{max, min}; use std::collections::{BTreeSet, HashMap}; use tracing::instrument; +use typenum::Unsigned; use types::{ ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Checkpoint, DepositData, Epoch, - EthSpec, ExitCache, ForkName, List, ParticipationFlags, PendingDeposit, - ProgressiveBalancesCache, RelativeEpoch, Unsigned, Validator, Vector, + EthSpec, ExitCache, ForkName, ParticipationFlags, PendingDeposit, ProgressiveBalancesCache, + RelativeEpoch, Validator, consts::altair::{ NUM_FLAG_INDICES, PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, WEIGHT_DENOMINATOR, }, - milhouse::Cow, }; pub struct SinglePassConfig { diff --git a/consensus/state_processing/src/per_epoch_processing/slashings.rs 
b/consensus/state_processing/src/per_epoch_processing/slashings.rs index 47eb06e907a..6008276d150 100644 --- a/consensus/state_processing/src/per_epoch_processing/slashings.rs +++ b/consensus/state_processing/src/per_epoch_processing/slashings.rs @@ -4,7 +4,8 @@ use crate::per_epoch_processing::{ single_pass::{SinglePassConfig, process_epoch_single_pass}, }; use safe_arith::{SafeArith, SafeArithIter}; -use types::{BeaconState, ChainSpec, EthSpec, Unsigned}; +use typenum::Unsigned; +use types::{BeaconState, ChainSpec, EthSpec}; /// Process slashings. pub fn process_slashings( diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index 8695054e1e7..0f8e5dc52d8 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -3,6 +3,7 @@ use crate::upgrade::{ upgrade_to_electra, upgrade_to_fulu, upgrade_to_gloas, }; use crate::{per_epoch_processing::EpochProcessingSummary, *}; +use fixed_bytes::FixedBytesExtended; use safe_arith::{ArithError, SafeArith}; use tracing::instrument; use types::*; diff --git a/consensus/state_processing/src/state_advance.rs b/consensus/state_processing/src/state_advance.rs index 4d38e7797e6..19b21dad19a 100644 --- a/consensus/state_processing/src/state_advance.rs +++ b/consensus/state_processing/src/state_advance.rs @@ -5,7 +5,8 @@ //! duplication and protect against some easy-to-make mistakes when performing state advances. 
use crate::*; -use types::{BeaconState, ChainSpec, EthSpec, FixedBytesExtended, Hash256, Slot}; +use fixed_bytes::FixedBytesExtended; +use types::{BeaconState, ChainSpec, EthSpec, Hash256, Slot}; #[derive(Debug, PartialEq)] pub enum Error { diff --git a/consensus/state_processing/src/upgrade/altair.rs b/consensus/state_processing/src/upgrade/altair.rs index 3006da25ae7..022175ff999 100644 --- a/consensus/state_processing/src/upgrade/altair.rs +++ b/consensus/state_processing/src/upgrade/altair.rs @@ -2,11 +2,12 @@ use crate::common::update_progressive_balances_cache::initialize_progressive_bal use crate::common::{ attesting_indices_base::get_attesting_indices, get_attestation_participation_flag_indices, }; +use milhouse::List; use std::mem; use std::sync::Arc; use types::{ BeaconState, BeaconStateAltair, BeaconStateError as Error, ChainSpec, EpochCache, EthSpec, - Fork, List, ParticipationFlags, PendingAttestation, RelativeEpoch, SyncCommittee, + Fork, ParticipationFlags, PendingAttestation, RelativeEpoch, SyncCommittee, }; /// Translate the participation information from the epoch prior to the fork into Altair's format. diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index ae0dbde7678..948fa511b73 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -1,7 +1,8 @@ +use milhouse::List; use std::mem; use types::{ BeaconState, BeaconStateCapella, BeaconStateError as Error, ChainSpec, EpochCache, EthSpec, - Fork, List, + Fork, }; /// Transform a `Bellatrix` state into an `Capella` state. 
diff --git a/consensus/state_processing/src/upgrade/fulu.rs b/consensus/state_processing/src/upgrade/fulu.rs index c2aced7047a..c14c1edbec3 100644 --- a/consensus/state_processing/src/upgrade/fulu.rs +++ b/consensus/state_processing/src/upgrade/fulu.rs @@ -1,8 +1,7 @@ +use milhouse::Vector; use safe_arith::SafeArith; use std::mem; -use types::{ - BeaconState, BeaconStateError as Error, BeaconStateFulu, ChainSpec, EthSpec, Fork, Vector, -}; +use types::{BeaconState, BeaconStateError as Error, BeaconStateFulu, ChainSpec, EthSpec, Fork}; /// Transform a `Electra` state into an `Fulu` state. pub fn upgrade_to_fulu( diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 1f527c0de8a..78c6f871cb4 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -65,6 +65,7 @@ test_random_derive = { path = "../../common/test_random_derive" } tracing = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } +typenum = { workspace = true } [dev-dependencies] beacon_chain = { workspace = true } @@ -73,6 +74,9 @@ paste = { workspace = true } state_processing = { workspace = true } tokio = { workspace = true } +[lints.clippy] +module_inception = "allow" + [[bench]] name = "benches" harness = false diff --git a/consensus/types/benches/benches.rs b/consensus/types/benches/benches.rs index 814001d9660..397c33163e9 100644 --- a/consensus/types/benches/benches.rs +++ b/consensus/types/benches/benches.rs @@ -1,10 +1,11 @@ use criterion::{BatchSize, BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; +use fixed_bytes::FixedBytesExtended; use milhouse::List; use rayon::prelude::*; use ssz::Encode; use std::sync::Arc; use types::{ - BeaconState, Epoch, Eth1Data, EthSpec, FixedBytesExtended, Hash256, MainnetEthSpec, Validator, + BeaconState, Epoch, Eth1Data, EthSpec, Hash256, MainnetEthSpec, Validator, test_utils::generate_deterministic_keypair, }; diff --git a/consensus/types/src/aggregate_and_proof.rs 
b/consensus/types/src/attestation/aggregate_and_proof.rs similarity index 93% rename from consensus/types/src/aggregate_and_proof.rs rename to consensus/types/src/attestation/aggregate_and_proof.rs index e76ba48bf47..4c6e775e56d 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/attestation/aggregate_and_proof.rs @@ -1,17 +1,20 @@ -use super::{AttestationBase, AttestationElectra, AttestationRef}; -use super::{ - ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, PublicKey, SecretKey, SelectionProof, - Signature, SignedRoot, -}; -use crate::Attestation; -use crate::context_deserialize; -use crate::test_utils::TestRandom; +use bls::{PublicKey, SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::{ + Attestation, AttestationBase, AttestationElectra, AttestationRef, SelectionProof, + }, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + #[superstruct( variants(Base, Electra), variant_attributes( diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation/attestation.rs similarity index 97% rename from consensus/types/src/attestation.rs rename to consensus/types/src/attestation/attestation.rs index 14305826589..693b5889f53 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation/attestation.rs @@ -1,23 +1,28 @@ -use super::{ - AggregateSignature, AttestationData, BitList, ChainSpec, Domain, EthSpec, Fork, SecretKey, - Signature, SignedRoot, +use std::{ + collections::HashSet, + hash::{Hash, Hasher}, }; -use crate::slot_data::SlotData; -use crate::{ - Checkpoint, ContextDeserialize, ForkName, IndexedAttestationBase, IndexedAttestationElectra, -}; -use crate::{Hash256, Slot, test_utils::TestRandom}; -use 
crate::{IndexedAttestation, context_deserialize}; + +use bls::{AggregateSignature, SecretKey, Signature}; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; -use ssz_types::BitVector; -use std::collections::HashSet; -use std::hash::{Hash, Hasher}; +use ssz_types::{BitList, BitVector}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::{ + AttestationData, Checkpoint, IndexedAttestation, IndexedAttestationBase, + IndexedAttestationElectra, + }, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot, SlotData}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + #[derive(Debug, PartialEq, Clone)] pub enum Error { SszTypesError(ssz_types::Error), diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation/attestation_data.rs similarity index 87% rename from consensus/types/src/attestation_data.rs rename to consensus/types/src/attestation/attestation_data.rs index a4643e54741..f3fceb9b70f 100644 --- a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation/attestation_data.rs @@ -1,11 +1,16 @@ -use crate::slot_data::SlotData; -use crate::test_utils::TestRandom; -use crate::{Checkpoint, ForkName, Hash256, SignedRoot, Slot}; use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; + +use crate::{ + attestation::Checkpoint, + core::{Hash256, SignedRoot, Slot, SlotData}, + fork::ForkName, + test_utils::TestRandom, +}; + /// The data upon which an attestation is based. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/attestation_duty.rs b/consensus/types/src/attestation/attestation_duty.rs similarity index 92% rename from consensus/types/src/attestation_duty.rs rename to consensus/types/src/attestation/attestation_duty.rs index 70c7c5c170f..fe3da79a2b1 100644 --- a/consensus/types/src/attestation_duty.rs +++ b/consensus/types/src/attestation/attestation_duty.rs @@ -1,6 +1,7 @@ -use crate::*; use serde::{Deserialize, Serialize}; +use crate::{attestation::CommitteeIndex, core::Slot}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)] pub struct AttestationDuty { diff --git a/consensus/types/src/beacon_committee.rs b/consensus/types/src/attestation/beacon_committee.rs similarity index 92% rename from consensus/types/src/beacon_committee.rs rename to consensus/types/src/attestation/beacon_committee.rs index 04fe763a11b..2dba30bad3c 100644 --- a/consensus/types/src/beacon_committee.rs +++ b/consensus/types/src/attestation/beacon_committee.rs @@ -1,4 +1,4 @@ -use crate::*; +use crate::{attestation::CommitteeIndex, core::Slot}; #[derive(Default, Clone, Debug, PartialEq)] pub struct BeaconCommittee<'a> { diff --git a/consensus/types/src/checkpoint.rs b/consensus/types/src/attestation/checkpoint.rs similarity index 88% rename from consensus/types/src/checkpoint.rs rename to consensus/types/src/attestation/checkpoint.rs index 545af59985e..f5a95f0ad94 100644 --- a/consensus/types/src/checkpoint.rs +++ b/consensus/types/src/attestation/checkpoint.rs @@ -1,11 +1,15 @@ -use crate::test_utils::TestRandom; -use crate::{Epoch, ForkName, Hash256}; use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Epoch, Hash256}, + fork::ForkName, + test_utils::TestRandom, +}; + /// Casper FFG checkpoint, used in 
attestations. /// /// Spec v0.12.1 diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/attestation/indexed_attestation.rs similarity index 96% rename from consensus/types/src/indexed_attestation.rs rename to consensus/types/src/attestation/indexed_attestation.rs index dc328842176..272b015d907 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/attestation/indexed_attestation.rs @@ -1,17 +1,21 @@ -use crate::context_deserialize; -use crate::{ - AggregateSignature, AttestationData, EthSpec, ForkName, VariableList, test_utils::TestRandom, +use std::{ + hash::{Hash, Hasher}, + slice::Iter, }; -use core::slice::Iter; + +use bls::AggregateSignature; +use context_deserialize::context_deserialize; use educe::Educe; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use std::hash::{Hash, Hasher}; +use ssz_types::VariableList; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{attestation::AttestationData, core::EthSpec, fork::ForkName, test_utils::TestRandom}; + /// Details an attestation that can be slashable. /// /// To be included in an `AttesterSlashing`. 
@@ -208,9 +212,10 @@ impl Hash for IndexedAttestation { #[cfg(test)] mod tests { use super::*; - use crate::MainnetEthSpec; - use crate::slot_epoch::Epoch; - use crate::test_utils::{SeedableRng, XorShiftRng}; + use crate::{ + core::{Epoch, MainnetEthSpec}, + test_utils::{SeedableRng, XorShiftRng}, + }; #[test] pub fn test_is_double_vote_true() { diff --git a/consensus/types/src/attestation/mod.rs b/consensus/types/src/attestation/mod.rs new file mode 100644 index 00000000000..2d2bf74e49a --- /dev/null +++ b/consensus/types/src/attestation/mod.rs @@ -0,0 +1,39 @@ +mod aggregate_and_proof; +mod attestation; +mod attestation_data; +mod attestation_duty; +mod beacon_committee; +mod checkpoint; +mod indexed_attestation; +mod participation_flags; +mod pending_attestation; +mod selection_proof; +mod shuffling_id; +mod signed_aggregate_and_proof; +mod subnet_id; + +pub use aggregate_and_proof::{ + AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, +}; +pub use attestation::{ + Attestation, AttestationBase, AttestationElectra, AttestationOnDisk, AttestationRef, + AttestationRefMut, AttestationRefOnDisk, Error as AttestationError, SingleAttestation, +}; +pub use attestation_data::AttestationData; +pub use attestation_duty::AttestationDuty; +pub use beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; +pub use checkpoint::Checkpoint; +pub use indexed_attestation::{ + IndexedAttestation, IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef, +}; +pub use participation_flags::ParticipationFlags; +pub use pending_attestation::PendingAttestation; +pub use selection_proof::SelectionProof; +pub use shuffling_id::AttestationShufflingId; +pub use signed_aggregate_and_proof::{ + SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, + SignedAggregateAndProofRefMut, +}; +pub use subnet_id::SubnetId; + +pub type CommitteeIndex = u64; diff --git a/consensus/types/src/participation_flags.rs 
b/consensus/types/src/attestation/participation_flags.rs similarity index 96% rename from consensus/types/src/participation_flags.rs rename to consensus/types/src/attestation/participation_flags.rs index e59efc51704..66831abfac0 100644 --- a/consensus/types/src/participation_flags.rs +++ b/consensus/types/src/attestation/participation_flags.rs @@ -1,10 +1,14 @@ -use crate::{Hash256, consts::altair::NUM_FLAG_INDICES, test_utils::TestRandom}; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use test_random_derive::TestRandom; use tree_hash::{PackedEncoding, TreeHash, TreeHashType}; +use crate::{ + core::{Hash256, consts::altair::NUM_FLAG_INDICES}, + test_utils::TestRandom, +}; + #[derive(Debug, Default, Clone, Copy, PartialEq, Deserialize, Serialize, TestRandom)] #[serde(transparent)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] diff --git a/consensus/types/src/pending_attestation.rs b/consensus/types/src/attestation/pending_attestation.rs similarity index 84% rename from consensus/types/src/pending_attestation.rs rename to consensus/types/src/attestation/pending_attestation.rs index 4a00a0495ac..84353ac1185 100644 --- a/consensus/types/src/pending_attestation.rs +++ b/consensus/types/src/attestation/pending_attestation.rs @@ -1,11 +1,12 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{AttestationData, BitList, EthSpec, ForkName}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::BitList; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{attestation::AttestationData, core::EthSpec, fork::ForkName, test_utils::TestRandom}; + /// An attestation that has been included in the state but not yet fully processed. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/selection_proof.rs b/consensus/types/src/attestation/selection_proof.rs similarity index 95% rename from consensus/types/src/selection_proof.rs rename to consensus/types/src/attestation/selection_proof.rs index aa8c0c5658e..b4c48d00780 100644 --- a/consensus/types/src/selection_proof.rs +++ b/consensus/types/src/attestation/selection_proof.rs @@ -1,11 +1,15 @@ -use crate::{ - ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, Signature, SignedRoot, Slot, -}; +use std::cmp; + +use bls::{PublicKey, SecretKey, Signature}; use ethereum_hashing::hash; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::Encode; -use std::cmp; + +use crate::{ + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot}, + fork::Fork, +}; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] diff --git a/consensus/types/src/shuffling_id.rs b/consensus/types/src/attestation/shuffling_id.rs similarity index 93% rename from consensus/types/src/shuffling_id.rs rename to consensus/types/src/attestation/shuffling_id.rs index df16f605ed1..25217288f69 100644 --- a/consensus/types/src/shuffling_id.rs +++ b/consensus/types/src/attestation/shuffling_id.rs @@ -1,7 +1,12 @@ -use crate::*; +use std::hash::Hash; + use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use std::hash::Hash; + +use crate::{ + core::{Epoch, EthSpec, Hash256, RelativeEpoch}, + state::{BeaconState, BeaconStateError}, +}; /// Can be used to key (ID) the shuffling in some chain, in some epoch. 
/// diff --git a/consensus/types/src/signed_aggregate_and_proof.rs b/consensus/types/src/attestation/signed_aggregate_and_proof.rs similarity index 90% rename from consensus/types/src/signed_aggregate_and_proof.rs rename to consensus/types/src/attestation/signed_aggregate_and_proof.rs index 758ac2734b7..48c3f4c567e 100644 --- a/consensus/types/src/signed_aggregate_and_proof.rs +++ b/consensus/types/src/attestation/signed_aggregate_and_proof.rs @@ -1,18 +1,21 @@ -use super::{ - AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, -}; -use super::{ - Attestation, AttestationRef, ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, SecretKey, - SelectionProof, Signature, SignedRoot, -}; -use crate::context_deserialize; -use crate::test_utils::TestRandom; +use bls::{SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::{ + AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, + Attestation, AttestationRef, SelectionProof, + }, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + /// A Validators signed aggregate proof to publish on the `beacon_aggregate_and_proof` /// gossipsub topic. /// diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/attestation/subnet_id.rs similarity index 97% rename from consensus/types/src/subnet_id.rs rename to consensus/types/src/attestation/subnet_id.rs index 6ec8ca4a27f..9585d077b5c 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/attestation/subnet_id.rs @@ -1,11 +1,17 @@ //! Identifies each shard by an integer identifier. 
-use crate::SingleAttestation; -use crate::{AttestationRef, ChainSpec, CommitteeIndex, EthSpec, Slot}; +use std::{ + ops::{Deref, DerefMut}, + sync::LazyLock, +}; + use alloy_primitives::{U256, bytes::Buf}; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; -use std::ops::{Deref, DerefMut}; -use std::sync::LazyLock; + +use crate::{ + attestation::{AttestationRef, CommitteeIndex, SingleAttestation}, + core::{ChainSpec, EthSpec, Slot}, +}; const MAX_SUBNET_ID: usize = 64; diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/block/beacon_block.rs similarity index 96% rename from consensus/types/src/beacon_block.rs rename to consensus/types/src/block/beacon_block.rs index 060709d6556..a4e7e800bcc 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/block/beacon_block.rs @@ -1,18 +1,40 @@ -use crate::attestation::AttestationBase; -use crate::test_utils::TestRandom; -use crate::*; +use std::{fmt, marker::PhantomData}; + +use bls::{AggregateSignature, PublicKeyBytes, SecretKey, Signature, SignatureBytes}; +use context_deserialize::ContextDeserialize; use educe::Educe; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; -use std::fmt; -use std::marker::PhantomData; +use ssz_types::{BitList, BitVector, FixedVector, VariableList}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; - -use self::indexed_attestation::IndexedAttestationBase; +use typenum::Unsigned; + +use crate::{ + attestation::{AttestationBase, AttestationData, IndexedAttestationBase}, + block::{ + BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyBellatrix, + BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyFulu, + BeaconBlockBodyGloas, BeaconBlockBodyRef, BeaconBlockBodyRefMut, BeaconBlockHeader, + SignedBeaconBlock, 
SignedBeaconBlockHeader, + }, + core::{ChainSpec, Domain, Epoch, EthSpec, Graffiti, Hash256, SignedRoot, Slot}, + deposit::{Deposit, DepositData}, + execution::{ + AbstractExecPayload, BlindedPayload, Eth1Data, ExecutionPayload, ExecutionRequests, + FullPayload, + }, + exit::{SignedVoluntaryExit, VoluntaryExit}, + fork::{Fork, ForkName, InconsistentFork, map_fork_name}, + slashing::{AttesterSlashingBase, ProposerSlashing}, + state::BeaconStateError, + sync_committee::SyncAggregate, + test_utils::TestRandom, +}; /// A block of the `BeaconChain`. #[superstruct( @@ -283,7 +305,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockRef<'a, E, Payl /// Extracts a reference to an execution payload from a block, returning an error if the block /// is pre-merge. - pub fn execution_payload(&self) -> Result, Error> { + pub fn execution_payload(&self) -> Result, BeaconStateError> { self.body().execution_payload() } } @@ -865,7 +887,10 @@ impl fmt::Display for BlockImportSource { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, XorShiftRng, test_ssz_tree_hash_pair_with}; + use crate::{ + core::MainnetEthSpec, + test_utils::{SeedableRng, XorShiftRng, test_ssz_tree_hash_pair_with}, + }; use ssz::Encode; type BeaconBlock = super::BeaconBlock; diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/block/beacon_block_body.rs similarity index 93% rename from consensus/types/src/beacon_block_body.rs rename to consensus/types/src/block/beacon_block_body.rs index ced8fea4a99..f85dd8909e1 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/block/beacon_block_body.rs @@ -1,18 +1,42 @@ -use crate::test_utils::TestRandom; -use crate::*; +use std::marker::PhantomData; + +use bls::Signature; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use merkle_proof::{MerkleTree, MerkleTreeError}; use metastruct::metastruct; use serde::{Deserialize, Deserializer, Serialize}; 
use ssz_derive::{Decode, Encode}; -use std::marker::PhantomData; +use ssz_types::{FixedVector, VariableList}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::{BYTES_PER_CHUNK, TreeHash}; use tree_hash_derive::TreeHash; -pub type KzgCommitments = - VariableList::MaxBlobCommitmentsPerBlock>; +use crate::{ + attestation::{AttestationBase, AttestationElectra, AttestationRef, AttestationRefMut}, + core::{EthSpec, Graffiti, Hash256}, + deposit::Deposit, + execution::{ + AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, + BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadFulu, BlindedPayloadGloas, + Eth1Data, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, + ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, + ExecutionPayloadGloas, ExecutionRequests, FullPayload, FullPayloadBellatrix, + FullPayloadCapella, FullPayloadDeneb, FullPayloadElectra, FullPayloadFulu, + FullPayloadGloas, SignedBlsToExecutionChange, + }, + exit::SignedVoluntaryExit, + fork::{ForkName, map_fork_name}, + kzg_ext::KzgCommitments, + light_client::consts::{EXECUTION_PAYLOAD_INDEX, EXECUTION_PAYLOAD_PROOF_LEN}, + slashing::{ + AttesterSlashingBase, AttesterSlashingElectra, AttesterSlashingRef, ProposerSlashing, + }, + state::BeaconStateError, + sync_committee::SyncAggregate, + test_utils::TestRandom, +}; /// The number of leaves (including padding) on the `BeaconBlockBody` Merkle tree. 
/// @@ -63,8 +87,14 @@ pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; Fulu(metastruct(mappings(beacon_block_body_fulu_fields(groups(fields))))), Gloas(metastruct(mappings(beacon_block_body_gloas_fields(groups(fields))))), ), - cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ) )] #[cfg_attr( feature = "arbitrary", @@ -147,7 +177,7 @@ pub struct BeaconBlockBody = FullPay } impl> BeaconBlockBody { - pub fn execution_payload(&self) -> Result, Error> { + pub fn execution_payload(&self) -> Result, BeaconStateError> { self.to_ref().execution_payload() } @@ -158,9 +188,9 @@ impl> BeaconBlockBody { } impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Payload> { - pub fn execution_payload(&self) -> Result, Error> { + pub fn execution_payload(&self) -> Result, BeaconStateError> { match self { - Self::Base(_) | Self::Altair(_) => Err(Error::IncorrectStateVariant), + Self::Base(_) | Self::Altair(_) => Err(BeaconStateError::IncorrectStateVariant), Self::Bellatrix(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Capella(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Deneb(body) => Ok(Payload::Ref::from(&body.execution_payload)), @@ -216,7 +246,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, pub fn kzg_commitment_merkle_proof( &self, index: usize, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { let kzg_commitments_proof = self.kzg_commitments_merkle_proof()?; let proof = self.complete_kzg_commitment_merkle_proof(index, &kzg_commitments_proof)?; Ok(proof) @@ -228,10 +258,10 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, &self, index: usize, 
kzg_commitments_proof: &[Hash256], - ) -> Result, Error> { + ) -> Result, BeaconStateError> { match self { Self::Base(_) | Self::Altair(_) | Self::Bellatrix(_) | Self::Capella(_) => { - Err(Error::IncorrectStateVariant) + Err(BeaconStateError::IncorrectStateVariant) } Self::Deneb(_) | Self::Electra(_) | Self::Fulu(_) | Self::Gloas(_) => { // We compute the branches by generating 2 merkle trees: @@ -253,7 +283,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, let tree = MerkleTree::create(&blob_leaves, depth as usize); let (_, mut proof) = tree .generate_proof(index, depth as usize) - .map_err(Error::MerkleTreeError)?; + .map_err(BeaconStateError::MerkleTreeError)?; // Add the branch corresponding to the length mix-in. let length = blob_leaves.len(); @@ -261,7 +291,9 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, let mut length_bytes = [0; BYTES_PER_CHUNK]; length_bytes .get_mut(0..usize_len) - .ok_or(Error::MerkleTreeError(MerkleTreeError::PleaseNotifyTheDevs))? + .ok_or(BeaconStateError::MerkleTreeError( + MerkleTreeError::PleaseNotifyTheDevs, + ))? .copy_from_slice(&length.to_le_bytes()); let length_root = Hash256::from_slice(length_bytes.as_slice()); proof.push(length_root); @@ -279,32 +311,41 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, /// Produces the proof of inclusion for `self.blob_kzg_commitments`. pub fn kzg_commitments_merkle_proof( &self, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { let body_leaves = self.body_merkle_leaves(); let beacon_block_body_depth = body_leaves.len().next_power_of_two().ilog2() as usize; let tree = MerkleTree::create(&body_leaves, beacon_block_body_depth); let (_, proof) = tree .generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth) - .map_err(Error::MerkleTreeError)?; + .map_err(BeaconStateError::MerkleTreeError)?; Ok(FixedVector::new(proof)?) 
} - pub fn block_body_merkle_proof(&self, generalized_index: usize) -> Result, Error> { + pub fn block_body_merkle_proof( + &self, + generalized_index: usize, + ) -> Result, BeaconStateError> { let field_index = match generalized_index { - light_client_update::EXECUTION_PAYLOAD_INDEX => { + EXECUTION_PAYLOAD_INDEX => { // Execution payload is a top-level field, subtract off the generalized indices // for the internal nodes. Result should be 9, the field offset of the execution // payload in the `BeaconBlockBody`: // https://github.com/ethereum/consensus-specs/blob/dev/specs/deneb/beacon-chain.md#beaconblockbody generalized_index .checked_sub(NUM_BEACON_BLOCK_BODY_HASH_TREE_ROOT_LEAVES) - .ok_or(Error::GeneralizedIndexNotSupported(generalized_index))? + .ok_or(BeaconStateError::GeneralizedIndexNotSupported( + generalized_index, + ))? + } + _ => { + return Err(BeaconStateError::GeneralizedIndexNotSupported( + generalized_index, + )); } - _ => return Err(Error::GeneralizedIndexNotSupported(generalized_index)), }; let leaves = self.body_merkle_leaves(); - let depth = light_client_update::EXECUTION_PAYLOAD_PROOF_LEN; + let depth = EXECUTION_PAYLOAD_PROOF_LEN; let tree = merkle_proof::MerkleTree::create(&leaves, depth); let (_, proof) = tree.generate_proof(field_index, depth)?; @@ -1100,22 +1141,16 @@ impl<'de, E: EthSpec, Payload: AbstractExecPayload> ContextDeserialize<'de, F } } -/// Util method helpful for logging. 
-pub fn format_kzg_commitments(commitments: &[KzgCommitment]) -> String { - let commitment_strings: Vec = commitments.iter().map(|x| x.to_string()).collect(); - let commitments_joined = commitment_strings.join(", "); - let surrounded_commitments = format!("[{}]", commitments_joined); - surrounded_commitments -} - #[cfg(test)] mod tests { mod base { use super::super::*; + use crate::core::MainnetEthSpec; ssz_and_tree_hash_tests!(BeaconBlockBodyBase); } mod altair { use super::super::*; + use crate::core::MainnetEthSpec; ssz_and_tree_hash_tests!(BeaconBlockBodyAltair); } } diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/block/beacon_block_header.rs similarity index 90% rename from consensus/types/src/beacon_block_header.rs rename to consensus/types/src/block/beacon_block_header.rs index e14a9fc8af7..06e1023d911 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/block/beacon_block_header.rs @@ -1,6 +1,4 @@ -use crate::test_utils::TestRandom; -use crate::*; - +use bls::SecretKey; use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -8,6 +6,13 @@ use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + block::SignedBeaconBlockHeader, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + /// A header of a `BeaconBlock`. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/block/mod.rs b/consensus/types/src/block/mod.rs new file mode 100644 index 00000000000..81c8ffbd639 --- /dev/null +++ b/consensus/types/src/block/mod.rs @@ -0,0 +1,26 @@ +mod beacon_block; +mod beacon_block_body; +mod beacon_block_header; +mod signed_beacon_block; +mod signed_beacon_block_header; + +pub use beacon_block::{ + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockCapella, + BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockFulu, BeaconBlockGloas, BeaconBlockRef, + BeaconBlockRefMut, BlindedBeaconBlock, BlockImportSource, EmptyBlock, +}; +pub use beacon_block_body::{ + BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, + BeaconBlockBodyBellatrix, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, + BeaconBlockBodyFulu, BeaconBlockBodyGloas, BeaconBlockBodyRef, BeaconBlockBodyRefMut, + NUM_BEACON_BLOCK_BODY_HASH_TREE_ROOT_LEAVES, +}; +pub use beacon_block_header::BeaconBlockHeader; + +pub use signed_beacon_block::{ + SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, + SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, + SignedBeaconBlockFulu, SignedBeaconBlockGloas, SignedBeaconBlockHash, SignedBlindedBeaconBlock, + ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc, +}; +pub use signed_beacon_block_header::SignedBeaconBlockHeader; diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/block/signed_beacon_block.rs similarity index 95% rename from consensus/types/src/signed_beacon_block.rs rename to consensus/types/src/block/signed_beacon_block.rs index 7b04cc57711..e8927ee7659 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/block/signed_beacon_block.rs @@ -1,17 +1,42 @@ -use crate::beacon_block_body::{BLOB_KZG_COMMITMENTS_INDEX, format_kzg_commitments}; -use 
crate::test_utils::TestRandom; -use crate::*; +use std::fmt; + +use bls::{PublicKey, Signature}; +use context_deserialize::ContextDeserialize; use educe::Educe; use merkle_proof::MerkleTree; use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; -use std::fmt; +use ssz_types::FixedVector; use superstruct::superstruct; use test_random_derive::TestRandom; use tracing::instrument; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + block::{ + BLOB_KZG_COMMITMENTS_INDEX, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, + BeaconBlockBellatrix, BeaconBlockBodyBellatrix, BeaconBlockBodyCapella, + BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyFulu, BeaconBlockBodyGloas, + BeaconBlockCapella, BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockFulu, + BeaconBlockGloas, BeaconBlockHeader, BeaconBlockRef, BeaconBlockRefMut, + SignedBeaconBlockHeader, + }, + core::{ChainSpec, Domain, Epoch, EthSpec, Hash256, SignedRoot, SigningData, Slot}, + execution::{ + AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, + BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadFulu, BlindedPayloadGloas, + ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, + ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, + ExecutionPayloadGloas, FullPayload, FullPayloadBellatrix, FullPayloadCapella, + FullPayloadDeneb, FullPayloadElectra, FullPayloadFulu, FullPayloadGloas, + }, + fork::{Fork, ForkName, ForkVersionDecode, InconsistentFork, map_fork_name}, + kzg_ext::format_kzg_commitments, + state::BeaconStateError, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(PartialEq, Eq, Hash, Clone, Copy)] pub struct SignedBeaconBlockHash(Hash256); @@ -272,7 +297,7 @@ impl> SignedBeaconBlock SignedBeaconBlockHeader, FixedVector, ), - Error, + BeaconStateError, > { // Create the block body merkle tree let body_leaves = 
self.message().body().body_merkle_leaves(); @@ -282,7 +307,7 @@ impl> SignedBeaconBlock // Compute the KZG commitments inclusion proof let (_, proof) = body_merkle_tree .generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth) - .map_err(Error::MerkleTreeError)?; + .map_err(BeaconStateError::MerkleTreeError)?; let kzg_commitments_inclusion_proof = FixedVector::new(proof)?; let block_header = BeaconBlockHeader { @@ -919,6 +944,7 @@ pub mod ssz_tagged_signed_beacon_block_arc { #[cfg(test)] mod test { use super::*; + use crate::{block::EmptyBlock, core::MainnetEthSpec}; #[test] fn add_remove_payload_roundtrip() { diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/block/signed_beacon_block_header.rs similarity index 84% rename from consensus/types/src/signed_beacon_block_header.rs rename to consensus/types/src/block/signed_beacon_block_header.rs index 4a5ff2ec1a4..2fcd8a705f0 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/block/signed_beacon_block_header.rs @@ -1,13 +1,17 @@ -use crate::context_deserialize; -use crate::{ - BeaconBlockHeader, ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, PublicKey, Signature, - SignedRoot, test_utils::TestRandom, -}; +use bls::{PublicKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + block::BeaconBlockHeader, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + /// A signed header of a `BeaconBlock`. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder/builder_bid.rs similarity index 93% rename from consensus/types/src/builder_bid.rs rename to consensus/types/src/builder/builder_bid.rs index 3fb7af35ca1..be9bb281553 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder/builder_bid.rs @@ -1,13 +1,6 @@ -use crate::beacon_block_body::KzgCommitments; -use crate::{ - ChainSpec, ContextDeserialize, EthSpec, ExecutionPayloadHeaderBellatrix, - ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, - ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderGloas, ExecutionPayloadHeaderRef, - ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, ForkVersionDecode, SignedRoot, - Uint256, test_utils::TestRandom, -}; use bls::PublicKeyBytes; use bls::Signature; +use context_deserialize::ContextDeserialize; use serde::{Deserialize, Deserializer, Serialize}; use ssz::Decode; use ssz_derive::{Decode, Encode}; @@ -15,6 +8,19 @@ use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, EthSpec, SignedRoot, Uint256}, + execution::{ + ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + ExecutionPayloadHeaderGloas, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, + ExecutionRequests, + }, + fork::{ForkName, ForkVersionDecode}, + kzg_ext::KzgCommitments, + test_utils::TestRandom, +}; + #[superstruct( variants(Bellatrix, Capella, Deneb, Electra, Fulu, Gloas), variant_attributes( diff --git a/consensus/types/src/builder/mod.rs b/consensus/types/src/builder/mod.rs new file mode 100644 index 00000000000..88a8e6a01a3 --- /dev/null +++ b/consensus/types/src/builder/mod.rs @@ -0,0 +1,6 @@ +mod builder_bid; + +pub use builder_bid::{ + BuilderBid, BuilderBidBellatrix, BuilderBidCapella, BuilderBidDeneb, 
BuilderBidElectra, + BuilderBidFulu, BuilderBidGloas, SignedBuilderBid, +}; diff --git a/consensus/types/src/consolidation_request.rs b/consensus/types/src/consolidation/consolidation_request.rs similarity index 84% rename from consensus/types/src/consolidation_request.rs rename to consensus/types/src/consolidation/consolidation_request.rs index 2af3426b68f..3f09517a903 100644 --- a/consensus/types/src/consolidation_request.rs +++ b/consensus/types/src/consolidation/consolidation_request.rs @@ -1,11 +1,17 @@ -use crate::context_deserialize; -use crate::{Address, ForkName, PublicKeyBytes, SignedRoot, test_utils::TestRandom}; +use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, SignedRoot}, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/consolidation/mod.rs b/consensus/types/src/consolidation/mod.rs new file mode 100644 index 00000000000..a6a2f4a3317 --- /dev/null +++ b/consensus/types/src/consolidation/mod.rs @@ -0,0 +1,5 @@ +mod consolidation_request; +mod pending_consolidation; + +pub use consolidation_request::ConsolidationRequest; +pub use pending_consolidation::PendingConsolidation; diff --git a/consensus/types/src/pending_consolidation.rs b/consensus/types/src/consolidation/pending_consolidation.rs similarity index 86% rename from consensus/types/src/pending_consolidation.rs rename to consensus/types/src/consolidation/pending_consolidation.rs index 9fb8c3566db..fcd76e43b65 100644 --- a/consensus/types/src/pending_consolidation.rs +++ b/consensus/types/src/consolidation/pending_consolidation.rs @@ -1,11 +1,11 @@ -use crate::ForkName; -use 
crate::context_deserialize; -use crate::test_utils::TestRandom; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/application_domain.rs b/consensus/types/src/core/application_domain.rs similarity index 100% rename from consensus/types/src/application_domain.rs rename to consensus/types/src/core/application_domain.rs diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/core/chain_spec.rs similarity index 99% rename from consensus/types/src/chain_spec.rs rename to consensus/types/src/core/chain_spec.rs index 4b0dd48c9c2..8838cac0942 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -1,19 +1,27 @@ -use crate::application_domain::{APPLICATION_DOMAIN_BUILDER, ApplicationDomain}; -use crate::blob_sidecar::BlobIdentifier; -use crate::data_column_sidecar::DataColumnsByRootIdentifier; -use crate::*; +use std::{fs::File, path::Path, time::Duration}; + use educe::Educe; use ethereum_hashing::hash; +use fixed_bytes::FixedBytesExtended; use int_to_bytes::int_to_bytes4; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_utils::quoted_u64::MaybeQuoted; use ssz::Encode; -use std::fs::File; -use std::path::Path; -use std::time::Duration; +use ssz_types::{RuntimeVariableList, VariableList}; use tree_hash::TreeHash; +use crate::{ + core::{ + APPLICATION_DOMAIN_BUILDER, Address, ApplicationDomain, EnrForkId, Epoch, EthSpec, + EthSpecId, Hash256, MainnetEthSpec, Slot, Uint256, + }, + data::{BlobIdentifier, DataColumnSubnetId, DataColumnsByRootIdentifier}, + 
execution::ExecutionBlockHash, + fork::{Fork, ForkData, ForkName}, + state::BeaconState, +}; + /// Each of the BLS signature domains. #[derive(Debug, PartialEq, Clone, Copy)] pub enum Domain { @@ -2674,6 +2682,7 @@ mod tests { #[cfg(test)] mod yaml_tests { use super::*; + use crate::core::MinimalEthSpec; use paste::paste; use std::sync::Arc; use tempfile::NamedTempFile; diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/core/config_and_preset.rs similarity index 95% rename from consensus/types/src/config_and_preset.rs rename to consensus/types/src/core/config_and_preset.rs index 16b09c9c088..08141c77311 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/core/config_and_preset.rs @@ -1,13 +1,14 @@ -use crate::{ - AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, ChainSpec, Config, DenebPreset, - ElectraPreset, EthSpec, FuluPreset, GloasPreset, consts::altair, consts::deneb, -}; use maplit::hashmap; use serde::{Deserialize, Serialize}; use serde_json::Value; use std::collections::HashMap; use superstruct::superstruct; +use crate::core::{ + AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, ChainSpec, Config, DenebPreset, + ElectraPreset, EthSpec, FuluPreset, GloasPreset, consts, +}; + /// Fusion of a runtime-config with the compile-time preset values. /// /// Mostly useful for the API. 
@@ -131,11 +132,11 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap { "domain_sync_committee_selection_proof".to_uppercase() => u32_hex(spec.domain_sync_committee_selection_proof), "sync_committee_subnet_count".to_uppercase() => - altair::SYNC_COMMITTEE_SUBNET_COUNT.to_string().into(), + consts::altair::SYNC_COMMITTEE_SUBNET_COUNT.to_string().into(), "target_aggregators_per_sync_subcommittee".to_uppercase() => - altair::TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE.to_string().into(), + consts::altair::TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE.to_string().into(), // Deneb - "versioned_hash_version_kzg".to_uppercase() => deneb::VERSIONED_HASH_VERSION_KZG.to_string().into(), + "versioned_hash_version_kzg".to_uppercase() => consts::deneb::VERSIONED_HASH_VERSION_KZG.to_string().into(), // Electra "compounding_withdrawal_prefix".to_uppercase() => u8_hex(spec.compounding_withdrawal_prefix_byte), "unset_deposit_requests_start_index".to_uppercase() => spec.unset_deposit_requests_start_index.to_string().into(), diff --git a/consensus/types/src/consts.rs b/consensus/types/src/core/consts.rs similarity index 94% rename from consensus/types/src/consts.rs rename to consensus/types/src/core/consts.rs index c20d5fe8f33..b6d63c47a88 100644 --- a/consensus/types/src/consts.rs +++ b/consensus/types/src/core/consts.rs @@ -23,5 +23,5 @@ pub mod bellatrix { pub const INTERVALS_PER_SLOT: u64 = 3; } pub mod deneb { - pub use crate::VERSIONED_HASH_VERSION_KZG; + pub use kzg::VERSIONED_HASH_VERSION_KZG; } diff --git a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/core/enr_fork_id.rs similarity index 95% rename from consensus/types/src/enr_fork_id.rs rename to consensus/types/src/core/enr_fork_id.rs index e22672aeb60..c3b400cd136 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/core/enr_fork_id.rs @@ -1,11 +1,10 @@ -use crate::Epoch; -use crate::test_utils::TestRandom; - use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use 
test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Epoch, test_utils::TestRandom}; + /// Specifies a fork which allows nodes to identify each other on the network. This fork is used in /// a nodes local ENR. /// diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/core/eth_spec.rs similarity index 98% rename from consensus/types/src/eth_spec.rs rename to consensus/types/src/core/eth_spec.rs index 47d32ad9e4d..72fd1ebc9eb 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/core/eth_spec.rs @@ -1,16 +1,22 @@ -use crate::*; +use std::{ + fmt::{self, Debug}, + str::FromStr, +}; use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; -use ssz_types::typenum::{ +use typenum::{ U0, U1, U2, U4, U8, U16, U17, U32, U64, U128, U256, U512, U625, U1024, U2048, U4096, U8192, U65536, U131072, U262144, U1048576, U16777216, U33554432, U134217728, U1073741824, - U1099511627776, UInt, bit::B0, + U1099511627776, UInt, Unsigned, bit::B0, +}; + +use crate::{ + core::{ChainSpec, Epoch}, + state::BeaconStateError, }; -use std::fmt::{self, Debug}; -use std::str::FromStr; -pub type U5000 = UInt, B0>, B0>; // 625 * 8 = 5000 +type U5000 = UInt, B0>, B0>; // 625 * 8 = 5000 const MAINNET: &str = "mainnet"; const MINIMAL: &str = "minimal"; @@ -182,7 +188,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + fn get_committee_count_per_slot( active_validator_count: usize, spec: &ChainSpec, - ) -> Result { + ) -> Result { Self::get_committee_count_per_slot_with( active_validator_count, spec.max_committees_per_slot, @@ -194,7 +200,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + active_validator_count: usize, max_committees_per_slot: usize, target_committee_size: usize, - ) -> Result { + ) -> Result { let slots_per_epoch = Self::SlotsPerEpoch::to_usize(); Ok(std::cmp::max( @@ -619,7 +625,7 @@ impl EthSpec for GnosisEthSpec { #[cfg(test)] mod test { use 
crate::{EthSpec, GnosisEthSpec, MainnetEthSpec, MinimalEthSpec}; - use ssz_types::typenum::Unsigned; + use typenum::Unsigned; fn assert_valid_spec() { let spec = E::default_spec(); diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/core/graffiti.rs similarity index 98% rename from consensus/types/src/graffiti.rs rename to consensus/types/src/core/graffiti.rs index 31cc4187a67..d0e0e1b1a89 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/core/graffiti.rs @@ -1,14 +1,13 @@ -use crate::{ - Hash256, - test_utils::{RngCore, TestRandom}, -}; +use std::{fmt, str::FromStr}; + +use rand::RngCore; use regex::bytes::Regex; use serde::{Deserialize, Deserializer, Serialize, Serializer, de::Error}; use ssz::{Decode, DecodeError, Encode}; -use std::fmt; -use std::str::FromStr; use tree_hash::{PackedEncoding, TreeHash}; +use crate::{core::Hash256, test_utils::TestRandom}; + pub const GRAFFITI_BYTES_LEN: usize = 32; /// The 32-byte `graffiti` field on a beacon block. 
diff --git a/consensus/types/src/core/mod.rs b/consensus/types/src/core/mod.rs new file mode 100644 index 00000000000..bb50bb18568 --- /dev/null +++ b/consensus/types/src/core/mod.rs @@ -0,0 +1,44 @@ +pub mod consts; + +mod application_domain; +mod chain_spec; +mod config_and_preset; +mod enr_fork_id; +mod eth_spec; +mod graffiti; +mod non_zero_usize; +mod preset; +mod relative_epoch; +mod signing_data; +mod slot_data; +#[macro_use] +mod slot_epoch_macros; +mod slot_epoch; +#[cfg(feature = "sqlite")] +mod sqlite; + +pub use application_domain::{APPLICATION_DOMAIN_BUILDER, ApplicationDomain}; +pub use chain_spec::{BlobParameters, BlobSchedule, ChainSpec, Config, Domain}; +pub use config_and_preset::{ + ConfigAndPreset, ConfigAndPresetDeneb, ConfigAndPresetElectra, ConfigAndPresetFulu, + ConfigAndPresetGloas, get_extra_fields, +}; +pub use enr_fork_id::EnrForkId; +pub use eth_spec::{EthSpec, EthSpecId, GNOSIS, GnosisEthSpec, MainnetEthSpec, MinimalEthSpec}; +pub use graffiti::{GRAFFITI_BYTES_LEN, Graffiti, GraffitiString}; +pub use non_zero_usize::new_non_zero_usize; +pub use preset::{ + AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, DenebPreset, ElectraPreset, + FuluPreset, GloasPreset, +}; +pub use relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; +pub use signing_data::{SignedRoot, SigningData}; +pub use slot_data::SlotData; +pub use slot_epoch::{Epoch, Slot}; + +pub type Hash256 = alloy_primitives::B256; +pub type Uint256 = alloy_primitives::U256; +pub type Hash64 = alloy_primitives::B64; +pub type Address = alloy_primitives::Address; +pub type VersionedHash = Hash256; +pub type MerkleProof = Vec; diff --git a/consensus/types/src/non_zero_usize.rs b/consensus/types/src/core/non_zero_usize.rs similarity index 100% rename from consensus/types/src/non_zero_usize.rs rename to consensus/types/src/core/non_zero_usize.rs diff --git a/consensus/types/src/preset.rs b/consensus/types/src/core/preset.rs similarity index 99% rename from 
consensus/types/src/preset.rs rename to consensus/types/src/core/preset.rs index ab54c0345f7..75d2d8df6b3 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/core/preset.rs @@ -1,5 +1,7 @@ -use crate::{ChainSpec, Epoch, EthSpec, Unsigned}; use serde::{Deserialize, Serialize}; +use typenum::Unsigned; + +use crate::core::{ChainSpec, Epoch, EthSpec}; /// Value-level representation of an Ethereum consensus "preset". /// diff --git a/consensus/types/src/relative_epoch.rs b/consensus/types/src/core/relative_epoch.rs similarity index 99% rename from consensus/types/src/relative_epoch.rs rename to consensus/types/src/core/relative_epoch.rs index 2fa0ae41bda..d1ee7ecc7c6 100644 --- a/consensus/types/src/relative_epoch.rs +++ b/consensus/types/src/core/relative_epoch.rs @@ -1,6 +1,7 @@ -use crate::*; use safe_arith::{ArithError, SafeArith}; +use crate::core::{Epoch, Slot}; + #[derive(Debug, PartialEq, Clone, Copy)] pub enum Error { EpochTooLow { base: Epoch, other: Epoch }, diff --git a/consensus/types/src/signing_data.rs b/consensus/types/src/core/signing_data.rs similarity index 85% rename from consensus/types/src/signing_data.rs rename to consensus/types/src/core/signing_data.rs index 69b7dabfe5a..907f03fac7b 100644 --- a/consensus/types/src/signing_data.rs +++ b/consensus/types/src/core/signing_data.rs @@ -1,13 +1,12 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ForkName, Hash256}; - +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{core::Hash256, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] #[context_deserialize(ForkName)] diff --git a/consensus/types/src/slot_data.rs 
b/consensus/types/src/core/slot_data.rs similarity index 92% rename from consensus/types/src/slot_data.rs rename to consensus/types/src/core/slot_data.rs index 19775913b98..f0bd01814f2 100644 --- a/consensus/types/src/slot_data.rs +++ b/consensus/types/src/core/slot_data.rs @@ -1,4 +1,4 @@ -use crate::Slot; +use crate::core::Slot; /// A trait providing a `Slot` getter for messages that are related to a single slot. Useful in /// making parts of attestation and sync committee processing generic. diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/core/slot_epoch.rs similarity index 98% rename from consensus/types/src/slot_epoch.rs rename to consensus/types/src/core/slot_epoch.rs index 05af9c5232d..97457701b11 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/core/slot_epoch.rs @@ -10,15 +10,17 @@ //! implement `Into`, however this would allow operations between `Slots` and `Epochs` which //! may lead to programming errors which are not detected by the compiler. 
-use crate::test_utils::TestRandom; -use crate::{ChainSpec, SignedRoot}; +use std::{fmt, hash::Hash}; use rand::RngCore; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; -use std::fmt; -use std::hash::Hash; + +use crate::{ + core::{ChainSpec, SignedRoot}, + test_utils::TestRandom, +}; #[cfg(feature = "legacy-arith")] use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; diff --git a/consensus/types/src/slot_epoch_macros.rs b/consensus/types/src/core/slot_epoch_macros.rs similarity index 100% rename from consensus/types/src/slot_epoch_macros.rs rename to consensus/types/src/core/slot_epoch_macros.rs diff --git a/consensus/types/src/sqlite.rs b/consensus/types/src/core/sqlite.rs similarity index 96% rename from consensus/types/src/sqlite.rs rename to consensus/types/src/core/sqlite.rs index b6318dc4ce5..de892b4e98f 100644 --- a/consensus/types/src/sqlite.rs +++ b/consensus/types/src/core/sqlite.rs @@ -1,10 +1,11 @@ //! Implementations of SQLite compatibility traits. -use crate::{Epoch, Slot}; use rusqlite::{ Error, types::{FromSql, FromSqlError, ToSql, ToSqlOutput, ValueRef}, }; +use crate::core::{Epoch, Slot}; + macro_rules! 
impl_to_from_sql { ($type:ty) => { impl ToSql for $type { diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/data/blob_sidecar.rs similarity index 94% rename from consensus/types/src/blob_sidecar.rs rename to consensus/types/src/data/blob_sidecar.rs index d2c7331a579..709e556933b 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/data/blob_sidecar.rs @@ -1,12 +1,7 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ - AbstractExecPayload, BeaconBlockHeader, BeaconStateError, Blob, ChainSpec, Epoch, EthSpec, - FixedVector, ForkName, Hash256, KzgProofs, RuntimeFixedVector, RuntimeVariableList, - SignedBeaconBlock, SignedBeaconBlockHeader, Slot, VariableList, - beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, -}; +use std::{fmt::Debug, hash::Hash, sync::Arc}; + use bls::Signature; +use context_deserialize::context_deserialize; use educe::Educe; use kzg::{BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT, Blob as KzgBlob, Kzg, KzgCommitment, KzgProof}; use merkle_proof::{MerkleTreeError, merkle_root_from_branch, verify_merkle_proof}; @@ -15,13 +10,24 @@ use safe_arith::ArithError; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use std::fmt::Debug; -use std::hash::Hash; -use std::sync::Arc; +use ssz_types::{FixedVector, RuntimeFixedVector, RuntimeVariableList, VariableList}; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + block::{ + BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, SignedBeaconBlock, SignedBeaconBlockHeader, + }, + core::{ChainSpec, Epoch, EthSpec, Hash256, Slot}, + data::Blob, + execution::AbstractExecPayload, + fork::ForkName, + kzg_ext::KzgProofs, + state::BeaconStateError, + test_utils::TestRandom, +}; + /// Container of the data that identifies an individual blob. 
#[derive( Serialize, Deserialize, Encode, Decode, TreeHash, Copy, Clone, Debug, PartialEq, Eq, Hash, diff --git a/consensus/types/src/data_column_custody_group.rs b/consensus/types/src/data/data_column_custody_group.rs similarity index 98% rename from consensus/types/src/data_column_custody_group.rs rename to consensus/types/src/data/data_column_custody_group.rs index 7ecabab0abc..d96d13cfff6 100644 --- a/consensus/types/src/data_column_custody_group.rs +++ b/consensus/types/src/data/data_column_custody_group.rs @@ -1,8 +1,14 @@ -use crate::{ChainSpec, ColumnIndex, DataColumnSubnetId, EthSpec}; +use std::collections::HashSet; + use alloy_primitives::U256; use itertools::Itertools; use safe_arith::{ArithError, SafeArith}; -use std::collections::HashSet; + +use crate::{ + EthSpec, + core::ChainSpec, + data::{ColumnIndex, DataColumnSubnetId}, +}; pub type CustodyIndex = u64; diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data/data_column_sidecar.rs similarity index 94% rename from consensus/types/src/data_column_sidecar.rs rename to consensus/types/src/data/data_column_sidecar.rs index 62ce4467dfa..71d821f83ef 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data/data_column_sidecar.rs @@ -1,13 +1,8 @@ -use crate::beacon_block_body::{BLOB_KZG_COMMITMENTS_INDEX, KzgCommitments}; -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ - BeaconBlockHeader, BeaconStateError, Epoch, EthSpec, ForkName, Hash256, - SignedBeaconBlockHeader, Slot, -}; +use std::sync::Arc; + use bls::Signature; +use context_deserialize::context_deserialize; use educe::Educe; -use kzg::Error as KzgError; use kzg::{KzgCommitment, KzgProof}; use merkle_proof::verify_merkle_proof; use safe_arith::ArithError; @@ -16,11 +11,19 @@ use ssz::Encode; use ssz_derive::{Decode, Encode}; use ssz_types::Error as SszError; use ssz_types::{FixedVector, VariableList}; -use std::sync::Arc; use test_random_derive::TestRandom; 
use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + block::{BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, SignedBeaconBlockHeader}, + core::{Epoch, EthSpec, Hash256, Slot}, + fork::ForkName, + kzg_ext::{KzgCommitments, KzgError}, + state::BeaconStateError, + test_utils::TestRandom, +}; + pub type ColumnIndex = u64; pub type Cell = FixedVector::BytesPerCell>; pub type DataColumn = VariableList, ::MaxBlobCommitmentsPerBlock>; diff --git a/consensus/types/src/data_column_subnet_id.rs b/consensus/types/src/data/data_column_subnet_id.rs similarity index 80% rename from consensus/types/src/data_column_subnet_id.rs rename to consensus/types/src/data/data_column_subnet_id.rs index c6b8846c783..c30ebbba20e 100644 --- a/consensus/types/src/data_column_subnet_id.rs +++ b/consensus/types/src/data/data_column_subnet_id.rs @@ -1,10 +1,13 @@ //! Identifies each data column subnet by an integer identifier. -use crate::ChainSpec; -use crate::data_column_sidecar::ColumnIndex; -use safe_arith::{ArithError, SafeArith}; +use std::{ + fmt::{self, Display}, + ops::{Deref, DerefMut}, +}; + +use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; -use std::fmt::{self, Display}; -use std::ops::{Deref, DerefMut}; + +use crate::{core::ChainSpec, data::ColumnIndex}; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] @@ -69,15 +72,3 @@ impl From<&DataColumnSubnetId> for u64 { val.0 } } - -#[derive(Debug)] -pub enum Error { - ArithError(ArithError), - InvalidCustodySubnetCount(u64), -} - -impl From for Error { - fn from(e: ArithError) -> Self { - Error::ArithError(e) - } -} diff --git a/consensus/types/src/data/mod.rs b/consensus/types/src/data/mod.rs new file mode 100644 index 00000000000..10d062bada9 --- /dev/null +++ b/consensus/types/src/data/mod.rs @@ -0,0 +1,23 @@ +mod blob_sidecar; +mod data_column_custody_group; +mod data_column_sidecar; +mod data_column_subnet_id; + +pub use 
blob_sidecar::{ + BlobIdentifier, BlobSidecar, BlobSidecarError, BlobSidecarList, BlobsList, FixedBlobSidecarList, +}; +pub use data_column_custody_group::{ + CustodyIndex, DataColumnCustodyGroupError, compute_columns_for_custody_group, + compute_ordered_custody_column_indices, compute_subnets_for_node, + compute_subnets_from_custody_group, get_custody_groups, +}; +pub use data_column_sidecar::{ + Cell, ColumnIndex, DataColumn, DataColumnSidecar, DataColumnSidecarError, + DataColumnSidecarList, DataColumnsByRootIdentifier, +}; +pub use data_column_subnet_id::DataColumnSubnetId; + +use crate::core::EthSpec; +use ssz_types::FixedVector; + +pub type Blob = FixedVector::BytesPerBlob>; diff --git a/consensus/types/src/deposit.rs b/consensus/types/src/deposit/deposit.rs similarity index 78% rename from consensus/types/src/deposit.rs rename to consensus/types/src/deposit/deposit.rs index 724f3de2f07..0b08bd6509f 100644 --- a/consensus/types/src/deposit.rs +++ b/consensus/types/src/deposit/deposit.rs @@ -1,11 +1,12 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::*; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use ssz_types::typenum::U33; +use ssz_types::FixedVector; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use typenum::U33; + +use crate::{core::Hash256, deposit::DepositData, fork::ForkName, test_utils::TestRandom}; pub const DEPOSIT_TREE_DEPTH: usize = 32; diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit/deposit_data.rs similarity index 86% rename from consensus/types/src/deposit_data.rs rename to consensus/types/src/deposit/deposit_data.rs index 3d9ae128088..51697f5d1a2 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit/deposit_data.rs @@ -1,10 +1,17 @@ -use crate::test_utils::TestRandom; -use crate::*; +use bls::{PublicKeyBytes, SecretKey, SignatureBytes}; +use 
context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, Hash256, SignedRoot}, + deposit::DepositMessage, + fork::ForkName, + test_utils::TestRandom, +}; + /// The data supplied by the user to the deposit contract. /// /// Spec v0.12.1 diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit/deposit_message.rs similarity index 81% rename from consensus/types/src/deposit_message.rs rename to consensus/types/src/deposit/deposit_message.rs index 9fe3b878858..4495a5c0236 100644 --- a/consensus/types/src/deposit_message.rs +++ b/consensus/types/src/deposit/deposit_message.rs @@ -1,11 +1,16 @@ -use crate::test_utils::TestRandom; -use crate::*; - +use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Hash256, SignedRoot}, + fork::ForkName, + test_utils::TestRandom, +}; + /// The data supplied by the user to the deposit contract. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/deposit_request.rs b/consensus/types/src/deposit/deposit_request.rs similarity index 86% rename from consensus/types/src/deposit_request.rs rename to consensus/types/src/deposit/deposit_request.rs index 16acfb3b443..8d3c6e88bae 100644 --- a/consensus/types/src/deposit_request.rs +++ b/consensus/types/src/deposit/deposit_request.rs @@ -1,13 +1,13 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ForkName, Hash256, PublicKeyBytes}; -use bls::SignatureBytes; +use bls::{PublicKeyBytes, SignatureBytes}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Hash256, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/deposit_tree_snapshot.rs b/consensus/types/src/deposit/deposit_tree_snapshot.rs similarity index 95% rename from consensus/types/src/deposit_tree_snapshot.rs rename to consensus/types/src/deposit/deposit_tree_snapshot.rs index 400fca217da..24f41397a0a 100644 --- a/consensus/types/src/deposit_tree_snapshot.rs +++ b/consensus/types/src/deposit/deposit_tree_snapshot.rs @@ -1,10 +1,11 @@ -use crate::*; use ethereum_hashing::{ZERO_HASHES, hash32_concat}; +use fixed_bytes::FixedBytesExtended; use int_to_bytes::int_to_bytes32; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; -use test_utils::TestRandom; + +use crate::{core::Hash256, deposit::DEPOSIT_TREE_DEPTH, test_utils::TestRandom}; #[derive(Encode, Decode, Deserialize, Serialize, Clone, Debug, PartialEq, TestRandom)] pub struct FinalizedExecutionBlock { diff --git a/consensus/types/src/deposit/mod.rs 
b/consensus/types/src/deposit/mod.rs new file mode 100644 index 00000000000..ff80f65cdb3 --- /dev/null +++ b/consensus/types/src/deposit/mod.rs @@ -0,0 +1,13 @@ +mod deposit; +mod deposit_data; +mod deposit_message; +mod deposit_request; +mod deposit_tree_snapshot; +mod pending_deposit; + +pub use deposit::{DEPOSIT_TREE_DEPTH, Deposit}; +pub use deposit_data::DepositData; +pub use deposit_message::DepositMessage; +pub use deposit_request::DepositRequest; +pub use deposit_tree_snapshot::{DepositTreeSnapshot, FinalizedExecutionBlock}; +pub use pending_deposit::PendingDeposit; diff --git a/consensus/types/src/pending_deposit.rs b/consensus/types/src/deposit/pending_deposit.rs similarity index 78% rename from consensus/types/src/pending_deposit.rs rename to consensus/types/src/deposit/pending_deposit.rs index 4a921edd549..4c039af39cd 100644 --- a/consensus/types/src/pending_deposit.rs +++ b/consensus/types/src/deposit/pending_deposit.rs @@ -1,10 +1,16 @@ -use crate::test_utils::TestRandom; -use crate::*; +use bls::{PublicKeyBytes, SignatureBytes}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Hash256, Slot}, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/execution/bls_to_execution_change.rs similarity index 83% rename from consensus/types/src/bls_to_execution_change.rs rename to consensus/types/src/execution/bls_to_execution_change.rs index 72d737ac714..de14f1b4c5d 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/execution/bls_to_execution_change.rs @@ -1,10 +1,17 @@ -use crate::test_utils::TestRandom; -use crate::*; +use 
bls::{PublicKeyBytes, SecretKey}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, ChainSpec, Domain, Hash256, SignedRoot}, + execution::SignedBlsToExecutionChange, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/execution/eth1_data.rs similarity index 86% rename from consensus/types/src/eth1_data.rs rename to consensus/types/src/execution/eth1_data.rs index 800f3e25f94..89a4e634a66 100644 --- a/consensus/types/src/eth1_data.rs +++ b/consensus/types/src/execution/eth1_data.rs @@ -1,12 +1,11 @@ -use super::Hash256; -use crate::ForkName; -use crate::context_deserialize; -use crate::test_utils::TestRandom; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Hash256, fork::ForkName, test_utils::TestRandom}; + /// Contains data obtained from the Eth1 chain. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution/execution_block_hash.rs similarity index 96% rename from consensus/types/src/execution_block_hash.rs rename to consensus/types/src/execution/execution_block_hash.rs index 31905d64dfa..91c019ce040 100644 --- a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution/execution_block_hash.rs @@ -1,10 +1,11 @@ -use crate::FixedBytesExtended; -use crate::Hash256; -use crate::test_utils::TestRandom; +use std::fmt; + +use fixed_bytes::FixedBytesExtended; use rand::RngCore; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; -use std::fmt; + +use crate::{core::Hash256, test_utils::TestRandom}; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Default, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash)] diff --git a/consensus/types/src/execution_block_header.rs b/consensus/types/src/execution/execution_block_header.rs similarity index 98% rename from consensus/types/src/execution_block_header.rs rename to consensus/types/src/execution/execution_block_header.rs index 02152adbf73..e596ba1831d 100644 --- a/consensus/types/src/execution_block_header.rs +++ b/consensus/types/src/execution/execution_block_header.rs @@ -17,10 +17,15 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -use crate::{Address, EthSpec, ExecutionPayloadRef, Hash64, Hash256, Uint256}; use alloy_rlp::RlpEncodable; +use fixed_bytes::Uint256; use metastruct::metastruct; +use crate::{ + core::{Address, EthSpec, Hash64, Hash256}, + execution::ExecutionPayloadRef, +}; + /// Execution block header as used for RLP encoding and Keccak hashing. /// /// Credit to Reth for the type definition. 
diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution/execution_payload.rs similarity index 92% rename from consensus/types/src/execution_payload.rs rename to consensus/types/src/execution/execution_payload.rs index 3548f67db2e..7973b7efdce 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution/execution_payload.rs @@ -1,19 +1,29 @@ -use crate::{test_utils::TestRandom, *}; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; +use fixed_bytes::Uint256; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use ssz_types::{FixedVector, VariableList}; +use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, EthSpec, Hash256}, + execution::ExecutionBlockHash, + fork::{ForkName, ForkVersionDecode}, + state::BeaconStateError, + test_utils::TestRandom, + withdrawal::Withdrawals, +}; + pub type Transaction = VariableList; pub type Transactions = VariableList< Transaction<::MaxBytesPerTransaction>, ::MaxTransactionsPerPayload, >; -pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; - #[superstruct( variants(Bellatrix, Capella, Deneb, Electra, Fulu, Gloas), variant_attributes( @@ -38,8 +48,14 @@ pub type Withdrawals = VariableList::MaxWithdrawal arbitrary(bound = "E: EthSpec"), ), ), - cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), map_into(FullPayload, BlindedPayload), map_ref_into(ExecutionPayloadHeader) )] diff --git a/consensus/types/src/execution_payload_header.rs 
b/consensus/types/src/execution/execution_payload_header.rs similarity index 96% rename from consensus/types/src/execution_payload_header.rs rename to consensus/types/src/execution/execution_payload_header.rs index 241ecb4ce6e..bd91a6471b2 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution/execution_payload_header.rs @@ -1,12 +1,27 @@ -use crate::{test_utils::TestRandom, *}; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use ssz_types::{FixedVector, VariableList}; +use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, EthSpec, Hash256, Uint256}, + execution::{ + ExecutionBlockHash, ExecutionPayloadBellatrix, ExecutionPayloadCapella, + ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, + ExecutionPayloadGloas, ExecutionPayloadRef, Transactions, + }, + fork::ForkName, + state::BeaconStateError, + test_utils::TestRandom, +}; + #[superstruct( variants(Bellatrix, Capella, Deneb, Electra, Fulu, Gloas), variant_attributes( @@ -35,8 +50,14 @@ use tree_hash_derive::TreeHash; derive(PartialEq, TreeHash, Debug), tree_hash(enum_behaviour = "transparent") ), - cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), map_ref_into(ExecutionPayloadHeader) )] #[cfg_attr( diff --git a/consensus/types/src/execution_requests.rs b/consensus/types/src/execution/execution_requests.rs similarity index 93% rename from 
consensus/types/src/execution_requests.rs rename to consensus/types/src/execution/execution_requests.rs index 67396af71d4..92d717778e3 100644 --- a/consensus/types/src/execution_requests.rs +++ b/consensus/types/src/execution/execution_requests.rs @@ -1,7 +1,5 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ConsolidationRequest, DepositRequest, EthSpec, ForkName, Hash256, WithdrawalRequest}; use alloy_primitives::Bytes; +use context_deserialize::context_deserialize; use educe::Educe; use ethereum_hashing::{DynamicContext, Sha256Context}; use serde::{Deserialize, Serialize}; @@ -11,6 +9,15 @@ use ssz_types::VariableList; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + consolidation::ConsolidationRequest, + core::{EthSpec, Hash256}, + deposit::DepositRequest, + fork::ForkName, + test_utils::TestRandom, + withdrawal::WithdrawalRequest, +}; + pub type DepositRequests = VariableList::MaxDepositRequestsPerPayload>; pub type WithdrawalRequests = diff --git a/consensus/types/src/execution/mod.rs b/consensus/types/src/execution/mod.rs new file mode 100644 index 00000000000..0708bc5d960 --- /dev/null +++ b/consensus/types/src/execution/mod.rs @@ -0,0 +1,36 @@ +mod eth1_data; +mod execution_block_hash; +mod execution_block_header; +#[macro_use] +mod execution_payload; +mod bls_to_execution_change; +mod execution_payload_header; +mod execution_requests; +mod payload; +mod signed_bls_to_execution_change; + +pub use bls_to_execution_change::BlsToExecutionChange; +pub use eth1_data::Eth1Data; +pub use execution_block_hash::ExecutionBlockHash; +pub use execution_block_header::{EncodableExecutionBlockHeader, ExecutionBlockHeader}; +pub use execution_payload::{ + ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, + ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadGloas, ExecutionPayloadRef, + Transaction, Transactions, +}; +pub use 
execution_payload_header::{ + ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + ExecutionPayloadHeaderGloas, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, +}; +pub use execution_requests::{ + ConsolidationRequests, DepositRequests, ExecutionRequests, RequestType, WithdrawalRequests, +}; +pub use payload::{ + AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, + BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadFulu, BlindedPayloadGloas, + BlindedPayloadRef, BlockProductionVersion, BlockType, ExecPayload, FullPayload, + FullPayloadBellatrix, FullPayloadCapella, FullPayloadDeneb, FullPayloadElectra, + FullPayloadFulu, FullPayloadGloas, FullPayloadRef, OwnedExecPayload, +}; +pub use signed_bls_to_execution_change::SignedBlsToExecutionChange; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/execution/payload.rs similarity index 91% rename from consensus/types/src/payload.rs rename to consensus/types/src/execution/payload.rs index 370c73ad0a6..c1cc6c4eb66 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/execution/payload.rs @@ -1,16 +1,29 @@ -use crate::{test_utils::TestRandom, *}; use educe::Educe; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use std::borrow::Cow; -use std::fmt::Debug; -use std::hash::Hash; +use ssz_types::VariableList; +use std::{borrow::Cow, fmt::Debug, hash::Hash}; +use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, EthSpec, Hash256}, + execution::{ + ExecutionBlockHash, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, + ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, + ExecutionPayloadGloas, 
ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, + ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, + ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderGloas, ExecutionPayloadRef, Transactions, + }, + fork::ForkName, + state::BeaconStateError, + test_utils::TestRandom, +}; + #[derive(Debug, PartialEq)] pub enum BlockType { Blinded, @@ -38,8 +51,8 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + fn gas_limit(&self) -> u64; fn transactions(&self) -> Option<&Transactions>; /// fork-specific fields - fn withdrawals_root(&self) -> Result; - fn blob_gas_used(&self) -> Result; + fn withdrawals_root(&self) -> Result; + fn blob_gas_used(&self) -> Result; /// Is this a default payload with 0x0 roots for transactions and withdrawals? fn is_default_with_zero_roots(&self) -> bool; @@ -179,8 +192,14 @@ pub trait AbstractExecPayload: ), map_into(ExecutionPayload), map_ref_into(ExecutionPayloadRef), - cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ) )] #[cfg_attr( feature = "arbitrary", @@ -311,9 +330,9 @@ impl ExecPayload for FullPayload { }) } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { match self { - FullPayload::Bellatrix(_) => Err(Error::IncorrectStateVariant), + FullPayload::Bellatrix(_) => Err(BeaconStateError::IncorrectStateVariant), FullPayload::Capella(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), FullPayload::Deneb(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), FullPayload::Electra(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), @@ -322,10 +341,10 @@ impl ExecPayload for FullPayload { } } 
- fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { match self { FullPayload::Bellatrix(_) | FullPayload::Capella(_) => { - Err(Error::IncorrectStateVariant) + Err(BeaconStateError::IncorrectStateVariant) } FullPayload::Deneb(inner) => Ok(inner.execution_payload.blob_gas_used), FullPayload::Electra(inner) => Ok(inner.execution_payload.blob_gas_used), @@ -354,9 +373,9 @@ impl FullPayload { }) } - pub fn default_at_fork(fork_name: ForkName) -> Result { + pub fn default_at_fork(fork_name: ForkName) -> Result { match fork_name { - ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), + ForkName::Base | ForkName::Altair => Err(BeaconStateError::IncorrectStateVariant), ForkName::Bellatrix => Ok(FullPayloadBellatrix::default().into()), ForkName::Capella => Ok(FullPayloadCapella::default().into()), ForkName::Deneb => Ok(FullPayloadDeneb::default().into()), @@ -450,9 +469,9 @@ impl ExecPayload for FullPayloadRef<'_, E> { }) } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { match self { - FullPayloadRef::Bellatrix(_) => Err(Error::IncorrectStateVariant), + FullPayloadRef::Bellatrix(_) => Err(BeaconStateError::IncorrectStateVariant), FullPayloadRef::Capella(inner) => { Ok(inner.execution_payload.withdrawals.tree_hash_root()) } @@ -469,10 +488,10 @@ impl ExecPayload for FullPayloadRef<'_, E> { } } - fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { match self { FullPayloadRef::Bellatrix(_) | FullPayloadRef::Capella(_) => { - Err(Error::IncorrectStateVariant) + Err(BeaconStateError::IncorrectStateVariant) } FullPayloadRef::Deneb(inner) => Ok(inner.execution_payload.blob_gas_used), FullPayloadRef::Electra(inner) => Ok(inner.execution_payload.blob_gas_used), @@ -548,8 +567,14 @@ impl TryFrom> for FullPayload { tree_hash(enum_behaviour = "transparent"), ), map_into(ExecutionPayloadHeader), - cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - 
partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ) )] #[cfg_attr( feature = "arbitrary", @@ -658,9 +683,9 @@ impl ExecPayload for BlindedPayload { None } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { match self { - BlindedPayload::Bellatrix(_) => Err(Error::IncorrectStateVariant), + BlindedPayload::Bellatrix(_) => Err(BeaconStateError::IncorrectStateVariant), BlindedPayload::Capella(inner) => Ok(inner.execution_payload_header.withdrawals_root), BlindedPayload::Deneb(inner) => Ok(inner.execution_payload_header.withdrawals_root), BlindedPayload::Electra(inner) => Ok(inner.execution_payload_header.withdrawals_root), @@ -669,10 +694,10 @@ impl ExecPayload for BlindedPayload { } } - fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { match self { BlindedPayload::Bellatrix(_) | BlindedPayload::Capella(_) => { - Err(Error::IncorrectStateVariant) + Err(BeaconStateError::IncorrectStateVariant) } BlindedPayload::Deneb(inner) => Ok(inner.execution_payload_header.blob_gas_used), BlindedPayload::Electra(inner) => Ok(inner.execution_payload_header.blob_gas_used), @@ -766,9 +791,9 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { None } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { match self { - BlindedPayloadRef::Bellatrix(_) => Err(Error::IncorrectStateVariant), + BlindedPayloadRef::Bellatrix(_) => Err(BeaconStateError::IncorrectStateVariant), BlindedPayloadRef::Capella(inner) => { Ok(inner.execution_payload_header.withdrawals_root) } @@ -781,10 +806,10 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { } } - fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { match self { 
BlindedPayloadRef::Bellatrix(_) | BlindedPayloadRef::Capella(_) => { - Err(Error::IncorrectStateVariant) + Err(BeaconStateError::IncorrectStateVariant) } BlindedPayloadRef::Deneb(inner) => Ok(inner.execution_payload_header.blob_gas_used), BlindedPayloadRef::Electra(inner) => Ok(inner.execution_payload_header.blob_gas_used), @@ -877,12 +902,12 @@ macro_rules! impl_exec_payload_common { f(self) } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { let g = $g; g(self) } - fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { let h = $h; h(self) } @@ -917,15 +942,16 @@ macro_rules! impl_exec_payload_for_fork { }, { |_| { None } }, { - let c: for<'a> fn(&'a $wrapper_type_header) -> Result = - |payload: &$wrapper_type_header| { - let wrapper_ref_type = BlindedPayloadRef::$fork_variant(&payload); - wrapper_ref_type.withdrawals_root() - }; + let c: for<'a> fn( + &'a $wrapper_type_header, + ) -> Result = |payload: &$wrapper_type_header| { + let wrapper_ref_type = BlindedPayloadRef::$fork_variant(&payload); + wrapper_ref_type.withdrawals_root() + }; c }, { - let c: for<'a> fn(&'a $wrapper_type_header) -> Result = + let c: for<'a> fn(&'a $wrapper_type_header) -> Result = |payload: &$wrapper_type_header| { let wrapper_ref_type = BlindedPayloadRef::$fork_variant(&payload); wrapper_ref_type.blob_gas_used() @@ -935,12 +961,12 @@ macro_rules! impl_exec_payload_for_fork { ); impl TryInto<$wrapper_type_header> for BlindedPayload { - type Error = Error; + type Error = BeaconStateError; fn try_into(self) -> Result<$wrapper_type_header, Self::Error> { match self { BlindedPayload::$fork_variant(payload) => Ok(payload), - _ => Err(Error::IncorrectStateVariant), + _ => Err(BeaconStateError::IncorrectStateVariant), } } } @@ -963,13 +989,13 @@ macro_rules! 
impl_exec_payload_for_fork { } impl TryFrom> for $wrapper_type_header { - type Error = Error; + type Error = BeaconStateError; fn try_from(header: ExecutionPayloadHeader) -> Result { match header { ExecutionPayloadHeader::$fork_variant(execution_payload_header) => { Ok(execution_payload_header.into()) } - _ => Err(Error::PayloadConversionLogicFlaw), + _ => Err(BeaconStateError::PayloadConversionLogicFlaw), } } } @@ -1004,7 +1030,7 @@ macro_rules! impl_exec_payload_for_fork { c }, { - let c: for<'a> fn(&'a $wrapper_type_full) -> Result = + let c: for<'a> fn(&'a $wrapper_type_full) -> Result = |payload: &$wrapper_type_full| { let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); wrapper_ref_type.withdrawals_root() @@ -1012,7 +1038,7 @@ macro_rules! impl_exec_payload_for_fork { c }, { - let c: for<'a> fn(&'a $wrapper_type_full) -> Result = + let c: for<'a> fn(&'a $wrapper_type_full) -> Result = |payload: &$wrapper_type_full| { let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); wrapper_ref_type.blob_gas_used() @@ -1039,26 +1065,26 @@ macro_rules! 
impl_exec_payload_for_fork { } impl TryFrom> for $wrapper_type_full { - type Error = Error; + type Error = BeaconStateError; fn try_from(_: ExecutionPayloadHeader) -> Result { - Err(Error::PayloadConversionLogicFlaw) + Err(BeaconStateError::PayloadConversionLogicFlaw) } } impl TryFrom<$wrapped_type_header> for $wrapper_type_full { - type Error = Error; + type Error = BeaconStateError; fn try_from(_: $wrapped_type_header) -> Result { - Err(Error::PayloadConversionLogicFlaw) + Err(BeaconStateError::PayloadConversionLogicFlaw) } } impl TryInto<$wrapper_type_full> for FullPayload { - type Error = Error; + type Error = BeaconStateError; fn try_into(self) -> Result<$wrapper_type_full, Self::Error> { match self { FullPayload::$fork_variant(payload) => Ok(payload), - _ => Err(Error::PayloadConversionLogicFlaw), + _ => Err(BeaconStateError::PayloadConversionLogicFlaw), } } } diff --git a/consensus/types/src/signed_bls_to_execution_change.rs b/consensus/types/src/execution/signed_bls_to_execution_change.rs similarity index 78% rename from consensus/types/src/signed_bls_to_execution_change.rs rename to consensus/types/src/execution/signed_bls_to_execution_change.rs index 910c4c7d7ef..535960fb3f9 100644 --- a/consensus/types/src/signed_bls_to_execution_change.rs +++ b/consensus/types/src/execution/signed_bls_to_execution_change.rs @@ -1,10 +1,12 @@ -use crate::test_utils::TestRandom; -use crate::*; +use bls::Signature; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{execution::BlsToExecutionChange, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/exit/mod.rs b/consensus/types/src/exit/mod.rs new file mode 100644 index 
00000000000..cb066d1d7a0 --- /dev/null +++ b/consensus/types/src/exit/mod.rs @@ -0,0 +1,5 @@ +mod signed_voluntary_exit; +mod voluntary_exit; + +pub use signed_voluntary_exit::SignedVoluntaryExit; +pub use voluntary_exit::VoluntaryExit; diff --git a/consensus/types/src/signed_voluntary_exit.rs b/consensus/types/src/exit/signed_voluntary_exit.rs similarity index 84% rename from consensus/types/src/signed_voluntary_exit.rs rename to consensus/types/src/exit/signed_voluntary_exit.rs index 0beffa1e04a..b49401a7215 100644 --- a/consensus/types/src/signed_voluntary_exit.rs +++ b/consensus/types/src/exit/signed_voluntary_exit.rs @@ -1,12 +1,12 @@ -use crate::context_deserialize; -use crate::{ForkName, VoluntaryExit, test_utils::TestRandom}; use bls::Signature; - +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{exit::VoluntaryExit, fork::ForkName, test_utils::TestRandom}; + /// An exit voluntarily submitted a validator who wishes to withdraw. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/exit/voluntary_exit.rs similarity index 90% rename from consensus/types/src/voluntary_exit.rs rename to consensus/types/src/exit/voluntary_exit.rs index 42d792a814d..30c6a97c4d1 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/exit/voluntary_exit.rs @@ -1,14 +1,17 @@ -use crate::context_deserialize; -use crate::{ - ChainSpec, Domain, Epoch, ForkName, Hash256, SecretKey, SignedRoot, SignedVoluntaryExit, - test_utils::TestRandom, -}; - +use bls::SecretKey; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, Domain, Epoch, Hash256, SignedRoot}, + exit::SignedVoluntaryExit, + fork::ForkName, + test_utils::TestRandom, +}; + /// An exit voluntarily submitted a validator who wishes to withdraw. /// /// Spec v0.12.1 diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork/fork.rs similarity index 96% rename from consensus/types/src/fork.rs rename to consensus/types/src/fork/fork.rs index 5c5bd7ffd18..371b11e05c5 100644 --- a/consensus/types/src/fork.rs +++ b/consensus/types/src/fork/fork.rs @@ -1,12 +1,11 @@ -use crate::test_utils::TestRandom; -use crate::{Epoch, ForkName}; use context_deserialize::context_deserialize; - use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Epoch, fork::ForkName, test_utils::TestRandom}; + /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork/fork_context.rs similarity index 97% rename from consensus/types/src/fork_context.rs rename to consensus/types/src/fork/fork_context.rs index 66617326e13..89f69bcbb62 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork/fork_context.rs @@ -1,7 +1,11 @@ +use std::collections::BTreeMap; + use parking_lot::RwLock; -use crate::{ChainSpec, Epoch, EthSpec, ForkName, Hash256, Slot}; -use std::collections::BTreeMap; +use crate::{ + core::{ChainSpec, Epoch, EthSpec, Hash256, Slot}, + fork::ForkName, +}; /// Represents a hard fork in the consensus protocol. /// @@ -59,8 +63,7 @@ impl ForkContext { let current_epoch = current_slot.epoch(E::slots_per_epoch()); let current_fork = epoch_to_forks .values() - .filter(|&fork| fork.fork_epoch <= current_epoch) - .next_back() + .rfind(|&fork| fork.fork_epoch <= current_epoch) .cloned() .expect("should match at least genesis epoch"); @@ -152,8 +155,7 @@ impl ForkContext { #[cfg(test)] mod tests { use super::*; - use crate::MainnetEthSpec; - use crate::chain_spec::{BlobParameters, BlobSchedule}; + use crate::core::{BlobParameters, BlobSchedule, MainnetEthSpec}; type E = MainnetEthSpec; diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork/fork_data.rs similarity index 88% rename from consensus/types/src/fork_data.rs rename to consensus/types/src/fork/fork_data.rs index 2d5e905efb9..1b9c8bad9ff 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork/fork_data.rs @@ -1,12 +1,15 @@ -use crate::test_utils::TestRandom; -use crate::{ForkName, Hash256, SignedRoot}; use context_deserialize::context_deserialize; - use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Hash256, SignedRoot}, + fork::ForkName, + test_utils::TestRandom, +}; + /// Specifies a fork of the 
`BeaconChain`, to prevent replay attacks. /// /// Spec v0.12.1 diff --git a/consensus/types/src/fork/fork_macros.rs b/consensus/types/src/fork/fork_macros.rs new file mode 100644 index 00000000000..0c7f382ffc5 --- /dev/null +++ b/consensus/types/src/fork/fork_macros.rs @@ -0,0 +1,60 @@ +/// Map a fork name into a fork-versioned superstruct type like `BeaconBlock`. +/// +/// The `$body` expression is where the magic happens. The macro allows us to achieve polymorphism +/// in the return type, which is not usually possible in Rust without trait objects. +/// +/// E.g. you could call `map_fork_name!(fork, BeaconBlock, serde_json::from_str(s))` to decode +/// different `BeaconBlock` variants depending on the value of `fork`. Note how the type of the body +/// will change between `BeaconBlockBase` and `BeaconBlockAltair` depending on which branch is +/// taken, the important thing is that they are re-unified by injecting them back into the +/// `BeaconBlock` parent enum. +/// +/// If you would also like to extract additional data alongside the superstruct type, use +/// the more flexible `map_fork_name_with` macro. +#[macro_export] +macro_rules! map_fork_name { + ($fork_name:expr, $t:tt, $body:expr) => { + $crate::map_fork_name_with!($fork_name, $t, { ($body, ()) }).0 + }; +} + +/// Map a fork name into a tuple of `(t, extra)` where `t` is a superstruct type. +#[macro_export] +macro_rules! 
map_fork_name_with { + ($fork_name:expr, $t:tt, $body:block) => { + match $fork_name { + $crate::fork::ForkName::Base => { + let (value, extra_data) = $body; + ($t::Base(value), extra_data) + } + $crate::fork::ForkName::Altair => { + let (value, extra_data) = $body; + ($t::Altair(value), extra_data) + } + $crate::fork::ForkName::Bellatrix => { + let (value, extra_data) = $body; + ($t::Bellatrix(value), extra_data) + } + $crate::fork::ForkName::Capella => { + let (value, extra_data) = $body; + ($t::Capella(value), extra_data) + } + $crate::fork::ForkName::Deneb => { + let (value, extra_data) = $body; + ($t::Deneb(value), extra_data) + } + $crate::fork::ForkName::Electra => { + let (value, extra_data) = $body; + ($t::Electra(value), extra_data) + } + $crate::fork::ForkName::Fulu => { + let (value, extra_data) = $body; + ($t::Fulu(value), extra_data) + } + $crate::fork::ForkName::Gloas => { + let (value, extra_data) = $body; + ($t::Gloas(value), extra_data) + } + } + }; +} diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork/fork_name.rs similarity index 84% rename from consensus/types/src/fork_name.rs rename to consensus/types/src/fork/fork_name.rs index 1d7bf3795b2..e9ec5fbe41e 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork/fork_name.rs @@ -1,8 +1,12 @@ -use crate::{ChainSpec, Epoch}; +use std::{ + fmt::{self, Display, Formatter}, + str::FromStr, +}; + use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use std::fmt::{self, Display, Formatter}; -use std::str::FromStr; + +use crate::core::{ChainSpec, Epoch}; #[derive( Debug, Clone, Copy, Decode, Encode, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, @@ -243,67 +247,6 @@ impl ForkName { } } -/// Map a fork name into a fork-versioned superstruct type like `BeaconBlock`. -/// -/// The `$body` expression is where the magic happens. 
The macro allows us to achieve polymorphism -/// in the return type, which is not usually possible in Rust without trait objects. -/// -/// E.g. you could call `map_fork_name!(fork, BeaconBlock, serde_json::from_str(s))` to decode -/// different `BeaconBlock` variants depending on the value of `fork`. Note how the type of the body -/// will change between `BeaconBlockBase` and `BeaconBlockAltair` depending on which branch is -/// taken, the important thing is that they are re-unified by injecting them back into the -/// `BeaconBlock` parent enum. -/// -/// If you would also like to extract additional data alongside the superstruct type, use -/// the more flexible `map_fork_name_with` macro. -#[macro_export] -macro_rules! map_fork_name { - ($fork_name:expr, $t:tt, $body:expr) => { - map_fork_name_with!($fork_name, $t, { ($body, ()) }).0 - }; -} - -/// Map a fork name into a tuple of `(t, extra)` where `t` is a superstruct type. -#[macro_export] -macro_rules! map_fork_name_with { - ($fork_name:expr, $t:tt, $body:block) => { - match $fork_name { - ForkName::Base => { - let (value, extra_data) = $body; - ($t::Base(value), extra_data) - } - ForkName::Altair => { - let (value, extra_data) = $body; - ($t::Altair(value), extra_data) - } - ForkName::Bellatrix => { - let (value, extra_data) = $body; - ($t::Bellatrix(value), extra_data) - } - ForkName::Capella => { - let (value, extra_data) = $body; - ($t::Capella(value), extra_data) - } - ForkName::Deneb => { - let (value, extra_data) = $body; - ($t::Deneb(value), extra_data) - } - ForkName::Electra => { - let (value, extra_data) = $body; - ($t::Electra(value), extra_data) - } - ForkName::Fulu => { - let (value, extra_data) = $body; - ($t::Fulu(value), extra_data) - } - ForkName::Gloas => { - let (value, extra_data) = $body; - ($t::Gloas(value), extra_data) - } - } - }; -} - impl FromStr for ForkName { type Err = String; diff --git a/consensus/types/src/fork/fork_version_decode.rs 
b/consensus/types/src/fork/fork_version_decode.rs new file mode 100644 index 00000000000..4349efb21f9 --- /dev/null +++ b/consensus/types/src/fork/fork_version_decode.rs @@ -0,0 +1,6 @@ +use crate::fork::ForkName; + +pub trait ForkVersionDecode: Sized { + /// SSZ decode with explicit fork variant. + fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result; +} diff --git a/consensus/types/src/fork/mod.rs b/consensus/types/src/fork/mod.rs new file mode 100644 index 00000000000..1ad1c7cb622 --- /dev/null +++ b/consensus/types/src/fork/mod.rs @@ -0,0 +1,15 @@ +mod fork; +mod fork_context; +mod fork_data; +mod fork_macros; +mod fork_name; +mod fork_version_decode; + +pub use crate::{map_fork_name, map_fork_name_with}; +pub use fork::Fork; +pub use fork_context::{ForkContext, HardFork}; +pub use fork_data::ForkData; +pub use fork_name::{ForkName, InconsistentFork}; +pub use fork_version_decode::ForkVersionDecode; + +pub type ForkVersion = [u8; 4]; diff --git a/consensus/types/src/kzg_ext/consts.rs b/consensus/types/src/kzg_ext/consts.rs new file mode 100644 index 00000000000..06c9f9c749e --- /dev/null +++ b/consensus/types/src/kzg_ext/consts.rs @@ -0,0 +1,3 @@ +pub use kzg::{ + BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_FIELD_ELEMENT, VERSIONED_HASH_VERSION_KZG, +}; diff --git a/consensus/types/src/kzg_ext/mod.rs b/consensus/types/src/kzg_ext/mod.rs new file mode 100644 index 00000000000..63533ec71f5 --- /dev/null +++ b/consensus/types/src/kzg_ext/mod.rs @@ -0,0 +1,27 @@ +pub mod consts; + +pub use kzg::{Blob as KzgBlob, Error as KzgError, Kzg, KzgCommitment, KzgProof}; + +use ssz_types::VariableList; + +use crate::core::EthSpec; + +// Note on List limit: +// - Deneb to Electra: `MaxBlobCommitmentsPerBlock` +// - Fulu: `MaxCellsPerBlock` +// We choose to use a single type (with the larger value from Fulu as `N`) instead of having to +// introduce a new type for Fulu. 
This is to avoid messy conversions and having to add extra types +// with no gains - as `N` does not impact serialisation at all, and only affects merkleization, +// which we don't current do on `KzgProofs` anyway. +pub type KzgProofs = VariableList::MaxCellsPerBlock>; + +pub type KzgCommitments = + VariableList::MaxBlobCommitmentsPerBlock>; + +/// Util method helpful for logging. +pub fn format_kzg_commitments(commitments: &[KzgCommitment]) -> String { + let commitment_strings: Vec = commitments.iter().map(|x| x.to_string()).collect(); + let commitments_joined = commitment_strings.join(", "); + let surrounded_commitments = format!("[{}]", commitments_joined); + surrounded_commitments +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 5aba30246fa..b1da5f0a701 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -1,4 +1,4 @@ -//! Ethereum 2.0 types +//! Ethereum Consensus types // Clippy lint set up #![cfg_attr( not(test), @@ -12,291 +12,162 @@ #[macro_use] pub mod test_utils; -pub mod aggregate_and_proof; -pub mod application_domain; pub mod attestation; -pub mod attestation_data; -pub mod attestation_duty; -pub mod attester_slashing; -pub mod beacon_block; -pub mod beacon_block_body; -pub mod beacon_block_header; -pub mod beacon_committee; -pub mod beacon_response; -pub mod beacon_state; -pub mod bls_to_execution_change; -pub mod builder_bid; -pub mod chain_spec; -pub mod checkpoint; -pub mod consolidation_request; -pub mod consts; -pub mod contribution_and_proof; +pub mod block; +pub mod builder; +pub mod consolidation; +pub mod core; +pub mod data; pub mod deposit; -pub mod deposit_data; -pub mod deposit_message; -pub mod deposit_request; -pub mod deposit_tree_snapshot; -pub mod enr_fork_id; -pub mod eth1_data; -pub mod eth_spec; -pub mod execution_block_hash; -pub mod execution_payload; -pub mod execution_payload_header; +pub mod execution; pub mod execution_proof; pub mod execution_proof_id; +pub mod exit; pub 
mod fork; -pub mod fork_data; -pub mod fork_name; -pub mod graffiti; -pub mod historical_batch; -pub mod historical_summary; -pub mod indexed_attestation; -pub mod light_client_bootstrap; -pub mod light_client_finality_update; -pub mod light_client_optimistic_update; -pub mod light_client_update; -pub mod pending_attestation; -pub mod pending_consolidation; -pub mod pending_deposit; -pub mod pending_partial_withdrawal; -pub mod proposer_preparation_data; -pub mod proposer_slashing; -pub mod relative_epoch; -pub mod selection_proof; -pub mod shuffling_id; -pub mod signed_aggregate_and_proof; -pub mod signed_beacon_block; -pub mod signed_beacon_block_header; -pub mod signed_bls_to_execution_change; -pub mod signed_contribution_and_proof; -pub mod signed_voluntary_exit; -pub mod signing_data; -pub mod sync_committee_subscription; -pub mod sync_duty; -pub mod validator; -pub mod validator_subscription; -pub mod voluntary_exit; -pub mod withdrawal_credentials; -pub mod withdrawal_request; -#[macro_use] -pub mod slot_epoch_macros; -pub mod activation_queue; -pub mod config_and_preset; -pub mod execution_block_header; -pub mod execution_requests; -pub mod fork_context; -pub mod participation_flags; -pub mod payload; -pub mod preset; -pub mod slot_epoch; -pub mod subnet_id; -pub mod sync_aggregate; -pub mod sync_aggregator_selection_data; +pub mod kzg_ext; +pub mod light_client; +pub mod slashing; +pub mod state; pub mod sync_committee; -pub mod sync_committee_contribution; -pub mod sync_committee_message; -pub mod sync_selection_proof; -pub mod sync_subnet_id; -pub mod validator_registration_data; +pub mod validator; pub mod withdrawal; -pub mod epoch_cache; -pub mod slot_data; -#[cfg(feature = "sqlite")] -pub mod sqlite; +// Temporary root level exports to maintain backwards compatibility for Lighthouse. 
+pub use attestation::*; +pub use block::*; +pub use builder::*; +pub use consolidation::*; +pub use core::{consts, *}; +pub use data::*; +pub use deposit::*; +pub use execution::*; +pub use exit::*; +pub use fork::*; +pub use kzg_ext::*; +pub use light_client::*; +pub use slashing::*; +pub use state::*; +pub use sync_committee::*; +pub use validator::*; +pub use withdrawal::*; -pub mod blob_sidecar; -pub mod data_column_custody_group; -pub mod data_column_sidecar; -pub mod data_column_subnet_id; -pub mod light_client_header; -pub mod non_zero_usize; -pub mod runtime_fixed_vector; -pub mod runtime_var_list; +// Temporary facade modules to maintain backwards compatibility for Lighthouse. +pub mod eth_spec { + pub use crate::core::EthSpec; +} -pub use crate::activation_queue::ActivationQueue; -pub use crate::aggregate_and_proof::{ - AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, -}; -pub use crate::attestation::{ - Attestation, AttestationBase, AttestationElectra, AttestationRef, AttestationRefMut, - Error as AttestationError, SingleAttestation, -}; -pub use crate::attestation_data::AttestationData; -pub use crate::attestation_duty::AttestationDuty; -pub use crate::attester_slashing::{ - AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, AttesterSlashingOnDisk, - AttesterSlashingRef, AttesterSlashingRefOnDisk, -}; -pub use crate::beacon_block::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockCapella, - BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockFulu, BeaconBlockGloas, BeaconBlockRef, - BeaconBlockRefMut, BlindedBeaconBlock, BlockImportSource, EmptyBlock, -}; -pub use crate::beacon_block_body::{ - BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyBellatrix, - BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyFulu, - BeaconBlockBodyGloas, BeaconBlockBodyRef, BeaconBlockBodyRefMut, -}; -pub use 
crate::beacon_block_header::BeaconBlockHeader; -pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; -pub use crate::beacon_response::{ - BeaconResponse, ForkVersionDecode, ForkVersionedResponse, UnversionedResponse, -}; -pub use crate::beacon_state::{Error as BeaconStateError, *}; -pub use crate::blob_sidecar::{BlobIdentifier, BlobSidecar, BlobSidecarList, BlobsList}; -pub use crate::bls_to_execution_change::BlsToExecutionChange; -pub use crate::chain_spec::{ChainSpec, Config, Domain}; -pub use crate::checkpoint::Checkpoint; -pub use crate::config_and_preset::{ - ConfigAndPreset, ConfigAndPresetDeneb, ConfigAndPresetElectra, ConfigAndPresetFulu, - ConfigAndPresetGloas, -}; -pub use crate::consolidation_request::ConsolidationRequest; -pub use crate::contribution_and_proof::ContributionAndProof; -pub use crate::data_column_sidecar::{ - ColumnIndex, DataColumnSidecar, DataColumnSidecarList, DataColumnsByRootIdentifier, -}; -pub use crate::data_column_subnet_id::DataColumnSubnetId; -pub use crate::deposit::{DEPOSIT_TREE_DEPTH, Deposit}; -pub use crate::deposit_data::DepositData; -pub use crate::deposit_message::DepositMessage; -pub use crate::deposit_request::DepositRequest; -pub use crate::deposit_tree_snapshot::{DepositTreeSnapshot, FinalizedExecutionBlock}; -pub use crate::enr_fork_id::EnrForkId; -pub use crate::epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; -pub use crate::eth_spec::EthSpecId; -pub use crate::eth1_data::Eth1Data; -pub use crate::execution_block_hash::ExecutionBlockHash; -pub use crate::execution_block_header::{EncodableExecutionBlockHeader, ExecutionBlockHeader}; -pub use crate::execution_payload::{ - ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, - ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadGloas, ExecutionPayloadRef, - Transaction, Transactions, Withdrawals, -}; -pub use crate::execution_payload_header::{ - ExecutionPayloadHeader, 
ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, - ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, - ExecutionPayloadHeaderGloas, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, -}; +pub mod chain_spec { + pub use crate::core::ChainSpec; +} + +// Re-export execution_proof types for backwards compatibility pub use crate::execution_proof::{ExecutionProof, MAX_PROOF_DATA_BYTES}; pub use crate::execution_proof_id::{EXECUTION_PROOF_TYPE_COUNT, ExecutionProofId}; -pub use crate::execution_requests::{ExecutionRequests, RequestType}; -pub use crate::fork::Fork; -pub use crate::fork_context::ForkContext; -pub use crate::fork_data::ForkData; -pub use crate::fork_name::{ForkName, InconsistentFork}; -pub use crate::graffiti::{GRAFFITI_BYTES_LEN, Graffiti}; -pub use crate::historical_batch::HistoricalBatch; -pub use crate::indexed_attestation::{ - IndexedAttestation, IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef, -}; -pub use crate::light_client_bootstrap::{ - LightClientBootstrap, LightClientBootstrapAltair, LightClientBootstrapCapella, - LightClientBootstrapDeneb, LightClientBootstrapElectra, LightClientBootstrapFulu, - LightClientBootstrapGloas, -}; -pub use crate::light_client_finality_update::{ - LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientFinalityUpdateCapella, - LightClientFinalityUpdateDeneb, LightClientFinalityUpdateElectra, - LightClientFinalityUpdateFulu, LightClientFinalityUpdateGloas, -}; -pub use crate::light_client_header::{ - LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderElectra, LightClientHeaderFulu, LightClientHeaderGloas, -}; -pub use crate::light_client_optimistic_update::{ - LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, - LightClientOptimisticUpdateCapella, LightClientOptimisticUpdateDeneb, - LightClientOptimisticUpdateElectra, 
LightClientOptimisticUpdateFulu, - LightClientOptimisticUpdateGloas, -}; -pub use crate::light_client_update::{ - Error as LightClientUpdateError, LightClientUpdate, LightClientUpdateAltair, - LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, - LightClientUpdateFulu, LightClientUpdateGloas, MerkleProof, -}; -pub use crate::participation_flags::ParticipationFlags; -pub use crate::payload::{ - AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, - BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadFulu, BlindedPayloadGloas, - BlindedPayloadRef, BlockType, ExecPayload, FullPayload, FullPayloadBellatrix, - FullPayloadCapella, FullPayloadDeneb, FullPayloadElectra, FullPayloadFulu, FullPayloadGloas, - FullPayloadRef, OwnedExecPayload, -}; -pub use crate::pending_attestation::PendingAttestation; -pub use crate::pending_consolidation::PendingConsolidation; -pub use crate::pending_deposit::PendingDeposit; -pub use crate::pending_partial_withdrawal::PendingPartialWithdrawal; -pub use crate::preset::{ - AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, DenebPreset, ElectraPreset, - FuluPreset, GloasPreset, -}; -pub use crate::proposer_preparation_data::ProposerPreparationData; -pub use crate::proposer_slashing::ProposerSlashing; -pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; -pub use crate::runtime_fixed_vector::RuntimeFixedVector; -pub use crate::runtime_var_list::RuntimeVariableList; -pub use crate::selection_proof::SelectionProof; -pub use crate::shuffling_id::AttestationShufflingId; -pub use crate::signed_aggregate_and_proof::{ - SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, -}; -pub use crate::signed_beacon_block::{ - SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, - SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, - SignedBeaconBlockFulu, 
SignedBeaconBlockGloas, SignedBeaconBlockHash, SignedBlindedBeaconBlock, - ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc, -}; -pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; -pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; -pub use crate::signed_contribution_and_proof::SignedContributionAndProof; -pub use crate::signed_voluntary_exit::SignedVoluntaryExit; -pub use crate::signing_data::{SignedRoot, SigningData}; -pub use crate::slot_epoch::{Epoch, Slot}; -pub use crate::subnet_id::SubnetId; -pub use crate::sync_aggregate::SyncAggregate; -pub use crate::sync_aggregator_selection_data::SyncAggregatorSelectionData; -pub use crate::sync_committee::SyncCommittee; -pub use crate::sync_committee_contribution::{SyncCommitteeContribution, SyncContributionData}; -pub use crate::sync_committee_message::SyncCommitteeMessage; -pub use crate::sync_committee_subscription::SyncCommitteeSubscription; -pub use crate::sync_duty::SyncDuty; -pub use crate::sync_selection_proof::SyncSelectionProof; -pub use crate::sync_subnet_id::SyncSubnetId; -pub use crate::validator::Validator; -pub use crate::validator_registration_data::*; -pub use crate::validator_subscription::ValidatorSubscription; -pub use crate::voluntary_exit::VoluntaryExit; -pub use crate::withdrawal::Withdrawal; -pub use crate::withdrawal_credentials::WithdrawalCredentials; -pub use crate::withdrawal_request::WithdrawalRequest; -pub use fixed_bytes::FixedBytesExtended; -pub type CommitteeIndex = u64; -pub type Hash256 = fixed_bytes::Hash256; -pub type Uint256 = fixed_bytes::Uint256; -pub type Address = fixed_bytes::Address; -pub type ForkVersion = [u8; 4]; -pub type BLSFieldElement = Uint256; -pub type Blob = FixedVector::BytesPerBlob>; -// Note on List limit: -// - Deneb to Electra: `MaxBlobCommitmentsPerBlock` -// - Fulu: `MaxCellsPerBlock` -// We choose to use a single type (with the larger value from Fulu as `N`) instead of having to -// introduce a new 
type for Fulu. This is to avoid messy conversions and having to add extra types -// with no gains - as `N` does not impact serialisation at all, and only affects merkleization, -// which we don't current do on `KzgProofs` anyway. -pub type KzgProofs = VariableList::MaxCellsPerBlock>; -pub type VersionedHash = Hash256; -pub type Hash64 = alloy_primitives::B64; +pub mod beacon_block { + pub use crate::block::{BlindedBeaconBlock, BlockImportSource}; +} + +pub mod beacon_block_body { + pub use crate::kzg_ext::{KzgCommitments, format_kzg_commitments}; +} + +pub mod beacon_state { + pub use crate::state::{ + BeaconState, BeaconStateBase, CommitteeCache, compute_committee_index_in_epoch, + compute_committee_range_in_epoch, epoch_committee_count, + }; +} + +pub mod graffiti { + pub use crate::core::GraffitiString; +} + +pub mod indexed_attestation { + pub use crate::attestation::{IndexedAttestationBase, IndexedAttestationElectra}; +} + +pub mod historical_summary { + pub use crate::state::HistoricalSummary; +} + +pub mod participation_flags { + pub use crate::attestation::ParticipationFlags; +} + +pub mod epoch_cache { + pub use crate::state::{EpochCache, EpochCacheError, EpochCacheKey}; +} + +pub mod non_zero_usize { + pub use crate::core::new_non_zero_usize; +} + +pub mod data_column_sidecar { + pub use crate::data::{ + Cell, ColumnIndex, DataColumn, DataColumnSidecar, DataColumnSidecarError, + DataColumnSidecarList, + }; +} + +pub mod builder_bid { + pub use crate::builder::*; +} + +pub mod blob_sidecar { + pub use crate::data::{ + BlobIdentifier, BlobSidecar, BlobSidecarError, BlobsList, FixedBlobSidecarList, + }; +} + +pub mod payload { + pub use crate::execution::BlockProductionVersion; +} + +pub mod execution_requests { + pub use crate::execution::{ + ConsolidationRequests, DepositRequests, ExecutionRequests, RequestType, WithdrawalRequests, + }; +} + +pub mod data_column_custody_group { + pub use crate::data::{ + CustodyIndex, compute_columns_for_custody_group, 
compute_ordered_custody_column_indices, + compute_subnets_for_node, compute_subnets_from_custody_group, get_custody_groups, + }; +} + +pub mod sync_aggregate { + pub use crate::sync_committee::SyncAggregateError as Error; +} + +pub mod light_client_update { + pub use crate::light_client::consts::{ + CURRENT_SYNC_COMMITTEE_INDEX, CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA, FINALIZED_ROOT_INDEX, + FINALIZED_ROOT_INDEX_ELECTRA, MAX_REQUEST_LIGHT_CLIENT_UPDATES, NEXT_SYNC_COMMITTEE_INDEX, + NEXT_SYNC_COMMITTEE_INDEX_ELECTRA, + }; +} + +pub mod sync_committee_contribution { + pub use crate::sync_committee::{ + SyncCommitteeContributionError as Error, SyncContributionData, + }; +} + +pub mod slot_data { + pub use crate::core::SlotData; +} + +pub mod signed_aggregate_and_proof { + pub use crate::attestation::SignedAggregateAndProofRefMut; +} + +pub mod application_domain { + pub use crate::core::ApplicationDomain; +} -pub use bls::{ - AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, - Signature, SignatureBytes, -}; -pub use context_deserialize::{ContextDeserialize, context_deserialize}; -pub use kzg::{KzgCommitment, KzgProof, VERSIONED_HASH_VERSION_KZG}; -pub use milhouse::{self, List, Vector}; -pub use ssz_types::{BitList, BitVector, FixedVector, VariableList, typenum, typenum::Unsigned}; -pub use superstruct::superstruct; +// Temporary re-exports to maintain backwards compatibility for Lighthouse. 
+pub use crate::kzg_ext::consts::VERSIONED_HASH_VERSION_KZG; +pub use crate::light_client::LightClientError as LightClientUpdateError; +pub use crate::state::BeaconStateError as Error; diff --git a/consensus/types/src/light_client/consts.rs b/consensus/types/src/light_client/consts.rs new file mode 100644 index 00000000000..0092e75e873 --- /dev/null +++ b/consensus/types/src/light_client/consts.rs @@ -0,0 +1,21 @@ +pub const FINALIZED_ROOT_PROOF_LEN: usize = 6; +pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; +pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; +pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; + +pub const FINALIZED_ROOT_PROOF_LEN_ELECTRA: usize = 7; +pub const NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; +pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; + +pub const FINALIZED_ROOT_INDEX: usize = 105; +pub const CURRENT_SYNC_COMMITTEE_INDEX: usize = 54; +pub const NEXT_SYNC_COMMITTEE_INDEX: usize = 55; +pub const EXECUTION_PAYLOAD_INDEX: usize = 25; + +pub const FINALIZED_ROOT_INDEX_ELECTRA: usize = 169; +pub const CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 86; +pub const NEXT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 87; + +// Max light client updates by range request limits +// spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/p2p-interface.md#configuration +pub const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; diff --git a/consensus/types/src/light_client/error.rs b/consensus/types/src/light_client/error.rs new file mode 100644 index 00000000000..c492cfcbde3 --- /dev/null +++ b/consensus/types/src/light_client/error.rs @@ -0,0 +1,41 @@ +use safe_arith::ArithError; + +use crate::state::BeaconStateError; + +#[derive(Debug, PartialEq, Clone)] +pub enum LightClientError { + SszTypesError(ssz_types::Error), + MilhouseError(milhouse::Error), + BeaconStateError(BeaconStateError), + ArithError(ArithError), + AltairForkNotActive, + NotEnoughSyncCommitteeParticipants, + MismatchingPeriods, + 
InvalidFinalizedBlock, + BeaconBlockBodyError, + InconsistentFork, +} + +impl From for LightClientError { + fn from(e: ssz_types::Error) -> LightClientError { + LightClientError::SszTypesError(e) + } +} + +impl From for LightClientError { + fn from(e: BeaconStateError) -> LightClientError { + LightClientError::BeaconStateError(e) + } +} + +impl From for LightClientError { + fn from(e: ArithError) -> LightClientError { + LightClientError::ArithError(e) + } +} + +impl From for LightClientError { + fn from(e: milhouse::Error) -> LightClientError { + LightClientError::MilhouseError(e) + } +} diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client/light_client_bootstrap.rs similarity index 88% rename from consensus/types/src/light_client_bootstrap.rs rename to consensus/types/src/light_client/light_client_bootstrap.rs index 80d5bbacf9e..847b2a2a963 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client/light_client_bootstrap.rs @@ -1,19 +1,30 @@ -use crate::context_deserialize; -use crate::{ - BeaconState, ChainSpec, ContextDeserialize, EthSpec, FixedVector, ForkName, Hash256, - LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderElectra, LightClientHeaderFulu, LightClientHeaderGloas, - SignedBlindedBeaconBlock, Slot, SyncCommittee, light_client_update::*, test_utils::TestRandom, -}; +use std::sync::Arc; + +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use std::sync::Arc; +use ssz_types::FixedVector; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + block::SignedBlindedBeaconBlock, + core::{ChainSpec, EthSpec, Hash256, Slot}, + fork::ForkName, + light_client::{ + CurrentSyncCommitteeProofLen, 
CurrentSyncCommitteeProofLenElectra, LightClientError, + LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, + LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, + LightClientHeaderGloas, + }, + state::BeaconState, + sync_committee::SyncCommittee, + test_utils::TestRandom, +}; + /// A LightClientBootstrap is the initializer we send over to light_client nodes /// that are trying to generate their basic storage when booting up. #[superstruct( @@ -142,53 +153,53 @@ impl LightClientBootstrap { current_sync_committee: Arc>, current_sync_committee_branch: Vec, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let light_client_bootstrap = match block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? { - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: 
LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Fulu => Self::Fulu(LightClientBootstrapFulu { header: LightClientHeaderFulu::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Gloas => Self::Gloas(LightClientBootstrapGloas { header: LightClientHeaderGloas::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), }; @@ -199,56 +210,56 @@ impl LightClientBootstrap { beacon_state: &mut BeaconState, block: &SignedBlindedBeaconBlock, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let current_sync_committee_branch = beacon_state.compute_current_sync_committee_proof()?; let current_sync_committee = beacon_state.current_sync_committee()?.clone(); let light_client_bootstrap = match block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? 
{ - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Fulu => Self::Fulu(LightClientBootstrapFulu { header: LightClientHeaderFulu::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, }), ForkName::Gloas => Self::Gloas(LightClientBootstrapGloas { header: LightClientHeaderGloas::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch: current_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + 
.map_err(LightClientError::SszTypesError)?, }), }; diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client/light_client_finality_update.rs similarity index 89% rename from consensus/types/src/light_client_finality_update.rs rename to consensus/types/src/light_client/light_client_finality_update.rs index e58d7f4d72b..04374edcd96 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client/light_client_finality_update.rs @@ -1,21 +1,27 @@ -use super::{EthSpec, FixedVector, Hash256, LightClientHeader, Slot, SyncAggregate}; -use crate::ChainSpec; -use crate::context_deserialize; -use crate::{ - ContextDeserialize, ForkName, LightClientHeaderAltair, LightClientHeaderCapella, - LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, - LightClientHeaderGloas, SignedBlindedBeaconBlock, light_client_update::*, - test_utils::TestRandom, -}; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::Decode; use ssz_derive::Encode; +use ssz_types::FixedVector; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + block::SignedBlindedBeaconBlock, + core::{ChainSpec, EthSpec, Hash256, Slot}, + fork::ForkName, + light_client::{ + FinalizedRootProofLen, FinalizedRootProofLenElectra, LightClientError, LightClientHeader, + LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, + LightClientHeaderElectra, LightClientHeaderFulu, LightClientHeaderGloas, + }, + sync_committee::SyncAggregate, + test_utils::TestRandom, +}; + #[superstruct( variants(Altair, Capella, Deneb, Electra, Fulu, Gloas), variant_attributes( @@ -103,10 +109,10 @@ impl LightClientFinalityUpdate { sync_aggregate: SyncAggregate, signature_slot: Slot, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let 
finality_update = match attested_block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? { ForkName::Altair | ForkName::Bellatrix => { Self::Altair(LightClientFinalityUpdateAltair { @@ -116,7 +122,9 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderAltair::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }) @@ -128,7 +136,9 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderCapella::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }), @@ -139,7 +149,9 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderDeneb::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }), @@ -150,7 +162,9 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderElectra::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }), @@ -161,7 +175,9 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderFulu::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, 
sync_aggregate, signature_slot, }), @@ -172,12 +188,14 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderGloas::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }), - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), }; Ok(finality_update) diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client/light_client_header.rs similarity index 91% rename from consensus/types/src/light_client_header.rs rename to consensus/types/src/light_client/light_client_header.rs index 5820efcc91b..a7ecd3b7fb2 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client/light_client_header.rs @@ -1,22 +1,27 @@ -use crate::ChainSpec; -use crate::context_deserialize; -use crate::{BeaconBlockBody, light_client_update::*}; -use crate::{BeaconBlockHeader, ExecutionPayloadHeader}; -use crate::{ContextDeserialize, ForkName}; -use crate::{ - EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, - ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderGloas, - FixedVector, Hash256, SignedBlindedBeaconBlock, test_utils::TestRandom, -}; +use std::marker::PhantomData; + +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::Decode; use ssz_derive::{Decode, Encode}; -use std::marker::PhantomData; +use ssz_types::FixedVector; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + block::{BeaconBlockBody, BeaconBlockHeader, SignedBlindedBeaconBlock}, + core::{ChainSpec, EthSpec, Hash256}, + execution::{ + ExecutionPayloadHeader, 
ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, + ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderGloas, + }, + fork::ForkName, + light_client::{ExecutionPayloadProofLen, LightClientError, consts::EXECUTION_PAYLOAD_INDEX}, + test_utils::TestRandom, +}; + #[superstruct( variants(Altair, Capella, Deneb, Electra, Fulu, Gloas), variant_attributes( @@ -85,12 +90,12 @@ impl LightClientHeader { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let header = match block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? { - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), ForkName::Altair | ForkName::Bellatrix => LightClientHeader::Altair( LightClientHeaderAltair::block_to_light_client_header(block)?, ), @@ -163,7 +168,7 @@ impl LightClientHeader { impl LightClientHeaderAltair { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { Ok(LightClientHeaderAltair { beacon: block.message().block_header(), _phantom_data: PhantomData, @@ -183,7 +188,7 @@ impl Default for LightClientHeaderAltair { impl LightClientHeaderCapella { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let payload = block .message() .execution_payload()? @@ -194,7 +199,7 @@ impl LightClientHeaderCapella { block .message() .body_capella() - .map_err(|_| Error::BeaconBlockBodyError)? + .map_err(|_| LightClientError::BeaconBlockBodyError)? .to_owned(), ); @@ -225,7 +230,7 @@ impl Default for LightClientHeaderCapella { impl LightClientHeaderDeneb { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let header = block .message() .execution_payload()? 
@@ -236,7 +241,7 @@ impl LightClientHeaderDeneb { block .message() .body_deneb() - .map_err(|_| Error::BeaconBlockBodyError)? + .map_err(|_| LightClientError::BeaconBlockBodyError)? .to_owned(), ); @@ -267,7 +272,7 @@ impl Default for LightClientHeaderDeneb { impl LightClientHeaderElectra { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let payload = block .message() .execution_payload()? @@ -278,7 +283,7 @@ impl LightClientHeaderElectra { block .message() .body_electra() - .map_err(|_| Error::BeaconBlockBodyError)? + .map_err(|_| LightClientError::BeaconBlockBodyError)? .to_owned(), ); @@ -309,7 +314,7 @@ impl Default for LightClientHeaderElectra { impl LightClientHeaderFulu { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let payload = block .message() .execution_payload()? @@ -320,7 +325,7 @@ impl LightClientHeaderFulu { block .message() .body_fulu() - .map_err(|_| Error::BeaconBlockBodyError)? + .map_err(|_| LightClientError::BeaconBlockBodyError)? .to_owned(), ); @@ -351,7 +356,7 @@ impl Default for LightClientHeaderFulu { impl LightClientHeaderGloas { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let payload = block .message() .execution_payload()? @@ -362,7 +367,7 @@ impl LightClientHeaderGloas { block .message() .body_gloas() - .map_err(|_| Error::BeaconBlockBodyError)? + .map_err(|_| LightClientError::BeaconBlockBodyError)? 
.to_owned(), ); diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client/light_client_optimistic_update.rs similarity index 94% rename from consensus/types/src/light_client_optimistic_update.rs rename to consensus/types/src/light_client/light_client_optimistic_update.rs index ca9957331f8..9266ce647a4 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client/light_client_optimistic_update.rs @@ -1,21 +1,26 @@ -use super::{ContextDeserialize, EthSpec, ForkName, LightClientHeader, Slot, SyncAggregate}; -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ - ChainSpec, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderElectra, LightClientHeaderFulu, LightClientHeaderGloas, - SignedBlindedBeaconBlock, light_client_update::*, -}; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; -use ssz_derive::Decode; -use ssz_derive::Encode; +use ssz_derive::{Decode, Encode}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::Hash256; use tree_hash_derive::TreeHash; +use crate::{ + block::SignedBlindedBeaconBlock, + core::{ChainSpec, EthSpec, Slot}, + fork::ForkName, + light_client::{ + LightClientError, LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, + LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, + LightClientHeaderGloas, + }, + sync_committee::SyncAggregate, + test_utils::TestRandom, +}; + /// A LightClientOptimisticUpdate is the update we send on each slot, /// it is based off the current unfinalized epoch is verified only against BLS signature. 
#[superstruct( @@ -79,10 +84,10 @@ impl LightClientOptimisticUpdate { sync_aggregate: SyncAggregate, signature_slot: Slot, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let optimistic_update = match attested_block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? { ForkName::Altair | ForkName::Bellatrix => { Self::Altair(LightClientOptimisticUpdateAltair { @@ -128,7 +133,7 @@ impl LightClientOptimisticUpdate { sync_aggregate, signature_slot, }), - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), }; Ok(optimistic_update) diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client/light_client_update.rs similarity index 86% rename from consensus/types/src/light_client_update.rs rename to consensus/types/src/light_client/light_client_update.rs index ede9436c50d..aa7b800cc89 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client/light_client_update.rs @@ -1,12 +1,6 @@ -use super::{EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; -use crate::LightClientHeader; -use crate::context_deserialize; -use crate::light_client_header::LightClientHeaderElectra; -use crate::{ - ChainSpec, ContextDeserialize, Epoch, ForkName, LightClientHeaderAltair, - LightClientHeaderCapella, LightClientHeaderDeneb, LightClientHeaderFulu, - LightClientHeaderGloas, SignedBlindedBeaconBlock, beacon_state, test_utils::TestRandom, -}; +use std::sync::Arc; + +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use safe_arith::ArithError; use safe_arith::SafeArith; @@ -14,20 +8,24 @@ use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::Decode; use ssz_derive::Encode; -use ssz_types::typenum::{U4, U5, U6, U7}; -use std::sync::Arc; +use ssz_types::FixedVector; use superstruct::superstruct; 
use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use typenum::{U4, U5, U6, U7}; -pub const FINALIZED_ROOT_INDEX: usize = 105; -pub const CURRENT_SYNC_COMMITTEE_INDEX: usize = 54; -pub const NEXT_SYNC_COMMITTEE_INDEX: usize = 55; -pub const EXECUTION_PAYLOAD_INDEX: usize = 25; - -pub const FINALIZED_ROOT_INDEX_ELECTRA: usize = 169; -pub const CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 86; -pub const NEXT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 87; +use crate::{ + block::SignedBlindedBeaconBlock, + core::{ChainSpec, Epoch, EthSpec, Hash256, Slot}, + fork::ForkName, + light_client::{ + LightClientError, LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, + LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, + LightClientHeaderGloas, + }, + sync_committee::{SyncAggregate, SyncCommittee}, + test_utils::TestRandom, +}; pub type FinalizedRootProofLen = U6; pub type CurrentSyncCommitteeProofLen = U5; @@ -38,64 +36,12 @@ pub type FinalizedRootProofLenElectra = U7; pub type CurrentSyncCommitteeProofLenElectra = U6; pub type NextSyncCommitteeProofLenElectra = U6; -pub const FINALIZED_ROOT_PROOF_LEN: usize = 6; -pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; -pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; -pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; - -pub const FINALIZED_ROOT_PROOF_LEN_ELECTRA: usize = 7; -pub const NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; -pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; - -pub type MerkleProof = Vec; -// Max light client updates by range request limits -// spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/p2p-interface.md#configuration -pub const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; - type FinalityBranch = FixedVector; type FinalityBranchElectra = FixedVector; type NextSyncCommitteeBranch = FixedVector; type NextSyncCommitteeBranchElectra = FixedVector; -#[derive(Debug, PartialEq, Clone)] -pub enum 
Error { - SszTypesError(ssz_types::Error), - MilhouseError(milhouse::Error), - BeaconStateError(beacon_state::Error), - ArithError(ArithError), - AltairForkNotActive, - NotEnoughSyncCommitteeParticipants, - MismatchingPeriods, - InvalidFinalizedBlock, - BeaconBlockBodyError, - InconsistentFork, -} - -impl From for Error { - fn from(e: ssz_types::Error) -> Error { - Error::SszTypesError(e) - } -} - -impl From for Error { - fn from(e: beacon_state::Error) -> Error { - Error::BeaconStateError(e) - } -} - -impl From for Error { - fn from(e: ArithError) -> Error { - Error::ArithError(e) - } -} - -impl From for Error { - fn from(e: milhouse::Error) -> Error { - Error::MilhouseError(e) - } -} - /// A LightClientUpdate is the update we request solely to either complete the bootstrapping process, /// or to sync up to the last committee period, we need to have one ready for each ALTAIR period /// we go over, note: there is no need to keep all of the updates from [ALTAIR_PERIOD, CURRENT_PERIOD]. @@ -238,12 +184,12 @@ impl LightClientUpdate { attested_block: &SignedBlindedBeaconBlock, finalized_block: Option<&SignedBlindedBeaconBlock>, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let light_client_update = match attested_block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? 
{ - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), fork_name @ ForkName::Altair | fork_name @ ForkName::Bellatrix => { let attested_header = LightClientHeaderAltair::block_to_light_client_header(attested_block)?; @@ -263,9 +209,11 @@ impl LightClientUpdate { next_sync_committee, next_sync_committee_branch: next_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -289,9 +237,11 @@ impl LightClientUpdate { next_sync_committee, next_sync_committee_branch: next_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -315,9 +265,11 @@ impl LightClientUpdate { next_sync_committee, next_sync_committee_branch: next_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -341,9 +293,11 @@ impl LightClientUpdate { next_sync_committee, next_sync_committee_branch: next_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: 
finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -367,9 +321,11 @@ impl LightClientUpdate { next_sync_committee, next_sync_committee_branch: next_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -393,9 +349,11 @@ impl LightClientUpdate { next_sync_committee, next_sync_committee_branch: next_sync_committee_branch .try_into() - .map_err(Error::SszTypesError)?, + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.try_into().map_err(Error::SszTypesError)?, + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -452,23 +410,32 @@ impl LightClientUpdate { fn attested_header_sync_committee_period( &self, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { compute_sync_committee_period_at_slot::(self.attested_header_slot(), chain_spec) - .map_err(Error::ArithError) + .map_err(LightClientError::ArithError) } - fn signature_slot_sync_committee_period(&self, chain_spec: &ChainSpec) -> Result { + fn signature_slot_sync_committee_period( + &self, + chain_spec: &ChainSpec, + ) -> Result { compute_sync_committee_period_at_slot::(*self.signature_slot(), chain_spec) - .map_err(Error::ArithError) + .map_err(LightClientError::ArithError) } - pub fn is_sync_committee_update(&self, chain_spec: &ChainSpec) -> Result { + pub fn is_sync_committee_update( + &self, + chain_spec: &ChainSpec, + ) -> Result { 
Ok(!self.is_next_sync_committee_branch_empty() && (self.attested_header_sync_committee_period(chain_spec)? == self.signature_slot_sync_committee_period(chain_spec)?)) } - pub fn has_sync_committee_finality(&self, chain_spec: &ChainSpec) -> Result { + pub fn has_sync_committee_finality( + &self, + chain_spec: &ChainSpec, + ) -> Result { Ok( compute_sync_committee_period_at_slot::(self.finalized_header_slot(), chain_spec)? == self.attested_header_sync_committee_period(chain_spec)?, @@ -482,7 +449,7 @@ impl LightClientUpdate { &self, new: &Self, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { // Compare super majority (> 2/3) sync committee participation let max_active_participants = new.sync_aggregate().sync_committee_bits.len(); @@ -606,7 +573,8 @@ fn compute_sync_committee_period_at_slot( #[cfg(test)] mod tests { use super::*; - use ssz_types::typenum::Unsigned; + use crate::light_client::consts::*; + use typenum::Unsigned; // `ssz_tests!` can only be defined once per namespace #[cfg(test)] diff --git a/consensus/types/src/light_client/mod.rs b/consensus/types/src/light_client/mod.rs new file mode 100644 index 00000000000..4e287c22942 --- /dev/null +++ b/consensus/types/src/light_client/mod.rs @@ -0,0 +1,37 @@ +mod error; +mod light_client_bootstrap; +mod light_client_finality_update; +mod light_client_header; +mod light_client_optimistic_update; +mod light_client_update; + +pub mod consts; + +pub use error::LightClientError; +pub use light_client_bootstrap::{ + LightClientBootstrap, LightClientBootstrapAltair, LightClientBootstrapCapella, + LightClientBootstrapDeneb, LightClientBootstrapElectra, LightClientBootstrapFulu, + LightClientBootstrapGloas, +}; +pub use light_client_finality_update::{ + LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientFinalityUpdateCapella, + LightClientFinalityUpdateDeneb, LightClientFinalityUpdateElectra, + LightClientFinalityUpdateFulu, LightClientFinalityUpdateGloas, +}; +pub use light_client_header::{ + 
LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, + LightClientHeaderElectra, LightClientHeaderFulu, LightClientHeaderGloas, +}; +pub use light_client_optimistic_update::{ + LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, + LightClientOptimisticUpdateCapella, LightClientOptimisticUpdateDeneb, + LightClientOptimisticUpdateElectra, LightClientOptimisticUpdateFulu, + LightClientOptimisticUpdateGloas, +}; +pub use light_client_update::{ + CurrentSyncCommitteeProofLen, CurrentSyncCommitteeProofLenElectra, ExecutionPayloadProofLen, + FinalizedRootProofLen, FinalizedRootProofLenElectra, LightClientUpdate, + LightClientUpdateAltair, LightClientUpdateCapella, LightClientUpdateDeneb, + LightClientUpdateElectra, LightClientUpdateFulu, LightClientUpdateGloas, + NextSyncCommitteeProofLen, NextSyncCommitteeProofLenElectra, +}; diff --git a/consensus/types/src/runtime_fixed_vector.rs b/consensus/types/src/runtime_fixed_vector.rs deleted file mode 100644 index f562322a3df..00000000000 --- a/consensus/types/src/runtime_fixed_vector.rs +++ /dev/null @@ -1,90 +0,0 @@ -//! Emulates a fixed size array but with the length set at runtime. -//! -//! The length of the list cannot be changed once it is set. 
- -use std::fmt; -use std::fmt::Debug; - -#[derive(Clone)] -pub struct RuntimeFixedVector { - vec: Vec, - len: usize, -} - -impl Debug for RuntimeFixedVector { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?} (len={})", self.vec, self.len) - } -} - -impl RuntimeFixedVector { - pub fn new(vec: Vec) -> Self { - let len = vec.len(); - Self { vec, len } - } - - pub fn to_vec(&self) -> Vec { - self.vec.clone() - } - - pub fn as_slice(&self) -> &[T] { - self.vec.as_slice() - } - - #[allow(clippy::len_without_is_empty)] - pub fn len(&self) -> usize { - self.len - } - - pub fn into_vec(self) -> Vec { - self.vec - } - - pub fn default(max_len: usize) -> Self { - Self { - vec: vec![T::default(); max_len], - len: max_len, - } - } - - pub fn take(&mut self) -> Self { - let new = std::mem::take(&mut self.vec); - *self = Self::new(vec![T::default(); self.len]); - Self { - vec: new, - len: self.len, - } - } -} - -impl std::ops::Deref for RuntimeFixedVector { - type Target = [T]; - - fn deref(&self) -> &[T] { - &self.vec[..] - } -} - -impl std::ops::DerefMut for RuntimeFixedVector { - fn deref_mut(&mut self) -> &mut [T] { - &mut self.vec[..] 
- } -} - -impl IntoIterator for RuntimeFixedVector { - type Item = T; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.vec.into_iter() - } -} - -impl<'a, T> IntoIterator for &'a RuntimeFixedVector { - type Item = &'a T; - type IntoIter = std::slice::Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.vec.iter() - } -} diff --git a/consensus/types/src/runtime_var_list.rs b/consensus/types/src/runtime_var_list.rs deleted file mode 100644 index e7b846029ef..00000000000 --- a/consensus/types/src/runtime_var_list.rs +++ /dev/null @@ -1,387 +0,0 @@ -use crate::ContextDeserialize; -use educe::Educe; -use serde::de::Error as DeError; -use serde::{Deserialize, Deserializer, Serialize}; -use ssz::Decode; -use ssz_types::Error; -use std::fmt; -use std::fmt::Debug; -use std::ops::{Deref, Index, IndexMut}; -use std::slice::SliceIndex; -use tree_hash::{Hash256, MerkleHasher, PackedEncoding, TreeHash, TreeHashType}; - -/// Emulates a SSZ `List`. -/// -/// An ordered, heap-allocated, variable-length, homogeneous collection of `T`, with no more than -/// `max_len` values. -/// -/// To ensure there are no inconsistent states, we do not allow any mutating operation if `max_len` is not set. -/// -/// ## Example -/// -/// ``` -/// use types::{RuntimeVariableList}; -/// -/// let base: Vec = vec![1, 2, 3, 4]; -/// -/// // Create a `RuntimeVariableList` from a `Vec` that has the expected length. -/// let exact: RuntimeVariableList<_> = RuntimeVariableList::new(base.clone(), 4).unwrap(); -/// assert_eq!(&exact[..], &[1, 2, 3, 4]); -/// -/// // Create a `RuntimeVariableList` from a `Vec` that is too long you'll get an error. -/// let err = RuntimeVariableList::new(base.clone(), 3).unwrap_err(); -/// assert_eq!(err, ssz_types::Error::OutOfBounds { i: 4, len: 3 }); -/// -/// // Create a `RuntimeVariableList` from a `Vec` that is shorter than the maximum. 
-/// let mut long: RuntimeVariableList<_> = RuntimeVariableList::new(base, 5).unwrap(); -/// assert_eq!(&long[..], &[1, 2, 3, 4]); -/// -/// // Push a value to if it does not exceed the maximum -/// long.push(5).unwrap(); -/// assert_eq!(&long[..], &[1, 2, 3, 4, 5]); -/// -/// // Push a value to if it _does_ exceed the maximum. -/// assert!(long.push(6).is_err()); -/// -/// ``` -#[derive(Clone, Serialize, Deserialize, Educe)] -#[educe(PartialEq, Eq, Hash(bound(T: std::hash::Hash)))] -#[serde(transparent)] -pub struct RuntimeVariableList { - vec: Vec, - #[serde(skip)] - max_len: usize, -} - -impl Debug for RuntimeVariableList { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?} (max_len={})", self.vec, self.max_len) - } -} - -impl RuntimeVariableList { - /// Returns `Ok` if the given `vec` equals the fixed length of `Self`. Otherwise returns - /// `Err(OutOfBounds { .. })`. - pub fn new(vec: Vec, max_len: usize) -> Result { - if vec.len() <= max_len { - Ok(Self { vec, max_len }) - } else { - Err(Error::OutOfBounds { - i: vec.len(), - len: max_len, - }) - } - } - - /// Create an empty list with the given `max_len`. - pub fn empty(max_len: usize) -> Self { - Self { - vec: vec![], - max_len, - } - } - - pub fn as_slice(&self) -> &[T] { - self.vec.as_slice() - } - - pub fn as_mut_slice(&mut self) -> &mut [T] { - self.vec.as_mut_slice() - } - - /// Returns the number of values presently in `self`. - pub fn len(&self) -> usize { - self.vec.len() - } - - /// True if `self` does not contain any values. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the type-level maximum length. - /// - /// Returns `None` if self is uninitialized with a max_len. - pub fn max_len(&self) -> usize { - self.max_len - } - - /// Appends `value` to the back of `self`. - /// - /// Returns `Err(())` when appending `value` would exceed the maximum length. 
- pub fn push(&mut self, value: T) -> Result<(), Error> { - if self.vec.len() < self.max_len { - self.vec.push(value); - Ok(()) - } else { - Err(Error::OutOfBounds { - i: self.vec.len().saturating_add(1), - len: self.max_len, - }) - } - } -} - -impl RuntimeVariableList { - pub fn from_ssz_bytes(bytes: &[u8], max_len: usize) -> Result { - let vec = if bytes.is_empty() { - vec![] - } else if ::is_ssz_fixed_len() { - let num_items = bytes - .len() - .checked_div(::ssz_fixed_len()) - .ok_or(ssz::DecodeError::ZeroLengthItem)?; - - if num_items > max_len { - return Err(ssz::DecodeError::BytesInvalid(format!( - "RuntimeVariableList of {} items exceeds maximum of {}", - num_items, max_len - ))); - } - - bytes.chunks(::ssz_fixed_len()).try_fold( - Vec::with_capacity(num_items), - |mut vec, chunk| { - vec.push(::from_ssz_bytes(chunk)?); - Ok(vec) - }, - )? - } else { - ssz::decode_list_of_variable_length_items(bytes, Some(max_len))? - }; - Ok(Self { vec, max_len }) - } -} - -impl From> for Vec { - fn from(list: RuntimeVariableList) -> Vec { - list.vec - } -} - -impl> Index for RuntimeVariableList { - type Output = I::Output; - - #[inline] - fn index(&self, index: I) -> &Self::Output { - Index::index(&self.vec, index) - } -} - -impl> IndexMut for RuntimeVariableList { - #[inline] - fn index_mut(&mut self, index: I) -> &mut Self::Output { - IndexMut::index_mut(&mut self.vec, index) - } -} - -impl Deref for RuntimeVariableList { - type Target = [T]; - - fn deref(&self) -> &[T] { - &self.vec[..] 
- } -} - -impl<'a, T> IntoIterator for &'a RuntimeVariableList { - type Item = &'a T; - type IntoIter = std::slice::Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl IntoIterator for RuntimeVariableList { - type Item = T; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.vec.into_iter() - } -} - -impl ssz::Encode for RuntimeVariableList -where - T: ssz::Encode, -{ - fn is_ssz_fixed_len() -> bool { - >::is_ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.vec.ssz_append(buf) - } - - fn ssz_fixed_len() -> usize { - >::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - self.vec.ssz_bytes_len() - } -} - -impl<'de, C, T> ContextDeserialize<'de, (C, usize)> for RuntimeVariableList -where - T: ContextDeserialize<'de, C>, - C: Clone, -{ - fn context_deserialize(deserializer: D, context: (C, usize)) -> Result - where - D: Deserializer<'de>, - { - // first parse out a Vec using the Vec impl you already have - let vec: Vec = Vec::context_deserialize(deserializer, context.0)?; - let vec_len = vec.len(); - RuntimeVariableList::new(vec, context.1).map_err(|e| { - DeError::custom(format!( - "RuntimeVariableList length {} exceeds max_len {}: {e:?}", - vec_len, context.1, - )) - }) - } -} - -impl TreeHash for RuntimeVariableList { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let root = runtime_vec_tree_hash_root::(&self.vec, self.max_len); - - tree_hash::mix_in_length(&root, self.len()) - } -} - -// We can delete this once the upstream `vec_tree_hash_root` is modified to use a runtime max len. 
-pub fn runtime_vec_tree_hash_root(vec: &[T], max_len: usize) -> Hash256 -where - T: TreeHash, -{ - match T::tree_hash_type() { - TreeHashType::Basic => { - let mut hasher = - MerkleHasher::with_leaves(max_len.div_ceil(T::tree_hash_packing_factor())); - - for item in vec { - hasher - .write(&item.tree_hash_packed_encoding()) - .expect("ssz_types variable vec should not contain more elements than max"); - } - - hasher - .finish() - .expect("ssz_types variable vec should not have a remaining buffer") - } - TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => { - let mut hasher = MerkleHasher::with_leaves(max_len); - - for item in vec { - hasher - .write(item.tree_hash_root().as_slice()) - .expect("ssz_types vec should not contain more elements than max"); - } - - hasher - .finish() - .expect("ssz_types vec should not have a remaining buffer") - } - } -} - -#[cfg(test)] -mod test { - use super::*; - use ssz::*; - use std::fmt::Debug; - - #[test] - fn new() { - let vec = vec![42; 5]; - let fixed: Result, _> = RuntimeVariableList::new(vec, 4); - assert!(fixed.is_err()); - - let vec = vec![42; 3]; - let fixed: Result, _> = RuntimeVariableList::new(vec, 4); - assert!(fixed.is_ok()); - - let vec = vec![42; 4]; - let fixed: Result, _> = RuntimeVariableList::new(vec, 4); - assert!(fixed.is_ok()); - } - - #[test] - fn indexing() { - let vec = vec![1, 2]; - - let mut fixed: RuntimeVariableList = - RuntimeVariableList::new(vec.clone(), 8192).unwrap(); - - assert_eq!(fixed[0], 1); - assert_eq!(&fixed[0..1], &vec[0..1]); - assert_eq!(fixed[..].len(), 2); - - fixed[1] = 3; - assert_eq!(fixed[1], 3); - } - - #[test] - fn length() { - // Too long. 
- let vec = vec![42; 5]; - let err = RuntimeVariableList::::new(vec.clone(), 4).unwrap_err(); - assert_eq!(err, Error::OutOfBounds { i: 5, len: 4 }); - - let vec = vec![42; 3]; - let fixed: RuntimeVariableList = RuntimeVariableList::new(vec.clone(), 4).unwrap(); - assert_eq!(&fixed[0..3], &vec[..]); - assert_eq!(&fixed[..], &vec![42, 42, 42][..]); - - let vec = vec![]; - let fixed: RuntimeVariableList = RuntimeVariableList::new(vec, 4).unwrap(); - assert_eq!(&fixed[..], &[] as &[u64]); - } - - #[test] - fn deref() { - let vec = vec![0, 2, 4, 6]; - let fixed: RuntimeVariableList = RuntimeVariableList::new(vec, 4).unwrap(); - - assert_eq!(fixed.first(), Some(&0)); - assert_eq!(fixed.get(3), Some(&6)); - assert_eq!(fixed.get(4), None); - } - - #[test] - fn encode() { - let vec: RuntimeVariableList = RuntimeVariableList::new(vec![0; 2], 2).unwrap(); - assert_eq!(vec.as_ssz_bytes(), vec![0, 0, 0, 0]); - assert_eq!( as Encode>::ssz_fixed_len(), 4); - } - - fn round_trip(item: RuntimeVariableList) { - let max_len = item.max_len(); - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!( - RuntimeVariableList::from_ssz_bytes(encoded, max_len), - Ok(item) - ); - } - - #[test] - fn u16_len_8() { - round_trip::(RuntimeVariableList::new(vec![42; 8], 8).unwrap()); - round_trip::(RuntimeVariableList::new(vec![0; 8], 8).unwrap()); - } -} diff --git a/consensus/types/src/attester_slashing.rs b/consensus/types/src/slashing/attester_slashing.rs similarity index 96% rename from consensus/types/src/attester_slashing.rs rename to consensus/types/src/slashing/attester_slashing.rs index 2bfb65653c6..5c214b35f74 100644 --- a/consensus/types/src/attester_slashing.rs +++ b/consensus/types/src/slashing/attester_slashing.rs @@ -1,9 +1,4 @@ -use crate::context_deserialize; -use crate::indexed_attestation::{ - IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef, -}; -use crate::{ContextDeserialize, ForkName}; -use 
crate::{EthSpec, test_utils::TestRandom}; +use context_deserialize::{ContextDeserialize, context_deserialize}; use educe::Educe; use rand::{Rng, RngCore}; use serde::{Deserialize, Deserializer, Serialize}; @@ -12,6 +7,13 @@ use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::{IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef}, + core::EthSpec, + fork::ForkName, + test_utils::TestRandom, +}; + #[superstruct( variants(Base, Electra), variant_attributes( diff --git a/consensus/types/src/slashing/mod.rs b/consensus/types/src/slashing/mod.rs new file mode 100644 index 00000000000..551b8e31377 --- /dev/null +++ b/consensus/types/src/slashing/mod.rs @@ -0,0 +1,8 @@ +mod attester_slashing; +mod proposer_slashing; + +pub use attester_slashing::{ + AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, AttesterSlashingOnDisk, + AttesterSlashingRef, AttesterSlashingRefOnDisk, +}; +pub use proposer_slashing::ProposerSlashing; diff --git a/consensus/types/src/proposer_slashing.rs b/consensus/types/src/slashing/proposer_slashing.rs similarity index 86% rename from consensus/types/src/proposer_slashing.rs rename to consensus/types/src/slashing/proposer_slashing.rs index f4d914c1e59..697bd1a9aa5 100644 --- a/consensus/types/src/proposer_slashing.rs +++ b/consensus/types/src/slashing/proposer_slashing.rs @@ -1,12 +1,11 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ForkName, SignedBeaconBlockHeader}; - +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{block::SignedBeaconBlockHeader, fork::ForkName, test_utils::TestRandom}; + /// Two conflicting proposals from the same proposer (validator). 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/activation_queue.rs b/consensus/types/src/state/activation_queue.rs similarity index 95% rename from consensus/types/src/activation_queue.rs rename to consensus/types/src/state/activation_queue.rs index dd3ce5f88cb..0d920a20cf0 100644 --- a/consensus/types/src/activation_queue.rs +++ b/consensus/types/src/state/activation_queue.rs @@ -1,6 +1,10 @@ -use crate::{ChainSpec, Epoch, Validator}; use std::collections::BTreeSet; +use crate::{ + core::{ChainSpec, Epoch}, + validator::Validator, +}; + /// Activation queue computed during epoch processing for use in the *next* epoch. #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Debug, PartialEq, Eq, Default, Clone)] diff --git a/consensus/types/src/beacon_state/balance.rs b/consensus/types/src/state/balance.rs similarity index 100% rename from consensus/types/src/beacon_state/balance.rs rename to consensus/types/src/state/balance.rs diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/state/beacon_state.rs similarity index 88% rename from consensus/types/src/beacon_state.rs rename to consensus/types/src/state/beacon_state.rs index d13e2235574..f36c02ce6bd 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -1,49 +1,56 @@ -use self::committee_cache::get_active_validator_indices; -use crate::ContextDeserialize; -use crate::FixedBytesExtended; -use crate::historical_summary::HistoricalSummary; -use crate::test_utils::TestRandom; -use crate::*; +use std::{fmt, hash::Hash, mem, sync::Arc}; + +use bls::{AggregatePublicKey, PublicKeyBytes, Signature}; use compare_fields::CompareFields; +use context_deserialize::ContextDeserialize; use educe::Educe; use ethereum_hashing::hash; +use fixed_bytes::FixedBytesExtended; use int_to_bytes::{int_to_bytes4, int_to_bytes8}; use metastruct::{NumFields, metastruct}; -pub use pubkey_cache::PubkeyCache; +use milhouse::{List, Vector}; use 
safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, DecodeError, Encode, ssz_encode}; use ssz_derive::{Decode, Encode}; -use std::hash::Hash; -use std::{fmt, mem, sync::Arc}; +use ssz_types::{BitVector, FixedVector}; use superstruct::superstruct; use swap_or_not_shuffle::compute_shuffled_index; use test_random_derive::TestRandom; +use tracing::instrument; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use typenum::Unsigned; -pub use self::committee_cache::{ - CommitteeCache, compute_committee_index_in_epoch, compute_committee_range_in_epoch, - epoch_committee_count, +use crate::{ + attestation::{ + AttestationDuty, BeaconCommittee, Checkpoint, CommitteeIndex, ParticipationFlags, + PendingAttestation, + }, + block::{BeaconBlock, BeaconBlockHeader, SignedBeaconBlockHash}, + consolidation::PendingConsolidation, + core::{ChainSpec, Domain, Epoch, EthSpec, Hash256, RelativeEpoch, RelativeEpochError, Slot}, + deposit::PendingDeposit, + execution::{ + Eth1Data, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + ExecutionPayloadHeaderGloas, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, + }, + fork::{Fork, ForkName, ForkVersionDecode, InconsistentFork, map_fork_name}, + light_client::consts::{ + CURRENT_SYNC_COMMITTEE_INDEX, CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA, FINALIZED_ROOT_INDEX, + FINALIZED_ROOT_INDEX_ELECTRA, NEXT_SYNC_COMMITTEE_INDEX, NEXT_SYNC_COMMITTEE_INDEX_ELECTRA, + }, + state::{ + BlockRootsIter, CommitteeCache, EpochCache, EpochCacheError, ExitCache, HistoricalBatch, + HistoricalSummary, ProgressiveBalancesCache, PubkeyCache, SlashingsCache, + get_active_validator_indices, + }, + sync_committee::{SyncCommittee, SyncDuty}, + test_utils::TestRandom, + validator::Validator, + withdrawal::PendingPartialWithdrawal, }; -pub use crate::beacon_state::balance::Balance; -pub use 
crate::beacon_state::exit_cache::ExitCache; -pub use crate::beacon_state::progressive_balances_cache::*; -pub use crate::beacon_state::slashings_cache::SlashingsCache; -pub use eth_spec::*; -pub use iter::BlockRootsIter; -pub use milhouse::{List, Vector, interface::Interface}; -use tracing::instrument; - -#[macro_use] -mod committee_cache; -mod balance; -mod exit_cache; -mod iter; -mod progressive_balances_cache; -mod pubkey_cache; -mod slashings_cache; -mod tests; pub const CACHED_EPOCHS: usize = 3; const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1; @@ -53,7 +60,7 @@ pub type Validators = List::ValidatorRegistryLimit> pub type Balances = List::ValidatorRegistryLimit>; #[derive(Debug, PartialEq, Clone)] -pub enum Error { +pub enum BeaconStateError { /// A state for a different hard-fork was required -- a severe logic error. IncorrectStateVariant, EpochOutOfBounds, @@ -197,7 +204,7 @@ enum AllowNextEpoch { } impl AllowNextEpoch { - fn upper_bound_of(self, current_epoch: Epoch) -> Result { + fn upper_bound_of(self, current_epoch: Epoch) -> Result { match self { AllowNextEpoch::True => Ok(current_epoch.safe_add(1)?), AllowNextEpoch::False => Ok(current_epoch), @@ -378,8 +385,14 @@ impl From for Hash256 { num_fields(all()), )) ), - cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), map_ref_mut_into(BeaconStateRef) )] #[cfg_attr( @@ -740,11 +753,11 @@ impl BeaconState { } /// Returns the `tree_hash_root` of the state. 
- pub fn canonical_root(&mut self) -> Result { + pub fn canonical_root(&mut self) -> Result { self.update_tree_hash_cache() } - pub fn historical_batch(&mut self) -> Result, Error> { + pub fn historical_batch(&mut self) -> Result, BeaconStateError> { // Updating before cloning makes the clone cheap and saves repeated hashing. self.block_roots_mut().apply_updates()?; self.state_roots_mut().apply_updates()?; @@ -758,7 +771,10 @@ impl BeaconState { /// This method ensures the state's pubkey cache is fully up-to-date before checking if the validator /// exists in the registry. If a validator pubkey exists in the validator registry, returns `Some(i)`, /// otherwise returns `None`. - pub fn get_validator_index(&mut self, pubkey: &PublicKeyBytes) -> Result, Error> { + pub fn get_validator_index( + &mut self, + pubkey: &PublicKeyBytes, + ) -> Result, BeaconStateError> { self.update_pubkey_cache()?; Ok(self.pubkey_cache().get(pubkey)) } @@ -783,7 +799,7 @@ impl BeaconState { /// The epoch following `self.current_epoch()`. /// /// Spec v0.12.1 - pub fn next_epoch(&self) -> Result { + pub fn next_epoch(&self) -> Result { Ok(self.current_epoch().safe_add(1)?) } @@ -792,7 +808,7 @@ impl BeaconState { /// Makes use of the committee cache and will fail if no cache exists for the slot's epoch. /// /// Spec v0.12.1 - pub fn get_committee_count_at_slot(&self, slot: Slot) -> Result { + pub fn get_committee_count_at_slot(&self, slot: Slot) -> Result { let cache = self.committee_cache_at_slot(slot)?; Ok(cache.committees_per_slot()) } @@ -800,7 +816,10 @@ impl BeaconState { /// Compute the number of committees in an entire epoch. 
/// /// Spec v0.12.1 - pub fn get_epoch_committee_count(&self, relative_epoch: RelativeEpoch) -> Result { + pub fn get_epoch_committee_count( + &self, + relative_epoch: RelativeEpoch, + ) -> Result { let cache = self.committee_cache(relative_epoch)?; Ok(cache.epoch_committee_count() as u64) } @@ -813,7 +832,7 @@ impl BeaconState { pub fn get_cached_active_validator_indices( &self, relative_epoch: RelativeEpoch, - ) -> Result<&[usize], Error> { + ) -> Result<&[usize], BeaconStateError> { let cache = self.committee_cache(relative_epoch)?; Ok(cache.active_validator_indices()) @@ -826,7 +845,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { if epoch >= self.compute_activation_exit_epoch(self.current_epoch(), spec)? { Err(BeaconStateError::EpochOutOfBounds) } else { @@ -839,7 +858,10 @@ impl BeaconState { /// Note: the indices are shuffled (i.e., not in ascending order). /// /// Returns an error if that epoch is not cached, or the cache is not initialized. - pub fn get_shuffling(&self, relative_epoch: RelativeEpoch) -> Result<&[usize], Error> { + pub fn get_shuffling( + &self, + relative_epoch: RelativeEpoch, + ) -> Result<&[usize], BeaconStateError> { let cache = self.committee_cache(relative_epoch)?; Ok(cache.shuffling()) @@ -854,14 +876,14 @@ impl BeaconState { &self, slot: Slot, index: CommitteeIndex, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { let epoch = slot.epoch(E::slots_per_epoch()); let relative_epoch = RelativeEpoch::from_epoch(self.current_epoch(), epoch)?; let cache = self.committee_cache(relative_epoch)?; cache .get_beacon_committee(slot, index) - .ok_or(Error::NoCommittee { slot, index }) + .ok_or(BeaconStateError::NoCommittee { slot, index }) } /// Get all of the Beacon committees at a given slot. 
@@ -872,7 +894,7 @@ impl BeaconState { pub fn get_beacon_committees_at_slot( &self, slot: Slot, - ) -> Result>, Error> { + ) -> Result>, BeaconStateError> { let cache = self.committee_cache_at_slot(slot)?; cache.get_beacon_committees_at_slot(slot) } @@ -885,7 +907,7 @@ impl BeaconState { pub fn get_beacon_committees_at_epoch( &self, relative_epoch: RelativeEpoch, - ) -> Result>, Error> { + ) -> Result>, BeaconStateError> { let cache = self.committee_cache(relative_epoch)?; cache.get_all_beacon_committees() } @@ -901,7 +923,7 @@ impl BeaconState { epoch: Epoch, block_root: Hash256, spec: &ChainSpec, - ) -> Result { + ) -> Result { let decision_slot = spec.proposer_shuffling_decision_slot::(epoch); if self.slot() <= decision_slot { Ok(block_root) @@ -917,7 +939,7 @@ impl BeaconState { &self, epoch: Epoch, head_block_root: Hash256, - ) -> Result { + ) -> Result { let decision_slot = epoch.saturating_sub(1u64).end_slot(E::slots_per_epoch()); if self.slot() <= decision_slot { Ok(head_block_root) @@ -937,11 +959,14 @@ impl BeaconState { &self, block_root: Hash256, spec: &ChainSpec, - ) -> Result { + ) -> Result { self.proposer_shuffling_decision_root_at_epoch(self.current_epoch(), block_root, spec) } - pub fn epoch_cache_decision_root(&self, block_root: Hash256) -> Result { + pub fn epoch_cache_decision_root( + &self, + block_root: Hash256, + ) -> Result { // Epoch cache decision root for the current epoch (N) is the block root at the end of epoch // N - 1. This is the same as the root that determines the next epoch attester shuffling. 
self.attester_shuffling_decision_root(block_root, RelativeEpoch::Next) @@ -958,7 +983,7 @@ impl BeaconState { &self, block_root: Hash256, relative_epoch: RelativeEpoch, - ) -> Result { + ) -> Result { let decision_slot = self.attester_shuffling_decision_slot(relative_epoch); if self.slot() == decision_slot { Ok(block_root) @@ -985,9 +1010,9 @@ impl BeaconState { indices: &[usize], seed: &[u8], spec: &ChainSpec, - ) -> Result { + ) -> Result { if indices.is_empty() { - return Err(Error::InsufficientValidators); + return Err(BeaconStateError::InsufficientValidators); } let max_effective_balance = spec.max_effective_balance_for_fork(self.fork_name_unchecked()); @@ -1005,10 +1030,10 @@ impl BeaconState { seed, spec.shuffle_round_count, ) - .ok_or(Error::UnableToShuffle)?; + .ok_or(BeaconStateError::UnableToShuffle)?; let candidate_index = *indices .get(shuffled_index) - .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(shuffled_index))?; let random_value = self.shuffling_random_value(i, seed)?; let effective_balance = self.get_effective_balance(candidate_index)?; if effective_balance.safe_mul(max_random_value)? @@ -1027,11 +1052,11 @@ impl BeaconState { seed: &[u8], indices: &[usize], spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { // Regardless of fork, we never support computing proposer indices for past epochs. let current_epoch = self.current_epoch(); if epoch < current_epoch { - return Err(Error::ComputeProposerIndicesPastEpoch { + return Err(BeaconStateError::ComputeProposerIndicesPastEpoch { current_epoch, request_epoch: epoch, }); @@ -1050,17 +1075,19 @@ impl BeaconState { if self.fork_name_unchecked().fulu_enabled() && epoch < current_epoch.safe_add(spec.min_seed_lookahead)? 
{ - return Err(Error::ComputeProposerIndicesInsufficientLookahead { - current_epoch, - request_epoch: epoch, - }); + return Err( + BeaconStateError::ComputeProposerIndicesInsufficientLookahead { + current_epoch, + request_epoch: epoch, + }, + ); } } else { // Pre-Fulu the situation is reversed, we *should not* compute proposer indices using // too much lookahead. To do so would make us vulnerable to changes in the proposer // indices caused by effective balance changes. if epoch >= current_epoch.safe_add(spec.min_seed_lookahead)? { - return Err(Error::ComputeProposerIndicesExcessiveLookahead { + return Err(BeaconStateError::ComputeProposerIndicesExcessiveLookahead { current_epoch, request_epoch: epoch, }); @@ -1083,7 +1110,7 @@ impl BeaconState { /// In Electra and later, the random value is a 16-bit integer stored in a `u64`. /// /// Prior to Electra, the random value is an 8-bit integer stored in a `u64`. - fn shuffling_random_value(&self, i: usize, seed: &[u8]) -> Result { + fn shuffling_random_value(&self, i: usize, seed: &[u8]) -> Result { if self.fork_name_unchecked().electra_enabled() { Self::shuffling_random_u16_electra(i, seed).map(u64::from) } else { @@ -1094,37 +1121,39 @@ impl BeaconState { /// Get a random byte from the given `seed`. /// /// Used by the proposer & sync committee selection functions. - fn shuffling_random_byte(i: usize, seed: &[u8]) -> Result { + fn shuffling_random_byte(i: usize, seed: &[u8]) -> Result { let mut preimage = seed.to_vec(); preimage.append(&mut int_to_bytes8(i.safe_div(32)? as u64)); let index = i.safe_rem(32)?; hash(&preimage) .get(index) .copied() - .ok_or(Error::ShuffleIndexOutOfBounds(index)) + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(index)) } /// Get two random bytes from the given `seed`. /// /// This is used in place of `shuffling_random_byte` from Electra onwards. 
- fn shuffling_random_u16_electra(i: usize, seed: &[u8]) -> Result { + fn shuffling_random_u16_electra(i: usize, seed: &[u8]) -> Result { let mut preimage = seed.to_vec(); preimage.append(&mut int_to_bytes8(i.safe_div(16)? as u64)); let offset = i.safe_rem(16)?.safe_mul(2)?; hash(&preimage) .get(offset..offset.safe_add(2)?) - .ok_or(Error::ShuffleIndexOutOfBounds(offset))? + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(offset))? .try_into() .map(u16::from_le_bytes) - .map_err(|_| Error::ShuffleIndexOutOfBounds(offset)) + .map_err(|_| BeaconStateError::ShuffleIndexOutOfBounds(offset)) } /// Convenience accessor for the `execution_payload_header` as an `ExecutionPayloadHeaderRef`. pub fn latest_execution_payload_header( &self, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { match self { - BeaconState::Base(_) | BeaconState::Altair(_) => Err(Error::IncorrectStateVariant), + BeaconState::Base(_) | BeaconState::Altair(_) => { + Err(BeaconStateError::IncorrectStateVariant) + } BeaconState::Bellatrix(state) => Ok(ExecutionPayloadHeaderRef::Bellatrix( &state.latest_execution_payload_header, )), @@ -1148,9 +1177,11 @@ impl BeaconState { pub fn latest_execution_payload_header_mut( &mut self, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { match self { - BeaconState::Base(_) | BeaconState::Altair(_) => Err(Error::IncorrectStateVariant), + BeaconState::Base(_) | BeaconState::Altair(_) => { + Err(BeaconStateError::IncorrectStateVariant) + } BeaconState::Bellatrix(state) => Ok(ExecutionPayloadHeaderRefMut::Bellatrix( &mut state.latest_execution_payload_header, )), @@ -1181,7 +1212,7 @@ impl BeaconState { index: CommitteeIndex, slot_signature: &Signature, spec: &ChainSpec, - ) -> Result { + ) -> Result { let committee = self.get_beacon_committee(slot, index)?; let modulo = std::cmp::max( 1, @@ -1192,7 +1223,7 @@ impl BeaconState { signature_hash .get(0..8) .and_then(|bytes| bytes.try_into().ok()) - .ok_or(Error::IsAggregatorOutOfBounds)?, + 
.ok_or(BeaconStateError::IsAggregatorOutOfBounds)?, ); Ok(signature_hash_int.safe_rem(modulo)? == 0) @@ -1201,13 +1232,17 @@ impl BeaconState { /// Returns the beacon proposer index for the `slot` in `self.current_epoch()`. /// /// Spec v1.6.0-alpha.1 - pub fn get_beacon_proposer_index(&self, slot: Slot, spec: &ChainSpec) -> Result { + pub fn get_beacon_proposer_index( + &self, + slot: Slot, + spec: &ChainSpec, + ) -> Result { // Proposer indices are only known for the current epoch, due to the dependence on the // effective balances of validators, which change at every epoch transition. let epoch = slot.epoch(E::slots_per_epoch()); // TODO(EIP-7917): Explore allowing this function to be called with a slot one epoch in the future. if epoch != self.current_epoch() { - return Err(Error::SlotOutOfBounds); + return Err(BeaconStateError::SlotOutOfBounds); } if let Ok(proposer_lookahead) = self.proposer_lookahead() { @@ -1215,7 +1250,7 @@ impl BeaconState { let index = slot.as_usize().safe_rem(E::slots_per_epoch() as usize)?; proposer_lookahead .get(index) - .ok_or(Error::ProposerLookaheadOutOfBounds { i: index }) + .ok_or(BeaconStateError::ProposerLookaheadOutOfBounds { i: index }) .map(|index| *index as usize) } else { // Pre-Fulu @@ -1233,7 +1268,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { // This isn't in the spec, but we remove the footgun that is requesting the current epoch // for a Fulu state. if let Ok(proposer_lookahead) = self.proposer_lookahead() @@ -1263,7 +1298,11 @@ impl BeaconState { /// Compute the seed to use for the beacon proposer selection at the given `slot`. 
/// /// Spec v0.12.1 - pub fn get_beacon_proposer_seed(&self, slot: Slot, spec: &ChainSpec) -> Result, Error> { + pub fn get_beacon_proposer_seed( + &self, + slot: Slot, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { let epoch = slot.epoch(E::slots_per_epoch()); let mut preimage = self .get_seed(epoch, Domain::BeaconProposer, spec)? @@ -1278,7 +1317,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result<&Arc>, Error> { + ) -> Result<&Arc>, BeaconStateError> { let sync_committee_period = epoch.sync_committee_period(spec)?; let current_sync_committee_period = self.current_epoch().sync_committee_period(spec)?; let next_sync_committee_period = current_sync_committee_period.safe_add(1)?; @@ -1288,7 +1327,7 @@ impl BeaconState { } else if sync_committee_period == next_sync_committee_period { self.next_sync_committee() } else { - Err(Error::SyncCommitteeNotKnown { + Err(BeaconStateError::SyncCommitteeNotKnown { current_epoch: self.current_epoch(), epoch, }) @@ -1299,7 +1338,7 @@ impl BeaconState { pub fn get_sync_committee_indices( &mut self, sync_committee: &SyncCommittee, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { self.update_pubkey_cache()?; sync_committee .pubkeys @@ -1307,13 +1346,16 @@ impl BeaconState { .map(|pubkey| { self.pubkey_cache() .get(pubkey) - .ok_or(Error::PubkeyCacheInconsistent) + .ok_or(BeaconStateError::PubkeyCacheInconsistent) }) .collect() } /// Compute the sync committee indices for the next sync committee. 
- fn get_next_sync_committee_indices(&self, spec: &ChainSpec) -> Result, Error> { + fn get_next_sync_committee_indices( + &self, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { let epoch = self.current_epoch().safe_add(1)?; let active_validator_indices = self.get_active_validator_indices(epoch, spec)?; @@ -1336,10 +1378,10 @@ impl BeaconState { seed.as_slice(), spec.shuffle_round_count, ) - .ok_or(Error::UnableToShuffle)?; + .ok_or(BeaconStateError::UnableToShuffle)?; let candidate_index = *active_validator_indices .get(shuffled_index) - .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(shuffled_index))?; let random_value = self.shuffling_random_value(i, seed.as_slice())?; let effective_balance = self.get_validator(candidate_index)?.effective_balance; if effective_balance.safe_mul(max_random_value)? @@ -1353,7 +1395,10 @@ impl BeaconState { } /// Compute the next sync committee. - pub fn get_next_sync_committee(&self, spec: &ChainSpec) -> Result, Error> { + pub fn get_next_sync_committee( + &self, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { let sync_committee_indices = self.get_next_sync_committee_indices(spec)?; let pubkeys = sync_committee_indices @@ -1362,7 +1407,7 @@ impl BeaconState { self.validators() .get(index) .map(|v| v.pubkey) - .ok_or(Error::UnknownValidator(index)) + .ok_or(BeaconStateError::UnknownValidator(index)) }) .collect::, _>>()?; let decompressed_pubkeys = pubkeys @@ -1386,7 +1431,7 @@ impl BeaconState { epoch: Epoch, validator_indices: &[u64], spec: &ChainSpec, - ) -> Result, Error>>, Error> { + ) -> Result, BeaconStateError>>, BeaconStateError> { let sync_committee = self.get_built_sync_committee(epoch, spec)?; Ok(validator_indices @@ -1421,7 +1466,7 @@ impl BeaconState { /// Safely obtains the index for latest block roots, given some `slot`. 
/// /// Spec v0.12.1 - fn get_latest_block_roots_index(&self, slot: Slot) -> Result { + fn get_latest_block_roots_index(&self, slot: Slot) -> Result { if slot < self.slot() && self.slot() <= slot.safe_add(self.block_roots().len() as u64)? { Ok(slot.as_usize().safe_rem(self.block_roots().len())?) } else { @@ -1441,7 +1486,7 @@ impl BeaconState { let i = self.get_latest_block_roots_index(slot)?; self.block_roots() .get(i) - .ok_or(Error::BlockRootsOutOfBounds(i)) + .ok_or(BeaconStateError::BlockRootsOutOfBounds(i)) } /// Return the block root at a recent `epoch`. @@ -1461,12 +1506,12 @@ impl BeaconState { *self .block_roots_mut() .get_mut(i) - .ok_or(Error::BlockRootsOutOfBounds(i))? = block_root; + .ok_or(BeaconStateError::BlockRootsOutOfBounds(i))? = block_root; Ok(()) } /// Fill `randao_mixes` with - pub fn fill_randao_mixes_with(&mut self, index_root: Hash256) -> Result<(), Error> { + pub fn fill_randao_mixes_with(&mut self, index_root: Hash256) -> Result<(), BeaconStateError> { *self.randao_mixes_mut() = Vector::from_elem(index_root)?; Ok(()) } @@ -1478,7 +1523,7 @@ impl BeaconState { &self, epoch: Epoch, allow_next_epoch: AllowNextEpoch, - ) -> Result { + ) -> Result { let current_epoch = self.current_epoch(); let len = E::EpochsPerHistoricalVector::to_u64(); @@ -1487,7 +1532,7 @@ impl BeaconState { { Ok(epoch.as_usize().safe_rem(len as usize)?) } else { - Err(Error::EpochOutOfBounds) + Err(BeaconStateError::EpochOutOfBounds) } } @@ -1503,7 +1548,11 @@ impl BeaconState { /// # Errors: /// /// See `Self::get_randao_mix`. - pub fn update_randao_mix(&mut self, epoch: Epoch, signature: &Signature) -> Result<(), Error> { + pub fn update_randao_mix( + &mut self, + epoch: Epoch, + signature: &Signature, + ) -> Result<(), BeaconStateError> { let i = epoch .as_usize() .safe_rem(E::EpochsPerHistoricalVector::to_usize())?; @@ -1513,36 +1562,36 @@ impl BeaconState { *self .randao_mixes_mut() .get_mut(i) - .ok_or(Error::RandaoMixesOutOfBounds(i))? 
= + .ok_or(BeaconStateError::RandaoMixesOutOfBounds(i))? = *self.get_randao_mix(epoch)? ^ signature_hash; Ok(()) } /// Return the randao mix at a recent ``epoch``. - pub fn get_randao_mix(&self, epoch: Epoch) -> Result<&Hash256, Error> { + pub fn get_randao_mix(&self, epoch: Epoch) -> Result<&Hash256, BeaconStateError> { let i = self.get_randao_mix_index(epoch, AllowNextEpoch::False)?; self.randao_mixes() .get(i) - .ok_or(Error::RandaoMixesOutOfBounds(i)) + .ok_or(BeaconStateError::RandaoMixesOutOfBounds(i)) } /// Set the randao mix at a recent ``epoch``. /// /// Spec v0.12.1 - pub fn set_randao_mix(&mut self, epoch: Epoch, mix: Hash256) -> Result<(), Error> { + pub fn set_randao_mix(&mut self, epoch: Epoch, mix: Hash256) -> Result<(), BeaconStateError> { let i = self.get_randao_mix_index(epoch, AllowNextEpoch::True)?; *self .randao_mixes_mut() .get_mut(i) - .ok_or(Error::RandaoMixesOutOfBounds(i))? = mix; + .ok_or(BeaconStateError::RandaoMixesOutOfBounds(i))? = mix; Ok(()) } /// Safely obtains the index for latest state roots, given some `slot`. /// /// Spec v0.12.1 - fn get_latest_state_roots_index(&self, slot: Slot) -> Result { + fn get_latest_state_roots_index(&self, slot: Slot) -> Result { if slot < self.slot() && self.slot() <= slot.safe_add(self.state_roots().len() as u64)? { Ok(slot.as_usize().safe_rem(self.state_roots().len())?) } else { @@ -1551,38 +1600,42 @@ impl BeaconState { } /// Gets the state root for some slot. - pub fn get_state_root(&self, slot: Slot) -> Result<&Hash256, Error> { + pub fn get_state_root(&self, slot: Slot) -> Result<&Hash256, BeaconStateError> { let i = self.get_latest_state_roots_index(slot)?; self.state_roots() .get(i) - .ok_or(Error::StateRootsOutOfBounds(i)) + .ok_or(BeaconStateError::StateRootsOutOfBounds(i)) } /// Gets the state root for the start slot of some epoch. 
- pub fn get_state_root_at_epoch_start(&self, epoch: Epoch) -> Result { + pub fn get_state_root_at_epoch_start(&self, epoch: Epoch) -> Result { self.get_state_root(epoch.start_slot(E::slots_per_epoch())) .copied() } /// Gets the oldest (earliest slot) state root. - pub fn get_oldest_state_root(&self) -> Result<&Hash256, Error> { + pub fn get_oldest_state_root(&self) -> Result<&Hash256, BeaconStateError> { let oldest_slot = self.slot().saturating_sub(self.state_roots().len()); self.get_state_root(oldest_slot) } /// Gets the oldest (earliest slot) block root. - pub fn get_oldest_block_root(&self) -> Result<&Hash256, Error> { + pub fn get_oldest_block_root(&self) -> Result<&Hash256, BeaconStateError> { let oldest_slot = self.slot().saturating_sub(self.block_roots().len()); self.get_block_root(oldest_slot) } /// Sets the latest state root for slot. - pub fn set_state_root(&mut self, slot: Slot, state_root: Hash256) -> Result<(), Error> { + pub fn set_state_root( + &mut self, + slot: Slot, + state_root: Hash256, + ) -> Result<(), BeaconStateError> { let i = self.get_latest_state_roots_index(slot)?; *self .state_roots_mut() .get_mut(i) - .ok_or(Error::StateRootsOutOfBounds(i))? = state_root; + .ok_or(BeaconStateError::StateRootsOutOfBounds(i))? = state_root; Ok(()) } @@ -1591,7 +1644,7 @@ impl BeaconState { &self, epoch: Epoch, allow_next_epoch: AllowNextEpoch, - ) -> Result { + ) -> Result { // We allow the slashings vector to be accessed at any cached epoch at or before // the current epoch, or the next epoch if `AllowNextEpoch::True` is passed. let current_epoch = self.current_epoch(); @@ -1602,7 +1655,7 @@ impl BeaconState { .as_usize() .safe_rem(E::EpochsPerSlashingsVector::to_usize())?) } else { - Err(Error::EpochOutOfBounds) + Err(BeaconStateError::EpochOutOfBounds) } } @@ -1612,21 +1665,21 @@ impl BeaconState { } /// Get the total slashed balances for some epoch. 
- pub fn get_slashings(&self, epoch: Epoch) -> Result { + pub fn get_slashings(&self, epoch: Epoch) -> Result { let i = self.get_slashings_index(epoch, AllowNextEpoch::False)?; self.slashings() .get(i) .copied() - .ok_or(Error::SlashingsOutOfBounds(i)) + .ok_or(BeaconStateError::SlashingsOutOfBounds(i)) } /// Set the total slashed balances for some epoch. - pub fn set_slashings(&mut self, epoch: Epoch, value: u64) -> Result<(), Error> { + pub fn set_slashings(&mut self, epoch: Epoch, value: u64) -> Result<(), BeaconStateError> { let i = self.get_slashings_index(epoch, AllowNextEpoch::True)?; *self .slashings_mut() .get_mut(i) - .ok_or(Error::SlashingsOutOfBounds(i))? = value; + .ok_or(BeaconStateError::SlashingsOutOfBounds(i))? = value; Ok(()) } @@ -1666,10 +1719,10 @@ impl BeaconState { &mut ExitCache, &mut EpochCache, ), - Error, + BeaconStateError, > { match self { - BeaconState::Base(_) => Err(Error::IncorrectStateVariant), + BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(( &mut state.validators, &mut state.balances, @@ -1744,18 +1797,21 @@ impl BeaconState { } /// Get the balance of a single validator. - pub fn get_balance(&self, validator_index: usize) -> Result { + pub fn get_balance(&self, validator_index: usize) -> Result { self.balances() .get(validator_index) - .ok_or(Error::BalancesOutOfBounds(validator_index)) + .ok_or(BeaconStateError::BalancesOutOfBounds(validator_index)) .copied() } /// Get a mutable reference to the balance of a single validator. - pub fn get_balance_mut(&mut self, validator_index: usize) -> Result<&mut u64, Error> { + pub fn get_balance_mut( + &mut self, + validator_index: usize, + ) -> Result<&mut u64, BeaconStateError> { self.balances_mut() .get_mut(validator_index) - .ok_or(Error::BalancesOutOfBounds(validator_index)) + .ok_or(BeaconStateError::BalancesOutOfBounds(validator_index)) } /// Generate a seed for the given `epoch`. 
@@ -1764,7 +1820,7 @@ impl BeaconState { epoch: Epoch, domain_type: Domain, spec: &ChainSpec, - ) -> Result { + ) -> Result { // Bypass the safe getter for RANDAO so we can gracefully handle the scenario where `epoch // == 0`. let mix = { @@ -1775,7 +1831,7 @@ impl BeaconState { let i_mod = i.as_usize().safe_rem(self.randao_mixes().len())?; self.randao_mixes() .get(i_mod) - .ok_or(Error::RandaoMixesOutOfBounds(i_mod))? + .ok_or(BeaconStateError::RandaoMixesOutOfBounds(i_mod))? }; let domain_bytes = int_to_bytes4(spec.get_domain_constant(domain_type)); let epoch_bytes = int_to_bytes8(epoch.as_u64()); @@ -1794,17 +1850,20 @@ impl BeaconState { } /// Safe indexer for the `validators` list. - pub fn get_validator(&self, validator_index: usize) -> Result<&Validator, Error> { + pub fn get_validator(&self, validator_index: usize) -> Result<&Validator, BeaconStateError> { self.validators() .get(validator_index) - .ok_or(Error::UnknownValidator(validator_index)) + .ok_or(BeaconStateError::UnknownValidator(validator_index)) } /// Safe mutator for the `validators` list. - pub fn get_validator_mut(&mut self, validator_index: usize) -> Result<&mut Validator, Error> { + pub fn get_validator_mut( + &mut self, + validator_index: usize, + ) -> Result<&mut Validator, BeaconStateError> { self.validators_mut() .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index)) + .ok_or(BeaconStateError::UnknownValidator(validator_index)) } /// Add a validator to the registry and return the validator index that was allocated for it. 
@@ -1814,7 +1873,7 @@ impl BeaconState { withdrawal_credentials: Hash256, amount: u64, spec: &ChainSpec, - ) -> Result { + ) -> Result { let index = self.validators().len(); let fork_name = self.fork_name_unchecked(); self.validators_mut().push(Validator::from_deposit( @@ -1846,7 +1905,7 @@ impl BeaconState { if pubkey_cache.len() == index { let success = pubkey_cache.insert(pubkey, index); if !success { - return Err(Error::PubkeyCacheInconsistent); + return Err(BeaconStateError::PubkeyCacheInconsistent); } } @@ -1857,14 +1916,14 @@ impl BeaconState { pub fn get_validator_cow( &mut self, validator_index: usize, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { self.validators_mut() .get_cow(validator_index) - .ok_or(Error::UnknownValidator(validator_index)) + .ok_or(BeaconStateError::UnknownValidator(validator_index)) } /// Return the effective balance for a validator with the given `validator_index`. - pub fn get_effective_balance(&self, validator_index: usize) -> Result { + pub fn get_effective_balance(&self, validator_index: usize) -> Result { self.get_validator(validator_index) .map(|v| v.effective_balance) } @@ -1872,20 +1931,27 @@ impl BeaconState { /// Get the inactivity score for a single validator. /// /// Will error if the state lacks an `inactivity_scores` field. - pub fn get_inactivity_score(&self, validator_index: usize) -> Result { + pub fn get_inactivity_score(&self, validator_index: usize) -> Result { self.inactivity_scores()? .get(validator_index) .copied() - .ok_or(Error::InactivityScoresOutOfBounds(validator_index)) + .ok_or(BeaconStateError::InactivityScoresOutOfBounds( + validator_index, + )) } /// Get a mutable reference to the inactivity score for a single validator. /// /// Will error if the state lacks an `inactivity_scores` field. 
- pub fn get_inactivity_score_mut(&mut self, validator_index: usize) -> Result<&mut u64, Error> { + pub fn get_inactivity_score_mut( + &mut self, + validator_index: usize, + ) -> Result<&mut u64, BeaconStateError> { self.inactivity_scores_mut()? .get_mut(validator_index) - .ok_or(Error::InactivityScoresOutOfBounds(validator_index)) + .ok_or(BeaconStateError::InactivityScoresOutOfBounds( + validator_index, + )) } /// Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. @@ -1895,14 +1961,14 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result { + ) -> Result { Ok(spec.compute_activation_exit_epoch(epoch)?) } /// Return the churn limit for the current epoch (number of validators who can leave per epoch). /// /// Uses the current epoch committee cache, and will error if it isn't initialized. - pub fn get_validator_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_validator_churn_limit(&self, spec: &ChainSpec) -> Result { Ok(std::cmp::max( spec.min_per_epoch_churn_limit, (self @@ -1915,7 +1981,7 @@ impl BeaconState { /// Return the activation churn limit for the current epoch (number of validators who can enter per epoch). /// /// Uses the current epoch committee cache, and will error if it isn't initialized. - pub fn get_activation_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_activation_churn_limit(&self, spec: &ChainSpec) -> Result { Ok(match self { BeaconState::Base(_) | BeaconState::Altair(_) @@ -1941,7 +2007,7 @@ impl BeaconState { &self, validator_index: usize, relative_epoch: RelativeEpoch, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { let cache = self.committee_cache(relative_epoch)?; Ok(cache.get_attestation_duties(validator_index)) @@ -1951,7 +2017,10 @@ impl BeaconState { /// /// This method should rarely be invoked because single-pass epoch processing keeps the total /// active balance cache up to date. 
- pub fn compute_total_active_balance_slow(&self, spec: &ChainSpec) -> Result { + pub fn compute_total_active_balance_slow( + &self, + spec: &ChainSpec, + ) -> Result { let current_epoch = self.current_epoch(); let mut total_active_balance = 0; @@ -1973,20 +2042,20 @@ impl BeaconState { /// the current committee cache is. /// /// Returns minimum `EFFECTIVE_BALANCE_INCREMENT`, to avoid div by 0. - pub fn get_total_active_balance(&self) -> Result { + pub fn get_total_active_balance(&self) -> Result { self.get_total_active_balance_at_epoch(self.current_epoch()) } /// Get the cached total active balance while checking that it is for the correct `epoch`. - pub fn get_total_active_balance_at_epoch(&self, epoch: Epoch) -> Result { + pub fn get_total_active_balance_at_epoch(&self, epoch: Epoch) -> Result { let (initialized_epoch, balance) = self .total_active_balance() - .ok_or(Error::TotalActiveBalanceCacheUninitialized)?; + .ok_or(BeaconStateError::TotalActiveBalanceCacheUninitialized)?; if initialized_epoch == epoch { Ok(balance) } else { - Err(Error::TotalActiveBalanceCacheInconsistent { + Err(BeaconStateError::TotalActiveBalanceCacheInconsistent { initialized_epoch, current_epoch: epoch, }) @@ -2006,7 +2075,10 @@ impl BeaconState { /// Build the total active balance cache for the current epoch if it is not already built. 
#[instrument(skip_all, level = "debug")] - pub fn build_total_active_balance_cache(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn build_total_active_balance_cache( + &mut self, + spec: &ChainSpec, + ) -> Result<(), BeaconStateError> { if self .get_total_active_balance_at_epoch(self.current_epoch()) .is_err() @@ -2020,7 +2092,7 @@ impl BeaconState { pub fn force_build_total_active_balance_cache( &mut self, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let total_active_balance = self.compute_total_active_balance_slow(spec)?; *self.total_active_balance_mut() = Some((self.current_epoch(), total_active_balance)); Ok(()) @@ -2037,7 +2109,7 @@ impl BeaconState { epoch: Epoch, previous_epoch: Epoch, current_epoch: Epoch, - ) -> Result<&mut List, Error> { + ) -> Result<&mut List, BeaconStateError> { if epoch == current_epoch { match self { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), @@ -2067,7 +2139,7 @@ impl BeaconState { /// Build all caches (except the tree hash cache), if they need to be built. #[instrument(skip_all, level = "debug")] - pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { self.build_all_committee_caches(spec)?; self.update_pubkey_cache()?; self.build_exit_cache(spec)?; @@ -2078,7 +2150,7 @@ impl BeaconState { /// Build all committee caches, if they need to be built. #[instrument(skip_all, level = "debug")] - pub fn build_all_committee_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn build_all_committee_caches(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { self.build_committee_cache(RelativeEpoch::Previous, spec)?; self.build_committee_cache(RelativeEpoch::Current, spec)?; self.build_committee_cache(RelativeEpoch::Next, spec)?; @@ -2087,7 +2159,7 @@ impl BeaconState { /// Build the exit cache, if it needs to be built. 
#[instrument(skip_all, level = "debug")] - pub fn build_exit_cache(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn build_exit_cache(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { if self.exit_cache().check_initialized().is_err() { *self.exit_cache_mut() = ExitCache::new(self.validators(), spec)?; } @@ -2096,7 +2168,7 @@ impl BeaconState { /// Build the slashings cache if it needs to be built. #[instrument(skip_all, level = "debug")] - pub fn build_slashings_cache(&mut self) -> Result<(), Error> { + pub fn build_slashings_cache(&mut self) -> Result<(), BeaconStateError> { let latest_block_slot = self.latest_block_header().slot; if !self.slashings_cache().is_initialized(latest_block_slot) { *self.slashings_cache_mut() = SlashingsCache::new(latest_block_slot, self.validators()); @@ -2110,7 +2182,7 @@ impl BeaconState { } /// Drop all caches on the state. - pub fn drop_all_caches(&mut self) -> Result<(), Error> { + pub fn drop_all_caches(&mut self) -> Result<(), BeaconStateError> { self.drop_total_active_balance_cache(); self.drop_committee_cache(RelativeEpoch::Previous)?; self.drop_committee_cache(RelativeEpoch::Current)?; @@ -2138,7 +2210,7 @@ impl BeaconState { &mut self, relative_epoch: RelativeEpoch, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let i = Self::committee_cache_index(relative_epoch); let is_initialized = self .committee_cache_at_index(i)? 
@@ -2159,7 +2231,7 @@ impl BeaconState { &mut self, relative_epoch: RelativeEpoch, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let epoch = relative_epoch.into_epoch(self.current_epoch()); let i = Self::committee_cache_index(relative_epoch); @@ -2175,7 +2247,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { CommitteeCache::initialized(self, epoch, spec) } @@ -2185,7 +2257,7 @@ impl BeaconState { /// /// Note: this function will not build any new committee caches, nor will it update the total /// active balance cache. The total active balance cache must be updated separately. - pub fn advance_caches(&mut self) -> Result<(), Error> { + pub fn advance_caches(&mut self) -> Result<(), BeaconStateError> { self.committee_caches_mut().rotate_left(1); let next = Self::committee_cache_index(RelativeEpoch::Next); @@ -2204,27 +2276,33 @@ impl BeaconState { /// Get the committee cache for some `slot`. /// /// Return an error if the cache for the slot's epoch is not initialized. - fn committee_cache_at_slot(&self, slot: Slot) -> Result<&Arc, Error> { + fn committee_cache_at_slot( + &self, + slot: Slot, + ) -> Result<&Arc, BeaconStateError> { let epoch = slot.epoch(E::slots_per_epoch()); let relative_epoch = RelativeEpoch::from_epoch(self.current_epoch(), epoch)?; self.committee_cache(relative_epoch) } /// Get the committee cache at a given index. - fn committee_cache_at_index(&self, index: usize) -> Result<&Arc, Error> { + fn committee_cache_at_index( + &self, + index: usize, + ) -> Result<&Arc, BeaconStateError> { self.committee_caches() .get(index) - .ok_or(Error::CommitteeCachesOutOfBounds(index)) + .ok_or(BeaconStateError::CommitteeCachesOutOfBounds(index)) } /// Get a mutable reference to the committee cache at a given index. 
fn committee_cache_at_index_mut( &mut self, index: usize, - ) -> Result<&mut Arc, Error> { + ) -> Result<&mut Arc, BeaconStateError> { self.committee_caches_mut() .get_mut(index) - .ok_or(Error::CommitteeCachesOutOfBounds(index)) + .ok_or(BeaconStateError::CommitteeCachesOutOfBounds(index)) } /// Returns the cache for some `RelativeEpoch`. Returns an error if the cache has not been @@ -2232,19 +2310,24 @@ impl BeaconState { pub fn committee_cache( &self, relative_epoch: RelativeEpoch, - ) -> Result<&Arc, Error> { + ) -> Result<&Arc, BeaconStateError> { let i = Self::committee_cache_index(relative_epoch); let cache = self.committee_cache_at_index(i)?; if cache.is_initialized_at(relative_epoch.into_epoch(self.current_epoch())) { Ok(cache) } else { - Err(Error::CommitteeCacheUninitialized(Some(relative_epoch))) + Err(BeaconStateError::CommitteeCacheUninitialized(Some( + relative_epoch, + ))) } } /// Drops the cache, leaving it in an uninitialized state. - pub fn drop_committee_cache(&mut self, relative_epoch: RelativeEpoch) -> Result<(), Error> { + pub fn drop_committee_cache( + &mut self, + relative_epoch: RelativeEpoch, + ) -> Result<(), BeaconStateError> { *self.committee_cache_at_index_mut(Self::committee_cache_index(relative_epoch))? = Arc::new(CommitteeCache::default()); Ok(()) @@ -2255,7 +2338,7 @@ impl BeaconState { /// Adds all `pubkeys` from the `validators` which are not already in the cache. Will /// never re-add a pubkey. 
#[instrument(skip_all, level = "debug")] - pub fn update_pubkey_cache(&mut self) -> Result<(), Error> { + pub fn update_pubkey_cache(&mut self) -> Result<(), BeaconStateError> { let mut pubkey_cache = mem::take(self.pubkey_cache_mut()); let start_index = pubkey_cache.len(); @@ -2263,7 +2346,7 @@ impl BeaconState { let index = start_index.safe_add(i)?; let success = pubkey_cache.insert(validator.pubkey, index); if !success { - return Err(Error::PubkeyCacheInconsistent); + return Err(BeaconStateError::PubkeyCacheInconsistent); } } *self.pubkey_cache_mut() = pubkey_cache; @@ -2341,7 +2424,7 @@ impl BeaconState { /// /// Initialize the tree hash cache if it isn't already initialized. #[instrument(skip_all, level = "debug")] - pub fn update_tree_hash_cache<'a>(&'a mut self) -> Result { + pub fn update_tree_hash_cache<'a>(&'a mut self) -> Result { self.apply_pending_mutations()?; map_beacon_state_ref!(&'a _, self.to_ref(), |inner, cons| { let root = inner.tree_hash_root(); @@ -2353,7 +2436,7 @@ impl BeaconState { /// Compute the tree hash root of the validators using the tree hash cache. /// /// Initialize the tree hash cache if it isn't already initialized. - pub fn update_validators_tree_hash_cache(&mut self) -> Result { + pub fn update_validators_tree_hash_cache(&mut self) -> Result { self.validators_mut().apply_updates()?; Ok(self.validators().tree_hash_root()) } @@ -2364,7 +2447,7 @@ impl BeaconState { &self, previous_epoch: Epoch, val: &Validator, - ) -> Result { + ) -> Result { Ok(val.is_active_at(previous_epoch) || (val.slashed && previous_epoch.safe_add(Epoch::new(1))? < val.withdrawable_epoch)) } @@ -2388,7 +2471,7 @@ impl BeaconState { pub fn get_sync_committee_for_next_slot( &self, spec: &ChainSpec, - ) -> Result>, Error> { + ) -> Result>, BeaconStateError> { let next_slot_epoch = self .slot() .saturating_add(Slot::new(1)) @@ -2414,7 +2497,7 @@ impl BeaconState { // ******* Electra accessors ******* /// Return the churn limit for the current epoch. 
- pub fn get_balance_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_balance_churn_limit(&self, spec: &ChainSpec) -> Result { let total_active_balance = self.get_total_active_balance()?; let churn = std::cmp::max( spec.min_per_epoch_churn_limit_electra, @@ -2425,20 +2508,26 @@ impl BeaconState { } /// Return the churn limit for the current epoch dedicated to activations and exits. - pub fn get_activation_exit_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_activation_exit_churn_limit( + &self, + spec: &ChainSpec, + ) -> Result { Ok(std::cmp::min( spec.max_per_epoch_activation_exit_churn_limit, self.get_balance_churn_limit(spec)?, )) } - pub fn get_consolidation_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_consolidation_churn_limit(&self, spec: &ChainSpec) -> Result { self.get_balance_churn_limit(spec)? .safe_sub(self.get_activation_exit_churn_limit(spec)?) .map_err(Into::into) } - pub fn get_pending_balance_to_withdraw(&self, validator_index: usize) -> Result { + pub fn get_pending_balance_to_withdraw( + &self, + validator_index: usize, + ) -> Result { let mut pending_balance = 0; for withdrawal in self .pending_partial_withdrawals()? 
@@ -2456,11 +2545,11 @@ impl BeaconState { &mut self, validator_index: usize, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let balance = self .balances_mut() .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?; + .ok_or(BeaconStateError::UnknownValidator(validator_index))?; if *balance > spec.min_activation_balance { let excess_balance = balance.safe_sub(spec.min_activation_balance)?; *balance = spec.min_activation_balance; @@ -2481,11 +2570,11 @@ impl BeaconState { &mut self, validator_index: usize, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let validator = self .validators_mut() .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?; + .ok_or(BeaconStateError::UnknownValidator(validator_index))?; AsMut::<[u8; 32]>::as_mut(&mut validator.withdrawal_credentials)[0] = spec.compounding_withdrawal_prefix_byte; @@ -2497,7 +2586,7 @@ impl BeaconState { &mut self, exit_balance: u64, spec: &ChainSpec, - ) -> Result { + ) -> Result { let mut earliest_exit_epoch = std::cmp::max( self.earliest_exit_epoch()?, self.compute_activation_exit_epoch(self.current_epoch(), spec)?, @@ -2527,7 +2616,7 @@ impl BeaconState { | BeaconState::Altair(_) | BeaconState::Bellatrix(_) | BeaconState::Capella(_) - | BeaconState::Deneb(_) => Err(Error::IncorrectStateVariant), + | BeaconState::Deneb(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Electra(_) | BeaconState::Fulu(_) | BeaconState::Gloas(_) => { // Consume the balance and update state variables *self.exit_balance_to_consume_mut()? 
= @@ -2542,7 +2631,7 @@ impl BeaconState { &mut self, consolidation_balance: u64, spec: &ChainSpec, - ) -> Result { + ) -> Result { let mut earliest_consolidation_epoch = std::cmp::max( self.earliest_consolidation_epoch()?, self.compute_activation_exit_epoch(self.current_epoch(), spec)?, @@ -2574,7 +2663,7 @@ impl BeaconState { | BeaconState::Altair(_) | BeaconState::Bellatrix(_) | BeaconState::Capella(_) - | BeaconState::Deneb(_) => Err(Error::IncorrectStateVariant), + | BeaconState::Deneb(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Electra(_) | BeaconState::Fulu(_) | BeaconState::Gloas(_) => { // Consume the balance and update state variables. *self.consolidation_balance_to_consume_mut()? = @@ -2586,7 +2675,7 @@ impl BeaconState { } #[allow(clippy::arithmetic_side_effects)] - pub fn rebase_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), Error> { + pub fn rebase_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), BeaconStateError> { // Required for macros (which use type-hints internally). match (&mut *self, base) { @@ -2677,7 +2766,11 @@ impl BeaconState { Ok(()) } - pub fn rebase_caches_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), Error> { + pub fn rebase_caches_on( + &mut self, + base: &Self, + spec: &ChainSpec, + ) -> Result<(), BeaconStateError> { // Use pubkey cache from `base` if it contains superior information (likely if our cache is // uninitialized). Be careful not to use a cache which has *more* validators than expected, // as other code expects `self.pubkey_cache().len() <= self.validators.len()`. 
@@ -2766,7 +2859,7 @@ impl BeaconState { } #[allow(clippy::arithmetic_side_effects)] - pub fn apply_pending_mutations(&mut self) -> Result<(), Error> { + pub fn apply_pending_mutations(&mut self) -> Result<(), BeaconStateError> { match self { Self::Base(inner) => { map_beacon_state_base_tree_list_fields!(inner, |_, x| { x.apply_updates() }) @@ -2796,43 +2889,43 @@ impl BeaconState { Ok(()) } - pub fn compute_current_sync_committee_proof(&self) -> Result, Error> { + pub fn compute_current_sync_committee_proof(&self) -> Result, BeaconStateError> { // Sync committees are top-level fields, subtract off the generalized indices // for the internal nodes. Result should be 22 or 23, the field offset of the committee // in the `BeaconState`: // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate let field_gindex = if self.fork_name_unchecked().electra_enabled() { - light_client_update::CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA } else { - light_client_update::CURRENT_SYNC_COMMITTEE_INDEX + CURRENT_SYNC_COMMITTEE_INDEX }; let field_index = field_gindex.safe_sub(self.num_fields_pow2())?; let leaves = self.get_beacon_state_leaves(); self.generate_proof(field_index, &leaves) } - pub fn compute_next_sync_committee_proof(&self) -> Result, Error> { + pub fn compute_next_sync_committee_proof(&self) -> Result, BeaconStateError> { // Sync committees are top-level fields, subtract off the generalized indices // for the internal nodes. 
Result should be 22 or 23, the field offset of the committee // in the `BeaconState`: // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate let field_gindex = if self.fork_name_unchecked().electra_enabled() { - light_client_update::NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + NEXT_SYNC_COMMITTEE_INDEX_ELECTRA } else { - light_client_update::NEXT_SYNC_COMMITTEE_INDEX + NEXT_SYNC_COMMITTEE_INDEX }; let field_index = field_gindex.safe_sub(self.num_fields_pow2())?; let leaves = self.get_beacon_state_leaves(); self.generate_proof(field_index, &leaves) } - pub fn compute_finalized_root_proof(&self) -> Result, Error> { + pub fn compute_finalized_root_proof(&self) -> Result, BeaconStateError> { // Finalized root is the right child of `finalized_checkpoint`, divide by two to get // the generalized index of `state.finalized_checkpoint`. let checkpoint_root_gindex = if self.fork_name_unchecked().electra_enabled() { - light_client_update::FINALIZED_ROOT_INDEX_ELECTRA + FINALIZED_ROOT_INDEX_ELECTRA } else { - light_client_update::FINALIZED_ROOT_INDEX + FINALIZED_ROOT_INDEX }; let checkpoint_gindex = checkpoint_root_gindex / 2; @@ -2855,9 +2948,9 @@ impl BeaconState { &self, field_index: usize, leaves: &[Hash256], - ) -> Result, Error> { + ) -> Result, BeaconStateError> { if field_index >= leaves.len() { - return Err(Error::IndexNotSupported(field_index)); + return Err(BeaconStateError::IndexNotSupported(field_index)); } let depth = self.num_fields_pow2().ilog2() as usize; @@ -2916,45 +3009,45 @@ impl BeaconState { } } -impl From for Error { - fn from(e: RelativeEpochError) -> Error { - Error::RelativeEpochError(e) +impl From for BeaconStateError { + fn from(e: RelativeEpochError) -> BeaconStateError { + BeaconStateError::RelativeEpochError(e) } } -impl From for Error { - fn from(e: ssz_types::Error) -> Error { - Error::SszTypesError(e) +impl From for BeaconStateError { + fn from(e: ssz_types::Error) -> BeaconStateError { + 
BeaconStateError::SszTypesError(e) } } -impl From for Error { - fn from(e: bls::Error) -> Error { - Error::BlsError(e) +impl From for BeaconStateError { + fn from(e: bls::Error) -> BeaconStateError { + BeaconStateError::BlsError(e) } } -impl From for Error { - fn from(e: tree_hash::Error) -> Error { - Error::TreeHashError(e) +impl From for BeaconStateError { + fn from(e: tree_hash::Error) -> BeaconStateError { + BeaconStateError::TreeHashError(e) } } -impl From for Error { - fn from(e: merkle_proof::MerkleTreeError) -> Error { - Error::MerkleTreeError(e) +impl From for BeaconStateError { + fn from(e: merkle_proof::MerkleTreeError) -> BeaconStateError { + BeaconStateError::MerkleTreeError(e) } } -impl From for Error { - fn from(e: ArithError) -> Error { - Error::ArithError(e) +impl From for BeaconStateError { + fn from(e: ArithError) -> BeaconStateError { + BeaconStateError::ArithError(e) } } -impl From for Error { - fn from(e: milhouse::Error) -> Self { - Self::MilhouseError(e) +impl From for BeaconStateError { + fn from(e: milhouse::Error) -> BeaconStateError { + BeaconStateError::MilhouseError(e) } } diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/state/committee_cache.rs similarity index 93% rename from consensus/types/src/beacon_state/committee_cache.rs rename to consensus/types/src/state/committee_cache.rs index 408c269da5f..15f6a4cd376 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/state/committee_cache.rs @@ -1,17 +1,20 @@ #![allow(clippy::arithmetic_side_effects)] -use crate::*; -use core::num::NonZeroUsize; +use std::{num::NonZeroUsize, ops::Range, sync::Arc}; + use educe::Educe; use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode, four_byte_option_impl}; use ssz_derive::{Decode, Encode}; -use std::ops::Range; -use std::sync::Arc; use swap_or_not_shuffle::shuffle_list; -mod tests; +use crate::{ + 
attestation::{AttestationDuty, BeaconCommittee, CommitteeIndex}, + core::{ChainSpec, Domain, Epoch, EthSpec, Slot}, + state::{BeaconState, BeaconStateError}, + validator::Validator, +}; // Define "legacy" implementations of `Option`, `Option` which use four bytes // for encoding the union selector. @@ -66,7 +69,7 @@ impl CommitteeCache { state: &BeaconState, epoch: Epoch, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { // Check that the cache is being built for an in-range epoch. // // We allow caches to be constructed for historic epochs, per: @@ -77,23 +80,23 @@ impl CommitteeCache { .saturating_sub(1u64); if reqd_randao_epoch < state.min_randao_epoch() || epoch > state.current_epoch() + 1 { - return Err(Error::EpochOutOfBounds); + return Err(BeaconStateError::EpochOutOfBounds); } // May cause divide-by-zero errors. if E::slots_per_epoch() == 0 { - return Err(Error::ZeroSlotsPerEpoch); + return Err(BeaconStateError::ZeroSlotsPerEpoch); } // The use of `NonZeroUsize` reduces the maximum number of possible validators by one. if state.validators().len() == usize::MAX { - return Err(Error::TooManyValidators); + return Err(BeaconStateError::TooManyValidators); } let active_validator_indices = get_active_validator_indices(state.validators(), epoch); if active_validator_indices.is_empty() { - return Err(Error::InsufficientValidators); + return Err(BeaconStateError::InsufficientValidators); } let committees_per_slot = @@ -107,13 +110,14 @@ impl CommitteeCache { &seed[..], false, ) - .ok_or(Error::UnableToShuffle)?; + .ok_or(BeaconStateError::UnableToShuffle)?; let mut shuffling_positions = vec![<_>::default(); state.validators().len()]; for (i, &v) in shuffling.iter().enumerate() { *shuffling_positions .get_mut(v) - .ok_or(Error::ShuffleIndexOutOfBounds(v))? = NonZeroUsize::new(i + 1).into(); + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(v))? 
= + NonZeroUsize::new(i + 1).into(); } Ok(Arc::new(CommitteeCache { @@ -188,24 +192,24 @@ impl CommitteeCache { pub fn get_beacon_committees_at_slot( &self, slot: Slot, - ) -> Result>, Error> { + ) -> Result>, BeaconStateError> { if self.initialized_epoch.is_none() { - return Err(Error::CommitteeCacheUninitialized(None)); + return Err(BeaconStateError::CommitteeCacheUninitialized(None)); } (0..self.committees_per_slot()) .map(|index| { self.get_beacon_committee(slot, index) - .ok_or(Error::NoCommittee { slot, index }) + .ok_or(BeaconStateError::NoCommittee { slot, index }) }) .collect() } /// Returns all committees for `self.initialized_epoch`. - pub fn get_all_beacon_committees(&self) -> Result>, Error> { + pub fn get_all_beacon_committees(&self) -> Result>, BeaconStateError> { let initialized_epoch = self .initialized_epoch - .ok_or(Error::CommitteeCacheUninitialized(None))?; + .ok_or(BeaconStateError::CommitteeCacheUninitialized(None))?; initialized_epoch.slot_iter(self.slots_per_epoch).try_fold( Vec::with_capacity(self.epoch_committee_count()), diff --git a/consensus/types/src/epoch_cache.rs b/consensus/types/src/state/epoch_cache.rs similarity index 97% rename from consensus/types/src/epoch_cache.rs rename to consensus/types/src/state/epoch_cache.rs index 9956cb400a7..cdea0d143df 100644 --- a/consensus/types/src/epoch_cache.rs +++ b/consensus/types/src/state/epoch_cache.rs @@ -1,7 +1,12 @@ -use crate::{ActivationQueue, BeaconStateError, ChainSpec, Epoch, Hash256, Slot}; -use safe_arith::{ArithError, SafeArith}; use std::sync::Arc; +use safe_arith::{ArithError, SafeArith}; + +use crate::{ + core::{ChainSpec, Epoch, Hash256, Slot}, + state::{ActivationQueue, BeaconStateError}, +}; + /// Cache of values which are uniquely determined at the start of an epoch. 
/// /// The values are fixed with respect to the last block of the _prior_ epoch, which we refer diff --git a/consensus/types/src/beacon_state/exit_cache.rs b/consensus/types/src/state/exit_cache.rs similarity index 97% rename from consensus/types/src/beacon_state/exit_cache.rs rename to consensus/types/src/state/exit_cache.rs index 2828a6138c6..43809d1af0e 100644 --- a/consensus/types/src/beacon_state/exit_cache.rs +++ b/consensus/types/src/state/exit_cache.rs @@ -1,7 +1,13 @@ -use super::{BeaconStateError, ChainSpec, Epoch, Validator}; -use safe_arith::SafeArith; use std::cmp::Ordering; +use safe_arith::SafeArith; + +use crate::{ + core::{ChainSpec, Epoch}, + state::BeaconStateError, + validator::Validator, +}; + /// Map from exit epoch to the number of validators with that exit epoch. #[derive(Debug, Default, Clone, PartialEq)] pub struct ExitCache { diff --git a/consensus/types/src/historical_batch.rs b/consensus/types/src/state/historical_batch.rs similarity index 81% rename from consensus/types/src/historical_batch.rs rename to consensus/types/src/state/historical_batch.rs index 55377f24894..0167d64f62a 100644 --- a/consensus/types/src/historical_batch.rs +++ b/consensus/types/src/state/historical_batch.rs @@ -1,11 +1,16 @@ -use crate::test_utils::TestRandom; -use crate::*; - +use context_deserialize::context_deserialize; +use milhouse::Vector; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{EthSpec, Hash256}, + fork::ForkName, + test_utils::TestRandom, +}; + /// Historical block and state roots. 
/// /// Spec v0.12.1 @@ -26,6 +31,7 @@ pub struct HistoricalBatch { #[cfg(test)] mod tests { use super::*; + use crate::core::MainnetEthSpec; pub type FoundationHistoricalBatch = HistoricalBatch; diff --git a/consensus/types/src/historical_summary.rs b/consensus/types/src/state/historical_summary.rs similarity index 87% rename from consensus/types/src/historical_summary.rs rename to consensus/types/src/state/historical_summary.rs index dc147ad0428..f520e464837 100644 --- a/consensus/types/src/historical_summary.rs +++ b/consensus/types/src/state/historical_summary.rs @@ -1,13 +1,18 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{BeaconState, EthSpec, ForkName, Hash256}; use compare_fields::CompareFields; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + core::{EthSpec, Hash256}, + fork::ForkName, + state::BeaconState, + test_utils::TestRandom, +}; + /// `HistoricalSummary` matches the components of the phase0 `HistoricalBatch` /// making the two hash_tree_root-compatible. This struct is introduced into the beacon state /// in the Capella hard fork. diff --git a/consensus/types/src/beacon_state/iter.rs b/consensus/types/src/state/iter.rs similarity index 95% rename from consensus/types/src/beacon_state/iter.rs rename to consensus/types/src/state/iter.rs index d99c769e402..63f28d74c4b 100644 --- a/consensus/types/src/beacon_state/iter.rs +++ b/consensus/types/src/state/iter.rs @@ -1,4 +1,7 @@ -use crate::*; +use crate::{ + core::{EthSpec, Hash256, Slot}, + state::{BeaconState, BeaconStateError}, +}; /// Returns an iterator across the past block roots of `state` in descending slot-order. 
/// @@ -28,7 +31,7 @@ impl<'a, E: EthSpec> BlockRootsIter<'a, E> { } impl Iterator for BlockRootsIter<'_, E> { - type Item = Result<(Slot, Hash256), Error>; + type Item = Result<(Slot, Hash256), BeaconStateError>; fn next(&mut self) -> Option { if self.prev > self.genesis_slot @@ -53,6 +56,7 @@ impl Iterator for BlockRootsIter<'_, E> { #[cfg(test)] mod test { use crate::*; + use fixed_bytes::FixedBytesExtended; type E = MinimalEthSpec; diff --git a/consensus/types/src/state/mod.rs b/consensus/types/src/state/mod.rs new file mode 100644 index 00000000000..309796d3592 --- /dev/null +++ b/consensus/types/src/state/mod.rs @@ -0,0 +1,35 @@ +mod activation_queue; +mod balance; +mod beacon_state; +#[macro_use] +mod committee_cache; +mod epoch_cache; +mod exit_cache; +mod historical_batch; +mod historical_summary; +mod iter; +mod progressive_balances_cache; +mod pubkey_cache; +mod slashings_cache; + +pub use activation_queue::ActivationQueue; +pub use balance::Balance; +pub use beacon_state::{ + BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateBellatrix, BeaconStateCapella, + BeaconStateDeneb, BeaconStateElectra, BeaconStateError, BeaconStateFulu, BeaconStateGloas, + BeaconStateHash, BeaconStateRef, CACHED_EPOCHS, +}; +pub use committee_cache::{ + CommitteeCache, compute_committee_index_in_epoch, compute_committee_range_in_epoch, + epoch_committee_count, get_active_validator_indices, +}; +pub use epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; +pub use exit_cache::ExitCache; +pub use historical_batch::HistoricalBatch; +pub use historical_summary::HistoricalSummary; +pub use iter::BlockRootsIter; +pub use progressive_balances_cache::{ + EpochTotalBalances, ProgressiveBalancesCache, is_progressive_balances_enabled, +}; +pub use pubkey_cache::PubkeyCache; +pub use slashings_cache::SlashingsCache; diff --git a/consensus/types/src/beacon_state/progressive_balances_cache.rs b/consensus/types/src/state/progressive_balances_cache.rs similarity index 98% 
rename from consensus/types/src/beacon_state/progressive_balances_cache.rs rename to consensus/types/src/state/progressive_balances_cache.rs index 67d1155dbf1..1e4c311f9a2 100644 --- a/consensus/types/src/beacon_state/progressive_balances_cache.rs +++ b/consensus/types/src/state/progressive_balances_cache.rs @@ -1,14 +1,16 @@ -use crate::beacon_state::balance::Balance; +#[cfg(feature = "arbitrary")] +use arbitrary::Arbitrary; +use safe_arith::SafeArith; + use crate::{ - BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags, - consts::altair::{ + attestation::ParticipationFlags, + core::consts::altair::{ NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, }, + core::{ChainSpec, Epoch, EthSpec}, + state::{Balance, BeaconState, BeaconStateError}, }; -#[cfg(feature = "arbitrary")] -use arbitrary::Arbitrary; -use safe_arith::SafeArith; /// This cache keeps track of the accumulated target attestation balance for the current & previous /// epochs. 
The cached values can be utilised by fork choice to calculate unrealized justification diff --git a/consensus/types/src/beacon_state/pubkey_cache.rs b/consensus/types/src/state/pubkey_cache.rs similarity index 98% rename from consensus/types/src/beacon_state/pubkey_cache.rs rename to consensus/types/src/state/pubkey_cache.rs index 85ed00340d7..e62fafb53a6 100644 --- a/consensus/types/src/beacon_state/pubkey_cache.rs +++ b/consensus/types/src/state/pubkey_cache.rs @@ -1,4 +1,4 @@ -use crate::*; +use bls::PublicKeyBytes; use rpds::HashTrieMapSync as HashTrieMap; type ValidatorIndex = usize; diff --git a/consensus/types/src/beacon_state/slashings_cache.rs b/consensus/types/src/state/slashings_cache.rs similarity index 96% rename from consensus/types/src/beacon_state/slashings_cache.rs rename to consensus/types/src/state/slashings_cache.rs index 6530f795e9f..b6ed583df89 100644 --- a/consensus/types/src/beacon_state/slashings_cache.rs +++ b/consensus/types/src/state/slashings_cache.rs @@ -1,8 +1,9 @@ -use crate::{BeaconStateError, Slot, Validator}; #[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; use rpds::HashTrieSetSync as HashTrieSet; +use crate::{core::Slot, state::BeaconStateError, validator::Validator}; + /// Persistent (cheap to clone) cache of all slashed validator indices. 
#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[derive(Debug, Default, Clone, PartialEq)] diff --git a/consensus/types/src/contribution_and_proof.rs b/consensus/types/src/sync_committee/contribution_and_proof.rs similarity index 88% rename from consensus/types/src/contribution_and_proof.rs rename to consensus/types/src/sync_committee/contribution_and_proof.rs index 4d70cd1f8a0..2a344b89dee 100644 --- a/consensus/types/src/contribution_and_proof.rs +++ b/consensus/types/src/sync_committee/contribution_and_proof.rs @@ -1,14 +1,17 @@ -use super::{ - ChainSpec, EthSpec, Fork, ForkName, Hash256, SecretKey, Signature, SignedRoot, - SyncCommitteeContribution, SyncSelectionProof, -}; -use crate::context_deserialize; -use crate::test_utils::TestRandom; +use bls::{SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + sync_committee::{SyncCommitteeContribution, SyncSelectionProof}, + test_utils::TestRandom, +}; + /// A Validators aggregate sync committee contribution and selection proof. 
#[cfg_attr( feature = "arbitrary", diff --git a/consensus/types/src/sync_committee/mod.rs b/consensus/types/src/sync_committee/mod.rs new file mode 100644 index 00000000000..5a75975fe0a --- /dev/null +++ b/consensus/types/src/sync_committee/mod.rs @@ -0,0 +1,25 @@ +mod contribution_and_proof; +mod signed_contribution_and_proof; +mod sync_aggregate; +mod sync_aggregator_selection_data; +mod sync_committee; +mod sync_committee_contribution; +mod sync_committee_message; +mod sync_committee_subscription; +mod sync_duty; +mod sync_selection_proof; +mod sync_subnet_id; + +pub use contribution_and_proof::ContributionAndProof; +pub use signed_contribution_and_proof::SignedContributionAndProof; +pub use sync_aggregate::{Error as SyncAggregateError, SyncAggregate}; +pub use sync_aggregator_selection_data::SyncAggregatorSelectionData; +pub use sync_committee::{Error as SyncCommitteeError, SyncCommittee}; +pub use sync_committee_contribution::{ + Error as SyncCommitteeContributionError, SyncCommitteeContribution, SyncContributionData, +}; +pub use sync_committee_message::SyncCommitteeMessage; +pub use sync_committee_subscription::SyncCommitteeSubscription; +pub use sync_duty::SyncDuty; +pub use sync_selection_proof::SyncSelectionProof; +pub use sync_subnet_id::{SyncSubnetId, sync_subnet_id_to_string}; diff --git a/consensus/types/src/signed_contribution_and_proof.rs b/consensus/types/src/sync_committee/signed_contribution_and_proof.rs similarity index 87% rename from consensus/types/src/signed_contribution_and_proof.rs rename to consensus/types/src/sync_committee/signed_contribution_and_proof.rs index 51c453d32ff..0027003b9f3 100644 --- a/consensus/types/src/signed_contribution_and_proof.rs +++ b/consensus/types/src/sync_committee/signed_contribution_and_proof.rs @@ -1,14 +1,17 @@ -use super::{ - ChainSpec, ContributionAndProof, Domain, EthSpec, Fork, ForkName, Hash256, SecretKey, - Signature, SignedRoot, SyncCommitteeContribution, SyncSelectionProof, -}; -use 
crate::context_deserialize; -use crate::test_utils::TestRandom; +use bls::{SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + sync_committee::{ContributionAndProof, SyncCommitteeContribution, SyncSelectionProof}, + test_utils::TestRandom, +}; + /// A Validators signed contribution proof to publish on the `sync_committee_contribution_and_proof` /// gossipsub topic. #[cfg_attr( diff --git a/consensus/types/src/sync_aggregate.rs b/consensus/types/src/sync_committee/sync_aggregate.rs similarity index 91% rename from consensus/types/src/sync_aggregate.rs rename to consensus/types/src/sync_committee/sync_aggregate.rs index ba6d840a526..e5848aa22ce 100644 --- a/consensus/types/src/sync_aggregate.rs +++ b/consensus/types/src/sync_committee/sync_aggregate.rs @@ -1,14 +1,20 @@ -use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{AggregateSignature, BitVector, EthSpec, ForkName, SyncCommitteeContribution}; +use bls::AggregateSignature; +use context_deserialize::context_deserialize; use educe::Educe; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::BitVector; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{EthSpec, consts::altair::SYNC_COMMITTEE_SUBNET_COUNT}, + fork::ForkName, + sync_committee::SyncCommitteeContribution, + test_utils::TestRandom, +}; + #[derive(Debug, PartialEq)] pub enum Error { SszTypesError(ssz_types::Error), diff --git a/consensus/types/src/sync_aggregator_selection_data.rs b/consensus/types/src/sync_committee/sync_aggregator_selection_data.rs similarity index 82% rename from 
consensus/types/src/sync_aggregator_selection_data.rs rename to consensus/types/src/sync_committee/sync_aggregator_selection_data.rs index a280369fea3..e905ca036b3 100644 --- a/consensus/types/src/sync_aggregator_selection_data.rs +++ b/consensus/types/src/sync_committee/sync_aggregator_selection_data.rs @@ -1,11 +1,15 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ForkName, SignedRoot, Slot}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{SignedRoot, Slot}, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Clone, Serialize, Deserialize, Hash, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/sync_committee.rs b/consensus/types/src/sync_committee/sync_committee.rs similarity index 95% rename from consensus/types/src/sync_committee.rs rename to consensus/types/src/sync_committee/sync_committee.rs index a9fde425540..54484118002 100644 --- a/consensus/types/src/sync_committee.rs +++ b/consensus/types/src/sync_committee/sync_committee.rs @@ -1,14 +1,16 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{EthSpec, FixedVector, ForkName, SyncSubnetId}; +use std::collections::HashMap; + use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use std::collections::HashMap; +use ssz_types::FixedVector; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::EthSpec, fork::ForkName, sync_committee::SyncSubnetId, test_utils::TestRandom}; + #[derive(Debug, PartialEq)] pub enum Error { ArithError(ArithError), diff --git a/consensus/types/src/sync_committee_contribution.rs 
b/consensus/types/src/sync_committee/sync_committee_contribution.rs similarity index 93% rename from consensus/types/src/sync_committee_contribution.rs rename to consensus/types/src/sync_committee/sync_committee_contribution.rs index db22a3bdbc8..09376fbe5c0 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee/sync_committee_contribution.rs @@ -1,12 +1,18 @@ -use super::{AggregateSignature, EthSpec, ForkName, SignedRoot}; -use crate::context_deserialize; -use crate::slot_data::SlotData; -use crate::{BitVector, Hash256, Slot, SyncCommitteeMessage, test_utils::TestRandom}; +use bls::AggregateSignature; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::BitVector; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{EthSpec, Hash256, SignedRoot, Slot, SlotData}, + fork::ForkName, + sync_committee::SyncCommitteeMessage, + test_utils::TestRandom, +}; + #[derive(Debug, PartialEq)] pub enum Error { SszTypesError(ssz_types::Error), diff --git a/consensus/types/src/sync_committee_message.rs b/consensus/types/src/sync_committee/sync_committee_message.rs similarity index 88% rename from consensus/types/src/sync_committee_message.rs rename to consensus/types/src/sync_committee/sync_committee_message.rs index d5bb7250bb4..ed42555c43f 100644 --- a/consensus/types/src/sync_committee_message.rs +++ b/consensus/types/src/sync_committee/sync_committee_message.rs @@ -1,14 +1,16 @@ -use crate::context_deserialize; -use crate::slot_data::SlotData; -use crate::test_utils::TestRandom; -use crate::{ - ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, SecretKey, Signature, SignedRoot, Slot, -}; +use bls::{SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use 
crate::{ + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot, SlotData}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + /// The data upon which a `SyncCommitteeContribution` is based. #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] diff --git a/consensus/types/src/sync_committee_subscription.rs b/consensus/types/src/sync_committee/sync_committee_subscription.rs similarity index 96% rename from consensus/types/src/sync_committee_subscription.rs rename to consensus/types/src/sync_committee/sync_committee_subscription.rs index 8e040279d73..6365b015dd2 100644 --- a/consensus/types/src/sync_committee_subscription.rs +++ b/consensus/types/src/sync_committee/sync_committee_subscription.rs @@ -1,7 +1,8 @@ -use crate::Epoch; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use crate::core::Epoch; + /// A sync committee subscription created when a validator subscribes to sync committee subnets to perform /// sync committee duties. 
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] diff --git a/consensus/types/src/sync_duty.rs b/consensus/types/src/sync_committee/sync_duty.rs similarity index 96% rename from consensus/types/src/sync_duty.rs rename to consensus/types/src/sync_committee/sync_duty.rs index 59fbc960db5..773cc008f9f 100644 --- a/consensus/types/src/sync_duty.rs +++ b/consensus/types/src/sync_committee/sync_duty.rs @@ -1,8 +1,13 @@ -use crate::{EthSpec, SyncCommittee, SyncSubnetId}; +use std::collections::HashSet; + use bls::PublicKeyBytes; use safe_arith::ArithError; use serde::{Deserialize, Serialize}; -use std::collections::HashSet; + +use crate::{ + core::EthSpec, + sync_committee::{SyncCommittee, SyncSubnetId}, +}; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SyncDuty { diff --git a/consensus/types/src/sync_selection_proof.rs b/consensus/types/src/sync_committee/sync_selection_proof.rs similarity index 90% rename from consensus/types/src/sync_selection_proof.rs rename to consensus/types/src/sync_committee/sync_selection_proof.rs index b1e9e8186f5..723f0c06c96 100644 --- a/consensus/types/src/sync_selection_proof.rs +++ b/consensus/types/src/sync_committee/sync_selection_proof.rs @@ -1,16 +1,20 @@ -use crate::consts::altair::{ - SYNC_COMMITTEE_SUBNET_COUNT, TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE, -}; -use crate::{ - ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, Signature, SignedRoot, Slot, - SyncAggregatorSelectionData, -}; +use std::cmp; + +use bls::{PublicKey, SecretKey, Signature}; use ethereum_hashing::hash; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::Encode; -use ssz_types::typenum::Unsigned; -use std::cmp; +use typenum::Unsigned; + +use crate::{ + core::{ + ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot, + consts::altair::{SYNC_COMMITTEE_SUBNET_COUNT, TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE}, + }, + fork::Fork, + 
sync_committee::SyncAggregatorSelectionData, +}; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] @@ -108,8 +112,9 @@ impl From for SyncSelectionProof { #[cfg(test)] mod test { use super::*; - use crate::{FixedBytesExtended, MainnetEthSpec}; + use crate::MainnetEthSpec; use eth2_interop_keypairs::keypair; + use fixed_bytes::FixedBytesExtended; #[test] fn proof_sign_and_verify() { diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_committee/sync_subnet_id.rs similarity index 90% rename from consensus/types/src/sync_subnet_id.rs rename to consensus/types/src/sync_committee/sync_subnet_id.rs index 3d0d853fcaa..6cb11f6b038 100644 --- a/consensus/types/src/sync_subnet_id.rs +++ b/consensus/types/src/sync_committee/sync_subnet_id.rs @@ -1,13 +1,16 @@ //! Identifies each sync committee subnet by an integer identifier. -use crate::EthSpec; -use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; +use std::{ + collections::HashSet, + fmt::{self, Display}, + ops::{Deref, DerefMut}, + sync::LazyLock, +}; + use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; -use ssz_types::typenum::Unsigned; -use std::collections::HashSet; -use std::fmt::{self, Display}; -use std::ops::{Deref, DerefMut}; -use std::sync::LazyLock; +use typenum::Unsigned; + +use crate::core::{EthSpec, consts::altair::SYNC_COMMITTEE_SUBNET_COUNT}; static SYNC_SUBNET_ID_TO_STRING: LazyLock> = LazyLock::new(|| { let mut v = Vec::with_capacity(SYNC_COMMITTEE_SUBNET_COUNT as usize); diff --git a/consensus/types/src/test_utils/generate_deterministic_keypairs.rs b/consensus/types/src/test_utils/generate_deterministic_keypairs.rs index f30afda257e..5ccd748c25c 100644 --- a/consensus/types/src/test_utils/generate_deterministic_keypairs.rs +++ b/consensus/types/src/test_utils/generate_deterministic_keypairs.rs @@ -1,7 +1,8 @@ -use crate::*; +use std::path::PathBuf; + +use bls::Keypair; use 
eth2_interop_keypairs::{keypair, keypairs_from_yaml_file}; use rayon::prelude::*; -use std::path::PathBuf; use tracing::debug; /// Generates `validator_count` keypairs where the secret key is derived solely from the index of diff --git a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs index 8f4908291ee..cf7b5df891a 100644 --- a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs +++ b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs @@ -1,11 +1,16 @@ -use rand::Rng; - +use bls::Signature; use kzg::{KzgCommitment, KzgProof}; +use rand::Rng; -use crate::beacon_block_body::KzgCommitments; -use crate::*; - -use super::*; +use crate::{ + block::{BeaconBlock, SignedBeaconBlock}, + core::{EthSpec, MainnetEthSpec}, + data::{Blob, BlobSidecar, BlobsList}, + execution::FullPayload, + fork::{ForkName, map_fork_name}, + kzg_ext::{KzgCommitments, KzgProofs}, + test_utils::TestRandom, +}; type BlobsBundle = (KzgCommitments, KzgProofs, BlobsList); @@ -73,6 +78,7 @@ pub fn generate_blobs(n_blobs: usize) -> Result, Stri mod test { use super::*; use rand::rng; + use ssz_types::FixedVector; #[test] fn test_verify_blob_inclusion_proof() { diff --git a/consensus/types/src/test_utils/mod.rs b/consensus/types/src/test_utils/mod.rs index 37d58d43420..c4409b43924 100644 --- a/consensus/types/src/test_utils/mod.rs +++ b/consensus/types/src/test_utils/mod.rs @@ -1,17 +1,5 @@ #![allow(clippy::arithmetic_side_effects)] -use std::fmt::Debug; - -pub use rand::{RngCore, SeedableRng}; -pub use rand_xorshift::XorShiftRng; - -pub use generate_deterministic_keypairs::generate_deterministic_keypair; -pub use generate_deterministic_keypairs::generate_deterministic_keypairs; -pub use generate_deterministic_keypairs::load_keypairs_from_yaml; -use ssz::{Decode, Encode, ssz_encode}; -pub use test_random::{TestRandom, test_random_instance}; -use tree_hash::TreeHash; - #[macro_use] mod macros; 
mod generate_deterministic_keypairs; @@ -19,6 +7,18 @@ mod generate_deterministic_keypairs; mod generate_random_block_and_blobs; mod test_random; +pub use generate_deterministic_keypairs::generate_deterministic_keypair; +pub use generate_deterministic_keypairs::generate_deterministic_keypairs; +pub use generate_deterministic_keypairs::load_keypairs_from_yaml; +pub use test_random::{TestRandom, test_random_instance}; + +pub use rand::{RngCore, SeedableRng}; +pub use rand_xorshift::XorShiftRng; + +use ssz::{Decode, Encode, ssz_encode}; +use std::fmt::Debug; +use tree_hash::TreeHash; + pub fn test_ssz_tree_hash_pair(v1: &T, v2: &U) where T: TreeHash + Encode + Decode + Debug + PartialEq, diff --git a/consensus/types/src/test_utils/test_random/address.rs b/consensus/types/src/test_utils/test_random/address.rs index 421801ce53c..2f601cb91ec 100644 --- a/consensus/types/src/test_utils/test_random/address.rs +++ b/consensus/types/src/test_utils/test_random/address.rs @@ -1,7 +1,7 @@ -use super::*; +use crate::{core::Address, test_utils::TestRandom}; impl TestRandom for Address { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut key_bytes = vec![0; 20]; rng.fill_bytes(&mut key_bytes); Address::from_slice(&key_bytes[..]) diff --git a/consensus/types/src/test_utils/test_random/aggregate_signature.rs b/consensus/types/src/test_utils/test_random/aggregate_signature.rs index 772f2844313..f9f3dd95677 100644 --- a/consensus/types/src/test_utils/test_random/aggregate_signature.rs +++ b/consensus/types/src/test_utils/test_random/aggregate_signature.rs @@ -1,7 +1,9 @@ -use super::*; +use bls::{AggregateSignature, Signature}; + +use crate::test_utils::TestRandom; impl TestRandom for AggregateSignature { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let signature = Signature::random_for_test(rng); let mut aggregate_signature = 
AggregateSignature::infinity(); aggregate_signature.add_assign(&signature); diff --git a/consensus/types/src/test_utils/test_random/bitfield.rs b/consensus/types/src/test_utils/test_random/bitfield.rs index e335ac7fe8b..762f41eb34a 100644 --- a/consensus/types/src/test_utils/test_random/bitfield.rs +++ b/consensus/types/src/test_utils/test_random/bitfield.rs @@ -1,8 +1,11 @@ -use super::*; use smallvec::smallvec; +use ssz_types::{BitList, BitVector}; +use typenum::Unsigned; + +use crate::test_utils::TestRandom; impl TestRandom for BitList { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let initial_len = std::cmp::max(1, N::to_usize().div_ceil(8)); let mut raw_bytes = smallvec![0; initial_len]; rng.fill_bytes(&mut raw_bytes); @@ -23,7 +26,7 @@ impl TestRandom for BitList { } impl TestRandom for BitVector { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut raw_bytes = smallvec![0; std::cmp::max(1, N::to_usize().div_ceil(8))]; rng.fill_bytes(&mut raw_bytes); // If N isn't divisible by 8 diff --git a/consensus/types/src/test_utils/test_random/hash256.rs b/consensus/types/src/test_utils/test_random/hash256.rs index 21d443c0e2a..4d7570fb55c 100644 --- a/consensus/types/src/test_utils/test_random/hash256.rs +++ b/consensus/types/src/test_utils/test_random/hash256.rs @@ -1,7 +1,7 @@ -use super::*; +use crate::{core::Hash256, test_utils::TestRandom}; impl TestRandom for Hash256 { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut key_bytes = vec![0; 32]; rng.fill_bytes(&mut key_bytes); Hash256::from_slice(&key_bytes[..]) diff --git a/consensus/types/src/test_utils/test_random/kzg_commitment.rs b/consensus/types/src/test_utils/test_random/kzg_commitment.rs index a4030f2b6a3..31e316a1987 100644 --- a/consensus/types/src/test_utils/test_random/kzg_commitment.rs 
+++ b/consensus/types/src/test_utils/test_random/kzg_commitment.rs @@ -1,4 +1,6 @@ -use super::*; +use kzg::KzgCommitment; + +use crate::test_utils::TestRandom; impl TestRandom for KzgCommitment { fn random_for_test(rng: &mut impl rand::RngCore) -> Self { diff --git a/consensus/types/src/test_utils/test_random/kzg_proof.rs b/consensus/types/src/test_utils/test_random/kzg_proof.rs index 7e771ca5660..4465d5ab39d 100644 --- a/consensus/types/src/test_utils/test_random/kzg_proof.rs +++ b/consensus/types/src/test_utils/test_random/kzg_proof.rs @@ -1,8 +1,9 @@ -use super::*; -use kzg::BYTES_PER_COMMITMENT; +use kzg::{BYTES_PER_COMMITMENT, KzgProof}; + +use crate::test_utils::TestRandom; impl TestRandom for KzgProof { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut bytes = [0; BYTES_PER_COMMITMENT]; rng.fill_bytes(&mut bytes); Self(bytes) diff --git a/consensus/types/src/test_utils/test_random/mod.rs b/consensus/types/src/test_utils/test_random/mod.rs new file mode 100644 index 00000000000..41812593fa7 --- /dev/null +++ b/consensus/types/src/test_utils/test_random/mod.rs @@ -0,0 +1,15 @@ +mod address; +mod aggregate_signature; +mod bitfield; +mod hash256; +mod kzg_commitment; +mod kzg_proof; +mod public_key; +mod public_key_bytes; +mod secret_key; +mod signature; +mod signature_bytes; +mod test_random; +mod uint256; + +pub use test_random::{TestRandom, test_random_instance}; diff --git a/consensus/types/src/test_utils/test_random/public_key.rs b/consensus/types/src/test_utils/test_random/public_key.rs index d33e9ac7043..9d287c23d73 100644 --- a/consensus/types/src/test_utils/test_random/public_key.rs +++ b/consensus/types/src/test_utils/test_random/public_key.rs @@ -1,7 +1,9 @@ -use super::*; +use bls::{PublicKey, SecretKey}; + +use crate::test_utils::TestRandom; impl TestRandom for PublicKey { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl 
rand::RngCore) -> Self { SecretKey::random_for_test(rng).public_key() } } diff --git a/consensus/types/src/test_utils/test_random/public_key_bytes.rs b/consensus/types/src/test_utils/test_random/public_key_bytes.rs index 6e5cafc4f03..587c3baf8fb 100644 --- a/consensus/types/src/test_utils/test_random/public_key_bytes.rs +++ b/consensus/types/src/test_utils/test_random/public_key_bytes.rs @@ -1,9 +1,9 @@ -use bls::PUBLIC_KEY_BYTES_LEN; +use bls::{PUBLIC_KEY_BYTES_LEN, PublicKey, PublicKeyBytes}; -use super::*; +use crate::test_utils::TestRandom; impl TestRandom for PublicKeyBytes { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { //50-50 chance for signature to be "valid" or invalid if bool::random_for_test(rng) { //valid signature diff --git a/consensus/types/src/test_utils/test_random/secret_key.rs b/consensus/types/src/test_utils/test_random/secret_key.rs index da1614aa24e..a8295d968af 100644 --- a/consensus/types/src/test_utils/test_random/secret_key.rs +++ b/consensus/types/src/test_utils/test_random/secret_key.rs @@ -1,7 +1,9 @@ -use super::*; +use bls::SecretKey; + +use crate::test_utils::TestRandom; impl TestRandom for SecretKey { - fn random_for_test(_rng: &mut impl RngCore) -> Self { + fn random_for_test(_rng: &mut impl rand::RngCore) -> Self { // TODO: Not deterministic generation. Using `SecretKey::deserialize` results in // `BlstError(BLST_BAD_ENCODING)`, need to debug with blst source on what encoding expects. 
SecretKey::random() diff --git a/consensus/types/src/test_utils/test_random/signature.rs b/consensus/types/src/test_utils/test_random/signature.rs index 8bc0d711103..006aba9650a 100644 --- a/consensus/types/src/test_utils/test_random/signature.rs +++ b/consensus/types/src/test_utils/test_random/signature.rs @@ -1,7 +1,9 @@ -use super::*; +use bls::Signature; + +use crate::test_utils::TestRandom; impl TestRandom for Signature { - fn random_for_test(_rng: &mut impl RngCore) -> Self { + fn random_for_test(_rng: &mut impl rand::RngCore) -> Self { // TODO: `SecretKey::random_for_test` does not return a deterministic signature. Since this // signature will not pass verification we could just return the generator point or the // generator point multiplied by a random scalar if we want disctint signatures. diff --git a/consensus/types/src/test_utils/test_random/signature_bytes.rs b/consensus/types/src/test_utils/test_random/signature_bytes.rs index 2117a482321..6992e574679 100644 --- a/consensus/types/src/test_utils/test_random/signature_bytes.rs +++ b/consensus/types/src/test_utils/test_random/signature_bytes.rs @@ -1,9 +1,9 @@ -use bls::SIGNATURE_BYTES_LEN; +use bls::{SIGNATURE_BYTES_LEN, Signature, SignatureBytes}; -use super::*; +use crate::test_utils::TestRandom; impl TestRandom for SignatureBytes { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { //50-50 chance for signature to be "valid" or invalid if bool::random_for_test(rng) { //valid signature diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random/test_random.rs similarity index 90% rename from consensus/types/src/test_utils/test_random.rs rename to consensus/types/src/test_utils/test_random/test_random.rs index 7c8f86e14df..101fbec51b0 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random/test_random.rs @@ -1,23 +1,10 @@ -use crate::*; -use 
rand::RngCore; -use rand::SeedableRng; +use std::{marker::PhantomData, sync::Arc}; + +use rand::{RngCore, SeedableRng}; use rand_xorshift::XorShiftRng; use smallvec::{SmallVec, smallvec}; -use std::marker::PhantomData; -use std::sync::Arc; - -mod address; -mod aggregate_signature; -mod bitfield; -mod hash256; -mod kzg_commitment; -mod kzg_proof; -mod public_key; -mod public_key_bytes; -mod secret_key; -mod signature; -mod signature_bytes; -mod uint256; +use ssz_types::VariableList; +use typenum::Unsigned; pub fn test_random_instance() -> T { let mut rng = XorShiftRng::from_seed([0x42; 16]); diff --git a/consensus/types/src/test_utils/test_random/uint256.rs b/consensus/types/src/test_utils/test_random/uint256.rs index 30077f0e0f6..eccf4765955 100644 --- a/consensus/types/src/test_utils/test_random/uint256.rs +++ b/consensus/types/src/test_utils/test_random/uint256.rs @@ -1,7 +1,7 @@ -use super::*; +use crate::{core::Uint256, test_utils::TestRandom}; impl TestRandom for Uint256 { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut key_bytes = [0; 32]; rng.fill_bytes(&mut key_bytes); Self::from_le_slice(&key_bytes[..]) diff --git a/consensus/types/src/validator/mod.rs b/consensus/types/src/validator/mod.rs new file mode 100644 index 00000000000..8a67407821c --- /dev/null +++ b/consensus/types/src/validator/mod.rs @@ -0,0 +1,9 @@ +mod proposer_preparation_data; +mod validator; +mod validator_registration_data; +mod validator_subscription; + +pub use proposer_preparation_data::ProposerPreparationData; +pub use validator::{Validator, is_compounding_withdrawal_credential}; +pub use validator_registration_data::{SignedValidatorRegistrationData, ValidatorRegistrationData}; +pub use validator_subscription::ValidatorSubscription; diff --git a/consensus/types/src/proposer_preparation_data.rs b/consensus/types/src/validator/proposer_preparation_data.rs similarity index 95% rename from 
consensus/types/src/proposer_preparation_data.rs rename to consensus/types/src/validator/proposer_preparation_data.rs index 477fb3b9d15..8ef675de4fd 100644 --- a/consensus/types/src/proposer_preparation_data.rs +++ b/consensus/types/src/validator/proposer_preparation_data.rs @@ -1,6 +1,7 @@ -use crate::*; use serde::{Deserialize, Serialize}; +use crate::core::Address; + /// A proposer preparation, created when a validator prepares the beacon node for potential proposers /// by supplying information required when proposing blocks for the given validators. #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator/validator.rs similarity index 97% rename from consensus/types/src/validator.rs rename to consensus/types/src/validator/validator.rs index dec8bba627f..7898ab9073a 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator/validator.rs @@ -1,13 +1,19 @@ -use crate::context_deserialize; -use crate::{ - Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, ForkName, - Hash256, PublicKeyBytes, test_utils::TestRandom, -}; +use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::Checkpoint, + core::{Address, ChainSpec, Epoch, EthSpec, Hash256}, + fork::ForkName, + state::BeaconState, + test_utils::TestRandom, +}; + /// Information about a `BeaconChain` validator. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/validator_registration_data.rs b/consensus/types/src/validator/validator_registration_data.rs similarity index 93% rename from consensus/types/src/validator_registration_data.rs rename to consensus/types/src/validator/validator_registration_data.rs index 345771074c5..a0a1df7dc54 100644 --- a/consensus/types/src/validator_registration_data.rs +++ b/consensus/types/src/validator/validator_registration_data.rs @@ -1,8 +1,10 @@ -use crate::*; +use bls::{PublicKeyBytes, Signature}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use tree_hash_derive::TreeHash; +use crate::core::{Address, ChainSpec, SignedRoot}; + /// Validator registration, for use in interacting with servers implementing the builder API. #[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] pub struct SignedValidatorRegistrationData { diff --git a/consensus/types/src/validator_subscription.rs b/consensus/types/src/validator/validator_subscription.rs similarity index 93% rename from consensus/types/src/validator_subscription.rs rename to consensus/types/src/validator/validator_subscription.rs index 62932638ec1..92fb200e10d 100644 --- a/consensus/types/src/validator_subscription.rs +++ b/consensus/types/src/validator/validator_subscription.rs @@ -1,7 +1,8 @@ -use crate::*; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use crate::{attestation::CommitteeIndex, core::Slot}; + /// A validator subscription, created when a validator subscribes to a slot to perform optional aggregation /// duties. 
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode, Eq, PartialOrd, Ord)] diff --git a/consensus/types/src/withdrawal/mod.rs b/consensus/types/src/withdrawal/mod.rs new file mode 100644 index 00000000000..bac80d00bed --- /dev/null +++ b/consensus/types/src/withdrawal/mod.rs @@ -0,0 +1,9 @@ +mod pending_partial_withdrawal; +mod withdrawal; +mod withdrawal_credentials; +mod withdrawal_request; + +pub use pending_partial_withdrawal::PendingPartialWithdrawal; +pub use withdrawal::{Withdrawal, Withdrawals}; +pub use withdrawal_credentials::WithdrawalCredentials; +pub use withdrawal_request::WithdrawalRequest; diff --git a/consensus/types/src/pending_partial_withdrawal.rs b/consensus/types/src/withdrawal/pending_partial_withdrawal.rs similarity index 85% rename from consensus/types/src/pending_partial_withdrawal.rs rename to consensus/types/src/withdrawal/pending_partial_withdrawal.rs index e9b10f79b5f..cd866369a47 100644 --- a/consensus/types/src/pending_partial_withdrawal.rs +++ b/consensus/types/src/withdrawal/pending_partial_withdrawal.rs @@ -1,11 +1,11 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{Epoch, ForkName}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Epoch, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal/withdrawal.rs similarity index 73% rename from consensus/types/src/withdrawal.rs rename to consensus/types/src/withdrawal/withdrawal.rs index ef4a1f285d3..d75bd4f501f 100644 --- a/consensus/types/src/withdrawal.rs +++ b/consensus/types/src/withdrawal/withdrawal.rs @@ -1,10 +1,16 @@ -use 
crate::test_utils::TestRandom; -use crate::*; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, EthSpec}, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, @@ -21,6 +27,8 @@ pub struct Withdrawal { pub amount: u64, } +pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/withdrawal_credentials.rs b/consensus/types/src/withdrawal/withdrawal_credentials.rs similarity index 91% rename from consensus/types/src/withdrawal_credentials.rs rename to consensus/types/src/withdrawal/withdrawal_credentials.rs index 52d51ed559c..b732222ca1b 100644 --- a/consensus/types/src/withdrawal_credentials.rs +++ b/consensus/types/src/withdrawal/withdrawal_credentials.rs @@ -1,5 +1,6 @@ -use crate::*; -use bls::get_withdrawal_credentials; +use bls::{PublicKey, get_withdrawal_credentials}; + +use crate::core::{Address, ChainSpec, Hash256}; pub struct WithdrawalCredentials(Hash256); @@ -27,7 +28,7 @@ impl From for Hash256 { #[cfg(test)] mod test { use super::*; - use crate::test_utils::generate_deterministic_keypair; + use crate::{EthSpec, MainnetEthSpec, test_utils::generate_deterministic_keypair}; use std::str::FromStr; #[test] diff --git a/consensus/types/src/withdrawal_request.rs b/consensus/types/src/withdrawal/withdrawal_request.rs similarity index 87% rename from consensus/types/src/withdrawal_request.rs rename to consensus/types/src/withdrawal/withdrawal_request.rs index c08921a68c4..98a40016f9f 100644 --- a/consensus/types/src/withdrawal_request.rs +++ b/consensus/types/src/withdrawal/withdrawal_request.rs @@ -1,12 +1,13 @@ -use 
crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{Address, ForkName, PublicKeyBytes}; +use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Address, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/beacon_state/committee_cache/tests.rs b/consensus/types/tests/committee_cache.rs similarity index 97% rename from consensus/types/src/beacon_state/committee_cache/tests.rs rename to consensus/types/tests/committee_cache.rs index 1d2ca4ccdb7..751ef05d299 100644 --- a/consensus/types/src/beacon_state/committee_cache/tests.rs +++ b/consensus/types/tests/committee_cache.rs @@ -1,9 +1,14 @@ #![cfg(test)] -use crate::test_utils::*; -use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; -use beacon_chain::types::*; use std::sync::LazyLock; + +use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use bls::Keypair; +use fixed_bytes::FixedBytesExtended; +use milhouse::Vector; use swap_or_not_shuffle::shuffle_list; +use types::*; + +use crate::test_utils::generate_deterministic_keypairs; pub const VALIDATOR_COUNT: usize = 16; diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/tests/state.rs similarity index 97% rename from consensus/types/src/beacon_state/tests.rs rename to consensus/types/tests/state.rs index e5b05a4a5bd..63ab3b8084b 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/tests/state.rs @@ -1,15 +1,17 @@ #![cfg(test)] -use crate::test_utils::*; -use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; -use beacon_chain::types::{ - BeaconState, 
BeaconStateAltair, BeaconStateBase, BeaconStateError, ChainSpec, Domain, Epoch, - EthSpec, FixedBytesExtended, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, RelativeEpoch, - Slot, Vector, test_utils::TestRandom, -}; -use ssz::Encode; use std::ops::Mul; use std::sync::LazyLock; + +use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use bls::Keypair; +use fixed_bytes::FixedBytesExtended; +use milhouse::Vector; +use rand::SeedableRng; +use rand_xorshift::XorShiftRng; +use ssz::Encode; use swap_or_not_shuffle::compute_shuffled_index; +use types::test_utils::{TestRandom, generate_deterministic_keypairs}; +use types::*; pub const MAX_VALIDATOR_COUNT: usize = 129; pub const SLOT_OFFSET: Slot = Slot::new(1); diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 04eb41960ba..43e361b60df 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -26,6 +26,7 @@ eth2_wallet = { workspace = true } ethereum_hashing = { workspace = true } ethereum_ssz = { workspace = true } execution_layer = { workspace = true } +fixed_bytes = { workspace = true } hex = { workspace = true } lighthouse_network = { workspace = true } lighthouse_version = { workspace = true } diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index 71186904d0b..620539a95f1 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -1,4 +1,5 @@ use clap::ArgMatches; +use fixed_bytes::FixedBytesExtended; use lighthouse_network::{ NETWORK_KEY_FILENAME, NetworkConfig, discovery::{CombinedKey, ENR_FILENAME, build_enr}, @@ -9,7 +10,7 @@ use std::io::Write; use std::path::PathBuf; use std::{fs, net::Ipv4Addr}; use std::{fs::File, num::NonZeroU16}; -use types::{ChainSpec, EnrForkId, Epoch, EthSpec, FixedBytesExtended, Hash256}; +use types::{ChainSpec, EnrForkId, Epoch, EthSpec, Hash256}; pub fn run(matches: &ArgMatches, spec: &ChainSpec) -> Result<(), String> { let ip: Ipv4Addr = clap_utils::parse_required(matches, "ip")?; diff --git 
a/lcli/src/http_sync.rs b/lcli/src/http_sync.rs index dd941cda74e..6a0eb2a0e1d 100644 --- a/lcli/src/http_sync.rs +++ b/lcli/src/http_sync.rs @@ -132,7 +132,6 @@ async fn get_block_from_source( let (kzg_proofs, blobs): (Vec<_>, Vec<_>) = blobs_from_source .iter() - .cloned() .map(|sidecar| (sidecar.kzg_proof, sidecar.blob.clone())) .unzip(); diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index 0b945bcb2d4..9bfcae85e57 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -18,6 +18,7 @@ use account_utils::{ eth2_keystore::KeystoreBuilder, validator_definitions::{SigningDefinition, ValidatorDefinition, ValidatorDefinitions}, }; +use bls::{Keypair, PublicKey}; use slashing_protection::{SLASHING_PROTECTION_FILENAME, SlashingDatabase}; use std::env; use std::fs::{self, File}; @@ -26,7 +27,6 @@ use std::path::{Path, PathBuf}; use std::process::{Child, Command, Output, Stdio}; use std::str::from_utf8; use tempfile::{TempDir, tempdir}; -use types::{Keypair, PublicKey}; use validator_dir::ValidatorDir; use zeroize::Zeroizing; diff --git a/lighthouse/tests/validator_manager.rs b/lighthouse/tests/validator_manager.rs index 99afa7b6824..d6d720a561d 100644 --- a/lighthouse/tests/validator_manager.rs +++ b/lighthouse/tests/validator_manager.rs @@ -1,3 +1,4 @@ +use bls::PublicKeyBytes; use eth2::SensitiveUrl; use serde::de::DeserializeOwned; use std::fs; diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index 9d9844c4c41..6260f910192 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -21,7 +21,7 @@ cd ./scripts/local_testnet ``` It will build a Lighthouse docker image from the root of the directory and will take an approximately 12 minutes to complete. Once built, the testing will be started automatically. You will see a list of services running and "Started!" at the end. 
-You can also select your own Lighthouse docker image to use by specifying it in `network_params.yml` under the `cl_image` key. +You can also select your own Lighthouse docker image to use by specifying it in `network_params.yaml` under the `cl_image` key. Full configuration reference for Kurtosis is specified [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration). To view all running services: diff --git a/scripts/local_testnet/network_params.yaml b/scripts/local_testnet/network_params.yaml index cdfacbced4b..a048674e630 100644 --- a/scripts/local_testnet/network_params.yaml +++ b/scripts/local_testnet/network_params.yaml @@ -1,19 +1,37 @@ # Full configuration reference [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration). participants: - - el_type: geth + - cl_type: lighthouse + cl_image: lighthouse:local + el_type: geth el_image: ethereum/client-go:latest - cl_type: lighthouse + supernode: true + cl_extra_params: + - --target-peers=3 + count: 2 + - cl_type: lighthouse cl_image: lighthouse:local + el_type: geth + el_image: ethereum/client-go:latest + supernode: false cl_extra_params: - --target-peers=3 - count: 4 + count: 2 network_params: - electra_fork_epoch: 0 - seconds_per_slot: 3 -global_log_level: debug + fulu_fork_epoch: 0 + seconds_per_slot: 6 snooper_enabled: false +global_log_level: debug additional_services: - dora - spamoor - prometheus_grafana - tempo +spamoor_params: + image: ethpandaops/spamoor:master + spammers: + - scenario: eoatx + config: + throughput: 200 + - scenario: blobs + config: + throughput: 20 \ No newline at end of file diff --git a/scripts/local_testnet/network_params_das.yaml b/scripts/local_testnet/network_params_das.yaml deleted file mode 100644 index e3bc5131531..00000000000 --- a/scripts/local_testnet/network_params_das.yaml +++ /dev/null @@ -1,41 +0,0 @@ -participants: - - cl_type: lighthouse - cl_image: lighthouse:local - el_type: geth - el_image: 
ethpandaops/geth:master - supernode: true - cl_extra_params: - # Note: useful for testing range sync (only produce block if the node is in sync to prevent forking) - - --sync-tolerance-epochs=0 - - --target-peers=3 - count: 2 - - cl_type: lighthouse - cl_image: lighthouse:local - el_type: geth - el_image: ethpandaops/geth:master - supernode: false - cl_extra_params: - # Note: useful for testing range sync (only produce block if the node is in sync to prevent forking) - - --sync-tolerance-epochs=0 - - --target-peers=3 - count: 2 -network_params: - electra_fork_epoch: 0 - fulu_fork_epoch: 1 - seconds_per_slot: 6 -snooper_enabled: false -global_log_level: debug -additional_services: - - dora - - spamoor - - prometheus_grafana - - tempo -spamoor_params: - image: ethpandaops/spamoor:master - spammers: - - scenario: eoatx - config: - throughput: 200 - - scenario: blobs - config: - throughput: 20 \ No newline at end of file diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index 442e6fd98d9..8d8b33526d3 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -78,6 +78,11 @@ if [ "$RUN_ASSERTOOR_TESTS" = true ]; then echo "Assertoor has been added to $NETWORK_PARAMS_FILE." fi +if [ "$KEEP_ENCLAVE" = false ]; then + # Stop local testnet + kurtosis enclave rm -f $ENCLAVE_NAME 2>/dev/null || true +fi + if [ "$BUILD_IMAGE" = true ]; then echo "Building Lighthouse Docker image." ROOT_DIR="$SCRIPT_DIR/../.." @@ -86,11 +91,6 @@ else echo "Not rebuilding Lighthouse Docker image." fi -if [ "$KEEP_ENCLAVE" = false ]; then - # Stop local testnet - kurtosis enclave rm -f $ENCLAVE_NAME 2>/dev/null || true -fi - kurtosis run --enclave $ENCLAVE_NAME github.com/ethpandaops/ethereum-package@$ETHEREUM_PKG_VERSION --args-file $NETWORK_PARAMS_FILE echo "Started!" 
diff --git a/scripts/tests/checkpoint-sync-config-devnet.yaml b/scripts/tests/checkpoint-sync-config-devnet.yaml deleted file mode 100644 index 2392011ed33..00000000000 --- a/scripts/tests/checkpoint-sync-config-devnet.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Kurtosis config file to checkpoint sync to a running devnet supported by ethPandaOps and `ethereum-package`. -participants: - - cl_type: lighthouse - cl_image: lighthouse:local - el_type: geth - el_image: ethpandaops/geth:master - cl_extra_params: - - --disable-backfill-rate-limiting - supernode: true - - cl_type: lighthouse - cl_image: lighthouse:local - el_type: geth - el_image: ethpandaops/geth:master - cl_extra_params: - - --disable-backfill-rate-limiting - supernode: false - -checkpoint_sync_enabled: true -checkpoint_sync_url: "https://checkpoint-sync.fusaka-devnet-3.ethpandaops.io" - -global_log_level: debug - -network_params: - network: fusaka-devnet-3 diff --git a/scripts/tests/genesis-sync-config-electra.yaml b/scripts/tests/genesis-sync-config-electra.yaml index 153f754c94a..1d1ed4d3152 100644 --- a/scripts/tests/genesis-sync-config-electra.yaml +++ b/scripts/tests/genesis-sync-config-electra.yaml @@ -6,15 +6,14 @@ participants: # nodes without validators, used for testing sync. 
- cl_type: lighthouse cl_image: lighthouse:local - supernode: true # no supernode in Electra, this is for future proof validator_count: 0 - cl_type: lighthouse cl_image: lighthouse:local - supernode: false validator_count: 0 network_params: seconds_per_slot: 6 electra_fork_epoch: 0 + fulu_fork_epoch: 100000 # a really big number so this test stays in electra preset: "minimal" additional_services: - tx_fuzz diff --git a/scripts/tests/genesis-sync-config-fulu.yaml b/scripts/tests/genesis-sync-config-fulu.yaml index 98dc8751d62..6d2c2647a90 100644 --- a/scripts/tests/genesis-sync-config-fulu.yaml +++ b/scripts/tests/genesis-sync-config-fulu.yaml @@ -21,8 +21,7 @@ participants: validator_count: 0 network_params: seconds_per_slot: 6 - electra_fork_epoch: 0 - fulu_fork_epoch: 1 + fulu_fork_epoch: 0 preset: "minimal" additional_services: - tx_fuzz diff --git a/scripts/tests/network_params.yaml b/scripts/tests/network_params.yaml index 0fda1aa34ba..35916ac1e4e 100644 --- a/scripts/tests/network_params.yaml +++ b/scripts/tests/network_params.yaml @@ -6,9 +6,10 @@ participants: cl_image: lighthouse:local cl_extra_params: - --target-peers=3 + supernode: true count: 4 network_params: - electra_fork_epoch: 0 + fulu_fork_epoch: 0 seconds_per_slot: 3 num_validator_keys_per_node: 20 global_log_level: debug diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 94d048ef72e..a068b2e8856 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -14,11 +14,13 @@ portable = ["types/portable"] [dependencies] bincode = { workspace = true } +bls = { workspace = true } byteorder = { workspace = true } educe = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } filesystem = { workspace = true } +fixed_bytes = { workspace = true } flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } 
lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } @@ -38,6 +40,7 @@ strum = { workspace = true } tracing = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } +typenum = { workspace = true } types = { workspace = true } [dev-dependencies] diff --git a/slasher/src/attester_record.rs b/slasher/src/attester_record.rs index 67145193acc..db326a9d80b 100644 --- a/slasher/src/attester_record.rs +++ b/slasher/src/attester_record.rs @@ -1,5 +1,7 @@ use crate::{Error, database::IndexedAttestationId}; +use bls::AggregateSignature; use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; use std::borrow::Cow; use std::sync::{ Arc, @@ -7,7 +9,7 @@ use std::sync::{ }; use tree_hash::TreeHash as _; use tree_hash_derive::TreeHash; -use types::{AggregateSignature, EthSpec, Hash256, IndexedAttestation, VariableList}; +use types::{EthSpec, Hash256, IndexedAttestation}; #[derive(Debug, Clone, Copy)] pub struct AttesterRecord { diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 2df2849612e..80d073a81c6 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -7,6 +7,7 @@ use crate::{ AttesterRecord, AttesterSlashingStatus, CompactAttesterRecord, Config, Database, Error, ProposerSlashingStatus, metrics, }; +use bls::AggregateSignature; use byteorder::{BigEndian, ByteOrder}; use interface::{Environment, OpenDatabases, RwTransaction}; use lru::LruCache; @@ -14,15 +15,16 @@ use parking_lot::Mutex; use serde::de::DeserializeOwned; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; use std::borrow::{Borrow, Cow}; use std::marker::PhantomData; use std::sync::Arc; use tracing::info; use tree_hash::TreeHash; use types::{ - AggregateSignature, AttestationData, ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, + AttestationData, ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, 
IndexedAttestationBase, IndexedAttestationElectra, ProposerSlashing, SignedBeaconBlockHeader, - Slot, VariableList, + Slot, }; /// Current database schema version, to check compatibility of on-disk DB with software. @@ -860,7 +862,8 @@ impl SlasherDB { #[cfg(test)] mod test { use super::*; - use types::{Checkpoint, ForkName, MainnetEthSpec, Unsigned}; + use typenum::Unsigned; + use types::{Checkpoint, ForkName, MainnetEthSpec}; type E = MainnetEthSpec; diff --git a/slasher/src/test_utils.rs b/slasher/src/test_utils.rs index bbbadac7618..20d1ee92175 100644 --- a/slasher/src/test_utils.rs +++ b/slasher/src/test_utils.rs @@ -1,10 +1,11 @@ +use bls::{AggregateSignature, Signature}; +use fixed_bytes::FixedBytesExtended; use std::collections::HashSet; use std::sync::Arc; use types::{ - AggregateSignature, AttestationData, AttesterSlashing, AttesterSlashingBase, - AttesterSlashingElectra, BeaconBlockHeader, ChainSpec, Checkpoint, Epoch, EthSpec, - FixedBytesExtended, Hash256, IndexedAttestation, MainnetEthSpec, Signature, - SignedBeaconBlockHeader, Slot, + AttestationData, AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, + BeaconBlockHeader, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, IndexedAttestation, + MainnetEthSpec, SignedBeaconBlockHeader, Slot, indexed_attestation::{IndexedAttestationBase, IndexedAttestationElectra}, }; diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 581785e2a97..cef201ee91d 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -27,14 +27,17 @@ fs2 = { workspace = true } hex = { workspace = true } kzg = { workspace = true } logging = { workspace = true } +milhouse = { workspace = true } rayon = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_repr = { workspace = true } serde_yaml = { workspace = true } snap = { workspace = true } +ssz_types = { workspace = true } state_processing = { workspace = true } swap_or_not_shuffle = { 
workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } +typenum = { workspace = true } types = { workspace = true } diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index 1103d2fe822..52f5333df1a 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -1,11 +1,12 @@ use super::*; use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use serde::Deserialize; +use ssz_types::FixedVector; use tree_hash::Hash256; +use typenum::Unsigned; use types::{ BeaconBlockBody, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, - BeaconBlockBodyFulu, BeaconBlockBodyGloas, BeaconState, FixedVector, FullPayload, Unsigned, - light_client_update, + BeaconBlockBodyFulu, BeaconBlockBodyGloas, BeaconState, FullPayload, light_client_update, }; #[derive(Debug, Clone, Deserialize)] diff --git a/testing/ef_tests/src/cases/ssz_generic.rs b/testing/ef_tests/src/cases/ssz_generic.rs index 8742f8a1409..1dd37a22eed 100644 --- a/testing/ef_tests/src/cases/ssz_generic.rs +++ b/testing/ef_tests/src/cases/ssz_generic.rs @@ -5,12 +5,14 @@ use crate::cases::common::{DecimalU128, DecimalU256, SszStaticType}; use crate::cases::ssz_static::{check_serialization, check_tree_hash}; use crate::decode::{context_yaml_decode_file, log_file_access, snappy_decode_file}; use context_deserialize::{ContextDeserialize, context_deserialize}; +use milhouse::Vector; use serde::{Deserialize, Deserializer, de::Error as SerdeError}; use ssz_derive::{Decode, Encode}; +use ssz_types::{BitList, BitVector, FixedVector, VariableList}; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -use types::typenum::*; -use types::{BitList, BitVector, FixedVector, ForkName, VariableList, Vector}; +use typenum::*; +use types::ForkName; #[derive(Debug, Clone, Deserialize)] #[context_deserialize(ForkName)] diff --git 
a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 089e4464cd7..0cec69c97e5 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -1,6 +1,7 @@ #![cfg(feature = "ef_tests")] use ef_tests::*; +use typenum::Unsigned; use types::*; // Check that the hand-computed multiplications on EthSpec are correctly computed. diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 74bf43e3ae0..034b6c5c8a0 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -7,14 +7,16 @@ edition = { workspace = true } portable = ["types/portable"] [dependencies] -alloy-network = "1.0" +alloy-network = { workspace = true } alloy-primitives = { workspace = true } -alloy-provider = "1.0" +alloy-provider = { workspace = true } alloy-rpc-types-eth = { workspace = true } -alloy-signer-local = "1.0" +alloy-signer-local = { workspace = true } async-channel = { workspace = true } +bls = { workspace = true } deposit_contract = { workspace = true } execution_layer = { workspace = true } +fixed_bytes = { workspace = true } fork_choice = { workspace = true } futures = { workspace = true } hex = { workspace = true } @@ -26,4 +28,5 @@ serde_json = { workspace = true } task_executor = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } +typenum = { workspace = true } types = { workspace = true } diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 57501c6ee2c..8413da4c5ee 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -6,11 +6,13 @@ use alloy_network::{EthereumWallet, TransactionBuilder}; use alloy_primitives::Address as AlloyAddress; use alloy_provider::{Provider, ProviderBuilder}; use alloy_signer_local::PrivateKeySigner; +use bls::PublicKeyBytes; use 
execution_layer::test_utils::DEFAULT_GAS_LIMIT; use execution_layer::{ BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, PayloadParameters, PayloadStatus, }; +use fixed_bytes::FixedBytesExtended; use fork_choice::ForkchoiceUpdateParameters; use reqwest::{Client, header::CONTENT_TYPE}; use sensitive_url::SensitiveUrl; @@ -22,8 +24,9 @@ use tokio::time::sleep; use types::payload::BlockProductionVersion; use types::{ Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, - FixedBytesExtended, ForkName, Hash256, MainnetEthSpec, PublicKeyBytes, Slot, Uint256, + ForkName, Hash256, MainnetEthSpec, Slot, Uint256, }; + const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(60); const TEST_FORK: ForkName = ForkName::Capella; diff --git a/testing/execution_engine_integration/src/transactions.rs b/testing/execution_engine_integration/src/transactions.rs index fe36a1bf67f..8cd63ce307a 100644 --- a/testing/execution_engine_integration/src/transactions.rs +++ b/testing/execution_engine_integration/src/transactions.rs @@ -1,8 +1,10 @@ use alloy_network::TransactionBuilder; use alloy_primitives::{Address, U256}; use alloy_rpc_types_eth::{AccessList, TransactionRequest}; +use bls::{Keypair, Signature}; use deposit_contract::{BYTECODE, CONTRACT_DEPLOY_GAS, DEPOSIT_GAS, encode_eth1_tx_data}; -use types::{DepositData, EthSpec, FixedBytesExtended, Hash256, Keypair, Signature}; +use fixed_bytes::FixedBytesExtended; +use types::{DepositData, EthSpec, Hash256}; /// Hardcoded deposit contract address based on sender address and nonce pub const DEPOSIT_CONTRACT_ADDRESS: &str = "64f43BEc7F86526686C931d65362bB8698872F90"; diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index 54035f2e827..a1b1b6f95d2 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -20,4 +20,5 @@ serde_json = { workspace = true } tokio = { workspace = true } tracing = { workspace = 
true } tracing-subscriber = { workspace = true } +typenum = { workspace = true } types = { workspace = true } diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 1240785121a..35200692c32 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -1,7 +1,8 @@ use crate::local_network::LocalNetwork; use node_test_rig::eth2::types::{BlockId, FinalityCheckpointsData, StateId}; use std::time::Duration; -use types::{Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Slot, Unsigned}; +use typenum::Unsigned; +use types::{Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Slot}; /// Checks that all of the validators have on-boarded by the start of the second eth1 voting /// period. diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index 66376f0a51d..437aa539f41 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -10,7 +10,9 @@ portable = ["beacon_chain/portable"] [dependencies] beacon_chain = { workspace = true } +bls = { workspace = true } ethereum_ssz = { workspace = true } +fixed_bytes = { workspace = true } state_processing = { workspace = true } tokio = { workspace = true } types = { workspace = true } diff --git a/testing/state_transition_vectors/src/main.rs b/testing/state_transition_vectors/src/main.rs index 4a829b68035..80c30489b7c 100644 --- a/testing/state_transition_vectors/src/main.rs +++ b/testing/state_transition_vectors/src/main.rs @@ -3,6 +3,8 @@ mod macros; mod exit; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use bls::Keypair; +use fixed_bytes::FixedBytesExtended; use ssz::Encode; use std::env; use std::fs::{self, File}; @@ -10,10 +12,8 @@ use std::io::Write; use std::path::{Path, PathBuf}; use std::process::exit; use std::sync::LazyLock; -use types::{ - BeaconState, EthSpec, Keypair, SignedBeaconBlock, test_utils::generate_deterministic_keypairs, -}; -use 
types::{FixedBytesExtended, Hash256, MainnetEthSpec, Slot}; +use types::{BeaconState, EthSpec, SignedBeaconBlock, test_utils::generate_deterministic_keypairs}; +use types::{Hash256, MainnetEthSpec, Slot}; type E = MainnetEthSpec; diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml index b4637b4030f..3ef2e0f7f7a 100644 --- a/testing/web3signer_tests/Cargo.toml +++ b/testing/web3signer_tests/Cargo.toml @@ -9,10 +9,12 @@ edition = { workspace = true } [dev-dependencies] account_utils = { workspace = true } async-channel = { workspace = true } +bls = { workspace = true } environment = { workspace = true } eth2 = { workspace = true } eth2_keystore = { workspace = true } eth2_network_config = { workspace = true } +fixed_bytes = { workspace = true } futures = { workspace = true } initialized_validators = { workspace = true } lighthouse_validator_store = { workspace = true } @@ -24,6 +26,7 @@ serde_json = { workspace = true } serde_yaml = { workspace = true } slashing_protection = { workspace = true } slot_clock = { workspace = true } +ssz_types = { workspace = true } task_executor = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 15ec745e3f1..541f9b2b4a7 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -20,9 +20,11 @@ mod tests { use account_utils::validator_definitions::{ SigningDefinition, ValidatorDefinition, ValidatorDefinitions, Web3SignerDefinition, }; + use bls::{AggregateSignature, Keypair, PublicKeyBytes, SecretKey, Signature}; use eth2::types::FullBlockContents; use eth2_keystore::KeystoreBuilder; use eth2_network_config::Eth2NetworkConfig; + use fixed_bytes::FixedBytesExtended; use initialized_validators::{ InitializedValidators, load_pem_certificate, load_pkcs12_identity, }; @@ -32,6 +34,7 @@ mod tests { use serde::Serialize; use 
slashing_protection::{SLASHING_PROTECTION_FILENAME, SlashingDatabase}; use slot_clock::{SlotClock, TestingSlotClock}; + use ssz_types::BitList; use std::env; use std::fmt::Debug; use std::fs::{self, File}; diff --git a/validator_client/beacon_node_fallback/Cargo.toml b/validator_client/beacon_node_fallback/Cargo.toml index 5fe2af4cb0b..481aece48b2 100644 --- a/validator_client/beacon_node_fallback/Cargo.toml +++ b/validator_client/beacon_node_fallback/Cargo.toml @@ -9,6 +9,7 @@ name = "beacon_node_fallback" path = "src/lib.rs" [dependencies] +bls = { workspace = true } clap = { workspace = true } eth2 = { workspace = true } futures = { workspace = true } diff --git a/validator_client/beacon_node_fallback/src/lib.rs b/validator_client/beacon_node_fallback/src/lib.rs index 0f13d8c8b7b..6abcd44cc94 100644 --- a/validator_client/beacon_node_fallback/src/lib.rs +++ b/validator_client/beacon_node_fallback/src/lib.rs @@ -773,12 +773,13 @@ impl ApiTopic { mod tests { use super::*; use crate::beacon_node_health::BeaconNodeHealthTier; + use bls::Signature; use eth2::SensitiveUrl; use eth2::Timeouts; use slot_clock::TestingSlotClock; use strum::VariantNames; use types::{BeaconBlockDeneb, MainnetEthSpec, Slot}; - use types::{EmptyBlock, Signature, SignedBeaconBlockDeneb, SignedBlindedBeaconBlock}; + use types::{EmptyBlock, SignedBeaconBlockDeneb, SignedBlindedBeaconBlock}; use validator_test_rig::mock_beacon_node::MockBeaconNode; type E = MainnetEthSpec; diff --git a/validator_client/doppelganger_service/Cargo.toml b/validator_client/doppelganger_service/Cargo.toml index e5b183570de..66b27eb39d5 100644 --- a/validator_client/doppelganger_service/Cargo.toml +++ b/validator_client/doppelganger_service/Cargo.toml @@ -6,6 +6,7 @@ authors = ["Sigma Prime "] [dependencies] beacon_node_fallback = { workspace = true } +bls = { workspace = true } environment = { workspace = true } eth2 = { workspace = true } logging = { workspace = true } diff --git 
a/validator_client/doppelganger_service/src/lib.rs b/validator_client/doppelganger_service/src/lib.rs index b0ed78e9965..600ae82c546 100644 --- a/validator_client/doppelganger_service/src/lib.rs +++ b/validator_client/doppelganger_service/src/lib.rs @@ -30,6 +30,7 @@ //! Doppelganger protection is a best-effort, last-line-of-defence mitigation. Do not rely upon it. use beacon_node_fallback::BeaconNodeFallback; +use bls::PublicKeyBytes; use environment::RuntimeContext; use eth2::types::LivenessResponseData; use logging::crit; @@ -41,7 +42,7 @@ use std::sync::Arc; use task_executor::ShutdownReason; use tokio::time::sleep; use tracing::{error, info}; -use types::{Epoch, EthSpec, PublicKeyBytes, Slot}; +use types::{Epoch, EthSpec, Slot}; use validator_store::{DoppelgangerStatus, ValidatorStore}; struct LivenessResponses { diff --git a/validator_client/http_api/Cargo.toml b/validator_client/http_api/Cargo.toml index 588aa2ca931..2bd57867acf 100644 --- a/validator_client/http_api/Cargo.toml +++ b/validator_client/http_api/Cargo.toml @@ -16,10 +16,11 @@ deposit_contract = { workspace = true } directory = { workspace = true } dirs = { workspace = true } doppelganger_service = { workspace = true } -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["lighthouse"] } eth2_keystore = { workspace = true } ethereum_serde_utils = { workspace = true } filesystem = { workspace = true } +fixed_bytes = { workspace = true } graffiti_file = { workspace = true } health_metrics = { workspace = true } initialized_validators = { workspace = true } @@ -41,6 +42,7 @@ tempfile = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } tracing = { workspace = true } +typenum = { workspace = true } types = { workspace = true } url = { workspace = true } validator_dir = { workspace = true } @@ -54,3 +56,4 @@ zeroize = { workspace = true } futures = { workspace = true } itertools = { workspace = true } rand = { workspace = true, features = 
["small_rng"] } +ssz_types = { workspace = true } diff --git a/validator_client/http_api/src/keystores.rs b/validator_client/http_api/src/keystores.rs index c0f918f9bb8..18accf0d5a0 100644 --- a/validator_client/http_api/src/keystores.rs +++ b/validator_client/http_api/src/keystores.rs @@ -1,5 +1,6 @@ //! Implementation of the standard keystore management API. use account_utils::validator_definitions::PasswordStorage; +use bls::PublicKeyBytes; use eth2::lighthouse_vc::{ std_types::{ DeleteKeystoreStatus, DeleteKeystoresRequest, DeleteKeystoresResponse, @@ -18,7 +19,7 @@ use std::sync::Arc; use task_executor::TaskExecutor; use tokio::runtime::Handle; use tracing::{info, warn}; -use types::{EthSpec, PublicKeyBytes}; +use types::EthSpec; use validator_dir::{Builder as ValidatorDirBuilder, keystore_password_path}; use warp::Rejection; use warp_utils::reject::{custom_bad_request, custom_server_error}; diff --git a/validator_client/http_api/src/lib.rs b/validator_client/http_api/src/lib.rs index 4494fca9574..a35b4ec6c6d 100644 --- a/validator_client/http_api/src/lib.rs +++ b/validator_client/http_api/src/lib.rs @@ -22,6 +22,7 @@ use account_utils::{ }; pub use api_secret::ApiSecret; use beacon_node_fallback::CandidateInfo; +use bls::{PublicKey, PublicKeyBytes}; use core::convert::Infallible; use create_validator::{ create_validators_mnemonic, create_validators_web3signer, get_voting_password_storage, @@ -30,8 +31,8 @@ use directory::{DEFAULT_HARDCODED_NETWORK, DEFAULT_ROOT_DIR, DEFAULT_VALIDATOR_D use eth2::lighthouse_vc::{ std_types::{AuthResponse, GetFeeRecipientResponse, GetGasLimitResponse}, types::{ - self as api_types, GenericResponse, GetGraffitiResponse, Graffiti, PublicKey, - PublicKeyBytes, SetGraffitiRequest, UpdateCandidatesRequest, UpdateCandidatesResponse, + self as api_types, GenericResponse, GetGraffitiResponse, Graffiti, SetGraffitiRequest, + UpdateCandidatesRequest, UpdateCandidatesResponse, }, }; use health_metrics::observe::Observe; diff --git 
a/validator_client/http_api/src/remotekeys.rs b/validator_client/http_api/src/remotekeys.rs index 5aa63baac3b..987e1b8740d 100644 --- a/validator_client/http_api/src/remotekeys.rs +++ b/validator_client/http_api/src/remotekeys.rs @@ -2,6 +2,7 @@ use account_utils::validator_definitions::{ SigningDefinition, ValidatorDefinition, Web3SignerDefinition, }; +use bls::PublicKeyBytes; use eth2::lighthouse_vc::std_types::{ DeleteRemotekeyStatus, DeleteRemotekeysRequest, DeleteRemotekeysResponse, ImportRemotekeyStatus, ImportRemotekeysRequest, ImportRemotekeysResponse, @@ -14,7 +15,7 @@ use std::sync::Arc; use task_executor::TaskExecutor; use tokio::runtime::Handle; use tracing::{info, warn}; -use types::{EthSpec, PublicKeyBytes}; +use types::EthSpec; use url::Url; use warp::Rejection; use warp_utils::reject::custom_server_error; diff --git a/validator_client/http_api/src/test_utils.rs b/validator_client/http_api/src/test_utils.rs index 9a8784f2023..f83d9f4d526 100644 --- a/validator_client/http_api/src/test_utils.rs +++ b/validator_client/http_api/src/test_utils.rs @@ -4,6 +4,7 @@ use account_utils::validator_definitions::ValidatorDefinitions; use account_utils::{ eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password, }; +use bls::Keypair; use deposit_contract::decode_eth1_tx_data; use doppelganger_service::DoppelgangerService; use eth2::{ diff --git a/validator_client/http_api/src/tests.rs b/validator_client/http_api/src/tests.rs index b0780e74278..5cb631983cc 100644 --- a/validator_client/http_api/src/tests.rs +++ b/validator_client/http_api/src/tests.rs @@ -11,6 +11,7 @@ use account_utils::{ eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password, random_password_string, validator_definitions::ValidatorDefinitions, }; +use bls::{Keypair, PublicKeyBytes}; use deposit_contract::decode_eth1_tx_data; use eth2::{ Error as ApiError, diff --git a/validator_client/http_api/src/tests/keystores.rs 
b/validator_client/http_api/src/tests/keystores.rs index dd2266e3f6e..eeb3cd94de0 100644 --- a/validator_client/http_api/src/tests/keystores.rs +++ b/validator_client/http_api/src/tests/keystores.rs @@ -1,19 +1,23 @@ use super::*; use account_utils::random_password_string; use bls::PublicKeyBytes; +use bls::{AggregateSignature, PublicKey}; use eth2::lighthouse_vc::types::UpdateFeeRecipientRequest; use eth2::lighthouse_vc::{ http_client::ValidatorClientHttpClient as HttpClient, std_types::{KeystoreJsonStr as Keystore, *}, types::Web3SignerValidatorRequest, }; +use fixed_bytes::FixedBytesExtended; use itertools::Itertools; use lighthouse_validator_store::DEFAULT_GAS_LIMIT; use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; use slashing_protection::interchange::{Interchange, InterchangeMetadata}; +use ssz_types::BitList; use std::{collections::HashMap, path::Path}; use tokio::runtime::Handle; +use typenum::Unsigned; use types::{Address, attestation::AttestationBase}; use validator_store::ValidatorStore; use zeroize::Zeroizing; diff --git a/validator_client/initialized_validators/src/lib.rs b/validator_client/initialized_validators/src/lib.rs index 4d61bd4ed81..db6d03174dd 100644 --- a/validator_client/initialized_validators/src/lib.rs +++ b/validator_client/initialized_validators/src/lib.rs @@ -15,6 +15,7 @@ use account_utils::{ Web3SignerDefinition, }, }; +use bls::{Keypair, PublicKey, PublicKeyBytes}; use eth2_keystore::Keystore; use lockfile::{Lockfile, LockfileError}; use metrics::set_gauge; @@ -30,7 +31,7 @@ use std::sync::Arc; use std::time::Duration; use tracing::{debug, error, info, warn}; use types::graffiti::GraffitiString; -use types::{Address, Graffiti, Keypair, PublicKey, PublicKeyBytes}; +use types::{Address, Graffiti}; use url::{ParseError, Url}; use validator_dir::Builder as ValidatorDirBuilder; use zeroize::Zeroizing; diff --git a/validator_client/lighthouse_validator_store/Cargo.toml b/validator_client/lighthouse_validator_store/Cargo.toml index 
0f8220bdc9f..01c7616be15 100644 --- a/validator_client/lighthouse_validator_store/Cargo.toml +++ b/validator_client/lighthouse_validator_store/Cargo.toml @@ -7,6 +7,7 @@ authors = ["Sigma Prime "] [dependencies] account_utils = { workspace = true } beacon_node_fallback = { workspace = true } +bls = { workspace = true } doppelganger_service = { workspace = true } either = { workspace = true } environment = { workspace = true } diff --git a/validator_client/lighthouse_validator_store/src/lib.rs b/validator_client/lighthouse_validator_store/src/lib.rs index d10fecb32e4..3bea21a05d8 100644 --- a/validator_client/lighthouse_validator_store/src/lib.rs +++ b/validator_client/lighthouse_validator_store/src/lib.rs @@ -1,4 +1,5 @@ use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition}; +use bls::{PublicKeyBytes, Signature}; use doppelganger_service::DoppelgangerService; use eth2::types::PublishBlockRequest; use initialized_validators::InitializedValidators; @@ -15,13 +16,13 @@ use std::marker::PhantomData; use std::path::Path; use std::sync::Arc; use task_executor::TaskExecutor; -use tracing::{error, info, warn}; +use tracing::{error, info, instrument, warn}; use types::{ AbstractExecPayload, Address, AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, - PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock, - SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData, SignedVoluntaryExit, - Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, + SelectionProof, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, + SignedRoot, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, + SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, VoluntaryExit, graffiti::GraffitiString, }; 
@@ -242,6 +243,7 @@ impl LighthouseValidatorStore { /// Returns a `SigningMethod` for `validator_pubkey` *only if* that validator is considered safe /// by doppelganger protection. + #[instrument(skip_all, level = "debug")] fn doppelganger_checked_signing_method( &self, validator_pubkey: PublicKeyBytes, @@ -745,6 +747,7 @@ impl ValidatorStore for LighthouseValidatorS } } + #[instrument(skip_all)] async fn sign_attestation( &self, validator_pubkey: PublicKeyBytes, diff --git a/validator_client/signing_method/Cargo.toml b/validator_client/signing_method/Cargo.toml index 3e1a48142f9..cb321c2d498 100644 --- a/validator_client/signing_method/Cargo.toml +++ b/validator_client/signing_method/Cargo.toml @@ -5,6 +5,7 @@ edition = { workspace = true } authors = ["Sigma Prime "] [dependencies] +bls = { workspace = true } eth2_keystore = { workspace = true } ethereum_serde_utils = { workspace = true } lockfile = { workspace = true } @@ -12,6 +13,7 @@ parking_lot = { workspace = true } reqwest = { workspace = true } serde = { workspace = true } task_executor = { workspace = true } +tracing = { workspace = true } types = { workspace = true } url = { workspace = true } validator_metrics = { workspace = true } diff --git a/validator_client/signing_method/src/lib.rs b/validator_client/signing_method/src/lib.rs index c535415b1e9..d0d98689526 100644 --- a/validator_client/signing_method/src/lib.rs +++ b/validator_client/signing_method/src/lib.rs @@ -3,6 +3,7 @@ //! - Via a local `Keypair`. //! 
- Via a remote signer (Web3Signer) +use bls::{Keypair, PublicKey, Signature}; use eth2_keystore::Keystore; use lockfile::Lockfile; use parking_lot::Mutex; @@ -10,6 +11,7 @@ use reqwest::{Client, header::ACCEPT}; use std::path::PathBuf; use std::sync::Arc; use task_executor::TaskExecutor; +use tracing::instrument; use types::*; use url::Url; use web3signer::{ForkInfo, MessageType, SigningRequest, SigningResponse}; @@ -131,6 +133,7 @@ impl SigningMethod { } /// Return the signature of `signable_message`, with respect to the `signing_context`. + #[instrument(skip_all, level = "debug")] pub async fn get_signature>( &self, signable_message: SignableMessage<'_, E, Payload>, diff --git a/validator_client/signing_method/src/web3signer.rs b/validator_client/signing_method/src/web3signer.rs index 99fad103035..246d9e9e091 100644 --- a/validator_client/signing_method/src/web3signer.rs +++ b/validator_client/signing_method/src/web3signer.rs @@ -1,6 +1,7 @@ //! Contains the types required to make JSON requests to Web3Signer servers. 
use super::Error; +use bls::{PublicKeyBytes, Signature}; use serde::{Deserialize, Serialize}; use types::*; diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 6a778c5de31..b80da6c7867 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -11,9 +11,11 @@ portable = ["types/portable"] [dependencies] arbitrary = { workspace = true, features = ["derive"] } +bls = { workspace = true } eip_3076 = { workspace = true, features = ["json"] } ethereum_serde_utils = { workspace = true } filesystem = { workspace = true } +fixed_bytes = { workspace = true } r2d2 = { workspace = true } r2d2_sqlite = "0.21.0" rusqlite = { workspace = true } diff --git a/validator_client/slashing_protection/src/attestation_tests.rs b/validator_client/slashing_protection/src/attestation_tests.rs index 37766f271bb..d16c9613369 100644 --- a/validator_client/slashing_protection/src/attestation_tests.rs +++ b/validator_client/slashing_protection/src/attestation_tests.rs @@ -2,7 +2,8 @@ use crate::test_utils::*; use crate::*; -use types::{AttestationData, Checkpoint, Epoch, FixedBytesExtended, Slot}; +use fixed_bytes::FixedBytesExtended; +use types::{AttestationData, Checkpoint, Epoch, Slot}; pub fn build_checkpoint(epoch_num: u64) -> Checkpoint { Checkpoint { diff --git a/validator_client/slashing_protection/src/bin/test_generator.rs b/validator_client/slashing_protection/src/bin/test_generator.rs index dfda7983f73..df1c63f37d3 100644 --- a/validator_client/slashing_protection/src/bin/test_generator.rs +++ b/validator_client/slashing_protection/src/bin/test_generator.rs @@ -1,11 +1,12 @@ use eip_3076::{Interchange, InterchangeData, InterchangeMetadata, SignedAttestation, SignedBlock}; +use fixed_bytes::FixedBytesExtended; use slashing_protection::SUPPORTED_INTERCHANGE_FORMAT_VERSION; use slashing_protection::interchange_test::{MultiTestCase, TestCase}; use 
slashing_protection::test_utils::{DEFAULT_GENESIS_VALIDATORS_ROOT, pubkey}; use std::fs::{self, File}; use std::io::Write; use std::path::Path; -use types::{Epoch, FixedBytesExtended, Hash256, Slot}; +use types::{Epoch, Hash256, Slot}; fn metadata(genesis_validators_root: Hash256) -> InterchangeMetadata { InterchangeMetadata { diff --git a/validator_client/slashing_protection/src/block_tests.rs b/validator_client/slashing_protection/src/block_tests.rs index b3273015f42..2531f52d8ce 100644 --- a/validator_client/slashing_protection/src/block_tests.rs +++ b/validator_client/slashing_protection/src/block_tests.rs @@ -2,7 +2,8 @@ use super::*; use crate::test_utils::*; -use types::{BeaconBlockHeader, FixedBytesExtended, Slot}; +use fixed_bytes::FixedBytesExtended; +use types::{BeaconBlockHeader, Slot}; pub fn block(slot: u64) -> BeaconBlockHeader { BeaconBlockHeader { diff --git a/validator_client/slashing_protection/src/extra_interchange_tests.rs b/validator_client/slashing_protection/src/extra_interchange_tests.rs index 0f88ec8b1dc..18457720e4e 100644 --- a/validator_client/slashing_protection/src/extra_interchange_tests.rs +++ b/validator_client/slashing_protection/src/extra_interchange_tests.rs @@ -2,8 +2,8 @@ use crate::test_utils::pubkey; use crate::*; +use fixed_bytes::FixedBytesExtended; use tempfile::tempdir; -use types::FixedBytesExtended; #[test] fn export_non_existent_key() { diff --git a/validator_client/slashing_protection/src/interchange_test.rs b/validator_client/slashing_protection/src/interchange_test.rs index ebe0105f24d..0dfcda204d7 100644 --- a/validator_client/slashing_protection/src/interchange_test.rs +++ b/validator_client/slashing_protection/src/interchange_test.rs @@ -2,11 +2,13 @@ use crate::{ SigningRoot, SlashingDatabase, test_utils::{DEFAULT_GENESIS_VALIDATORS_ROOT, pubkey}, }; +use bls::PublicKeyBytes; use eip_3076::{Interchange, SignedAttestation, SignedBlock}; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; 
use std::collections::HashSet; use tempfile::tempdir; -use types::{Epoch, FixedBytesExtended, Hash256, PublicKeyBytes, Slot}; +use types::{Epoch, Hash256, Slot}; #[derive(Debug, Clone, Deserialize, Serialize)] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] diff --git a/validator_client/slashing_protection/src/lib.rs b/validator_client/slashing_protection/src/lib.rs index 917d51d38b7..f8580e73158 100644 --- a/validator_client/slashing_protection/src/lib.rs +++ b/validator_client/slashing_protection/src/lib.rs @@ -19,10 +19,11 @@ pub use crate::slashing_database::{ InterchangeError, InterchangeImportOutcome, SUPPORTED_INTERCHANGE_FORMAT_VERSION, SlashingDatabase, }; +use bls::PublicKeyBytes; use rusqlite::Error as SQLError; use std::fmt::Display; use std::io::{Error as IOError, ErrorKind}; -use types::{Hash256, PublicKeyBytes}; +use types::Hash256; /// The filename within the `validators` directory that contains the slashing protection DB. pub const SLASHING_PROTECTION_FILENAME: &str = "slashing_protection.sqlite"; @@ -133,7 +134,7 @@ impl Display for NotSafe { #[cfg(test)] mod test { - use types::FixedBytesExtended; + use fixed_bytes::FixedBytesExtended; use super::*; diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index ce32299a511..67e1234ac57 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -1,6 +1,7 @@ use crate::signed_attestation::InvalidAttestation; use crate::signed_block::InvalidBlock; use crate::{NotSafe, Safe, SignedAttestation, SignedBlock, SigningRoot, signing_root_from_row}; +use bls::PublicKeyBytes; use eip_3076::{ Interchange, InterchangeData, InterchangeMetadata, SignedAttestation as InterchangeAttestation, SignedBlock as InterchangeBlock, @@ -11,7 +12,8 @@ use rusqlite::{OptionalExtension, Transaction, TransactionBehavior, params}; use 
std::fs::File; use std::path::Path; use std::time::Duration; -use types::{AttestationData, BeaconBlockHeader, Epoch, Hash256, PublicKeyBytes, SignedRoot, Slot}; +use tracing::instrument; +use types::{AttestationData, BeaconBlockHeader, Epoch, Hash256, SignedRoot, Slot}; type Pool = r2d2::Pool; @@ -639,6 +641,7 @@ impl SlashingDatabase { /// to prevent concurrent checks and inserts from resulting in slashable data being inserted. /// /// This is the safe, externally-callable interface for checking attestations. + #[instrument(skip_all, level = "debug")] pub fn check_and_insert_attestation( &self, validator_pubkey: &PublicKeyBytes, diff --git a/validator_client/slashing_protection/tests/migration.rs b/validator_client/slashing_protection/tests/migration.rs index 3d4ec7ea9a8..14bf0d63f93 100644 --- a/validator_client/slashing_protection/tests/migration.rs +++ b/validator_client/slashing_protection/tests/migration.rs @@ -1,10 +1,11 @@ //! Tests for upgrading a previous version of the database to the latest schema. 
+use fixed_bytes::FixedBytesExtended; use slashing_protection::{NotSafe, SlashingDatabase}; use std::collections::HashMap; use std::fs; use std::path::{Path, PathBuf}; use tempfile::tempdir; -use types::{FixedBytesExtended, Hash256}; +use types::Hash256; fn test_data_dir() -> PathBuf { Path::new(&std::env::var("CARGO_MANIFEST_DIR").unwrap()).join("migration-tests") diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index a6ce67fae91..587d4668b8a 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -8,7 +8,7 @@ use std::ops::Deref; use std::sync::Arc; use task_executor::TaskExecutor; use tokio::time::{Duration, Instant, sleep, sleep_until}; -use tracing::{Instrument, debug, error, info, info_span, instrument, trace, warn}; +use tracing::{Instrument, Span, debug, error, info, info_span, instrument, trace, warn}; use tree_hash::TreeHash; use types::{Attestation, AttestationData, ChainSpec, CommitteeIndex, EthSpec, Slot}; use validator_store::{Error as ValidatorStoreError, ValidatorStore}; @@ -180,8 +180,9 @@ impl AttestationService Result<(), String> { let slot = self.slot_clock.now().ok_or("Failed to read slot clock")?; let duration_to_next_slot = self @@ -189,6 +190,59 @@ impl AttestationService = self.duties_service.attesters(slot).into_iter().collect(); + + // Return early if there is no attestation duties + if attestation_duties.is_empty() { + return Ok(()); + } + + let attestation_service = self.clone(); + + let attestation_data_handle = self + .inner + .executor + .spawn_handle( + async move { + let attestation_data = attestation_service + .beacon_nodes + .first_success(|beacon_node| async move { + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::ATTESTATIONS_HTTP_GET], + ); + beacon_node + 
.get_validator_attestation_data(slot, 0) + .await + .map_err(|e| format!("Failed to produce attestation data: {:?}", e)) + .map(|result| result.data) + }) + .await + .map_err(|e| e.to_string())?; + + attestation_service + .sign_and_publish_attestations( + slot, + &attestation_duties, + attestation_data.clone(), + ) + .await + .map_err(|e| { + crit!( + error = format!("{:?}", e), + slot = slot.as_u64(), + "Error during attestation routine" + ); + e + })?; + Ok::(attestation_data) + }, + "unaggregated attestation production", + ) + .ok_or("Failed to spawn attestation data task")?; + // If a validator needs to publish an aggregate attestation, they must do so at 2/3 // through the slot. This delay triggers at this time let aggregate_production_instant = Instant::now() @@ -196,7 +250,7 @@ impl AttestationService> = self + let aggregate_duties_by_committee_index: HashMap> = self .duties_service .attesters(slot) .into_iter() @@ -207,24 +261,45 @@ impl AttestationService data, + Ok(Some(Err(err))) => { + error!(?err, "Attestation production failed"); + return; + } + Ok(None) | Err(_) => { + info!("Aborting attestation production due to shutdown"); + return; + } + }; + + // For each committee index for this slot: + // Create and publish `SignedAggregateAndProof` for all aggregating validators. + aggregate_duties_by_committee_index.into_iter().for_each( + |(committee_index, validator_duties)| { + let attestation_service = attestation_service_clone.clone(); + let attestation_data = attestation_data.clone(); + executor.spawn_ignoring_error( + attestation_service.handle_aggregates( + slot, + committee_index, + validator_duties, + aggregate_production_instant, + attestation_data, + ), + "aggregate publish", + ); + }, + ) + }, + "attestation and aggregate publish", + ); // Schedule pruning of the slashing protection database once all unaggregated // attestations have (hopefully) been signed, i.e. 
at the same time as aggregate @@ -234,115 +309,73 @@ impl AttestationService, aggregate_production_instant: Instant, + attestation_data: AttestationData, ) -> Result<(), ()> { - let attestations_timer = validator_metrics::start_timer_vec( - &validator_metrics::ATTESTATION_SERVICE_TIMES, - &[validator_metrics::ATTESTATIONS], - ); - - // There's not need to produce `Attestation` or `SignedAggregateAndProof` if we do not have + // There's not need to produce `SignedAggregateAndProof` if we do not have // any validators for the given `slot` and `committee_index`. if validator_duties.is_empty() { return Ok(()); } - // Step 1. - // - // Download, sign and publish an `Attestation` for each validator. - let attestation_opt = self - .produce_and_publish_attestations(slot, committee_index, &validator_duties) - .await - .map_err(move |e| { - crit!( - error = format!("{:?}", e), - committee_index, - slot = slot.as_u64(), - "Error during attestation routine" - ) - })?; + // Wait until the `aggregation_production_instant` (2/3rds + // of the way though the slot). As verified in the + // `delay_triggers_when_in_the_past` test, this code will still run + // even if the instant has already elapsed. + sleep_until(aggregate_production_instant).await; - drop(attestations_timer); - - // Step 2. - // - // If an attestation was produced, make an aggregate. - if let Some(attestation_data) = attestation_opt { - // First, wait until the `aggregation_production_instant` (2/3rds - // of the way though the slot). As verified in the - // `delay_triggers_when_in_the_past` test, this code will still run - // even if the instant has already elapsed. - sleep_until(aggregate_production_instant).await; - - // Start the metrics timer *after* we've done the delay. 
- let _aggregates_timer = validator_metrics::start_timer_vec( - &validator_metrics::ATTESTATION_SERVICE_TIMES, - &[validator_metrics::AGGREGATES], - ); - - // Then download, sign and publish a `SignedAggregateAndProof` for each - // validator that is elected to aggregate for this `slot` and - // `committee_index`. - self.produce_and_publish_aggregates( - &attestation_data, - committee_index, - &validator_duties, - ) + // Start the metrics timer *after* we've done the delay. + let _aggregates_timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::AGGREGATES], + ); + + // Download, sign and publish a `SignedAggregateAndProof` for each + // validator that is elected to aggregate for this `slot` and + // `committee_index`. + self.produce_and_publish_aggregates(&attestation_data, committee_index, &validator_duties) .await .map_err(move |e| { crit!( error = format!("{:?}", e), committee_index, slot = slot.as_u64(), - "Error during attestation routine" + "Error during aggregate attestation routine" ) })?; - } Ok(()) } - /// Performs the first step of the attesting process: downloading `Attestation` objects, - /// signing them and returning them to the validator. + /// Performs the main steps of the attesting process: signing and publishing to the BN. /// - /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#attesting + /// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/validator.md#attesting /// /// ## Detail /// /// The given `validator_duties` should already be filtered to only contain those that match - /// `slot` and `committee_index`. Critical errors will be logged if this is not the case. - /// - /// Only one `Attestation` is downloaded from the BN. It is then cloned and signed by each - /// validator and the list of individually-signed `Attestation` objects is returned to the BN. 
- #[instrument(skip_all, fields(%slot, %committee_index))] - async fn produce_and_publish_attestations( + /// `slot`. Critical errors will be logged if this is not the case. + #[instrument(skip_all, fields(%slot, %attestation_data.beacon_block_root))] + async fn sign_and_publish_attestations( &self, slot: Slot, - committee_index: CommitteeIndex, validator_duties: &[DutyAndProof], - ) -> Result, String> { - if validator_duties.is_empty() { - return Ok(None); - } + attestation_data: AttestationData, + ) -> Result<(), String> { + let _attestations_timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::ATTESTATIONS], + ); let current_epoch = self .slot_clock @@ -350,98 +383,82 @@ impl AttestationService(attestation_data, &self.chain_spec) { - crit!( - validator = ?duty.pubkey, - duty_slot = %duty.slot, - attestation_slot = %attestation_data.slot, - duty_index = duty.committee_index, - attestation_index = attestation_data.index, - "Inconsistent validator duties during signing" - ); - return None; - } + let signing_futures = validator_duties.iter().map(|duty_and_proof| { + async move { + let duty = &duty_and_proof.duty; + let attestation_data = attestation_data_ref; - let mut attestation = match Attestation::empty_for_signing( - duty.committee_index, - duty.committee_length as usize, - attestation_data.slot, - attestation_data.beacon_block_root, - attestation_data.source, - attestation_data.target, - &self.chain_spec, - ) { - Ok(attestation) => attestation, - Err(err) => { + // Ensure that the attestation matches the duties. 
+ if !duty.match_attestation_data::(attestation_data, &self.chain_spec) { crit!( validator = ?duty.pubkey, - ?duty, - ?err, - "Invalid validator duties during signing" + duty_slot = %duty.slot, + attestation_slot = %attestation_data.slot, + duty_index = duty.committee_index, + attestation_index = attestation_data.index, + "Inconsistent validator duties during signing" ); return None; } - }; - match self - .validator_store - .sign_attestation( - duty.pubkey, - duty.validator_committee_index as usize, - &mut attestation, - current_epoch, - ) - .await - { - Ok(()) => Some((attestation, duty.validator_index)), - Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { - // A pubkey can be missing when a validator was recently - // removed via the API. - warn!( - info = "a validator may have recently been removed from this VC", - pubkey = ?pubkey, - validator = ?duty.pubkey, - committee_index = committee_index, - slot = slot.as_u64(), - "Missing pubkey for attestation" - ); - None - } - Err(e) => { - crit!( - error = ?e, - validator = ?duty.pubkey, - committee_index, - slot = slot.as_u64(), - "Failed to sign attestation" - ); - None + let mut attestation = match Attestation::empty_for_signing( + duty.committee_index, + duty.committee_length as usize, + attestation_data.slot, + attestation_data.beacon_block_root, + attestation_data.source, + attestation_data.target, + &self.chain_spec, + ) { + Ok(attestation) => attestation, + Err(err) => { + crit!( + validator = ?duty.pubkey, + ?duty, + ?err, + "Invalid validator duties during signing" + ); + return None; + } + }; + + match self + .validator_store + .sign_attestation( + duty.pubkey, + duty.validator_committee_index as usize, + &mut attestation, + current_epoch, + ) + .await + { + Ok(()) => Some((attestation, duty.validator_index)), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. 
+ warn!( + info = "a validator may have recently been removed from this VC", + pubkey = ?pubkey, + validator = ?duty.pubkey, + slot = slot.as_u64(), + "Missing pubkey for attestation" + ); + None + } + Err(e) => { + crit!( + error = ?e, + validator = ?duty.pubkey, + slot = slot.as_u64(), + "Failed to sign attestation" + ); + None + } } } + .instrument(Span::current()) }); // Execute all the futures in parallel, collecting any successful results. @@ -457,7 +474,7 @@ impl AttestationService AttestationService"] [dependencies] +bls = { workspace = true } eth2 = { workspace = true } slashing_protection = { workspace = true } types = { workspace = true } diff --git a/validator_client/validator_store/src/lib.rs b/validator_client/validator_store/src/lib.rs index 6fd2e270649..2b472799d24 100644 --- a/validator_client/validator_store/src/lib.rs +++ b/validator_client/validator_store/src/lib.rs @@ -1,3 +1,4 @@ +use bls::{PublicKeyBytes, Signature}; use eth2::types::{FullBlockContents, PublishBlockRequest}; use slashing_protection::NotSafe; use std::fmt::Debug; @@ -5,9 +6,9 @@ use std::future::Future; use std::sync::Arc; use types::{ Address, Attestation, AttestationError, BlindedBeaconBlock, Epoch, EthSpec, Graffiti, Hash256, - PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, SignedBlindedBeaconBlock, - SignedContributionAndProof, SignedValidatorRegistrationData, Slot, SyncCommitteeContribution, - SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, + SelectionProof, SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedContributionAndProof, + SignedValidatorRegistrationData, Slot, SyncCommitteeContribution, SyncCommitteeMessage, + SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, }; #[derive(Debug, PartialEq, Clone)] diff --git a/validator_manager/Cargo.toml b/validator_manager/Cargo.toml index 6ef179fbe99..16ce1e023fa 100644 --- a/validator_manager/Cargo.toml +++ b/validator_manager/Cargo.toml @@ -6,6 +6,7 @@ 
edition = { workspace = true } [dependencies] account_utils = { workspace = true } +bls = { workspace = true } clap = { workspace = true } clap_utils = { workspace = true } educe = { workspace = true } diff --git a/validator_manager/src/common.rs b/validator_manager/src/common.rs index 0e93b257734..a95d2a1fd61 100644 --- a/validator_manager/src/common.rs +++ b/validator_manager/src/common.rs @@ -1,5 +1,6 @@ pub use account_utils::STDIN_INPUTS_FLAG; use account_utils::strip_off_newlines; +use bls::{Keypair, PublicKeyBytes, SignatureBytes}; use eth2::lighthouse_vc::std_types::{InterchangeJsonStr, KeystoreJsonStr}; use eth2::{ SensitiveUrl, diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index 19f78be2ea7..8682705956c 100644 --- a/validator_manager/src/create_validators.rs +++ b/validator_manager/src/create_validators.rs @@ -1,6 +1,7 @@ use super::common::*; use crate::DumpConfig; use account_utils::{random_password_string, read_mnemonic_from_cli, read_password_from_user}; +use bls::PublicKeyBytes; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; use eth2::{ @@ -586,6 +587,7 @@ async fn run(config: CreateConfig, spec: &ChainSpec) -> Result<(), S #[cfg(test)] pub mod tests { use super::*; + use bls::SignatureBytes; use eth2_network_config::Eth2NetworkConfig; use regex::Regex; use std::path::Path; diff --git a/validator_manager/src/delete_validators.rs b/validator_manager/src/delete_validators.rs index 3ff0c9529d7..2421b002aab 100644 --- a/validator_manager/src/delete_validators.rs +++ b/validator_manager/src/delete_validators.rs @@ -1,3 +1,4 @@ +use bls::PublicKeyBytes; use clap::{Arg, ArgAction, ArgMatches, Command}; use eth2::{ SensitiveUrl, @@ -5,7 +6,6 @@ use eth2::{ }; use serde::{Deserialize, Serialize}; use std::path::PathBuf; -use types::PublicKeyBytes; use crate::{DumpConfig, common::vc_http_client}; diff --git a/validator_manager/src/exit_validators.rs 
b/validator_manager/src/exit_validators.rs index 4a398793ce1..b53d9c0a16d 100644 --- a/validator_manager/src/exit_validators.rs +++ b/validator_manager/src/exit_validators.rs @@ -1,5 +1,6 @@ use crate::{DumpConfig, common::vc_http_client}; +use bls::PublicKeyBytes; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; use eth2::types::{ConfigAndPreset, Epoch, StateId, ValidatorId, ValidatorStatus}; @@ -10,7 +11,7 @@ use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::fs::write; use std::path::PathBuf; use std::time::Duration; -use types::{ChainSpec, EthSpec, PublicKeyBytes}; +use types::{ChainSpec, EthSpec}; pub const CMD: &str = "exit"; pub const BEACON_URL_FLAG: &str = "beacon-node"; diff --git a/validator_manager/src/list_validators.rs b/validator_manager/src/list_validators.rs index 082894a995d..f7a09f8d8e7 100644 --- a/validator_manager/src/list_validators.rs +++ b/validator_manager/src/list_validators.rs @@ -1,3 +1,4 @@ +use bls::PublicKeyBytes; use clap::{Arg, ArgAction, ArgMatches, Command}; use eth2::lighthouse_vc::types::SingleKeystoreResponse; use eth2::types::{ConfigAndPreset, StateId, ValidatorId, ValidatorStatus}; @@ -5,7 +6,7 @@ use eth2::{BeaconNodeHttpClient, SensitiveUrl, Timeouts}; use serde::{Deserialize, Serialize}; use std::path::PathBuf; use std::time::Duration; -use types::{ChainSpec, EthSpec, PublicKeyBytes}; +use types::{ChainSpec, EthSpec}; use crate::exit_validators::get_current_epoch; use crate::{DumpConfig, common::vc_http_client}; diff --git a/validator_manager/src/move_validators.rs b/validator_manager/src/move_validators.rs index 08b50eb9293..ace1d1941fd 100644 --- a/validator_manager/src/move_validators.rs +++ b/validator_manager/src/move_validators.rs @@ -1,6 +1,7 @@ use super::common::*; use crate::DumpConfig; use account_utils::read_password_from_user; +use bls::PublicKeyBytes; use clap::{Arg, ArgAction, ArgMatches, Command}; use eth2::{ SensitiveUrl, @@ -18,7 +19,7 @@ use std::path::PathBuf; use 
std::str::FromStr; use std::time::Duration; use tokio::time::sleep; -use types::{Address, PublicKeyBytes}; +use types::Address; use zeroize::Zeroizing; pub const MOVE_DIR_NAME: &str = "lighthouse-validator-move"; From b42c9118df6da390f516921112306787a8854687 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Sat, 13 Dec 2025 15:58:06 +0000 Subject: [PATCH 48/67] Fix imports --- beacon_node/src/config.rs | 2 +- consensus/types/src/execution_proof.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c3b34247b0e..2f951daae1f 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -30,7 +30,7 @@ use std::str::FromStr; use std::time::Duration; use tracing::{error, info, warn}; use types::graffiti::GraffitiString; -use types::{Checkpoint, Epoch, EthSpec, ExecutionProofId, Hash256, PublicKeyBytes}; +use types::{Checkpoint, Epoch, EthSpec, ExecutionProofId, Hash256}; use zkvm_execution_layer::ZKVMExecutionLayerConfig; const PURGE_DB_CONFIRMATION: &str = "confirm"; diff --git a/consensus/types/src/execution_proof.rs b/consensus/types/src/execution_proof.rs index 0b74d6286ea..35d5acda49e 100644 --- a/consensus/types/src/execution_proof.rs +++ b/consensus/types/src/execution_proof.rs @@ -1,4 +1,5 @@ -use crate::{ExecutionBlockHash, Hash256, Slot, VariableList}; +use crate::{ExecutionBlockHash, Hash256, Slot}; +use ssz_types::VariableList; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode as DeriveEncode}; From 63735bdea1760e300ebc104c33583ec0b561e4ca Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Sat, 13 Dec 2025 16:02:04 +0000 Subject: [PATCH 49/67] cargo sort --- beacon_node/Cargo.toml | 4 ++-- beacon_node/beacon_chain/Cargo.toml | 4 ++-- beacon_node/client/Cargo.toml | 7 ++++--- beacon_node/proof_generation_service/Cargo.toml | 7 ++++--- dummy_el/Cargo.toml | 16 ++++++++-------- zkvm_execution_layer/Cargo.toml | 7 +++---- 
6 files changed, 23 insertions(+), 22 deletions(-) diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 41a0c7e706c..0777216d172 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -29,8 +29,6 @@ dirs = { workspace = true } environment = { workspace = true } eth2_config = { workspace = true } execution_layer = { workspace = true } -# TODO(zkproofs): add as a workspace dependency -zkvm_execution_layer = { path = "../zkvm_execution_layer" } genesis = { workspace = true } hex = { workspace = true } http_api = { workspace = true } @@ -46,6 +44,8 @@ strum = { workspace = true } task_executor = { workspace = true } tracing = { workspace = true } types = { workspace = true } +# TODO(zkproofs): add as a workspace dependency +zkvm_execution_layer = { path = "../zkvm_execution_layer" } [dev-dependencies] node_test_rig = { path = "../testing/node_test_rig" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 485aff2e076..eef64e1d9ad 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -27,8 +27,6 @@ ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } execution_layer = { workspace = true } fixed_bytes = { workspace = true } -# TODO(zkproofs): add as a workspace dependency -zkvm_execution_layer = { path = "../../zkvm_execution_layer" } fork_choice = { workspace = true } futures = { workspace = true } genesis = { workspace = true } @@ -71,6 +69,8 @@ tree_hash = { workspace = true } tree_hash_derive = { workspace = true } typenum = { workspace = true } types = { workspace = true } +# TODO(zkproofs): add as a workspace dependency +zkvm_execution_layer = { path = "../../zkvm_execution_layer" } zstd = { workspace = true } [dev-dependencies] diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index c5de5a4f839..3a58457bb2b 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -14,9 +14,6 @@ eth2 
= { workspace = true } eth2_config = { workspace = true } ethereum_ssz = { workspace = true } execution_layer = { workspace = true } -# TODO(zkproofs): add as a workspace dependency -zkvm_execution_layer = { path = "../../zkvm_execution_layer" } -proof_generation_service = { path = "../proof_generation_service" } futures = { workspace = true } genesis = { workspace = true } http_api = { workspace = true } @@ -27,6 +24,8 @@ logging = { workspace = true } metrics = { workspace = true } monitoring_api = { workspace = true } network = { workspace = true } +# TODO(zkproofs): add as a workspace dependency +proof_generation_service = { path = "../proof_generation_service" } rand = { workspace = true } sensitive_url = { workspace = true } serde = { workspace = true } @@ -42,6 +41,8 @@ tokio = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } types = { workspace = true } +# TODO(zkproofs): add as a workspace dependency +zkvm_execution_layer = { path = "../../zkvm_execution_layer" } [dev-dependencies] operation_pool = { workspace = true } diff --git a/beacon_node/proof_generation_service/Cargo.toml b/beacon_node/proof_generation_service/Cargo.toml index bbd043e0fdd..21f25007603 100644 --- a/beacon_node/proof_generation_service/Cargo.toml +++ b/beacon_node/proof_generation_service/Cargo.toml @@ -6,11 +6,12 @@ edition = "2021" [dependencies] beacon_chain = { path = "../beacon_chain" } lighthouse_network = { workspace = true } +logging = { workspace = true } network = { workspace = true } -types = { path = "../../consensus/types" } -zkvm_execution_layer = { path = "../../zkvm_execution_layer" } tokio = { workspace = true } -logging = { workspace = true } tracing = { workspace = true } +types = { path = "../../consensus/types" } +# TODO(zkproofs): add as a workspace dependency +zkvm_execution_layer = { path = "../../zkvm_execution_layer" } [dev-dependencies] diff --git a/dummy_el/Cargo.toml b/dummy_el/Cargo.toml index 
7b25c4a679d..788a52a3fb7 100644 --- a/dummy_el/Cargo.toml +++ b/dummy_el/Cargo.toml @@ -3,18 +3,18 @@ name = "dummy_el" version = "0.1.0" edition = "2024" +[[bin]] +name = "dummy_el" +path = "src/main.rs" + [dependencies] +anyhow = { workspace = true } axum = { workspace = true } +clap = { workspace = true } +hex = { workspace = true } +jsonwebtoken = "9" serde = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter", "json"] } -clap = { workspace = true } -anyhow = { workspace = true } -jsonwebtoken = "9" -hex = { workspace = true } - -[[bin]] -name = "dummy_el" -path = "src/main.rs" diff --git a/zkvm_execution_layer/Cargo.toml b/zkvm_execution_layer/Cargo.toml index 1603cd31e88..69724cc6bc3 100644 --- a/zkvm_execution_layer/Cargo.toml +++ b/zkvm_execution_layer/Cargo.toml @@ -5,13 +5,12 @@ edition = "2021" [dependencies] async-trait = "0.1" #TODO(zkproofs): Remove -lru = "0.12" +execution_layer = { path = "../beacon_node/execution_layer" } hashbrown = "0.15" +lru = "0.12" serde = { version = "1.0", features = ["derive"] } -tokio = { version = "1", features = ["full"] } thiserror = "2" +tokio = { version = "1", features = ["full"] } types = { path = "../consensus/types" } -execution_layer = { path = "../beacon_node/execution_layer" } - [dev-dependencies] From e33bc493cff6460e86994d6a48ee04372b3c2d3b Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Sat, 13 Dec 2025 16:03:32 +0000 Subject: [PATCH 50/67] cargo fmt --- beacon_node/client/src/builder.rs | 2 +- .../lighthouse_network/src/rpc/codec.rs | 8 +- .../lighthouse_network/src/rpc/methods.rs | 4 +- .../sync/block_lookups/single_block_lookup.rs | 4 +- .../proof_generation_service/src/lib.rs | 8 +- consensus/types/src/execution_proof.rs | 2 +- dummy_el/src/main.rs | 82 +++++++++++-------- 7 files changed, 61 insertions(+), 49 deletions(-) diff --git 
a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 52ceca991df..eba0861dbef 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -215,7 +215,7 @@ where let ordered_custody_column_indices = compute_ordered_custody_column_indices::(node_id, &spec).map_err(|e| { format!("Failed to compute ordered custody column indices: {:?}", e) - })?; + })?; let builder = BeaconChainBuilder::new(eth_spec_instance, Arc::new(kzg)) .store(store) diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 8abec366fac..b3401038df8 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -18,8 +18,8 @@ use tokio_util::codec::{Decoder, Encoder}; use types::{ BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EthSpec, ExecutionProof, ForkContext, ForkName, Hash256, LightClientBootstrap, - LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, - SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, + LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, + SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, SignedBeaconBlockGloas, }; @@ -574,9 +574,7 @@ fn handle_rpc_request( let request = ExecutionProofsByRootRequest::from_ssz_bytes(decoded_buffer) .map_err(RPCError::SSZDecodeError)?; - request - .validate(spec) - .map_err(RPCError::InvalidData)?; + request.validate(spec).map_err(RPCError::InvalidData)?; Ok(Some(RequestType::ExecutionProofsByRoot(request))) } diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 98cc587201b..9ba8f66dafa 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ 
b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -17,8 +17,8 @@ use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnsByRootIdentifier, Epoch, EthSpec, ExecutionProof, ExecutionProofId, ForkContext, Hash256, LightClientBootstrap, - LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, - SignedBeaconBlock, Slot, blob_sidecar::BlobSidecar, + LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, + Slot, blob_sidecar::BlobSidecar, }; /// Maximum length of error message. diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 46a8deb0ab2..6c326e84d4d 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -262,7 +262,9 @@ impl SingleBlockLookup { } if cx.chain.should_fetch_execution_proofs(block_epoch) { - self.proof_request = cx.chain.min_execution_proofs_required() + self.proof_request = cx + .chain + .min_execution_proofs_required() .map(|min_proofs| ProofRequestState::new(self.block_root, min_proofs)); } } else { diff --git a/beacon_node/proof_generation_service/src/lib.rs b/beacon_node/proof_generation_service/src/lib.rs index 80c2a83e98c..5265ea08719 100644 --- a/beacon_node/proof_generation_service/src/lib.rs +++ b/beacon_node/proof_generation_service/src/lib.rs @@ -312,9 +312,7 @@ mod tests { let proof_id = ExecutionProofId::new(0).unwrap(); // Should return false for a proof that hasn't been observed - assert!( - !service.check_if_proof_exists(slot, block_root, proof_id) - ); + assert!(!service.check_if_proof_exists(slot, block_root, proof_id)); } #[tokio::test] @@ -339,9 +337,7 @@ mod tests { .unwrap(); // Should return true for an observed proof - assert!( - service.check_if_proof_exists(slot, block_root, proof_id) - ); + 
assert!(service.check_if_proof_exists(slot, block_root, proof_id)); } #[tokio::test] diff --git a/consensus/types/src/execution_proof.rs b/consensus/types/src/execution_proof.rs index 35d5acda49e..1ae7fa8dffe 100644 --- a/consensus/types/src/execution_proof.rs +++ b/consensus/types/src/execution_proof.rs @@ -1,8 +1,8 @@ use crate::{ExecutionBlockHash, Hash256, Slot}; -use ssz_types::VariableList; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode as DeriveEncode}; +use ssz_types::VariableList; use ssz_types::typenum; use std::fmt::{self, Debug}; use tree_hash_derive::TreeHash; diff --git a/dummy_el/src/main.rs b/dummy_el/src/main.rs index fd0aa4520a7..b1c22030293 100644 --- a/dummy_el/src/main.rs +++ b/dummy_el/src/main.rs @@ -1,15 +1,15 @@ use axum::{ + Json, Router, extract::State, http::{Request, StatusCode}, middleware::{self, Next}, response::Response, routing::post, - Json, Router, }; use clap::Parser; use jsonwebtoken::{Algorithm, DecodingKey, Validation}; use serde::{Deserialize, Serialize}; -use serde_json::{json, Value as JsonValue}; +use serde_json::{Value as JsonValue, json}; use std::net::SocketAddr; use std::path::PathBuf; use std::sync::Arc; @@ -163,7 +163,10 @@ async fn handle_rpc( debug!("eth_getBlockByHash: returning null"); Ok(json!(null)) } - "engine_newPayloadV1" | "engine_newPayloadV2" | "engine_newPayloadV3" | "engine_newPayloadV4" => { + "engine_newPayloadV1" + | "engine_newPayloadV2" + | "engine_newPayloadV3" + | "engine_newPayloadV4" => { debug!("{}: returning SYNCING status", request.method); Ok(json!({ "status": "SYNCING", @@ -171,7 +174,9 @@ async fn handle_rpc( "validationError": null })) } - "engine_forkchoiceUpdatedV1" | "engine_forkchoiceUpdatedV2" | "engine_forkchoiceUpdatedV3" => { + "engine_forkchoiceUpdatedV1" + | "engine_forkchoiceUpdatedV2" + | "engine_forkchoiceUpdatedV3" => { debug!("{}: returning SYNCING status", request.method); Ok(json!({ "payloadStatus": { @@ -182,8 +187,15 @@ async fn 
handle_rpc( "payloadId": null })) } - "engine_getPayloadV1" | "engine_getPayloadV2" | "engine_getPayloadV3" | "engine_getPayloadV4" | "engine_getPayloadV5" => { - debug!("{}: returning error (payload not available)", request.method); + "engine_getPayloadV1" + | "engine_getPayloadV2" + | "engine_getPayloadV3" + | "engine_getPayloadV4" + | "engine_getPayloadV5" => { + debug!( + "{}: returning error (payload not available)", + request.method + ); Err(JsonRpcError { code: -38001, message: "Unknown payload".to_string(), @@ -217,7 +229,10 @@ async fn handle_rpc( "engine_getBlobsV1", "engine_getBlobsV2", ]; - debug!("engine_exchangeCapabilities: returning {} capabilities", capabilities.len()); + debug!( + "engine_exchangeCapabilities: returning {} capabilities", + capabilities.len() + ); Ok(json!(capabilities)) } "engine_getClientVersionV1" => { @@ -262,23 +277,23 @@ async fn handle_rpc( } // Simple RPC handler without JWT auth for non-Engine API ports -async fn handle_simple_rpc(Json(request): Json) -> (StatusCode, Json) { +async fn handle_simple_rpc( + Json(request): Json, +) -> (StatusCode, Json) { debug!(method = %request.method, "Received simple RPC request"); let result: Result = match request.method.as_str() { - "admin_nodeInfo" => { - Ok(json!({ - "id": "0ecd4a2c5f7c2a304e3acbec67efea275510d31c304fe47f4e626a2ebd5fb101", - "name": "Dummy-EL/v0.1.0", - "enode": "enode://dummy@127.0.0.1:30303", - "enr": "enr:-Iq4QDummy0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", - "ip": "127.0.0.1", - "ports": { - "discovery": 30303, - "listener": 30303 - } - })) - } + "admin_nodeInfo" => Ok(json!({ + "id": "0ecd4a2c5f7c2a304e3acbec67efea275510d31c304fe47f4e626a2ebd5fb101", + "name": "Dummy-EL/v0.1.0", + "enode": "enode://dummy@127.0.0.1:30303", + "enr": 
"enr:-Iq4QDummy0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", + "ip": "127.0.0.1", + "ports": { + "discovery": 30303, + "listener": 30303 + } + })), _ => { // For any other method, just return a success response Ok(json!(null)) @@ -328,18 +343,16 @@ async fn main() -> anyhow::Result<()> { // Read JWT secret if provided let jwt_secret = match &args.jwt_secret { - Some(path) => { - match read_jwt_secret(path) { - Ok(secret) => { - info!("JWT secret loaded from {:?}", path); - Some(secret) - } - Err(e) => { - error!("Failed to read JWT secret from {:?}: {}", path, e); - return Err(e); - } + Some(path) => match read_jwt_secret(path) { + Ok(secret) => { + info!("JWT secret loaded from {:?}", path); + Some(secret) } - } + Err(e) => { + error!("Failed to read JWT secret from {:?}: {}", path, e); + return Err(e); + } + }, None => { warn!("No JWT secret provided - authentication disabled!"); warn!("This is insecure and should only be used for testing"); @@ -363,7 +376,10 @@ async fn main() -> anyhow::Result<()> { // Engine API server (port 8551) with JWT auth let engine_app = Router::new() .route("/", post(handle_rpc)) - .layer(middleware::from_fn_with_state(state.clone(), auth_middleware)) + .layer(middleware::from_fn_with_state( + state.clone(), + auth_middleware, + )) .with_state(state.clone()); let engine_addr = format!("{}:{}", args.host, args.port) From 6f0fa9a6c3f2ef014899f0f9c2a1e194b923b90a Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Sat, 13 Dec 2025 16:08:02 +0000 Subject: [PATCH 51/67] fix import --- beacon_node/beacon_chain/src/observed_execution_proofs.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/observed_execution_proofs.rs b/beacon_node/beacon_chain/src/observed_execution_proofs.rs index e927ecad68f..1749edcca81 100644 --- a/beacon_node/beacon_chain/src/observed_execution_proofs.rs +++ 
b/beacon_node/beacon_chain/src/observed_execution_proofs.rs @@ -165,7 +165,7 @@ impl Default for ObservedExecutionProofs { #[cfg(test)] mod tests { use super::*; - use types::FixedBytesExtended; + use bls::FixedBytesExtended; fn test_proof_key(slot: u64) -> (Slot, Hash256, ExecutionProofId) { ( From d3c7e8dab5beb93051f15cc0a46aa8c5ed36a59f Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Sat, 13 Dec 2025 16:18:36 +0000 Subject: [PATCH 52/67] Fix FixedBytesExtended import issues --- Cargo.lock | 1 + zkvm_execution_layer/Cargo.toml | 1 + zkvm_execution_layer/src/dummy_proof_verifier.rs | 3 ++- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index dc7031d7640..9104c2a10af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11028,6 +11028,7 @@ version = "0.1.0" dependencies = [ "async-trait", "execution_layer", + "fixed_bytes", "hashbrown 0.15.5", "lru 0.12.5", "serde", diff --git a/zkvm_execution_layer/Cargo.toml b/zkvm_execution_layer/Cargo.toml index 69724cc6bc3..1f6f291e9a7 100644 --- a/zkvm_execution_layer/Cargo.toml +++ b/zkvm_execution_layer/Cargo.toml @@ -14,3 +14,4 @@ tokio = { version = "1", features = ["full"] } types = { path = "../consensus/types" } [dev-dependencies] +fixed_bytes = { workspace = true } diff --git a/zkvm_execution_layer/src/dummy_proof_verifier.rs b/zkvm_execution_layer/src/dummy_proof_verifier.rs index b7d06a852c5..9f322ed7551 100644 --- a/zkvm_execution_layer/src/dummy_proof_verifier.rs +++ b/zkvm_execution_layer/src/dummy_proof_verifier.rs @@ -55,7 +55,8 @@ impl ProofVerifier for DummyVerifier { #[cfg(test)] mod tests { use super::*; - use types::{ExecutionBlockHash, FixedBytesExtended}; + use fixed_bytes::FixedBytesExtended; + use types::ExecutionBlockHash; fn create_test_proof( subnet_id: ExecutionProofId, From 9b6c8e25778847e57e6f15d70d185b2a30557b09 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Sat, 13 Dec 2025 16:31:04 +0000 Subject: [PATCH 53/67] Update CLI help text for ZKVM 
flags --- book/src/help_bn.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 5f3c43a7e42..ed3acefc49e 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -8,6 +8,12 @@ beacon chain and publishing messages to the network. Usage: lighthouse beacon_node [OPTIONS] --execution-endpoint Options: + --activate-zkvm + Activates ZKVM execution proof mode. Enables the node to subscribe to + the execution_proof gossip topic, receive and verify execution proofs + from peers, and advertise zkVM support in its ENR for peer discovery. + Use --zkvm-generation-proof-types to specify which proof types this + node should generate (optional - nodes can verify without generating). --auto-compact-db Enable or disable automatic compaction of the database on finalization. [default: true] @@ -424,6 +430,10 @@ Options: verify the node's sync against. The block root should be 0x-prefixed. Note that this flag is for verification only, to perform a checkpoint sync from a recent state use --checkpoint-sync-url. + --zkvm-generation-proof-types + Comma-separated list of proof type IDs to generate (e.g., '0,1' where + 0=SP1+Reth, 1=Risc0+Geth). Optional - nodes can verify proofs without + generating them. 
-V, --version Print version From dd5e6b80c22e0d744fd2234bd669122ab8da81ba Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Sat, 13 Dec 2025 16:58:47 +0000 Subject: [PATCH 54/67] enable zkvm flag in test-setup rig --- .../src/sync/tests/execution_proof_tests.rs | 22 +++++++++---------- beacon_node/network/src/sync/tests/lookups.rs | 21 ++++++++++++++++-- 2 files changed, 30 insertions(+), 13 deletions(-) diff --git a/beacon_node/network/src/sync/tests/execution_proof_tests.rs b/beacon_node/network/src/sync/tests/execution_proof_tests.rs index ce006172187..16a53a008b0 100644 --- a/beacon_node/network/src/sync/tests/execution_proof_tests.rs +++ b/beacon_node/network/src/sync/tests/execution_proof_tests.rs @@ -7,7 +7,7 @@ use types::{ExecutionBlockHash, ExecutionProof, ExecutionProofId, Hash256, Slot} /// Test successful execution proof fetch and verification #[test] fn test_proof_lookup_happy_path() { - let Some(mut rig) = TestRig::test_setup_after_fulu() else { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { return; }; @@ -57,7 +57,7 @@ fn test_proof_lookup_happy_path() { /// Test that empty proof response results in peer penalization #[test] fn test_proof_lookup_empty_response() { - let Some(mut rig) = TestRig::test_setup_after_fulu() else { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { return; }; @@ -87,7 +87,7 @@ fn test_proof_lookup_empty_response() { /// Test partial proof response (peer doesn't have all requested proofs) #[test] fn test_proof_lookup_partial_response() { - let Some(mut rig) = TestRig::test_setup_after_fulu() else { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { return; }; @@ -152,7 +152,7 @@ fn test_proof_lookup_partial_response() { /// Test unrequested proof triggers penalization #[test] fn test_proof_lookup_unrequested_proof() { - let Some(mut rig) = TestRig::test_setup_after_fulu() else { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { 
return; }; @@ -200,7 +200,7 @@ fn test_proof_lookup_unrequested_proof() { /// Test duplicate proofs triggers penalization #[test] fn test_proof_lookup_duplicate_proof() { - let Some(mut rig) = TestRig::test_setup_after_fulu() else { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { return; }; @@ -261,7 +261,7 @@ fn test_proof_lookup_duplicate_proof() { /// Test wrong block root in proof triggers penalization #[test] fn test_proof_lookup_wrong_block_root() { - let Some(mut rig) = TestRig::test_setup_after_fulu() else { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { return; }; @@ -310,7 +310,7 @@ fn test_proof_lookup_wrong_block_root() { /// Test proof request timeout #[test] fn test_proof_lookup_timeout() { - let Some(mut rig) = TestRig::test_setup_after_fulu() else { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { return; }; @@ -344,7 +344,7 @@ fn test_proof_lookup_timeout() { /// Test peer disconnection during proof request #[test] fn test_proof_lookup_peer_disconnected() { - let Some(mut rig) = TestRig::test_setup_after_fulu() else { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { return; }; @@ -375,7 +375,7 @@ fn test_proof_lookup_peer_disconnected() { /// Test multiple retries on failure #[test] fn test_proof_lookup_multiple_retries() { - let Some(mut rig) = TestRig::test_setup_after_fulu() else { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { return; }; @@ -430,7 +430,7 @@ fn test_proof_lookup_multiple_retries() { /// Test proof lookup with no peers available #[test] fn test_proof_lookup_no_peers() { - let Some(mut rig) = TestRig::test_setup_after_fulu() else { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { return; }; @@ -464,7 +464,7 @@ fn test_proof_lookup_no_peers() { /// Test successful proof verification after block already has blobs #[test] fn test_proof_lookup_with_existing_blobs() { - let Some(mut rig) = 
TestRig::test_setup_after_fulu() else { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { return; }; diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index a9c3eb231c9..d6618d0225d 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -41,8 +41,8 @@ use slot_clock::{SlotClock, TestingSlotClock}; use tokio::sync::mpsc; use tracing::info; use types::{ - BeaconState, BeaconStateBase, BlobSidecar, BlockImportSource, DataColumnSidecar, EthSpec, - ExecutionBlockHash, ExecutionProof, ExecutionProofId, ForkContext, ForkName, Hash256, + BeaconState, BeaconStateBase, BlobSidecar, BlockImportSource, ChainSpec, DataColumnSidecar, + EthSpec, ExecutionBlockHash, ExecutionProof, ExecutionProofId, ForkContext, ForkName, Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot, data_column_sidecar::ColumnIndex, test_utils::{SeedableRng, TestRandom, XorShiftRng}, @@ -57,7 +57,10 @@ impl TestRig { pub fn test_setup() -> Self { // Use `fork_from_env` logic to set correct fork epochs let spec = test_spec::(); + Self::test_setup_with_spec(spec) + } + fn test_setup_with_spec(spec: ChainSpec) -> Self { // Initialise a new beacon chain let harness = BeaconChainHarness::>::builder(E) .spec(Arc::new(spec)) @@ -151,6 +154,20 @@ impl TestRig { } } + /// Setup test rig for Fulu with zkvm enabled. + /// This is needed for execution proof tests since proof requests are only made + /// when zkvm mode is enabled in the chain spec. 
+ pub fn test_setup_after_fulu_with_zkvm() -> Option { + let mut spec = test_spec::(); + spec.zkvm_enabled = true; + let r = Self::test_setup_with_spec(spec); + if r.fork_name.fulu_enabled() { + Some(r) + } else { + None + } + } + pub fn log(&self, msg: &str) { info!(msg, "TEST_RIG"); } From 0fc75306638237c1296996dd1cf4e6d7aafdd7b5 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Sat, 13 Dec 2025 17:45:55 +0000 Subject: [PATCH 55/67] minimal-specs-num-proofs-required set to 2 --- beacon_node/network/src/sync/tests/execution_proof_tests.rs | 3 +-- consensus/types/src/core/chain_spec.rs | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/beacon_node/network/src/sync/tests/execution_proof_tests.rs b/beacon_node/network/src/sync/tests/execution_proof_tests.rs index 16a53a008b0..0062ddbfa6c 100644 --- a/beacon_node/network/src/sync/tests/execution_proof_tests.rs +++ b/beacon_node/network/src/sync/tests/execution_proof_tests.rs @@ -37,8 +37,7 @@ fn test_proof_lookup_happy_path() { // Now expect proof request let proof_id = rig.expect_proof_lookup_request(block_root); - // Send all requested proofs - // TODO(zkproofs): We should use min_required instead of hardcoding 2 proofs here + // Send all requested proofs (minimal spec requires 2) let proof_ids = vec![ ExecutionProofId::new(0).unwrap(), ExecutionProofId::new(1).unwrap(), diff --git a/consensus/types/src/core/chain_spec.rs b/consensus/types/src/core/chain_spec.rs index 8838cac0942..72547d31e14 100644 --- a/consensus/types/src/core/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -1335,7 +1335,7 @@ impl ChainSpec { gloas_fork_epoch: None, // zkVM zkvm_enabled: false, - zkvm_min_proofs_required: 0, + zkvm_min_proofs_required: 2, min_epochs_for_execution_proof_requests: 2, // Other network_id: 2, // lighthouse testnet network id From 22bbd3ca3d4ea61ddb2de394f79f41d3a88472ac Mon Sep 17 00:00:00 2001 From: kevaundray Date: Tue, 16 Dec 2025 12:48:06 +0000 Subject: [PATCH 56/67] 
Remove proof generation type node (#3) * - update tests to have special zkvm nodes - check for zkvm capability in the enr - execution_proof_lookup_request filters zkvm nodes * remove proof generation service * cargo fmt * fix * update proof-gen-types * endpoints exposed * add basic tests * cargo fmt * remove executionWitness --- Cargo.lock | 15 - Cargo.toml | 1 - beacon_node/beacon_chain/src/beacon_chain.rs | 22 - beacon_node/beacon_chain/src/builder.rs | 32 -- .../src/data_availability_checker.rs | 3 - .../overflow_lru_cache.rs | 16 +- beacon_node/beacon_chain/src/lib.rs | 2 +- beacon_node/client/Cargo.toml | 2 - beacon_node/client/src/builder.rs | 64 --- beacon_node/http_api/src/beacon/pool.rs | 101 ++++- beacon_node/http_api/src/lib.rs | 5 + beacon_node/http_api/tests/tests.rs | 133 +++++- .../src/peer_manager/peerdb.rs | 27 +- .../src/peer_manager/peerdb/peer_info.rs | 16 +- .../network/src/sync/network_context.rs | 19 +- .../src/sync/tests/execution_proof_tests.rs | 90 +---- beacon_node/network/src/sync/tests/lookups.rs | 12 + .../proof_generation_service/Cargo.toml | 17 - .../proof_generation_service/src/lib.rs | 381 ------------------ beacon_node/src/cli.rs | 15 +- beacon_node/src/config.rs | 34 +- book/src/help_bn.md | 6 - common/eth2/src/lib.rs | 18 + ...network_params_mixed_proof_gen_verify.yaml | 7 +- .../network_params_proof_gen_only.yaml | 21 - 25 files changed, 342 insertions(+), 717 deletions(-) delete mode 100644 beacon_node/proof_generation_service/Cargo.toml delete mode 100644 beacon_node/proof_generation_service/src/lib.rs delete mode 100644 scripts/local_testnet/network_params_proof_gen_only.yaml diff --git a/Cargo.lock b/Cargo.lock index 9104c2a10af..141aef5750b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1930,7 +1930,6 @@ dependencies = [ "monitoring_api", "network", "operation_pool", - "proof_generation_service", "rand 0.9.2", "sensitive_url", "serde", @@ -7153,20 +7152,6 @@ dependencies = [ "syn 2.0.110", ] -[[package]] -name = 
"proof_generation_service" -version = "0.1.0" -dependencies = [ - "beacon_chain", - "lighthouse_network", - "logging", - "network", - "tokio", - "tracing", - "types", - "zkvm_execution_layer", -] - [[package]] name = "proptest" version = "1.9.0" diff --git a/Cargo.toml b/Cargo.toml index cd5f82b1788..3c20391e7e0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,6 @@ members = [ "beacon_node/lighthouse_tracing", "beacon_node/network", "beacon_node/operation_pool", - "beacon_node/proof_generation_service", "beacon_node/store", "beacon_node/timer", "boot_node", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index b314faa6d7b..434a73420dc 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -133,7 +133,6 @@ use store::{ KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, }; use task_executor::{RayonPoolType, ShutdownReason, TaskExecutor}; -use tokio::sync::mpsc::UnboundedSender; use tokio_stream::Stream; use tracing::{Span, debug, debug_span, error, info, info_span, instrument, trace, warn}; use tree_hash::TreeHash; @@ -141,7 +140,6 @@ use types::blob_sidecar::FixedBlobSidecarList; use types::data_column_sidecar::ColumnIndex; use types::payload::BlockProductionVersion; use types::*; -use zkvm_execution_layer::GeneratorRegistry; pub type ForkChoiceError = fork_choice::Error; @@ -352,8 +350,6 @@ pub enum BlockProcessStatus { pub type LightClientProducerEvent = (Hash256, Slot, SyncAggregate); -pub type ProofGenerationEvent = (Hash256, Slot, Arc>); - pub type BeaconForkChoice = ForkChoice< BeaconForkChoiceStore< ::EthSpec, @@ -495,10 +491,6 @@ pub struct BeaconChain { pub kzg: Arc, /// RNG instance used by the chain. Currently used for shuffling column sidecars in block publishing. 
pub rng: Arc>>, - /// Registry of zkVM proof generators for altruistic proof generation - pub zkvm_generator_registry: Option>, - /// Sender to notify proof generation service of blocks needing proofs - pub proof_generation_tx: Option>>, } pub enum BeaconBlockResponseWrapper { @@ -4191,20 +4183,6 @@ impl BeaconChain { current_slot, ); - // Notify proof generation service for altruistic proof generation - if let Some(ref proof_gen_tx) = self.proof_generation_tx { - let slot = signed_block.slot(); - let event = (block_root, slot, signed_block.clone()); - - if let Err(e) = proof_gen_tx.send(event) { - debug!( - error = ?e, - ?block_root, - "Failed to send proof generation event" - ); - } - } - Ok(block_root) } diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index de5d7b20966..bc5b41b09e1 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -2,7 +2,6 @@ use crate::ChainConfig; use crate::CustodyContext; use crate::beacon_chain::{ BEACON_CHAIN_DB_KEY, CanonicalHead, LightClientProducerEvent, OP_POOL_DB_KEY, - ProofGenerationEvent, }; use crate::beacon_proposer_cache::BeaconProposerCache; use crate::custody_context::NodeCustodyType; @@ -43,7 +42,6 @@ use std::sync::Arc; use std::time::Duration; use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; -use tokio::sync::mpsc::UnboundedSender; use tracing::{debug, error, info}; use types::data_column_custody_group::CustodyIndex; use types::{ @@ -116,10 +114,6 @@ pub struct BeaconChainBuilder { /// be replaced with ZkVmEngineApi from zkvm_execution_layer. This would allow the /// --execution-endpoint CLI flag to be optional when running in ZK-VM mode. 
zkvm_execution_layer_config: Option, - /// Registry of zkVM proof generators for currently altruistic proof generation - zkvm_generator_registry: Option>, - /// Sender to notify proof generation service of blocks needing proofs - proof_generation_tx: Option>>, } impl @@ -161,8 +155,6 @@ where ordered_custody_column_indices: None, rng: None, zkvm_execution_layer_config: None, - zkvm_generator_registry: None, - proof_generation_tx: None, } } @@ -723,21 +715,6 @@ where self } - /// Sets the zkVM generator registry for altruistic proof generation. - pub fn zkvm_generator_registry( - mut self, - registry: Arc, - ) -> Self { - self.zkvm_generator_registry = Some(registry); - self - } - - /// Sets a `Sender` to notify the proof generation service of new blocks. - pub fn proof_generation_tx(mut self, sender: UnboundedSender>) -> Self { - self.proof_generation_tx = Some(sender); - self - } - /// Creates a new, empty operation pool. fn empty_op_pool(mut self) -> Self { self.op_pool = Some(OperationPool::new()); @@ -1016,9 +993,6 @@ where }; debug!(?custody_context, "Loaded persisted custody context"); - let has_execution_layer_and_proof_gen = - self.execution_layer.is_some() && self.zkvm_generator_registry.is_some(); - let beacon_chain = BeaconChain { spec: self.spec.clone(), config: self.chain_config, @@ -1102,17 +1076,11 @@ where self.zkvm_execution_layer_config .as_ref() .map(|_| Arc::new(zkvm_execution_layer::registry_proof_verification::VerifierRegistry::new_with_dummy_verifiers())), - // Pass whether this node has an execution layer AND generates proofs - // Nodes with EL+proof-gen validate via traditional execution - // Nodes with EL but no proof-gen wait for proofs (lightweight verifier) - has_execution_layer_and_proof_gen, ) .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), kzg: self.kzg.clone(), rng: Arc::new(Mutex::new(rng)), - zkvm_generator_registry: self.zkvm_generator_registry, - proof_generation_tx: self.proof_generation_tx, }; 
let head = beacon_chain.head_snapshot(); diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index a4c86b5202e..8359de354d9 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -130,14 +130,12 @@ impl DataAvailabilityChecker { custody_context: Arc>, spec: Arc, verifier_registry: Option>, - has_execution_layer_and_proof_gen: bool, ) -> Result { let inner = DataAvailabilityCheckerInner::new( OVERFLOW_LRU_CAPACITY_NON_ZERO, store, custody_context.clone(), spec.clone(), - has_execution_layer_and_proof_gen, )?; Ok(Self { complete_blob_backfill, @@ -1473,7 +1471,6 @@ mod test { custody_context, spec, None, - false, ) .expect("should initialise data availability checker") } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 4f8b8e32dc7..5eebffdd2c4 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -272,7 +272,6 @@ impl PendingComponents { &self, spec: &Arc, num_expected_columns_opt: Option, - has_execution_layer_and_proof_gen: bool, recover: R, ) -> Result>, AvailabilityCheckError> where @@ -351,12 +350,7 @@ impl PendingComponents { }; // Check if this node needs execution proofs to validate blocks. - // Nodes that have EL and generate proofs validate via EL execution. - // Nodes that have EL but DON'T generate proofs are lightweight verifiers and wait for proofs. 
- // TODO(zkproofs): This is a technicality mainly because we cannot remove the EL on kurtosis - // ie each CL is coupled with an EL - let needs_execution_proofs = - spec.zkvm_min_proofs_required().is_some() && !has_execution_layer_and_proof_gen; + let needs_execution_proofs = spec.zkvm_min_proofs_required().is_some(); if needs_execution_proofs { let min_proofs = spec.zkvm_min_proofs_required().unwrap(); @@ -488,10 +482,6 @@ pub struct DataAvailabilityCheckerInner { state_cache: StateLRUCache, custody_context: Arc>, spec: Arc, - /// Whether this node has an execution layer AND generates proofs. - /// - true: Node has EL and generates proofs → validates via EL execution - /// - false: Node either has no EL, or has EL but doesn't generate → waits for proofs (lightweight verifier) - has_execution_layer_and_proof_gen: bool, } // This enum is only used internally within the crate in the reconstruction function to improve @@ -509,14 +499,12 @@ impl DataAvailabilityCheckerInner { beacon_store: BeaconStore, custody_context: Arc>, spec: Arc, - has_execution_layer_and_proof_gen: bool, ) -> Result { Ok(Self { critical: RwLock::new(LruCache::new(capacity)), state_cache: StateLRUCache::new(beacon_store, spec.clone()), custody_context, spec, - has_execution_layer_and_proof_gen, }) } @@ -720,7 +708,6 @@ impl DataAvailabilityCheckerInner { if let Some(available_block) = pending_components.make_available( &self.spec, num_expected_columns_opt, - self.has_execution_layer_and_proof_gen, |block, span| self.state_cache.recover_pending_executed_block(block, span), )? 
{ // Explicitly drop read lock before acquiring write lock @@ -1172,7 +1159,6 @@ mod test { test_store, custody_context, spec.clone(), - false, ) .expect("should create cache"), ); diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 686b56e63eb..4e310c4556d 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -66,7 +66,7 @@ pub use self::beacon_chain::{ BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, BlockProcessStatus, ChainSegmentResult, ForkChoiceError, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, LightClientProducerEvent, OverrideForkchoiceUpdate, - ProduceBlockVerification, ProofGenerationEvent, StateSkipConfig, WhenSlotSkipped, + ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, }; pub use self::beacon_snapshot::BeaconSnapshot; pub use self::chain_config::ChainConfig; diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 3a58457bb2b..e6f50b4e232 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -24,8 +24,6 @@ logging = { workspace = true } metrics = { workspace = true } monitoring_api = { workspace = true } network = { workspace = true } -# TODO(zkproofs): add as a workspace dependency -proof_generation_service = { path = "../proof_generation_service" } rand = { workspace = true } sensitive_url = { workspace = true } serde = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index eba0861dbef..41f8a8f6c8f 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -4,7 +4,6 @@ use crate::compute_light_client_updates::{ }; use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::notifier::spawn_notifier; -use beacon_chain::ProofGenerationEvent; use beacon_chain::attestation_simulator::start_attestation_simulator_service; use 
beacon_chain::data_availability_checker::start_availability_cache_maintenance_service; use beacon_chain::graffiti_calculator::start_engine_version_cache_refresh_service; @@ -33,7 +32,6 @@ use lighthouse_network::identity::Keypair; use lighthouse_network::{NetworkGlobals, prometheus_client::registry::Registry}; use monitoring_api::{MonitoringHttpClient, ProcessType}; use network::{NetworkConfig, NetworkSenders, NetworkService}; -use proof_generation_service; use rand::SeedableRng; use rand::rngs::{OsRng, StdRng}; use slasher::Slasher; @@ -50,7 +48,6 @@ use types::{ BeaconState, BlobSidecarList, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, SignedBeaconBlock, test_utils::generate_deterministic_keypairs, }; -use zkvm_execution_layer; /// Interval between polling the eth1 node for genesis information. pub const ETH1_GENESIS_UPDATE_INTERVAL_MILLIS: u64 = 7_000; @@ -92,8 +89,6 @@ pub struct ClientBuilder { beacon_processor_config: Option, beacon_processor_channels: Option>, light_client_server_rv: Option>>, - proof_generation_rx: - Option>>, eth_spec_instance: T::EthSpec, } @@ -128,7 +123,6 @@ where beacon_processor_config: None, beacon_processor_channels: None, light_client_server_rv: None, - proof_generation_rx: None, } } @@ -253,44 +247,6 @@ where builder }; - // Set up proof generation service if zkVM is configured with generation proof types - let builder = if let Some(ref zkvm_config) = config.zkvm_execution_layer { - if !zkvm_config.generation_proof_types.is_empty() { - // Validate that proof generation requires an execution layer - // Proof-generating nodes will validate blocks via EL execution, not proofs - if config.execution_layer.is_none() { - return Err( - "Proof generation requires an EL. \ - Nodes generating proofs must validate blocks via an execution layer. \ - To run a lightweight verifier node (without EL), omit --zkvm-generation-proof-types." 
- .into(), - ); - } - - // Create channel for proof generation events - let (proof_gen_tx, proof_gen_rx) = - tokio::sync::mpsc::unbounded_channel::>(); - - // Create generator registry with enabled proof types - let registry = Arc::new( - zkvm_execution_layer::GeneratorRegistry::new_with_dummy_generators( - zkvm_config.generation_proof_types.clone(), - ), - ); - - // Store receiver for later when we spawn the service - self.proof_generation_rx = Some(proof_gen_rx); - - builder - .zkvm_generator_registry(registry) - .proof_generation_tx(proof_gen_tx) - } else { - builder - } - } else { - builder - }; - let chain_exists = builder.store_contains_beacon_chain().unwrap_or(false); // If the client is expect to resume but there's no beacon chain in the database, @@ -852,26 +808,6 @@ where beacon_chain.task_executor.clone(), beacon_chain.clone(), ); - - // Start proof generation service if configured - if let Some(proof_gen_rx) = self.proof_generation_rx { - let network_tx = self - .network_senders - .as_ref() - .ok_or("proof_generation_service requires network_senders")? 
- .network_send(); - - let service = proof_generation_service::ProofGenerationService::new( - beacon_chain.clone(), - proof_gen_rx, - network_tx, - ); - - runtime_context.executor.spawn( - async move { service.run().await }, - "proof_generation_service", - ); - } } Ok(Client { diff --git a/beacon_node/http_api/src/beacon/pool.rs b/beacon_node/http_api/src/beacon/pool.rs index 059573c3175..63b1a95b2ed 100644 --- a/beacon_node/http_api/src/beacon/pool.rs +++ b/beacon_node/http_api/src/beacon/pool.rs @@ -5,6 +5,10 @@ use crate::version::{ unsupported_version_rejection, }; use crate::{sync_committees, utils}; +use beacon_chain::execution_proof_verification::{ + GossipExecutionProofError, GossipVerifiedExecutionProof, +}; +use beacon_chain::observed_data_sidecars::Observe; use beacon_chain::observed_operations::ObservationOutcome; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2::types::{AttestationPoolQuery, EndpointVersion, Failure, GenericResponse}; @@ -17,7 +21,7 @@ use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use tracing::{debug, info, warn}; use types::{ - Attestation, AttestationData, AttesterSlashing, ForkName, ProposerSlashing, + Attestation, AttestationData, AttesterSlashing, ExecutionProof, ForkName, ProposerSlashing, SignedBlsToExecutionChange, SignedVoluntaryExit, SingleAttestation, SyncCommitteeMessage, }; use warp::filters::BoxedFilter; @@ -520,3 +524,98 @@ pub fn post_beacon_pool_attestations_v2( ) .boxed() } + +/// POST beacon/pool/execution_proofs +/// +/// Submits an execution proof to the beacon node. +/// The proof will be validated and stored in the data availability checker. +/// If valid, the proof will be published to the gossip network. 
+pub fn post_beacon_pool_execution_proofs( + network_tx_filter: &NetworkTxFilter, + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("execution_proofs")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + proof: ExecutionProof, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let proof = Arc::new(proof); + + // Validate the proof using the same logic as gossip validation + let verified_proof: GossipVerifiedExecutionProof = + GossipVerifiedExecutionProof::new(proof.clone(), &chain).map_err(|e| { + match e { + GossipExecutionProofError::PriorKnown { + slot, + block_root, + proof_id, + } => { + debug!( + %slot, + %block_root, + %proof_id, + "Execution proof already known" + ); + warp_utils::reject::custom_bad_request(format!( + "proof already known for slot {} block_root {} proof_id {}", + slot, block_root, proof_id + )) + } + GossipExecutionProofError::PriorKnownUnpublished => { + // Proof is valid but was received via non-gossip source + // It's in the DA checker, so we should publish it to gossip + warp_utils::reject::custom_bad_request( + "proof already received but not yet published".to_string(), + ) + } + _ => warp_utils::reject::object_invalid(format!( + "proof verification failed: {:?}", + e + )), + } + })?; + + let slot = verified_proof.slot(); + let block_root = verified_proof.block_root(); + let proof_id = verified_proof.subnet_id(); + + // Publish the proof to the gossip network + utils::publish_pubsub_message( + &network_tx, + PubsubMessage::ExecutionProof(verified_proof.clone().into_inner()), + )?; + + // Store the proof in the data availability checker + if let Err(e) = chain + .data_availability_checker + .put_rpc_execution_proofs(block_root, vec![verified_proof.into_inner()]) + { + warn!( + %slot, + %block_root, + %proof_id, + error = ?e, + "Failed 
to store execution proof in DA checker" + ); + } + + info!( + %slot, + %block_root, + %proof_id, + "Execution proof submitted and published" + ); + + Ok(()) + }) + }, + ) + .boxed() +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 58cd2a3bdbc..8139e47985f 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1465,6 +1465,10 @@ pub fn serve( let post_beacon_pool_bls_to_execution_changes = post_beacon_pool_bls_to_execution_changes(&network_tx_filter, &beacon_pool_path); + // POST beacon/pool/execution_proofs + let post_beacon_pool_execution_proofs = + post_beacon_pool_execution_proofs(&network_tx_filter, &beacon_pool_path); + let beacon_rewards_path = eth_v1 .clone() .and(warp::path("beacon")) @@ -3356,6 +3360,7 @@ pub fn serve( .uor(post_beacon_pool_voluntary_exits) .uor(post_beacon_pool_sync_committees) .uor(post_beacon_pool_bls_to_execution_changes) + .uor(post_beacon_pool_execution_proofs) .uor(post_beacon_state_validators) .uor(post_beacon_state_validator_balances) .uor(post_beacon_state_validator_identities) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index f8eba0ee2b7..8b01ef3451e 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -46,8 +46,9 @@ use tokio::time::Duration; use tree_hash::TreeHash; use types::application_domain::ApplicationDomain; use types::{ - Domain, EthSpec, ExecutionBlockHash, Hash256, MainnetEthSpec, RelativeEpoch, SelectionProof, - SignedRoot, SingleAttestation, Slot, attestation::AttestationBase, + Domain, EthSpec, ExecutionBlockHash, ExecutionProof, ExecutionProofId, Hash256, MainnetEthSpec, + RelativeEpoch, SelectionProof, SignedRoot, SingleAttestation, Slot, + attestation::AttestationBase, }; type E = MainnetEthSpec; @@ -94,6 +95,7 @@ struct ApiTesterConfig { spec: ChainSpec, retain_historic_states: bool, node_custody_type: NodeCustodyType, + enable_zkvm: bool, } impl Default for 
ApiTesterConfig { @@ -104,6 +106,7 @@ impl Default for ApiTesterConfig { spec, retain_historic_states: false, node_custody_type: NodeCustodyType::Fullnode, + enable_zkvm: false, } } } @@ -113,6 +116,11 @@ impl ApiTesterConfig { self.retain_historic_states = true; self } + + fn with_zkvm(mut self) -> Self { + self.enable_zkvm = true; + self + } } impl ApiTester { @@ -129,10 +137,15 @@ impl ApiTester { Self::new_from_config(config).await } + pub async fn new_with_zkvm() -> Self { + let config = ApiTesterConfig::default().with_zkvm(); + Self::new_from_config(config).await + } + pub async fn new_from_config(config: ApiTesterConfig) -> Self { let spec = Arc::new(config.spec); - let mut harness = BeaconChainHarness::builder(MainnetEthSpec) + let mut builder = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) .chain_config(ChainConfig { reconstruct_historic_states: config.retain_historic_states, @@ -142,8 +155,13 @@ impl ApiTester { .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .mock_execution_layer() - .node_custody_type(config.node_custody_type) - .build(); + .node_custody_type(config.node_custody_type); + + if config.enable_zkvm { + builder = builder.zkvm_with_dummy_verifiers(); + } + + let mut harness = builder.build(); harness .mock_execution_layer @@ -2732,6 +2750,86 @@ impl ApiTester { self } + /// Helper to create a test execution proof for the head block + fn create_test_execution_proof(&self) -> ExecutionProof { + let head = self.chain.head_snapshot(); + let block_root = head.beacon_block_root; + let slot = head.beacon_block.slot(); + let block_hash = head + .beacon_block + .message() + .body() + .execution_payload() + .map(|p| p.block_hash()) + .unwrap_or_else(|_| ExecutionBlockHash::zero()); + + let proof_id = ExecutionProofId::new(0).expect("Valid proof id"); + let proof_data = vec![0u8; 32]; // Dummy proof data + + ExecutionProof::new(proof_id, slot, block_hash, block_root, proof_data) + .expect("Valid test 
proof") + } + + pub async fn test_post_beacon_pool_execution_proofs_valid(mut self) -> Self { + let proof = self.create_test_execution_proof(); + + self.client + .post_beacon_pool_execution_proofs(&proof) + .await + .unwrap(); + + assert!( + self.network_rx.network_recv.recv().await.is_some(), + "valid proof should be sent to network" + ); + + self + } + + pub async fn test_post_beacon_pool_execution_proofs_invalid_duplicate(mut self) -> Self { + let proof = self.create_test_execution_proof(); + + // First submission should succeed + self.client + .post_beacon_pool_execution_proofs(&proof) + .await + .unwrap(); + + // Consume the network message + self.network_rx.network_recv.recv().await; + + // Duplicate submission should fail + let result = self.client.post_beacon_pool_execution_proofs(&proof).await; + + assert!(result.is_err(), "duplicate proof should be rejected"); + + assert!( + self.network_rx.network_recv.recv().now_or_never().is_none(), + "duplicate proof should not be sent to network" + ); + + self + } + + pub async fn test_post_beacon_pool_execution_proofs_invalid_future_slot(self) -> Self { + let head = self.chain.head_snapshot(); + let block_root = head.beacon_block_root; + let future_slot = self.chain.slot().unwrap() + 100u64; + let block_hash = ExecutionBlockHash::zero(); + + let proof_id = ExecutionProofId::new(0).expect("Valid proof id"); + let proof_data = vec![0u8; 32]; + + let proof = ExecutionProof::new(proof_id, future_slot, block_hash, block_root, proof_data) + .expect("Valid test proof"); + + let result = self.client.post_beacon_pool_execution_proofs(&proof).await; + + assert!(result.is_err(), "future slot proof should be rejected"); + + self + } + pub async fn test_get_config_fork_schedule(self) -> Self { let result = self.client.get_config_fork_schedule().await.unwrap().data; @@ -7186,6 +7284,30 @@ async fn beacon_pools_post_voluntary_exits_invalid() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn 
beacon_pools_post_execution_proofs_valid() { + ApiTester::new_with_zkvm() + .await + .test_post_beacon_pool_execution_proofs_valid() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_pools_post_execution_proofs_invalid_duplicate() { + ApiTester::new_with_zkvm() + .await + .test_post_beacon_pool_execution_proofs_invalid_duplicate() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_pools_post_execution_proofs_invalid_future_slot() { + ApiTester::new_with_zkvm() + .await + .test_post_beacon_pool_execution_proofs_invalid_future_slot() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn config_get() { ApiTester::new() @@ -7899,6 +8021,7 @@ async fn get_blobs_post_fulu_supernode() { retain_historic_states: false, spec: E::default_spec(), node_custody_type: NodeCustodyType::Supernode, + enable_zkvm: false, }; config.spec.altair_fork_epoch = Some(Epoch::new(0)); config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 87337cafcf5..142c62c966e 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -1,5 +1,5 @@ use crate::discovery::CombinedKey; -use crate::discovery::enr::PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY; +use crate::discovery::enr::{PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY, ZKVM_ENABLED_ENR_KEY}; use crate::{Enr, Gossipsub, PeerId, SyncInfo, metrics, multiaddr::Multiaddr, types::Subnet}; use itertools::Itertools; use logging::crit; @@ -799,6 +799,26 @@ impl PeerDB { supernode: bool, spec: &ChainSpec, enr_key: CombinedKey, + ) -> PeerId { + self.__add_connected_peer_with_opts_testing_only(supernode, false, spec, enr_key) + } + + /// Updates the connection state with zkvm option. MUST ONLY BE USED IN TESTS. 
+ pub fn __add_connected_zkvm_peer_testing_only( + &mut self, + spec: &ChainSpec, + enr_key: CombinedKey, + ) -> PeerId { + self.__add_connected_peer_with_opts_testing_only(false, true, spec, enr_key) + } + + /// Updates the connection state with options. MUST ONLY BE USED IN TESTS. + fn __add_connected_peer_with_opts_testing_only( + &mut self, + supernode: bool, + zkvm_enabled: bool, + spec: &ChainSpec, + enr_key: CombinedKey, ) -> PeerId { let mut enr = Enr::builder().build(&enr_key).unwrap(); let peer_id = enr.peer_id(); @@ -812,6 +832,11 @@ impl PeerDB { .expect("u64 can be encoded"); } + if zkvm_enabled { + enr.insert(ZKVM_ENABLED_ENR_KEY, &true, &enr_key) + .expect("bool can be encoded"); + } + self.update_connection_state( &peer_id, NewConnectionState::Connected { diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index 483da11be0b..38cbd6e7782 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -92,6 +92,15 @@ impl PeerInfo { /// Returns if the peer is subscribed to a given `Subnet` from the metadata attnets/syncnets field. /// Also returns true if the peer is assigned to custody a given data column `Subnet` computed from the metadata `custody_group_count` field or ENR `cgc` field. pub fn on_subnet_metadata(&self, subnet: &Subnet) -> bool { + // ExecutionProof capability is advertised via ENR zkvm flag, not metadata. + // Check this separately since it doesn't depend on metadata presence. 
+ if let Subnet::ExecutionProof = subnet { + if let Some(enr) = self.enr.as_ref() { + return enr.zkvm_enabled(); + } + return false; + } + if let Some(meta_data) = &self.meta_data { match subnet { Subnet::Attestation(id) => { @@ -106,12 +115,7 @@ impl PeerInfo { return self.is_assigned_to_custody_subnet(subnet_id); } Subnet::ExecutionProof => { - // ExecutionProof capability is advertised via ENR zkvm flag, not metadata - // A node cannot dynamically change what the support. - if let Some(enr) = self.enr.as_ref() { - return enr.zkvm_enabled(); - } - return false; + unreachable!("zkvm flag is only in the ENR") } } } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 73afabe60d2..0943787c925 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -30,7 +30,7 @@ use lighthouse_network::service::api_types::{ DataColumnsByRangeRequestId, DataColumnsByRangeRequester, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, SingleLookupReqId, SyncRequestId, }; -use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource}; +use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Subnet}; use lighthouse_tracing::{SPAN_OUTGOING_BLOCK_BY_ROOT_REQUEST, SPAN_OUTGOING_RANGE_REQUEST}; use parking_lot::RwLock; pub use requests::LookupVerifyError; @@ -1048,9 +1048,18 @@ impl SyncNetworkContext { min_proofs_required: usize, ) -> Result { let active_request_count_by_peer = self.active_request_count_by_peer(); + let peers_db = self.network_globals().peers.read(); + + // Filter to only zkvm-enabled peers let Some(peer_id) = lookup_peers .read() .iter() + .filter(|peer| { + peers_db + .peer_info(peer) + .map(|info| info.on_subnet_metadata(&Subnet::ExecutionProof)) + .unwrap_or(false) + }) .map(|peer| { ( // Prefer peers with less overall requests @@ -1063,9 +1072,11 @@ impl SyncNetworkContext { .min() 
.map(|(_, _, peer)| *peer) else { - return Ok(LookupRequestResult::Pending("no peers")); + return Ok(LookupRequestResult::Pending("no zkvm-enabled peers")); }; + drop(peers_db); + // Query DA checker for proofs we already have let already_have = self .chain @@ -1124,8 +1135,8 @@ impl SyncNetworkContext { self.execution_proofs_by_root_requests.insert( id, peer_id, - // Don't expect max responses since peer might not have all the proofs we need - false, + // Expect peer to provide all requested proofs - if they can't, penalize + true, ExecutionProofsByRootRequestItems::new(request), Span::none(), ); diff --git a/beacon_node/network/src/sync/tests/execution_proof_tests.rs b/beacon_node/network/src/sync/tests/execution_proof_tests.rs index 0062ddbfa6c..32f251adccc 100644 --- a/beacon_node/network/src/sync/tests/execution_proof_tests.rs +++ b/beacon_node/network/src/sync/tests/execution_proof_tests.rs @@ -13,7 +13,7 @@ fn test_proof_lookup_happy_path() { let block = rig.rand_block(); let block_root = block.canonical_root(); - let peer_id = rig.new_connected_peer(); + let peer_id = rig.new_connected_zkvm_peer(); // Get execution payload hash from the block let block_hash = block @@ -62,7 +62,7 @@ fn test_proof_lookup_empty_response() { let block = rig.rand_block(); let block_root = block.canonical_root(); - let peer_id = rig.new_connected_peer(); + let peer_id = rig.new_connected_zkvm_peer(); // Trigger lookup rig.trigger_unknown_block_from_attestation(block_root, peer_id); @@ -79,7 +79,7 @@ fn test_proof_lookup_empty_response() { rig.expect_penalty(peer_id, "NotEnoughResponsesReturned"); // Should retry with different peer - let _new_peer = rig.new_connected_peer(); + let _new_peer = rig.new_connected_zkvm_peer(); rig.expect_proof_lookup_request(block_root); } @@ -92,7 +92,7 @@ fn test_proof_lookup_partial_response() { let block = rig.rand_block(); let block_root = block.canonical_root(); - let peer_id = rig.new_connected_peer(); + let peer_id = 
rig.new_connected_zkvm_peer(); let block_hash = block .message() .body() @@ -128,7 +128,7 @@ fn test_proof_lookup_partial_response() { rig.expect_penalty(peer_id, "NotEnoughResponsesReturned"); // Should retry with another peer - let new_peer = rig.new_connected_peer(); + let new_peer = rig.new_connected_zkvm_peer(); let retry_proof_id = rig.expect_proof_lookup_request(block_root); // Complete with all proofs @@ -148,54 +148,6 @@ fn test_proof_lookup_partial_response() { rig.expect_no_active_lookups(); } -/// Test unrequested proof triggers penalization -#[test] -fn test_proof_lookup_unrequested_proof() { - let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { - return; - }; - - let block = rig.rand_block(); - let block_root = block.canonical_root(); - let peer_id = rig.new_connected_peer(); - let block_hash = block - .message() - .body() - .execution_payload() - .ok() - .map(|p| p.execution_payload_ref().block_hash()) - .unwrap_or_else(ExecutionBlockHash::zero); - - // Trigger lookup - rig.trigger_unknown_block_from_attestation(block_root, peer_id); - let block_id = rig.expect_block_lookup_request(block_root); - rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); - rig.expect_block_process(ResponseType::Block); - - let proof_id = rig.expect_proof_lookup_request(block_root); - - // Requested proofs 0, 1 but peer sends proofs 5 (unrequested) - let unrequested_proof = Arc::new( - ExecutionProof::new( - ExecutionProofId::new(5).unwrap(), - Slot::new(0), - block_hash, - block_root, - vec![1, 2, 3], - ) - .unwrap(), - ); - - rig.single_lookup_proof_response(proof_id, peer_id, Some(unrequested_proof)); - - // Should penalize peer for sending unrequested data - rig.expect_penalty(peer_id, "UnrequestedProof"); - - // Should retry - let _new_peer = rig.new_connected_peer(); - rig.expect_proof_lookup_request(block_root); -} - /// Test duplicate proofs triggers penalization #[test] fn test_proof_lookup_duplicate_proof() { @@ -205,7 +157,7 
@@ fn test_proof_lookup_duplicate_proof() { let block = rig.rand_block(); let block_root = block.canonical_root(); - let peer_id = rig.new_connected_peer(); + let peer_id = rig.new_connected_zkvm_peer(); let block_hash = block .message() .body() @@ -250,10 +202,10 @@ fn test_proof_lookup_duplicate_proof() { rig.single_lookup_proof_response(proof_id, peer_id, Some(proof_0_b)); // Should penalize peer for duplicate proof - rig.expect_penalty(peer_id, "DuplicatedProof"); + rig.expect_penalty(peer_id, "DuplicatedProofIDs"); // Should retry - let _new_peer = rig.new_connected_peer(); + let _new_peer = rig.new_connected_zkvm_peer(); rig.expect_proof_lookup_request(block_root); } @@ -267,7 +219,7 @@ fn test_proof_lookup_wrong_block_root() { let block = rig.rand_block(); let block_root = block.canonical_root(); let wrong_root = Hash256::random(); - let peer_id = rig.new_connected_peer(); + let peer_id = rig.new_connected_zkvm_peer(); let block_hash = block .message() .body() @@ -302,7 +254,7 @@ fn test_proof_lookup_wrong_block_root() { rig.expect_penalty(peer_id, "UnrequestedBlockRoot"); // Should retry - let _new_peer = rig.new_connected_peer(); + let _new_peer = rig.new_connected_zkvm_peer(); rig.expect_proof_lookup_request(block_root); } @@ -315,7 +267,7 @@ fn test_proof_lookup_timeout() { let block = rig.rand_block(); let block_root = block.canonical_root(); - let peer_id = rig.new_connected_peer(); + let peer_id = rig.new_connected_zkvm_peer(); // Trigger lookup rig.trigger_unknown_block_from_attestation(block_root, peer_id); @@ -332,11 +284,9 @@ fn test_proof_lookup_timeout() { error: RPCError::ErrorResponse(RpcErrorResponse::ServerError, "timeout".to_string()), }); - // Should penalize peer for timeout - rig.expect_penalty(peer_id, "rpc_error"); - + // RPC errors trigger retry without necessarily penalizing the peer // Should retry with different peer - let _new_peer = rig.new_connected_peer(); + let _new_peer = rig.new_connected_zkvm_peer(); 
rig.expect_proof_lookup_request(block_root); } @@ -349,7 +299,7 @@ fn test_proof_lookup_peer_disconnected() { let block = rig.rand_block(); let block_root = block.canonical_root(); - let peer_id = rig.new_connected_peer(); + let peer_id = rig.new_connected_zkvm_peer(); // Trigger lookup rig.trigger_unknown_block_from_attestation(block_root, peer_id); @@ -367,7 +317,7 @@ fn test_proof_lookup_peer_disconnected() { }); // Should retry with different peer (no penalty for disconnect) - let _new_peer = rig.new_connected_peer(); + let _new_peer = rig.new_connected_zkvm_peer(); rig.expect_proof_lookup_request(block_root); } @@ -388,7 +338,7 @@ fn test_proof_lookup_multiple_retries() { .map(|p| p.execution_payload_ref().block_hash()) .unwrap_or_else(ExecutionBlockHash::zero); - let peer_id = rig.new_connected_peer(); + let peer_id = rig.new_connected_zkvm_peer(); // Trigger lookup rig.trigger_unknown_block_from_attestation(block_root, peer_id); @@ -402,13 +352,13 @@ fn test_proof_lookup_multiple_retries() { rig.expect_penalty(peer_id, "NotEnoughResponsesReturned"); // Second attempt - different peer, also fails - let peer_id_2 = rig.new_connected_peer(); + let peer_id_2 = rig.new_connected_zkvm_peer(); let proof_id_2 = rig.expect_proof_lookup_request(block_root); rig.single_lookup_proof_response(proof_id_2, peer_id_2, None); rig.expect_penalty(peer_id_2, "NotEnoughResponsesReturned"); // Third attempt - succeeds - let peer_id_3 = rig.new_connected_peer(); + let peer_id_3 = rig.new_connected_zkvm_peer(); let proof_id_3 = rig.expect_proof_lookup_request(block_root); rig.complete_single_lookup_proof_download( proof_id_3, @@ -435,7 +385,7 @@ fn test_proof_lookup_no_peers() { let block = rig.rand_block(); let block_root = block.canonical_root(); - let peer_id = rig.new_connected_peer(); + let peer_id = rig.new_connected_zkvm_peer(); // Trigger lookup rig.trigger_unknown_block_from_attestation(block_root, peer_id); @@ -476,7 +426,7 @@ fn test_proof_lookup_with_existing_blobs() { 
.ok() .map(|p| p.execution_payload_ref().block_hash()) .unwrap_or_else(ExecutionBlockHash::zero); - let peer_id = rig.new_connected_peer(); + let peer_id = rig.new_connected_zkvm_peer(); // Trigger lookup rig.trigger_unknown_block_from_attestation(block_root, peer_id); diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index d6618d0225d..8e190da2b9d 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -371,6 +371,18 @@ impl TestRig { .__add_connected_peer_testing_only(true, &self.harness.spec, key) } + /// Create a new connected peer with zkvm enabled (advertises zkvm=true in ENR) + pub fn new_connected_zkvm_peer(&mut self) -> PeerId { + let key = self.determinstic_key(); + let peer_id = self + .network_globals + .peers + .write() + .__add_connected_zkvm_peer_testing_only(&self.harness.spec, key); + self.log(&format!("Added new zkvm peer for testing {peer_id:?}")); + peer_id + } + fn determinstic_key(&mut self) -> CombinedKey { k256::ecdsa::SigningKey::random(&mut self.rng_08).into() } diff --git a/beacon_node/proof_generation_service/Cargo.toml b/beacon_node/proof_generation_service/Cargo.toml deleted file mode 100644 index 21f25007603..00000000000 --- a/beacon_node/proof_generation_service/Cargo.toml +++ /dev/null @@ -1,17 +0,0 @@ -[package] -name = "proof_generation_service" -version = "0.1.0" -edition = "2021" - -[dependencies] -beacon_chain = { path = "../beacon_chain" } -lighthouse_network = { workspace = true } -logging = { workspace = true } -network = { workspace = true } -tokio = { workspace = true } -tracing = { workspace = true } -types = { path = "../../consensus/types" } -# TODO(zkproofs): add as a workspace dependency -zkvm_execution_layer = { path = "../../zkvm_execution_layer" } - -[dev-dependencies] diff --git a/beacon_node/proof_generation_service/src/lib.rs b/beacon_node/proof_generation_service/src/lib.rs deleted file mode 100644 
index 5265ea08719..00000000000 --- a/beacon_node/proof_generation_service/src/lib.rs +++ /dev/null @@ -1,381 +0,0 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes, ProofGenerationEvent}; -use lighthouse_network::PubsubMessage; -use network::NetworkMessage; -use std::sync::Arc; -use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; -use tracing::{debug, error, info}; -use types::{EthSpec, ExecPayload, ExecutionProofId, Hash256, SignedBeaconBlock, Slot}; - -/// Service responsible for "altruistic" proof generation -/// -/// This service receives notifications about newly imported blocks and generates -/// execution proofs for blocks that don't have proofs yet. This allows any node -/// (not just the block proposer) to generate and publish proofs. -/// -/// Note: While proofs are optional, we don't have the proposer making proofs -/// for their own block. The proposer should insert the block into their own -/// chain, so this should trigger. -pub struct ProofGenerationService { - /// Reference to the beacon chain - chain: Arc>, - /// Receiver for proof generation events - event_rx: UnboundedReceiver>, - /// Sender to publish proofs to the network - network_tx: UnboundedSender>, -} - -impl ProofGenerationService { - pub fn new( - chain: Arc>, - event_rx: UnboundedReceiver>, - network_tx: UnboundedSender>, - ) -> Self { - Self { - chain, - event_rx, - network_tx, - } - } - - /// Run the service event loop - pub async fn run(mut self) { - info!("Proof generation service started"); - - while let Some(event) = self.event_rx.recv().await { - let (block_root, slot, block) = event; - - debug!( - slot = ?slot, - block_root = ?block_root, - "Received block import notification" - ); - - // Handle the event - self.handle_block_import(block_root, slot, block).await; - } - - info!("Proof generation service stopped"); - } - - /// Handle a block import event - async fn handle_block_import( - &self, - block_root: Hash256, - slot: Slot, - block: Arc>, - ) { - // Check if 
proofs are required for this epoch - // TODO(zkproofs): alternative is to only enable this when - // the zkvm fork is enabled. Check if this is possible - let block_epoch = slot.epoch(T::EthSpec::slots_per_epoch()); - if !self - .chain - .data_availability_checker - .execution_proof_check_required_for_epoch(block_epoch) - { - debug!( - slot = ?slot, - epoch = ?block_epoch, - "Proofs not required for this epoch, skipping proof generation" - ); - return; - } - - // Check if we have a proof generator registry - let registry = match &self.chain.zkvm_generator_registry { - Some(registry) => registry.clone(), - None => { - debug!( - slot = ?slot, - "No generator registry configured, skipping proof generation" - ); - return; - } - }; - - // Get the list of proof types we should generate - let proof_types = registry.proof_ids(); - - if proof_types.is_empty() { - debug!( - slot = ?slot, - "No proof generators registered" - ); - return; - } - - debug!( - slot = ?slot, - block_root = ?block_root, - proof_types = proof_types.len(), - "Checking for locally missing proofs" - ); - - // Check which proofs are missing/we haven't received yet - for proof_id in proof_types { - // Check if we already have this proof - let has_proof = self.check_if_proof_exists(slot, block_root, proof_id); - - if has_proof { - debug!( - slot = ?slot, - proof_id = ?proof_id, - "Proof already exists, skipping" - ); - continue; - } - - self.spawn_proof_generation( - block_root, - slot, - block.clone(), - proof_id, - registry.clone(), - self.network_tx.clone(), - ); - } - } - - /// Check if a proof already exists for this block - fn check_if_proof_exists( - &self, - slot: Slot, - block_root: Hash256, - proof_id: ExecutionProofId, - ) -> bool { - let observed = self.chain.observed_execution_proofs.read(); - observed - .is_known(slot, block_root, proof_id) - .unwrap_or(false) - } - - /// Spawn a task to generate a proof - fn spawn_proof_generation( - &self, - block_root: Hash256, - slot: Slot, - block: Arc>, 
- proof_id: ExecutionProofId, - registry: Arc, - network_tx: UnboundedSender>, - ) { - let chain = self.chain.clone(); - - // Get the generator for this proof type - let Some(generator) = registry.get_generator(proof_id) else { - debug!( - slot = ?slot, - proof_id = ?proof_id, - "No generator found for proof type" - ); - return; - }; - - // Spawn the generation task (async because generator.generate() is async) - self.chain.task_executor.spawn( - async move { - info!( - slot = ?slot, - block_root = ?block_root, - proof_id = ?proof_id, - "Generating execution proof" - ); - - // Extract execution payload hash from the block - let block_hash = match block.message().execution_payload() { - Ok(payload) => payload.block_hash(), - Err(e) => { - debug!( - slot = ?slot, - block_root = ?block_root, - error = ?e, - "Block has no execution payload, skipping proof generation" - ); - return; - } - }; - - // Generate the proof using the generator - let proof_result = generator.generate(slot, &block_hash, &block_root).await; - - match proof_result { - Ok(proof) => { - info!( - slot = ?slot, - proof_id = ?proof_id, - "Successfully generated proof" - ); - - // Double-check that proof didn't arrive via gossip while we were generating - let observed = chain.observed_execution_proofs.read(); - if observed - .is_known(slot, block_root, proof_id) - .unwrap_or(false) - { - info!( - slot = ?slot, - proof_id = ?proof_id, - "Proof arrived via gossip while generating, discarding our copy" - ); - return; - } - drop(observed); - - // Note: We don't store the proof in the data availability checker because: - // 1. The block has already been imported and is no longer in the availability cache - // 2. This is altruistic proof generation - we're generating proofs for OTHER nodes - // 3. 
We already have the block, so we don't need the proof for ourselves - - // Publish the proof to the network - let pubsub_message = PubsubMessage::ExecutionProof(Arc::new(proof)); - - let network_message = NetworkMessage::Publish { - messages: vec![pubsub_message], - }; - - if let Err(e) = network_tx.send(network_message) { - error!( - slot = ?slot, - proof_id = ?proof_id, - error = ?e, - "Failed to send proof to network service" - ); - } else { - info!( - slot = ?slot, - proof_id = ?proof_id, - "Proof successfully published to network" - ); - - // Mark the proof as observed so we don't regenerate it - if let Err(e) = chain - .observed_execution_proofs - .write() - .observe_proof(slot, block_root, proof_id) - { - error!( - slot = ?slot, - proof_id = ?proof_id, - error = ?e, - "Failed to mark proof as observed" - ); - } - } - } - Err(e) => { - error!( - slot = ?slot, - proof_id = ?proof_id, - error = %e, - "Failed to generate proof" - ); - } - } - }, - "proof_generation", - ); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use beacon_chain::test_utils::{ - AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, - }; - use tokio::sync::mpsc; - use types::MinimalEthSpec as E; - - type TestHarness = BeaconChainHarness>; - - /// Create a test harness with minimal setup - fn build_test_harness(validator_count: usize) -> TestHarness { - BeaconChainHarness::builder(E) - .default_spec() - .deterministic_keypairs(validator_count) - .fresh_ephemeral_store() - .build() - } - - #[tokio::test] - async fn test_check_if_proof_exists_returns_false_for_new_proof() { - let harness = build_test_harness(8); - let chain = harness.chain.clone(); - - let (_event_tx, event_rx) = mpsc::unbounded_channel(); - let (network_tx, _network_rx) = mpsc::unbounded_channel(); - - let service = ProofGenerationService::new(chain, event_rx, network_tx); - - let block_root = Hash256::random(); - let slot = types::Slot::new(1); - let proof_id = 
ExecutionProofId::new(0).unwrap(); - - // Should return false for a proof that hasn't been observed - assert!(!service.check_if_proof_exists(slot, block_root, proof_id)); - } - - #[tokio::test] - async fn test_check_if_proof_exists_returns_true_after_observation() { - let harness = build_test_harness(8); - let chain = harness.chain.clone(); - - let (_event_tx, event_rx) = mpsc::unbounded_channel(); - let (network_tx, _network_rx) = mpsc::unbounded_channel(); - - let service = ProofGenerationService::new(chain.clone(), event_rx, network_tx); - - let block_root = Hash256::random(); - let slot = types::Slot::new(1); - let proof_id = ExecutionProofId::new(0).unwrap(); - - // Mark the proof as observed - chain - .observed_execution_proofs - .write() - .observe_proof(slot, block_root, proof_id) - .unwrap(); - - // Should return true for an observed proof - assert!(service.check_if_proof_exists(slot, block_root, proof_id)); - } - - #[tokio::test] - async fn test_handle_block_import_skips_when_epoch_not_required() { - let harness = build_test_harness(8); - let chain = harness.chain.clone(); - - // Note: zkVM is NOT enabled in this harness - // TODO(zkproofs): can we make a harness with zkVM enabled to test this functionality in a unit test - - let (_event_tx, event_rx) = mpsc::unbounded_channel(); - let (network_tx, mut network_rx) = mpsc::unbounded_channel(); - - let service = ProofGenerationService::new(chain.clone(), event_rx, network_tx); - - harness.advance_slot(); - - harness - .extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ) - .await; - - let block = harness.chain.head_snapshot().beacon_block.clone(); - let block_root = block.canonical_root(); - let slot = block.slot(); - - service.handle_block_import(block_root, slot, block).await; - - // Give async tasks time to complete - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - - // Should not have published any proofs because epoch doesn't require them - 
assert!( - network_rx.try_recv().is_err(), - "Should not publish proofs when epoch doesn't require them" - ); - } -} diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 268ba468a06..48a7fb3e4f3 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -939,23 +939,10 @@ pub fn cli_app() -> Command { .long("activate-zkvm") .help("Activates ZKVM execution proof mode. Enables the node to subscribe to the \ execution_proof gossip topic, receive and verify execution proofs from peers, \ - and advertise zkVM support in its ENR for peer discovery. \ - Use --zkvm-generation-proof-types to specify which proof types this node \ - should generate (optional - nodes can verify without generating).") + and advertise zkVM support in its ENR for peer discovery.") .action(ArgAction::SetTrue) .display_order(0) ) - .arg( - Arg::new("zkvm-generation-proof-types") - .long("zkvm-generation-proof-types") - .value_name("PROOF_TYPE_IDS") - .help("Comma-separated list of proof type IDs to generate \ - (e.g., '0,1' where 0=SP1+Reth, 1=Risc0+Geth). 
\ - Optional - nodes can verify proofs without generating them.") - .requires("activate-zkvm") - .action(ArgAction::Set) - .display_order(0) - ) /* Deneb settings */ .arg( Arg::new("trusted-setup-file-override") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 2f951daae1f..f58ca5d12da 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -30,7 +30,7 @@ use std::str::FromStr; use std::time::Duration; use tracing::{error, info, warn}; use types::graffiti::GraffitiString; -use types::{Checkpoint, Epoch, EthSpec, ExecutionProofId, Hash256}; +use types::{Checkpoint, Epoch, EthSpec, Hash256}; use zkvm_execution_layer::ZKVMExecutionLayerConfig; const PURGE_DB_CONFIRMATION: &str = "confirm"; @@ -340,43 +340,13 @@ pub fn get_config( // Parse ZK-VM execution layer config if provided if cli_args.get_flag("activate-zkvm") { - let generation_proof_types = if let Some(gen_types_str) = - clap_utils::parse_optional::(cli_args, "zkvm-generation-proof-types")? - { - gen_types_str - .split(',') - .map(|s| s.trim().parse::()) - .collect::, _>>() - .map_err(|e| { - format!( - "Invalid proof type ID in --zkvm-generation-proof-types: {}", - e - ) - })? - .into_iter() - .map(ExecutionProofId::new) - .collect::, _>>() - .map_err(|e| format!("Invalid subnet ID: {}", e))? 
- } else { - HashSet::new() - }; - - // Build and validate the config let zkvm_config = ZKVMExecutionLayerConfig::builder() - .generation_proof_types(generation_proof_types) .build() .map_err(|e| format!("Invalid ZK-VM configuration: {}", e))?; client_config.zkvm_execution_layer = Some(zkvm_config); - info!( - "ZKVM mode activated with generation_proof_types={:?}", - client_config - .zkvm_execution_layer - .as_ref() - .unwrap() - .generation_proof_types - ); + info!("ZKVM mode activated"); } // Override default trusted setup file if required diff --git a/book/src/help_bn.md b/book/src/help_bn.md index ed3acefc49e..208667e8c1a 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -12,8 +12,6 @@ Options: Activates ZKVM execution proof mode. Enables the node to subscribe to the execution_proof gossip topic, receive and verify execution proofs from peers, and advertise zkVM support in its ENR for peer discovery. - Use --zkvm-generation-proof-types to specify which proof types this - node should generate (optional - nodes can verify without generating). --auto-compact-db Enable or disable automatic compaction of the database on finalization. [default: true] @@ -430,10 +428,6 @@ Options: verify the node's sync against. The block root should be 0x-prefixed. Note that this flag is for verification only, to perform a checkpoint sync from a recent state use --checkpoint-sync-url. - --zkvm-generation-proof-types - Comma-separated list of proof type IDs to generate (e.g., '0,1' where - 0=SP1+Reth, 1=Risc0+Geth). Optional - nodes can verify proofs without - generating them. 
-V, --version Print version diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 820d817d9d8..7b4104e0ee7 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1755,6 +1755,24 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST beacon/pool/execution_proofs` + pub async fn post_beacon_pool_execution_proofs( + &self, + proof: &ExecutionProof, + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("execution_proofs"); + + self.post(path, proof).await?; + + Ok(()) + } + /// `POST beacon/rewards/sync_committee` pub async fn post_beacon_rewards_sync_committee( &self, diff --git a/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml b/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml index f0d0967a166..11439e6d0eb 100644 --- a/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml +++ b/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml @@ -1,16 +1,15 @@ -# 3 nodes generate proofs, 1 node only verifies +# Mixed configuration: 3 normal nodes, 1 node with dummy EL participants: - # Proof generating nodes (nodes 1-3) + # Nodes with real execution layer (nodes 1-3) - el_type: geth el_image: ethereum/client-go:latest cl_type: lighthouse cl_image: lighthouse:local cl_extra_params: - --activate-zkvm - - --zkvm-generation-proof-types=0,1 - --target-peers=3 count: 3 - # Proof verifying only node (node 4) + # Node with dummy execution layer (node 4) # TODO(zkproofs): Currently there is no way to add no client here # We likely want to use our dummy zkvm EL here - el_type: geth diff --git a/scripts/local_testnet/network_params_proof_gen_only.yaml b/scripts/local_testnet/network_params_proof_gen_only.yaml deleted file mode 100644 index aea91efb92b..00000000000 --- a/scripts/local_testnet/network_params_proof_gen_only.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Network 
configuration for testing execution proof generation -# All nodes have execution layers and are configured to generate proofs -participants: - - el_type: geth - el_image: ethereum/client-go:latest - cl_type: lighthouse - cl_image: lighthouse:local - cl_extra_params: - - --activate-zkvm - - --zkvm-generation-proof-types=0,1 - - --target-peers=3 - count: 4 -network_params: - electra_fork_epoch: 0 - fulu_fork_epoch: 1 - seconds_per_slot: 2 -global_log_level: debug -snooper_enabled: false -additional_services: - - dora - - prometheus_grafana \ No newline at end of file From 127d3aa1e77530d8f8cbef7d5cb9c436736dec59 Mon Sep 17 00:00:00 2001 From: kevaundray Date: Sun, 21 Dec 2025 20:22:53 +0000 Subject: [PATCH 57/67] Pass `min_proofs_required` parameter in da_checker (#6) --- .../overflow_lru_cache.rs | 34 +++++++++++++++---- 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 5eebffdd2c4..b5eba1c4a5f 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -272,6 +272,7 @@ impl PendingComponents { &self, spec: &Arc, num_expected_columns_opt: Option, + min_proofs_required_opt: Option, recover: R, ) -> Result>, AvailabilityCheckError> where @@ -349,11 +350,8 @@ impl PendingComponents { return Ok(None); }; - // Check if this node needs execution proofs to validate blocks. - let needs_execution_proofs = spec.zkvm_min_proofs_required().is_some(); - - if needs_execution_proofs { - let min_proofs = spec.zkvm_min_proofs_required().unwrap(); + // Check if this block needs execution proofs. 
+ if let Some(min_proofs) = min_proofs_required_opt { let num_proofs = self.execution_proof_subnet_count(); if num_proofs < min_proofs { // Not enough execution proofs yet @@ -605,7 +603,13 @@ impl DataAvailabilityCheckerInner { ); }); - self.check_availability_and_cache_components(block_root, pending_components, None) + let min_proofs_required_opt = self.get_min_proofs_required(epoch); + self.check_availability_and_cache_components( + block_root, + pending_components, + None, + min_proofs_required_opt, + ) } #[allow(clippy::type_complexity)] @@ -645,10 +649,12 @@ impl DataAvailabilityCheckerInner { ); }); + let min_proofs_required_opt = self.get_min_proofs_required(epoch); self.check_availability_and_cache_components( block_root, pending_components, Some(num_expected_columns), + min_proofs_required_opt, ) } @@ -682,6 +688,7 @@ impl DataAvailabilityCheckerInner { })?; let num_expected_columns_opt = self.get_num_expected_columns(epoch); + let min_proofs_required_opt = self.get_min_proofs_required(epoch); pending_components.span.in_scope(|| { debug!( @@ -696,6 +703,7 @@ impl DataAvailabilityCheckerInner { block_root, pending_components, num_expected_columns_opt, + min_proofs_required_opt, ) } @@ -704,10 +712,12 @@ impl DataAvailabilityCheckerInner { block_root: Hash256, pending_components: MappedRwLockReadGuard<'_, PendingComponents>, num_expected_columns_opt: Option, + min_proofs_required_opt: Option, ) -> Result, AvailabilityCheckError> { if let Some(available_block) = pending_components.make_available( &self.spec, num_expected_columns_opt, + min_proofs_required_opt, |block, span| self.state_cache.recover_pending_executed_block(block, span), )? 
{ // Explicitly drop read lock before acquiring write lock @@ -876,6 +886,7 @@ impl DataAvailabilityCheckerInner { })?; let num_expected_columns_opt = self.get_num_expected_columns(epoch); + let min_proofs_required_opt = self.get_min_proofs_required(epoch); pending_components.span.in_scope(|| { debug!( @@ -889,6 +900,7 @@ impl DataAvailabilityCheckerInner { block_root, pending_components, num_expected_columns_opt, + min_proofs_required_opt, ) } @@ -903,6 +915,16 @@ impl DataAvailabilityCheckerInner { } } + /// Returns the minimum number of execution proofs required for a block at the given epoch. + /// Returns `None` if proofs are not required (zkVM not enabled for this epoch). + fn get_min_proofs_required(&self, epoch: Epoch) -> Option { + if self.spec.is_zkvm_enabled_for_epoch(epoch) { + self.spec.zkvm_min_proofs_required() + } else { + None + } + } + /// maintain the cache pub fn do_maintenance(&self, cutoff_epoch: Epoch) -> Result<(), AvailabilityCheckError> { // clean up any lingering states in the state cache From 11578fbba6d6d352d8c56f5a02f0b22333c96a24 Mon Sep 17 00:00:00 2001 From: kevaundray Date: Wed, 31 Dec 2025 22:21:09 +0000 Subject: [PATCH 58/67] Add ExecutionWitness sentry + ExecutionProofsByRange + Syncing Logic (#5) * execution-witness * update dummyEL wrapper * Merge Execution Witness Sentry into ExecutionProofsByRange (#9) * add execution proofs by range and persist proofs in blobs_db * use the same caching mechanism for proofsbyrange that columnsbyrange uses * also fix proofsbyroot to hit da_checker then store * cargo fmt * lint fix * fmt * update database schema * small cleanup * update configs * update to ignore new geth flags * cargo fmt * cargo lint * sync: wire execution proofs through range/backfill sync - track execution proof requests in block coupling logic - add proof-by-range retries with zkvm peer selection - skip proof processing when DA cache already satisfies minimum - add testss * make cargo fmt * cargo sort * update kurtosis 
script * update cargo lock --- Cargo.lock | 374 ++++++++- Cargo.toml | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 18 + beacon_node/beacon_chain/src/builder.rs | 7 + .../beacon_chain/src/canonical_head.rs | 6 + beacon_node/beacon_chain/src/migrate.rs | 34 + .../beacon_chain/tests/schema_stability.rs | 6 +- beacon_node/beacon_processor/src/lib.rs | 16 + beacon_node/http_api/src/beacon/pool.rs | 156 ++-- .../src/peer_manager/mod.rs | 3 + .../lighthouse_network/src/rpc/codec.rs | 18 + .../lighthouse_network/src/rpc/config.rs | 19 + .../lighthouse_network/src/rpc/methods.rs | 56 ++ .../lighthouse_network/src/rpc/protocol.rs | 39 +- .../src/rpc/rate_limiter.rs | 19 + .../src/service/api_types.rs | 25 + .../lighthouse_network/src/service/mod.rs | 30 +- beacon_node/lighthouse_tracing/src/lib.rs | 3 + .../gossip_methods.rs | 18 +- .../src/network_beacon_processor/mod.rs | 20 +- .../network_beacon_processor/rpc_methods.rs | 179 ++++- beacon_node/network/src/router.rs | 40 + .../network/src/sync/backfill_sync/mod.rs | 57 ++ .../src/sync/block_sidecar_coupling.rs | 120 ++- beacon_node/network/src/sync/manager.rs | 33 +- .../network/src/sync/network_context.rs | 518 ++++++++++++- .../src/sync/network_context/requests.rs | 2 + .../requests/execution_proofs_by_range.rs | 54 ++ .../network/src/sync/range_sync/chain.rs | 125 ++- beacon_node/network/src/sync/tests/lookups.rs | 44 +- beacon_node/network/src/sync/tests/range.rs | 721 ++++++++++++++++- beacon_node/store/src/errors.rs | 3 + beacon_node/store/src/hot_cold_store.rs | 377 ++++++++- beacon_node/store/src/lib.rs | 27 + beacon_node/store/src/metadata.rs | 28 + consensus/types/src/lib.rs | 2 +- dummy_el/geth-wrapper.sh | 100 ++- execution-witness-sentry/Cargo.toml | 24 + execution-witness-sentry/config.toml | 27 + .../src/cl_subscription.rs | 128 +++ execution-witness-sentry/src/config.rs | 58 ++ execution-witness-sentry/src/error.rs | 53 ++ execution-witness-sentry/src/lib.rs | 45 ++ 
execution-witness-sentry/src/main.rs | 731 ++++++++++++++++++ execution-witness-sentry/src/rpc.rs | 393 ++++++++++ execution-witness-sentry/src/storage.rs | 248 ++++++ execution-witness-sentry/src/subscription.rs | 45 ++ .../tests/cl_subscription.rs | 11 + scripts/local_testnet/network_params.yaml | 8 +- ...network_params_mixed_proof_gen_verify.yaml | 27 +- .../local_testnet/network_params_simple.yaml | 19 + 51 files changed, 4929 insertions(+), 186 deletions(-) create mode 100644 beacon_node/network/src/sync/network_context/requests/execution_proofs_by_range.rs create mode 100644 execution-witness-sentry/Cargo.toml create mode 100644 execution-witness-sentry/config.toml create mode 100644 execution-witness-sentry/src/cl_subscription.rs create mode 100644 execution-witness-sentry/src/config.rs create mode 100644 execution-witness-sentry/src/error.rs create mode 100644 execution-witness-sentry/src/lib.rs create mode 100644 execution-witness-sentry/src/main.rs create mode 100644 execution-witness-sentry/src/rpc.rs create mode 100644 execution-witness-sentry/src/storage.rs create mode 100644 execution-witness-sentry/src/subscription.rs create mode 100644 execution-witness-sentry/tests/cl_subscription.rs create mode 100644 scripts/local_testnet/network_params_simple.yaml diff --git a/Cargo.lock b/Cargo.lock index 2d94c4c0bcd..b10d62ef57c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -366,12 +366,14 @@ dependencies = [ "alloy-network", "alloy-network-primitives", "alloy-primitives", + "alloy-pubsub", "alloy-rpc-client", "alloy-rpc-types-eth", "alloy-signer", "alloy-sol-types", "alloy-transport", "alloy-transport-http", + "alloy-transport-ws", "async-stream", "async-trait", "auto_impl", @@ -392,6 +394,28 @@ dependencies = [ "wasmtimer", ] +[[package]] +name = "alloy-pubsub" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdd4c64eb250a18101d22ae622357c6b505e158e9165d4c7974d59082a600c5e" +dependencies = [ + "alloy-json-rpc", 
+ "alloy-primitives", + "alloy-transport", + "auto_impl", + "bimap", + "futures", + "parking_lot", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower 0.5.2", + "tracing", + "wasmtimer", +] + [[package]] name = "alloy-rlp" version = "0.3.12" @@ -422,8 +446,10 @@ checksum = "d0882e72d2c1c0c79dcf4ab60a67472d3f009a949f774d4c17d0bdb669cfde05" dependencies = [ "alloy-json-rpc", "alloy-primitives", + "alloy-pubsub", "alloy-transport", "alloy-transport-http", + "alloy-transport-ws", "futures", "pin-project", "reqwest", @@ -619,6 +645,24 @@ dependencies = [ "url", ] +[[package]] +name = "alloy-transport-ws" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ad2344a12398d7105e3722c9b7a7044ea837128e11d453604dec6e3731a86e2" +dependencies = [ + "alloy-pubsub", + "alloy-transport", + "futures", + "http 1.3.1", + "rustls 0.23.35", + "serde_json", + "tokio", + "tokio-tungstenite", + "tracing", + "ws_stream_wasm", +] + [[package]] name = "alloy-trie" version = "0.9.1" @@ -698,7 +742,7 @@ version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.60.2", ] [[package]] @@ -709,7 +753,7 @@ checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" dependencies = [ "anstyle", "once_cell_polyfill", - "windows-sys 0.61.2", + "windows-sys 0.60.2", ] [[package]] @@ -1081,6 +1125,17 @@ dependencies = [ "syn 2.0.110", ] +[[package]] +name = "async_io_stream" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" +dependencies = [ + "futures", + "pharos", + "rustc_version 0.4.1", +] + [[package]] name = "asynchronous-codec" version = "0.7.0" @@ -1373,6 +1428,12 @@ dependencies = [ "types", ] +[[package]] +name = "bimap" +version = "0.6.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" + [[package]] name = "bincode" version = "1.3.3" @@ -3159,7 +3220,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -3376,6 +3437,22 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "eventsource-client" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43ddc25e1ad2cc0106d5e2d967397b4fb2068a66677ee9b0eea4600e5cfe8fb4" +dependencies = [ + "futures", + "hyper 0.14.32", + "hyper-rustls 0.24.2", + "hyper-timeout 0.4.1", + "log", + "pin-project", + "rand 0.8.5", + "tokio", +] + [[package]] name = "eventsource-stream" version = "0.2.3" @@ -3387,6 +3464,29 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "execution-witness-sentry" +version = "0.1.0" +dependencies = [ + "alloy-provider", + "alloy-rpc-types-eth", + "anyhow", + "clap", + "discv5", + "eventsource-client", + "flate2", + "futures", + "reqwest", + "serde", + "serde_json", + "thiserror 1.0.69", + "tokio", + "toml", + "tracing", + "tracing-subscriber", + "url", +] + [[package]] name = "execution_engine_integration" version = "0.1.0" @@ -4403,6 +4503,22 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http 0.2.12", + "hyper 0.14.32", + "log", + "rustls 0.21.12", + "rustls-native-certs 0.6.3", + "tokio", + "tokio-rustls 0.24.1", +] + [[package]] name = "hyper-rustls" version = "0.27.7" @@ -4417,7 +4533,19 @@ dependencies = [ "tokio", "tokio-rustls 0.26.4", "tower-service", - "webpki-roots", + "webpki-roots 
1.0.4", +] + +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper 0.14.32", + "pin-project-lite", + "tokio", + "tokio-io-timeout", ] [[package]] @@ -4803,7 +4931,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -6396,7 +6524,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -6850,6 +6978,16 @@ dependencies = [ "ucd-trie", ] +[[package]] +name = "pharos" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" +dependencies = [ + "futures", + "rustc_version 0.4.1", +] + [[package]] name = "pin-project" version = "1.1.10" @@ -7060,7 +7198,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit", + "toml_edit 0.23.7", ] [[package]] @@ -7572,7 +7710,7 @@ dependencies = [ "http-body 1.0.1", "http-body-util", "hyper 1.8.1", - "hyper-rustls", + "hyper-rustls 0.27.7", "hyper-tls", "hyper-util", "js-sys", @@ -7599,7 +7737,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots", + "webpki-roots 1.0.4", ] [[package]] @@ -7697,9 +7835,9 @@ dependencies = [ [[package]] name = "ruint" -version = "1.17.0" +version = "1.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a68df0380e5c9d20ce49534f292a36a7514ae21350726efe1865bdb1fa91d278" +checksum = 
"c141e807189ad38a07276942c6623032d3753c8859c146104ac2e4d68865945a" dependencies = [ "alloy-rlp", "arbitrary", @@ -7843,7 +7981,19 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.61.2", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "log", + "ring", + "rustls-webpki 0.101.7", + "sct", ] [[package]] @@ -7875,6 +8025,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile 1.0.4", + "schannel", + "security-framework 2.11.1", +] + [[package]] name = "rustls-native-certs" version = "0.8.2" @@ -7887,6 +8049,15 @@ dependencies = [ "security-framework 3.5.1", ] +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", +] + [[package]] name = "rustls-pemfile" version = "2.2.0" @@ -7906,6 +8077,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "rustls-webpki" version = "0.102.8" @@ -8053,6 +8234,16 @@ dependencies = [ "sha2 0.9.9", ] +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = 
"sec1" version = "0.7.3" @@ -8153,6 +8344,12 @@ dependencies = [ "pest", ] +[[package]] +name = "send_wrapper" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" + [[package]] name = "sensitive_url" version = "0.1.0" @@ -8237,6 +8434,15 @@ dependencies = [ "syn 2.0.110", ] +[[package]] +name = "serde_spanned" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -8933,7 +9139,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.2", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -9172,6 +9378,16 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "tokio-io-timeout" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bd86198d9ee903fedd2f9a2e72014287c0d9167e4ae43b5853007205dda1b76" +dependencies = [ + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-macros" version = "2.6.0" @@ -9193,6 +9409,16 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.12", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.25.0" @@ -9226,6 +9452,22 @@ dependencies = [ "tokio-util", ] +[[package]] +name = "tokio-tungstenite" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a9daff607c6d2bf6c16fd681ccb7eecc83e4e2cdc1ca067ffaadfca5de7f084" +dependencies = [ + "futures-util", + "log", + "rustls 0.23.35", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.4", + "tungstenite", + "webpki-roots 0.26.11", +] + 
[[package]] name = "tokio-util" version = "0.7.17" @@ -9241,6 +9483,27 @@ dependencies = [ "tokio", ] +[[package]] +name = "toml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime 0.6.11", + "toml_edit 0.22.27", +] + +[[package]] +name = "toml_datetime" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +dependencies = [ + "serde", +] + [[package]] name = "toml_datetime" version = "0.7.3" @@ -9250,6 +9513,20 @@ dependencies = [ "serde_core", ] +[[package]] +name = "toml_edit" +version = "0.22.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +dependencies = [ + "indexmap 2.12.0", + "serde", + "serde_spanned", + "toml_datetime 0.6.11", + "toml_write", + "winnow", +] + [[package]] name = "toml_edit" version = "0.23.7" @@ -9257,7 +9534,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" dependencies = [ "indexmap 2.12.0", - "toml_datetime", + "toml_datetime 0.7.3", "toml_parser", "winnow", ] @@ -9271,6 +9548,12 @@ dependencies = [ "winnow", ] +[[package]] +name = "toml_write" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" + [[package]] name = "tonic" version = "0.12.3" @@ -9287,7 +9570,7 @@ dependencies = [ "http-body 1.0.1", "http-body-util", "hyper 1.8.1", - "hyper-timeout", + "hyper-timeout 0.5.2", "hyper-util", "percent-encoding", "pin-project", @@ -9314,12 +9597,12 @@ dependencies = [ "http-body 1.0.1", "http-body-util", "hyper 1.8.1", - "hyper-timeout", + 
"hyper-timeout 0.5.2", "hyper-util", "percent-encoding", "pin-project", "prost", - "rustls-native-certs", + "rustls-native-certs 0.8.2", "tokio", "tokio-rustls 0.26.4", "tokio-stream", @@ -9554,6 +9837,25 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" +[[package]] +name = "tungstenite" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4793cb5e56680ecbb1d843515b23b6de9a75eb04b66643e256a396d43be33c13" +dependencies = [ + "bytes", + "data-encoding", + "http 1.3.1", + "httparse", + "log", + "rand 0.9.2", + "rustls 0.23.35", + "rustls-pki-types", + "sha1", + "thiserror 2.0.17", + "utf-8", +] + [[package]] name = "typenum" version = "1.19.0" @@ -9733,6 +10035,12 @@ dependencies = [ "serde", ] +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + [[package]] name = "utf8_iter" version = "1.0.4" @@ -10042,7 +10350,7 @@ dependencies = [ "mime_guess", "percent-encoding", "pin-project", - "rustls-pemfile", + "rustls-pemfile 2.2.0", "scoped-tls", "serde", "serde_json", @@ -10223,6 +10531,15 @@ dependencies = [ "zip", ] +[[package]] +name = "webpki-roots" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.4", +] + [[package]] name = "webpki-roots" version = "1.0.4" @@ -10278,7 +10595,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.48.0", ] [[package]] @@ -10723,6 +11040,25 @@ version = "0.6.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" +[[package]] +name = "ws_stream_wasm" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c173014acad22e83f16403ee360115b38846fe754e735c5d9d3803fe70c6abc" +dependencies = [ + "async_io_stream", + "futures", + "js-sys", + "log", + "pharos", + "rustc_version 0.4.1", + "send_wrapper", + "thiserror 2.0.17", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "wyz" version = "0.5.1" diff --git a/Cargo.toml b/Cargo.toml index ba2316bb034..19158eb29e5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,6 +62,7 @@ members = [ "crypto/kzg", "database_manager", "dummy_el", + "execution-witness-sentry", "lcli", "lighthouse", "lighthouse/environment", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index dfd28cd9572..d6990894018 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4149,6 +4149,24 @@ impl BeaconChain { // This prevents inconsistency between the two at the expense of concurrency. drop(fork_choice); + // Persist execution proofs to the database if zkvm is enabled and proofs are cached. + // This is done after the block is successfully stored so we don't lose proofs on cache eviction. + if let Some(proofs) = self + .data_availability_checker + .get_execution_proofs(&block_root) + && !proofs.is_empty() + { + let proofs_owned: Vec<_> = proofs.iter().map(|p| (**p).clone()).collect(); + if let Err(e) = self.store.put_execution_proofs(&block_root, &proofs_owned) { + // Log but don't fail block import - proofs can still be served from cache + warn!( + %block_root, + error = ?e, + "Failed to persist execution proofs to database" + ); + } + } + // We're declaring the block "imported" at this point, since fork choice and the DB know // about it. 
let block_time_imported = timestamp_now(); diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index bc5b41b09e1..feabcd5f44a 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1159,6 +1159,13 @@ where .process_prune_blobs(data_availability_boundary); } + // Prune execution proofs older than the execution proof boundary in the background. + if let Some(execution_proof_boundary) = beacon_chain.execution_proof_boundary() { + beacon_chain + .store_migrator + .process_prune_execution_proofs(execution_proof_boundary); + } + Ok(beacon_chain) } } diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 228e5eb2d27..17dc227430b 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -1034,6 +1034,12 @@ impl BeaconChain { .process_prune_blobs(data_availability_boundary); } + // Prune execution proofs in the background. + if let Some(execution_proof_boundary) = self.execution_proof_boundary() { + self.store_migrator + .process_prune_execution_proofs(execution_proof_boundary); + } + // Take a write-lock on the canonical head and signal for it to prune. 
self.canonical_head.fork_choice_write_lock().prune()?; diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index bd232f2e8a2..e290cf510f0 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -120,6 +120,7 @@ pub enum Notification { Finalization(FinalizationNotification), Reconstruction, PruneBlobs(Epoch), + PruneExecutionProofs(Epoch), ManualFinalization(ManualFinalizationNotification), ManualCompaction, } @@ -251,6 +252,28 @@ impl, Cold: ItemStore> BackgroundMigrator>, + execution_proof_boundary: Epoch, + ) { + if let Err(e) = db.try_prune_execution_proofs(false, execution_proof_boundary) { + error!( + error = ?e, + "Execution proof pruning failed" + ); + } + } + /// If configured to run in the background, send `notif` to the background thread. /// /// Return `None` if the message was sent to the background thread, `Some(notif)` otherwise. @@ -440,11 +463,15 @@ impl, Cold: ItemStore> BackgroundMigrator reconstruction_notif = Some(notif), Notification::Finalization(fin) => finalization_notif = Some(fin), Notification::ManualFinalization(fin) => manual_finalization_notif = Some(fin), Notification::PruneBlobs(dab) => prune_blobs_notif = Some(dab), + Notification::PruneExecutionProofs(epb) => { + prune_execution_proofs_notif = Some(epb) + } Notification::ManualCompaction => manual_compaction_notif = Some(notif), } // Read the rest of the messages in the channel, taking the best of each type. @@ -475,6 +502,10 @@ impl, Cold: ItemStore> BackgroundMigrator { prune_blobs_notif = std::cmp::max(prune_blobs_notif, Some(dab)); } + Notification::PruneExecutionProofs(epb) => { + prune_execution_proofs_notif = + std::cmp::max(prune_execution_proofs_notif, Some(epb)); + } } } // Run finalization and blob pruning migrations first, then a reconstruction batch. 
@@ -489,6 +520,9 @@ impl, Cold: ItemStore> BackgroundMigrator = DBColumn::iter().map(|c| c.as_str()).collect(); let expected_columns = vec![ - "bma", "blk", "blb", "bdc", "bdi", "ste", "hsd", "hsn", "bsn", "bsd", "bss", "bs3", "bcs", - "bst", "exp", "bch", "opo", "etc", "frk", "pkc", "brp", "bsx", "bsr", "bbx", "bbr", "bhr", - "brm", "dht", "cus", "otb", "bhs", "olc", "lcu", "scb", "scm", "dmy", + "bma", "blk", "blb", "bdc", "bdi", "bep", "ste", "hsd", "hsn", "bsn", "bsd", "bss", "bs3", + "bcs", "bst", "exp", "bch", "opo", "etc", "frk", "pkc", "brp", "bsx", "bsr", "bbx", "bbr", + "bhr", "brm", "dht", "cus", "otb", "bhs", "olc", "lcu", "scb", "scm", "dmy", ]; assert_eq!(expected_columns, current_columns); } diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 6f5170be300..f98d57e5cb6 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -129,6 +129,7 @@ pub struct BeaconProcessorQueueLengths { block_broots_queue: usize, blob_broots_queue: usize, execution_proof_broots_queue: usize, + execution_proof_brange_queue: usize, blob_brange_queue: usize, dcbroots_queue: usize, dcbrange_queue: usize, @@ -198,6 +199,7 @@ impl BeaconProcessorQueueLengths { block_broots_queue: 1024, blob_broots_queue: 1024, execution_proof_broots_queue: 1024, + execution_proof_brange_queue: 1024, blob_brange_queue: 1024, dcbroots_queue: 1024, dcbrange_queue: 1024, @@ -620,6 +622,7 @@ pub enum Work { BlobsByRangeRequest(BlockingFn), BlobsByRootsRequest(BlockingFn), ExecutionProofsByRootsRequest(BlockingFn), + ExecutionProofsByRangeRequest(BlockingFn), DataColumnsByRootsRequest(BlockingFn), DataColumnsByRangeRequest(BlockingFn), GossipBlsToExecutionChange(BlockingFn), @@ -675,6 +678,7 @@ pub enum WorkType { BlobsByRangeRequest, BlobsByRootsRequest, ExecutionProofsByRootsRequest, + ExecutionProofsByRangeRequest, DataColumnsByRootsRequest, DataColumnsByRangeRequest, GossipBlsToExecutionChange, @@ -728,6 
+732,7 @@ impl Work { Work::BlobsByRangeRequest(_) => WorkType::BlobsByRangeRequest, Work::BlobsByRootsRequest(_) => WorkType::BlobsByRootsRequest, Work::ExecutionProofsByRootsRequest(_) => WorkType::ExecutionProofsByRootsRequest, + Work::ExecutionProofsByRangeRequest(_) => WorkType::ExecutionProofsByRangeRequest, Work::DataColumnsByRootsRequest(_) => WorkType::DataColumnsByRootsRequest, Work::DataColumnsByRangeRequest(_) => WorkType::DataColumnsByRangeRequest, Work::LightClientBootstrapRequest(_) => WorkType::LightClientBootstrapRequest, @@ -901,6 +906,8 @@ impl BeaconProcessor { let mut blob_broots_queue = FifoQueue::new(queue_lengths.blob_broots_queue); let mut execution_proof_broots_queue = FifoQueue::new(queue_lengths.execution_proof_broots_queue); + let mut execution_proof_brange_queue = + FifoQueue::new(queue_lengths.execution_proof_brange_queue); let mut blob_brange_queue = FifoQueue::new(queue_lengths.blob_brange_queue); let mut dcbroots_queue = FifoQueue::new(queue_lengths.dcbroots_queue); let mut dcbrange_queue = FifoQueue::new(queue_lengths.dcbrange_queue); @@ -1226,6 +1233,8 @@ impl BeaconProcessor { Some(item) } else if let Some(item) = execution_proof_broots_queue.pop() { Some(item) + } else if let Some(item) = execution_proof_brange_queue.pop() { + Some(item) } else if let Some(item) = dcbroots_queue.pop() { Some(item) } else if let Some(item) = dcbrange_queue.pop() { @@ -1430,6 +1439,9 @@ impl BeaconProcessor { Work::ExecutionProofsByRootsRequest { .. } => { execution_proof_broots_queue.push(work, work_id) } + Work::ExecutionProofsByRangeRequest { .. } => { + execution_proof_brange_queue.push(work, work_id) + } Work::DataColumnsByRootsRequest { .. 
} => { dcbroots_queue.push(work, work_id) } @@ -1489,6 +1501,9 @@ impl BeaconProcessor { WorkType::ExecutionProofsByRootsRequest => { execution_proof_broots_queue.len() } + WorkType::ExecutionProofsByRangeRequest => { + execution_proof_brange_queue.len() + } WorkType::DataColumnsByRootsRequest => dcbroots_queue.len(), WorkType::DataColumnsByRangeRequest => dcbrange_queue.len(), WorkType::GossipBlsToExecutionChange => { @@ -1649,6 +1664,7 @@ impl BeaconProcessor { Work::BlobsByRangeRequest(process_fn) | Work::BlobsByRootsRequest(process_fn) | Work::ExecutionProofsByRootsRequest(process_fn) + | Work::ExecutionProofsByRangeRequest(process_fn) | Work::DataColumnsByRootsRequest(process_fn) | Work::DataColumnsByRangeRequest(process_fn) => { task_spawner.spawn_blocking(process_fn) diff --git a/beacon_node/http_api/src/beacon/pool.rs b/beacon_node/http_api/src/beacon/pool.rs index 63b1a95b2ed..50a257db01b 100644 --- a/beacon_node/http_api/src/beacon/pool.rs +++ b/beacon_node/http_api/src/beacon/pool.rs @@ -530,6 +530,7 @@ pub fn post_beacon_pool_attestations_v2( /// Submits an execution proof to the beacon node. /// The proof will be validated and stored in the data availability checker. /// If valid, the proof will be published to the gossip network. +/// If the proof makes a block available, the block will be imported. 
pub fn post_beacon_pool_execution_proofs( network_tx_filter: &NetworkTxFilter, beacon_pool_path: &BeaconPoolPathFilter, @@ -541,81 +542,92 @@ pub fn post_beacon_pool_execution_proofs( .and(warp_utils::json::json()) .and(network_tx_filter.clone()) .then( - |task_spawner: TaskSpawner, + |_task_spawner: TaskSpawner, chain: Arc>, proof: ExecutionProof, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - let proof = Arc::new(proof); - - // Validate the proof using the same logic as gossip validation - let verified_proof: GossipVerifiedExecutionProof = - GossipVerifiedExecutionProof::new(proof.clone(), &chain).map_err(|e| { - match e { - GossipExecutionProofError::PriorKnown { - slot, - block_root, - proof_id, - } => { - debug!( - %slot, - %block_root, - %proof_id, - "Execution proof already known" - ); - warp_utils::reject::custom_bad_request(format!( - "proof already known for slot {} block_root {} proof_id {}", - slot, block_root, proof_id - )) - } - GossipExecutionProofError::PriorKnownUnpublished => { - // Proof is valid but was received via non-gossip source - // It's in the DA checker, so we should publish it to gossip - warp_utils::reject::custom_bad_request( - "proof already received but not yet published".to_string(), - ) - } - _ => warp_utils::reject::object_invalid(format!( - "proof verification failed: {:?}", - e - )), - } - })?; - - let slot = verified_proof.slot(); - let block_root = verified_proof.block_root(); - let proof_id = verified_proof.subnet_id(); - - // Publish the proof to the gossip network - utils::publish_pubsub_message( - &network_tx, - PubsubMessage::ExecutionProof(verified_proof.clone().into_inner()), - )?; - - // Store the proof in the data availability checker - if let Err(e) = chain - .data_availability_checker - .put_rpc_execution_proofs(block_root, vec![verified_proof.into_inner()]) - { - warn!( - %slot, - %block_root, - %proof_id, - error = ?e, - "Failed to store execution proof in DA 
checker" - ); - } - - info!( - %slot, - %block_root, - %proof_id, - "Execution proof submitted and published" - ); - - Ok(()) - }) + network_tx: UnboundedSender>| async move { + let result = publish_execution_proof(chain, proof, network_tx).await; + convert_rejection(result.map(|()| warp::reply::json(&()))).await }, ) .boxed() } + +/// Validate, publish, and process an execution proof. +async fn publish_execution_proof( + chain: Arc>, + proof: ExecutionProof, + network_tx: UnboundedSender>, +) -> Result<(), warp::Rejection> { + let proof = Arc::new(proof); + + // Validate the proof using the same logic as gossip validation + let verified_proof: GossipVerifiedExecutionProof = + GossipVerifiedExecutionProof::new(proof.clone(), &chain).map_err(|e| match e { + GossipExecutionProofError::PriorKnown { + slot, + block_root, + proof_id, + } => { + debug!( + %slot, + %block_root, + %proof_id, + "Execution proof already known" + ); + warp_utils::reject::custom_bad_request(format!( + "proof already known for slot {} block_root {} proof_id {}", + slot, block_root, proof_id + )) + } + GossipExecutionProofError::PriorKnownUnpublished => { + // Proof is valid but was received via non-gossip source + // It's in the DA checker, so we should publish it to gossip + warp_utils::reject::custom_bad_request( + "proof already received but not yet published".to_string(), + ) + } + _ => warp_utils::reject::object_invalid(format!("proof verification failed: {:?}", e)), + })?; + + let slot = verified_proof.slot(); + let block_root = verified_proof.block_root(); + let proof_id = verified_proof.subnet_id(); + + // Publish the proof to the gossip network + utils::publish_pubsub_message( + &network_tx, + PubsubMessage::ExecutionProof(verified_proof.clone().into_inner()), + )?; + + // Store the proof in the data availability checker and check if block is now available. + // This properly triggers block import if all components are now available. 
+ match chain + .process_rpc_execution_proofs(slot, block_root, vec![verified_proof.into_inner()]) + .await + { + Ok(status) => { + info!( + %slot, + %block_root, + %proof_id, + ?status, + "Execution proof submitted and published" + ); + } + Err(e) => { + // Log the error but don't fail the request - the proof was already + // published to gossip and stored in the DA checker. The error is + // likely due to the block already being imported or similar. + debug!( + %slot, + %block_root, + %proof_id, + error = ?e, + "Error processing execution proof availability (proof was still published)" + ); + } + } + + Ok(()) +} diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 1b280d54035..52b98d4d3c7 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -606,6 +606,7 @@ impl PeerManager { Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRange => PeerAction::MidToleranceError, Protocol::ExecutionProofsByRoot => PeerAction::MidToleranceError, + Protocol::ExecutionProofsByRange => PeerAction::MidToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, @@ -627,6 +628,7 @@ impl PeerManager { Protocol::DataColumnsByRoot => return, Protocol::DataColumnsByRange => return, Protocol::ExecutionProofsByRoot => return, + Protocol::ExecutionProofsByRange => return, Protocol::Goodbye => return, Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, @@ -651,6 +653,7 @@ impl PeerManager { Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRange => PeerAction::MidToleranceError, Protocol::ExecutionProofsByRoot => PeerAction::MidToleranceError, + Protocol::ExecutionProofsByRange => PeerAction::MidToleranceError, 
Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index b3401038df8..aa0fe8a3d9d 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -81,6 +81,7 @@ impl SSZSnappyInboundCodec { RpcSuccessResponse::DataColumnsByRoot(res) => res.as_ssz_bytes(), RpcSuccessResponse::DataColumnsByRange(res) => res.as_ssz_bytes(), RpcSuccessResponse::ExecutionProofsByRoot(res) => res.as_ssz_bytes(), + RpcSuccessResponse::ExecutionProofsByRange(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientOptimisticUpdate(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientFinalityUpdate(res) => res.as_ssz_bytes(), @@ -362,6 +363,7 @@ impl Encoder> for SSZSnappyOutboundCodec { RequestType::DataColumnsByRange(req) => req.as_ssz_bytes(), RequestType::DataColumnsByRoot(req) => req.data_column_ids.as_ssz_bytes(), RequestType::ExecutionProofsByRoot(req) => req.as_ssz_bytes(), + RequestType::ExecutionProofsByRange(req) => req.as_ssz_bytes(), RequestType::Ping(req) => req.as_ssz_bytes(), RequestType::LightClientBootstrap(req) => req.as_ssz_bytes(), RequestType::LightClientUpdatesByRange(req) => req.as_ssz_bytes(), @@ -578,6 +580,11 @@ fn handle_rpc_request( Ok(Some(RequestType::ExecutionProofsByRoot(request))) } + SupportedProtocol::ExecutionProofsByRangeV1 => { + Ok(Some(RequestType::ExecutionProofsByRange( + ExecutionProofsByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))) + } SupportedProtocol::PingV1 => Ok(Some(RequestType::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -746,6 +753,11 @@ fn handle_rpc_response( ExecutionProof::from_ssz_bytes(decoded_buffer)?, )))) } + SupportedProtocol::ExecutionProofsByRangeV1 => { + 
Ok(Some(RpcSuccessResponse::ExecutionProofsByRange(Arc::new( + ExecutionProof::from_ssz_bytes(decoded_buffer)?, + )))) + } SupportedProtocol::PingV1 => Ok(Some(RpcSuccessResponse::Pong(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -1295,6 +1307,12 @@ mod tests { RequestType::ExecutionProofsByRoot(exec_proofs) => { assert_eq!(decoded, RequestType::ExecutionProofsByRoot(exec_proofs)) } + RequestType::ExecutionProofsByRange(exec_proofs_range) => { + assert_eq!( + decoded, + RequestType::ExecutionProofsByRange(exec_proofs_range) + ) + } RequestType::Ping(ping) => { assert_eq!(decoded, RequestType::Ping(ping)) } diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index d23c16f8fa1..99c0f33da31 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -94,6 +94,7 @@ pub struct RateLimiterConfig { pub(super) data_columns_by_root_quota: Quota, pub(super) data_columns_by_range_quota: Quota, pub(super) execution_proofs_by_root_quota: Quota, + pub(super) execution_proofs_by_range_quota: Quota, pub(super) light_client_bootstrap_quota: Quota, pub(super) light_client_optimistic_update_quota: Quota, pub(super) light_client_finality_update_quota: Quota, @@ -126,6 +127,9 @@ impl RateLimiterConfig { // TODO(zkproofs): Configure this to be less arbitrary pub const DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA: Quota = Quota::n_every(NonZeroU64::new(128).unwrap(), 10); + // TODO(zkproofs): Configure this to be less arbitrary + pub const DEFAULT_EXECUTION_PROOFS_BY_RANGE_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(128).unwrap(), 10); pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA: Quota = Quota::one_every(10); @@ -146,6 +150,7 @@ impl Default for RateLimiterConfig { 
data_columns_by_root_quota: Self::DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA, data_columns_by_range_quota: Self::DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA, execution_proofs_by_root_quota: Self::DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA, + execution_proofs_by_range_quota: Self::DEFAULT_EXECUTION_PROOFS_BY_RANGE_QUOTA, light_client_bootstrap_quota: Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA, light_client_optimistic_update_quota: Self::DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA, @@ -184,6 +189,14 @@ impl Debug for RateLimiterConfig { "data_columns_by_root", fmt_q!(&self.data_columns_by_root_quota), ) + .field( + "execution_proofs_by_root", + fmt_q!(&self.execution_proofs_by_root_quota), + ) + .field( + "execution_proofs_by_range", + fmt_q!(&self.execution_proofs_by_range_quota), + ) .finish() } } @@ -207,6 +220,7 @@ impl FromStr for RateLimiterConfig { let mut data_columns_by_root_quota = None; let mut data_columns_by_range_quota = None; let mut execution_proofs_by_root_quota = None; + let mut execution_proofs_by_range_quota = None; let mut light_client_bootstrap_quota = None; let mut light_client_optimistic_update_quota = None; let mut light_client_finality_update_quota = None; @@ -231,6 +245,9 @@ impl FromStr for RateLimiterConfig { Protocol::ExecutionProofsByRoot => { execution_proofs_by_root_quota = execution_proofs_by_root_quota.or(quota) } + Protocol::ExecutionProofsByRange => { + execution_proofs_by_range_quota = execution_proofs_by_range_quota.or(quota) + } Protocol::Ping => ping_quota = ping_quota.or(quota), Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota), Protocol::LightClientBootstrap => { @@ -268,6 +285,8 @@ impl FromStr for RateLimiterConfig { .unwrap_or(Self::DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA), execution_proofs_by_root_quota: execution_proofs_by_root_quota .unwrap_or(Self::DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA), + execution_proofs_by_range_quota: execution_proofs_by_range_quota + .unwrap_or(Self::DEFAULT_EXECUTION_PROOFS_BY_RANGE_QUOTA), 
light_client_bootstrap_quota: light_client_bootstrap_quota .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA), light_client_optimistic_update_quota: light_client_optimistic_update_quota diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 9ba8f66dafa..966106b6f69 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -603,6 +603,36 @@ impl ExecutionProofsByRootRequest { } } +/// Request execution proofs for a range of slots. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct ExecutionProofsByRangeRequest { + /// The starting slot to request execution proofs. + pub start_slot: u64, + /// The number of slots from the start slot. + pub count: u64, +} + +impl ExecutionProofsByRangeRequest { + pub fn max_proofs_requested(&self) -> u64 { + // Each slot could have up to MAX_PROOFS execution proofs + self.count + .saturating_mul(types::execution_proof::MAX_PROOFS as u64) + } + + pub fn ssz_min_len() -> usize { + ExecutionProofsByRangeRequest { + start_slot: 0, + count: 0, + } + .as_ssz_bytes() + .len() + } + + pub fn ssz_max_len() -> usize { + Self::ssz_min_len() + } +} + /// Request a number of beacon data columns from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] pub struct LightClientUpdatesByRangeRequest { @@ -673,6 +703,9 @@ pub enum RpcSuccessResponse { /// A response to a get EXECUTION_PROOFS_BY_ROOT request. ExecutionProofsByRoot(Arc), + /// A response to a get EXECUTION_PROOFS_BY_RANGE request. + ExecutionProofsByRange(Arc), + /// A PONG response to a PING request. Pong(Ping), @@ -704,6 +737,9 @@ pub enum ResponseTermination { /// Execution proofs by root stream termination. ExecutionProofsByRoot, + /// Execution proofs by range stream termination. + ExecutionProofsByRange, + /// Light client updates by range stream termination. 
LightClientUpdatesByRange, } @@ -718,6 +754,7 @@ impl ResponseTermination { ResponseTermination::DataColumnsByRoot => Protocol::DataColumnsByRoot, ResponseTermination::DataColumnsByRange => Protocol::DataColumnsByRange, ResponseTermination::ExecutionProofsByRoot => Protocol::ExecutionProofsByRoot, + ResponseTermination::ExecutionProofsByRange => Protocol::ExecutionProofsByRange, ResponseTermination::LightClientUpdatesByRange => Protocol::LightClientUpdatesByRange, } } @@ -814,6 +851,7 @@ impl RpcSuccessResponse { RpcSuccessResponse::DataColumnsByRoot(_) => Protocol::DataColumnsByRoot, RpcSuccessResponse::DataColumnsByRange(_) => Protocol::DataColumnsByRange, RpcSuccessResponse::ExecutionProofsByRoot(_) => Protocol::ExecutionProofsByRoot, + RpcSuccessResponse::ExecutionProofsByRange(_) => Protocol::ExecutionProofsByRange, RpcSuccessResponse::Pong(_) => Protocol::Ping, RpcSuccessResponse::MetaData(_) => Protocol::MetaData, RpcSuccessResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, @@ -840,6 +878,7 @@ impl RpcSuccessResponse { Self::LightClientUpdatesByRange(r) => Some(r.attested_header_slot()), // TODO(zkproofs): Change this when we add Slot to ExecutionProof Self::ExecutionProofsByRoot(_) + | Self::ExecutionProofsByRange(_) | Self::MetaData(_) | Self::Status(_) | Self::Pong(_) => None, @@ -905,6 +944,13 @@ impl std::fmt::Display for RpcSuccessResponse { RpcSuccessResponse::ExecutionProofsByRoot(proof) => { write!(f, "ExecutionProofsByRoot: Block root: {}", proof.block_root) } + RpcSuccessResponse::ExecutionProofsByRange(proof) => { + write!( + f, + "ExecutionProofsByRange: Block root: {}", + proof.block_root + ) + } RpcSuccessResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RpcSuccessResponse::MetaData(metadata) => { write!(f, "Metadata: {}", metadata.seq_number()) @@ -1027,3 +1073,13 @@ impl std::fmt::Display for ExecutionProofsByRootRequest { ) } } + +impl std::fmt::Display for ExecutionProofsByRangeRequest { + fn fmt(&self, f: &mut 
std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Request: ExecutionProofsByRange: Start Slot: {}, Count: {}", + self.start_slot, self.count + ) + } +} diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index dfa44976390..0a37db0d210 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -254,6 +254,9 @@ pub enum Protocol { /// The `ExecutionProofsByRoot` protocol name. #[strum(serialize = "execution_proofs_by_root")] ExecutionProofsByRoot, + /// The `ExecutionProofsByRange` protocol name. + #[strum(serialize = "execution_proofs_by_range")] + ExecutionProofsByRange, /// The `Ping` protocol name. Ping, /// The `MetaData` protocol name. @@ -285,6 +288,7 @@ impl Protocol { Protocol::DataColumnsByRoot => Some(ResponseTermination::DataColumnsByRoot), Protocol::DataColumnsByRange => Some(ResponseTermination::DataColumnsByRange), Protocol::ExecutionProofsByRoot => Some(ResponseTermination::ExecutionProofsByRoot), + Protocol::ExecutionProofsByRange => Some(ResponseTermination::ExecutionProofsByRange), Protocol::Ping => None, Protocol::MetaData => None, Protocol::LightClientBootstrap => None, @@ -316,6 +320,7 @@ pub enum SupportedProtocol { DataColumnsByRootV1, DataColumnsByRangeV1, ExecutionProofsByRootV1, + ExecutionProofsByRangeV1, PingV1, MetaDataV1, MetaDataV2, @@ -341,6 +346,7 @@ impl SupportedProtocol { SupportedProtocol::DataColumnsByRootV1 => "1", SupportedProtocol::DataColumnsByRangeV1 => "1", SupportedProtocol::ExecutionProofsByRootV1 => "1", + SupportedProtocol::ExecutionProofsByRangeV1 => "1", SupportedProtocol::PingV1 => "1", SupportedProtocol::MetaDataV1 => "1", SupportedProtocol::MetaDataV2 => "2", @@ -366,6 +372,7 @@ impl SupportedProtocol { SupportedProtocol::DataColumnsByRootV1 => Protocol::DataColumnsByRoot, SupportedProtocol::DataColumnsByRangeV1 => Protocol::DataColumnsByRange, 
SupportedProtocol::ExecutionProofsByRootV1 => Protocol::ExecutionProofsByRoot, + SupportedProtocol::ExecutionProofsByRangeV1 => Protocol::ExecutionProofsByRange, SupportedProtocol::PingV1 => Protocol::Ping, SupportedProtocol::MetaDataV1 => Protocol::MetaData, SupportedProtocol::MetaDataV2 => Protocol::MetaData, @@ -417,10 +424,16 @@ impl SupportedProtocol { ]); } if fork_context.spec.is_zkvm_enabled() { - supported.push(ProtocolId::new( - SupportedProtocol::ExecutionProofsByRootV1, - Encoding::SSZSnappy, - )); + supported.extend_from_slice(&[ + ProtocolId::new( + SupportedProtocol::ExecutionProofsByRootV1, + Encoding::SSZSnappy, + ), + ProtocolId::new( + SupportedProtocol::ExecutionProofsByRangeV1, + Encoding::SSZSnappy, + ), + ]); } supported } @@ -535,6 +548,10 @@ impl ProtocolId { DataColumnsByRangeRequest::ssz_max_len::(), ), Protocol::ExecutionProofsByRoot => RpcLimits::new(0, spec.max_blocks_by_root_request), + Protocol::ExecutionProofsByRange => RpcLimits::new( + ExecutionProofsByRangeRequest::ssz_min_len(), + ExecutionProofsByRangeRequest::ssz_max_len(), + ), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -572,6 +589,7 @@ impl ProtocolId { rpc_data_column_limits::(fork_context.current_fork_epoch(), &fork_context.spec) } Protocol::ExecutionProofsByRoot => rpc_execution_proof_limits(), + Protocol::ExecutionProofsByRange => rpc_execution_proof_limits(), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -614,6 +632,7 @@ impl ProtocolId { | SupportedProtocol::BlocksByRootV1 | SupportedProtocol::BlocksByRangeV1 | SupportedProtocol::ExecutionProofsByRootV1 + | SupportedProtocol::ExecutionProofsByRangeV1 | SupportedProtocol::PingV1 | SupportedProtocol::MetaDataV1 | SupportedProtocol::MetaDataV2 @@ -748,6 +767,7 @@ pub enum RequestType { DataColumnsByRoot(DataColumnsByRootRequest), DataColumnsByRange(DataColumnsByRangeRequest), ExecutionProofsByRoot(ExecutionProofsByRootRequest), + 
ExecutionProofsByRange(ExecutionProofsByRangeRequest), LightClientBootstrap(LightClientBootstrapRequest), LightClientOptimisticUpdate, LightClientFinalityUpdate, @@ -772,6 +792,7 @@ impl RequestType { RequestType::DataColumnsByRoot(req) => req.max_requested() as u64, RequestType::DataColumnsByRange(req) => req.max_requested::(), RequestType::ExecutionProofsByRoot(req) => req.max_requested() as u64, + RequestType::ExecutionProofsByRange(req) => req.max_proofs_requested(), RequestType::Ping(_) => 1, RequestType::MetaData(_) => 1, RequestType::LightClientBootstrap(_) => 1, @@ -802,6 +823,7 @@ impl RequestType { RequestType::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, RequestType::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, RequestType::ExecutionProofsByRoot(_) => SupportedProtocol::ExecutionProofsByRootV1, + RequestType::ExecutionProofsByRange(_) => SupportedProtocol::ExecutionProofsByRangeV1, RequestType::Ping(_) => SupportedProtocol::PingV1, RequestType::MetaData(req) => match req { MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, @@ -834,6 +856,7 @@ impl RequestType { RequestType::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, RequestType::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, RequestType::ExecutionProofsByRoot(_) => ResponseTermination::ExecutionProofsByRoot, + RequestType::ExecutionProofsByRange(_) => ResponseTermination::ExecutionProofsByRange, RequestType::Status(_) => unreachable!(), RequestType::Goodbye(_) => unreachable!(), RequestType::Ping(_) => unreachable!(), @@ -884,6 +907,10 @@ impl RequestType { SupportedProtocol::ExecutionProofsByRootV1, Encoding::SSZSnappy, )], + RequestType::ExecutionProofsByRange(_) => vec![ProtocolId::new( + SupportedProtocol::ExecutionProofsByRangeV1, + Encoding::SSZSnappy, + )], RequestType::Ping(_) => vec![ProtocolId::new( SupportedProtocol::PingV1, Encoding::SSZSnappy, @@ -923,6 +950,7 @@ impl RequestType { 
RequestType::DataColumnsByRoot(_) => false, RequestType::DataColumnsByRange(_) => false, RequestType::ExecutionProofsByRoot(_) => false, + RequestType::ExecutionProofsByRange(_) => false, RequestType::Ping(_) => true, RequestType::MetaData(_) => true, RequestType::LightClientBootstrap(_) => true, @@ -1039,6 +1067,9 @@ impl std::fmt::Display for RequestType { RequestType::ExecutionProofsByRoot(req) => { write!(f, "Execution proofs by root: {:?}", req) } + RequestType::ExecutionProofsByRange(req) => { + write!(f, "Execution proofs by range: {:?}", req) + } RequestType::Ping(ping) => write!(f, "Ping: {}", ping.data), RequestType::MetaData(_) => write!(f, "MetaData request"), RequestType::LightClientBootstrap(bootstrap) => { diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index f70b29cfe45..9dfbc668c89 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -107,6 +107,8 @@ pub struct RPCRateLimiter { dcbrange_rl: Limiter, /// ExecutionProofsByRoot rate limiter. execution_proofs_by_root_rl: Limiter, + /// ExecutionProofsByRange rate limiter. + execution_proofs_by_range_rl: Limiter, /// LightClientBootstrap rate limiter. lc_bootstrap_rl: Limiter, /// LightClientOptimisticUpdate rate limiter. @@ -152,6 +154,8 @@ pub struct RPCRateLimiterBuilder { dcbrange_quota: Option, /// Quota for the ExecutionProofsByRoot protocol. execution_proofs_by_root_quota: Option, + /// Quota for the ExecutionProofsByRange protocol. + execution_proofs_by_range_quota: Option, /// Quota for the LightClientBootstrap protocol. lcbootstrap_quota: Option, /// Quota for the LightClientOptimisticUpdate protocol. 
@@ -178,6 +182,7 @@ impl RPCRateLimiterBuilder { Protocol::DataColumnsByRoot => self.dcbroot_quota = q, Protocol::DataColumnsByRange => self.dcbrange_quota = q, Protocol::ExecutionProofsByRoot => self.execution_proofs_by_root_quota = q, + Protocol::ExecutionProofsByRange => self.execution_proofs_by_range_quota = q, Protocol::LightClientBootstrap => self.lcbootstrap_quota = q, Protocol::LightClientOptimisticUpdate => self.lc_optimistic_update_quota = q, Protocol::LightClientFinalityUpdate => self.lc_finality_update_quota = q, @@ -230,6 +235,10 @@ impl RPCRateLimiterBuilder { .execution_proofs_by_root_quota .ok_or("ExecutionProofsByRoot quota not specified")?; + let execution_proofs_by_range_quota = self + .execution_proofs_by_range_quota + .ok_or("ExecutionProofsByRange quota not specified")?; + // create the rate limiters let ping_rl = Limiter::from_quota(ping_quota)?; let metadata_rl = Limiter::from_quota(metadata_quota)?; @@ -242,6 +251,7 @@ impl RPCRateLimiterBuilder { let dcbroot_rl = Limiter::from_quota(dcbroot_quota)?; let dcbrange_rl = Limiter::from_quota(dcbrange_quota)?; let execution_proofs_by_root_rl = Limiter::from_quota(execution_proofs_by_root_quota)?; + let execution_proofs_by_range_rl = Limiter::from_quota(execution_proofs_by_range_quota)?; let lc_bootstrap_rl = Limiter::from_quota(lc_bootstrap_quota)?; let lc_optimistic_update_rl = Limiter::from_quota(lc_optimistic_update_quota)?; let lc_finality_update_rl = Limiter::from_quota(lc_finality_update_quota)?; @@ -266,6 +276,7 @@ impl RPCRateLimiterBuilder { dcbroot_rl, dcbrange_rl, execution_proofs_by_root_rl, + execution_proofs_by_range_rl, lc_bootstrap_rl, lc_optimistic_update_rl, lc_finality_update_rl, @@ -320,6 +331,7 @@ impl RPCRateLimiter { data_columns_by_root_quota, data_columns_by_range_quota, execution_proofs_by_root_quota, + execution_proofs_by_range_quota, light_client_bootstrap_quota, light_client_optimistic_update_quota, light_client_finality_update_quota, @@ -341,6 +353,10 @@ impl 
RPCRateLimiter { Protocol::ExecutionProofsByRoot, execution_proofs_by_root_quota, ) + .set_quota( + Protocol::ExecutionProofsByRange, + execution_proofs_by_range_quota, + ) .set_quota(Protocol::LightClientBootstrap, light_client_bootstrap_quota) .set_quota( Protocol::LightClientOptimisticUpdate, @@ -389,6 +405,7 @@ impl RPCRateLimiter { Protocol::DataColumnsByRoot => &mut self.dcbroot_rl, Protocol::DataColumnsByRange => &mut self.dcbrange_rl, Protocol::ExecutionProofsByRoot => &mut self.execution_proofs_by_root_rl, + Protocol::ExecutionProofsByRange => &mut self.execution_proofs_by_range_rl, Protocol::LightClientBootstrap => &mut self.lc_bootstrap_rl, Protocol::LightClientOptimisticUpdate => &mut self.lc_optimistic_update_rl, Protocol::LightClientFinalityUpdate => &mut self.lc_finality_update_rl, @@ -414,6 +431,7 @@ impl RPCRateLimiter { dcbroot_rl, dcbrange_rl, execution_proofs_by_root_rl, + execution_proofs_by_range_rl, lc_bootstrap_rl, lc_optimistic_update_rl, lc_finality_update_rl, @@ -432,6 +450,7 @@ impl RPCRateLimiter { dcbrange_rl.prune(time_since_start); dcbroot_rl.prune(time_since_start); execution_proofs_by_root_rl.prune(time_since_start); + execution_proofs_by_range_rl.prune(time_since_start); lc_bootstrap_rl.prune(time_since_start); lc_optimistic_update_rl.prune(time_since_start); lc_finality_update_rl.prune(time_since_start); diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index d97506653b5..ca3a5b78bd9 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -32,6 +32,8 @@ pub enum SyncRequestId { BlobsByRange(BlobsByRangeRequestId), /// Data columns by range request DataColumnsByRange(DataColumnsByRangeRequestId), + /// Execution proofs by range request + ExecutionProofsByRange(ExecutionProofsByRangeRequestId), } /// Request ID for data_columns_by_root requests. 
Block lookups do not issue this request directly. @@ -77,6 +79,17 @@ pub enum DataColumnsByRangeRequester { CustodyBackfillSync(CustodyBackFillBatchRequestId), } +/// Request ID for execution_proofs_by_range requests during range sync. +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct ExecutionProofsByRangeRequestId { + /// Id to identify this attempt at an execution_proofs_by_range request for `parent_request_id` + pub id: Id, + /// The Id of the overall By Range request. + pub parent_request_id: ComponentsByRangeRequestId, + /// The peer id associated with the request. + pub peer: PeerId, +} + /// Block components by range request for range sync. Includes an ID for downstream consumers to /// handle retries and tie all their sub requests together. #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] @@ -168,6 +181,8 @@ pub enum Response { DataColumnsByRoot(Option>>), /// A response to a get EXECUTION_PROOFS_BY_ROOT request. ExecutionProofsByRoot(Option>), + /// A response to a get EXECUTION_PROOFS_BY_RANGE request. + ExecutionProofsByRange(Option>), /// A response to a LightClientUpdate request. LightClientBootstrap(Arc>), /// A response to a LightClientOptimisticUpdate request. @@ -209,6 +224,10 @@ impl std::convert::From> for RpcResponse { Some(p) => RpcResponse::Success(RpcSuccessResponse::ExecutionProofsByRoot(p)), None => RpcResponse::StreamTermination(ResponseTermination::ExecutionProofsByRoot), }, + Response::ExecutionProofsByRange(r) => match r { + Some(p) => RpcResponse::Success(RpcSuccessResponse::ExecutionProofsByRange(p)), + None => RpcResponse::StreamTermination(ResponseTermination::ExecutionProofsByRange), + }, Response::Status(s) => RpcResponse::Success(RpcSuccessResponse::Status(s)), Response::LightClientBootstrap(b) => { RpcResponse::Success(RpcSuccessResponse::LightClientBootstrap(b)) @@ -245,6 +264,12 @@ macro_rules! 
impl_display { impl_display!(BlocksByRangeRequestId, "{}/{}", id, parent_request_id); impl_display!(BlobsByRangeRequestId, "{}/{}", id, parent_request_id); impl_display!(DataColumnsByRangeRequestId, "{}/{}", id, parent_request_id); +impl_display!( + ExecutionProofsByRangeRequestId, + "{}/{}", + id, + parent_request_id +); impl_display!(ComponentsByRangeRequestId, "{}/{}", id, requester); impl_display!(DataColumnsByRootRequestId, "{}/{}", id, requester); impl_display!(SingleLookupReqId, "{}/Lookup/{}", req_id, lookup_id); diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 9f1530ec732..4e8be98a509 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -411,7 +411,7 @@ impl Network { }; let peer_manager = { - let peer_manager_cfg = PeerManagerCfg { + let mut peer_manager_cfg = PeerManagerCfg { discovery_enabled: !config.disable_discovery, quic_enabled: !config.disable_quic_support, metrics_enabled: config.metrics_enabled, @@ -419,6 +419,15 @@ impl Network { execution_proof_enabled: ctx.chain_spec.is_zkvm_enabled(), ..Default::default() }; + // TODO(zkproofs): We decrease the slot time, so we want to + // correspondingly decrease the status interval at which a node will + // check if it needs to sync with others. + let epoch_secs = ctx + .chain_spec + .seconds_per_slot + .saturating_mul(E::slots_per_epoch()) + .max(1); + peer_manager_cfg.status_interval = peer_manager_cfg.status_interval.min(epoch_secs); PeerManager::new(peer_manager_cfg, network_globals.clone())? 
}; @@ -1580,6 +1589,17 @@ impl Network { request_type, }) } + RequestType::ExecutionProofsByRange(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["execution_proofs_by_range"], + ); + Some(NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + }) + } RequestType::LightClientBootstrap(_) => { metrics::inc_counter_vec( &metrics::TOTAL_RPC_REQUESTS, @@ -1670,6 +1690,11 @@ impl Network { peer_id, Response::ExecutionProofsByRoot(Some(resp)), ), + RpcSuccessResponse::ExecutionProofsByRange(resp) => self.build_response( + id, + peer_id, + Response::ExecutionProofsByRange(Some(resp)), + ), // Should never be reached RpcSuccessResponse::LightClientBootstrap(bootstrap) => { self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap)) @@ -1702,6 +1727,9 @@ impl Network { ResponseTermination::ExecutionProofsByRoot => { Response::ExecutionProofsByRoot(None) } + ResponseTermination::ExecutionProofsByRange => { + Response::ExecutionProofsByRange(None) + } ResponseTermination::LightClientUpdatesByRange => { Response::LightClientUpdatesByRange(None) } diff --git a/beacon_node/lighthouse_tracing/src/lib.rs b/beacon_node/lighthouse_tracing/src/lib.rs index dd9e9f1ebb2..9ca5afbcf9c 100644 --- a/beacon_node/lighthouse_tracing/src/lib.rs +++ b/beacon_node/lighthouse_tracing/src/lib.rs @@ -41,6 +41,8 @@ pub const SPAN_HANDLE_BLOCKS_BY_ROOT_REQUEST: &str = "handle_blocks_by_root_requ pub const SPAN_HANDLE_BLOBS_BY_ROOT_REQUEST: &str = "handle_blobs_by_root_request"; pub const SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST: &str = "handle_execution_proofs_by_root_request"; +pub const SPAN_HANDLE_EXECUTION_PROOFS_BY_RANGE_REQUEST: &str = + "handle_execution_proofs_by_range_request"; pub const SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST: &str = "handle_data_columns_by_root_request"; pub const SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE: &str = "handle_light_client_updates_by_range"; pub const SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP: &str 
= "handle_light_client_bootstrap"; @@ -73,6 +75,7 @@ pub const LH_BN_ROOT_SPAN_NAMES: &[&str] = &[ SPAN_HANDLE_BLOCKS_BY_ROOT_REQUEST, SPAN_HANDLE_BLOBS_BY_ROOT_REQUEST, SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST, + SPAN_HANDLE_EXECUTION_PROOFS_BY_RANGE_REQUEST, SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST, SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE, SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP, diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index c8440a6bbf4..5d2203ee380 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -820,13 +820,29 @@ impl NetworkBeaconProcessor { debug!( %block_root, %proof_id, - "Gossip execution proof already processed via the EL. Accepting the proof without re-processing." + "Gossip execution proof already processed via the EL. Checking availability." ); self.propagate_validation_result( message_id, peer_id, MessageAcceptance::Accept, ); + + // The proof is already in the DA checker (from HTTP API). + // Check if this makes any pending blocks complete and import them. 
+ let slot = execution_proof.slot; + if let Err(e) = self + .chain + .process_rpc_execution_proofs(slot, block_root, vec![execution_proof]) + .await + { + debug!( + %block_root, + %proof_id, + error = ?e, + "Failed to process availability for prior known execution proof" + ); + } } GossipExecutionProofError::PriorKnown { block_root, diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 7db2790920e..ffac53e522a 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -14,7 +14,7 @@ use beacon_processor::{ use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, - ExecutionProofsByRootRequest, LightClientUpdatesByRangeRequest, + ExecutionProofsByRangeRequest, ExecutionProofsByRootRequest, LightClientUpdatesByRangeRequest, }; use lighthouse_network::service::api_types::CustodyBackfillBatchId; use lighthouse_network::{ @@ -699,6 +699,24 @@ impl NetworkBeaconProcessor { }) } + /// Create a new work event to process `ExecutionProofsByRangeRequest`s from the RPC network. + pub fn send_execution_proofs_by_range_request( + self: &Arc, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + request: ExecutionProofsByRangeRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.handle_execution_proofs_by_range_request(peer_id, inbound_request_id, request) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::ExecutionProofsByRangeRequest(Box::new(process_fn)), + }) + } + /// Create a new work event to process `DataColumnsByRootRequest`s from the RPC network. 
pub fn send_data_columns_by_roots_request( self: &Arc, diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index f063d7e8380..17ee4076731 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -7,7 +7,7 @@ use beacon_chain::{BeaconChainError, BeaconChainTypes, BlockProcessStatus, WhenS use itertools::{Itertools, process_results}; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, - ExecutionProofsByRootRequest, + ExecutionProofsByRangeRequest, ExecutionProofsByRootRequest, }; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, ReportSource, Response, SyncInfo}; @@ -15,9 +15,9 @@ use lighthouse_tracing::{ SPAN_HANDLE_BLOBS_BY_RANGE_REQUEST, SPAN_HANDLE_BLOBS_BY_ROOT_REQUEST, SPAN_HANDLE_BLOCKS_BY_RANGE_REQUEST, SPAN_HANDLE_BLOCKS_BY_ROOT_REQUEST, SPAN_HANDLE_DATA_COLUMNS_BY_RANGE_REQUEST, SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST, - SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST, SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP, - SPAN_HANDLE_LIGHT_CLIENT_FINALITY_UPDATE, SPAN_HANDLE_LIGHT_CLIENT_OPTIMISTIC_UPDATE, - SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE, + SPAN_HANDLE_EXECUTION_PROOFS_BY_RANGE_REQUEST, SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST, + SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP, SPAN_HANDLE_LIGHT_CLIENT_FINALITY_UPDATE, + SPAN_HANDLE_LIGHT_CLIENT_OPTIMISTIC_UPDATE, SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE, }; use methods::LightClientUpdatesByRangeRequest; use slot_clock::SlotClock; @@ -436,19 +436,39 @@ impl NetworkBeaconProcessor { request.already_have.iter().copied().collect(); let count_needed = request.count_needed as usize; - // Get all execution proofs we have for this block from the DA checker - let Some(available_proofs) = self + // Get all execution proofs we have for this block from 
the DA checker, falling back to the + // store (which checks the store cache/DB). + let available_proofs = match self .chain .data_availability_checker .get_execution_proofs(&block_root) - else { - // No proofs available for this block - debug!( - %peer_id, - %block_root, - "No execution proofs available for peer" - ); - return Ok(()); + { + Some(proofs) => proofs, + None => match self.chain.store.get_execution_proofs(&block_root) { + Ok(proofs) => { + if proofs.is_empty() { + debug!( + %peer_id, + %block_root, + "No execution proofs available for peer" + ); + return Ok(()); + } + proofs + } + Err(e) => { + error!( + %peer_id, + %block_root, + error = ?e, + "Error fetching execution proofs for block root" + ); + return Err(( + RpcErrorResponse::ServerError, + "Error fetching execution proofs", + )); + } + }, }; // Filter out proofs the peer already has and send up to count_needed @@ -486,6 +506,137 @@ impl NetworkBeaconProcessor { Ok(()) } + /// Handle an `ExecutionProofsByRange` request from the peer. + #[instrument( + name = SPAN_HANDLE_EXECUTION_PROOFS_BY_RANGE_REQUEST, + parent = None, + level = "debug", + skip_all, + fields(peer_id = %peer_id, client = tracing::field::Empty) + )] + pub fn handle_execution_proofs_by_range_request( + &self, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + req: ExecutionProofsByRangeRequest, + ) { + let client = self.network_globals.client(&peer_id); + Span::current().record("client", field::display(client.kind)); + + self.terminate_response_stream( + peer_id, + inbound_request_id, + self.handle_execution_proofs_by_range_request_inner(peer_id, inbound_request_id, req), + Response::ExecutionProofsByRange, + ); + } + + /// Handle an `ExecutionProofsByRange` request from the peer. 
+ fn handle_execution_proofs_by_range_request_inner( + &self, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + req: ExecutionProofsByRangeRequest, + ) -> Result<(), (RpcErrorResponse, &'static str)> { + debug!( + %peer_id, + count = req.count, + start_slot = req.start_slot, + "Received ExecutionProofsByRange Request" + ); + + let request_start_slot = Slot::from(req.start_slot); + + // Check if zkvm is enabled and get the execution proof boundary + let execution_proof_boundary_slot = match self.chain.execution_proof_boundary() { + Some(boundary) => boundary.start_slot(T::EthSpec::slots_per_epoch()), + None => { + debug!("ZKVM fork is disabled"); + return Err((RpcErrorResponse::InvalidRequest, "ZKVM fork is disabled")); + } + }; + + // Get the oldest execution proof slot from the store + let oldest_execution_proof_slot = self + .chain + .store + .get_execution_proof_info() + .oldest_execution_proof_slot + .unwrap_or(execution_proof_boundary_slot); + + if request_start_slot < oldest_execution_proof_slot { + debug!( + %request_start_slot, + %oldest_execution_proof_slot, + %execution_proof_boundary_slot, + "Range request start slot is older than the oldest execution proof slot." 
+ ); + + return if execution_proof_boundary_slot < oldest_execution_proof_slot { + Err(( + RpcErrorResponse::ResourceUnavailable, + "execution proofs pruned within boundary", + )) + } else { + Err(( + RpcErrorResponse::InvalidRequest, + "Req outside availability period", + )) + }; + } + + let block_roots = self.get_block_roots_for_slot_range( + req.start_slot, + req.count, + "ExecutionProofsByRange", + )?; + let mut proofs_sent = 0; + + for root in block_roots { + // Get execution proofs from the database (like BlobsByRange does for blobs) + match self.chain.store.get_execution_proofs(&root) { + Ok(proofs) => { + for proof in proofs { + // Due to skip slots, proofs could be out of the range + if proof.slot >= request_start_slot + && proof.slot < request_start_slot + req.count + { + proofs_sent += 1; + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + inbound_request_id, + response: Response::ExecutionProofsByRange(Some(proof)), + }); + } + } + } + Err(e) => { + error!( + request = ?req, + %peer_id, + block_root = ?root, + error = ?e, + "Error fetching execution proofs for block root" + ); + return Err(( + RpcErrorResponse::ServerError, + "Failed fetching execution proofs from database", + )); + } + } + } + + debug!( + %peer_id, + start_slot = req.start_slot, + count = req.count, + sent = proofs_sent, + "ExecutionProofsByRange outgoing response processed" + ); + + Ok(()) + } + /// Handle a `DataColumnsByRoot` request from the peer. 
#[instrument( name = SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST, diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index eb02ddad921..f5bf65c9777 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -283,6 +283,15 @@ impl Router { request, ), ), + RequestType::ExecutionProofsByRange(request) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor + .send_execution_proofs_by_range_request( + peer_id, + inbound_request_id, + request, + ), + ), _ => {} } } @@ -323,6 +332,13 @@ impl Router { Response::ExecutionProofsByRoot(execution_proof) => { self.on_execution_proofs_by_root_response(peer_id, app_request_id, execution_proof); } + Response::ExecutionProofsByRange(execution_proof) => { + self.on_execution_proofs_by_range_response( + peer_id, + app_request_id, + execution_proof, + ); + } // Light client responses should not be received Response::LightClientBootstrap(_) | Response::LightClientOptimisticUpdate(_) @@ -727,6 +743,30 @@ impl Router { }); } + /// Handle an `ExecutionProofsByRange` response from the peer. + pub fn on_execution_proofs_by_range_response( + &mut self, + peer_id: PeerId, + app_request_id: AppRequestId, + execution_proof: Option>, + ) { + trace!( + %peer_id, + "Received ExecutionProofsByRange Response" + ); + + if let AppRequestId::Sync(sync_request_id) = app_request_id { + self.send_to_sync(SyncMessage::RpcExecutionProof { + peer_id, + sync_request_id, + execution_proof, + seen_timestamp: timestamp_now(), + }); + } else { + crit!("All execution proofs by range responses should belong to sync"); + } + } + /// Handle a `DataColumnsByRoot` response from the peer. 
pub fn on_data_columns_by_root_response( &mut self, diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 6c0cbd7e554..441e9b0a6d9 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -348,6 +348,23 @@ impl BackFillSync { CouplingError::BlobPeerFailure(msg) => { tracing::debug!(?batch_id, msg, "Blob peer failure"); } + CouplingError::ExecutionProofPeerFailure { + error, + peer, + exceeded_retries, + } => { + tracing::debug!(?batch_id, ?peer, error, "Execution proof peer failure"); + if !*exceeded_retries { + let mut failed_peers = HashSet::new(); + failed_peers.insert(*peer); + return self.retry_execution_proof_batch( + network, + batch_id, + request_id, + failed_peers, + ); + } + } CouplingError::InternalError(msg) => { error!(?batch_id, msg, "Block components coupling internal error"); } @@ -1001,6 +1018,46 @@ impl BackFillSync { Ok(()) } + /// Retries execution proof requests within the batch by creating a new proofs request. 
+ pub fn retry_execution_proof_batch( + &mut self, + network: &mut SyncNetworkContext, + batch_id: BatchId, + id: Id, + mut failed_peers: HashSet, + ) -> Result<(), BackFillError> { + if let Some(batch) = self.batches.get_mut(&batch_id) { + failed_peers.extend(&batch.failed_peers()); + let req = batch.to_blocks_by_range_request().0; + + let synced_peers = network + .network_globals() + .peers + .read() + .synced_peers_for_epoch(batch_id) + .cloned() + .collect::>(); + + match network.retry_execution_proofs_by_range(id, &synced_peers, &failed_peers, req) { + Ok(()) => { + debug!( + ?batch_id, + id, "Retried execution proof requests from different peers" + ); + return Ok(()); + } + Err(e) => { + debug!(?batch_id, id, e, "Failed to retry execution proof batch"); + } + } + } else { + return Err(BackFillError::InvalidSyncState( + "Batch should exist to be retried".to_string(), + )); + } + Ok(()) + } + /// When resuming a chain, this function searches for batches that need to be re-downloaded and /// transitions their state to redownload the batch. 
fn resume_batches(&mut self, network: &mut SyncNetworkContext) -> Result<(), BackFillError> { diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index ed9a11a03de..faa2fac949c 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -5,6 +5,7 @@ use lighthouse_network::{ PeerId, service::api_types::{ BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, + ExecutionProofsByRangeRequestId, }, }; use ssz_types::RuntimeVariableList; @@ -12,7 +13,7 @@ use std::{collections::HashMap, sync::Arc}; use tracing::{Span, debug}; use types::{ BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, - Hash256, SignedBeaconBlock, + ExecutionProof, Hash256, SignedBeaconBlock, }; use crate::sync::network_context::MAX_COLUMN_RETRIES; @@ -24,6 +25,7 @@ use crate::sync::network_context::MAX_COLUMN_RETRIES; /// - Blocks themselves (always required) /// - Blob sidecars (pre-Fulu fork) /// - Data columns (Fulu fork and later) +/// - Execution proofs (for zkvm-enabled nodes) /// /// It accumulates responses until all expected components are received, then couples /// them together and returns complete `RpcBlock`s ready for processing. Handles validation @@ -33,10 +35,25 @@ pub struct RangeBlockComponentsRequest { blocks_request: ByRangeRequest>>>, /// Sidecars we have received awaiting for their corresponding block. block_data_request: RangeBlockDataRequest, + /// Execution proofs request (for zkvm-enabled nodes). + execution_proofs_request: Option>, /// Span to track the range request and all children range requests. pub(crate) request_span: Span, } +/// Tracks execution proofs requests during range sync. +struct ExecutionProofsRequest { + /// The request tracking state. + request: ByRangeRequest>>, + /// The peer we requested proofs from. 
+ peer: PeerId, + /// Number of proofs required per block. + min_proofs_required: usize, + /// Number of proof attempts completed for this batch. + attempt: usize, + _phantom: std::marker::PhantomData, +} + pub enum ByRangeRequest { Active(I), Complete(T), @@ -67,6 +84,12 @@ pub(crate) enum CouplingError { exceeded_retries: bool, }, BlobPeerFailure(String), + /// The peer we requested execution proofs from was faulty/malicious + ExecutionProofPeerFailure { + error: String, + peer: PeerId, + exceeded_retries: bool, + }, } impl RangeBlockComponentsRequest { @@ -76,6 +99,7 @@ impl RangeBlockComponentsRequest { /// * `blocks_req_id` - Request ID for the blocks /// * `blobs_req_id` - Optional request ID for blobs (pre-Fulu fork) /// * `data_columns` - Optional tuple of (request_id->column_indices pairs, expected_custody_columns) for Fulu fork + /// * `execution_proofs` - Optional tuple of (request_id, peer, min_proofs_required) for zkvm-enabled nodes #[allow(clippy::type_complexity)] pub fn new( blocks_req_id: BlocksByRangeRequestId, @@ -84,6 +108,7 @@ impl RangeBlockComponentsRequest { Vec<(DataColumnsByRangeRequestId, Vec)>, Vec, )>, + execution_proofs: Option<(ExecutionProofsByRangeRequestId, usize)>, request_span: Span, ) -> Self { let block_data_request = if let Some(blobs_req_id) = blobs_req_id { @@ -103,9 +128,19 @@ impl RangeBlockComponentsRequest { RangeBlockDataRequest::NoData }; + let execution_proofs_request = + execution_proofs.map(|(req_id, min_proofs_required)| ExecutionProofsRequest { + request: ByRangeRequest::Active(req_id), + peer: req_id.peer, + min_proofs_required, + attempt: 0, + _phantom: std::marker::PhantomData, + }); + Self { blocks_request: ByRangeRequest::Active(blocks_req_id), block_data_request, + execution_proofs_request, request_span, } } @@ -187,6 +222,30 @@ impl RangeBlockComponentsRequest { } } + /// Adds received execution proofs to the request. 
+ /// + /// Returns an error if this request doesn't expect execution proofs, + /// or if the request ID doesn't match. + pub fn add_execution_proofs( + &mut self, + req_id: ExecutionProofsByRangeRequestId, + proofs: Vec>, + ) -> Result<(), String> { + match &mut self.execution_proofs_request { + Some(exec_proofs_req) => { + exec_proofs_req.request.finish(req_id, proofs)?; + exec_proofs_req.attempt += 1; + Ok(()) + } + None => Err("received execution proofs but none were expected".to_owned()), + } + } + + /// Returns true if this request expects execution proofs. + pub fn expects_execution_proofs(&self) -> bool { + self.execution_proofs_request.is_some() + } + /// Attempts to construct RPC blocks from all received components. /// /// Returns `None` if not all expected requests have completed. @@ -200,6 +259,13 @@ impl RangeBlockComponentsRequest { return None; }; + // Check if execution proofs are required but not yet complete + if let Some(exec_proofs_req) = &self.execution_proofs_request + && exec_proofs_req.request.to_finished().is_none() + { + return None; + } + // Increment the attempt once this function returns the response or errors match &mut self.block_data_request { RangeBlockDataRequest::NoData => { @@ -269,6 +335,50 @@ impl RangeBlockComponentsRequest { } } + /// Returns the collected execution proofs if available. + /// This should be called after `responses()` returns `Some`. + pub fn get_execution_proofs(&self) -> Option>> { + self.execution_proofs_request + .as_ref() + .and_then(|req| req.request.to_finished().cloned()) + } + + /// Returns the peer that was responsible for providing execution proofs. + pub fn execution_proofs_peer(&self) -> Option { + self.execution_proofs_request.as_ref().map(|req| req.peer) + } + + /// Returns the minimum number of execution proofs required per block, if any. 
+ pub fn min_execution_proofs_required(&self) -> Option { + self.execution_proofs_request + .as_ref() + .map(|req| req.min_proofs_required) + } + + /// Returns the number of completed proof attempts for this batch, if any. + pub fn execution_proofs_attempt(&self) -> Option { + self.execution_proofs_request + .as_ref() + .map(|req| req.attempt) + } + + /// Resets the execution proofs request to retry with a new peer. + pub fn reinsert_execution_proofs_request( + &mut self, + req_id: ExecutionProofsByRangeRequestId, + min_proofs_required: usize, + ) -> Result<(), String> { + match &mut self.execution_proofs_request { + Some(exec_proofs_req) => { + exec_proofs_req.request = ByRangeRequest::Active(req_id); + exec_proofs_req.peer = req_id.peer; + exec_proofs_req.min_proofs_required = min_proofs_required; + Ok(()) + } + None => Err("execution proofs request not present".to_owned()), + } + } + fn responses_with_blobs( blocks: Vec>>, blobs: Vec>>, @@ -529,7 +639,7 @@ mod tests { let blocks_req_id = blocks_id(components_id()); let mut info = - RangeBlockComponentsRequest::::new(blocks_req_id, None, None, Span::none()); + RangeBlockComponentsRequest::::new(blocks_req_id, None, None, None, Span::none()); // Send blocks and complete terminate response info.add_blocks(blocks_req_id, blocks).unwrap(); @@ -557,6 +667,7 @@ mod tests { blocks_req_id, Some(blobs_req_id), None, + None, Span::none(), ); @@ -606,6 +717,7 @@ mod tests { blocks_req_id, None, Some((columns_req_id.clone(), expects_custody_columns.clone())), + None, Span::none(), ); // Send blocks and complete terminate response @@ -674,6 +786,7 @@ mod tests { blocks_req_id, None, Some((columns_req_id.clone(), expects_custody_columns.clone())), + None, Span::none(), ); @@ -762,6 +875,7 @@ mod tests { blocks_req_id, None, Some((columns_req_id.clone(), expected_custody_columns.clone())), + None, Span::none(), ); @@ -848,6 +962,7 @@ mod tests { blocks_req_id, None, Some((columns_req_id.clone(), 
expected_custody_columns.clone())), + None, Span::none(), ); @@ -941,6 +1056,7 @@ mod tests { blocks_req_id, None, Some((columns_req_id.clone(), expected_custody_columns.clone())), + None, Span::none(), ); diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index c0af69d7a40..6c41d3d9c75 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -61,7 +61,8 @@ use lighthouse_network::service::api_types::{ BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId, CustodyBackFillBatchRequestId, CustodyBackfillBatchId, CustodyRequester, DataColumnsByRangeRequestId, DataColumnsByRangeRequester, DataColumnsByRootRequestId, - DataColumnsByRootRequester, Id, SingleLookupReqId, SyncRequestId, + DataColumnsByRootRequester, ExecutionProofsByRangeRequestId, Id, SingleLookupReqId, + SyncRequestId, }; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::{PeerAction, PeerId}; @@ -518,6 +519,8 @@ impl SyncManager { SyncRequestId::DataColumnsByRange(req_id) => { self.on_data_columns_by_range_response(req_id, peer_id, RpcEvent::RPCError(error)) } + SyncRequestId::ExecutionProofsByRange(req_id) => self + .on_execution_proofs_by_range_response(req_id, peer_id, RpcEvent::RPCError(error)), } } @@ -1225,6 +1228,12 @@ impl SyncManager { peer_id, RpcEvent::from_chunk(execution_proof, seen_timestamp), ), + SyncRequestId::ExecutionProofsByRange(req_id) => self + .on_execution_proofs_by_range_response( + req_id, + peer_id, + RpcEvent::from_chunk(execution_proof, seen_timestamp), + ), _ => { crit!(%peer_id, "bad request id for execution_proof"); } @@ -1352,6 +1361,28 @@ impl SyncManager { } } + /// Handles a response for an execution proofs by range request. + /// + /// Note: This is currently a stub. Execution proofs by range requests are not yet issued + /// during range sync. 
+ fn on_execution_proofs_by_range_response( + &mut self, + id: ExecutionProofsByRangeRequestId, + peer_id: PeerId, + proof: RpcEvent>, + ) { + if let Some(resp) = self + .network + .on_execution_proofs_by_range_response(id, peer_id, proof) + { + self.on_range_components_response( + id.parent_request_id, + peer_id, + RangeBlockComponent::ExecutionProofs(id, resp), + ); + } + } + fn on_custody_by_root_result( &mut self, requester: CustodyRequester, diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 0943787c925..65ae25bfd3c 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -17,18 +17,21 @@ use crate::sync::block_lookups::SingleLookupId; use crate::sync::block_sidecar_coupling::CouplingError; use crate::sync::network_context::requests::BlobsByRootSingleBlockRequest; use crate::sync::range_data_column_batch_request::RangeDataColumnBatchRequest; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; use custody::CustodyRequestResult; use fnv::FnvHashMap; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, DataColumnsByRangeRequest}; +use lighthouse_network::rpc::methods::{ + BlobsByRangeRequest, DataColumnsByRangeRequest, ExecutionProofsByRangeRequest, +}; use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError, RequestType}; pub use lighthouse_network::service::api_types::RangeRequestId; use lighthouse_network::service::api_types::{ AppRequestId, BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId, CustodyBackFillBatchRequestId, CustodyBackfillBatchId, CustodyId, CustodyRequester, DataColumnsByRangeRequestId, DataColumnsByRangeRequester, DataColumnsByRootRequestId, - DataColumnsByRootRequester, Id, SingleLookupReqId, SyncRequestId, + 
DataColumnsByRootRequester, ExecutionProofsByRangeRequestId, Id, SingleLookupReqId, + SyncRequestId, }; use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Subnet}; use lighthouse_tracing::{SPAN_OUTGOING_BLOCK_BY_ROOT_REQUEST, SPAN_OUTGOING_RANGE_REQUEST}; @@ -37,7 +40,8 @@ pub use requests::LookupVerifyError; use requests::{ ActiveRequests, BlobsByRangeRequestItems, BlobsByRootRequestItems, BlocksByRangeRequestItems, BlocksByRootRequestItems, DataColumnsByRangeRequestItems, DataColumnsByRootRequestItems, - ExecutionProofsByRootRequestItems, ExecutionProofsByRootSingleBlockRequest, + ExecutionProofsByRangeRequestItems, ExecutionProofsByRootRequestItems, + ExecutionProofsByRootSingleBlockRequest, }; #[cfg(test)] use slot_clock::SlotClock; @@ -73,6 +77,8 @@ macro_rules! new_range_request_span { /// Max retries for block components after which we fail the batch. pub const MAX_COLUMN_RETRIES: usize = 3; +/// Max retries for execution proofs after which we fail the batch. 
+pub const MAX_EXECUTION_PROOF_RETRIES: usize = 3; #[derive(Debug)] pub enum RpcEvent { @@ -118,6 +124,7 @@ pub enum RpcRequestSendError { pub enum NoPeerError { BlockPeer, CustodyPeer(ColumnIndex), + ExecutionProofPeer, } #[derive(Debug, PartialEq, Eq)] @@ -217,6 +224,9 @@ pub struct SyncNetworkContext { /// A mapping of active DataColumnsByRange requests data_columns_by_range_requests: ActiveRequests>, + /// A mapping of active ExecutionProofsByRange requests + execution_proofs_by_range_requests: + ActiveRequests, /// Mapping of active custody column requests for a block root custody_by_root_requests: FnvHashMap>, @@ -254,6 +264,17 @@ pub enum RangeBlockComponent { DataColumnsByRangeRequestId, RpcResponseResult>>>, ), + ExecutionProofs( + ExecutionProofsByRangeRequestId, + RpcResponseResult>>, + ), +} + +struct RangeExecutionProofInputs { + min_proofs_required: usize, + proofs_peer: PeerId, + proofs: Vec>, + attempt: usize, } #[cfg(test)] @@ -303,6 +324,7 @@ impl SyncNetworkContext { blocks_by_range_requests: ActiveRequests::new("blocks_by_range"), blobs_by_range_requests: ActiveRequests::new("blobs_by_range"), data_columns_by_range_requests: ActiveRequests::new("data_columns_by_range"), + execution_proofs_by_range_requests: ActiveRequests::new("execution_proofs_by_range"), custody_by_root_requests: <_>::default(), components_by_range_requests: FnvHashMap::default(), custody_backfill_data_column_batch_requests: FnvHashMap::default(), @@ -332,6 +354,7 @@ impl SyncNetworkContext { blocks_by_range_requests, blobs_by_range_requests, data_columns_by_range_requests, + execution_proofs_by_range_requests, // custody_by_root_requests is a meta request of data_columns_by_root_requests custody_by_root_requests: _, // components_by_range_requests is a meta request of various _by_range requests @@ -371,6 +394,10 @@ impl SyncNetworkContext { .active_requests_of_peer(peer_id) .into_iter() .map(|req_id| SyncRequestId::DataColumnsByRange(*req_id)); + let 
execution_proofs_by_range_ids = execution_proofs_by_range_requests + .active_requests_of_peer(peer_id) + .into_iter() + .map(|req_id| SyncRequestId::ExecutionProofsByRange(*req_id)); blocks_by_root_ids .chain(blobs_by_root_ids) .chain(data_column_by_root_ids) @@ -378,6 +405,7 @@ impl SyncNetworkContext { .chain(blocks_by_range_ids) .chain(blobs_by_range_ids) .chain(data_column_by_range_ids) + .chain(execution_proofs_by_range_ids) .collect() } @@ -435,6 +463,7 @@ impl SyncNetworkContext { blocks_by_range_requests, blobs_by_range_requests, data_columns_by_range_requests, + execution_proofs_by_range_requests, // custody_by_root_requests is a meta request of data_columns_by_root_requests custody_by_root_requests: _, // components_by_range_requests is a meta request of various _by_range requests @@ -458,6 +487,7 @@ impl SyncNetworkContext { .chain(blocks_by_range_requests.iter_request_peers()) .chain(blobs_by_range_requests.iter_request_peers()) .chain(data_columns_by_range_requests.iter_request_peers()) + .chain(execution_proofs_by_range_requests.iter_request_peers()) { *active_request_count_by_peer.entry(peer_id).or_default() += 1; } @@ -547,6 +577,83 @@ impl SyncNetworkContext { Ok(()) } + /// Retries execution proofs by range by requesting the proofs again from a different peer. 
+ pub fn retry_execution_proofs_by_range( + &mut self, + id: Id, + peers: &HashSet, + peers_to_deprioritize: &HashSet, + request: BlocksByRangeRequest, + ) -> Result<(), String> { + let Some((requester, parent_request_span)) = self + .components_by_range_requests + .iter() + .find_map(|(key, value)| { + if key.id == id { + Some((key.requester, value.request_span.clone())) + } else { + None + } + }) + else { + return Err("request id not present".to_string()); + }; + + let active_request_count_by_peer = self.active_request_count_by_peer(); + + let proof_peer = self + .select_execution_proofs_peer( + peers, + &active_request_count_by_peer, + peers_to_deprioritize, + ) + .ok_or_else(|| "no zkvm-enabled peer available for execution proofs".to_string())?; + + let epoch = Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch()); + let min_proofs_required = self.chain.spec.zkvm_min_proofs_required().ok_or_else(|| { + "zkvm enabled but min proofs requirement is not configured".to_string() + })?; + if !self.chain.spec.is_zkvm_enabled_for_epoch(epoch) { + return Err("execution proofs retry requested for pre-zkvm epoch".to_string()); + } + + debug!( + id, + ?requester, + ?proof_peer, + "Retrying execution proofs by range from a different peer" + ); + + let id = ComponentsByRangeRequestId { id, requester }; + let req_id = self + .send_execution_proofs_by_range_request( + proof_peer, + ExecutionProofsByRangeRequest { + start_slot: *request.start_slot(), + count: *request.count(), + }, + id, + new_range_request_span!( + self, + "outgoing_proofs_by_range_retry", + parent_request_span.clone(), + proof_peer + ), + ) + .map_err(|e| format!("{:?}", e))?; + + let Some(range_request) = self.components_by_range_requests.get_mut(&id) else { + return Err( + "retrying execution proofs for range request that does not exist".to_string(), + ); + }; + + range_request + .reinsert_execution_proofs_request(req_id, min_proofs_required) + .map_err(|e| format!("{e:?}"))?; + Ok(()) + } + 
/// A blocks by range request sent by the range sync algorithm pub fn block_components_by_range_request( &mut self, @@ -672,6 +779,43 @@ impl SyncNetworkContext { .transpose()?; let epoch = Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch()); + + // Request execution proofs only if zkvm is enabled for this epoch. + let execution_proofs_request = if self.chain.spec.is_zkvm_enabled_for_epoch(epoch) { + let min_proofs_required = + self.chain.spec.zkvm_min_proofs_required().ok_or_else(|| { + RpcRequestSendError::InternalError( + "zkvm enabled but min proofs requirement is not configured".to_string(), + ) + })?; + + // Find a zkvm-enabled peer from block_peers or column_peers + let zkvm_peer = self.find_zkvm_enabled_peer(block_peers, column_peers); + + if let Some(proofs_peer) = zkvm_peer { + let proofs_request = ExecutionProofsByRangeRequest { + start_slot: *request.start_slot(), + count: *request.count(), + }; + let req_id = self.send_execution_proofs_by_range_request( + proofs_peer, + proofs_request, + id, + new_range_request_span!( + self, + "outgoing_proofs_by_range", + range_request_span.clone(), + proofs_peer + ), + )?; + Some((req_id, min_proofs_required)) + } else { + return Err(RpcRequestSendError::NoPeer(NoPeerError::ExecutionProofPeer)); + } + } else { + None + }; + let info = RangeBlockComponentsRequest::new( blocks_req_id, blobs_req_id, @@ -681,6 +825,7 @@ impl SyncNetworkContext { self.chain.sampling_columns_for_epoch(epoch).to_vec(), ) }), + execution_proofs_request, range_request_span, ); self.components_by_range_requests.insert(id, info); @@ -743,6 +888,33 @@ impl SyncNetworkContext { Ok(columns_to_request_by_peer) } + fn select_execution_proofs_peer( + &self, + peers: &HashSet, + active_request_count_by_peer: &HashMap, + peers_to_deprioritize: &HashSet, + ) -> Option { + let peers_db = self.network_globals().peers.read(); + peers + .iter() + .filter(|peer| { + peers_db + .peer_info(peer) + .map(|info| 
info.on_subnet_metadata(&Subnet::ExecutionProof)) + .unwrap_or(false) + }) + .map(|peer| { + ( + peers_to_deprioritize.contains(peer), + active_request_count_by_peer.get(peer).copied().unwrap_or(0), + rand::random::(), + peer, + ) + }) + .min() + .map(|(_, _, _, peer)| *peer) + } + /// Received a blocks by range or blobs by range response for a request that couples blocks ' /// and blobs. pub fn range_block_component_response( @@ -750,13 +922,14 @@ impl SyncNetworkContext { id: ComponentsByRangeRequestId, range_block_component: RangeBlockComponent, ) -> Option>, RpcResponseError>> { - let Entry::Occupied(mut entry) = self.components_by_range_requests.entry(id) else { - metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["range_blocks"]); - return None; - }; - - if let Err(e) = { - let request = entry.get_mut(); + let add_result = { + let Some(request) = self.components_by_range_requests.get_mut(&id) else { + metrics::inc_counter_vec( + &metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, + &["range_blocks"], + ); + return None; + }; match range_block_component { RangeBlockComponent::Block(req_id, resp) => resp.and_then(|(blocks, _)| { request.add_blocks(req_id, blocks).map_err(|e| { @@ -783,14 +956,104 @@ impl SyncNetworkContext { }) }) } + RangeBlockComponent::ExecutionProofs(req_id, resp) => { + let expects_execution_proofs = request.expects_execution_proofs(); + // Handle execution proofs response, treating UnsupportedProtocol as an error + // if proofs are required. 
+ let proofs = match resp { + Ok((proofs, _)) => proofs, + Err(RpcResponseError::RpcError(RPCError::UnsupportedProtocol)) + if expects_execution_proofs => + { + return Some(Err(RpcResponseError::BlockComponentCouplingError( + CouplingError::ExecutionProofPeerFailure { + error: "Peer doesn't support execution_proofs_by_range" + .to_string(), + peer: req_id.peer, + exceeded_retries: false, + }, + ))); + } + Err(RpcResponseError::RpcError(RPCError::UnsupportedProtocol)) => { + debug!( + req_id = ?req_id, + "Peer doesn't support execution_proofs_by_range, treating as empty response" + ); + vec![] + } + Err(e) => return Some(Err(e)), + }; + request.add_execution_proofs(req_id, proofs).map_err(|e| { + RpcResponseError::BlockComponentCouplingError(CouplingError::InternalError( + e, + )) + }) + } } - } { - entry.remove(); + }; + + if let Err(e) = add_result { + self.components_by_range_requests.remove(&id); return Some(Err(e)); } - let range_req = entry.get_mut(); - if let Some(blocks_result) = range_req.responses(&self.chain.spec) { + let (blocks_result, min_proofs_required, proofs_peer, proofs, proofs_attempt) = { + let Some(range_req) = self.components_by_range_requests.get_mut(&id) else { + metrics::inc_counter_vec( + &metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, + &["range_blocks"], + ); + return None; + }; + let blocks_result = range_req.responses(&self.chain.spec); + let min_proofs_required = range_req.min_execution_proofs_required(); + let proofs_peer = range_req.execution_proofs_peer(); + let proofs = range_req.get_execution_proofs().unwrap_or_default(); + let proofs_attempt = range_req.execution_proofs_attempt().unwrap_or(0); + ( + blocks_result, + min_proofs_required, + proofs_peer, + proofs, + proofs_attempt, + ) + }; + + if let Some(Ok(blocks)) = &blocks_result + && let Some(min_proofs_required) = min_proofs_required + { + let Some(proofs_peer) = proofs_peer else { + self.components_by_range_requests.remove(&id); + return 
Some(Err(RpcResponseError::BlockComponentCouplingError( + CouplingError::InternalError( + "execution proofs request completed without a peer".to_string(), + ), + ))); + }; + let proof_inputs = RangeExecutionProofInputs { + min_proofs_required, + proofs_peer, + proofs, + attempt: proofs_attempt, + }; + if let Err(err) = self.process_range_execution_proofs(proof_inputs, blocks) { + let remove_entry = !matches!( + err, + RpcResponseError::BlockComponentCouplingError( + CouplingError::ExecutionProofPeerFailure { + exceeded_retries: false, + .. + } + ) + ); + if remove_entry { + self.components_by_range_requests.remove(&id); + } + return Some(Err(err)); + } + } + + if let Some(blocks_result) = blocks_result { if let Err(CouplingError::DataColumnPeerFailure { error, faulty_peers: _, @@ -800,16 +1063,16 @@ impl SyncNetworkContext { // Remove the entry if it's a peer failure **and** retry counter is exceeded if *exceeded_retries { debug!( - entry=?entry.key(), + entry = ?id, msg = error, "Request exceeded max retries, failing batch" ); - entry.remove(); - }; + self.components_by_range_requests.remove(&id); + } } else { - // also remove the entry only if it coupled successfully + // Also remove the entry only if it coupled successfully // or if it isn't a column peer failure. 
- entry.remove(); + self.components_by_range_requests.remove(&id); } // If the request is finished, dequeue everything Some(blocks_result.map_err(RpcResponseError::BlockComponentCouplingError)) @@ -818,6 +1081,138 @@ impl SyncNetworkContext { } } + fn process_range_execution_proofs( + &self, + inputs: RangeExecutionProofInputs, + blocks: &[RpcBlock], + ) -> Result<(), RpcResponseError> { + let RangeExecutionProofInputs { + min_proofs_required, + proofs_peer, + proofs, + attempt, + } = inputs; + let exceeded_retries = attempt >= MAX_EXECUTION_PROOF_RETRIES; + let mut proofs_by_root: HashMap>> = HashMap::new(); + for proof in proofs { + proofs_by_root + .entry(proof.block_root) + .or_default() + .push(proof); + } + + let proof_error = |error: String| { + RpcResponseError::BlockComponentCouplingError( + CouplingError::ExecutionProofPeerFailure { + error, + peer: proofs_peer, + exceeded_retries, + }, + ) + }; + + for block in blocks { + let block_root = block.block_root(); + if !self.chain.spec.is_zkvm_enabled_for_epoch(block.epoch()) { + proofs_by_root.remove(&block_root); + continue; + } + let existing_count = self + .chain + .data_availability_checker + .get_existing_proof_ids(&block_root) + .map(|ids| ids.len()) + .unwrap_or(0); + + let proofs_for_block = proofs_by_root.remove(&block_root).unwrap_or_default(); + if existing_count >= min_proofs_required { + if !proofs_for_block.is_empty() { + debug!( + ?block_root, + existing_count, + min_proofs_required, + "Ignoring execution proofs because cache already satisfies requirement" + ); + } + continue; + } + + let payload_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|payload| payload.execution_payload_ref().block_hash()) + .ok_or_else(|| { + RpcResponseError::BlockComponentCouplingError(CouplingError::InternalError( + "execution payload missing for zkvm proofs".to_string(), + )) + })?; + + let mut verified_proofs = Vec::new(); + for proof in proofs_for_block { + if proof.block_root != 
block_root { + return Err(proof_error(format!( + "proof block_root mismatch: expected {block_root:?} got {:?}", + proof.block_root + ))); + } + if proof.block_hash != payload_hash { + return Err(proof_error(format!( + "proof execution payload hash mismatch for {block_root:?}" + ))); + } + match self + .chain + .data_availability_checker + .verify_execution_proof_for_gossip(&proof) + { + Ok(true) => verified_proofs.push((*proof).clone()), + Ok(false) => { + return Err(proof_error(format!( + "execution proof verification failed for {block_root:?}" + ))); + } + Err(e) => { + return Err(proof_error(format!( + "execution proof verification error for {block_root:?}: {e:?}" + ))); + } + } + } + + if !verified_proofs.is_empty() + && let Err(e) = self + .chain + .data_availability_checker + .put_verified_execution_proofs(block_root, verified_proofs) + { + return Err(proof_error(format!( + "failed to store execution proofs for {block_root:?}: {e:?}" + ))); + } + + let updated_count = self + .chain + .data_availability_checker + .get_existing_proof_ids(&block_root) + .map(|ids| ids.len()) + .unwrap_or(0); + if updated_count < min_proofs_required { + return Err(proof_error(format!( + "missing execution proofs for {block_root:?}: have {updated_count}, need {min_proofs_required}" + ))); + } + } + + if !proofs_by_root.is_empty() { + let unknown_roots: Vec<_> = proofs_by_root.keys().collect(); + debug!(?unknown_roots, "Execution proofs for unknown block roots"); + } + + Ok(()) + } + /// Request block of `block_root` if necessary by checking: /// - If the da_checker has a pending block from gossip or a previous request /// @@ -1402,6 +1797,75 @@ impl SyncNetworkContext { Ok((id, requested_columns)) } + /// Find a zkvm-enabled peer from the given peer sets. + /// + /// Peers advertise zkvm support via their ENR's zkvm flag. This function + /// checks both block_peers and column_peers to find any peer that supports + /// the execution_proofs_by_range protocol. 
+ fn find_zkvm_enabled_peer( + &self, + block_peers: &HashSet, + column_peers: &HashSet, + ) -> Option { + let peers_db = self.network_globals().peers.read(); + + // First try block_peers, then column_peers + let all_peers = block_peers.iter().chain(column_peers.iter()); + + for peer in all_peers { + if peers_db + .peer_info(peer) + .map(|info| info.on_subnet_metadata(&Subnet::ExecutionProof)) + .unwrap_or(false) + { + return Some(*peer); + } + } + + None + } + + fn send_execution_proofs_by_range_request( + &mut self, + peer_id: PeerId, + request: ExecutionProofsByRangeRequest, + parent_request_id: ComponentsByRangeRequestId, + request_span: Span, + ) -> Result { + let id = ExecutionProofsByRangeRequestId { + id: self.next_id(), + parent_request_id, + peer: peer_id, + }; + + self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request: RequestType::ExecutionProofsByRange(request.clone()), + app_request_id: AppRequestId::Sync(SyncRequestId::ExecutionProofsByRange(id)), + }) + .map_err(|_| RpcRequestSendError::InternalError("network send error".to_owned()))?; + + debug!( + method = "ExecutionProofsByRange", + slots = request.count, + epoch = %Slot::new(request.start_slot).epoch(T::EthSpec::slots_per_epoch()), + peer = %peer_id, + %id, + "Sync RPC request sent" + ); + + self.execution_proofs_by_range_requests.insert( + id, + peer_id, + // false = do not enforce max_requests are returned for *_by_range methods. We don't + // know how many proofs to expect per block. + false, + ExecutionProofsByRangeRequestItems::new(request), + request_span, + ); + Ok(id) + } + pub fn is_execution_engine_online(&self) -> bool { self.execution_engine_state == EngineState::Online } @@ -1640,6 +2104,20 @@ impl SyncNetworkContext { self.on_rpc_response_result(id, "DataColumnsByRange", resp, peer_id, |d| d.len()) } + /// Handles a response for an execution proofs by range request. 
+ #[allow(clippy::type_complexity)] + pub(crate) fn on_execution_proofs_by_range_response( + &mut self, + id: ExecutionProofsByRangeRequestId, + peer_id: PeerId, + rpc_event: RpcEvent>, + ) -> Option>>> { + let resp = self + .execution_proofs_by_range_requests + .on_response(id, rpc_event); + self.on_rpc_response_result(id, "ExecutionProofsByRange", resp, peer_id, |p| p.len()) + } + fn on_rpc_response_result usize>( &mut self, id: I, diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 63249ed2a4b..238e551659d 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -15,6 +15,7 @@ pub use data_columns_by_range::DataColumnsByRangeRequestItems; pub use data_columns_by_root::{ DataColumnsByRootRequestItems, DataColumnsByRootSingleBlockRequest, }; +pub use execution_proofs_by_range::ExecutionProofsByRangeRequestItems; pub use execution_proofs_by_root::{ ExecutionProofsByRootRequestItems, ExecutionProofsByRootSingleBlockRequest, }; @@ -29,6 +30,7 @@ mod blocks_by_range; mod blocks_by_root; mod data_columns_by_range; mod data_columns_by_root; +mod execution_proofs_by_range; mod execution_proofs_by_root; #[derive(Debug, PartialEq, Eq, IntoStaticStr)] diff --git a/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_range.rs b/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_range.rs new file mode 100644 index 00000000000..179f08a6547 --- /dev/null +++ b/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_range.rs @@ -0,0 +1,54 @@ +use super::{ActiveRequestItems, LookupVerifyError}; +use lighthouse_network::rpc::methods::ExecutionProofsByRangeRequest; +use std::sync::Arc; +use types::{ExecutionProof, Slot}; + +/// Accumulates results of an execution_proofs_by_range request. Only returns items after receiving +/// the stream termination. 
+pub struct ExecutionProofsByRangeRequestItems { + request: ExecutionProofsByRangeRequest, + items: Vec>, +} + +impl ExecutionProofsByRangeRequestItems { + pub fn new(request: ExecutionProofsByRangeRequest) -> Self { + Self { + request, + items: vec![], + } + } +} + +impl ActiveRequestItems for ExecutionProofsByRangeRequestItems { + type Item = Arc; + + fn add(&mut self, proof: Self::Item) -> Result { + let proof_slot = proof.slot; + + // Verify the proof is within the requested slot range + if proof_slot < Slot::new(self.request.start_slot) + || proof_slot >= Slot::new(self.request.start_slot + self.request.count) + { + return Err(LookupVerifyError::UnrequestedSlot(proof_slot)); + } + + // Check for duplicate proofs (same slot and proof_id) + if self + .items + .iter() + .any(|existing| existing.slot == proof_slot && existing.proof_id == proof.proof_id) + { + return Err(LookupVerifyError::DuplicatedProofIDs(proof.proof_id)); + } + + self.items.push(proof); + + // We don't know exactly how many proofs to expect (depends on block content), + // so we never return true here - rely on stream termination + Ok(false) + } + + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) + } +} diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 4ce10e23ca1..251dff9ffb7 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -6,12 +6,14 @@ use crate::sync::batch::{ BatchConfig, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState, }; use crate::sync::block_sidecar_coupling::CouplingError; -use crate::sync::network_context::{RangeRequestId, RpcRequestSendError, RpcResponseError}; +use crate::sync::network_context::{ + NoPeerError, RangeRequestId, RpcRequestSendError, RpcResponseError, +}; use crate::sync::{BatchProcessResult, network_context::SyncNetworkContext}; use beacon_chain::BeaconChainTypes; use 
beacon_chain::block_verification_types::RpcBlock; use lighthouse_network::service::api_types::Id; -use lighthouse_network::{PeerAction, PeerId}; +use lighthouse_network::{PeerAction, PeerId, Subnet}; use lighthouse_tracing::SPAN_SYNCING_CHAIN; use logging::crit; use std::collections::{BTreeMap, HashSet, btree_map::Entry}; @@ -463,6 +465,12 @@ impl SyncingChain { // target when there is no sampling peers available. This is a valid state and should not // return an error. return Ok(KeepChain); + } else if !self.good_peers_on_execution_proof_subnet(self.processing_target, network) { + debug!( + src = "process_completed_batches", + "Waiting for zkvm-enabled peers for execution proofs" + ); + return Ok(KeepChain); } else { // NOTE: It is possible that the batch doesn't exist for the processing id. This can happen // when we complete a batch and attempt to download a new batch but there are: @@ -944,6 +952,31 @@ impl SyncingChain { CouplingError::BlobPeerFailure(msg) => { tracing::debug!(?batch_id, msg, "Blob peer failure"); } + CouplingError::ExecutionProofPeerFailure { + error, + peer, + exceeded_retries, + } => { + tracing::debug!(?batch_id, ?peer, error, "Execution proof peer failure"); + if !*exceeded_retries { + if let BatchOperationOutcome::Failed { blacklist } = + batch.downloading_to_awaiting_download()? 
+ { + return Err(RemoveChain::ChainFailed { + blacklist, + failing_batch: batch_id, + }); + } + let mut failed_peers = HashSet::new(); + failed_peers.insert(*peer); + return self.retry_execution_proof_batch( + network, + batch_id, + request_id, + failed_peers, + ); + } + } CouplingError::InternalError(msg) => { tracing::error!(?batch_id, msg, "Block components coupling internal error"); } @@ -1020,6 +1053,13 @@ impl SyncingChain { for batch_id in awaiting_downloads { if self.good_peers_on_sampling_subnets(batch_id, network) { + if !self.good_peers_on_execution_proof_subnet(batch_id, network) { + debug!( + src = "attempt_send_awaiting_download_batches", + "Waiting for zkvm-enabled peers for execution proofs" + ); + continue; + } self.send_batch(network, batch_id)?; } else { debug!( @@ -1083,6 +1123,13 @@ impl SyncingChain { return Ok(KeepChain); } Err(e) => match e { + RpcRequestSendError::NoPeer(NoPeerError::ExecutionProofPeer) => { + debug!( + %batch_id, + "Waiting for zkvm-enabled peers for execution proofs" + ); + return Ok(KeepChain); + } // TODO(das): Handle the NoPeer case explicitly and don't drop the batch. For // sync to work properly it must be okay to have "stalled" batches in // AwaitingDownload state. Currently it will error with invalid state if @@ -1163,6 +1210,45 @@ impl SyncingChain { Ok(KeepChain) } + /// Retries execution proof requests within the batch by creating a new proofs request. 
+ fn retry_execution_proof_batch( + &mut self, + network: &mut SyncNetworkContext, + batch_id: BatchId, + id: Id, + mut failed_peers: HashSet, + ) -> ProcessingResult { + let _guard = self.span.clone().entered(); + debug!(%batch_id, %id, ?failed_peers, "Retrying execution proof requests"); + if let Some(batch) = self.batches.get_mut(&batch_id) { + failed_peers.extend(&batch.failed_peers()); + let req = batch.to_blocks_by_range_request().0; + + let synced_peers = network + .network_globals() + .peers + .read() + .synced_peers_for_epoch(batch_id) + .cloned() + .collect::>(); + + match network.retry_execution_proofs_by_range(id, &synced_peers, &failed_peers, req) { + Ok(()) => { + batch.start_downloading(id)?; + debug!( + ?batch_id, + id, "Retried execution proof requests from other peers" + ); + return Ok(KeepChain); + } + Err(e) => { + debug!(?batch_id, id, e, "Failed to retry execution proof batch"); + } + } + } + Ok(KeepChain) + } + /// Returns true if this chain is currently syncing. pub fn is_syncing(&self) -> bool { match self.state { @@ -1206,6 +1292,13 @@ impl SyncingChain { ); return Ok(KeepChain); } + if !self.good_peers_on_execution_proof_subnet(epoch, network) { + debug!( + src = "request_batches_optimistic", + "Waiting for zkvm-enabled peers for execution proofs" + ); + return Ok(KeepChain); + } if let Entry::Vacant(entry) = self.batches.entry(epoch) { let batch_type = network.batch_type(epoch); @@ -1252,6 +1345,27 @@ impl SyncingChain { } } + /// Returns true if there is at least one zkvm-enabled peer for execution proofs. 
+ fn good_peers_on_execution_proof_subnet( + &self, + epoch: Epoch, + network: &SyncNetworkContext, + ) -> bool { + if !network.chain.spec.is_zkvm_enabled_for_epoch(epoch) { + return true; + } + + let peers_db = network.network_globals().peers.read(); + let synced_peers: HashSet<_> = peers_db.synced_peers_for_epoch(epoch).cloned().collect(); + + self.peers.iter().chain(synced_peers.iter()).any(|peer| { + peers_db + .peer_info(peer) + .map(|info| info.on_subnet_metadata(&Subnet::ExecutionProof)) + .unwrap_or(false) + }) + } + /// Creates the next required batch from the chain. If there are no more batches required, /// `false` is returned. fn include_next_batch(&mut self, network: &mut SyncNetworkContext) -> Option { @@ -1294,6 +1408,13 @@ impl SyncingChain { ); return None; } + if !self.good_peers_on_execution_proof_subnet(self.to_be_downloaded, network) { + debug!( + src = "include_next_batch", + "Waiting for zkvm-enabled peers for execution proofs" + ); + return None; + } // If no batch needs a retry, attempt to send the batch of the next epoch to download let next_batch_id = self.to_be_downloaded; diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 8e190da2b9d..fb4adbcee65 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -61,8 +61,16 @@ impl TestRig { } fn test_setup_with_spec(spec: ChainSpec) -> Self { + Self::test_setup_with_spec_and_zkvm(spec, false, None) + } + + fn test_setup_with_spec_and_zkvm( + spec: ChainSpec, + zkvm_dummy_verifiers: bool, + anchor_oldest_slot: Option, + ) -> Self { // Initialise a new beacon chain - let harness = BeaconChainHarness::>::builder(E) + let mut builder = BeaconChainHarness::>::builder(E) .spec(Arc::new(spec)) .deterministic_keypairs(1) .fresh_ephemeral_store() @@ -71,8 +79,23 @@ impl TestRig { Slot::new(0), Duration::from_secs(0), Duration::from_secs(12), - )) - .build(); + )); + + if 
zkvm_dummy_verifiers { + // TODO(zkproofs): For unit tests, we likely always want dummy verifiers + builder = builder.zkvm_with_dummy_verifiers(); + } + + let harness = builder.build(); + if let Some(oldest_slot) = anchor_oldest_slot { + let store = &harness.chain.store; + let prev_anchor = store.get_anchor_info(); + let mut new_anchor = prev_anchor.clone(); + new_anchor.oldest_block_slot = oldest_slot; + store + .compare_and_set_anchor_info_with_write(prev_anchor, new_anchor) + .expect("anchor info updated"); + } let chain = harness.chain.clone(); let fork_context = Arc::new(ForkContext::new::( @@ -160,7 +183,20 @@ impl TestRig { pub fn test_setup_after_fulu_with_zkvm() -> Option { let mut spec = test_spec::(); spec.zkvm_enabled = true; - let r = Self::test_setup_with_spec(spec); + let r = Self::test_setup_with_spec_and_zkvm(spec, true, None); + if r.fork_name.fulu_enabled() { + Some(r) + } else { + None + } + } + + /// Setup test rig for Fulu with zkvm enabled and backfill required. + pub fn test_setup_after_fulu_with_zkvm_backfill() -> Option { + let mut spec = test_spec::(); + spec.zkvm_enabled = true; + let backfill_start_slot = Slot::new(E::slots_per_epoch()); + let r = Self::test_setup_with_spec_and_zkvm(spec, true, Some(backfill_start_slot)); if r.fork_name.fulu_enabled() { Some(r) } else { diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index cb728a90c1b..f122087ae3c 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -3,26 +3,26 @@ use crate::network_beacon_processor::ChainSegmentProcessId; use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use crate::sync::manager::SLOT_IMPORT_TOLERANCE; -use crate::sync::network_context::RangeRequestId; +use crate::sync::network_context::{MAX_EXECUTION_PROOF_RETRIES, RangeRequestId}; use crate::sync::range_sync::RangeSyncType; use beacon_chain::data_column_verification::CustodyDataColumn; use 
beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; use beacon_chain::{EngineState, NotifyExecutionLayer, block_verification_types::RpcBlock}; use beacon_processor::WorkType; -use lighthouse_network::rpc::RequestType; use lighthouse_network::rpc::methods::{ - BlobsByRangeRequest, DataColumnsByRangeRequest, OldBlocksByRangeRequest, - OldBlocksByRangeRequestV2, StatusMessageV2, + BlobsByRangeRequest, DataColumnsByRangeRequest, ExecutionProofsByRangeRequest, + OldBlocksByRangeRequest, OldBlocksByRangeRequestV2, StatusMessageV2, }; +use lighthouse_network::rpc::{RPCError, RequestType}; use lighthouse_network::service::api_types::{ AppRequestId, BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, - SyncRequestId, + ExecutionProofsByRangeRequestId, SyncRequestId, }; use lighthouse_network::{PeerId, SyncInfo}; use std::time::Duration; use types::{ - BlobSidecarList, BlockImportSource, Epoch, EthSpec, Hash256, MinimalEthSpec as E, - SignedBeaconBlock, SignedBeaconBlockHash, Slot, + BlobSidecarList, BlockImportSource, Epoch, EthSpec, ExecutionBlockHash, ExecutionProof, + ExecutionProofId, Hash256, MinimalEthSpec as E, SignedBeaconBlock, SignedBeaconBlockHash, Slot, }; const D: Duration = Duration::new(0, 0); @@ -38,6 +38,13 @@ enum ByRangeDataRequestIds { PostPeerDAS(Vec<(DataColumnsByRangeRequestId, PeerId)>), } +struct BlocksByRangeRequestMeta { + id: BlocksByRangeRequestId, + peer: PeerId, + start_slot: u64, + count: u64, +} + /// Sync tests are usually written in the form: /// - Do some action /// - Expect a request to be sent @@ -84,6 +91,20 @@ impl TestRig { }) } + fn add_head_zkvm_peer_with_root(&mut self, head_root: Hash256) -> PeerId { + let local_info = self.local_info(); + let peer_id = self.new_connected_zkvm_peer(); + self.send_sync_message(SyncMessage::AddPeer( + peer_id, + SyncInfo { + head_root, + head_slot: local_info.head_slot + 1 + Slot::new(SLOT_IMPORT_TOLERANCE as u64), + ..local_info + }, + )); + peer_id + } + // 
Produce a finalized peer with an advanced finalized epoch fn add_finalized_peer(&mut self) -> PeerId { self.add_finalized_peer_with_root(Hash256::random()) @@ -155,6 +176,13 @@ impl TestRig { } } + fn add_synced_zkvm_peer(&mut self) -> PeerId { + let peer_id = self.new_connected_zkvm_peer(); + let local_info = self.local_info(); + self.send_sync_message(SyncMessage::AddPeer(peer_id, local_info)); + peer_id + } + fn assert_state(&self, state: RangeSyncType) { assert_eq!( self.sync_manager @@ -200,6 +228,16 @@ impl TestRig { &mut self, request_filter: RequestFilter, ) -> ((BlocksByRangeRequestId, PeerId), ByRangeDataRequestIds) { + let (meta, by_range_data_requests) = + self.find_blocks_by_range_request_with_meta(request_filter); + + ((meta.id, meta.peer), by_range_data_requests) + } + + fn find_blocks_by_range_request_with_meta( + &mut self, + request_filter: RequestFilter, + ) -> (BlocksByRangeRequestMeta, ByRangeDataRequestIds) { let filter_f = |peer: PeerId, start_slot: u64| { if let Some(expected_epoch) = request_filter.epoch { let epoch = Slot::new(start_slot).epoch(E::slots_per_epoch()).as_u64(); @@ -222,10 +260,17 @@ impl TestRig { peer_id, request: RequestType::BlocksByRange(OldBlocksByRangeRequest::V2( - OldBlocksByRangeRequestV2 { start_slot, .. }, + OldBlocksByRangeRequestV2 { + start_slot, count, .. 
+ }, )), app_request_id: AppRequestId::Sync(SyncRequestId::BlocksByRange(id)), - } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), + } if filter_f(*peer_id, *start_slot) => Some(BlocksByRangeRequestMeta { + id: *id, + peer: *peer_id, + start_slot: *start_slot, + count: *count, + }), _ => None, }) .unwrap_or_else(|e| { @@ -272,6 +317,45 @@ impl TestRig { (block_req, by_range_data_requests) } + fn find_execution_proofs_by_range_request( + &mut self, + request_filter: RequestFilter, + ) -> (ExecutionProofsByRangeRequestId, PeerId, u64, u64) { + let filter_f = |peer: PeerId, start_slot: u64| { + if let Some(expected_epoch) = request_filter.epoch { + let epoch = Slot::new(start_slot).epoch(E::slots_per_epoch()).as_u64(); + if epoch != expected_epoch { + return false; + } + } + if let Some(expected_peer) = request_filter.peer + && peer != expected_peer + { + return false; + } + + true + }; + + self.pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + peer_id, + request: + RequestType::ExecutionProofsByRange(ExecutionProofsByRangeRequest { + start_slot, + count, + }), + app_request_id: AppRequestId::Sync(SyncRequestId::ExecutionProofsByRange(id)), + } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id, *start_slot, *count)), + _ => None, + }) + .unwrap_or_else(|e| { + panic!( + "Should have an ExecutionProofsByRange request, filter {request_filter:?}: {e:?}" + ) + }) + } + fn find_and_complete_blocks_by_range_request( &mut self, request_filter: RequestFilter, @@ -290,6 +374,15 @@ impl TestRig { seen_timestamp: D, }); + self.complete_by_range_data_requests(by_range_data_request_ids); + + blocks_req_id.parent_request_id.requester + } + + fn complete_by_range_data_requests( + &mut self, + by_range_data_request_ids: ByRangeDataRequestIds, + ) { match by_range_data_request_ids { ByRangeDataRequestIds::PreDeneb => {} ByRangeDataRequestIds::PrePeerDAS(id, peer_id) => { @@ -319,8 +412,6 @@ impl TestRig { } } } - - 
blocks_req_id.parent_request_id.requester } fn find_and_complete_processing_chain_segment(&mut self, id: ChainSegmentProcessId) { @@ -601,3 +692,611 @@ fn finalized_sync_not_enough_custody_peers_on_start() { let last_epoch = advanced_epochs + EXTRA_SYNCED_EPOCHS; r.complete_and_process_range_sync_until(last_epoch, filter()); } + +#[test] +fn range_sync_requests_execution_proofs_for_zkvm() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let head_root = Hash256::random(); + let _supernode_peer = rig.add_head_peer_with_root(head_root); + + rig.assert_state(RangeSyncType::Head); + rig.expect_empty_network(); + + let zkvm_peer = rig.add_head_zkvm_peer_with_root(head_root); + let _ = rig.find_blocks_by_range_request(filter()); + let (_, proof_peer, _, _) = + rig.find_execution_proofs_by_range_request(filter().peer(zkvm_peer)); + assert_eq!(proof_peer, zkvm_peer); +} + +#[test] +fn range_sync_uses_cached_execution_proofs() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let head_root = Hash256::random(); + let zkvm_peer = rig.add_head_zkvm_peer_with_root(head_root); + let _supernode_peer = rig.add_head_peer_with_root(head_root); + + let (block_meta, by_range_data) = rig.find_blocks_by_range_request_with_meta(filter()); + let (proof_req_id, proof_peer, proof_start_slot, proof_count) = + rig.find_execution_proofs_by_range_request(filter().peer(zkvm_peer)); + + assert_eq!(proof_start_slot, block_meta.start_slot); + assert_eq!(proof_count, block_meta.count); + assert_eq!(proof_peer, zkvm_peer); + + let mut block = rig.rand_block(); + *block.message_mut().slot_mut() = Slot::new(block_meta.start_slot); + + let block_root = block.canonical_root(); + let block_hash = block + .message() + .body() + .execution_payload() + .expect("execution payload should exist") + .execution_payload_ref() + .block_hash(); + + let min_proofs = rig + .harness + .chain + .spec + .zkvm_min_proofs_required() + 
.expect("zkvm enabled"); + + let proofs = (0..min_proofs) + .map(|i| { + ExecutionProof::new( + ExecutionProofId::new(u8::try_from(i).expect("proof id fits")).unwrap(), + block.slot(), + block_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap() + }) + .collect::>(); + + rig.harness + .chain + .data_availability_checker + .put_verified_execution_proofs(block_root, proofs) + .unwrap(); + + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: Some(Arc::new(block)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: None, + seen_timestamp: D, + }); + + rig.complete_by_range_data_requests(by_range_data); + + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + peer_id: proof_peer, + execution_proof: None, + seen_timestamp: D, + }); + + let request_id = block_meta.id.parent_request_id.requester; + let process_id = match request_id { + RangeRequestId::RangeSync { chain_id, batch_id } => { + ChainSegmentProcessId::RangeBatchId(chain_id, batch_id) + } + RangeRequestId::BackfillSync { batch_id } => { + ChainSegmentProcessId::BackSyncBatchId(batch_id) + } + }; + rig.find_and_complete_processing_chain_segment(process_id); +} + +#[test] +fn range_sync_retries_execution_proofs_without_block_retry() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let head_root = Hash256::random(); + let zkvm_peer_1 = rig.add_head_zkvm_peer_with_root(head_root); + let zkvm_peer_2 = rig.add_head_zkvm_peer_with_root(head_root); + let _supernode_peer = rig.add_head_peer_with_root(head_root); + + let (block_meta, by_range_data) = rig.find_blocks_by_range_request_with_meta(filter()); + let epoch = Slot::new(block_meta.start_slot) + 
.epoch(E::slots_per_epoch()) + .as_u64(); + let (proof_req_id, proof_peer, proof_start_slot, _) = + rig.find_execution_proofs_by_range_request(filter().epoch(epoch)); + + assert_eq!(proof_start_slot, block_meta.start_slot); + assert!(proof_peer == zkvm_peer_1 || proof_peer == zkvm_peer_2); + + let mut block = rig.rand_block(); + *block.message_mut().slot_mut() = Slot::new(block_meta.start_slot); + + let block_root = block.canonical_root(); + let block_hash = block + .message() + .body() + .execution_payload() + .expect("execution payload should exist") + .execution_payload_ref() + .block_hash(); + + let wrong_hash = if block_hash == ExecutionBlockHash::zero() { + ExecutionBlockHash::repeat_byte(0x11) + } else { + ExecutionBlockHash::zero() + }; + + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: Some(Arc::new(block)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: None, + seen_timestamp: D, + }); + + rig.complete_by_range_data_requests(by_range_data); + + let bad_proof = ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(block_meta.start_slot), + wrong_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap(); + + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + peer_id: proof_peer, + execution_proof: Some(Arc::new(bad_proof)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + peer_id: proof_peer, + execution_proof: None, + seen_timestamp: D, + }); + + if rig + .pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + request: + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2( + 
OldBlocksByRangeRequestV2 { start_slot, .. }, + )), + .. + } if *start_slot == block_meta.start_slot => Some(()), + _ => None, + }) + .is_ok() + { + panic!("unexpected BlocksByRange retry for execution proof failure"); + } + + let (_, retry_peer, retry_start_slot, _) = + rig.find_execution_proofs_by_range_request(filter().epoch(epoch)); + assert_eq!(retry_start_slot, block_meta.start_slot); + assert_ne!(retry_peer, proof_peer); +} + +#[test] +fn backfill_retries_execution_proofs_without_block_retry() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm_backfill() else { + return; + }; + + let zkvm_peer_1 = rig.add_synced_zkvm_peer(); + let zkvm_peer_2 = rig.add_synced_zkvm_peer(); + let local_info = rig.local_info(); + let _supernode_peer = rig.add_supernode_peer(local_info); + + let backfill_epoch = Slot::new(E::slots_per_epoch()) + .epoch(E::slots_per_epoch()) + .as_u64(); + let (block_meta, by_range_data) = + rig.find_blocks_by_range_request_with_meta(filter().epoch(backfill_epoch)); + let (proof_req_id, proof_peer, proof_start_slot, _) = + rig.find_execution_proofs_by_range_request(filter().epoch(backfill_epoch)); + + assert_eq!(proof_start_slot, block_meta.start_slot); + assert!(proof_peer == zkvm_peer_1 || proof_peer == zkvm_peer_2); + + let mut block = rig.rand_block(); + *block.message_mut().slot_mut() = Slot::new(block_meta.start_slot); + + let block_root = block.canonical_root(); + let block_hash = block + .message() + .body() + .execution_payload() + .expect("execution payload should exist") + .execution_payload_ref() + .block_hash(); + + let wrong_hash = if block_hash == ExecutionBlockHash::zero() { + ExecutionBlockHash::repeat_byte(0x11) + } else { + ExecutionBlockHash::zero() + }; + + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: Some(Arc::new(block)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcBlock { + 
sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: None, + seen_timestamp: D, + }); + + rig.complete_by_range_data_requests(by_range_data); + + let bad_proof = ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(block_meta.start_slot), + wrong_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap(); + + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + peer_id: proof_peer, + execution_proof: Some(Arc::new(bad_proof)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + peer_id: proof_peer, + execution_proof: None, + seen_timestamp: D, + }); + + if rig + .pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + request: + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2( + OldBlocksByRangeRequestV2 { start_slot, .. }, + )), + .. 
+ } if *start_slot == block_meta.start_slot => Some(()), + _ => None, + }) + .is_ok() + { + panic!("unexpected BlocksByRange retry for execution proof failure"); + } + + let (_, retry_peer, retry_start_slot, _) = + rig.find_execution_proofs_by_range_request(filter().epoch(backfill_epoch)); + assert_eq!(retry_start_slot, block_meta.start_slot); + assert_ne!(retry_peer, proof_peer); +} + +#[test] +fn range_sync_execution_proof_retries_exhaust_then_block_retry() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let head_root = Hash256::random(); + let zkvm_peer_1 = rig.add_head_zkvm_peer_with_root(head_root); + let zkvm_peer_2 = rig.add_head_zkvm_peer_with_root(head_root); + let zkvm_peer_3 = rig.add_head_zkvm_peer_with_root(head_root); + let _supernode_peer = rig.add_head_peer_with_root(head_root); + + let (block_meta, by_range_data) = rig.find_blocks_by_range_request_with_meta(filter()); + let epoch = Slot::new(block_meta.start_slot) + .epoch(E::slots_per_epoch()) + .as_u64(); + let (mut proof_req_id, mut proof_peer, proof_start_slot, _) = + rig.find_execution_proofs_by_range_request(filter().epoch(epoch)); + + assert_eq!(proof_start_slot, block_meta.start_slot); + assert!(proof_peer == zkvm_peer_1 || proof_peer == zkvm_peer_2 || proof_peer == zkvm_peer_3); + + let mut block = rig.rand_block(); + *block.message_mut().slot_mut() = Slot::new(block_meta.start_slot); + + let block_root = block.canonical_root(); + let block_hash = block + .message() + .body() + .execution_payload() + .expect("execution payload should exist") + .execution_payload_ref() + .block_hash(); + + let wrong_hash = if block_hash == ExecutionBlockHash::zero() { + ExecutionBlockHash::repeat_byte(0x11) + } else { + ExecutionBlockHash::zero() + }; + + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: Some(Arc::new(block)), + seen_timestamp: D, + }); + 
rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: None, + seen_timestamp: D, + }); + rig.complete_by_range_data_requests(by_range_data); + + for attempt in 1..=MAX_EXECUTION_PROOF_RETRIES { + let bad_proof = ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(block_meta.start_slot), + wrong_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap(); + + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + peer_id: proof_peer, + execution_proof: Some(Arc::new(bad_proof)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + peer_id: proof_peer, + execution_proof: None, + seen_timestamp: D, + }); + + if attempt < MAX_EXECUTION_PROOF_RETRIES { + if rig + .pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + request: + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2( + OldBlocksByRangeRequestV2 { start_slot, .. }, + )), + .. + } if *start_slot == block_meta.start_slot => Some(()), + _ => None, + }) + .is_ok() + { + panic!("unexpected BlocksByRange retry before proof retries are exhausted"); + } + + let (next_req_id, next_peer, retry_start_slot, _) = + rig.find_execution_proofs_by_range_request(filter().epoch(epoch)); + assert_eq!(retry_start_slot, block_meta.start_slot); + assert_ne!(next_peer, proof_peer); + proof_req_id = next_req_id; + proof_peer = next_peer; + } + } + + rig.pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + request: + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { + start_slot, + .. + })), + .. 
+ } if *start_slot == block_meta.start_slot => Some(()), + _ => None, + }) + .unwrap_or_else(|e| panic!("Expected BlocksByRange retry after exhausted proofs: {e}")); +} + +#[test] +fn range_sync_proof_retry_on_unsupported_protocol() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let head_root = Hash256::random(); + let zkvm_peer_1 = rig.add_head_zkvm_peer_with_root(head_root); + let zkvm_peer_2 = rig.add_head_zkvm_peer_with_root(head_root); + let _supernode_peer = rig.add_head_peer_with_root(head_root); + + let (block_meta, by_range_data) = rig.find_blocks_by_range_request_with_meta(filter()); + let epoch = Slot::new(block_meta.start_slot) + .epoch(E::slots_per_epoch()) + .as_u64(); + let (proof_req_id, proof_peer, proof_start_slot, _) = + rig.find_execution_proofs_by_range_request(filter().epoch(epoch)); + + assert_eq!(proof_start_slot, block_meta.start_slot); + assert!(proof_peer == zkvm_peer_1 || proof_peer == zkvm_peer_2); + + let mut block = rig.rand_block(); + *block.message_mut().slot_mut() = Slot::new(block_meta.start_slot); + + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: Some(Arc::new(block)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: None, + seen_timestamp: D, + }); + rig.complete_by_range_data_requests(by_range_data); + + rig.send_sync_message(SyncMessage::RpcError { + peer_id: proof_peer, + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + error: RPCError::UnsupportedProtocol, + }); + + if rig + .pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + request: + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2( + OldBlocksByRangeRequestV2 { start_slot, .. }, + )), + .. 
+ } if *start_slot == block_meta.start_slot => Some(()), + _ => None, + }) + .is_ok() + { + panic!("unexpected BlocksByRange retry on unsupported protocol"); + } + + let (_, retry_peer, retry_start_slot, _) = + rig.find_execution_proofs_by_range_request(filter().epoch(epoch)); + assert_eq!(retry_start_slot, block_meta.start_slot); + assert_ne!(retry_peer, proof_peer); +} + +#[test] +fn range_sync_ignores_bad_proofs_when_cached() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let head_root = Hash256::random(); + let zkvm_peer = rig.add_head_zkvm_peer_with_root(head_root); + let _supernode_peer = rig.add_head_peer_with_root(head_root); + + let (block_meta, by_range_data) = rig.find_blocks_by_range_request_with_meta(filter()); + let epoch = Slot::new(block_meta.start_slot) + .epoch(E::slots_per_epoch()) + .as_u64(); + let (proof_req_id, proof_peer, proof_start_slot, _) = + rig.find_execution_proofs_by_range_request(filter().epoch(epoch)); + + assert_eq!(proof_start_slot, block_meta.start_slot); + assert_eq!(proof_peer, zkvm_peer); + + let mut block = rig.rand_block(); + *block.message_mut().slot_mut() = Slot::new(block_meta.start_slot); + + let block_root = block.canonical_root(); + let block_hash = block + .message() + .body() + .execution_payload() + .expect("execution payload should exist") + .execution_payload_ref() + .block_hash(); + + let min_proofs = rig + .harness + .chain + .spec + .zkvm_min_proofs_required() + .expect("zkvm enabled"); + + let proofs = (0..min_proofs) + .map(|i| { + ExecutionProof::new( + ExecutionProofId::new(u8::try_from(i).expect("proof id fits")).unwrap(), + block.slot(), + block_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap() + }) + .collect::>(); + + rig.harness + .chain + .data_availability_checker + .put_verified_execution_proofs(block_root, proofs) + .unwrap(); + + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: 
block_meta.peer, + beacon_block: Some(Arc::new(block)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: None, + seen_timestamp: D, + }); + + rig.complete_by_range_data_requests(by_range_data); + + let wrong_hash = if block_hash == ExecutionBlockHash::zero() { + ExecutionBlockHash::repeat_byte(0x11) + } else { + ExecutionBlockHash::zero() + }; + + let bad_proof = ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(block_meta.start_slot), + wrong_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap(); + + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + peer_id: proof_peer, + execution_proof: Some(Arc::new(bad_proof)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + peer_id: proof_peer, + execution_proof: None, + seen_timestamp: D, + }); + + if rig + .pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + request: + RequestType::ExecutionProofsByRange(ExecutionProofsByRangeRequest { + start_slot, + .. + }), + .. + } if *start_slot == block_meta.start_slot => Some(()), + _ => None, + }) + .is_ok() + { + panic!("unexpected execution proof retry when cache already satisfies requirement"); + } +} diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index a07cc838863..9cb6620816e 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -32,6 +32,8 @@ pub enum Error { BlobInfoConcurrentMutation, /// The store's `data_column_info` was mutated concurrently, the latest modification wasn't applied. DataColumnInfoConcurrentMutation, + /// The store's `execution_proof_info` was mutated concurrently, the latest modification wasn't applied. 
+ ExecutionProofInfoConcurrentMutation, /// The block or state is unavailable due to weak subjectivity sync. HistoryUnavailable, /// State reconstruction cannot commence because not all historic blocks are known. @@ -92,6 +94,7 @@ pub enum Error { LoadSplit(Box), LoadBlobInfo(Box), LoadDataColumnInfo(Box), + LoadExecutionProofInfo(Box), LoadConfig(Box), LoadHotStateSummary(Hash256, Box), LoadHotStateSummaryForSplit(Box), diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index c4137191744..a05d915795f 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -9,12 +9,13 @@ use crate::metadata::{ ANCHOR_INFO_KEY, ANCHOR_UNINITIALIZED, AnchorInfo, BLOB_INFO_KEY, BlobInfo, COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION, CompactionTimestamp, DATA_COLUMN_CUSTODY_INFO_KEY, DATA_COLUMN_INFO_KEY, DataColumnCustodyInfo, DataColumnInfo, - SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN, SchemaVersion, + EXECUTION_PROOF_INFO_KEY, ExecutionProofInfo, SCHEMA_VERSION_KEY, SPLIT_KEY, + STATE_UPPER_LIMIT_NO_RETAIN, SchemaVersion, }; use crate::state_cache::{PutStateOutcome, StateCache}; use crate::{ BlobSidecarListFromRoot, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp, StoreItem, - StoreOp, get_data_column_key, + StoreOp, get_data_column_key, get_execution_proof_key, metrics::{self, COLD_METRIC, HOT_METRIC}, parse_data_column_key, }; @@ -61,6 +62,8 @@ pub struct HotColdDB, Cold: ItemStore> { blob_info: RwLock, /// The starting slots for the range of data columns stored in the database. data_column_info: RwLock, + /// The starting slots for the range of execution proofs stored in the database. + execution_proof_info: RwLock, pub(crate) config: StoreConfig, pub hierarchy: HierarchyModuli, /// Cold database containing compact historical data. 
@@ -93,6 +96,7 @@ struct BlockCache { block_cache: LruCache>, blob_cache: LruCache>, data_column_cache: LruCache>>>, + execution_proof_cache: LruCache>>, data_column_custody_info_cache: Option, } @@ -102,6 +106,7 @@ impl BlockCache { block_cache: LruCache::new(size), blob_cache: LruCache::new(size), data_column_cache: LruCache::new(size), + execution_proof_cache: LruCache::new(size), data_column_custody_info_cache: None, } } @@ -116,6 +121,9 @@ impl BlockCache { .get_or_insert_mut(block_root, Default::default) .insert(data_column.index, data_column); } + pub fn put_execution_proofs(&mut self, block_root: Hash256, proofs: Vec>) { + self.execution_proof_cache.put(block_root, proofs); + } pub fn put_data_column_custody_info( &mut self, data_column_custody_info: Option, @@ -139,6 +147,12 @@ impl BlockCache { .get(block_root) .and_then(|map| map.get(column_index).cloned()) } + pub fn get_execution_proofs( + &mut self, + block_root: &Hash256, + ) -> Option>> { + self.execution_proof_cache.get(block_root).cloned() + } pub fn get_data_column_custody_info(&self) -> Option { self.data_column_custody_info_cache.clone() } @@ -151,10 +165,14 @@ impl BlockCache { pub fn delete_data_columns(&mut self, block_root: &Hash256) { let _ = self.data_column_cache.pop(block_root); } + pub fn delete_execution_proofs(&mut self, block_root: &Hash256) { + let _ = self.execution_proof_cache.pop(block_root); + } pub fn delete(&mut self, block_root: &Hash256) { self.delete_block(block_root); self.delete_blobs(block_root); self.delete_data_columns(block_root); + self.delete_execution_proofs(block_root); } } @@ -232,6 +250,7 @@ impl HotColdDB, MemoryStore> { anchor_info: RwLock::new(ANCHOR_UNINITIALIZED), blob_info: RwLock::new(BlobInfo::default()), data_column_info: RwLock::new(DataColumnInfo::default()), + execution_proof_info: RwLock::new(ExecutionProofInfo::default()), cold_db: MemoryStore::open(), blobs_db: MemoryStore::open(), hot_db: MemoryStore::open(), @@ -286,6 +305,7 @@ impl HotColdDB, 
BeaconNodeBackend> { anchor_info, blob_info: RwLock::new(BlobInfo::default()), data_column_info: RwLock::new(DataColumnInfo::default()), + execution_proof_info: RwLock::new(ExecutionProofInfo::default()), blobs_db: BeaconNodeBackend::open(&config, blobs_db_path)?, cold_db: BeaconNodeBackend::open(&config, cold_path)?, hot_db, @@ -395,10 +415,38 @@ impl HotColdDB, BeaconNodeBackend> { new_data_column_info.clone(), )?; + // Initialize execution proof info + let execution_proof_info = db.load_execution_proof_info()?; + let zkvm_fork_slot = db + .spec + .zkvm_fork_epoch() + .map(|epoch| epoch.start_slot(E::slots_per_epoch())); + let new_execution_proof_info = match &execution_proof_info { + Some(execution_proof_info) => { + // Set the oldest execution proof slot to the fork slot if it is not yet set. + let oldest_execution_proof_slot = execution_proof_info + .oldest_execution_proof_slot + .or(zkvm_fork_slot); + ExecutionProofInfo { + oldest_execution_proof_slot, + } + } + // First start. + None => ExecutionProofInfo { + // Set the oldest execution proof slot to the fork slot if it is not yet set. + oldest_execution_proof_slot: zkvm_fork_slot, + }, + }; + db.compare_and_set_execution_proof_info_with_write( + <_>::default(), + new_execution_proof_info.clone(), + )?; + info!( path = ?blobs_db_path, oldest_blob_slot = ?new_blob_info.oldest_blob_slot, oldest_data_column_slot = ?new_data_column_info.oldest_data_column_slot, + oldest_execution_proof_slot = ?new_execution_proof_info.oldest_execution_proof_slot, "Blob DB initialized" ); @@ -1027,6 +1075,47 @@ impl, Cold: ItemStore> HotColdDB } } + /// Store execution proofs for a block. 
+ pub fn put_execution_proofs( + &self, + block_root: &Hash256, + proofs: &[ExecutionProof], + ) -> Result<(), Error> { + for proof in proofs { + self.blobs_db.put_bytes( + DBColumn::BeaconExecutionProof, + &get_execution_proof_key(block_root, proof.proof_id.as_u8()), + &proof.as_ssz_bytes(), + )?; + } + if !proofs.is_empty() { + let cached = proofs + .iter() + .map(|proof| Arc::new(proof.clone())) + .collect::>(); + self.block_cache + .as_ref() + .inspect(|cache| cache.lock().put_execution_proofs(*block_root, cached)); + } + Ok(()) + } + + /// Create key-value store operations for storing execution proofs. + pub fn execution_proofs_as_kv_store_ops( + &self, + block_root: &Hash256, + proofs: &[ExecutionProof], + ops: &mut Vec, + ) { + for proof in proofs { + ops.push(KeyValueStoreOp::PutKeyValue( + DBColumn::BeaconExecutionProof, + get_execution_proof_key(block_root, proof.proof_id.as_u8()), + proof.as_ssz_bytes(), + )); + } + } + /// Store a state in the store. pub fn put_state(&self, state_root: &Hash256, state: &BeaconState) -> Result<(), Error> { let mut ops: Vec = Vec::new(); @@ -2558,6 +2647,47 @@ impl, Cold: ItemStore> HotColdDB } } + /// Fetch all execution proofs for a given block from the store. 
+ pub fn get_execution_proofs( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + if let Some(proofs) = self + .block_cache + .as_ref() + .and_then(|cache| cache.lock().get_execution_proofs(block_root)) + { + return Ok(proofs); + } + + let mut proofs = Vec::new(); + let prefix = block_root.as_slice(); + + for result in self + .blobs_db + .iter_column_from::>(DBColumn::BeaconExecutionProof, prefix) + { + let (key, value) = result?; + // Check if key starts with our block_root prefix + if !key.starts_with(prefix) { + // We've moved past this block's proofs + break; + } + let proof = Arc::new(ExecutionProof::from_ssz_bytes(&value)?); + proofs.push(proof); + } + + if !proofs.is_empty() { + self.block_cache.as_ref().inspect(|cache| { + cache + .lock() + .put_execution_proofs(*block_root, proofs.clone()) + }); + } + + Ok(proofs) + } + /// Fetch all keys in the data_column column with prefix `block_root` pub fn get_data_column_keys(&self, block_root: Hash256) -> Result, Error> { self.blobs_db @@ -2877,6 +3007,77 @@ impl, Cold: ItemStore> HotColdDB data_column_info.as_kv_store_op(DATA_COLUMN_INFO_KEY) } + /// Get a clone of the store's execution proof info. + /// + /// To do mutations, use `compare_and_set_execution_proof_info`. + pub fn get_execution_proof_info(&self) -> ExecutionProofInfo { + self.execution_proof_info.read_recursive().clone() + } + + /// Initialize the `ExecutionProofInfo` when starting from genesis or a checkpoint. + pub fn init_execution_proof_info(&self, anchor_slot: Slot) -> Result { + let oldest_execution_proof_slot = self.spec.zkvm_fork_epoch().map(|fork_epoch| { + std::cmp::max(anchor_slot, fork_epoch.start_slot(E::slots_per_epoch())) + }); + let execution_proof_info = ExecutionProofInfo { + oldest_execution_proof_slot, + }; + self.compare_and_set_execution_proof_info( + self.get_execution_proof_info(), + execution_proof_info, + ) + } + + /// Atomically update the execution proof info from `prev_value` to `new_value`. 
+ /// + /// Return a `KeyValueStoreOp` which should be written to disk, possibly atomically with other + /// values. + /// + /// Return an `ExecutionProofInfoConcurrentMutation` error if the `prev_value` provided + /// is not correct. + pub fn compare_and_set_execution_proof_info( + &self, + prev_value: ExecutionProofInfo, + new_value: ExecutionProofInfo, + ) -> Result { + let mut execution_proof_info = self.execution_proof_info.write(); + if *execution_proof_info == prev_value { + let kv_op = self.store_execution_proof_info_in_batch(&new_value); + *execution_proof_info = new_value; + Ok(kv_op) + } else { + Err(Error::ExecutionProofInfoConcurrentMutation) + } + } + + /// As for `compare_and_set_execution_proof_info`, but also writes to disk immediately. + pub fn compare_and_set_execution_proof_info_with_write( + &self, + prev_value: ExecutionProofInfo, + new_value: ExecutionProofInfo, + ) -> Result<(), Error> { + let kv_store_op = self.compare_and_set_execution_proof_info(prev_value, new_value)?; + self.hot_db.do_atomically(vec![kv_store_op]) + } + + /// Load the execution proof info from disk, but do not set `self.execution_proof_info`. + fn load_execution_proof_info(&self) -> Result, Error> { + self.hot_db + .get(&EXECUTION_PROOF_INFO_KEY) + .map_err(|e| Error::LoadExecutionProofInfo(e.into())) + } + + /// Store the given `execution_proof_info` to disk. + /// + /// The argument is intended to be `self.execution_proof_info`, but is passed manually to avoid + /// issues with recursive locking. + fn store_execution_proof_info_in_batch( + &self, + execution_proof_info: &ExecutionProofInfo, + ) -> KeyValueStoreOp { + execution_proof_info.as_kv_store_op(EXECUTION_PROOF_INFO_KEY) + } + /// Return the slot-window describing the available historic states. /// /// Returns `(lower_limit, upper_limit)`. @@ -3395,6 +3596,178 @@ impl, Cold: ItemStore> HotColdDB Ok(()) } + /// Try to prune execution proofs older than the execution proof boundary. 
+ /// + /// Proofs from the epoch `execution_proof_boundary` are retained. + /// This epoch is an _exclusive_ endpoint for the pruning process. + /// + /// This function only supports pruning execution proofs older than the split point, + /// which is older than (or equal to) finalization. + pub fn try_prune_execution_proofs( + &self, + force: bool, + execution_proof_boundary: Epoch, + ) -> Result<(), Error> { + // Check if zkvm fork is enabled + if self.spec.zkvm_fork_epoch().is_none() { + debug!("ZKVM fork is disabled"); + return Ok(()); + } + + let pruning_enabled = self.get_config().prune_blobs; // Use same config as blobs for now + if !force && !pruning_enabled { + debug!( + prune_blobs = pruning_enabled, + "Execution proof pruning is disabled" + ); + return Ok(()); + } + + let execution_proof_info = self.get_execution_proof_info(); + let Some(oldest_execution_proof_slot) = execution_proof_info.oldest_execution_proof_slot + else { + debug!("No execution proofs stored yet"); + return Ok(()); + }; + + let start_epoch = oldest_execution_proof_slot.epoch(E::slots_per_epoch()); + + // Prune execution proofs up until the `execution_proof_boundary - 1` or the split + // slot's epoch, whichever is older. + let split = self.get_split_info(); + let end_epoch = std::cmp::min( + execution_proof_boundary.saturating_sub(1u64), + split.slot.epoch(E::slots_per_epoch()).saturating_sub(1u64), + ); + let end_slot = end_epoch.end_slot(E::slots_per_epoch()); + + let can_prune = end_epoch != Epoch::new(0) && start_epoch <= end_epoch; + if !can_prune { + debug!( + %oldest_execution_proof_slot, + %execution_proof_boundary, + %split.slot, + %end_epoch, + %start_epoch, + "Execution proofs are pruned" + ); + return Ok(()); + } + + debug!( + %end_epoch, + %execution_proof_boundary, + "Pruning execution proofs" + ); + + // Iterate blocks backwards from the `end_epoch`. 
+ let Some((end_block_root, _)) = self + .forwards_block_roots_iterator_until(end_slot, end_slot, || { + self.get_hot_state(&split.state_root, true)? + .ok_or(HotColdDBError::MissingSplitState( + split.state_root, + split.slot, + )) + .map(|state| (state, split.state_root)) + .map_err(Into::into) + })? + .next() + .transpose()? + else { + debug!( + %end_epoch, + %execution_proof_boundary, + "No execution proofs to prune" + ); + return Ok(()); + }; + + let mut db_ops = vec![]; + let mut removed_block_roots = vec![]; + let mut new_oldest_slot: Option = None; + + // Iterate blocks backwards until we reach blocks older than the boundary. + for tuple in ParentRootBlockIterator::new(self, end_block_root) { + let (block_root, blinded_block) = tuple?; + let slot = blinded_block.slot(); + + // Get all execution proof keys for this block + let keys = self.get_all_execution_proof_keys(&block_root); + + // Check if any proofs exist for this block + let mut block_has_proofs = false; + for key in keys { + if self + .blobs_db + .key_exists(DBColumn::BeaconExecutionProof, &key)? 
+ { + block_has_proofs = true; + db_ops.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconExecutionProof, + key, + )); + } + } + + if block_has_proofs { + debug!( + ?block_root, + %slot, + "Pruning execution proofs for block" + ); + removed_block_roots.push(block_root); + new_oldest_slot = Some(slot); + } + // Continue iterating even if this block has no proofs - proofs may be sparse + } + + // Commit deletions + if !db_ops.is_empty() { + debug!( + num_deleted = db_ops.len(), + "Deleting execution proofs from disk" + ); + self.blobs_db.do_atomically(db_ops)?; + } + + // TODO(zkproofs): Fix this to make it more readable + if !removed_block_roots.is_empty() + && let Some(mut block_cache) = self.block_cache.as_ref().map(|cache| cache.lock()) + { + for block_root in removed_block_roots { + block_cache.delete_execution_proofs(&block_root); + } + } + + // Update the execution proof info with the new oldest slot + if let Some(new_slot) = new_oldest_slot { + let new_oldest = end_slot + 1; + self.compare_and_set_execution_proof_info_with_write( + execution_proof_info.clone(), + ExecutionProofInfo { + oldest_execution_proof_slot: Some(new_oldest), + }, + )?; + debug!( + old_oldest = %new_slot, + new_oldest = %new_oldest, + "Updated execution proof info" + ); + } + + debug!("Execution proof pruning complete"); + + Ok(()) + } + + /// Get all possible execution proof keys for a given block root. + /// Returns keys for proof_ids 0 to MAX_PROOFS-1. + fn get_all_execution_proof_keys(&self, block_root: &Hash256) -> Vec> { + (0..types::MAX_PROOFS as u8) + .map(|proof_id| get_execution_proof_key(block_root, proof_id)) + .collect() + } + /// Delete *all* states from the freezer database and update the anchor accordingly. 
/// /// WARNING: this method deletes the genesis state and replaces it with the provided diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index ae5b2e1e571..516e858e581 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -40,6 +40,7 @@ use strum::{EnumIter, EnumString, IntoStaticStr}; pub use types::*; const DATA_COLUMN_DB_KEY_SIZE: usize = 32 + 8; +const EXECUTION_PROOF_DB_KEY_SIZE: usize = 32 + 1; // block_root + proof_id pub type ColumnIter<'a, K> = Box), Error>> + 'a>; pub type ColumnKeyIter<'a, K> = Box> + 'a>; @@ -171,6 +172,25 @@ pub fn parse_data_column_key(data: Vec) -> Result<(Hash256, ColumnIndex), Er Ok((block_root, column_index)) } +pub fn get_execution_proof_key(block_root: &Hash256, proof_id: u8) -> Vec { + let mut result = block_root.as_slice().to_vec(); + result.push(proof_id); + result +} + +pub fn parse_execution_proof_key(data: Vec) -> Result<(Hash256, u8), Error> { + if data.len() != EXECUTION_PROOF_DB_KEY_SIZE { + return Err(Error::InvalidKey(format!( + "Unexpected BeaconExecutionProof key len {}", + data.len() + ))); + } + let (block_root_bytes, proof_id_bytes) = data.split_at(32); + let block_root = Hash256::from_slice(block_root_bytes); + let proof_id = proof_id_bytes[0]; + Ok((block_root, proof_id)) +} + #[must_use] #[derive(Clone)] pub enum KeyValueStoreOp { @@ -263,6 +283,12 @@ pub enum DBColumn { BeaconDataColumn, #[strum(serialize = "bdi")] BeaconDataColumnCustodyInfo, + /// For storing execution proofs (zkVM proofs) in the blob database. + /// + /// - Key: `Hash256` block root + `u8` proof_id (33 bytes total). + /// - Value: SSZ-encoded ExecutionProof. + #[strum(serialize = "bep")] + BeaconExecutionProof, /// For full `BeaconState`s in the hot database (finalized or fork-boundary states). /// /// DEPRECATED. 
@@ -437,6 +463,7 @@ impl DBColumn { | Self::LightClientUpdate | Self::Dummy => 8, Self::BeaconDataColumn => DATA_COLUMN_DB_KEY_SIZE, + Self::BeaconExecutionProof => EXECUTION_PROOF_DB_KEY_SIZE, } } } diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index cf494684515..7a5979481fe 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -19,6 +19,7 @@ pub const ANCHOR_INFO_KEY: Hash256 = Hash256::repeat_byte(5); pub const BLOB_INFO_KEY: Hash256 = Hash256::repeat_byte(6); pub const DATA_COLUMN_INFO_KEY: Hash256 = Hash256::repeat_byte(7); pub const DATA_COLUMN_CUSTODY_INFO_KEY: Hash256 = Hash256::repeat_byte(8); +pub const EXECUTION_PROOF_INFO_KEY: Hash256 = Hash256::repeat_byte(9); /// State upper limit value used to indicate that a node is not storing historic states. pub const STATE_UPPER_LIMIT_NO_RETAIN: Slot = Slot::new(u64::MAX); @@ -255,3 +256,30 @@ impl StoreItem for DataColumnInfo { Ok(Self::from_ssz_bytes(bytes)?) } } + +/// Database parameters relevant to execution proof sync. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Default)] +pub struct ExecutionProofInfo { + /// The slot after which execution proofs are or *will be* available (>=). + /// + /// If this slot is in the future, then it is the first slot of the ZKVM fork, from which + /// execution proofs will be available. + /// + /// If the `oldest_execution_proof_slot` is `None` then this means that the ZKVM fork epoch + /// is not yet known. + pub oldest_execution_proof_slot: Option, +} + +impl StoreItem for ExecutionProofInfo { + fn db_column() -> DBColumn { + DBColumn::BeaconMeta + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(Self::from_ssz_bytes(bytes)?) 
+ } +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 1bab464b689..21eb5e1b8c7 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -61,7 +61,7 @@ pub mod chain_spec { } // Re-export execution_proof types for backwards compatibility -pub use crate::execution_proof::{ExecutionProof, MAX_PROOF_DATA_BYTES}; +pub use crate::execution_proof::{ExecutionProof, MAX_PROOF_DATA_BYTES, MAX_PROOFS}; pub use crate::execution_proof_id::{EXECUTION_PROOF_TYPE_COUNT, ExecutionProofId}; pub mod beacon_block { diff --git a/dummy_el/geth-wrapper.sh b/dummy_el/geth-wrapper.sh index 8112bb44e9c..5705888b49c 100644 --- a/dummy_el/geth-wrapper.sh +++ b/dummy_el/geth-wrapper.sh @@ -2,28 +2,106 @@ set -e # This is a wrapper that pretends to be geth but actually runs dummy_el -# Kurtosis calls: geth init ... && geth --authrpc.port=8551 ... -# We ignore the init, and when we see the actual geth command with authrpc.port, we start dummy_el +# Kurtosis may call various geth commands - we handle them all appropriately echo "[dummy_el geth-wrapper] Called with: $@" -# Check if this is the "geth init" command and ignore it +# Check if this is the "geth init" command - ignore it if echo "$@" | grep -q "init"; then echo "[dummy_el geth-wrapper] Ignoring 'geth init' command" exit 0 fi -# If we're here, it's the actual geth run command -# Kurtosis mounts JWT secret at /jwt/jwtsecret -JWT_PATH="/jwt/jwtsecret" +# Check for version/help commands +if echo "$@" | grep -qE "^(version|--version|-v|help|--help|-h)$"; then + echo "Dummy-EL/v0.1.0 (geth-compatible wrapper)" + exit 0 +fi + +# Filter out flags that we don't need for dummy_el +# These are geth-specific flags that kurtosis may pass +FILTERED_ARGS="" +for arg in "$@"; do + case "$arg" in + --override.*|--override*|-override.*|-override*) + echo "[dummy_el geth-wrapper] Ignoring geth flag: $arg" + ;; + --datadir=*|--datadir) + echo "[dummy_el geth-wrapper] Ignoring geth flag: $arg" + ;; + 
--syncmode=*|--syncmode) + echo "[dummy_el geth-wrapper] Ignoring geth flag: $arg" + ;; + --gcmode=*|--gcmode) + echo "[dummy_el geth-wrapper] Ignoring geth flag: $arg" + ;; + --networkid=*|--networkid) + echo "[dummy_el geth-wrapper] Ignoring geth flag: $arg" + ;; + *) + FILTERED_ARGS="$FILTERED_ARGS $arg" + ;; + esac +done + +# For any other command, we start dummy_el +# Parse geth arguments to extract what we need + +JWT_PATH="" +ENGINE_PORT="8551" +RPC_PORT="8545" +WS_PORT="8546" +METRICS_PORT="9001" +P2P_PORT="30303" +HOST="0.0.0.0" + +# Parse arguments to find JWT secret and ports +for arg in "$@"; do + case "$arg" in + --authrpc.jwtsecret=*) + JWT_PATH="${arg#*=}" + ;; + --authrpc.port=*) + ENGINE_PORT="${arg#*=}" + ;; + --http.port=*) + RPC_PORT="${arg#*=}" + ;; + --ws.port=*) + WS_PORT="${arg#*=}" + ;; + --metrics.port=*) + METRICS_PORT="${arg#*=}" + ;; + --port=*) + P2P_PORT="${arg#*=}" + ;; + --discovery.port=*) + # Use discovery port for P2P if specified + P2P_PORT="${arg#*=}" + ;; + esac +done + +# Fallback to default JWT location if not parsed +if [ -z "$JWT_PATH" ] && [ -f "/jwt/jwtsecret" ]; then + JWT_PATH="/jwt/jwtsecret" +fi echo "[dummy_el geth-wrapper] Starting dummy_el instead of geth" +echo "[dummy_el geth-wrapper] Engine port: $ENGINE_PORT, RPC port: $RPC_PORT, WS port: $WS_PORT" +echo "[dummy_el geth-wrapper] Metrics port: $METRICS_PORT, P2P port: $P2P_PORT" -# Run dummy_el with JWT if available, otherwise without -if [ -f "$JWT_PATH" ]; then +# Build dummy_el command +DUMMY_EL_CMD="/usr/local/bin/dummy_el --host $HOST --port $ENGINE_PORT --rpc-port $RPC_PORT --ws-port $WS_PORT --metrics-port $METRICS_PORT --p2p-port $P2P_PORT" + +# Add JWT if available +if [ -n "$JWT_PATH" ] && [ -f "$JWT_PATH" ]; then echo "[dummy_el geth-wrapper] Using JWT from $JWT_PATH" - exec /usr/local/bin/dummy_el --host 0.0.0.0 --port 8551 --jwt-secret "$JWT_PATH" + DUMMY_EL_CMD="$DUMMY_EL_CMD --jwt-secret $JWT_PATH" else - echo "[dummy_el geth-wrapper] WARNING: No 
JWT file found at $JWT_PATH" - exec /usr/local/bin/dummy_el --host 0.0.0.0 --port 8551 + echo "[dummy_el geth-wrapper] WARNING: No JWT file found" fi + +echo "[dummy_el geth-wrapper] Executing: $DUMMY_EL_CMD" +exec $DUMMY_EL_CMD diff --git a/execution-witness-sentry/Cargo.toml b/execution-witness-sentry/Cargo.toml new file mode 100644 index 00000000000..ceee0f20bed --- /dev/null +++ b/execution-witness-sentry/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "execution-witness-sentry" +version = "0.1.0" +edition = { workspace = true } +description = "Monitors execution layer nodes and fetches execution witnesses" + +[dependencies] +alloy-provider = { version = "1", features = ["ws"] } +alloy-rpc-types-eth = "1" +anyhow = "1" +clap = { version = "4", features = ["derive"] } +discv5 = { workspace = true } +eventsource-client = "0.13" +flate2 = "1.1" +futures = { workspace = true } +reqwest = { workspace = true, features = ["json"] } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } +thiserror = "1" +tokio = { workspace = true, features = ["sync", "rt-multi-thread", "macros"] } +toml = "0.8" +tracing = { workspace = true } +tracing-subscriber = { workspace = true, features = ["env-filter"] } +url = { workspace = true } diff --git a/execution-witness-sentry/config.toml b/execution-witness-sentry/config.toml new file mode 100644 index 00000000000..b35c2ca262e --- /dev/null +++ b/execution-witness-sentry/config.toml @@ -0,0 +1,27 @@ +output_dir = "." 
+chain = "local" +retain = 10 +num_proofs = 2 + +[[endpoints]] +name = "el-1-reth-lighthouse" +el_url = "http://127.0.0.1:32003" +el_ws_url = "ws://127.0.0.1:32004" + +# Non-zkvm CL for head event subscription (to know when new blocks arrive) +[[cl_endpoints]] +name = "cl-1-lighthouse-reth" +url = "http://127.0.0.1:33001/" + +# zkvm-enabled CLs for proof submission +[[cl_endpoints]] +name = "cl-4-lighthouse-geth" +url = "http://127.0.0.1:33022/" + +[[cl_endpoints]] +name = "cl-5-lighthouse-geth" +url = "http://127.0.0.1:33029/" + +[[cl_endpoints]] +name = "cl-6-lighthouse-geth" +url = "http://127.0.0.1:33036/" diff --git a/execution-witness-sentry/src/cl_subscription.rs b/execution-witness-sentry/src/cl_subscription.rs new file mode 100644 index 00000000000..040e01cc438 --- /dev/null +++ b/execution-witness-sentry/src/cl_subscription.rs @@ -0,0 +1,128 @@ +//! SSE subscription for CL head events. + +use std::pin::Pin; +use std::task::{Context, Poll}; + +use eventsource_client::{Client, SSE}; +use futures::Stream; +use serde::Deserialize; +use url::Url; + +use crate::error::{Error, Result}; + +/// Head event from the CL. +#[derive(Debug, Clone, Deserialize)] +pub struct HeadEvent { + pub slot: String, + pub block: String, + pub state: String, + pub epoch_transition: bool, + pub execution_optimistic: bool, +} + +/// Block event from the CL. +#[derive(Debug, Clone, Deserialize)] +pub struct BlockEvent { + pub slot: String, + pub block: String, + pub execution_optimistic: bool, +} + +/// Unified CL event. +#[derive(Debug, Clone)] +pub enum ClEvent { + Head(HeadEvent), + Block(BlockEvent), +} + +impl ClEvent { + pub fn slot(&self) -> &str { + match self { + ClEvent::Head(e) => &e.slot, + ClEvent::Block(e) => &e.slot, + } + } + + pub fn block_root(&self) -> &str { + match self { + ClEvent::Head(e) => &e.block, + ClEvent::Block(e) => &e.block, + } + } +} + +/// Stream of CL events. 
+pub struct ClEventStream { + client: Pin> + Send>>, +} + +impl Stream for ClEventStream { + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + loop { + match self.client.as_mut().poll_next(cx) { + Poll::Ready(Some(Ok(SSE::Event(event)))) => { + let result = match event.event_type.as_str() { + "head" => serde_json::from_str::(&event.data) + .map(ClEvent::Head) + .map_err(Error::Parse), + "block" => serde_json::from_str::(&event.data) + .map(ClEvent::Block) + .map_err(Error::Parse), + _ => continue, + }; + return Poll::Ready(Some(result)); + } + Poll::Ready(Some(Ok(SSE::Comment(_)))) => continue, + Poll::Ready(Some(Ok(SSE::Connected(_)))) => continue, + Poll::Ready(Some(Err(e))) => { + return Poll::Ready(Some(Err(Error::Sse(format!("{:?}", e))))); + } + Poll::Ready(None) => return Poll::Ready(None), + Poll::Pending => return Poll::Pending, + } + } + } +} + +/// Subscribe to CL head events via SSE. +pub fn subscribe_cl_events(base_url: &str) -> Result { + let url = build_events_url(base_url)?; + + let client = eventsource_client::ClientBuilder::for_url(url.as_str()) + .map_err(|e| Error::Config(format!("Invalid SSE URL: {}", e)))? + .build(); + + Ok(ClEventStream { + client: Box::pin(client.stream()), + }) +} + +fn build_events_url(base_url: &str) -> Result { + let base = Url::parse(base_url)?; + Ok(base.join("/eth/v1/events?topics=head,block")?) 
+} + +#[cfg(test)] +mod tests { + use super::build_events_url; + + #[test] + fn build_events_url_adds_path_without_trailing_slash() { + let url = build_events_url("http://localhost:5052").unwrap(); + assert_eq!( + url.as_str(), + "http://localhost:5052/eth/v1/events?topics=head,block" + ); + } + + #[test] + fn build_events_url_adds_path_with_trailing_slash() { + let url = build_events_url("http://localhost:5052/").unwrap(); + assert_eq!( + url.as_str(), + "http://localhost:5052/eth/v1/events?topics=head,block" + ); + } +} diff --git a/execution-witness-sentry/src/config.rs b/execution-witness-sentry/src/config.rs new file mode 100644 index 00000000000..50d64969e72 --- /dev/null +++ b/execution-witness-sentry/src/config.rs @@ -0,0 +1,58 @@ +//! Configuration types for the execution witness sentry. + +use std::path::Path; + +use serde::{Deserialize, Serialize}; + +use crate::error::{Error, Result}; + +/// Sentry configuration. +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct Config { + /// Execution layer endpoints to monitor. + pub endpoints: Vec, + /// Consensus layer endpoints to submit proofs to. + pub cl_endpoints: Option>, + /// Directory to save block and witness data. + pub output_dir: Option, + /// Chain identifier (used in output path). + pub chain: Option, + /// Number of recent blocks to retain (older blocks are deleted). + pub retain: Option, + /// Number of proofs to submit per block. + pub num_proofs: Option, +} + +/// Execution layer endpoint configuration. +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct Endpoint { + /// Human-readable name for this endpoint. + pub name: String, + /// HTTP JSON-RPC URL. + pub el_url: String, + /// WebSocket URL for subscriptions. + pub el_ws_url: String, +} + +/// Consensus layer endpoint configuration. +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct ClEndpoint { + /// Human-readable name for this endpoint. + pub name: String, + /// HTTP API URL. 
+ pub url: String, +} + +impl Config { + /// Load configuration from a TOML file. + pub fn load(path: impl AsRef) -> Result { + let content = std::fs::read_to_string(path.as_ref()).map_err(|e| { + Error::Config(format!( + "failed to read config file '{}': {}", + path.as_ref().display(), + e + )) + })?; + Ok(toml::from_str(&content)?) + } +} diff --git a/execution-witness-sentry/src/error.rs b/execution-witness-sentry/src/error.rs new file mode 100644 index 00000000000..91f0a7b72e1 --- /dev/null +++ b/execution-witness-sentry/src/error.rs @@ -0,0 +1,53 @@ +//! Error types for the execution witness sentry. + +use std::io; + +use thiserror::Error; + +/// Errors that can occur in the execution witness sentry. +#[derive(Debug, Error)] +pub enum Error { + /// Failed to load or parse configuration. + #[error("config error: {0}")] + Config(String), + + /// HTTP request failed. + #[error("HTTP error: {0}")] + Http(#[from] reqwest::Error), + + /// JSON-RPC error returned by the node. + #[error("RPC error {code}: {message}")] + Rpc { + /// Error code. + code: i64, + /// Error message. + message: String, + }, + + /// Failed to parse response. + #[error("parse error: {0}")] + Parse(#[from] serde_json::Error), + + /// WebSocket connection or subscription failed. + #[error("WebSocket error: {0}")] + WebSocket(String), + + /// URL parsing failed. + #[error("invalid URL: {0}")] + InvalidUrl(#[from] url::ParseError), + + /// I/O error (file operations, compression). + #[error("I/O error: {0}")] + Io(#[from] io::Error), + + /// TOML parsing error. + #[error("TOML parse error: {0}")] + Toml(#[from] toml::de::Error), + + /// SSE connection error. + #[error("SSE error: {0}")] + Sse(String), +} + +/// Result type alias using our Error type. 
+pub type Result = std::result::Result; diff --git a/execution-witness-sentry/src/lib.rs b/execution-witness-sentry/src/lib.rs new file mode 100644 index 00000000000..fcb9b077cc8 --- /dev/null +++ b/execution-witness-sentry/src/lib.rs @@ -0,0 +1,45 @@ +//! Execution witness sentry - monitors execution layer nodes for new blocks +//! and fetches their execution witnesses. +//! +//! This crate provides functionality to: +//! - Subscribe to new block headers via WebSocket +//! - Fetch blocks and execution witnesses via JSON-RPC +//! - Store block data and witnesses to disk +//! - Submit execution proofs to consensus layer nodes +//! +//! ## Example +//! +//! ```ignore +//! use execution_witness_sentry::{Config, ElClient, subscribe_blocks}; +//! +//! let config = Config::load("config.toml")?; +//! let client = ElClient::new(url); +//! +//! // Subscribe to new blocks +//! let mut stream = subscribe_blocks(&ws_url).await?; +//! +//! while let Some(header) = stream.next().await { +//! let witness = client.get_execution_witness(header.number).await?; +//! // Process witness... +//! } +//! ``` + +pub mod cl_subscription; +pub mod config; +pub mod error; +pub mod rpc; +pub mod storage; +pub mod subscription; + +// Re-export main types at crate root for convenience. +pub use cl_subscription::{BlockEvent, ClEvent, ClEventStream, HeadEvent, subscribe_cl_events}; +pub use config::{ClEndpoint, Config, Endpoint}; +pub use error::{Error, Result}; +pub use rpc::{BlockInfo, ClClient, ElClient, ExecutionProof, generate_random_proof}; +pub use storage::{ + BlockMetadata, BlockStorage, SavedProof, compress_gzip, decompress_gzip, load_block_data, +}; +pub use subscription::subscribe_blocks; + +// Re-export alloy types that appear in our public API. 
+pub use alloy_rpc_types_eth::{Block, Header}; diff --git a/execution-witness-sentry/src/main.rs b/execution-witness-sentry/src/main.rs new file mode 100644 index 00000000000..8170bcda024 --- /dev/null +++ b/execution-witness-sentry/src/main.rs @@ -0,0 +1,731 @@ +//! Execution witness sentry CLI. +//! +//! Monitors execution layer nodes for new blocks and fetches their execution witnesses. +//! Subscribes to CL head events to correlate EL blocks with beacon slots. + +use std::collections::HashMap; +use std::path::PathBuf; +use std::pin::pin; +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use clap::Parser; +use futures::StreamExt; +use tokio::sync::Mutex; +use tracing::{debug, error, info, warn}; +use url::Url; + +use execution_witness_sentry::{ + BlockStorage, ClClient, ClEvent, Config, ElClient, ExecutionProof, SavedProof, + generate_random_proof, subscribe_blocks, subscribe_cl_events, +}; + +/// Execution witness sentry - monitors EL nodes and fetches witnesses. +#[derive(Parser, Debug)] +#[command(name = "execution-witness-sentry")] +#[command(about = "Monitor execution layer nodes and fetch execution witnesses")] +struct Cli { + /// Path to configuration file. + #[arg(long, short, default_value = "config.toml")] + config: PathBuf, +} + +/// Cached EL block data waiting for CL correlation. +struct CachedElBlock { + block_number: u64, + timestamp: Instant, +} + +/// Cache for EL blocks keyed by block_hash. 
+struct ElBlockCache { + blocks: HashMap, + max_age: Duration, +} + +impl ElBlockCache { + fn new(max_age: Duration) -> Self { + Self { + blocks: HashMap::new(), + max_age, + } + } + + fn insert(&mut self, block_hash: String, block_number: u64, _endpoint_name: String) { + self.blocks.insert( + block_hash, + CachedElBlock { + block_number, + timestamp: Instant::now(), + }, + ); + self.cleanup(); + } + + fn get(&self, block_hash: &str) -> Option<&CachedElBlock> { + self.blocks.get(block_hash) + } + + fn remove(&mut self, block_hash: &str) -> Option { + self.blocks.remove(block_hash) + } + + fn cleanup(&mut self) { + let now = Instant::now(); + self.blocks + .retain(|_, v| now.duration_since(v.timestamp) < self.max_age); + } +} + +/// EL event for the channel. +struct ElBlockEvent { + endpoint_name: String, + block_number: u64, + block_hash: String, +} + +/// CL event for the channel. +struct ClBlockEvent { + cl_name: String, + slot: u64, + block_root: String, + execution_block_hash: String, +} + +/// Status of a zkvm CL node. +#[derive(Debug, Clone)] +struct ZkvmClStatus { + name: String, + head_slot: u64, + gap: i64, // Negative means behind source CL +} + +/// Monitor zkvm CL nodes and report their sync status. +async fn monitor_zkvm_status( + source_client: &ClClient, + zkvm_clients: &[(String, ClClient)], +) -> Vec { + let source_head = match source_client.get_head_slot().await { + Ok(slot) => slot, + Err(e) => { + warn!(error = %e, "Failed to get source CL head"); + return vec![]; + } + }; + + let mut statuses = Vec::new(); + for (name, client) in zkvm_clients { + match client.get_head_slot().await { + Ok(head_slot) => { + let gap = head_slot as i64 - source_head as i64; + statuses.push(ZkvmClStatus { + name: name.clone(), + head_slot, + gap, + }); + } + Err(e) => { + warn!(name = %name, error = %e, "Failed to get zkvm CL head"); + } + } + } + + statuses +} + +/// Backfill proofs for a zkvm CL that is behind. 
+/// First tries to use saved proofs from disk, falls back to generating new ones. +/// Returns the number of proofs submitted. +async fn backfill_proofs( + source_client: &ClClient, + zkvm_client: &ClClient, + zkvm_name: &str, + num_proofs: usize, + max_slots: u64, + storage: Option<&BlockStorage>, +) -> usize { + // Get the zkvm CL's current head + let zkvm_head = match zkvm_client.get_head_slot().await { + Ok(slot) => slot, + Err(e) => { + warn!(name = %zkvm_name, error = %e, "Failed to get zkvm CL head for backfill"); + return 0; + } + }; + + // Get source CL head + let source_head = match source_client.get_head_slot().await { + Ok(slot) => slot, + Err(e) => { + warn!(error = %e, "Failed to get source CL head for backfill"); + return 0; + } + }; + + if zkvm_head >= source_head { + return 0; // Already caught up + } + + let gap = source_head - zkvm_head; + let slots_to_check = gap.min(max_slots); + + info!( + name = %zkvm_name, + zkvm_head = zkvm_head, + source_head = source_head, + gap = gap, + checking = slots_to_check, + "Backfilling proofs" + ); + + let mut proofs_submitted = 0; + + // Iterate through slots from zkvm_head + 1 to zkvm_head + slots_to_check + for slot in (zkvm_head + 1)..=(zkvm_head + slots_to_check) { + // First try to load saved proofs from disk + if let Some(storage) = storage + && let Ok(Some((_metadata, saved_proofs))) = storage.load_proofs_by_slot(slot) + && !saved_proofs.is_empty() + { + debug!( + slot = slot, + num_proofs = saved_proofs.len(), + "Using saved proofs from disk" + ); + + for saved_proof in &saved_proofs { + let proof = ExecutionProof { + proof_id: saved_proof.proof_id, + slot: saved_proof.slot, + block_hash: saved_proof.block_hash.clone(), + block_root: saved_proof.block_root.clone(), + proof_data: saved_proof.proof_data.clone(), + }; + + match zkvm_client.submit_execution_proof(&proof).await { + Ok(()) => { + debug!( + name = %zkvm_name, + slot = slot, + proof_id = saved_proof.proof_id, + "Backfill proof submitted (from 
disk)" + ); + proofs_submitted += 1; + } + Err(e) => { + let msg = e.to_string(); + if !msg.contains("already known") { + debug!( + name = %zkvm_name, + slot = slot, + proof_id = saved_proof.proof_id, + error = %e, + "Backfill proof failed" + ); + } + } + } + } + continue; // Move to next slot + } + + // No saved proofs, fetch block info and generate new proofs + let block_info = match source_client.get_block_info(slot).await { + Ok(Some(info)) => info, + Ok(None) => { + debug!(slot = slot, "Empty slot, skipping"); + continue; + } + Err(e) => { + debug!(slot = slot, error = %e, "Failed to get block info"); + continue; + } + }; + + // Only submit proofs for blocks with execution payloads + let Some(exec_hash) = block_info.execution_block_hash else { + debug!(slot = slot, "No execution payload, skipping"); + continue; + }; + + // Generate and submit proofs + for proof_id in 0..num_proofs { + let proof = ExecutionProof { + proof_id: proof_id as u8, + slot, + block_hash: exec_hash.clone(), + block_root: block_info.block_root.clone(), + proof_data: generate_random_proof(proof_id as u32), + }; + + match zkvm_client.submit_execution_proof(&proof).await { + Ok(()) => { + debug!( + name = %zkvm_name, + slot = slot, + proof_id = proof_id, + "Backfill proof submitted (generated)" + ); + proofs_submitted += 1; + } + Err(e) => { + let msg = e.to_string(); + if !msg.contains("already known") { + debug!( + name = %zkvm_name, + slot = slot, + proof_id = proof_id, + error = %e, + "Backfill proof failed" + ); + } + } + } + } + } + + if proofs_submitted > 0 { + info!( + name = %zkvm_name, + proofs_submitted = proofs_submitted, + "Backfill complete" + ); + } + + proofs_submitted +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + tracing_subscriber::fmt() + .with_env_filter( + tracing_subscriber::EnvFilter::from_default_env() + .add_directive("execution_witness_sentry=info".parse()?), + ) + .init(); + + let cli = Cli::parse(); + let config = Config::load(&cli.config)?; + + 
info!(endpoints = config.endpoints.len(), "Loaded configuration"); + for endpoint in &config.endpoints { + info!( + name = %endpoint.name, + el_url = %endpoint.el_url, + el_ws_url = %endpoint.el_ws_url, + "EL endpoint configured" + ); + } + + // Set up CL clients - separate zkvm targets from event sources + let mut zkvm_clients: Vec<(String, ClClient)> = Vec::new(); // zkvm-enabled nodes for proof submission + let mut event_source_client: Option<(String, String, ClClient)> = None; // First available CL for events + + if let Some(endpoints) = config.cl_endpoints.as_ref() { + for endpoint in endpoints { + let url = match Url::parse(&endpoint.url) { + Ok(u) => u, + Err(e) => { + warn!(name = %endpoint.name, error = %e, "Invalid CL endpoint URL"); + continue; + } + }; + let client = ClClient::new(url); + + match client.is_zkvm_enabled().await { + Ok(true) => { + info!(name = %endpoint.name, "CL endpoint has zkvm enabled (proof target)"); + zkvm_clients.push((endpoint.name.clone(), client)); + } + Ok(false) => { + info!(name = %endpoint.name, "CL endpoint does not have zkvm enabled"); + // Use first non-zkvm CL as event source + if event_source_client.is_none() { + info!(name = %endpoint.name, "Using as event source"); + event_source_client = + Some((endpoint.name.clone(), endpoint.url.clone(), client)); + } + } + Err(e) => { + warn!(name = %endpoint.name, error = %e, "Failed to check zkvm status"); + } + } + } + } + + info!( + zkvm_targets = zkvm_clients.len(), + "zkvm-enabled CL endpoints configured" + ); + + let Some(event_source) = event_source_client else { + error!("No non-zkvm CL endpoint available for event source"); + return Ok(()); + }; + info!(name = %event_source.0, "CL event source configured"); + + let num_proofs = config.num_proofs.unwrap_or(2) as usize; + + // Set up block storage + let storage = config.output_dir.as_ref().map(|dir| { + BlockStorage::new( + dir, + config.chain.as_deref().unwrap_or("unknown"), + config.retain, + ) + }); + + // Cache for 
EL blocks (keyed by block_hash) + let el_cache = Arc::new(Mutex::new(ElBlockCache::new(Duration::from_secs(60)))); + + // Channels for events + let (el_tx, mut el_rx) = tokio::sync::mpsc::channel::(100); + let (cl_tx, mut cl_rx) = tokio::sync::mpsc::channel::(100); + + // Spawn EL subscription tasks + for endpoint in config.endpoints.clone() { + let tx = el_tx.clone(); + let name = endpoint.name.clone(); + let ws_url = endpoint.el_ws_url.clone(); + + tokio::spawn(async move { + info!(name = %name, "Connecting to EL WebSocket"); + + let stream = match subscribe_blocks(&ws_url).await { + Ok(s) => s, + Err(e) => { + error!(name = %name, error = %e, "Failed to subscribe to EL"); + return; + } + }; + + info!(name = %name, "Subscribed to EL newHeads"); + let mut stream = pin!(stream); + + while let Some(result) = stream.next().await { + match result { + Ok(header) => { + let event = ElBlockEvent { + endpoint_name: name.clone(), + block_number: header.number, + block_hash: format!("{:?}", header.hash), + }; + if tx.send(event).await.is_err() { + break; + } + } + Err(e) => { + error!(name = %name, error = %e, "EL stream error"); + } + } + } + warn!(name = %name, "EL WebSocket stream ended"); + }); + } + + let (es_name, es_url, es_client) = event_source; + let source_client_for_monitor = es_client.clone(); + + // Spawn CL subscription task for the event source (non-zkvm CL) + { + let tx = cl_tx.clone(); + + tokio::spawn(async move { + info!(name = %es_name, "Connecting to CL SSE"); + + let stream = match subscribe_cl_events(&es_url) { + Ok(s) => s, + Err(e) => { + error!(name = %es_name, error = %e, "Failed to subscribe to CL events"); + return; + } + }; + + info!(name = %es_name, "Subscribed to CL head events"); + let mut stream = pin!(stream); + + while let Some(result) = stream.next().await { + match result { + Ok(ClEvent::Head(head)) => { + let slot: u64 = match head.slot.parse() { + Ok(slot) => slot, + Err(e) => { + warn!( + name = %es_name, + error = %e, + slot = 
%head.slot, + "Invalid head slot value" + ); + continue; + } + }; + let block_root = head.block.clone(); + + // Fetch the execution block hash for this beacon block + let exec_hash = match es_client.get_block_execution_hash(&block_root).await + { + Ok(Some(hash)) => hash, + Ok(None) => { + debug!(name = %es_name, slot = slot, "No execution hash for block"); + continue; + } + Err(e) => { + debug!(name = %es_name, error = %e, "Failed to get execution hash"); + continue; + } + }; + + let event = ClBlockEvent { + cl_name: es_name.clone(), + slot, + block_root, + execution_block_hash: exec_hash, + }; + if tx.send(event).await.is_err() { + break; + } + } + Ok(ClEvent::Block(_)) => { + // We use head events primarily + } + Err(e) => { + error!(name = %es_name, error = %e, "CL stream error"); + } + } + } + warn!(name = %es_name, "CL SSE stream ended"); + }); + } + + drop(el_tx); + drop(cl_tx); + + // Create a timer for periodic monitoring and backfill (500ms for fast catch-up) + let mut monitor_interval = tokio::time::interval(Duration::from_millis(500)); + monitor_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + info!("Waiting for events (with monitoring every 500ms)"); + + // Process events from both EL and CL + loop { + tokio::select! 
{ + // Periodic monitoring and backfill + _ = monitor_interval.tick() => { + // Monitor zkvm CL status + let statuses = monitor_zkvm_status(&source_client_for_monitor, &zkvm_clients).await; + + for status in &statuses { + if status.gap < -5 { + // More than 5 slots behind - log warning and backfill + warn!( + name = %status.name, + head_slot = status.head_slot, + gap = status.gap, + "zkvm CL is behind, starting backfill" + ); + + // Find the client and backfill + if let Some((_, client)) = zkvm_clients.iter().find(|(n, _)| n == &status.name) { + backfill_proofs( + &source_client_for_monitor, + client, + &status.name, + num_proofs, + 20, // Max 20 slots per backfill cycle + storage.as_ref(), + ).await; + } + } else if status.gap < 0 { + // Slightly behind - just log + debug!( + name = %status.name, + head_slot = status.head_slot, + gap = status.gap, + "zkvm CL slightly behind" + ); + } else { + // In sync or ahead + debug!( + name = %status.name, + head_slot = status.head_slot, + gap = status.gap, + "zkvm CL in sync" + ); + } + } + } + + Some(el_event) = el_rx.recv() => { + info!( + name = %el_event.endpoint_name, + number = el_event.block_number, + hash = %el_event.block_hash, + "EL block received" + ); + + // Find the endpoint and fetch block + witness + let Some(endpoint) = config.endpoints.iter().find(|e| e.name == el_event.endpoint_name) else { + continue; + }; + + let Ok(el_url) = Url::parse(&endpoint.el_url) else { + continue; + }; + let el_client = ElClient::new(el_url); + + // Fetch block and witness + let (block, gzipped_block) = match el_client.get_block_by_hash(&el_event.block_hash).await { + Ok(Some(data)) => data, + Ok(None) => { + warn!(number = el_event.block_number, "Block not found"); + continue; + } + Err(e) => { + error!(number = el_event.block_number, error = %e, "Failed to fetch block"); + continue; + } + }; + + let (witness, gzipped_witness) = match el_client.get_execution_witness(el_event.block_number).await { + Ok(Some(data)) => data, + 
Ok(None) => { + warn!(number = el_event.block_number, "Witness not found"); + continue; + } + Err(e) => { + error!(number = el_event.block_number, error = %e, "Failed to fetch witness"); + continue; + } + }; + + info!( + number = el_event.block_number, + block_gzipped = gzipped_block.len(), + witness_gzipped = gzipped_witness.len(), + "Fetched block and witness" + ); + + // Save to disk if storage is configured + if let Some(ref storage) = storage { + let combined = serde_json::json!({ + "block": block, + "witness": witness, + }); + let combined_bytes = serde_json::to_vec(&combined)?; + let gzipped_combined = execution_witness_sentry::compress_gzip(&combined_bytes)?; + + if let Err(e) = storage.save_block(&block, &gzipped_combined) { + error!(error = %e, "Failed to save block"); + } else { + info!( + number = el_event.block_number, + separate = gzipped_block.len() + gzipped_witness.len(), + combined = gzipped_combined.len(), + "Saved" + ); + } + } + + // Cache the EL block for correlation with CL events + let mut cache = el_cache.lock().await; + cache.insert( + el_event.block_hash.clone(), + el_event.block_number, + el_event.endpoint_name.clone(), + ); + } + + Some(cl_event) = cl_rx.recv() => { + info!( + source = %cl_event.cl_name, + slot = cl_event.slot, + block_root = %cl_event.block_root, + exec_hash = %cl_event.execution_block_hash, + "CL head event received" + ); + + // Check if we have the EL block cached + let cached_block_number = { + let cache = el_cache.lock().await; + cache.get(&cl_event.execution_block_hash).map(|c| c.block_number) + }; + + if cached_block_number.is_none() { + debug!( + exec_hash = %cl_event.execution_block_hash, + "EL block not in cache, skipping proof submission" + ); + continue; + } + let block_number = cached_block_number.unwrap(); + + // Generate proofs once (for all CLs and for saving) + let mut generated_proofs: Vec = Vec::new(); + for proof_id in 0..num_proofs { + generated_proofs.push(SavedProof { + proof_id: proof_id as u8, + 
slot: cl_event.slot, + block_hash: cl_event.execution_block_hash.clone(), + block_root: cl_event.block_root.clone(), + proof_data: generate_random_proof(proof_id as u32), + }); + } + + // Save proofs to disk for backfill + if let Some(ref storage) = storage { + if let Err(e) = storage.save_proofs( + block_number, + cl_event.slot, + &cl_event.block_root, + &cl_event.execution_block_hash, + &generated_proofs, + ) { + warn!(slot = cl_event.slot, error = %e, "Failed to save proofs to disk"); + } else { + debug!(slot = cl_event.slot, block_number = block_number, "Saved proofs to disk"); + } + } + + // Submit proofs to ALL zkvm-enabled CL clients + for (cl_name, cl_client) in &zkvm_clients { + for saved_proof in &generated_proofs { + let proof = ExecutionProof { + proof_id: saved_proof.proof_id, + slot: saved_proof.slot, + block_hash: saved_proof.block_hash.clone(), + block_root: saved_proof.block_root.clone(), + proof_data: saved_proof.proof_data.clone(), + }; + + match cl_client.submit_execution_proof(&proof).await { + Ok(()) => { + info!( + cl = %cl_name, + slot = cl_event.slot, + proof_id = saved_proof.proof_id, + "Proof submitted" + ); + } + Err(e) => { + debug!( + cl = %cl_name, + slot = cl_event.slot, + proof_id = saved_proof.proof_id, + error = %e, + "Proof submission failed" + ); + } + } + } + } + + // Remove from cache after submission + let mut cache = el_cache.lock().await; + cache.remove(&cl_event.execution_block_hash); + } + + else => break, + } + } + + Ok(()) +} diff --git a/execution-witness-sentry/src/rpc.rs b/execution-witness-sentry/src/rpc.rs new file mode 100644 index 00000000000..9722ee67085 --- /dev/null +++ b/execution-witness-sentry/src/rpc.rs @@ -0,0 +1,393 @@ +//! JSON-RPC client for execution layer nodes. + +use alloy_rpc_types_eth::Block; +use serde::{Deserialize, Serialize}; +use url::Url; + +use crate::error::{Error, Result}; +use crate::storage::compress_gzip; + +/// JSON-RPC request structure. 
+#[derive(Debug, Clone, Serialize)] +struct JsonRpcRequest { + jsonrpc: &'static str, + method: &'static str, + params: T, + id: u64, +} + +/// JSON-RPC response structure. +#[derive(Debug, Clone, Deserialize)] +pub struct JsonRpcResponse { + pub result: Option, + pub error: Option, +} + +/// JSON-RPC error structure. +#[derive(Debug, Clone, Deserialize)] +pub struct JsonRpcError { + pub code: i64, + pub message: String, +} + +/// Execution layer JSON-RPC client. +pub struct ElClient { + url: Url, + http_client: reqwest::Client, +} + +impl ElClient { + /// Create a new EL client. + pub fn new(url: Url) -> Self { + Self { + url, + http_client: reqwest::Client::new(), + } + } + + /// Fetch a block by hash. Returns the block and its gzipped JSON. + pub async fn get_block_by_hash(&self, block_hash: &str) -> Result)>> { + let request = JsonRpcRequest { + jsonrpc: "2.0", + method: "eth_getBlockByHash", + params: (block_hash, false), + id: 1, + }; + + let response = self + .http_client + .post(self.url.clone()) + .json(&request) + .send() + .await?; + + if !response.status().is_success() { + return Err(Error::Rpc { + code: response.status().as_u16() as i64, + message: response.text().await.unwrap_or_default(), + }); + } + + let rpc_response: JsonRpcResponse = response.json().await?; + + if let Some(error) = rpc_response.error { + return Err(Error::Rpc { + code: error.code, + message: error.message, + }); + } + + match rpc_response.result { + Some(block) => { + let json_bytes = serde_json::to_vec(&block)?; + let gzipped = compress_gzip(&json_bytes)?; + Ok(Some((block, gzipped))) + } + None => Ok(None), + } + } + + /// Fetch execution witness for a block. Returns the witness and its gzipped JSON. 
+ pub async fn get_execution_witness( + &self, + block_number: u64, + ) -> Result)>> { + let block_num_hex = format!("0x{:x}", block_number); + let request = JsonRpcRequest { + jsonrpc: "2.0", + method: "debug_executionWitness", + params: (block_num_hex,), + id: 1, + }; + + let response = self + .http_client + .post(self.url.clone()) + .json(&request) + .send() + .await?; + + if !response.status().is_success() { + return Err(Error::Rpc { + code: response.status().as_u16() as i64, + message: response.text().await.unwrap_or_default(), + }); + } + + let rpc_response: JsonRpcResponse = response.json().await?; + + if let Some(error) = rpc_response.error { + return Err(Error::Rpc { + code: error.code, + message: error.message, + }); + } + + match rpc_response.result { + Some(witness) => { + let json_bytes = serde_json::to_vec(&witness)?; + let gzipped = compress_gzip(&json_bytes)?; + Ok(Some((witness, gzipped))) + } + None => Ok(None), + } + } +} + +/// Execution proof to submit to CL nodes. +#[derive(Debug, Clone, Serialize)] +pub struct ExecutionProof { + pub proof_id: u8, + pub slot: u64, + pub block_hash: String, + pub block_root: String, + pub proof_data: Vec, +} + +/// Consensus layer HTTP API client. +#[derive(Clone)] +pub struct ClClient { + url: Url, + http_client: reqwest::Client, +} + +/// Block response with execution payload. +#[derive(Debug, Clone, Deserialize)] +pub struct BlockResponse { + pub data: BlockData, +} + +/// Block data. +#[derive(Debug, Clone, Deserialize)] +pub struct BlockData { + pub message: BlockMessage, +} + +/// Block message. +#[derive(Debug, Clone, Deserialize)] +pub struct BlockMessage { + pub body: BlockBody, +} + +/// Block body. +#[derive(Debug, Clone, Deserialize)] +pub struct BlockBody { + pub execution_payload: Option, +} + +/// Execution payload (minimal fields). +#[derive(Debug, Clone, Deserialize)] +pub struct ExecutionPayload { + pub block_hash: String, +} + +/// Syncing status response. 
+#[derive(Debug, Clone, Deserialize)] +pub struct SyncingResponse { + pub data: SyncingData, +} + +/// Syncing status data. +#[derive(Debug, Clone, Deserialize)] +pub struct SyncingData { + pub head_slot: String, + pub is_syncing: bool, + pub is_optimistic: Option, +} + +/// Block header response. +#[derive(Debug, Clone, Deserialize)] +pub struct BlockHeaderResponse { + pub data: BlockHeaderData, +} + +/// Block header data. +#[derive(Debug, Clone, Deserialize)] +pub struct BlockHeaderData { + pub root: String, +} + +/// Node identity response. +#[derive(Debug, Clone, Deserialize)] +pub struct IdentityResponse { + pub data: IdentityData, +} + +/// Node identity data. +#[derive(Debug, Clone, Deserialize)] +pub struct IdentityData { + pub enr: String, +} + +impl ClClient { + /// Create a new CL client. + pub fn new(url: Url) -> Self { + Self { + url, + http_client: reqwest::Client::new(), + } + } + + /// Get node syncing status. + pub async fn get_syncing(&self) -> Result { + let url = self.url.join("eth/v1/node/syncing")?; + let response = self.http_client.get(url).send().await?; + Ok(response.json().await?) + } + + /// Get block header for a slot. + pub async fn get_block_header(&self, slot: u64) -> Result> { + let url = self.url.join(&format!("eth/v1/beacon/headers/{}", slot))?; + let response = self.http_client.get(url).send().await?; + + if response.status() == reqwest::StatusCode::NOT_FOUND { + return Ok(None); + } + + Ok(Some(response.json().await?)) + } + + /// Submit an execution proof. 
+ pub async fn submit_execution_proof(&self, proof: &ExecutionProof) -> Result<()> { + let url = self.url.join("eth/v1/beacon/pool/execution_proofs")?; + + let response = self.http_client.post(url).json(proof).send().await?; + + if !response.status().is_success() { + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + return Err(Error::Rpc { + code: status.as_u16() as i64, + message: body, + }); + } + + Ok(()) + } + + /// Get node identity (including ENR). + pub async fn get_identity(&self) -> Result { + let url = self.url.join("eth/v1/node/identity")?; + let response = self.http_client.get(url).send().await?; + Ok(response.json().await?) + } + + /// Check if the node has zkvm enabled by inspecting its ENR. + pub async fn is_zkvm_enabled(&self) -> Result { + let identity = self.get_identity().await?; + Ok(enr_has_zkvm(&identity.data.enr)) + } + + /// Get the execution block hash for a beacon block. + pub async fn get_block_execution_hash(&self, block_root: &str) -> Result> { + let url = self + .url + .join(&format!("eth/v2/beacon/blocks/{}", block_root))?; + let response = self.http_client.get(url).send().await?; + + if response.status() == reqwest::StatusCode::NOT_FOUND { + return Ok(None); + } + + let block_response: BlockResponse = response.json().await?; + Ok(block_response + .data + .message + .body + .execution_payload + .map(|p| p.block_hash)) + } + + /// Get the current head slot. + pub async fn get_head_slot(&self) -> Result { + let syncing = self.get_syncing().await?; + syncing + .data + .head_slot + .parse() + .map_err(|e| Error::Config(format!("Invalid head slot: {}", e))) + } + + /// Get block info (slot, block_root, execution_block_hash) for a given slot. + /// Returns None if the slot is empty (no block). 
+ pub async fn get_block_info(&self, slot: u64) -> Result> { + let url = self.url.join(&format!("eth/v2/beacon/blocks/{}", slot))?; + let response = self.http_client.get(url).send().await?; + + if response.status() == reqwest::StatusCode::NOT_FOUND { + return Ok(None); + } + + if !response.status().is_success() { + return Err(Error::Rpc { + code: response.status().as_u16() as i64, + message: response.text().await.unwrap_or_default(), + }); + } + + let block_response: BlockResponse = response.json().await?; + let execution_block_hash = block_response + .data + .message + .body + .execution_payload + .map(|p| p.block_hash); + + // Get the block root from headers endpoint + let header_url = self.url.join(&format!("eth/v1/beacon/headers/{}", slot))?; + let header_response = self.http_client.get(header_url).send().await?; + + if header_response.status() == reqwest::StatusCode::NOT_FOUND { + return Ok(None); + } + + let header: BlockHeaderResponse = header_response.json().await?; + + Ok(Some(BlockInfo { + slot, + block_root: header.data.root, + execution_block_hash, + })) + } +} + +/// Block info for backfill. +#[derive(Debug, Clone)] +pub struct BlockInfo { + pub slot: u64, + pub block_root: String, + pub execution_block_hash: Option, +} + +/// The ENR field specifying whether zkVM execution proofs are enabled. +const ZKVM_ENABLED_ENR_KEY: &str = "zkvm"; + +/// Check if an ENR string contains the zkvm flag. +fn enr_has_zkvm(enr_str: &str) -> bool { + use discv5::enr::{CombinedKey, Enr}; + use std::str::FromStr; + + match Enr::::from_str(enr_str) { + Ok(enr) => enr + .get_decodable::(ZKVM_ENABLED_ENR_KEY) + .and_then(|result| result.ok()) + .unwrap_or(false), + Err(_) => false, + } +} + +/// Generate random proof bytes. 
+pub fn generate_random_proof(proof_id: u32) -> Vec { + use std::time::{SystemTime, UNIX_EPOCH}; + let seed = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_nanos() as u64; + + let mut proof = vec![0u8; 32]; + for (i, byte) in proof.iter_mut().enumerate() { + *byte = ((seed >> (i % 8)) ^ (i as u64)) as u8; + } + proof[31] = proof_id as u8; + proof +} diff --git a/execution-witness-sentry/src/storage.rs b/execution-witness-sentry/src/storage.rs new file mode 100644 index 00000000000..58a80d449d4 --- /dev/null +++ b/execution-witness-sentry/src/storage.rs @@ -0,0 +1,248 @@ +//! Block data storage utilities. + +use std::io::{Read, Write}; +use std::path::{Path, PathBuf}; + +use alloy_rpc_types_eth::Block; +use flate2::Compression; +use flate2::read::GzDecoder; +use flate2::write::GzEncoder; +use serde::{Deserialize, Serialize}; + +use crate::error::Result; + +/// Metadata stored alongside block data. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BlockMetadata { + /// EL block hash + pub block_hash: String, + /// EL block number + pub block_number: u64, + /// Gas used in the block + pub gas_used: u64, + /// CL slot number (if known) + #[serde(skip_serializing_if = "Option::is_none")] + pub slot: Option, + /// CL beacon block root (if known) + #[serde(skip_serializing_if = "Option::is_none")] + pub block_root: Option, + /// Number of proofs stored + #[serde(default)] + pub num_proofs: usize, +} + +/// A saved proof that can be loaded for backfill. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SavedProof { + pub proof_id: u8, + pub slot: u64, + pub block_hash: String, + pub block_root: String, + pub proof_data: Vec, +} + +/// Compress data using gzip. +pub fn compress_gzip(data: &[u8]) -> Result> { + let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); + encoder.write_all(data)?; + Ok(encoder.finish()?) +} + +/// Decompress gzip data. 
+pub fn decompress_gzip(data: &[u8]) -> Result> { + let mut decoder = GzDecoder::new(data); + let mut decompressed = Vec::new(); + decoder.read_to_end(&mut decompressed)?; + Ok(decompressed) +} + +/// Load block data from a gzipped JSON file. +pub fn load_block_data(path: impl AsRef) -> Result { + let compressed = std::fs::read(path)?; + let decompressed = decompress_gzip(&compressed)?; + Ok(serde_json::from_slice(&decompressed)?) +} + +/// Manages block data storage on disk. +pub struct BlockStorage { + output_dir: PathBuf, + chain: String, + retain: Option, +} + +impl BlockStorage { + /// Create a new block storage manager. + pub fn new( + output_dir: impl Into, + chain: impl Into, + retain: Option, + ) -> Self { + Self { + output_dir: output_dir.into(), + chain: chain.into(), + retain, + } + } + + /// Get the directory path for a block number. + pub fn block_dir(&self, block_number: u64) -> PathBuf { + self.output_dir + .join(&self.chain) + .join(block_number.to_string()) + } + + /// Save block data to disk (without CL info - will be updated later). 
+ pub fn save_block(&self, block: &Block, combined_data: &[u8]) -> Result<()> { + let block_number = block.header.number; + let block_hash = format!("{:?}", block.header.hash); + let gas_used = block.header.gas_used; + + let block_dir = self.block_dir(block_number); + std::fs::create_dir_all(&block_dir)?; + + // Write metadata (without CL info initially) + let metadata = BlockMetadata { + block_hash, + block_number, + gas_used, + slot: None, + block_root: None, + num_proofs: 0, + }; + let metadata_path = block_dir.join("metadata.json"); + std::fs::write(metadata_path, serde_json::to_string_pretty(&metadata)?)?; + + // Write combined block + witness data + let data_path = block_dir.join("data.json.gz"); + std::fs::write(data_path, combined_data)?; + + // Clean up old blocks if retention is configured + if let Some(retain) = self.retain + && block_number > retain + { + self.delete_old_block(block_number - retain)?; + } + + Ok(()) + } + + /// Save proofs and update metadata with CL info. + /// This is called when we receive CL head event with slot/block_root. + pub fn save_proofs( + &self, + block_number: u64, + slot: u64, + block_root: &str, + block_hash: &str, + proofs: &[SavedProof], + ) -> Result<()> { + let block_dir = self.block_dir(block_number); + + // Create dir if it doesn't exist (in case block wasn't saved yet) + std::fs::create_dir_all(&block_dir)?; + + // Load existing metadata or create new + let metadata_path = block_dir.join("metadata.json"); + let mut metadata = if metadata_path.exists() { + let content = std::fs::read_to_string(&metadata_path)?; + serde_json::from_str(&content)? 
+ } else { + BlockMetadata { + block_hash: block_hash.to_string(), + block_number, + gas_used: 0, + slot: None, + block_root: None, + num_proofs: 0, + } + }; + + // Update with CL info + metadata.slot = Some(slot); + metadata.block_root = Some(block_root.to_string()); + metadata.num_proofs = proofs.len(); + + // Save updated metadata + std::fs::write(&metadata_path, serde_json::to_string_pretty(&metadata)?)?; + + // Save proofs + let proofs_path = block_dir.join("proofs.json"); + std::fs::write(&proofs_path, serde_json::to_string_pretty(&proofs)?)?; + + Ok(()) + } + + /// Load proofs for a given slot. + /// Searches for a block directory that has matching slot in metadata. + pub fn load_proofs_by_slot( + &self, + slot: u64, + ) -> Result)>> { + let chain_dir = self.output_dir.join(&self.chain); + if !chain_dir.exists() { + return Ok(None); + } + + // Iterate through block directories to find one with matching slot + for entry in std::fs::read_dir(&chain_dir)? { + let entry = entry?; + let block_dir = entry.path(); + + if !block_dir.is_dir() { + continue; + } + + let metadata_path = block_dir.join("metadata.json"); + if !metadata_path.exists() { + continue; + } + + let content = std::fs::read_to_string(&metadata_path)?; + let metadata: BlockMetadata = match serde_json::from_str(&content) { + Ok(m) => m, + Err(_) => continue, + }; + + if metadata.slot == Some(slot) { + // Found matching slot, load proofs + let proofs_path = block_dir.join("proofs.json"); + if proofs_path.exists() { + let proofs_content = std::fs::read_to_string(&proofs_path)?; + let proofs: Vec = serde_json::from_str(&proofs_content)?; + return Ok(Some((metadata, proofs))); + } else { + return Ok(Some((metadata, vec![]))); + } + } + } + + Ok(None) + } + + /// Load metadata for a given block number. 
+ pub fn load_metadata(&self, block_number: u64) -> Result> { + let block_dir = self.block_dir(block_number); + let metadata_path = block_dir.join("metadata.json"); + + if !metadata_path.exists() { + return Ok(None); + } + + let content = std::fs::read_to_string(&metadata_path)?; + Ok(Some(serde_json::from_str(&content)?)) + } + + /// Delete an old block directory. + fn delete_old_block(&self, block_number: u64) -> Result<()> { + let old_dir = self.block_dir(block_number); + if old_dir.exists() { + std::fs::remove_dir_all(old_dir)?; + } + Ok(()) + } + + /// Get the chain directory path. + pub fn chain_dir(&self) -> PathBuf { + self.output_dir.join(&self.chain) + } +} diff --git a/execution-witness-sentry/src/subscription.rs b/execution-witness-sentry/src/subscription.rs new file mode 100644 index 00000000000..3882561590b --- /dev/null +++ b/execution-witness-sentry/src/subscription.rs @@ -0,0 +1,45 @@ +//! WebSocket subscription for new block headers. + +use std::pin::Pin; +use std::task::{Context, Poll}; + +use alloy_provider::{Provider, ProviderBuilder, WsConnect}; +use alloy_rpc_types_eth::Header; +use futures::Stream; + +use crate::error::{Error, Result}; + +/// Subscription stream that keeps the provider alive. +pub struct BlockSubscription

{ + #[allow(dead_code)] + provider: P, + stream: Pin + Send>>, +} + +impl

Unpin for BlockSubscription

{} + +impl Stream for BlockSubscription

{ + type Item = Result

; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.stream.as_mut().poll_next(cx).map(|opt| opt.map(Ok)) + } +} + +/// Subscribe to new block headers via WebSocket. +pub async fn subscribe_blocks(ws_url: &str) -> Result> + Send> { + let ws = WsConnect::new(ws_url); + let provider = ProviderBuilder::new() + .connect_ws(ws) + .await + .map_err(|e| Error::WebSocket(format!("WebSocket connection failed: {}", e)))?; + + let subscription = provider + .subscribe_blocks() + .await + .map_err(|e| Error::WebSocket(format!("Block subscription failed: {}", e)))?; + + let stream = Box::pin(subscription.into_stream()); + + Ok(BlockSubscription { provider, stream }) +} diff --git a/execution-witness-sentry/tests/cl_subscription.rs b/execution-witness-sentry/tests/cl_subscription.rs new file mode 100644 index 00000000000..40d86890eb0 --- /dev/null +++ b/execution-witness-sentry/tests/cl_subscription.rs @@ -0,0 +1,11 @@ +use execution_witness_sentry::subscribe_cl_events; + +#[test] +fn subscribe_cl_events_accepts_base_url_without_trailing_slash() { + assert!(subscribe_cl_events("http://localhost:5052").is_ok()); +} + +#[test] +fn subscribe_cl_events_accepts_base_url_with_trailing_slash() { + assert!(subscribe_cl_events("http://localhost:5052/").is_ok()); +} diff --git a/scripts/local_testnet/network_params.yaml b/scripts/local_testnet/network_params.yaml index a048674e630..79ff7df07b6 100644 --- a/scripts/local_testnet/network_params.yaml +++ b/scripts/local_testnet/network_params.yaml @@ -21,11 +21,7 @@ network_params: seconds_per_slot: 6 snooper_enabled: false global_log_level: debug -additional_services: - - dora - - spamoor - - prometheus_grafana - - tempo +additional_services: [] spamoor_params: image: ethpandaops/spamoor:master spammers: @@ -34,4 +30,4 @@ spamoor_params: throughput: 200 - scenario: blobs config: - throughput: 20 \ No newline at end of file + throughput: 20 diff --git 
a/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml b/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml index 11439e6d0eb..b7d7e7f62ea 100644 --- a/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml +++ b/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml @@ -1,25 +1,23 @@ -# Mixed configuration: 3 normal nodes, 1 node with dummy EL +# Mixed configuration: 3 normal nodes with reth, 3 zkvm nodes with dummy_el participants: - # Nodes with real execution layer (nodes 1-3) - - el_type: geth - el_image: ethereum/client-go:latest + # Normal nodes with real EL (nodes 1-3) + - el_type: reth + el_image: ghcr.io/paradigmxyz/reth:latest cl_type: lighthouse cl_image: lighthouse:local cl_extra_params: - - --activate-zkvm - - --target-peers=3 + - --target-peers=5 count: 3 - # Node with dummy execution layer (node 4) - # TODO(zkproofs): Currently there is no way to add no client here - # We likely want to use our dummy zkvm EL here + # ZKVM nodes with dummy EL (nodes 4-6) + # Uses dummy_el wrapped as geth - returns SYNCING for all engine calls - el_type: geth el_image: dummy_el:local cl_type: lighthouse cl_image: lighthouse:local cl_extra_params: - --activate-zkvm - - --target-peers=3 - count: 1 + - --target-peers=5 + count: 3 network_params: electra_fork_epoch: 0 fulu_fork_epoch: 1 @@ -29,3 +27,10 @@ snooper_enabled: false additional_services: - dora - prometheus_grafana +port_publisher: + el: + enabled: true + public_port_start: 32000 + cl: + enabled: true + public_port_start: 33000 \ No newline at end of file diff --git a/scripts/local_testnet/network_params_simple.yaml b/scripts/local_testnet/network_params_simple.yaml new file mode 100644 index 00000000000..3f2ca40f371 --- /dev/null +++ b/scripts/local_testnet/network_params_simple.yaml @@ -0,0 +1,19 @@ +# Simple testnet config for testing EL listener with 2 nodes using Reth +participants: + - cl_type: lighthouse + cl_image: lighthouse:local + el_type: reth + count: 2 
+network_params: + electra_fork_epoch: 0 + seconds_per_slot: 6 +snooper_enabled: false +global_log_level: info +additional_services: [] +port_publisher: + el: + enabled: true + public_port_start: 32000 + cl: + enabled: true + public_port_start: 33000 \ No newline at end of file From c233c8ee0be47981dd765069b121987dd7becb17 Mon Sep 17 00:00:00 2001 From: kevaundray Date: Wed, 31 Dec 2025 23:54:55 +0000 Subject: [PATCH 59/67] Add endpoint to get execution proofs (#10) --- Cargo.lock | 1 + beacon_node/http_api/Cargo.toml | 1 + beacon_node/http_api/src/block_id.rs | 62 +++++++++++++++++++-- beacon_node/http_api/src/lib.rs | 49 +++++++++++++++++ beacon_node/http_api/tests/tests.rs | 80 +++++++++++++++++++++++++++- common/eth2/src/lib.rs | 35 ++++++++++++ common/eth2/src/types.rs | 7 +++ 7 files changed, 231 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b10d62ef57c..f78e8090243 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4401,6 +4401,7 @@ dependencies = [ "serde", "serde_json", "slot_clock", + "ssz_types", "state_processing", "store", "sysinfo", diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 571dab10273..c559b98edb9 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -39,6 +39,7 @@ sensitive_url = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } slot_clock = { workspace = true } +ssz_types = { workspace = true } state_processing = { workspace = true } store = { workspace = true } sysinfo = { workspace = true } diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index ea8b47f91ef..382eb329a85 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -5,14 +5,17 @@ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkip use eth2::beacon_response::{ExecutionOptimisticFinalizedMetadata, UnversionedResponse}; use eth2::types::BlockId as 
CoreBlockId; use eth2::types::DataColumnIndicesQuery; -use eth2::types::{BlobIndicesQuery, BlobWrapper, BlobsVersionedHashesQuery}; +use eth2::types::{ + BlobIndicesQuery, BlobWrapper, BlobsVersionedHashesQuery, ExecutionProofIdsQuery, +}; use fixed_bytes::FixedBytesExtended; +use ssz_types::RuntimeVariableList; use std::fmt; use std::str::FromStr; use std::sync::Arc; use types::{ - BlobSidecarList, DataColumnSidecarList, EthSpec, ForkName, Hash256, SignedBeaconBlock, - SignedBlindedBeaconBlock, Slot, + BlobSidecarList, DataColumnSidecarList, EthSpec, ExecutionProof, ExecutionProofId, ForkName, + Hash256, MAX_PROOFS, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot, }; use warp::Rejection; @@ -30,6 +33,12 @@ type DataColumnsResponse = ( Finalized, ); +type ExecutionProofsResponse = ( + RuntimeVariableList>, + ExecutionOptimistic, + Finalized, +); + impl BlockId { pub fn from_slot(slot: Slot) -> Self { Self(CoreBlockId::Slot(slot)) @@ -312,6 +321,53 @@ impl BlockId { )) } + pub fn get_execution_proofs( + &self, + query: ExecutionProofIdsQuery, + chain: &BeaconChain, + ) -> Result { + if !chain.spec.is_zkvm_enabled() { + return Err(warp_utils::reject::custom_bad_request( + "zkvm is not enabled for this node".to_string(), + )); + } + + let (root, execution_optimistic, finalized) = self.root(chain)?; + let _block = BlockId::blinded_block_by_root(&root, chain)?.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!("beacon block with root {}", root)) + })?; + + let mut proofs = chain + .store + .get_execution_proofs(&root) + .map_err(warp_utils::reject::unhandled_error)?; + + if proofs.is_empty() { + return Err(warp_utils::reject::custom_not_found(format!( + "no execution proofs stored for block {root}" + ))); + } + + let proof_ids = query + .proof_ids + .map(|ids| { + ids.into_iter() + .map(ExecutionProofId::new) + .collect::, _>>() + }) + .transpose() + .map_err(warp_utils::reject::custom_bad_request)?; + + if let Some(proof_ids) = proof_ids { + 
proofs.retain(|proof| proof_ids.contains(&proof.proof_id)); + } + + let proof_list = RuntimeVariableList::new(proofs, MAX_PROOFS) + .map_err(|e| warp_utils::reject::custom_server_error(format!("{:?}", e)))?; + + Ok((proof_list, execution_optimistic, finalized)) + } + #[allow(clippy::type_complexity)] pub fn get_blinded_block_and_blob_list_filtered( &self, diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 8139e47985f..01c5314b1de 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -210,6 +210,7 @@ pub fn prometheus_metrics() -> warp::filters::log::Log( }, ); + // GET beacon/execution_proofs/{block_id} + let get_execution_proofs = eth_v1 + .clone() + .and(warp::path("beacon")) + .and(warp::path("execution_proofs")) + .and(block_id_or_err) + .and(warp::path::end()) + .and(multi_key_query::()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp::header::optional::("accept")) + .then( + |block_id: BlockId, + proof_ids_res: Result, + task_spawner: TaskSpawner, + chain: Arc>, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let proof_ids = proof_ids_res?; + let (proofs, execution_optimistic, finalized) = + block_id.get_execution_proofs(proof_ids, &chain)?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(proofs.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => { + let res = execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::No, + execution_optimistic, + finalized, + &proofs, + )?; + Ok(warp::reply::json(&res).into_response()) + } + } + }) + }, + ); + /* * beacon/pool */ @@ -3287,6 +3335,7 @@ pub fn serve( .uor(get_beacon_block_root) .uor(get_blob_sidecars) .uor(get_blobs) + .uor(get_execution_proofs) 
.uor(get_beacon_pool_attestations) .uor(get_beacon_pool_attester_slashings) .uor(get_beacon_pool_proposer_slashings) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index b33a7ad23fe..ac4f42c07e9 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -118,7 +118,9 @@ impl ApiTesterConfig { } fn with_zkvm(mut self) -> Self { + // TODO(zkproofs): shouldn't need both of these to be enabled self.enable_zkvm = true; + self.spec.zkvm_enabled = true; self } } @@ -1962,6 +1964,60 @@ impl ApiTester { self } + pub async fn test_get_execution_proofs(self, filter: bool) -> Self { + let head = self.chain.head_snapshot(); + let block_root = head.beacon_block_root; + + let proof_ids = [ + ExecutionProofId::new(0).expect("Valid proof id"), + ExecutionProofId::new(1).expect("Valid proof id"), + ]; + let proofs = proof_ids + .iter() + .map(|proof_id| self.create_test_execution_proof_with_id(*proof_id)) + .collect::>(); + + self.chain + .store + .put_execution_proofs(&block_root, &proofs) + .unwrap(); + + let filter_ids = filter.then(|| vec![proof_ids[1].as_u8()]); + let result = match self + .client + .get_execution_proofs(CoreBlockId::Root(block_root), filter_ids.as_deref()) + .await + { + Ok(result) => result.unwrap().into_data(), + Err(e) => panic!("query failed incorrectly: {e:?}"), + }; + + if filter { + assert_eq!(result.len(), 1); + assert_eq!(result[0].proof_id, proof_ids[1]); + } else { + assert_eq!(result.len(), proofs.len()); + } + + self + } + + pub async fn test_get_execution_proofs_zkvm_disabled(self) -> Self { + let block_id = BlockId(CoreBlockId::Head); + let (block_root, _, _) = block_id.root(&self.chain).unwrap(); + let result = self + .client + .get_execution_proofs(CoreBlockId::Root(block_root), None) + .await; + + match result { + Ok(response) => panic!("query should fail: {response:?}"), + Err(e) => assert_eq!(e.status().unwrap(), 400), + } + + self + } + pub async fn 
test_get_blobs_post_fulu_full_node(self, versioned_hashes: bool) -> Self { let block_id = BlockId(CoreBlockId::Head); let (block_root, _, _) = block_id.root(&self.chain).unwrap(); @@ -2752,6 +2808,11 @@ impl ApiTester { /// Helper to create a test execution proof for the head block fn create_test_execution_proof(&self) -> ExecutionProof { + let proof_id = ExecutionProofId::new(0).expect("Valid proof id"); + self.create_test_execution_proof_with_id(proof_id) + } + + fn create_test_execution_proof_with_id(&self, proof_id: ExecutionProofId) -> ExecutionProof { let head = self.chain.head_snapshot(); let block_root = head.beacon_block_root; let slot = head.beacon_block.slot(); @@ -2763,7 +2824,6 @@ impl ApiTester { .map(|p| p.block_hash()) .unwrap_or_else(|_| ExecutionBlockHash::zero()); - let proof_id = ExecutionProofId::new(0).expect("Valid proof id"); let proof_data = vec![0u8; 32]; // Dummy proof data ExecutionProof::new(proof_id, slot, block_hash, block_root, proof_data) @@ -8168,6 +8228,24 @@ async fn get_blob_sidecars_pre_deneb() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_execution_proofs() { + ApiTester::new_with_zkvm() + .await + .test_get_execution_proofs(false) + .await + .test_get_execution_proofs(true) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_execution_proofs_zkvm_disabled() { + ApiTester::new() + .await + .test_get_execution_proofs_zkvm_disabled() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn post_validator_liveness_epoch() { ApiTester::new() diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 84d85d6c83e..b3ad86a1c93 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1297,6 +1297,17 @@ impl BeaconNodeHttpClient { Ok(path) } + /// Path for `v1/beacon/execution_proofs/{block_id}` + pub fn get_execution_proofs_path(&self, block_id: BlockId) -> Result { + let mut path = self.eth_path(V1)?; + 
path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("execution_proofs") + .push(&block_id.to_string()); + Ok(path) + } + /// Path for `v1/beacon/blinded_blocks/{block_id}` pub fn get_beacon_blinded_blocks_path(&self, block_id: BlockId) -> Result { let mut path = self.eth_path(V1)?; @@ -1376,6 +1387,30 @@ impl BeaconNodeHttpClient { .map(|opt| opt.map(BeaconResponse::Unversioned)) } + /// `GET v1/beacon/execution_proofs/{block_id}` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_execution_proofs( + &self, + block_id: BlockId, + proof_ids: Option<&[u8]>, + ) -> Result>>, Error> + { + let mut path = self.get_execution_proofs_path(block_id)?; + if let Some(proof_ids) = proof_ids { + let ids_string = proof_ids + .iter() + .map(|id| id.to_string()) + .collect::>() + .join(","); + path.query_pairs_mut().append_pair("proof_ids", &ids_string); + } + + self.get_opt(path) + .await + .map(|opt| opt.map(BeaconResponse::Unversioned)) + } + /// `GET v1/beacon/blinded_blocks/{block_id}` /// /// Returns `Ok(None)` on a 404 error. 
diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index b1a61ce00cc..b96ae7dd7a0 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -688,6 +688,13 @@ pub struct BlobIndicesQuery { pub indices: Option>, } +#[derive(Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct ExecutionProofIdsQuery { + #[serde(default, deserialize_with = "option_query_vec")] + pub proof_ids: Option>, +} + #[derive(Clone, Deserialize)] #[serde(deny_unknown_fields)] pub struct BlobsVersionedHashesQuery { From 97b10fef94288a09a6e62d73a54384821262d212 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Thu, 1 Jan 2026 02:59:59 +0000 Subject: [PATCH 60/67] remove proof gen only file --- .../network_params_proof_gen_only.sh | 155 ------------------ 1 file changed, 155 deletions(-) delete mode 100755 scripts/local_testnet/network_params_proof_gen_only.sh diff --git a/scripts/local_testnet/network_params_proof_gen_only.sh b/scripts/local_testnet/network_params_proof_gen_only.sh deleted file mode 100755 index 70c2c8f5c69..00000000000 --- a/scripts/local_testnet/network_params_proof_gen_only.sh +++ /dev/null @@ -1,155 +0,0 @@ -#!/bin/bash - -# Helper script for monitoring execution proof generation and gossip -# Usage: ./network_params_proof_gen_only.sh [command] -# ENCLAVE=my-testnet ./network_params_proof_gen_only.sh [command] -# -# Set ENCLAVE environment variable to use a different testnet. 
-# Default: local-testnet - -ENCLAVE="${ENCLAVE:-local-testnet}" - -# Color output -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -RED='\033[0;31m' -NC='\033[0m' # No Color - -case "${1:-help}" in - generation) - echo -e "${GREEN}=== Proof Generation and Publishing ===${NC}" - for i in 1 2 3 4; do - echo -e "\n${YELLOW}--- Node $i ---${NC}" - kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep -E "(Generating execution proof|Proof successfully published)" | tail -5 - done - ;; - - gossip-subscribe) - echo -e "${GREEN}=== ExecutionProof Topic Subscriptions ===${NC}" - for i in 1 2 3 4; do - echo -e "\n${YELLOW}--- Node $i ---${NC}" - kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Subscribed to topic.*execution_proof" - done - ;; - - gossip-receive) - echo -e "${GREEN}=== Received Execution Proofs via Gossip ===${NC}" - for i in 1 2 3 4; do - count=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Received execution proof via gossip" | wc -l) - echo -e "${YELLOW}Node $i:${NC} $count proofs received" - done - ;; - - gossip-verified) - echo -e "${GREEN}=== Verified Execution Proofs ===${NC}" - for i in 1 2 3 4; do - count=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Successfully verified gossip execution proof" | wc -l) - echo -e "${YELLOW}Node $i:${NC} $count proofs verified" - done - ;; - - errors) - echo -e "${GREEN}=== Checking for Errors ===${NC}" - for i in 1 2 3 4; do - echo -e "\n${YELLOW}--- Node $i ---${NC}" - no_peers=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "NoPeersSubscribedToTopic.*execution_proof" | wc -l) - failed_sub=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Failed to subscribe.*execution_proof" | wc -l) - - if [ "$no_peers" -gt 0 ]; then - echo -e "${RED}NoPeersSubscribedToTopic errors: $no_peers${NC}" - else - echo -e "${GREEN}NoPeersSubscribedToTopic errors: 0${NC}" - fi - - if [ "$failed_sub" -gt 0 ]; then - echo 
-e "${RED}Failed subscription errors: $failed_sub${NC}" - else - echo -e "${GREEN}Failed subscription errors: 0${NC}" - fi - done - ;; - - zkvm-logs) - echo -e "${GREEN}=== ZKVM Debug Logs ===${NC}" - for i in 1 2 3 4; do - echo -e "\n${YELLOW}--- Node $i ---${NC}" - kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "ZKVM:" | head -5 - done - ;; - - fork-transition) - echo -e "${GREEN}=== Fork Transition Logs ===${NC}" - for i in 1 2 3 4; do - echo -e "\n${YELLOW}--- Node $i ---${NC}" - kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep -E "(Subscribing to new fork|subscribe_new_fork_topics called)" - done - ;; - - stats) - echo -e "${GREEN}=== Execution Proof Statistics ===${NC}" - for i in 1 2 3 4; do - generated=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Generating execution proof" | wc -l) - published=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Proof successfully published" | wc -l) - received=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Received execution proof via gossip" | wc -l) - verified=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Successfully verified gossip execution proof" | wc -l) - - echo -e "${YELLOW}Node $i:${NC}" - echo -e " Generated: $generated" - echo -e " Published: $published" - echo -e " Received: $received" - echo -e " Verified: $verified" - done - ;; - - follow) - NODE="${2:-1}" - echo -e "${GREEN}=== Following Execution Proof Logs for Node $NODE ===${NC}" - echo -e "${YELLOW}Press Ctrl+C to stop${NC}" - kurtosis service logs $ENCLAVE cl-$NODE-lighthouse-geth -f | grep --line-buffered -E "(Generating execution proof|Proof successfully published|Received execution proof via gossip|Successfully verified gossip execution proof)" - ;; - - all) - echo -e "${GREEN}=== Complete Execution Proof Report ===${NC}\n" - $0 zkvm-logs - echo -e "\n" - $0 fork-transition - echo -e "\n" - $0 gossip-subscribe - echo 
-e "\n" - $0 stats - echo -e "\n" - $0 errors - ;; - - help|*) - echo "Helper script for monitoring execution proof generation and gossip" - echo "" - echo "Usage: $0 [command]" - echo " ENCLAVE=name $0 [command]" - echo "" - echo "Environment Variables:" - echo " ENCLAVE - Testnet enclave name (default: local-testnet)" - echo "" - echo "Commands:" - echo " generation - Show proof generation and publishing logs" - echo " gossip-subscribe - Show ExecutionProof topic subscriptions" - echo " gossip-receive - Count received proofs on each node" - echo " gossip-verified - Count verified proofs on each node" - echo " errors - Check for gossip errors" - echo " zkvm-logs - Show ZKVM debug logs" - echo " fork-transition - Show fork transition logs" - echo " stats - Show proof statistics for all nodes" - echo " follow [node] - Follow proof logs in real-time (default: node 1)" - echo " all - Show complete report" - echo " help - Show this help message" - echo "" - echo "Examples:" - echo " # Use default testnet (local-testnet)" - echo " $0 stats" - echo " $0 follow 2" - echo " $0 all" - echo "" - echo " # Use custom testnet enclave" - echo " ENCLAVE=my-testnet $0 stats" - ;; -esac From a3dae4f77c59f803a2e8f0bfe10d419fb763ae6b Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Thu, 1 Jan 2026 03:36:45 +0000 Subject: [PATCH 61/67] initial dummy prover code -- simplified execution-witness-sentry --- Cargo.lock | 6 + scripts/local_testnet/start_dummy_prover.sh | 83 ++++++ zkvm_execution_layer/Cargo.toml | 6 + zkvm_execution_layer/src/bin/dummy-prover.rs | 290 +++++++++++++++++++ 4 files changed, 385 insertions(+) create mode 100755 scripts/local_testnet/start_dummy_prover.sh create mode 100644 zkvm_execution_layer/src/bin/dummy-prover.rs diff --git a/Cargo.lock b/Cargo.lock index f78e8090243..043126349da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11321,13 +11321,19 @@ name = "zkvm_execution_layer" version = "0.1.0" dependencies = [ "async-trait", + "clap", + "eth2", 
"execution_layer", "fixed_bytes", + "futures", "hashbrown 0.15.5", "lru 0.12.5", + "sensitive_url", "serde", "thiserror 2.0.17", "tokio", + "tracing", + "tracing-subscriber", "types", ] diff --git a/scripts/local_testnet/start_dummy_prover.sh b/scripts/local_testnet/start_dummy_prover.sh new file mode 100755 index 00000000000..3504cb95ae3 --- /dev/null +++ b/scripts/local_testnet/start_dummy_prover.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash + +set -Eeuo pipefail + +SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +ROOT_DIR="$( cd -- "$SCRIPT_DIR/../.." &> /dev/null && pwd )" + +ENCLAVE_NAME="${ENCLAVE_NAME:-local-testnet}" +CL_SERVICE="${CL_SERVICE:-cl-1-lighthouse-geth}" +BEACON_NODE_URL="${BEACON_NODE_URL:-}" +SOURCE_BEACON_NODE_URL="${SOURCE_BEACON_NODE_URL:-}" +PROOFS_PER_BLOCK="${PROOFS_PER_BLOCK:-1}" +PROOF_DELAY_MS="${PROOF_DELAY_MS:-1000}" +BACKFILL_THRESHOLD_SLOTS="${BACKFILL_THRESHOLD_SLOTS:-32}" +BACKFILL_INTERVAL_SECS="${BACKFILL_INTERVAL_SECS:-10}" + +while getopts "e:s:b:S:p:d:t:i:h" flag; do + case "${flag}" in + e) ENCLAVE_NAME=${OPTARG};; + s) CL_SERVICE=${OPTARG};; + b) BEACON_NODE_URL=${OPTARG};; + S) SOURCE_BEACON_NODE_URL=${OPTARG};; + p) PROOFS_PER_BLOCK=${OPTARG};; + d) PROOF_DELAY_MS=${OPTARG};; + t) BACKFILL_THRESHOLD_SLOTS=${OPTARG};; + i) BACKFILL_INTERVAL_SECS=${OPTARG};; + h) + echo "Start the dummy prover against a local testnet." + echo "Note: Run this after the testnet is up so the beacon node endpoint exists." 
+ echo + echo "Usage: $0 [options]" + echo + echo "Options:" + echo " -e ENCLAVE_NAME Kurtosis enclave name (default: $ENCLAVE_NAME)" + echo " -s CL_SERVICE Kurtosis CL service name (default: $CL_SERVICE)" + echo " -b BEACON_NODE_URL Target beacon node URL (default: from kurtosis)" + echo " -S SOURCE_BEACON_NODE_URL Source beacon node URL (default: target URL)" + echo " -p PROOFS_PER_BLOCK Proof IDs to submit per block (default: $PROOFS_PER_BLOCK)" + echo " -d PROOF_DELAY_MS Proof generation delay in ms (default: $PROOF_DELAY_MS)" + echo " -t BACKFILL_THRESHOLD Backfill threshold in slots (default: $BACKFILL_THRESHOLD_SLOTS)" + echo " -i BACKFILL_INTERVAL Backfill interval in seconds (default: $BACKFILL_INTERVAL_SECS)" + echo " -h Show this help" + echo + echo "Example:" + echo " $0 -e local-testnet -s cl-1-lighthouse-geth -p 2 -d 1000 -t 64 -i 5" + exit + ;; + esac +done + +if [ -z "$BEACON_NODE_URL" ]; then + if command -v kurtosis &> /dev/null; then + if BEACON_NODE_URL=$(kurtosis port print "$ENCLAVE_NAME" "$CL_SERVICE" http 2>/dev/null); then + echo "Using beacon node from kurtosis: $BEACON_NODE_URL" + else + echo "Failed to detect beacon node URL via kurtosis. Set -b or BEACON_NODE_URL." >&2 + exit 1 + fi + else + BEACON_NODE_URL="http://localhost:5052" + echo "kurtosis not found, defaulting to $BEACON_NODE_URL" + fi +fi + +if [ -z "$SOURCE_BEACON_NODE_URL" ]; then + SOURCE_BEACON_NODE_URL="$BEACON_NODE_URL" +fi + +echo "Starting dummy prover..." 
+echo " target: $BEACON_NODE_URL" +echo " source: $SOURCE_BEACON_NODE_URL" +echo " proofs: $PROOFS_PER_BLOCK" +echo " delay: ${PROOF_DELAY_MS}ms" +echo " backfill threshold: ${BACKFILL_THRESHOLD_SLOTS} slots" +echo " backfill interval: ${BACKFILL_INTERVAL_SECS}s" + +exec cargo run --manifest-path "$ROOT_DIR/Cargo.toml" -p zkvm_execution_layer --bin dummy-prover -- \ + --beacon-node "$BEACON_NODE_URL" \ + --source-beacon-node "$SOURCE_BEACON_NODE_URL" \ + --proofs-per-block "$PROOFS_PER_BLOCK" \ + --proof-delay-ms "$PROOF_DELAY_MS" \ + --backfill-threshold-slots "$BACKFILL_THRESHOLD_SLOTS" \ + --backfill-interval-secs "$BACKFILL_INTERVAL_SECS" diff --git a/zkvm_execution_layer/Cargo.toml b/zkvm_execution_layer/Cargo.toml index 1f6f291e9a7..202a3f6933e 100644 --- a/zkvm_execution_layer/Cargo.toml +++ b/zkvm_execution_layer/Cargo.toml @@ -5,12 +5,18 @@ edition = "2021" [dependencies] async-trait = "0.1" #TODO(zkproofs): Remove +clap = { workspace = true } +eth2 = { workspace = true, features = ["events"] } execution_layer = { path = "../beacon_node/execution_layer" } +futures = { workspace = true } hashbrown = "0.15" lru = "0.12" +sensitive_url = { workspace = true } serde = { version = "1.0", features = ["derive"] } thiserror = "2" tokio = { version = "1", features = ["full"] } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } types = { path = "../consensus/types" } [dev-dependencies] diff --git a/zkvm_execution_layer/src/bin/dummy-prover.rs b/zkvm_execution_layer/src/bin/dummy-prover.rs new file mode 100644 index 00000000000..aa35c8ade7c --- /dev/null +++ b/zkvm_execution_layer/src/bin/dummy-prover.rs @@ -0,0 +1,290 @@ +use clap::Parser; +use eth2::types::{BlockId, EventKind, EventTopic}; +use eth2::{BeaconNodeHttpClient, Timeouts}; +use futures::StreamExt; +use sensitive_url::SensitiveUrl; +use std::time::Duration; +use tokio::time::{interval, MissedTickBehavior}; +use tracing::{debug, info, warn}; +use types::{ExecPayload, 
ExecutionBlockHash, ExecutionProofId, Hash256, MainnetEthSpec, Slot}; +use zkvm_execution_layer::dummy_proof_gen::DummyProofGenerator; +use zkvm_execution_layer::proof_generation::ProofGenerator; + +const DEFAULT_TIMEOUT_SECS: u64 = 12; + +/// Generate and submit dummy execution proofs to a beacon node. +#[derive(Parser, Debug)] +#[command(name = "dummy-prover")] +struct Cli { + /// Beacon node HTTP endpoint to submit proofs to. + #[arg(long, default_value = "http://localhost:5052")] + beacon_node: String, + + /// Beacon node HTTP endpoint to source blocks from (defaults to --beacon-node). + #[arg(long)] + source_beacon_node: Option, + + /// Number of proof IDs to submit per block (max 8). + #[arg(long, default_value_t = 1)] + proofs_per_block: usize, + + /// Delay in milliseconds to simulate proof generation time. + #[arg(long, default_value_t = 1000)] + proof_delay_ms: u64, + + /// Start backfill when sync_distance is >= this many slots. + #[arg(long, default_value_t = 32)] + backfill_threshold_slots: u64, + + /// Backfill check interval in seconds. 
+ #[arg(long, default_value_t = 10)] + backfill_interval_secs: u64, +} + +#[derive(Clone, Copy)] +struct BlockProofInputs { + slot: Slot, + block_root: Hash256, + block_hash: ExecutionBlockHash, +} + +struct Prover { + source: BeaconNodeHttpClient, + target: BeaconNodeHttpClient, + proof_ids: Vec, + proof_delay: Duration, + backfill_threshold_slots: u64, +} + +impl Prover { + async fn handle_block_gossip(&self, block_root: Hash256, slot: Slot) { + let Some(inputs) = self + .fetch_block_for_proofs(BlockId::Root(block_root), Some(slot)) + .await + else { + return; + }; + + if inputs.block_root != block_root { + debug!( + expected = ?block_root, + actual = ?inputs.block_root, + "Block root mismatch" + ); + } + + self.submit_dummy_proofs(inputs).await; + } + + async fn backfill_missing_slots(&self) { + if self.backfill_threshold_slots == 0 { + return; + } + + let syncing = match self.target.get_node_syncing().await { + Ok(response) => response.data, + Err(err) => { + warn!(error = ?err, "Failed to query target sync status"); + return; + } + }; + + let sync_distance = syncing.sync_distance.as_u64(); + if sync_distance < self.backfill_threshold_slots { + return; + } + + let slots_to_backfill = sync_distance.min(self.backfill_threshold_slots); + let head_slot = syncing.head_slot; + info!( + ?head_slot, + sync_distance, slots_to_backfill, "Backfilling dummy proofs" + ); + + for offset in 1..=slots_to_backfill { + let slot = head_slot.saturating_add(Slot::new(offset)); + let Some(inputs) = self + .fetch_block_for_proofs(BlockId::Slot(slot), Some(slot)) + .await + else { + continue; + }; + + self.submit_dummy_proofs(inputs).await; + } + } + + async fn submit_dummy_proofs(&self, inputs: BlockProofInputs) { + let BlockProofInputs { + slot, + block_root, + block_hash, + } = inputs; + + for proof_id in &self.proof_ids { + let generator = DummyProofGenerator::with_delay(*proof_id, self.proof_delay); + let proof = match generator.generate(slot, &block_hash, &block_root).await { + 
Ok(proof) => proof, + Err(err) => { + warn!( + ?block_root, + ?slot, + ?proof_id, + error = ?err, + "Failed to build dummy proof" + ); + continue; + } + }; + + if let Err(err) = self.target.post_beacon_pool_execution_proofs(&proof).await { + debug!( + ?block_root, + ?slot, + ?proof_id, + error = ?err, + "Failed to submit dummy proof" + ); + } else { + debug!(?block_root, ?slot, ?proof_id, "Submitted dummy proof"); + } + } + } + + async fn fetch_block_for_proofs( + &self, + block_id: BlockId, + slot_hint: Option, + ) -> Option { + let block = match self + .source + .get_beacon_blinded_blocks::(block_id) + .await + { + Ok(Some(response)) => response, + Ok(None) => { + debug!(?block_id, ?slot_hint, "Block not found in source node"); + return None; + } + Err(err) => { + warn!(?block_id, ?slot_hint, error = ?err, "Failed to fetch block"); + return None; + } + }; + + let block = block.data(); + let slot = block.slot(); + if let Some(expected_slot) = slot_hint { + if slot != expected_slot { + debug!( + ?block_id, + expected = ?expected_slot, + actual = ?slot, + "Block slot mismatch" + ); + } + } + + let payload = match block.message().body().execution_payload() { + Ok(payload) => payload, + Err(_) => { + debug!(?block_id, ?slot, "Block has no execution payload"); + return None; + } + }; + + Some(BlockProofInputs { + slot, + block_root: block.canonical_root(), + block_hash: payload.block_hash(), + }) + } +} + +fn build_proof_ids(count: usize) -> Vec { + let mut ids = ExecutionProofId::all(); + ids.truncate(count.min(ids.len())); + ids +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + tracing_subscriber::fmt() + .with_env_filter( + tracing_subscriber::EnvFilter::from_default_env() + .add_directive("dummy_prover=info".parse().unwrap()), + ) + .init(); + + let cli = Cli::parse(); + let target_url = SensitiveUrl::parse(&cli.beacon_node)?; + let source_url = SensitiveUrl::parse( + cli.source_beacon_node + .as_deref() + .unwrap_or(&cli.beacon_node), + )?; + + let 
timeouts = Timeouts::set_all(Duration::from_secs(DEFAULT_TIMEOUT_SECS)); + let target = BeaconNodeHttpClient::new(target_url, timeouts.clone()); + let source = BeaconNodeHttpClient::new(source_url, timeouts); + + let proof_ids = build_proof_ids(cli.proofs_per_block); + if proof_ids.is_empty() { + warn!("No proof IDs configured, exiting"); + return Ok(()); + } + + let prover = Prover { + source, + target, + proof_ids, + proof_delay: Duration::from_millis(cli.proof_delay_ms), + backfill_threshold_slots: cli.backfill_threshold_slots, + }; + + info!( + target = %cli.beacon_node, + source = %prover.source.server(), + proofs_per_block = prover.proof_ids.len(), + proof_delay_ms = cli.proof_delay_ms, + backfill_threshold_slots = prover.backfill_threshold_slots, + "Starting dummy prover" + ); + + let mut events = prover + .source + .get_events::(&[EventTopic::BlockGossip]) + .await + .map_err(|e| format!("Failed to subscribe to events: {:?}", e))?; + let mut backfill_interval = interval(Duration::from_secs(cli.backfill_interval_secs)); + backfill_interval.set_missed_tick_behavior(MissedTickBehavior::Delay); + + loop { + tokio::select! 
{ + _ = tokio::signal::ctrl_c() => { + info!("Shutdown requested"); + break; + } + _ = backfill_interval.tick() => { + prover.backfill_missing_slots().await; + } + maybe_event = events.next() => { + let Some(event) = maybe_event else { + warn!("Event stream ended"); + break; + }; + match event { + Ok(EventKind::BlockGossip(gossip)) => { + prover.handle_block_gossip(gossip.block, gossip.slot).await; + } + Ok(_) => {} + Err(err) => { + warn!(error = ?err, "Event stream error"); + } + } + } + } + } + + Ok(()) +} From 927b8dddcfb50506616e3b2ffb49d0037d344337 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Thu, 1 Jan 2026 03:37:58 +0000 Subject: [PATCH 62/67] remove execution-witness-sentry (move this and its complexities to zkboost) --- Cargo.toml | 1 - execution-witness-sentry/Cargo.toml | 24 - execution-witness-sentry/config.toml | 27 - .../src/cl_subscription.rs | 128 --- execution-witness-sentry/src/config.rs | 58 -- execution-witness-sentry/src/error.rs | 53 -- execution-witness-sentry/src/lib.rs | 45 -- execution-witness-sentry/src/main.rs | 731 ------------------ execution-witness-sentry/src/rpc.rs | 393 ---------- execution-witness-sentry/src/storage.rs | 248 ------ execution-witness-sentry/src/subscription.rs | 45 -- .../tests/cl_subscription.rs | 11 - 12 files changed, 1764 deletions(-) delete mode 100644 execution-witness-sentry/Cargo.toml delete mode 100644 execution-witness-sentry/config.toml delete mode 100644 execution-witness-sentry/src/cl_subscription.rs delete mode 100644 execution-witness-sentry/src/config.rs delete mode 100644 execution-witness-sentry/src/error.rs delete mode 100644 execution-witness-sentry/src/lib.rs delete mode 100644 execution-witness-sentry/src/main.rs delete mode 100644 execution-witness-sentry/src/rpc.rs delete mode 100644 execution-witness-sentry/src/storage.rs delete mode 100644 execution-witness-sentry/src/subscription.rs delete mode 100644 execution-witness-sentry/tests/cl_subscription.rs diff --git a/Cargo.toml 
b/Cargo.toml index 19158eb29e5..ba2316bb034 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,7 +62,6 @@ members = [ "crypto/kzg", "database_manager", "dummy_el", - "execution-witness-sentry", "lcli", "lighthouse", "lighthouse/environment", diff --git a/execution-witness-sentry/Cargo.toml b/execution-witness-sentry/Cargo.toml deleted file mode 100644 index ceee0f20bed..00000000000 --- a/execution-witness-sentry/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "execution-witness-sentry" -version = "0.1.0" -edition = { workspace = true } -description = "Monitors execution layer nodes and fetches execution witnesses" - -[dependencies] -alloy-provider = { version = "1", features = ["ws"] } -alloy-rpc-types-eth = "1" -anyhow = "1" -clap = { version = "4", features = ["derive"] } -discv5 = { workspace = true } -eventsource-client = "0.13" -flate2 = "1.1" -futures = { workspace = true } -reqwest = { workspace = true, features = ["json"] } -serde = { workspace = true, features = ["derive"] } -serde_json = { workspace = true } -thiserror = "1" -tokio = { workspace = true, features = ["sync", "rt-multi-thread", "macros"] } -toml = "0.8" -tracing = { workspace = true } -tracing-subscriber = { workspace = true, features = ["env-filter"] } -url = { workspace = true } diff --git a/execution-witness-sentry/config.toml b/execution-witness-sentry/config.toml deleted file mode 100644 index b35c2ca262e..00000000000 --- a/execution-witness-sentry/config.toml +++ /dev/null @@ -1,27 +0,0 @@ -output_dir = "." 
-chain = "local" -retain = 10 -num_proofs = 2 - -[[endpoints]] -name = "el-1-reth-lighthouse" -el_url = "http://127.0.0.1:32003" -el_ws_url = "ws://127.0.0.1:32004" - -# Non-zkvm CL for head event subscription (to know when new blocks arrive) -[[cl_endpoints]] -name = "cl-1-lighthouse-reth" -url = "http://127.0.0.1:33001/" - -# zkvm-enabled CLs for proof submission -[[cl_endpoints]] -name = "cl-4-lighthouse-geth" -url = "http://127.0.0.1:33022/" - -[[cl_endpoints]] -name = "cl-5-lighthouse-geth" -url = "http://127.0.0.1:33029/" - -[[cl_endpoints]] -name = "cl-6-lighthouse-geth" -url = "http://127.0.0.1:33036/" diff --git a/execution-witness-sentry/src/cl_subscription.rs b/execution-witness-sentry/src/cl_subscription.rs deleted file mode 100644 index 040e01cc438..00000000000 --- a/execution-witness-sentry/src/cl_subscription.rs +++ /dev/null @@ -1,128 +0,0 @@ -//! SSE subscription for CL head events. - -use std::pin::Pin; -use std::task::{Context, Poll}; - -use eventsource_client::{Client, SSE}; -use futures::Stream; -use serde::Deserialize; -use url::Url; - -use crate::error::{Error, Result}; - -/// Head event from the CL. -#[derive(Debug, Clone, Deserialize)] -pub struct HeadEvent { - pub slot: String, - pub block: String, - pub state: String, - pub epoch_transition: bool, - pub execution_optimistic: bool, -} - -/// Block event from the CL. -#[derive(Debug, Clone, Deserialize)] -pub struct BlockEvent { - pub slot: String, - pub block: String, - pub execution_optimistic: bool, -} - -/// Unified CL event. -#[derive(Debug, Clone)] -pub enum ClEvent { - Head(HeadEvent), - Block(BlockEvent), -} - -impl ClEvent { - pub fn slot(&self) -> &str { - match self { - ClEvent::Head(e) => &e.slot, - ClEvent::Block(e) => &e.slot, - } - } - - pub fn block_root(&self) -> &str { - match self { - ClEvent::Head(e) => &e.block, - ClEvent::Block(e) => &e.block, - } - } -} - -/// Stream of CL events. 
-pub struct ClEventStream { - client: Pin> + Send>>, -} - -impl Stream for ClEventStream { - type Item = Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - loop { - match self.client.as_mut().poll_next(cx) { - Poll::Ready(Some(Ok(SSE::Event(event)))) => { - let result = match event.event_type.as_str() { - "head" => serde_json::from_str::(&event.data) - .map(ClEvent::Head) - .map_err(Error::Parse), - "block" => serde_json::from_str::(&event.data) - .map(ClEvent::Block) - .map_err(Error::Parse), - _ => continue, - }; - return Poll::Ready(Some(result)); - } - Poll::Ready(Some(Ok(SSE::Comment(_)))) => continue, - Poll::Ready(Some(Ok(SSE::Connected(_)))) => continue, - Poll::Ready(Some(Err(e))) => { - return Poll::Ready(Some(Err(Error::Sse(format!("{:?}", e))))); - } - Poll::Ready(None) => return Poll::Ready(None), - Poll::Pending => return Poll::Pending, - } - } - } -} - -/// Subscribe to CL head events via SSE. -pub fn subscribe_cl_events(base_url: &str) -> Result { - let url = build_events_url(base_url)?; - - let client = eventsource_client::ClientBuilder::for_url(url.as_str()) - .map_err(|e| Error::Config(format!("Invalid SSE URL: {}", e)))? - .build(); - - Ok(ClEventStream { - client: Box::pin(client.stream()), - }) -} - -fn build_events_url(base_url: &str) -> Result { - let base = Url::parse(base_url)?; - Ok(base.join("/eth/v1/events?topics=head,block")?) 
-} - -#[cfg(test)] -mod tests { - use super::build_events_url; - - #[test] - fn build_events_url_adds_path_without_trailing_slash() { - let url = build_events_url("http://localhost:5052").unwrap(); - assert_eq!( - url.as_str(), - "http://localhost:5052/eth/v1/events?topics=head,block" - ); - } - - #[test] - fn build_events_url_adds_path_with_trailing_slash() { - let url = build_events_url("http://localhost:5052/").unwrap(); - assert_eq!( - url.as_str(), - "http://localhost:5052/eth/v1/events?topics=head,block" - ); - } -} diff --git a/execution-witness-sentry/src/config.rs b/execution-witness-sentry/src/config.rs deleted file mode 100644 index 50d64969e72..00000000000 --- a/execution-witness-sentry/src/config.rs +++ /dev/null @@ -1,58 +0,0 @@ -//! Configuration types for the execution witness sentry. - -use std::path::Path; - -use serde::{Deserialize, Serialize}; - -use crate::error::{Error, Result}; - -/// Sentry configuration. -#[derive(Debug, Clone, Deserialize, Serialize)] -pub struct Config { - /// Execution layer endpoints to monitor. - pub endpoints: Vec, - /// Consensus layer endpoints to submit proofs to. - pub cl_endpoints: Option>, - /// Directory to save block and witness data. - pub output_dir: Option, - /// Chain identifier (used in output path). - pub chain: Option, - /// Number of recent blocks to retain (older blocks are deleted). - pub retain: Option, - /// Number of proofs to submit per block. - pub num_proofs: Option, -} - -/// Execution layer endpoint configuration. -#[derive(Debug, Clone, Deserialize, Serialize)] -pub struct Endpoint { - /// Human-readable name for this endpoint. - pub name: String, - /// HTTP JSON-RPC URL. - pub el_url: String, - /// WebSocket URL for subscriptions. - pub el_ws_url: String, -} - -/// Consensus layer endpoint configuration. -#[derive(Debug, Clone, Deserialize, Serialize)] -pub struct ClEndpoint { - /// Human-readable name for this endpoint. - pub name: String, - /// HTTP API URL. 
- pub url: String, -} - -impl Config { - /// Load configuration from a TOML file. - pub fn load(path: impl AsRef) -> Result { - let content = std::fs::read_to_string(path.as_ref()).map_err(|e| { - Error::Config(format!( - "failed to read config file '{}': {}", - path.as_ref().display(), - e - )) - })?; - Ok(toml::from_str(&content)?) - } -} diff --git a/execution-witness-sentry/src/error.rs b/execution-witness-sentry/src/error.rs deleted file mode 100644 index 91f0a7b72e1..00000000000 --- a/execution-witness-sentry/src/error.rs +++ /dev/null @@ -1,53 +0,0 @@ -//! Error types for the execution witness sentry. - -use std::io; - -use thiserror::Error; - -/// Errors that can occur in the execution witness sentry. -#[derive(Debug, Error)] -pub enum Error { - /// Failed to load or parse configuration. - #[error("config error: {0}")] - Config(String), - - /// HTTP request failed. - #[error("HTTP error: {0}")] - Http(#[from] reqwest::Error), - - /// JSON-RPC error returned by the node. - #[error("RPC error {code}: {message}")] - Rpc { - /// Error code. - code: i64, - /// Error message. - message: String, - }, - - /// Failed to parse response. - #[error("parse error: {0}")] - Parse(#[from] serde_json::Error), - - /// WebSocket connection or subscription failed. - #[error("WebSocket error: {0}")] - WebSocket(String), - - /// URL parsing failed. - #[error("invalid URL: {0}")] - InvalidUrl(#[from] url::ParseError), - - /// I/O error (file operations, compression). - #[error("I/O error: {0}")] - Io(#[from] io::Error), - - /// TOML parsing error. - #[error("TOML parse error: {0}")] - Toml(#[from] toml::de::Error), - - /// SSE connection error. - #[error("SSE error: {0}")] - Sse(String), -} - -/// Result type alias using our Error type. 
-pub type Result = std::result::Result; diff --git a/execution-witness-sentry/src/lib.rs b/execution-witness-sentry/src/lib.rs deleted file mode 100644 index fcb9b077cc8..00000000000 --- a/execution-witness-sentry/src/lib.rs +++ /dev/null @@ -1,45 +0,0 @@ -//! Execution witness sentry - monitors execution layer nodes for new blocks -//! and fetches their execution witnesses. -//! -//! This crate provides functionality to: -//! - Subscribe to new block headers via WebSocket -//! - Fetch blocks and execution witnesses via JSON-RPC -//! - Store block data and witnesses to disk -//! - Submit execution proofs to consensus layer nodes -//! -//! ## Example -//! -//! ```ignore -//! use execution_witness_sentry::{Config, ElClient, subscribe_blocks}; -//! -//! let config = Config::load("config.toml")?; -//! let client = ElClient::new(url); -//! -//! // Subscribe to new blocks -//! let mut stream = subscribe_blocks(&ws_url).await?; -//! -//! while let Some(header) = stream.next().await { -//! let witness = client.get_execution_witness(header.number).await?; -//! // Process witness... -//! } -//! ``` - -pub mod cl_subscription; -pub mod config; -pub mod error; -pub mod rpc; -pub mod storage; -pub mod subscription; - -// Re-export main types at crate root for convenience. -pub use cl_subscription::{BlockEvent, ClEvent, ClEventStream, HeadEvent, subscribe_cl_events}; -pub use config::{ClEndpoint, Config, Endpoint}; -pub use error::{Error, Result}; -pub use rpc::{BlockInfo, ClClient, ElClient, ExecutionProof, generate_random_proof}; -pub use storage::{ - BlockMetadata, BlockStorage, SavedProof, compress_gzip, decompress_gzip, load_block_data, -}; -pub use subscription::subscribe_blocks; - -// Re-export alloy types that appear in our public API. 
-pub use alloy_rpc_types_eth::{Block, Header}; diff --git a/execution-witness-sentry/src/main.rs b/execution-witness-sentry/src/main.rs deleted file mode 100644 index 8170bcda024..00000000000 --- a/execution-witness-sentry/src/main.rs +++ /dev/null @@ -1,731 +0,0 @@ -//! Execution witness sentry CLI. -//! -//! Monitors execution layer nodes for new blocks and fetches their execution witnesses. -//! Subscribes to CL head events to correlate EL blocks with beacon slots. - -use std::collections::HashMap; -use std::path::PathBuf; -use std::pin::pin; -use std::sync::Arc; -use std::time::{Duration, Instant}; - -use clap::Parser; -use futures::StreamExt; -use tokio::sync::Mutex; -use tracing::{debug, error, info, warn}; -use url::Url; - -use execution_witness_sentry::{ - BlockStorage, ClClient, ClEvent, Config, ElClient, ExecutionProof, SavedProof, - generate_random_proof, subscribe_blocks, subscribe_cl_events, -}; - -/// Execution witness sentry - monitors EL nodes and fetches witnesses. -#[derive(Parser, Debug)] -#[command(name = "execution-witness-sentry")] -#[command(about = "Monitor execution layer nodes and fetch execution witnesses")] -struct Cli { - /// Path to configuration file. - #[arg(long, short, default_value = "config.toml")] - config: PathBuf, -} - -/// Cached EL block data waiting for CL correlation. -struct CachedElBlock { - block_number: u64, - timestamp: Instant, -} - -/// Cache for EL blocks keyed by block_hash. 
-struct ElBlockCache { - blocks: HashMap, - max_age: Duration, -} - -impl ElBlockCache { - fn new(max_age: Duration) -> Self { - Self { - blocks: HashMap::new(), - max_age, - } - } - - fn insert(&mut self, block_hash: String, block_number: u64, _endpoint_name: String) { - self.blocks.insert( - block_hash, - CachedElBlock { - block_number, - timestamp: Instant::now(), - }, - ); - self.cleanup(); - } - - fn get(&self, block_hash: &str) -> Option<&CachedElBlock> { - self.blocks.get(block_hash) - } - - fn remove(&mut self, block_hash: &str) -> Option { - self.blocks.remove(block_hash) - } - - fn cleanup(&mut self) { - let now = Instant::now(); - self.blocks - .retain(|_, v| now.duration_since(v.timestamp) < self.max_age); - } -} - -/// EL event for the channel. -struct ElBlockEvent { - endpoint_name: String, - block_number: u64, - block_hash: String, -} - -/// CL event for the channel. -struct ClBlockEvent { - cl_name: String, - slot: u64, - block_root: String, - execution_block_hash: String, -} - -/// Status of a zkvm CL node. -#[derive(Debug, Clone)] -struct ZkvmClStatus { - name: String, - head_slot: u64, - gap: i64, // Negative means behind source CL -} - -/// Monitor zkvm CL nodes and report their sync status. -async fn monitor_zkvm_status( - source_client: &ClClient, - zkvm_clients: &[(String, ClClient)], -) -> Vec { - let source_head = match source_client.get_head_slot().await { - Ok(slot) => slot, - Err(e) => { - warn!(error = %e, "Failed to get source CL head"); - return vec![]; - } - }; - - let mut statuses = Vec::new(); - for (name, client) in zkvm_clients { - match client.get_head_slot().await { - Ok(head_slot) => { - let gap = head_slot as i64 - source_head as i64; - statuses.push(ZkvmClStatus { - name: name.clone(), - head_slot, - gap, - }); - } - Err(e) => { - warn!(name = %name, error = %e, "Failed to get zkvm CL head"); - } - } - } - - statuses -} - -/// Backfill proofs for a zkvm CL that is behind. 
-/// First tries to use saved proofs from disk, falls back to generating new ones. -/// Returns the number of proofs submitted. -async fn backfill_proofs( - source_client: &ClClient, - zkvm_client: &ClClient, - zkvm_name: &str, - num_proofs: usize, - max_slots: u64, - storage: Option<&BlockStorage>, -) -> usize { - // Get the zkvm CL's current head - let zkvm_head = match zkvm_client.get_head_slot().await { - Ok(slot) => slot, - Err(e) => { - warn!(name = %zkvm_name, error = %e, "Failed to get zkvm CL head for backfill"); - return 0; - } - }; - - // Get source CL head - let source_head = match source_client.get_head_slot().await { - Ok(slot) => slot, - Err(e) => { - warn!(error = %e, "Failed to get source CL head for backfill"); - return 0; - } - }; - - if zkvm_head >= source_head { - return 0; // Already caught up - } - - let gap = source_head - zkvm_head; - let slots_to_check = gap.min(max_slots); - - info!( - name = %zkvm_name, - zkvm_head = zkvm_head, - source_head = source_head, - gap = gap, - checking = slots_to_check, - "Backfilling proofs" - ); - - let mut proofs_submitted = 0; - - // Iterate through slots from zkvm_head + 1 to zkvm_head + slots_to_check - for slot in (zkvm_head + 1)..=(zkvm_head + slots_to_check) { - // First try to load saved proofs from disk - if let Some(storage) = storage - && let Ok(Some((_metadata, saved_proofs))) = storage.load_proofs_by_slot(slot) - && !saved_proofs.is_empty() - { - debug!( - slot = slot, - num_proofs = saved_proofs.len(), - "Using saved proofs from disk" - ); - - for saved_proof in &saved_proofs { - let proof = ExecutionProof { - proof_id: saved_proof.proof_id, - slot: saved_proof.slot, - block_hash: saved_proof.block_hash.clone(), - block_root: saved_proof.block_root.clone(), - proof_data: saved_proof.proof_data.clone(), - }; - - match zkvm_client.submit_execution_proof(&proof).await { - Ok(()) => { - debug!( - name = %zkvm_name, - slot = slot, - proof_id = saved_proof.proof_id, - "Backfill proof submitted (from 
disk)" - ); - proofs_submitted += 1; - } - Err(e) => { - let msg = e.to_string(); - if !msg.contains("already known") { - debug!( - name = %zkvm_name, - slot = slot, - proof_id = saved_proof.proof_id, - error = %e, - "Backfill proof failed" - ); - } - } - } - } - continue; // Move to next slot - } - - // No saved proofs, fetch block info and generate new proofs - let block_info = match source_client.get_block_info(slot).await { - Ok(Some(info)) => info, - Ok(None) => { - debug!(slot = slot, "Empty slot, skipping"); - continue; - } - Err(e) => { - debug!(slot = slot, error = %e, "Failed to get block info"); - continue; - } - }; - - // Only submit proofs for blocks with execution payloads - let Some(exec_hash) = block_info.execution_block_hash else { - debug!(slot = slot, "No execution payload, skipping"); - continue; - }; - - // Generate and submit proofs - for proof_id in 0..num_proofs { - let proof = ExecutionProof { - proof_id: proof_id as u8, - slot, - block_hash: exec_hash.clone(), - block_root: block_info.block_root.clone(), - proof_data: generate_random_proof(proof_id as u32), - }; - - match zkvm_client.submit_execution_proof(&proof).await { - Ok(()) => { - debug!( - name = %zkvm_name, - slot = slot, - proof_id = proof_id, - "Backfill proof submitted (generated)" - ); - proofs_submitted += 1; - } - Err(e) => { - let msg = e.to_string(); - if !msg.contains("already known") { - debug!( - name = %zkvm_name, - slot = slot, - proof_id = proof_id, - error = %e, - "Backfill proof failed" - ); - } - } - } - } - } - - if proofs_submitted > 0 { - info!( - name = %zkvm_name, - proofs_submitted = proofs_submitted, - "Backfill complete" - ); - } - - proofs_submitted -} - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - tracing_subscriber::fmt() - .with_env_filter( - tracing_subscriber::EnvFilter::from_default_env() - .add_directive("execution_witness_sentry=info".parse()?), - ) - .init(); - - let cli = Cli::parse(); - let config = Config::load(&cli.config)?; - - 
info!(endpoints = config.endpoints.len(), "Loaded configuration"); - for endpoint in &config.endpoints { - info!( - name = %endpoint.name, - el_url = %endpoint.el_url, - el_ws_url = %endpoint.el_ws_url, - "EL endpoint configured" - ); - } - - // Set up CL clients - separate zkvm targets from event sources - let mut zkvm_clients: Vec<(String, ClClient)> = Vec::new(); // zkvm-enabled nodes for proof submission - let mut event_source_client: Option<(String, String, ClClient)> = None; // First available CL for events - - if let Some(endpoints) = config.cl_endpoints.as_ref() { - for endpoint in endpoints { - let url = match Url::parse(&endpoint.url) { - Ok(u) => u, - Err(e) => { - warn!(name = %endpoint.name, error = %e, "Invalid CL endpoint URL"); - continue; - } - }; - let client = ClClient::new(url); - - match client.is_zkvm_enabled().await { - Ok(true) => { - info!(name = %endpoint.name, "CL endpoint has zkvm enabled (proof target)"); - zkvm_clients.push((endpoint.name.clone(), client)); - } - Ok(false) => { - info!(name = %endpoint.name, "CL endpoint does not have zkvm enabled"); - // Use first non-zkvm CL as event source - if event_source_client.is_none() { - info!(name = %endpoint.name, "Using as event source"); - event_source_client = - Some((endpoint.name.clone(), endpoint.url.clone(), client)); - } - } - Err(e) => { - warn!(name = %endpoint.name, error = %e, "Failed to check zkvm status"); - } - } - } - } - - info!( - zkvm_targets = zkvm_clients.len(), - "zkvm-enabled CL endpoints configured" - ); - - let Some(event_source) = event_source_client else { - error!("No non-zkvm CL endpoint available for event source"); - return Ok(()); - }; - info!(name = %event_source.0, "CL event source configured"); - - let num_proofs = config.num_proofs.unwrap_or(2) as usize; - - // Set up block storage - let storage = config.output_dir.as_ref().map(|dir| { - BlockStorage::new( - dir, - config.chain.as_deref().unwrap_or("unknown"), - config.retain, - ) - }); - - // Cache for 
EL blocks (keyed by block_hash) - let el_cache = Arc::new(Mutex::new(ElBlockCache::new(Duration::from_secs(60)))); - - // Channels for events - let (el_tx, mut el_rx) = tokio::sync::mpsc::channel::(100); - let (cl_tx, mut cl_rx) = tokio::sync::mpsc::channel::(100); - - // Spawn EL subscription tasks - for endpoint in config.endpoints.clone() { - let tx = el_tx.clone(); - let name = endpoint.name.clone(); - let ws_url = endpoint.el_ws_url.clone(); - - tokio::spawn(async move { - info!(name = %name, "Connecting to EL WebSocket"); - - let stream = match subscribe_blocks(&ws_url).await { - Ok(s) => s, - Err(e) => { - error!(name = %name, error = %e, "Failed to subscribe to EL"); - return; - } - }; - - info!(name = %name, "Subscribed to EL newHeads"); - let mut stream = pin!(stream); - - while let Some(result) = stream.next().await { - match result { - Ok(header) => { - let event = ElBlockEvent { - endpoint_name: name.clone(), - block_number: header.number, - block_hash: format!("{:?}", header.hash), - }; - if tx.send(event).await.is_err() { - break; - } - } - Err(e) => { - error!(name = %name, error = %e, "EL stream error"); - } - } - } - warn!(name = %name, "EL WebSocket stream ended"); - }); - } - - let (es_name, es_url, es_client) = event_source; - let source_client_for_monitor = es_client.clone(); - - // Spawn CL subscription task for the event source (non-zkvm CL) - { - let tx = cl_tx.clone(); - - tokio::spawn(async move { - info!(name = %es_name, "Connecting to CL SSE"); - - let stream = match subscribe_cl_events(&es_url) { - Ok(s) => s, - Err(e) => { - error!(name = %es_name, error = %e, "Failed to subscribe to CL events"); - return; - } - }; - - info!(name = %es_name, "Subscribed to CL head events"); - let mut stream = pin!(stream); - - while let Some(result) = stream.next().await { - match result { - Ok(ClEvent::Head(head)) => { - let slot: u64 = match head.slot.parse() { - Ok(slot) => slot, - Err(e) => { - warn!( - name = %es_name, - error = %e, - slot = 
%head.slot, - "Invalid head slot value" - ); - continue; - } - }; - let block_root = head.block.clone(); - - // Fetch the execution block hash for this beacon block - let exec_hash = match es_client.get_block_execution_hash(&block_root).await - { - Ok(Some(hash)) => hash, - Ok(None) => { - debug!(name = %es_name, slot = slot, "No execution hash for block"); - continue; - } - Err(e) => { - debug!(name = %es_name, error = %e, "Failed to get execution hash"); - continue; - } - }; - - let event = ClBlockEvent { - cl_name: es_name.clone(), - slot, - block_root, - execution_block_hash: exec_hash, - }; - if tx.send(event).await.is_err() { - break; - } - } - Ok(ClEvent::Block(_)) => { - // We use head events primarily - } - Err(e) => { - error!(name = %es_name, error = %e, "CL stream error"); - } - } - } - warn!(name = %es_name, "CL SSE stream ended"); - }); - } - - drop(el_tx); - drop(cl_tx); - - // Create a timer for periodic monitoring and backfill (500ms for fast catch-up) - let mut monitor_interval = tokio::time::interval(Duration::from_millis(500)); - monitor_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - - info!("Waiting for events (with monitoring every 500ms)"); - - // Process events from both EL and CL - loop { - tokio::select! 
{ - // Periodic monitoring and backfill - _ = monitor_interval.tick() => { - // Monitor zkvm CL status - let statuses = monitor_zkvm_status(&source_client_for_monitor, &zkvm_clients).await; - - for status in &statuses { - if status.gap < -5 { - // More than 5 slots behind - log warning and backfill - warn!( - name = %status.name, - head_slot = status.head_slot, - gap = status.gap, - "zkvm CL is behind, starting backfill" - ); - - // Find the client and backfill - if let Some((_, client)) = zkvm_clients.iter().find(|(n, _)| n == &status.name) { - backfill_proofs( - &source_client_for_monitor, - client, - &status.name, - num_proofs, - 20, // Max 20 slots per backfill cycle - storage.as_ref(), - ).await; - } - } else if status.gap < 0 { - // Slightly behind - just log - debug!( - name = %status.name, - head_slot = status.head_slot, - gap = status.gap, - "zkvm CL slightly behind" - ); - } else { - // In sync or ahead - debug!( - name = %status.name, - head_slot = status.head_slot, - gap = status.gap, - "zkvm CL in sync" - ); - } - } - } - - Some(el_event) = el_rx.recv() => { - info!( - name = %el_event.endpoint_name, - number = el_event.block_number, - hash = %el_event.block_hash, - "EL block received" - ); - - // Find the endpoint and fetch block + witness - let Some(endpoint) = config.endpoints.iter().find(|e| e.name == el_event.endpoint_name) else { - continue; - }; - - let Ok(el_url) = Url::parse(&endpoint.el_url) else { - continue; - }; - let el_client = ElClient::new(el_url); - - // Fetch block and witness - let (block, gzipped_block) = match el_client.get_block_by_hash(&el_event.block_hash).await { - Ok(Some(data)) => data, - Ok(None) => { - warn!(number = el_event.block_number, "Block not found"); - continue; - } - Err(e) => { - error!(number = el_event.block_number, error = %e, "Failed to fetch block"); - continue; - } - }; - - let (witness, gzipped_witness) = match el_client.get_execution_witness(el_event.block_number).await { - Ok(Some(data)) => data, - 
Ok(None) => { - warn!(number = el_event.block_number, "Witness not found"); - continue; - } - Err(e) => { - error!(number = el_event.block_number, error = %e, "Failed to fetch witness"); - continue; - } - }; - - info!( - number = el_event.block_number, - block_gzipped = gzipped_block.len(), - witness_gzipped = gzipped_witness.len(), - "Fetched block and witness" - ); - - // Save to disk if storage is configured - if let Some(ref storage) = storage { - let combined = serde_json::json!({ - "block": block, - "witness": witness, - }); - let combined_bytes = serde_json::to_vec(&combined)?; - let gzipped_combined = execution_witness_sentry::compress_gzip(&combined_bytes)?; - - if let Err(e) = storage.save_block(&block, &gzipped_combined) { - error!(error = %e, "Failed to save block"); - } else { - info!( - number = el_event.block_number, - separate = gzipped_block.len() + gzipped_witness.len(), - combined = gzipped_combined.len(), - "Saved" - ); - } - } - - // Cache the EL block for correlation with CL events - let mut cache = el_cache.lock().await; - cache.insert( - el_event.block_hash.clone(), - el_event.block_number, - el_event.endpoint_name.clone(), - ); - } - - Some(cl_event) = cl_rx.recv() => { - info!( - source = %cl_event.cl_name, - slot = cl_event.slot, - block_root = %cl_event.block_root, - exec_hash = %cl_event.execution_block_hash, - "CL head event received" - ); - - // Check if we have the EL block cached - let cached_block_number = { - let cache = el_cache.lock().await; - cache.get(&cl_event.execution_block_hash).map(|c| c.block_number) - }; - - if cached_block_number.is_none() { - debug!( - exec_hash = %cl_event.execution_block_hash, - "EL block not in cache, skipping proof submission" - ); - continue; - } - let block_number = cached_block_number.unwrap(); - - // Generate proofs once (for all CLs and for saving) - let mut generated_proofs: Vec = Vec::new(); - for proof_id in 0..num_proofs { - generated_proofs.push(SavedProof { - proof_id: proof_id as u8, - 
slot: cl_event.slot, - block_hash: cl_event.execution_block_hash.clone(), - block_root: cl_event.block_root.clone(), - proof_data: generate_random_proof(proof_id as u32), - }); - } - - // Save proofs to disk for backfill - if let Some(ref storage) = storage { - if let Err(e) = storage.save_proofs( - block_number, - cl_event.slot, - &cl_event.block_root, - &cl_event.execution_block_hash, - &generated_proofs, - ) { - warn!(slot = cl_event.slot, error = %e, "Failed to save proofs to disk"); - } else { - debug!(slot = cl_event.slot, block_number = block_number, "Saved proofs to disk"); - } - } - - // Submit proofs to ALL zkvm-enabled CL clients - for (cl_name, cl_client) in &zkvm_clients { - for saved_proof in &generated_proofs { - let proof = ExecutionProof { - proof_id: saved_proof.proof_id, - slot: saved_proof.slot, - block_hash: saved_proof.block_hash.clone(), - block_root: saved_proof.block_root.clone(), - proof_data: saved_proof.proof_data.clone(), - }; - - match cl_client.submit_execution_proof(&proof).await { - Ok(()) => { - info!( - cl = %cl_name, - slot = cl_event.slot, - proof_id = saved_proof.proof_id, - "Proof submitted" - ); - } - Err(e) => { - debug!( - cl = %cl_name, - slot = cl_event.slot, - proof_id = saved_proof.proof_id, - error = %e, - "Proof submission failed" - ); - } - } - } - } - - // Remove from cache after submission - let mut cache = el_cache.lock().await; - cache.remove(&cl_event.execution_block_hash); - } - - else => break, - } - } - - Ok(()) -} diff --git a/execution-witness-sentry/src/rpc.rs b/execution-witness-sentry/src/rpc.rs deleted file mode 100644 index 9722ee67085..00000000000 --- a/execution-witness-sentry/src/rpc.rs +++ /dev/null @@ -1,393 +0,0 @@ -//! JSON-RPC client for execution layer nodes. - -use alloy_rpc_types_eth::Block; -use serde::{Deserialize, Serialize}; -use url::Url; - -use crate::error::{Error, Result}; -use crate::storage::compress_gzip; - -/// JSON-RPC request structure. 
-#[derive(Debug, Clone, Serialize)] -struct JsonRpcRequest { - jsonrpc: &'static str, - method: &'static str, - params: T, - id: u64, -} - -/// JSON-RPC response structure. -#[derive(Debug, Clone, Deserialize)] -pub struct JsonRpcResponse { - pub result: Option, - pub error: Option, -} - -/// JSON-RPC error structure. -#[derive(Debug, Clone, Deserialize)] -pub struct JsonRpcError { - pub code: i64, - pub message: String, -} - -/// Execution layer JSON-RPC client. -pub struct ElClient { - url: Url, - http_client: reqwest::Client, -} - -impl ElClient { - /// Create a new EL client. - pub fn new(url: Url) -> Self { - Self { - url, - http_client: reqwest::Client::new(), - } - } - - /// Fetch a block by hash. Returns the block and its gzipped JSON. - pub async fn get_block_by_hash(&self, block_hash: &str) -> Result)>> { - let request = JsonRpcRequest { - jsonrpc: "2.0", - method: "eth_getBlockByHash", - params: (block_hash, false), - id: 1, - }; - - let response = self - .http_client - .post(self.url.clone()) - .json(&request) - .send() - .await?; - - if !response.status().is_success() { - return Err(Error::Rpc { - code: response.status().as_u16() as i64, - message: response.text().await.unwrap_or_default(), - }); - } - - let rpc_response: JsonRpcResponse = response.json().await?; - - if let Some(error) = rpc_response.error { - return Err(Error::Rpc { - code: error.code, - message: error.message, - }); - } - - match rpc_response.result { - Some(block) => { - let json_bytes = serde_json::to_vec(&block)?; - let gzipped = compress_gzip(&json_bytes)?; - Ok(Some((block, gzipped))) - } - None => Ok(None), - } - } - - /// Fetch execution witness for a block. Returns the witness and its gzipped JSON. 
- pub async fn get_execution_witness( - &self, - block_number: u64, - ) -> Result)>> { - let block_num_hex = format!("0x{:x}", block_number); - let request = JsonRpcRequest { - jsonrpc: "2.0", - method: "debug_executionWitness", - params: (block_num_hex,), - id: 1, - }; - - let response = self - .http_client - .post(self.url.clone()) - .json(&request) - .send() - .await?; - - if !response.status().is_success() { - return Err(Error::Rpc { - code: response.status().as_u16() as i64, - message: response.text().await.unwrap_or_default(), - }); - } - - let rpc_response: JsonRpcResponse = response.json().await?; - - if let Some(error) = rpc_response.error { - return Err(Error::Rpc { - code: error.code, - message: error.message, - }); - } - - match rpc_response.result { - Some(witness) => { - let json_bytes = serde_json::to_vec(&witness)?; - let gzipped = compress_gzip(&json_bytes)?; - Ok(Some((witness, gzipped))) - } - None => Ok(None), - } - } -} - -/// Execution proof to submit to CL nodes. -#[derive(Debug, Clone, Serialize)] -pub struct ExecutionProof { - pub proof_id: u8, - pub slot: u64, - pub block_hash: String, - pub block_root: String, - pub proof_data: Vec, -} - -/// Consensus layer HTTP API client. -#[derive(Clone)] -pub struct ClClient { - url: Url, - http_client: reqwest::Client, -} - -/// Block response with execution payload. -#[derive(Debug, Clone, Deserialize)] -pub struct BlockResponse { - pub data: BlockData, -} - -/// Block data. -#[derive(Debug, Clone, Deserialize)] -pub struct BlockData { - pub message: BlockMessage, -} - -/// Block message. -#[derive(Debug, Clone, Deserialize)] -pub struct BlockMessage { - pub body: BlockBody, -} - -/// Block body. -#[derive(Debug, Clone, Deserialize)] -pub struct BlockBody { - pub execution_payload: Option, -} - -/// Execution payload (minimal fields). -#[derive(Debug, Clone, Deserialize)] -pub struct ExecutionPayload { - pub block_hash: String, -} - -/// Syncing status response. 
-#[derive(Debug, Clone, Deserialize)] -pub struct SyncingResponse { - pub data: SyncingData, -} - -/// Syncing status data. -#[derive(Debug, Clone, Deserialize)] -pub struct SyncingData { - pub head_slot: String, - pub is_syncing: bool, - pub is_optimistic: Option, -} - -/// Block header response. -#[derive(Debug, Clone, Deserialize)] -pub struct BlockHeaderResponse { - pub data: BlockHeaderData, -} - -/// Block header data. -#[derive(Debug, Clone, Deserialize)] -pub struct BlockHeaderData { - pub root: String, -} - -/// Node identity response. -#[derive(Debug, Clone, Deserialize)] -pub struct IdentityResponse { - pub data: IdentityData, -} - -/// Node identity data. -#[derive(Debug, Clone, Deserialize)] -pub struct IdentityData { - pub enr: String, -} - -impl ClClient { - /// Create a new CL client. - pub fn new(url: Url) -> Self { - Self { - url, - http_client: reqwest::Client::new(), - } - } - - /// Get node syncing status. - pub async fn get_syncing(&self) -> Result { - let url = self.url.join("eth/v1/node/syncing")?; - let response = self.http_client.get(url).send().await?; - Ok(response.json().await?) - } - - /// Get block header for a slot. - pub async fn get_block_header(&self, slot: u64) -> Result> { - let url = self.url.join(&format!("eth/v1/beacon/headers/{}", slot))?; - let response = self.http_client.get(url).send().await?; - - if response.status() == reqwest::StatusCode::NOT_FOUND { - return Ok(None); - } - - Ok(Some(response.json().await?)) - } - - /// Submit an execution proof. 
- pub async fn submit_execution_proof(&self, proof: &ExecutionProof) -> Result<()> { - let url = self.url.join("eth/v1/beacon/pool/execution_proofs")?; - - let response = self.http_client.post(url).json(proof).send().await?; - - if !response.status().is_success() { - let status = response.status(); - let body = response.text().await.unwrap_or_default(); - return Err(Error::Rpc { - code: status.as_u16() as i64, - message: body, - }); - } - - Ok(()) - } - - /// Get node identity (including ENR). - pub async fn get_identity(&self) -> Result { - let url = self.url.join("eth/v1/node/identity")?; - let response = self.http_client.get(url).send().await?; - Ok(response.json().await?) - } - - /// Check if the node has zkvm enabled by inspecting its ENR. - pub async fn is_zkvm_enabled(&self) -> Result { - let identity = self.get_identity().await?; - Ok(enr_has_zkvm(&identity.data.enr)) - } - - /// Get the execution block hash for a beacon block. - pub async fn get_block_execution_hash(&self, block_root: &str) -> Result> { - let url = self - .url - .join(&format!("eth/v2/beacon/blocks/{}", block_root))?; - let response = self.http_client.get(url).send().await?; - - if response.status() == reqwest::StatusCode::NOT_FOUND { - return Ok(None); - } - - let block_response: BlockResponse = response.json().await?; - Ok(block_response - .data - .message - .body - .execution_payload - .map(|p| p.block_hash)) - } - - /// Get the current head slot. - pub async fn get_head_slot(&self) -> Result { - let syncing = self.get_syncing().await?; - syncing - .data - .head_slot - .parse() - .map_err(|e| Error::Config(format!("Invalid head slot: {}", e))) - } - - /// Get block info (slot, block_root, execution_block_hash) for a given slot. - /// Returns None if the slot is empty (no block). 
- pub async fn get_block_info(&self, slot: u64) -> Result> { - let url = self.url.join(&format!("eth/v2/beacon/blocks/{}", slot))?; - let response = self.http_client.get(url).send().await?; - - if response.status() == reqwest::StatusCode::NOT_FOUND { - return Ok(None); - } - - if !response.status().is_success() { - return Err(Error::Rpc { - code: response.status().as_u16() as i64, - message: response.text().await.unwrap_or_default(), - }); - } - - let block_response: BlockResponse = response.json().await?; - let execution_block_hash = block_response - .data - .message - .body - .execution_payload - .map(|p| p.block_hash); - - // Get the block root from headers endpoint - let header_url = self.url.join(&format!("eth/v1/beacon/headers/{}", slot))?; - let header_response = self.http_client.get(header_url).send().await?; - - if header_response.status() == reqwest::StatusCode::NOT_FOUND { - return Ok(None); - } - - let header: BlockHeaderResponse = header_response.json().await?; - - Ok(Some(BlockInfo { - slot, - block_root: header.data.root, - execution_block_hash, - })) - } -} - -/// Block info for backfill. -#[derive(Debug, Clone)] -pub struct BlockInfo { - pub slot: u64, - pub block_root: String, - pub execution_block_hash: Option, -} - -/// The ENR field specifying whether zkVM execution proofs are enabled. -const ZKVM_ENABLED_ENR_KEY: &str = "zkvm"; - -/// Check if an ENR string contains the zkvm flag. -fn enr_has_zkvm(enr_str: &str) -> bool { - use discv5::enr::{CombinedKey, Enr}; - use std::str::FromStr; - - match Enr::::from_str(enr_str) { - Ok(enr) => enr - .get_decodable::(ZKVM_ENABLED_ENR_KEY) - .and_then(|result| result.ok()) - .unwrap_or(false), - Err(_) => false, - } -} - -/// Generate random proof bytes. 
-pub fn generate_random_proof(proof_id: u32) -> Vec { - use std::time::{SystemTime, UNIX_EPOCH}; - let seed = SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_nanos() as u64; - - let mut proof = vec![0u8; 32]; - for (i, byte) in proof.iter_mut().enumerate() { - *byte = ((seed >> (i % 8)) ^ (i as u64)) as u8; - } - proof[31] = proof_id as u8; - proof -} diff --git a/execution-witness-sentry/src/storage.rs b/execution-witness-sentry/src/storage.rs deleted file mode 100644 index 58a80d449d4..00000000000 --- a/execution-witness-sentry/src/storage.rs +++ /dev/null @@ -1,248 +0,0 @@ -//! Block data storage utilities. - -use std::io::{Read, Write}; -use std::path::{Path, PathBuf}; - -use alloy_rpc_types_eth::Block; -use flate2::Compression; -use flate2::read::GzDecoder; -use flate2::write::GzEncoder; -use serde::{Deserialize, Serialize}; - -use crate::error::Result; - -/// Metadata stored alongside block data. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct BlockMetadata { - /// EL block hash - pub block_hash: String, - /// EL block number - pub block_number: u64, - /// Gas used in the block - pub gas_used: u64, - /// CL slot number (if known) - #[serde(skip_serializing_if = "Option::is_none")] - pub slot: Option, - /// CL beacon block root (if known) - #[serde(skip_serializing_if = "Option::is_none")] - pub block_root: Option, - /// Number of proofs stored - #[serde(default)] - pub num_proofs: usize, -} - -/// A saved proof that can be loaded for backfill. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct SavedProof { - pub proof_id: u8, - pub slot: u64, - pub block_hash: String, - pub block_root: String, - pub proof_data: Vec, -} - -/// Compress data using gzip. -pub fn compress_gzip(data: &[u8]) -> Result> { - let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); - encoder.write_all(data)?; - Ok(encoder.finish()?) -} - -/// Decompress gzip data. 
-pub fn decompress_gzip(data: &[u8]) -> Result> { - let mut decoder = GzDecoder::new(data); - let mut decompressed = Vec::new(); - decoder.read_to_end(&mut decompressed)?; - Ok(decompressed) -} - -/// Load block data from a gzipped JSON file. -pub fn load_block_data(path: impl AsRef) -> Result { - let compressed = std::fs::read(path)?; - let decompressed = decompress_gzip(&compressed)?; - Ok(serde_json::from_slice(&decompressed)?) -} - -/// Manages block data storage on disk. -pub struct BlockStorage { - output_dir: PathBuf, - chain: String, - retain: Option, -} - -impl BlockStorage { - /// Create a new block storage manager. - pub fn new( - output_dir: impl Into, - chain: impl Into, - retain: Option, - ) -> Self { - Self { - output_dir: output_dir.into(), - chain: chain.into(), - retain, - } - } - - /// Get the directory path for a block number. - pub fn block_dir(&self, block_number: u64) -> PathBuf { - self.output_dir - .join(&self.chain) - .join(block_number.to_string()) - } - - /// Save block data to disk (without CL info - will be updated later). 
- pub fn save_block(&self, block: &Block, combined_data: &[u8]) -> Result<()> { - let block_number = block.header.number; - let block_hash = format!("{:?}", block.header.hash); - let gas_used = block.header.gas_used; - - let block_dir = self.block_dir(block_number); - std::fs::create_dir_all(&block_dir)?; - - // Write metadata (without CL info initially) - let metadata = BlockMetadata { - block_hash, - block_number, - gas_used, - slot: None, - block_root: None, - num_proofs: 0, - }; - let metadata_path = block_dir.join("metadata.json"); - std::fs::write(metadata_path, serde_json::to_string_pretty(&metadata)?)?; - - // Write combined block + witness data - let data_path = block_dir.join("data.json.gz"); - std::fs::write(data_path, combined_data)?; - - // Clean up old blocks if retention is configured - if let Some(retain) = self.retain - && block_number > retain - { - self.delete_old_block(block_number - retain)?; - } - - Ok(()) - } - - /// Save proofs and update metadata with CL info. - /// This is called when we receive CL head event with slot/block_root. - pub fn save_proofs( - &self, - block_number: u64, - slot: u64, - block_root: &str, - block_hash: &str, - proofs: &[SavedProof], - ) -> Result<()> { - let block_dir = self.block_dir(block_number); - - // Create dir if it doesn't exist (in case block wasn't saved yet) - std::fs::create_dir_all(&block_dir)?; - - // Load existing metadata or create new - let metadata_path = block_dir.join("metadata.json"); - let mut metadata = if metadata_path.exists() { - let content = std::fs::read_to_string(&metadata_path)?; - serde_json::from_str(&content)? 
- } else { - BlockMetadata { - block_hash: block_hash.to_string(), - block_number, - gas_used: 0, - slot: None, - block_root: None, - num_proofs: 0, - } - }; - - // Update with CL info - metadata.slot = Some(slot); - metadata.block_root = Some(block_root.to_string()); - metadata.num_proofs = proofs.len(); - - // Save updated metadata - std::fs::write(&metadata_path, serde_json::to_string_pretty(&metadata)?)?; - - // Save proofs - let proofs_path = block_dir.join("proofs.json"); - std::fs::write(&proofs_path, serde_json::to_string_pretty(&proofs)?)?; - - Ok(()) - } - - /// Load proofs for a given slot. - /// Searches for a block directory that has matching slot in metadata. - pub fn load_proofs_by_slot( - &self, - slot: u64, - ) -> Result)>> { - let chain_dir = self.output_dir.join(&self.chain); - if !chain_dir.exists() { - return Ok(None); - } - - // Iterate through block directories to find one with matching slot - for entry in std::fs::read_dir(&chain_dir)? { - let entry = entry?; - let block_dir = entry.path(); - - if !block_dir.is_dir() { - continue; - } - - let metadata_path = block_dir.join("metadata.json"); - if !metadata_path.exists() { - continue; - } - - let content = std::fs::read_to_string(&metadata_path)?; - let metadata: BlockMetadata = match serde_json::from_str(&content) { - Ok(m) => m, - Err(_) => continue, - }; - - if metadata.slot == Some(slot) { - // Found matching slot, load proofs - let proofs_path = block_dir.join("proofs.json"); - if proofs_path.exists() { - let proofs_content = std::fs::read_to_string(&proofs_path)?; - let proofs: Vec = serde_json::from_str(&proofs_content)?; - return Ok(Some((metadata, proofs))); - } else { - return Ok(Some((metadata, vec![]))); - } - } - } - - Ok(None) - } - - /// Load metadata for a given block number. 
- pub fn load_metadata(&self, block_number: u64) -> Result> { - let block_dir = self.block_dir(block_number); - let metadata_path = block_dir.join("metadata.json"); - - if !metadata_path.exists() { - return Ok(None); - } - - let content = std::fs::read_to_string(&metadata_path)?; - Ok(Some(serde_json::from_str(&content)?)) - } - - /// Delete an old block directory. - fn delete_old_block(&self, block_number: u64) -> Result<()> { - let old_dir = self.block_dir(block_number); - if old_dir.exists() { - std::fs::remove_dir_all(old_dir)?; - } - Ok(()) - } - - /// Get the chain directory path. - pub fn chain_dir(&self) -> PathBuf { - self.output_dir.join(&self.chain) - } -} diff --git a/execution-witness-sentry/src/subscription.rs b/execution-witness-sentry/src/subscription.rs deleted file mode 100644 index 3882561590b..00000000000 --- a/execution-witness-sentry/src/subscription.rs +++ /dev/null @@ -1,45 +0,0 @@ -//! WebSocket subscription for new block headers. - -use std::pin::Pin; -use std::task::{Context, Poll}; - -use alloy_provider::{Provider, ProviderBuilder, WsConnect}; -use alloy_rpc_types_eth::Header; -use futures::Stream; - -use crate::error::{Error, Result}; - -/// Subscription stream that keeps the provider alive. -pub struct BlockSubscription

{ - #[allow(dead_code)] - provider: P, - stream: Pin + Send>>, -} - -impl

Unpin for BlockSubscription

{} - -impl Stream for BlockSubscription

{ - type Item = Result

; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.stream.as_mut().poll_next(cx).map(|opt| opt.map(Ok)) - } -} - -/// Subscribe to new block headers via WebSocket. -pub async fn subscribe_blocks(ws_url: &str) -> Result> + Send> { - let ws = WsConnect::new(ws_url); - let provider = ProviderBuilder::new() - .connect_ws(ws) - .await - .map_err(|e| Error::WebSocket(format!("WebSocket connection failed: {}", e)))?; - - let subscription = provider - .subscribe_blocks() - .await - .map_err(|e| Error::WebSocket(format!("Block subscription failed: {}", e)))?; - - let stream = Box::pin(subscription.into_stream()); - - Ok(BlockSubscription { provider, stream }) -} diff --git a/execution-witness-sentry/tests/cl_subscription.rs b/execution-witness-sentry/tests/cl_subscription.rs deleted file mode 100644 index 40d86890eb0..00000000000 --- a/execution-witness-sentry/tests/cl_subscription.rs +++ /dev/null @@ -1,11 +0,0 @@ -use execution_witness_sentry::subscribe_cl_events; - -#[test] -fn subscribe_cl_events_accepts_base_url_without_trailing_slash() { - assert!(subscribe_cl_events("http://localhost:5052").is_ok()); -} - -#[test] -fn subscribe_cl_events_accepts_base_url_with_trailing_slash() { - assert!(subscribe_cl_events("http://localhost:5052/").is_ok()); -} From 6c94a844f7c4dd9cbba38c22c2d5650224a8009d Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Thu, 1 Jan 2026 03:40:58 +0000 Subject: [PATCH 63/67] update cargo.lock --- Cargo.lock | 354 ++--------------------------------------------------- 1 file changed, 9 insertions(+), 345 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 043126349da..5820034a16d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -366,14 +366,12 @@ dependencies = [ "alloy-network", "alloy-network-primitives", "alloy-primitives", - "alloy-pubsub", "alloy-rpc-client", "alloy-rpc-types-eth", "alloy-signer", "alloy-sol-types", "alloy-transport", "alloy-transport-http", - "alloy-transport-ws", "async-stream", 
"async-trait", "auto_impl", @@ -394,28 +392,6 @@ dependencies = [ "wasmtimer", ] -[[package]] -name = "alloy-pubsub" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdd4c64eb250a18101d22ae622357c6b505e158e9165d4c7974d59082a600c5e" -dependencies = [ - "alloy-json-rpc", - "alloy-primitives", - "alloy-transport", - "auto_impl", - "bimap", - "futures", - "parking_lot", - "serde", - "serde_json", - "tokio", - "tokio-stream", - "tower 0.5.2", - "tracing", - "wasmtimer", -] - [[package]] name = "alloy-rlp" version = "0.3.12" @@ -446,10 +422,8 @@ checksum = "d0882e72d2c1c0c79dcf4ab60a67472d3f009a949f774d4c17d0bdb669cfde05" dependencies = [ "alloy-json-rpc", "alloy-primitives", - "alloy-pubsub", "alloy-transport", "alloy-transport-http", - "alloy-transport-ws", "futures", "pin-project", "reqwest", @@ -645,24 +619,6 @@ dependencies = [ "url", ] -[[package]] -name = "alloy-transport-ws" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ad2344a12398d7105e3722c9b7a7044ea837128e11d453604dec6e3731a86e2" -dependencies = [ - "alloy-pubsub", - "alloy-transport", - "futures", - "http 1.3.1", - "rustls 0.23.35", - "serde_json", - "tokio", - "tokio-tungstenite", - "tracing", - "ws_stream_wasm", -] - [[package]] name = "alloy-trie" version = "0.9.1" @@ -1125,17 +1081,6 @@ dependencies = [ "syn 2.0.110", ] -[[package]] -name = "async_io_stream" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" -dependencies = [ - "futures", - "pharos", - "rustc_version 0.4.1", -] - [[package]] name = "asynchronous-codec" version = "0.7.0" @@ -1428,12 +1373,6 @@ dependencies = [ "types", ] -[[package]] -name = "bimap" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" - [[package]] 
name = "bincode" version = "1.3.3" @@ -3437,22 +3376,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "eventsource-client" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43ddc25e1ad2cc0106d5e2d967397b4fb2068a66677ee9b0eea4600e5cfe8fb4" -dependencies = [ - "futures", - "hyper 0.14.32", - "hyper-rustls 0.24.2", - "hyper-timeout 0.4.1", - "log", - "pin-project", - "rand 0.8.5", - "tokio", -] - [[package]] name = "eventsource-stream" version = "0.2.3" @@ -3464,29 +3387,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "execution-witness-sentry" -version = "0.1.0" -dependencies = [ - "alloy-provider", - "alloy-rpc-types-eth", - "anyhow", - "clap", - "discv5", - "eventsource-client", - "flate2", - "futures", - "reqwest", - "serde", - "serde_json", - "thiserror 1.0.69", - "tokio", - "toml", - "tracing", - "tracing-subscriber", - "url", -] - [[package]] name = "execution_engine_integration" version = "0.1.0" @@ -4504,22 +4404,6 @@ dependencies = [ "want", ] -[[package]] -name = "hyper-rustls" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" -dependencies = [ - "futures-util", - "http 0.2.12", - "hyper 0.14.32", - "log", - "rustls 0.21.12", - "rustls-native-certs 0.6.3", - "tokio", - "tokio-rustls 0.24.1", -] - [[package]] name = "hyper-rustls" version = "0.27.7" @@ -4534,19 +4418,7 @@ dependencies = [ "tokio", "tokio-rustls 0.26.4", "tower-service", - "webpki-roots 1.0.4", -] - -[[package]] -name = "hyper-timeout" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" -dependencies = [ - "hyper 0.14.32", - "pin-project-lite", - "tokio", - "tokio-io-timeout", + "webpki-roots", ] [[package]] @@ -6979,16 +6851,6 @@ dependencies = [ "ucd-trie", ] -[[package]] -name = 
"pharos" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" -dependencies = [ - "futures", - "rustc_version 0.4.1", -] - [[package]] name = "pin-project" version = "1.1.10" @@ -7199,7 +7061,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit 0.23.7", + "toml_edit", ] [[package]] @@ -7711,7 +7573,7 @@ dependencies = [ "http-body 1.0.1", "http-body-util", "hyper 1.8.1", - "hyper-rustls 0.27.7", + "hyper-rustls", "hyper-tls", "hyper-util", "js-sys", @@ -7738,7 +7600,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 1.0.4", + "webpki-roots", ] [[package]] @@ -7985,18 +7847,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "rustls" -version = "0.21.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" -dependencies = [ - "log", - "ring", - "rustls-webpki 0.101.7", - "sct", -] - [[package]] name = "rustls" version = "0.22.4" @@ -8026,18 +7876,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rustls-native-certs" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" -dependencies = [ - "openssl-probe", - "rustls-pemfile 1.0.4", - "schannel", - "security-framework 2.11.1", -] - [[package]] name = "rustls-native-certs" version = "0.8.2" @@ -8050,15 +7888,6 @@ dependencies = [ "security-framework 3.5.1", ] -[[package]] -name = "rustls-pemfile" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", -] - [[package]] 
name = "rustls-pemfile" version = "2.2.0" @@ -8078,16 +7907,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rustls-webpki" -version = "0.101.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "rustls-webpki" version = "0.102.8" @@ -8235,16 +8054,6 @@ dependencies = [ "sha2 0.9.9", ] -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "sec1" version = "0.7.3" @@ -8345,12 +8154,6 @@ dependencies = [ "pest", ] -[[package]] -name = "send_wrapper" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" - [[package]] name = "sensitive_url" version = "0.1.0" @@ -8435,15 +8238,6 @@ dependencies = [ "syn 2.0.110", ] -[[package]] -name = "serde_spanned" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" -dependencies = [ - "serde", -] - [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -9379,16 +9173,6 @@ dependencies = [ "windows-sys 0.61.2", ] -[[package]] -name = "tokio-io-timeout" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bd86198d9ee903fedd2f9a2e72014287c0d9167e4ae43b5853007205dda1b76" -dependencies = [ - "pin-project-lite", - "tokio", -] - [[package]] name = "tokio-macros" version = "2.6.0" @@ -9410,16 +9194,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.12", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.25.0" @@ -9453,22 +9227,6 @@ dependencies = [ "tokio-util", ] -[[package]] -name = "tokio-tungstenite" -version = "0.26.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a9daff607c6d2bf6c16fd681ccb7eecc83e4e2cdc1ca067ffaadfca5de7f084" -dependencies = [ - "futures-util", - "log", - "rustls 0.23.35", - "rustls-pki-types", - "tokio", - "tokio-rustls 0.26.4", - "tungstenite", - "webpki-roots 0.26.11", -] - [[package]] name = "tokio-util" version = "0.7.17" @@ -9484,27 +9242,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "toml" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime 0.6.11", - "toml_edit 0.22.27", -] - -[[package]] -name = "toml_datetime" -version = "0.6.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" -dependencies = [ - "serde", -] - [[package]] name = "toml_datetime" version = "0.7.3" @@ -9514,20 +9251,6 @@ dependencies = [ "serde_core", ] -[[package]] -name = "toml_edit" -version = "0.22.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" -dependencies = [ - "indexmap 2.12.0", - "serde", - "serde_spanned", - "toml_datetime 0.6.11", - "toml_write", - "winnow", -] - [[package]] name = "toml_edit" version = "0.23.7" @@ -9535,7 +9258,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" dependencies = [ "indexmap 2.12.0", - "toml_datetime 0.7.3", + "toml_datetime", "toml_parser", "winnow", ] 
@@ -9549,12 +9272,6 @@ dependencies = [ "winnow", ] -[[package]] -name = "toml_write" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" - [[package]] name = "tonic" version = "0.12.3" @@ -9571,7 +9288,7 @@ dependencies = [ "http-body 1.0.1", "http-body-util", "hyper 1.8.1", - "hyper-timeout 0.5.2", + "hyper-timeout", "hyper-util", "percent-encoding", "pin-project", @@ -9598,12 +9315,12 @@ dependencies = [ "http-body 1.0.1", "http-body-util", "hyper 1.8.1", - "hyper-timeout 0.5.2", + "hyper-timeout", "hyper-util", "percent-encoding", "pin-project", "prost", - "rustls-native-certs 0.8.2", + "rustls-native-certs", "tokio", "tokio-rustls 0.26.4", "tokio-stream", @@ -9838,25 +9555,6 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" -[[package]] -name = "tungstenite" -version = "0.26.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4793cb5e56680ecbb1d843515b23b6de9a75eb04b66643e256a396d43be33c13" -dependencies = [ - "bytes", - "data-encoding", - "http 1.3.1", - "httparse", - "log", - "rand 0.9.2", - "rustls 0.23.35", - "rustls-pki-types", - "sha1", - "thiserror 2.0.17", - "utf-8", -] - [[package]] name = "typenum" version = "1.19.0" @@ -10036,12 +9734,6 @@ dependencies = [ "serde", ] -[[package]] -name = "utf-8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" - [[package]] name = "utf8_iter" version = "1.0.4" @@ -10351,7 +10043,7 @@ dependencies = [ "mime_guess", "percent-encoding", "pin-project", - "rustls-pemfile 2.2.0", + "rustls-pemfile", "scoped-tls", "serde", "serde_json", @@ -10532,15 +10224,6 @@ dependencies = [ "zip", ] -[[package]] -name = "webpki-roots" -version = "0.26.11" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" -dependencies = [ - "webpki-roots 1.0.4", -] - [[package]] name = "webpki-roots" version = "1.0.4" @@ -11041,25 +10724,6 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" -[[package]] -name = "ws_stream_wasm" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c173014acad22e83f16403ee360115b38846fe754e735c5d9d3803fe70c6abc" -dependencies = [ - "async_io_stream", - "futures", - "js-sys", - "log", - "pharos", - "rustc_version 0.4.1", - "send_wrapper", - "thiserror 2.0.17", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - [[package]] name = "wyz" version = "0.5.1" From 6a1883699ac457f473f5ee108673251b801cb755 Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Thu, 1 Jan 2026 04:31:59 +0000 Subject: [PATCH 64/67] cache proof even before block arrives --- .../src/data_availability_checker.rs | 65 +++++++------- .../overflow_lru_cache.rs | 89 +++++++++++++++++-- 2 files changed, 118 insertions(+), 36 deletions(-) diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 8359de354d9..bc658d12a44 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -406,43 +406,46 @@ impl DataAvailabilityChecker { .put_verified_execution_proofs(block_root, owned_proofs); }; - // Get the execution payload hash from the block - let execution_payload_hash = self - .availability_cache - .peek_pending_components(&block_root, |components| { - components.and_then(|c| c.block.as_ref().and_then(|b| b.execution_payload_hash())) - }) - .ok_or_else(|| { - warn!( - ?block_root, - "Cannot verify proofs: block not in cache or has no 
execution payload" - ); - AvailabilityCheckError::MissingExecutionPayload - })?; + // Get the execution payload hash from the block, if it is already cached. + let execution_payload_hash = + self.availability_cache + .peek_pending_components(&block_root, |components| { + components + .and_then(|c| c.block.as_ref().and_then(|b| b.execution_payload_hash())) + }); - debug!( - ?block_root, - ?execution_payload_hash, - "Got execution payload hash for proof verification" - ); + if let Some(execution_payload_hash) = execution_payload_hash { + debug!( + ?block_root, + ?execution_payload_hash, + "Got execution payload hash for proof verification" + ); + } else { + debug!( + ?block_root, + "Execution payload hash not available yet, deferring block hash check" + ); + } let mut verified_proofs = Vec::new(); for proof in proofs { let proof_id = proof.proof_id; - // Check that the proof's block_hash matches the execution payload hash - if proof.block_hash != execution_payload_hash { - warn!( - ?block_root, - ?proof_id, - proof_hash = ?proof.block_hash, - ?execution_payload_hash, - "Proof execution payload hash mismatch" - ); - return Err(AvailabilityCheckError::ExecutionPayloadHashMismatch { - proof_hash: proof.block_hash, - block_hash: execution_payload_hash, - }); + // If we have the block, check that the proof's block_hash matches the payload hash. 
+ if let Some(execution_payload_hash) = execution_payload_hash { + if proof.block_hash != execution_payload_hash { + warn!( + ?block_root, + ?proof_id, + proof_hash = ?proof.block_hash, + ?execution_payload_hash, + "Proof execution payload hash mismatch" + ); + return Err(AvailabilityCheckError::ExecutionPayloadHashMismatch { + proof_hash: proof.block_hash, + block_hash: execution_payload_hash, + }); + } } let verifier = verifier_registry.get_verifier(proof_id).ok_or_else(|| { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index b5eba1c4a5f..398fc8759b6 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -227,20 +227,53 @@ impl PendingComponents { self.verified_execution_proofs.len() } + fn execution_payload_hash(&self) -> Option { + self.block + .as_ref() + .and_then(|block| block.execution_payload_hash()) + } + + fn retain_matching_execution_proofs(&mut self) { + let Some(expected_hash) = self.execution_payload_hash() else { + return; + }; + + let before = self.verified_execution_proofs.len(); + self.verified_execution_proofs + .retain(|proof| proof.block_hash == expected_hash); + let after = self.verified_execution_proofs.len(); + if before != after { + debug!( + ?expected_hash, + dropped = before - after, + "Dropped execution proofs with mismatched payload hash" + ); + } + } + /// Merges a single execution proof into the cache. /// /// Proofs are only inserted if: /// 1. We don't already have a proof from this subnet for this block - /// 2. The proof's block_hash matches the cached block_root (if block exists) + /// 2. 
The proof's block_hash matches the cached block payload hash (if block exists) pub fn merge_execution_proof(&mut self, proof: types::ExecutionProof) { - // Verify the proof is for the correct block - // ExecutionBlockHash is a wrapper around Hash256, so we need to convert - // Don't insert duplicate proofs if self.has_proof_with_id(proof.proof_id) { return; } + if let Some(expected_hash) = self.execution_payload_hash() { + if proof.block_hash != expected_hash { + debug!( + ?expected_hash, + proof_hash = ?proof.block_hash, + proof_id = ?proof.proof_id, + "Execution proof payload hash mismatch" + ); + return; + } + } + self.verified_execution_proofs.push(proof); } @@ -259,6 +292,7 @@ impl PendingComponents { /// Blobs that don't match the new block's commitments are evicted. pub fn merge_block(&mut self, block: DietAvailabilityPendingExecutedBlock) { self.insert_executed_block(block); + self.retain_matching_execution_proofs(); let reinsert = self.get_cached_blobs_mut().take(); self.merge_blobs(reinsert); } @@ -1414,7 +1448,10 @@ mod pending_components_tests { use rand::rngs::StdRng; use state_processing::ConsensusContext; use types::test_utils::TestRandom; - use types::{BeaconState, ForkName, MainnetEthSpec, SignedBeaconBlock, Slot}; + use types::{ + BeaconState, ExecutionBlockHash, ExecutionProof, ExecutionProofId, ForkName, + MainnetEthSpec, SignedBeaconBlock, Slot, + }; type E = MainnetEthSpec; @@ -1609,6 +1646,48 @@ mod pending_components_tests { assert_cache_consistent(cache, max_len); } + #[test] + fn execution_proofs_filtered_on_block_merge() { + let (block, blobs, invalid_blobs, max_len) = pre_setup(); + let block_root = block.canonical_root(); + let slot = block.slot(); + let payload_hash = block + .message() + .body() + .execution_payload() + .expect("block has execution payload") + .execution_payload_ref() + .block_hash(); + + let proof_id_0 = ExecutionProofId::new(0).expect("proof id 0 is valid"); + let proof_id_1 = ExecutionProofId::new(1).expect("proof 
id 1 is valid"); + + let proof_ok = + ExecutionProof::new(proof_id_0, slot, payload_hash, block_root, vec![1, 2, 3]) + .expect("valid proof"); + let proof_bad = ExecutionProof::new( + proof_id_1, + slot, + ExecutionBlockHash::repeat_byte(42), + block_root, + vec![4, 5, 6], + ) + .expect("valid proof with mismatched hash"); + + let (pending_block, _, _) = setup_pending_components(block, blobs, invalid_blobs); + let mut cache = >::empty(block_root, max_len); + + cache.merge_execution_proofs(vec![proof_ok, proof_bad]); + assert_eq!(cache.execution_proof_subnet_count(), 2); + + cache.merge_block(pending_block); + assert_eq!(cache.execution_proof_subnet_count(), 1); + assert_eq!( + cache.get_cached_execution_proofs()[0].block_hash, + payload_hash + ); + } + #[test] fn should_not_insert_pre_execution_block_if_executed_block_exists() { let (pre_execution_block, blobs, random_blobs, max_len) = pre_setup(); From 93766c6e29905c20577610c9885a36e76fd29d9f Mon Sep 17 00:00:00 2001 From: Kevaundray Wedderburn Date: Thu, 1 Jan 2026 05:21:33 +0000 Subject: [PATCH 65/67] lint --- .../src/data_availability_checker.rs | 28 +++++++++---------- .../overflow_lru_cache.rs | 20 ++++++------- zkvm_execution_layer/src/bin/dummy-prover.rs | 9 ++---- 3 files changed, 27 insertions(+), 30 deletions(-) diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index bc658d12a44..90c5b836286 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -432,20 +432,20 @@ impl DataAvailabilityChecker { let proof_id = proof.proof_id; // If we have the block, check that the proof's block_hash matches the payload hash. 
- if let Some(execution_payload_hash) = execution_payload_hash { - if proof.block_hash != execution_payload_hash { - warn!( - ?block_root, - ?proof_id, - proof_hash = ?proof.block_hash, - ?execution_payload_hash, - "Proof execution payload hash mismatch" - ); - return Err(AvailabilityCheckError::ExecutionPayloadHashMismatch { - proof_hash: proof.block_hash, - block_hash: execution_payload_hash, - }); - } + if let Some(execution_payload_hash) = execution_payload_hash + && proof.block_hash != execution_payload_hash + { + warn!( + ?block_root, + ?proof_id, + proof_hash = ?proof.block_hash, + ?execution_payload_hash, + "Proof execution payload hash mismatch" + ); + return Err(AvailabilityCheckError::ExecutionPayloadHashMismatch { + proof_hash: proof.block_hash, + block_hash: execution_payload_hash, + }); } let verifier = verifier_registry.get_verifier(proof_id).ok_or_else(|| { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 398fc8759b6..90e9c6b3b03 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -262,16 +262,16 @@ impl PendingComponents { return; } - if let Some(expected_hash) = self.execution_payload_hash() { - if proof.block_hash != expected_hash { - debug!( - ?expected_hash, - proof_hash = ?proof.block_hash, - proof_id = ?proof.proof_id, - "Execution proof payload hash mismatch" - ); - return; - } + if let Some(expected_hash) = self.execution_payload_hash() + && proof.block_hash != expected_hash + { + debug!( + ?expected_hash, + proof_hash = ?proof.block_hash, + proof_id = ?proof.proof_id, + "Execution proof payload hash mismatch" + ); + return; } self.verified_execution_proofs.push(proof); diff --git a/zkvm_execution_layer/src/bin/dummy-prover.rs b/zkvm_execution_layer/src/bin/dummy-prover.rs index 
aa35c8ade7c..32e0e0124f3 100644 --- a/zkvm_execution_layer/src/bin/dummy-prover.rs +++ b/zkvm_execution_layer/src/bin/dummy-prover.rs @@ -185,12 +185,9 @@ impl Prover { } } - let payload = match block.message().body().execution_payload() { - Ok(payload) => payload, - Err(_) => { - debug!(?block_id, ?slot, "Block has no execution payload"); - return None; - } + let Ok(payload) = block.message().body().execution_payload() else { + debug!(?block_id, ?slot, "Block has no execution payload"); + return None; }; Some(BlockProofInputs { From d53598382860a3ecf6b487a58b6305ba7be307b5 Mon Sep 17 00:00:00 2001 From: Han Date: Thu, 5 Feb 2026 23:35:55 +0900 Subject: [PATCH 66/67] Merge pull request #13 from han0110/optional-proofs Recompute head and nofity block is imported --- .github/workflows/docker.yml | 59 +++++++++++-------------- beacon_node/http_api/src/beacon/pool.rs | 29 +++++++++--- beacon_node/http_api/src/lib.rs | 19 +++++++- beacon_node/http_api/src/utils.rs | 4 +- beacon_node/network/src/lib.rs | 1 + beacon_node/network/src/router.rs | 5 +-- beacon_node/network/src/service.rs | 13 ++++++ 7 files changed, 86 insertions(+), 44 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 415f4db0e67..de17cc00e77 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -3,6 +3,7 @@ name: docker on: push: branches: + - optional-proofs - unstable - stable tags: @@ -13,10 +14,9 @@ concurrency: cancel-in-progress: true env: - DOCKER_PASSWORD: ${{ secrets.DH_KEY }} - DOCKER_USERNAME: ${{ secrets.DH_ORG }} # Enable self-hosted runners for the sigp repo only. 
SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }} + REGISTRY: ghcr.io jobs: # Extract the VERSION which is either `latest` or `vX.Y.Z`, and the VERSION_SUFFIX @@ -38,6 +38,11 @@ jobs: run: | echo "VERSION=latest" >> $GITHUB_ENV echo "VERSION_SUFFIX=-unstable" >> $GITHUB_ENV + - name: Extract version (if optional-proofs) + if: github.event.ref == 'refs/heads/optional-proofs' + run: | + echo "VERSION=latest" >> $GITHUB_ENV + echo "VERSION_SUFFIX=-optional-proofs" >> $GITHUB_ENV - name: Extract version (if tagged release) if: startsWith(github.event.ref, 'refs/tags') run: | @@ -52,8 +57,7 @@ jobs: runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release"]') || 'ubuntu-22.04' }} strategy: matrix: - binary: [lighthouse, - lcli] + binary: [lighthouse] cpu_arch: [aarch64, x86_64] include: @@ -68,9 +72,12 @@ jobs: - name: Update Rust if: env.SELF_HOSTED_RUNNERS == 'false' run: rustup update stable - - name: Dockerhub login - run: | - echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - name: Sets env vars for Lighthouse if: startsWith(matrix.binary, 'lighthouse') @@ -82,11 +89,6 @@ jobs: run: | echo "MAKE_CMD=build-${{ matrix.cpu_arch }}" >> $GITHUB_ENV - - name: Set `make` command for lcli - if: startsWith(matrix.binary, 'lcli') - run: | - echo "MAKE_CMD=build-lcli-${{ matrix.cpu_arch }}" >> $GITHUB_ENV - - name: Cross build binaries run: | cargo install cross @@ -123,28 +125,14 @@ jobs: platforms: linux/${{ env.SHORT_ARCH }} push: true tags: | - ${{ github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }} - - - name: Build and push (lcli) - if: startsWith(matrix.binary, 'lcli') - uses: docker/build-push-action@v5 - with: - file: 
./lcli/Dockerfile.cross - context: . - platforms: linux/${{ env.SHORT_ARCH }} - push: true - - tags: | - ${{ github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }} - + ${{ env.REGISTRY }}/${{ github.repository }}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }} build-docker-multiarch: name: build-docker-${{ matrix.binary }}-multiarch runs-on: ubuntu-22.04 strategy: matrix: - binary: [lighthouse, - lcli] + binary: [lighthouse] needs: [build-docker-single-arch, extract-version] env: VERSION: ${{ needs.extract-version.outputs.VERSION }} @@ -153,13 +141,16 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - - name: Dockerhub login - run: | - echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - name: Create and push multiarch manifests run: | - docker buildx imagetools create -t ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}${VERSION_SUFFIX} \ - ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}-arm64${VERSION_SUFFIX} \ - ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}-amd64${VERSION_SUFFIX}; + docker buildx imagetools create -t ${{ env.REGISTRY }}/${{ github.repository }}/${{ matrix.binary }}:${VERSION}${VERSION_SUFFIX} \ + ${{ env.REGISTRY }}/${{ github.repository }}/${{ matrix.binary }}:${VERSION}-arm64${VERSION_SUFFIX} \ + ${{ env.REGISTRY }}/${{ github.repository }}/${{ matrix.binary }}:${VERSION}-amd64${VERSION_SUFFIX}; diff --git a/beacon_node/http_api/src/beacon/pool.rs b/beacon_node/http_api/src/beacon/pool.rs index 50a257db01b..3c4021fa611 100644 --- a/beacon_node/http_api/src/beacon/pool.rs +++ b/beacon_node/http_api/src/beacon/pool.rs @@ -1,5 +1,7 @@ use 
crate::task_spawner::{Priority, TaskSpawner}; -use crate::utils::{NetworkTxFilter, OptionalConsensusVersionHeaderFilter, ResponseFilter}; +use crate::utils::{ + NetworkTxFilter, OptionalConsensusVersionHeaderFilter, ResponseFilter, SyncTxFilter, +}; use crate::version::{ ResponseIncludesVersion, V1, V2, add_consensus_version_header, beacon_response, unsupported_version_rejection, @@ -10,10 +12,10 @@ use beacon_chain::execution_proof_verification::{ }; use beacon_chain::observed_data_sidecars::Observe; use beacon_chain::observed_operations::ObservationOutcome; -use beacon_chain::{BeaconChain, BeaconChainTypes}; +use beacon_chain::{AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes}; use eth2::types::{AttestationPoolQuery, EndpointVersion, Failure, GenericResponse}; use lighthouse_network::PubsubMessage; -use network::NetworkMessage; +use network::{NetworkMessage, SyncMessage}; use operation_pool::ReceivedPreCapella; use slot_clock::SlotClock; use std::collections::HashSet; @@ -533,6 +535,7 @@ pub fn post_beacon_pool_attestations_v2( /// If the proof makes a block available, the block will be imported. 
pub fn post_beacon_pool_execution_proofs( network_tx_filter: &NetworkTxFilter, + sync_tx_filter: &SyncTxFilter, beacon_pool_path: &BeaconPoolPathFilter, ) -> ResponseFilter { beacon_pool_path @@ -541,12 +544,15 @@ pub fn post_beacon_pool_execution_proofs( .and(warp::path::end()) .and(warp_utils::json::json()) .and(network_tx_filter.clone()) + .and(sync_tx_filter.clone()) .then( |_task_spawner: TaskSpawner, chain: Arc>, proof: ExecutionProof, - network_tx: UnboundedSender>| async move { - let result = publish_execution_proof(chain, proof, network_tx).await; + network_tx_filter: UnboundedSender>, + sync_tx_filter: UnboundedSender>| async move { + let result = + publish_execution_proof(chain, proof, network_tx_filter, sync_tx_filter).await; convert_rejection(result.map(|()| warp::reply::json(&()))).await }, ) @@ -558,6 +564,7 @@ async fn publish_execution_proof( chain: Arc>, proof: ExecutionProof, network_tx: UnboundedSender>, + sync_tx: UnboundedSender>, ) -> Result<(), warp::Rejection> { let proof = Arc::new(proof); @@ -614,6 +621,18 @@ async fn publish_execution_proof( ?status, "Execution proof submitted and published" ); + + if let AvailabilityProcessingStatus::Imported(_) = status { + chain.recompute_head_at_current_slot().await; + + // Notify that block was imported via HTTP API + if let Err(e) = sync_tx.send(SyncMessage::GossipBlockProcessResult { + block_root, + imported: true, + }) { + debug!(error = %e, "Could not send message to the sync service") + }; + } } Err(e) => { // Log the error but don't fail the request - the proof was already diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 01c5314b1de..2395a5235c4 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -447,6 +447,23 @@ pub fn serve( }) .boxed(); + // Create a `warp` filter that provides access to the sync sender channel. 
+ let sync_tx = ctx + .network_senders + .as_ref() + .map(|senders| senders.sync_send()); + let sync_tx_filter = warp::any() + .map(move || sync_tx.clone()) + .and_then(|sync_tx| async move { + match sync_tx { + Some(sync_tx) => Ok(sync_tx), + None => Err(warp_utils::reject::custom_not_found( + "The networking stack has not yet started (sync_tx).".to_string(), + )), + } + }) + .boxed(); + // Create a `warp` filter that rejects requests whilst the node is syncing. let not_while_syncing_filter = warp::any() @@ -1515,7 +1532,7 @@ pub fn serve( // POST beacon/pool/execution_proofs let post_beacon_pool_execution_proofs = - post_beacon_pool_execution_proofs(&network_tx_filter, &beacon_pool_path); + post_beacon_pool_execution_proofs(&network_tx_filter, &sync_tx_filter, &beacon_pool_path); let beacon_rewards_path = eth_v1 .clone() diff --git a/beacon_node/http_api/src/utils.rs b/beacon_node/http_api/src/utils.rs index f2b859ebe59..4dfba8a8636 100644 --- a/beacon_node/http_api/src/utils.rs +++ b/beacon_node/http_api/src/utils.rs @@ -3,7 +3,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2::types::EndpointVersion; use lighthouse_network::PubsubMessage; use lighthouse_network::rpc::methods::MetaData; -use network::{NetworkMessage, ValidatorSubscriptionMessage}; +use network::{NetworkMessage, SyncMessage, ValidatorSubscriptionMessage}; use parking_lot::RwLock; use std::sync::Arc; use tokio::sync::mpsc::{Sender, UnboundedSender}; @@ -20,6 +20,8 @@ pub type TaskSpawnerFilter = BoxedFilter<(TaskSpawner< pub type ValidatorSubscriptionTxFilter = BoxedFilter<(Sender,)>; pub type NetworkTxFilter = BoxedFilter<(UnboundedSender::EthSpec>>,)>; +pub type SyncTxFilter = + BoxedFilter<(UnboundedSender::EthSpec>>,)>; pub type OptionalConsensusVersionHeaderFilter = BoxedFilter<(Option,)>; pub fn from_meta_data( diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index 2a7fedb53e9..d6b4303d04f 100644 --- a/beacon_node/network/src/lib.rs +++ 
b/beacon_node/network/src/lib.rs @@ -14,3 +14,4 @@ pub use lighthouse_network::NetworkConfig; pub use service::{ NetworkMessage, NetworkReceivers, NetworkSenders, NetworkService, ValidatorSubscriptionMessage, }; +pub use sync::manager::SyncMessage; diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index f5bf65c9777..6ccfb55ddee 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -88,14 +88,13 @@ impl Router { invalid_block_storage: InvalidBlockStorage, beacon_processor_send: BeaconProcessorSend, fork_context: Arc, + sync_send: mpsc::UnboundedSender>, + sync_recv: mpsc::UnboundedReceiver>, ) -> Result>, String> { trace!("Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); - // generate the message channel - let (sync_send, sync_recv) = mpsc::unbounded_channel::>(); - let network_beacon_processor = NetworkBeaconProcessor { beacon_processor_send, duplicate_cache: DuplicateCache::default(), diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 0869b442aec..dcb1fd5a507 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -5,6 +5,7 @@ use crate::network_beacon_processor::InvalidBlockStorage; use crate::persisted_dht::{clear_dht, load_dht, persist_dht}; use crate::router::{Router, RouterMessage}; use crate::subnet_service::{SubnetService, SubnetServiceMessage, Subscription}; +use crate::sync::manager::SyncMessage; use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_processor::BeaconProcessorSend; use futures::channel::mpsc::Sender; @@ -138,11 +139,13 @@ pub enum ValidatorSubscriptionMessage { pub struct NetworkSenders { network_send: mpsc::UnboundedSender>, validator_subscription_send: mpsc::Sender, + sync_send: mpsc::UnboundedSender>, } pub struct NetworkReceivers { pub network_recv: mpsc::UnboundedReceiver>, pub validator_subscription_recv: mpsc::Receiver, + pub sync_recv: 
mpsc::UnboundedReceiver>, } impl NetworkSenders { @@ -150,13 +153,16 @@ impl NetworkSenders { let (network_send, network_recv) = mpsc::unbounded_channel::>(); let (validator_subscription_send, validator_subscription_recv) = mpsc::channel(VALIDATOR_SUBSCRIPTION_MESSAGE_QUEUE_SIZE); + let (sync_send, sync_recv) = mpsc::unbounded_channel::>(); let senders = Self { network_send, validator_subscription_send, + sync_send, }; let receivers = NetworkReceivers { network_recv, validator_subscription_recv, + sync_recv, }; (senders, receivers) } @@ -168,6 +174,10 @@ impl NetworkSenders { pub fn validator_subscription_send(&self) -> mpsc::Sender { self.validator_subscription_send.clone() } + + pub fn sync_send(&self) -> mpsc::UnboundedSender> { + self.sync_send.clone() + } } /// Service that handles communication between internal services and the `lighthouse_network` network service. @@ -320,6 +330,8 @@ impl NetworkService { invalid_block_storage, beacon_processor_send, fork_context.clone(), + network_senders.sync_send(), + network_receivers.sync_recv, )?; // attestation and sync committee subnet service @@ -338,6 +350,7 @@ impl NetworkService { let NetworkReceivers { network_recv, validator_subscription_recv, + sync_recv: _, } = network_receivers; // create the network service and spawn the task From 38a0a89684c5ae4af346d52bbc1b807cc678c38d Mon Sep 17 00:00:00 2001 From: Han Date: Fri, 6 Feb 2026 01:07:48 +0900 Subject: [PATCH 67/67] ci: give permission to publish image (#14) --- .github/workflows/docker.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index de17cc00e77..5b776bcabab 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -53,6 +53,9 @@ jobs: VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }} build-docker-single-arch: name: build-docker-${{ matrix.binary }}-${{ matrix.cpu_arch }}${{ matrix.features.version_suffix }} + permissions: + contents: read + packages: write # Use 
self-hosted runners only on the sigp repo. runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release"]') || 'ubuntu-22.04' }} strategy: @@ -129,6 +132,9 @@ jobs: build-docker-multiarch: name: build-docker-${{ matrix.binary }}-multiarch + permissions: + contents: read + packages: write runs-on: ubuntu-22.04 strategy: matrix: