diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 415f4db0e67..5b776bcabab 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -3,6 +3,7 @@ name: docker on: push: branches: + - optional-proofs - unstable - stable tags: @@ -13,10 +14,9 @@ concurrency: cancel-in-progress: true env: - DOCKER_PASSWORD: ${{ secrets.DH_KEY }} - DOCKER_USERNAME: ${{ secrets.DH_ORG }} # Enable self-hosted runners for the sigp repo only. SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }} + REGISTRY: ghcr.io jobs: # Extract the VERSION which is either `latest` or `vX.Y.Z`, and the VERSION_SUFFIX @@ -38,6 +38,11 @@ jobs: run: | echo "VERSION=latest" >> $GITHUB_ENV echo "VERSION_SUFFIX=-unstable" >> $GITHUB_ENV + - name: Extract version (if optional-proofs) + if: github.event.ref == 'refs/heads/optional-proofs' + run: | + echo "VERSION=latest" >> $GITHUB_ENV + echo "VERSION_SUFFIX=-optional-proofs" >> $GITHUB_ENV - name: Extract version (if tagged release) if: startsWith(github.event.ref, 'refs/tags') run: | @@ -48,12 +53,14 @@ jobs: VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }} build-docker-single-arch: name: build-docker-${{ matrix.binary }}-${{ matrix.cpu_arch }}${{ matrix.features.version_suffix }} + permissions: + contents: read + packages: write # Use self-hosted runners only on the sigp repo. runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release"]') || 'ubuntu-22.04' }} strategy: matrix: - binary: [lighthouse, - lcli] + binary: [lighthouse] cpu_arch: [aarch64, x86_64] include: @@ -68,9 +75,12 @@ jobs: - name: Update Rust if: env.SELF_HOSTED_RUNNERS == 'false' run: rustup update stable - - name: Dockerhub login - run: | - echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - name: Sets env vars for Lighthouse if: startsWith(matrix.binary, 'lighthouse') @@ -82,11 +92,6 @@ jobs: run: | echo "MAKE_CMD=build-${{ matrix.cpu_arch }}" >> $GITHUB_ENV - - name: Set `make` command for lcli - if: startsWith(matrix.binary, 'lcli') - run: | - echo "MAKE_CMD=build-lcli-${{ matrix.cpu_arch }}" >> $GITHUB_ENV - - name: Cross build binaries run: | cargo install cross @@ -123,28 +128,17 @@ jobs: platforms: linux/${{ env.SHORT_ARCH }} push: true tags: | - ${{ github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }} - - - name: Build and push (lcli) - if: startsWith(matrix.binary, 'lcli') - uses: docker/build-push-action@v5 - with: - file: ./lcli/Dockerfile.cross - context: . 
- platforms: linux/${{ env.SHORT_ARCH }} - push: true - - tags: | - ${{ github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }} - + ${{ env.REGISTRY }}/${{ github.repository }}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }} build-docker-multiarch: name: build-docker-${{ matrix.binary }}-multiarch + permissions: + contents: read + packages: write runs-on: ubuntu-22.04 strategy: matrix: - binary: [lighthouse, - lcli] + binary: [lighthouse] needs: [build-docker-single-arch, extract-version] env: VERSION: ${{ needs.extract-version.outputs.VERSION }} @@ -153,13 +147,16 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - - name: Dockerhub login - run: | - echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - name: Create and push multiarch manifests run: | - docker buildx imagetools create -t ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}${VERSION_SUFFIX} \ - ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}-arm64${VERSION_SUFFIX} \ - ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}-amd64${VERSION_SUFFIX}; + docker buildx imagetools create -t ${{ env.REGISTRY }}/${{ github.repository }}/${{ matrix.binary }}:${VERSION}${VERSION_SUFFIX} \ + ${{ env.REGISTRY }}/${{ github.repository }}/${{ matrix.binary }}:${VERSION}-arm64${VERSION_SUFFIX} \ + ${{ env.REGISTRY }}/${{ github.repository }}/${{ matrix.binary }}:${VERSION}-amd64${VERSION_SUFFIX}; diff --git a/Cargo.lock b/Cargo.lock index 6ed7bfd0b60..5820034a16d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -698,7 +698,7 @@ version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.60.2", ] [[package]] @@ -709,7 +709,7 @@ checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" dependencies = [ "anstyle", "once_cell_polyfill", - "windows-sys 0.61.2", + "windows-sys 0.60.2", ] [[package]] @@ -1142,6 +1142,8 @@ dependencies = [ "http 1.3.1", "http-body 1.0.1", "http-body-util", + "hyper 1.8.1", + "hyper-util", "itoa", "matchit", "memchr", @@ -1150,10 +1152,15 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", "sync_wrapper", + "tokio", "tower 0.5.2", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -1174,6 +1181,7 @@ dependencies = [ "sync_wrapper", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -1284,6 +1292,7 @@ dependencies = [ "tree_hash_derive", "typenum", "types", + "zkvm_execution_layer", "zstd 0.13.3", ] @@ -1318,6 +1327,7 @@ dependencies = [ "task_executor", "tracing", "types", + "zkvm_execution_layer", ] [[package]] @@ -1937,6 +1947,7 @@ dependencies = [ "tracing", "tracing-subscriber", "types", + "zkvm_execution_layer", ] [[package]] @@ -2790,6 +2801,22 @@ version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6add3b8cff394282be81f3fc1a0605db594ed69890078ca6e2cab1c408bcf04" +[[package]] +name = "dummy_el" +version = "0.1.0" +dependencies = [ + "anyhow", + "axum", + "clap", + "hex", + "jsonwebtoken", + "serde", + "serde_json", + "tokio", + 
"tracing", + "tracing-subscriber", +] + [[package]] name = "dunce" version = "1.0.5" @@ -3132,7 +3159,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -4274,6 +4301,7 @@ dependencies = [ "serde", "serde_json", "slot_clock", + "ssz_types", "state_processing", "store", "sysinfo", @@ -4776,7 +4804,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -6369,7 +6397,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -7670,9 +7698,9 @@ dependencies = [ [[package]] name = "ruint" -version = "1.17.0" +version = "1.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a68df0380e5c9d20ce49534f292a36a7514ae21350726efe1865bdb1fa91d278" +checksum = "c141e807189ad38a07276942c6623032d3753c8859c146104ac2e4d68865945a" dependencies = [ "alloy-rlp", "arbitrary", @@ -7816,7 +7844,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -8189,6 +8217,16 @@ dependencies = [ "serde_core", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" +dependencies = [ + "itoa", + "serde", +] + [[package]] name = "serde_repr" version = "0.1.20" @@ -8896,7 +8934,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.2", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -10241,7 +10279,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.48.0", ] [[package]] @@ -10942,6 +10980,27 @@ dependencies = [ "zstd 0.11.2+zstd.1.5.2", ] +[[package]] +name = "zkvm_execution_layer" +version = "0.1.0" +dependencies = [ + "async-trait", + "clap", + "eth2", + "execution_layer", + "fixed_bytes", + "futures", + "hashbrown 0.15.5", + "lru 0.12.5", + "sensitive_url", + "serde", + "thiserror 2.0.17", + "tokio", + "tracing", + "tracing-subscriber", + "types", +] + [[package]] name = "zstd" version = "0.11.2+zstd.1.5.2" diff --git a/Cargo.toml b/Cargo.toml index d5d1687c764..ba2316bb034 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,6 +61,7 @@ members = [ "crypto/eth2_wallet", "crypto/kzg", "database_manager", + "dummy_el", "lcli", "lighthouse", "lighthouse/environment", @@ -86,6 +87,7 @@ members = [ "validator_client/validator_metrics", "validator_client/validator_services", "validator_manager", + "zkvm_execution_layer", ] resolver = "2" diff --git a/Dockerfile b/Dockerfile index 8cc20ab000f..5ed4a7dd9c8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -21,4 +21,4 @@ RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-reco ca-certificates \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse +COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse \ No newline at 
end of file diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 5352814dd5d..0777216d172 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -44,6 +44,8 @@ strum = { workspace = true } task_executor = { workspace = true } tracing = { workspace = true } types = { workspace = true } +# TODO(zkproofs): add as a workspace dependency +zkvm_execution_layer = { path = "../zkvm_execution_layer" } [dev-dependencies] node_test_rig = { path = "../testing/node_test_rig" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 734cfdf32bb..eef64e1d9ad 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -69,6 +69,8 @@ tree_hash = { workspace = true } tree_hash_derive = { workspace = true } typenum = { workspace = true } types = { workspace = true } +# TODO(zkproofs): add as a workspace dependency +zkvm_execution_layer = { path = "../../zkvm_execution_layer" } zstd = { workspace = true } [dev-dependencies] diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 46ba14f596b..d6990894018 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -31,6 +31,9 @@ use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::events::ServerSentEventHandler; use crate::execution_payload::{NotifyExecutionLayer, PreparePayloadHandle, get_execution_payload}; +use crate::execution_proof_verification::{ + GossipExecutionProofError, GossipVerifiedExecutionProof, +}; use crate::fetch_blobs::EngineGetBlobsOutput; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::graffiti_calculator::{GraffitiCalculator, GraffitiSettings}; @@ -55,6 +58,7 @@ use crate::observed_attesters::{ }; use crate::observed_block_producers::ObservedBlockProducers; use crate::observed_data_sidecars::ObservedDataSidecars; +use crate::observed_execution_proofs::ObservedExecutionProofs; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::observed_slashable::ObservedSlashable; use crate::persisted_beacon_chain::PersistedBeaconChain; @@ -417,6 +421,8 @@ pub struct BeaconChain { pub observed_blob_sidecars: RwLock>>, /// Maintains a record of column sidecars seen over the gossip network. pub observed_column_sidecars: RwLock>>, + /// Maintains a record of execution proofs seen over the gossip network. + pub observed_execution_proofs: RwLock, /// Maintains a record of slashable message seen over the gossip network or RPC. pub observed_slashable: RwLock>, /// Maintains a record of which validators have submitted voluntary exits. @@ -2211,6 +2217,15 @@ impl BeaconChain { }) } + #[instrument(skip_all, level = "trace")] + pub fn verify_execution_proof_for_gossip( + self: &Arc, + execution_proof: Arc, + ) -> Result, GossipExecutionProofError> { + // TODO(zkproofs): Add metrics + GossipVerifiedExecutionProof::new(execution_proof, self) + } + #[instrument(skip_all, level = "trace")] pub fn verify_blob_sidecar_for_gossip( self: &Arc, @@ -3048,6 +3063,33 @@ impl BeaconChain { self.check_gossip_blob_availability_and_import(blob).await } + /// Process a gossip-verified execution proof by storing it in the DA checker. + /// + /// This method takes an execution proof that has already been validated via gossip + /// and stores it in the DataAvailabilityChecker. 
If all components for a block are + /// now available, the block will be imported to fork choice. + #[instrument(skip_all, level = "debug")] + pub async fn process_gossip_execution_proof( + self: &Arc, + execution_proof: GossipVerifiedExecutionProof, + publish_fn: impl FnOnce() -> Result<(), BlockError>, + ) -> Result { + let block_root = execution_proof.block_root(); + + // If this block has already been imported to forkchoice it must have been available, so + // we don't need to process its execution proofs again. + if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Err(BlockError::DuplicateFullyImported(block_root)); + } + + self.check_gossip_execution_proof_availability_and_import(execution_proof, publish_fn) + .await + } + /// Cache the data columns in the processing cache, process it, then evict it from the cache if it was /// imported or errors. #[instrument(skip_all, level = "debug")] @@ -3131,6 +3173,45 @@ impl BeaconChain { .await } + /// Process execution proofs retrieved via RPC and returns the `AvailabilityProcessingStatus`. + /// + /// This method handles execution proofs received from peers during block sync. The proofs + /// are verified and stored in the data availability checker. If all required components + /// (block, blobs/columns, and proofs) are available, the block is imported into fork choice. + pub async fn process_rpc_execution_proofs( + self: &Arc, + slot: Slot, + block_root: Hash256, + execution_proofs: Vec>, + ) -> Result { + // If this block has already been imported to forkchoice it must have been available, so + // we don't need to process its execution proofs again. + if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Err(BlockError::DuplicateFullyImported(block_root)); + } + + // Validate that all proofs are for the expected block_root + for proof in &execution_proofs { + if proof.block_root != block_root { + return Err(BlockError::AvailabilityCheck( + AvailabilityCheckError::Unexpected(format!( + "Proof block_root mismatch: expected {}, got {}", + block_root, proof.block_root + )), + )); + } + } + + // TODO(zkproofs): We can emit SSE events for execution proofs yet + + self.check_rpc_execution_proof_availability_and_import(slot, block_root, execution_proofs) + .await + } + /// Process blobs retrieved from the EL and returns the `AvailabilityProcessingStatus`. pub async fn process_engine_blobs( self: &Arc, @@ -3573,6 +3654,30 @@ impl BeaconChain { .await } + /// Checks if the provided execution proof can make any cached blocks available, and imports + /// immediately if so, otherwise caches the proof in the data availability checker. 
+ async fn check_gossip_execution_proof_availability_and_import( + self: &Arc, + execution_proof: GossipVerifiedExecutionProof, + publish_fn: impl FnOnce() -> Result<(), BlockError>, + ) -> Result { + let block_root = execution_proof.block_root(); + let slot = execution_proof.slot(); + + // TODO(zkproofs): Can we avoid the clone + let proof_arc = execution_proof.into_inner(); + let proof = (*proof_arc).clone(); + + // Store the proof in the DA checker + let availability = self + .data_availability_checker + .put_verified_execution_proofs(block_root, std::iter::once(proof)) + .map_err(BlockError::AvailabilityCheck)?; + + self.process_availability(slot, availability, publish_fn) + .await + } + fn check_blob_header_signature_and_slashability<'a>( self: &Arc, block_root: Hash256, @@ -3677,6 +3782,28 @@ impl BeaconChain { .await } + /// Checks if the provided execution proofs can make any cached blocks available, and imports + /// immediately if so, otherwise caches the proofs in the data availability checker. + async fn check_rpc_execution_proof_availability_and_import( + self: &Arc, + slot: Slot, + block_root: Hash256, + execution_proofs: Vec>, + ) -> Result { + // TODO(zkproofs): For optional proofs, they are currently not signed + // so we can't add any slashability checks here. We also don't want this + // because it could cause issues where we slash a validator for giving us bad + // proofs, but for nodes that don't need proofs (most of the network), they will + // not see this slashing or care about. + + let availability = self + .data_availability_checker + .put_rpc_execution_proofs(block_root, execution_proofs)?; + + self.process_availability(slot, availability, || Ok(())) + .await + } + fn check_data_column_sidecar_header_signature_and_slashability<'a>( self: &Arc, block_root: Hash256, @@ -4022,6 +4149,24 @@ impl BeaconChain { // This prevents inconsistency between the two at the expense of concurrency. drop(fork_choice); + // Persist execution proofs to the database if zkvm is enabled and proofs are cached. + // This is done after the block is successfully stored so we don't lose proofs on cache eviction. + if let Some(proofs) = self + .data_availability_checker + .get_execution_proofs(&block_root) + && !proofs.is_empty() + { + let proofs_owned: Vec<_> = proofs.iter().map(|p| (**p).clone()).collect(); + if let Err(e) = self.store.put_execution_proofs(&block_root, &proofs_owned) { + // Log but don't fail block import - proofs can still be served from cache + warn!( + %block_root, + error = ?e, + "Failed to persist execution proofs to database" + ); + } + } + // We're declaring the block "imported" at this point, since fork choice and the DB know // about it. 
let block_time_imported = timestamp_now(); @@ -7352,6 +7497,34 @@ impl BeaconChain { && self.spec.is_peer_das_enabled_for_epoch(block_epoch) } + /// Returns true if epoch is within the execution proof retention boundary + pub fn execution_proof_check_required_for_epoch(&self, epoch: Epoch) -> bool { + self.data_availability_checker + .execution_proof_check_required_for_epoch(epoch) + } + + /// Returns true if we should fetch execution proofs for this block + pub fn should_fetch_execution_proofs(&self, block_epoch: Epoch) -> bool { + // Check if ZK-VM mode is enabled + if self.min_execution_proofs_required().is_none() { + return false; + } + + // Only fetch proofs within retention window + self.execution_proof_check_required_for_epoch(block_epoch) + } + + /// Returns the minimum number of execution proofs required + pub fn min_execution_proofs_required(&self) -> Option { + self.data_availability_checker + .min_execution_proofs_required() + } + + /// Returns the execution proof retention boundary epoch + pub fn execution_proof_boundary(&self) -> Option { + self.data_availability_checker.execution_proof_boundary() + } + /// Gets the `LightClientBootstrap` object for a requested block root. /// /// Returns `None` when the state or block is not found in the database. @@ -7451,6 +7624,59 @@ impl BeaconChain { .custody_context() .custody_columns_for_epoch(epoch_opt, &self.spec) } + + /// Returns a deterministic list of execution proof subnet IDs to request for a block in the given epoch. + /// + /// The selection is deterministic based on the epoch, ensuring all nodes request the same + /// subnets for blocks in the same epoch. Different epochs will result in different subnet + /// selections, providing rotation over time. + /// + /// # Arguments + /// * `epoch` - The epoch of the block + /// * `count` - Number of subnets to select (typically min_execution_proofs_required) + /// + /// # Returns + /// A vector of `count` subnet IDs, deterministically selected based on the epoch. 
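+    ///
+    /// # Example
+    ///
+    /// An illustrative sketch (assumes `EXECUTION_PROOF_TYPE_COUNT >= 2`); the exact IDs
+    /// returned depend on the deterministic shuffle below:
+    ///
+    /// ```ignore
+    /// // Every node derives the same subnets for the same epoch.
+    /// let subnets = chain.execution_proof_subnets_for_epoch(Epoch::new(42), 2);
+    /// assert_eq!(subnets.len(), 2);
+    /// assert_eq!(subnets, chain.execution_proof_subnets_for_epoch(Epoch::new(42), 2));
+    /// ```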
+ pub fn execution_proof_subnets_for_epoch( + &self, + epoch: Epoch, + count: usize, + ) -> Vec { + use types::EXECUTION_PROOF_TYPE_COUNT; + + let total_subnets = EXECUTION_PROOF_TYPE_COUNT as usize; + let count = std::cmp::min(count, total_subnets); + + if count == 0 { + return vec![]; + } + + // Use epoch as a deterministic seed + // Hash the epoch to get a pseudo-random but deterministic ordering + let mut hasher = std::collections::hash_map::DefaultHasher::new(); + use std::hash::{Hash, Hasher}; + epoch.hash(&mut hasher); + let seed = hasher.finish(); + + // Create a deterministic permutation of subnet IDs based on the seed + let mut subnet_ids: Vec = (0..EXECUTION_PROOF_TYPE_COUNT).collect(); + + // Simple deterministic shuffle using the seed + // This is a Fisher-Yates shuffle variant using deterministic randomness + for i in (1..subnet_ids.len()).rev() { + // Use seed + i for deterministic pseudo-random index + let j = ((seed.wrapping_add(i as u64).wrapping_mul(2654435761)) % ((i + 1) as u64)) + as usize; + subnet_ids.swap(i, j); + } + + // Take the first `count` subnet IDs and convert to ExecutionProofId + subnet_ids + .into_iter() + .take(count) + .filter_map(|id| types::ExecutionProofId::new(id).ok()) + .collect() + } } impl Drop for BeaconChain { diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 58dbf1c35e8..feabcd5f44a 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -13,6 +13,7 @@ use crate::kzg_utils::build_data_column_sidecars; use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; use crate::observed_data_sidecars::ObservedDataSidecars; +use crate::observed_execution_proofs::ObservedExecutionProofs; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::persisted_custody::load_custody_context; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; @@ -107,6 +108,12 @@ pub struct BeaconChainBuilder { node_custody_type: NodeCustodyType, ordered_custody_column_indices: Option>, rng: Option>, + /// ZK-VM execution layer configuration. + /// + /// TODO(zkproofs): When this is Some(_), the traditional ExecutionLayer should + /// be replaced with ZkVmEngineApi from zkvm_execution_layer. This would allow the + /// --execution-endpoint CLI flag to be optional when running in ZK-VM mode. + zkvm_execution_layer_config: Option, } impl @@ -147,6 +154,7 @@ where node_custody_type: NodeCustodyType::Fullnode, ordered_custody_column_indices: None, rng: None, + zkvm_execution_layer_config: None, } } @@ -652,6 +660,16 @@ where self } + /// Sets the ZK-VM execution layer configuration. + /// When set, enables ZK-VM execution proof verification mode. + pub fn zkvm_execution_layer_config( + mut self, + config: Option, + ) -> Self { + self.zkvm_execution_layer_config = config; + self + } + /// Sets the ordered custody column indices for this node. /// This is used to determine the data columns the node is required to custody. 
pub fn ordered_custody_column_indices( @@ -1007,6 +1025,7 @@ where observed_block_producers: <_>::default(), observed_column_sidecars: RwLock::new(ObservedDataSidecars::new(self.spec.clone())), observed_blob_sidecars: RwLock::new(ObservedDataSidecars::new(self.spec.clone())), + observed_execution_proofs: RwLock::new(ObservedExecutionProofs::default()), observed_slashable: <_>::default(), observed_voluntary_exits: <_>::default(), observed_proposer_slashings: <_>::default(), @@ -1052,6 +1071,11 @@ where store, Arc::new(custody_context), self.spec, + // Create verifier registry if zkvm mode is enabled + // For now, we use dummy verifiers for all subnets + self.zkvm_execution_layer_config + .as_ref() + .map(|_| Arc::new(zkvm_execution_layer::registry_proof_verification::VerifierRegistry::new_with_dummy_verifiers())), ) .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), @@ -1135,6 +1159,13 @@ where .process_prune_blobs(data_availability_boundary); } + // Prune execution proofs older than the execution proof boundary in the background. + if let Some(execution_proof_boundary) = beacon_chain.execution_proof_boundary() { + beacon_chain + .store_migrator + .process_prune_execution_proofs(execution_proof_boundary); + } + Ok(beacon_chain) } } diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 7dd4c88c513..17dc227430b 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -951,6 +951,13 @@ impl BeaconChain { .start_slot(T::EthSpec::slots_per_epoch()), ); + self.observed_execution_proofs.write().prune( + new_view + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + ); + self.observed_slashable.write().prune( new_view .finalized_checkpoint @@ -1027,6 +1034,12 @@ impl BeaconChain { .process_prune_blobs(data_availability_boundary); } + // Prune execution proofs in the background. + if let Some(execution_proof_boundary) = self.execution_proof_boundary() { + self.store_migrator + .process_prune_execution_proofs(execution_proof_boundary); + } + // Take a write-lock on the canonical head and signal for it to prune. self.canonical_head.fork_choice_write_lock().prune()?; diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 3e859456b18..90c5b836286 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -18,12 +18,13 @@ use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; -use tracing::{debug, error, instrument}; +use tracing::{debug, error, instrument, warn}; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{ BlobSidecarList, BlockImportSource, ChainSpec, DataColumnSidecar, DataColumnSidecarList, Epoch, - EthSpec, Hash256, SignedBeaconBlock, Slot, + EthSpec, ExecutionProof, ExecutionProofId, Hash256, SignedBeaconBlock, Slot, }; +use zkvm_execution_layer::registry_proof_verification::VerifierRegistry; mod error; mod overflow_lru_cache; @@ -86,6 +87,8 @@ pub struct DataAvailabilityChecker { kzg: Arc, custody_context: Arc>, spec: Arc, + /// Registry of proof verifiers for different zkVM proof IDs. 
+ verifier_registry: Option>, } pub type AvailabilityAndReconstructedColumns = (Availability, DataColumnSidecarList); @@ -118,6 +121,7 @@ impl Debug for Availability { } impl DataAvailabilityChecker { + #[allow(clippy::too_many_arguments)] pub fn new( complete_blob_backfill: bool, slot_clock: T::SlotClock, @@ -125,6 +129,7 @@ impl DataAvailabilityChecker { store: BeaconStore, custody_context: Arc>, spec: Arc, + verifier_registry: Option>, ) -> Result { let inner = DataAvailabilityCheckerInner::new( OVERFLOW_LRU_CAPACITY_NON_ZERO, @@ -139,6 +144,7 @@ impl DataAvailabilityChecker { kzg, custody_context, spec, + verifier_registry, }) } @@ -169,6 +175,54 @@ impl DataAvailabilityChecker { }) } + /// Return the set of cached execution proof IDs for `block_root`. Returns None if there is + /// no block component for `block_root`. + pub fn cached_execution_proof_subnet_ids( + &self, + block_root: &Hash256, + ) -> Option> { + self.availability_cache + .peek_pending_components(block_root, |components| { + components.map(|components| { + components + .get_cached_execution_proofs() + .iter() + .map(|proof| proof.proof_id) + .collect::>() + }) + }) + } + + /// Get proof IDs we already have for a block. + /// Used when creating RPC requests to tell peers what we don't need. + pub fn get_existing_proof_ids(&self, block_root: &Hash256) -> Option> { + self.availability_cache + .peek_pending_components(block_root, |components| { + components.map(|components| { + components + .get_cached_execution_proofs() + .iter() + .map(|proof| proof.proof_id) + .collect::>() + }) + }) + } + + /// Get all execution proofs we have for a block. + /// Used when responding to RPC requests. + pub fn get_execution_proofs(&self, block_root: &Hash256) -> Option>> { + self.availability_cache + .peek_pending_components(block_root, |components| { + components.map(|components| { + components + .get_cached_execution_proofs() + .iter() + .map(|proof| Arc::new(proof.clone())) + .collect::>() + }) + }) + } + /// Return the set of cached custody column indexes for `block_root`. Returns None if there is /// no block component for `block_root`. pub fn cached_data_column_indexes(&self, block_root: &Hash256) -> Option> { @@ -193,6 +247,63 @@ impl DataAvailabilityChecker { }) } + /// Check if an execution proof is already cached in the availability cache. + /// + /// We usually call this method if the proof was made available ia RPC, and we later receive it via Gossip. + /// If it exists in the cache, we know it has already passed validation, + /// even though this particular instance may not have been seen/published on gossip yet. + pub fn is_execution_proof_cached( + &self, + block_root: &Hash256, + execution_proof: &ExecutionProof, + ) -> bool { + self.availability_cache + .peek_pending_components(block_root, |components| { + components.is_some_and(|components| { + components + .get_cached_execution_proofs() + .iter() + .any(|cached| cached == execution_proof) + }) + }) + } + + /// Verify a single execution proof for gossip. + /// + /// This performs cryptographic verification of the proof without requiring the full block. + /// + /// Returns: + /// - Ok(true) if proof is valid + /// - Ok(false) if proof is invalid + /// - Err if no verifier is configured or verification fails + pub fn verify_execution_proof_for_gossip( + &self, + proof: &ExecutionProof, + ) -> Result { + let Some(verifier_registry) = &self.verifier_registry else { + // No verifier configured but receiving proofs - this is a configuration error. 
+ // If the chain spec enables zkVM, the node must have --activate-zkvm flag set. + return Err(AvailabilityCheckError::ProofVerificationError( + "Node is receiving execution proofs but zkVM verification is not enabled. \ + Use --activate-zkvm flag to enable proof verification." + .to_string(), + )); + }; + + let subnet_id = proof.proof_id; + let verifier = verifier_registry.get_verifier(subnet_id).ok_or_else(|| { + warn!(?subnet_id, "No verifier registered for subnet"); + AvailabilityCheckError::UnsupportedProofID(subnet_id) + })?; + + verifier.verify(proof).map_err(|e| { + AvailabilityCheckError::ProofVerificationError(format!( + "Proof verification failed: {:?}", + e + )) + }) + } + /// Get a blob from the availability cache. pub fn get_blob( &self, @@ -269,6 +380,120 @@ impl DataAvailabilityChecker { .put_kzg_verified_data_columns(block_root, verified_custody_columns) } + /// Put a list of execution proofs received via RPC into the availability cache. + /// This performs cryptographic verification on the proofs. + #[instrument(skip_all, level = "trace")] + pub fn put_rpc_execution_proofs( + &self, + block_root: Hash256, + proofs: Vec>, + ) -> Result, AvailabilityCheckError> { + debug!( + ?block_root, + num_proofs = proofs.len(), + "Verifying and storing execution proofs in DA checker" + ); + + // If no verifier registry is configured, skip verification + let Some(verifier_registry) = &self.verifier_registry else { + debug!( + ?block_root, + "No verifier registry configured, storing proofs without verification" + ); + let owned_proofs = proofs.iter().map(|p| (**p).clone()); + return self + .availability_cache + .put_verified_execution_proofs(block_root, owned_proofs); + }; + + // Get the execution payload hash from the block, if it is already cached. + let execution_payload_hash = + self.availability_cache + .peek_pending_components(&block_root, |components| { + components + .and_then(|c| c.block.as_ref().and_then(|b| b.execution_payload_hash())) + }); + + if let Some(execution_payload_hash) = execution_payload_hash { + debug!( + ?block_root, + ?execution_payload_hash, + "Got execution payload hash for proof verification" + ); + } else { + debug!( + ?block_root, + "Execution payload hash not available yet, deferring block hash check" + ); + } + + let mut verified_proofs = Vec::new(); + for proof in proofs { + let proof_id = proof.proof_id; + + // If we have the block, check that the proof's block_hash matches the payload hash. 
+ if let Some(execution_payload_hash) = execution_payload_hash + && proof.block_hash != execution_payload_hash + { + warn!( + ?block_root, + ?proof_id, + proof_hash = ?proof.block_hash, + ?execution_payload_hash, + "Proof execution payload hash mismatch" + ); + return Err(AvailabilityCheckError::ExecutionPayloadHashMismatch { + proof_hash: proof.block_hash, + block_hash: execution_payload_hash, + }); + } + + let verifier = verifier_registry.get_verifier(proof_id).ok_or_else(|| { + warn!(?proof_id, "No verifier registered for proof ID"); + AvailabilityCheckError::UnsupportedProofID(proof_id) + })?; + + // Verify the proof (proof contains block_hash internally) + match verifier.verify(&proof) { + Ok(true) => { + debug!(?proof_id, ?block_root, "Proof verification succeeded"); + verified_proofs.push((*proof).clone()); + } + Ok(false) => { + warn!( + ?proof_id, + ?block_root, + "Proof verification failed: proof is invalid" + ); + return Err(AvailabilityCheckError::InvalidProof { + proof_id, + reason: "Proof verification returns false".to_string(), + }); + } + Err(e) => { + warn!( + ?proof_id, + ?block_root, + error = ?e, + "Proof verification error" + ); + return Err(AvailabilityCheckError::ProofVerificationError( + e.to_string(), + )); + } + } + } + + debug!( + ?block_root, + verified_count = verified_proofs.len(), + "All proofs verified successfully" + ); + + self.availability_cache + .put_verified_execution_proofs(block_root, verified_proofs) + } + /// Check if we've cached other blobs for this block. If it completes a set and we also /// have a block cached, return the `Availability` variant triggering block import. /// Otherwise cache the blob sidecar. @@ -338,6 +563,20 @@ impl DataAvailabilityChecker { .put_kzg_verified_data_columns(block_root, custody_columns) } + /// Put execution proofs into the availability cache as pending components. + /// + /// Returns `Availability` which has information about whether all components have been + /// received or more are required. + #[instrument(skip_all, level = "trace")] + pub fn put_verified_execution_proofs>( + &self, + block_root: Hash256, + execution_proofs: I, + ) -> Result, AvailabilityCheckError> { + self.availability_cache + .put_verified_execution_proofs(block_root, execution_proofs) + } + /// Check if we have all the blobs for a block. Returns `Availability` which has information /// about whether all components have been received or more are required. pub fn put_executed_block( @@ -566,6 +805,44 @@ impl DataAvailabilityChecker { }) } + /// The epoch at which we require execution proofs for block processing. + /// + /// Note: This follows the same pattern as blob retention: proofs are required starting from + /// the zkvm_fork epoch, but only retained for a configured number of epochs. + /// + /// TODO(zkproofs): We don't store proofs forever and we also don't store + /// blobs forever, perhaps we should because when the blob disappears, we may not + /// be able to remake the proof when we put blobs in blocks. + /// We don't for now because proofs are quite large at the moment. + /// + /// Returns `None` if ZK-VM mode is disabled. 
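+    ///
+    /// Illustrative arithmetic (hypothetical numbers): with `zkvm_fork_epoch = 100`,
+    /// `min_epochs_for_execution_proof_requests = 8` and a current epoch of 120, the
+    /// boundary is `max(100, 120 - 8) = 112`; at current epoch 105 it would be
+    /// `max(100, 97) = 100`, i.e. never earlier than the fork epoch.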
+ pub fn execution_proof_boundary(&self) -> Option { + let zkvm_fork_epoch = self.spec.zkvm_fork_epoch()?; + + let current_epoch = self.slot_clock.now()?.epoch(T::EthSpec::slots_per_epoch()); + + // Calculate retention boundary + let proof_retention_epoch = + current_epoch.saturating_sub(self.spec.min_epochs_for_execution_proof_requests); + + // Return max of fork epoch and retention boundary + // This ensures: + // 1. Proofs are never required before the zkvm fork + // 2. Proofs are only retained for the configured number of epochs + Some(std::cmp::max(zkvm_fork_epoch, proof_retention_epoch)) + } + + /// Returns true if the given epoch lies within the proof retention boundary. + pub fn execution_proof_check_required_for_epoch(&self, block_epoch: Epoch) -> bool { + self.execution_proof_boundary() + .is_some_and(|boundary_epoch| block_epoch >= boundary_epoch) + } + + /// Returns the minimum number of execution proofs required for ZK-VM mode. + pub fn min_execution_proofs_required(&self) -> Option { + self.spec.zkvm_min_proofs_required() + } + /// Collects metrics from the data availability checker. pub fn metrics(&self) -> DataAvailabilityCheckerMetrics { DataAvailabilityCheckerMetrics { @@ -1196,6 +1473,7 @@ mod test { store, custody_context, spec, + None, ) .expect("should initialise data availability checker") } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs index c9efb7a4149..e5158827479 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/error.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -1,5 +1,5 @@ use kzg::{Error as KzgError, KzgCommitment}; -use types::{BeaconStateError, ColumnIndex, Hash256}; +use types::{BeaconStateError, ColumnIndex, ExecutionProofId, Hash256}; #[derive(Debug)] pub enum Error { @@ -22,6 +22,27 @@ pub enum Error { BlockReplayError(state_processing::BlockReplayError), RebuildingStateCaches(BeaconStateError), SlotClockError, + /// Execution proof verification failed - proof is invalid. + /// Penalize peer, a peer should not forward invalid proofs + InvalidProof { + proof_id: ExecutionProofId, + reason: String, + }, + /// No verifier registered for this proof ID. + /// Internal error; no peer penalization. + UnsupportedProofID(ExecutionProofId), + /// Error during proof verification process. + /// Internal error; no peer penalization. + ProofVerificationError(String), + /// Could not extract execution payload from block. + /// Internal error; no peer penalization. + MissingExecutionPayload, + /// Execution payload hash mismatch between proof and block. + /// Penalize peer, similar to an invalid proof. + ExecutionPayloadHashMismatch { + proof_hash: types::ExecutionBlockHash, + block_hash: types::ExecutionBlockHash, + }, } #[derive(PartialEq, Eq)] @@ -44,13 +65,18 @@ impl Error { | Error::ParentStateMissing(_) | Error::BlockReplayError(_) | Error::RebuildingStateCaches(_) - | Error::SlotClockError => ErrorCategory::Internal, + | Error::SlotClockError + | Error::UnsupportedProofID(_) + | Error::ProofVerificationError(_) + | Error::MissingExecutionPayload => ErrorCategory::Internal, Error::InvalidBlobs { .. } | Error::InvalidColumn { .. } | Error::ReconstructColumnsError { .. } | Error::BlobIndexInvalid(_) | Error::DataColumnIndexInvalid(_) - | Error::KzgCommitmentMismatch { .. } => ErrorCategory::Malicious, + | Error::KzgCommitmentMismatch { .. } + | Error::InvalidProof { .. } + | Error::ExecutionPayloadHashMismatch { .. 
} => ErrorCategory::Malicious, } } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 776fb50f619..90e9c6b3b03 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -55,6 +55,16 @@ impl CachedBlock { .blob_kzg_commitments() .map_or(0, |commitments| commitments.len()) } + + /// Get the execution payload hash if this block has an execution payload + pub fn execution_payload_hash(&self) -> Option { + self.as_block() + .message() + .body() + .execution_payload() + .ok() + .map(|payload| payload.execution_payload_ref().block_hash()) + } } /// This represents the components of a partially available block @@ -74,6 +84,7 @@ pub struct PendingComponents { pub block_root: Hash256, pub verified_blobs: RuntimeFixedVector>>, pub verified_data_columns: Vec>, + pub verified_execution_proofs: Vec, pub block: Option>, pub reconstruction_started: bool, span: Span, @@ -199,11 +210,89 @@ impl PendingComponents { Ok(()) } + /// Returns an immutable reference to the cached execution proofs. + pub fn get_cached_execution_proofs(&self) -> &[types::ExecutionProof] { + &self.verified_execution_proofs + } + + /// Check if we have a specific proof + pub fn has_proof_with_id(&self, proof_id: types::ExecutionProofId) -> bool { + self.verified_execution_proofs + .iter() + .any(|proof| proof.proof_id == proof_id) + } + + /// Get the number of unique subnet proofs we have + pub fn execution_proof_subnet_count(&self) -> usize { + self.verified_execution_proofs.len() + } + + fn execution_payload_hash(&self) -> Option { + self.block + .as_ref() + .and_then(|block| block.execution_payload_hash()) + } + + fn retain_matching_execution_proofs(&mut self) { + let Some(expected_hash) = self.execution_payload_hash() else { + return; + }; + + let before = self.verified_execution_proofs.len(); + self.verified_execution_proofs + .retain(|proof| proof.block_hash == expected_hash); + let after = self.verified_execution_proofs.len(); + if before != after { + debug!( + ?expected_hash, + dropped = before - after, + "Dropped execution proofs with mismatched payload hash" + ); + } + } + + /// Merges a single execution proof into the cache. + /// + /// Proofs are only inserted if: + /// 1. We don't already have a proof from this subnet for this block + /// 2. The proof's block_hash matches the cached block payload hash (if block exists) + pub fn merge_execution_proof(&mut self, proof: types::ExecutionProof) { + // Don't insert duplicate proofs + if self.has_proof_with_id(proof.proof_id) { + return; + } + + if let Some(expected_hash) = self.execution_payload_hash() + && proof.block_hash != expected_hash + { + debug!( + ?expected_hash, + proof_hash = ?proof.block_hash, + proof_id = ?proof.proof_id, + "Execution proof payload hash mismatch" + ); + return; + } + + self.verified_execution_proofs.push(proof); + } + + /// Merges a given set of execution proofs into the cache. + pub fn merge_execution_proofs>( + &mut self, + execution_proofs: I, + ) { + for proof in execution_proofs { + self.merge_execution_proof(proof); + } + } + /// Inserts a new block and revalidates the existing blobs against it. /// /// Blobs that don't match the new block's commitments are evicted. 
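+    /// Cached execution proofs whose `block_hash` does not match the new block's execution
+    /// payload hash are likewise dropped (via `retain_matching_execution_proofs`).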
pub fn merge_block(&mut self, block: DietAvailabilityPendingExecutedBlock) { self.insert_executed_block(block); + self.retain_matching_execution_proofs(); let reinsert = self.get_cached_blobs_mut().take(); self.merge_blobs(reinsert); } @@ -213,10 +302,11 @@ impl PendingComponents { /// /// WARNING: This function can potentially take a lot of time if the state needs to be /// reconstructed from disk. Ensure you are not holding any write locks while calling this. - pub fn make_available( + fn make_available( &self, spec: &Arc, num_expected_columns_opt: Option, + min_proofs_required_opt: Option, recover: R, ) -> Result>, AvailabilityCheckError> where @@ -294,6 +384,15 @@ impl PendingComponents { return Ok(None); }; + // Check if this block needs execution proofs. + if let Some(min_proofs) = min_proofs_required_opt { + let num_proofs = self.execution_proof_subnet_count(); + if num_proofs < min_proofs { + // Not enough execution proofs yet + return Ok(None); + } + } + // Block is available, construct `AvailableExecutedBlock` let blobs_available_timestamp = match blob_data { @@ -340,6 +439,7 @@ impl PendingComponents { block_root, verified_blobs: RuntimeFixedVector::new(vec![None; max_len]), verified_data_columns: vec![], + verified_execution_proofs: vec![], block: None, reconstruction_started: false, span, @@ -372,7 +472,9 @@ impl PendingComponents { pub fn status_str(&self, num_expected_columns_opt: Option) -> String { let block_count = if self.block.is_some() { 1 } else { 0 }; - if let Some(num_expected_columns) = num_expected_columns_opt { + let proof_count = self.execution_proof_subnet_count(); + + let base_status = if let Some(num_expected_columns) = num_expected_columns_opt { format!( "block {} data_columns {}/{}", block_count, @@ -391,6 +493,13 @@ impl PendingComponents { self.verified_blobs.iter().flatten().count(), num_expected_blobs ) + }; + + // Append execution proof count if we have any + if proof_count > 0 { + format!("{} proofs {}", base_status, proof_count) + } else { + base_status } } } @@ -528,7 +637,13 @@ impl DataAvailabilityCheckerInner { ); }); - self.check_availability_and_cache_components(block_root, pending_components, None) + let min_proofs_required_opt = self.get_min_proofs_required(epoch); + self.check_availability_and_cache_components( + block_root, + pending_components, + None, + min_proofs_required_opt, + ) } #[allow(clippy::type_complexity)] @@ -568,10 +683,61 @@ impl DataAvailabilityCheckerInner { ); }); + let min_proofs_required_opt = self.get_min_proofs_required(epoch); self.check_availability_and_cache_components( block_root, pending_components, Some(num_expected_columns), + min_proofs_required_opt, + ) + } + + /// Puts execution proofs into the availability cache as pending components. 
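+    ///
+    /// Proofs may arrive before their block: in that case they are cached against the
+    /// block root (using a placeholder epoch that is corrected once the block arrives)
+    /// and availability is re-checked when the block and its blobs/columns show up.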
+ pub fn put_verified_execution_proofs>( + &self, + block_root: Hash256, + execution_proofs: I, + ) -> Result, AvailabilityCheckError> { + let mut execution_proofs = execution_proofs.into_iter().peekable(); + + if execution_proofs.peek().is_none() { + // No proofs to process + return Ok(Availability::MissingComponents(block_root)); + } + + // Try to get epoch from existing pending components (if block already arrived) + // Otherwise use Epoch::new(0) as placeholder (will be corrected when block arrives) + // Also the component cannot be marked as available, if the block is missing + let epoch = self + .critical + .read() + .peek(&block_root) + .and_then(|pending| pending.epoch()) + .unwrap_or_else(|| types::Epoch::new(0)); + + let pending_components = + self.update_or_insert_pending_components(block_root, epoch, |pending_components| { + pending_components.merge_execution_proofs(execution_proofs); + Ok(()) + })?; + + let num_expected_columns_opt = self.get_num_expected_columns(epoch); + let min_proofs_required_opt = self.get_min_proofs_required(epoch); + + pending_components.span.in_scope(|| { + debug!( + component = "execution_proofs", + status = pending_components.status_str(num_expected_columns_opt), + num_proofs = pending_components.execution_proof_subnet_count(), + "Component added to data availability checker" + ); + }); + + self.check_availability_and_cache_components( + block_root, + pending_components, + num_expected_columns_opt, + min_proofs_required_opt, ) } @@ -580,10 +746,12 @@ impl DataAvailabilityCheckerInner { block_root: Hash256, pending_components: MappedRwLockReadGuard<'_, PendingComponents>, num_expected_columns_opt: Option, + min_proofs_required_opt: Option, ) -> Result, AvailabilityCheckError> { if let Some(available_block) = pending_components.make_available( &self.spec, num_expected_columns_opt, + min_proofs_required_opt, |block, span| self.state_cache.recover_pending_executed_block(block, span), )? { // Explicitly drop read lock before acquiring write lock @@ -752,6 +920,7 @@ impl DataAvailabilityCheckerInner { })?; let num_expected_columns_opt = self.get_num_expected_columns(epoch); + let min_proofs_required_opt = self.get_min_proofs_required(epoch); pending_components.span.in_scope(|| { debug!( @@ -765,6 +934,7 @@ impl DataAvailabilityCheckerInner { block_root, pending_components, num_expected_columns_opt, + min_proofs_required_opt, ) } @@ -779,6 +949,16 @@ impl DataAvailabilityCheckerInner { } } + /// Returns the minimum number of execution proofs required for a block at the given epoch. + /// Returns `None` if proofs are not required (zkVM not enabled for this epoch). 
+ fn get_min_proofs_required(&self, epoch: Epoch) -> Option { + if self.spec.is_zkvm_enabled_for_epoch(epoch) { + self.spec.zkvm_min_proofs_required() + } else { + None + } + } + /// maintain the cache pub fn do_maintenance(&self, cutoff_epoch: Epoch) -> Result<(), AvailabilityCheckError> { // clean up any lingering states in the state cache @@ -1268,7 +1448,10 @@ mod pending_components_tests { use rand::rngs::StdRng; use state_processing::ConsensusContext; use types::test_utils::TestRandom; - use types::{BeaconState, ForkName, MainnetEthSpec, SignedBeaconBlock, Slot}; + use types::{ + BeaconState, ExecutionBlockHash, ExecutionProof, ExecutionProofId, ForkName, + MainnetEthSpec, SignedBeaconBlock, Slot, + }; type E = MainnetEthSpec; @@ -1463,6 +1646,48 @@ mod pending_components_tests { assert_cache_consistent(cache, max_len); } + #[test] + fn execution_proofs_filtered_on_block_merge() { + let (block, blobs, invalid_blobs, max_len) = pre_setup(); + let block_root = block.canonical_root(); + let slot = block.slot(); + let payload_hash = block + .message() + .body() + .execution_payload() + .expect("block has execution payload") + .execution_payload_ref() + .block_hash(); + + let proof_id_0 = ExecutionProofId::new(0).expect("proof id 0 is valid"); + let proof_id_1 = ExecutionProofId::new(1).expect("proof id 1 is valid"); + + let proof_ok = + ExecutionProof::new(proof_id_0, slot, payload_hash, block_root, vec![1, 2, 3]) + .expect("valid proof"); + let proof_bad = ExecutionProof::new( + proof_id_1, + slot, + ExecutionBlockHash::repeat_byte(42), + block_root, + vec![4, 5, 6], + ) + .expect("valid proof with mismatched hash"); + + let (pending_block, _, _) = setup_pending_components(block, blobs, invalid_blobs); + let mut cache = >::empty(block_root, max_len); + + cache.merge_execution_proofs(vec![proof_ok, proof_bad]); + assert_eq!(cache.execution_proof_subnet_count(), 2); + + cache.merge_block(pending_block); + assert_eq!(cache.execution_proof_subnet_count(), 1); + assert_eq!( + cache.get_cached_execution_proofs()[0].block_hash, + payload_hash + ); + } + #[test] fn should_not_insert_pre_execution_block_if_executed_block_exists() { let (pre_execution_block, blobs, random_blobs, max_len) = pre_setup(); diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index b021df2c33b..ac2ec97538b 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -99,6 +99,7 @@ pub enum BeaconChainError { ObservedAttestersError(ObservedAttestersError), ObservedBlockProducersError(ObservedBlockProducersError), ObservedDataSidecarsError(ObservedDataSidecarsError), + ObservedExecutionProofError(String), AttesterCacheError(AttesterCacheError), PruningError(PruningError), ArithError(ArithError), diff --git a/beacon_node/beacon_chain/src/execution_proof_verification.rs b/beacon_node/beacon_chain/src/execution_proof_verification.rs new file mode 100644 index 00000000000..f20d0494dda --- /dev/null +++ b/beacon_node/beacon_chain/src/execution_proof_verification.rs @@ -0,0 +1,625 @@ +use crate::observed_data_sidecars::{ObservationStrategy, Observe}; +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use slot_clock::SlotClock; +use std::marker::PhantomData; +use std::sync::Arc; +use tracing::{debug, error, warn}; +use types::{ChainSpec, EthSpec, ExecutionProof, ExecutionProofId, Hash256, Slot}; + +/// An error occurred while validating a gossip execution proof. 
+#[derive(Debug)] +pub enum GossipExecutionProofError { + /// There was an error whilst processing the execution proof. It is not known if it is + /// valid or invalid. + /// + /// ## Peer scoring + /// + /// We were unable to process this proof due to an internal error. It's unclear if the proof + /// is valid. + BeaconChainError(Box), + + /// The execution proof is from a slot that is later than the current slot (with respect to + /// the gossip clock disparity). + /// + /// ## Peer scoring + /// + /// Assuming the local clock is correct, the peer has sent an invalid message. + FutureSlot { + message_slot: Slot, + latest_permissible_slot: Slot, + }, + + /// The proof corresponds to a slot older than the finalized head slot. + /// + /// ## Peer scoring + /// + /// It's unclear if this proof is valid, but this proof is for a finalized slot and is + /// therefore useless to us. + PastFinalizedSlot { + proof_slot: Slot, + finalized_slot: Slot, + }, + + /// The proof's parent block is unknown. + /// + /// ## Peer scoring + /// + /// We cannot process the proof without validating its parent, the peer isn't necessarily + /// faulty. + ParentUnknown { parent_root: Hash256 }, + + /// The proof conflicts with finalization, no need to propagate. + /// + /// ## Peer scoring + /// + /// It's unclear if this proof is valid, but it conflicts with finality and shouldn't be + /// imported. + NotFinalizedDescendant { block_parent_root: Hash256 }, + + /// An execution proof has already been seen for the given `(proof.block_root, + /// proof_id)` tuple over gossip or no gossip sources. + /// + /// ## Peer scoring + /// + /// The peer isn't faulty, but we do not forward it over gossip. + PriorKnown { + slot: Slot, + block_root: Hash256, + proof_id: ExecutionProofId, + }, + + /// An execution proof has already been processed from non-gossip source and has not yet been + /// seen on the gossip network. This proof should be accepted and forwarded over gossip. + PriorKnownUnpublished, + + /// The proof verification failed (invalid zkVM proof). + /// + /// ## Peer scoring + /// + /// The proof is invalid and the peer is faulty. + ProofVerificationFailed(String), + + /// The proof size exceeds the maximum allowed size. + /// + /// ## Peer scoring + /// + /// The proof is invalid and the peer is faulty. + ProofTooLarge { size: usize, max_size: usize }, + + /// The block for this proof is not yet available. + /// + /// ## Peer scoring + /// + /// The peer may have sent a proof before we've seen the block. Not necessarily faulty. + BlockNotAvailable { block_root: Hash256 }, +} + +impl From for GossipExecutionProofError { + fn from(e: BeaconChainError) -> Self { + GossipExecutionProofError::BeaconChainError(Box::new(e)) + } +} + +/// A wrapper around an `ExecutionProof` that has been verified for propagation on the gossip +/// network. 
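+///
+/// Illustrative usage sketch (assumes `proof: Arc<ExecutionProof>` and an async context;
+/// error-type conversions elided for brevity):
+///
+/// ```ignore
+/// let verified = chain.verify_execution_proof_for_gossip(proof)?;
+/// let status = chain
+///     .process_gossip_execution_proof(verified, || Ok(()))
+///     .await?;
+/// ```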
+pub struct GossipVerifiedExecutionProof { + block_root: Hash256, + execution_proof: Arc, + _phantom: PhantomData<(T, O)>, +} + +impl std::fmt::Debug + for GossipVerifiedExecutionProof +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("GossipVerifiedExecutionProof") + .field("block_root", &self.block_root) + .field("execution_proof", &self.execution_proof) + .finish() + } +} + +impl Clone for GossipVerifiedExecutionProof { + fn clone(&self) -> Self { + Self { + block_root: self.block_root, + execution_proof: self.execution_proof.clone(), + _phantom: PhantomData, + } + } +} + +impl GossipVerifiedExecutionProof { + pub fn new( + execution_proof: Arc, + chain: &BeaconChain, + ) -> Result { + validate_execution_proof_for_gossip::(execution_proof, chain) + } + + pub fn slot(&self) -> Slot { + self.execution_proof.slot + } + + pub fn block_root(&self) -> Hash256 { + self.block_root + } + + pub fn execution_proof(&self) -> &Arc { + &self.execution_proof + } + + pub fn subnet_id(&self) -> ExecutionProofId { + self.execution_proof.proof_id + } + + /// Get the block root for this proof. + pub fn into_inner(self) -> Arc { + self.execution_proof + } +} + +/// Validate an execution proof for gossip +pub fn validate_execution_proof_for_gossip( + execution_proof: Arc, + chain: &BeaconChain, +) -> Result, GossipExecutionProofError> { + let block_root = execution_proof.block_root; + let proof_slot = execution_proof.slot; + + // 1. Verify proof is not from the future + verify_proof_not_from_future_slot(chain, proof_slot)?; + + // 2. Verify proof slot is greater than finalized slot + verify_slot_greater_than_latest_finalized_slot(chain, proof_slot)?; + + // 3. Check if proof is already known via gossip + verify_is_unknown_execution_proof(chain, &execution_proof)?; + + // 4. Check if the proof is already in the DA checker cache + // If it exists in the cache, we know it has already passed validation. + if chain + .data_availability_checker + .is_execution_proof_cached(&block_root, &execution_proof) + { + if O::observe() { + observe_gossip_execution_proof(&execution_proof, chain)?; + } + return Err(GossipExecutionProofError::PriorKnownUnpublished); + } + + // 5. Verify proof size limits + verify_proof_size(&execution_proof, &chain.spec)?; + + // Note: We intentionally do NOT verify the block exists yet + // Execution proofs can arrive via gossip before their corresponding blocks, + // so we cache them in the DA checker and match them up when the block arrives. + // This is kind of similar to how blob sidecars work. + + // 6. Run zkVM proof verification + verify_zkvm_proof(&execution_proof, chain)?; + + // 7. Observe the proof to prevent reprocessing + if O::observe() { + observe_gossip_execution_proof(&execution_proof, chain)?; + } + + Ok(GossipVerifiedExecutionProof { + block_root, + execution_proof, + _phantom: PhantomData, + }) +} + +/// Verify that this execution proof has not been seen before via gossip +fn verify_is_unknown_execution_proof( + chain: &BeaconChain, + execution_proof: &ExecutionProof, +) -> Result<(), GossipExecutionProofError> { + let block_root = execution_proof.block_root; + let proof_id = execution_proof.proof_id; + let slot = execution_proof.slot; + + if chain + .observed_execution_proofs + .read() + .is_known(slot, block_root, proof_id) + .map_err(|e| { + GossipExecutionProofError::BeaconChainError(Box::new( + BeaconChainError::ObservedExecutionProofError(format!("{:?}", e)), + )) + })? 
+    {
+        return Err(GossipExecutionProofError::PriorKnown {
+            slot,
+            block_root,
+            proof_id,
+        });
+    }
+
+    Ok(())
+}
+
+/// Verify that the proof size is within acceptable limits.
+fn verify_proof_size(
+    execution_proof: &ExecutionProof,
+    _spec: &ChainSpec,
+) -> Result<(), GossipExecutionProofError> {
+    use types::MAX_PROOF_DATA_BYTES;
+
+    let proof_size = execution_proof.proof_data.len();
+    if proof_size > MAX_PROOF_DATA_BYTES {
+        return Err(GossipExecutionProofError::ProofTooLarge {
+            size: proof_size,
+            max_size: MAX_PROOF_DATA_BYTES,
+        });
+    }
+
+    Ok(())
+}
+
+/// Mark this execution proof as observed in gossip, to prevent reprocessing.
+fn observe_gossip_execution_proof<T: BeaconChainTypes>(
+    execution_proof: &ExecutionProof,
+    chain: &BeaconChain<T>,
+) -> Result<(), GossipExecutionProofError> {
+    let block_root = execution_proof.block_root;
+    let proof_id = execution_proof.proof_id;
+    let slot = execution_proof.slot;
+
+    chain
+        .observed_execution_proofs
+        .write()
+        .observe_proof(slot, block_root, proof_id)
+        .map_err(|e| {
+            GossipExecutionProofError::BeaconChainError(Box::new(
+                BeaconChainError::ObservedExecutionProofError(format!("{:?}", e)),
+            ))
+        })?;
+
+    debug!(
+        %block_root,
+        %proof_id,
+        %slot,
+        "Marked execution proof as observed"
+    );
+
+    Ok(())
+}
+
+/// Verify that the execution proof is not from a future slot.
+fn verify_proof_not_from_future_slot<T: BeaconChainTypes>(
+    chain: &BeaconChain<T>,
+    proof_slot: Slot,
+) -> Result<(), GossipExecutionProofError> {
+    let latest_permissible_slot = chain
+        .slot_clock
+        .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity())
+        .ok_or(BeaconChainError::UnableToReadSlot)?;
+
+    if proof_slot > latest_permissible_slot {
+        return Err(GossipExecutionProofError::FutureSlot {
+            message_slot: proof_slot,
+            latest_permissible_slot,
+        });
+    }
+
+    Ok(())
+}
+
+/// Verify that the execution proof slot is greater than the latest finalized slot.
+fn verify_slot_greater_than_latest_finalized_slot<T: BeaconChainTypes>(
+    chain: &BeaconChain<T>,
+    proof_slot: Slot,
+) -> Result<(), GossipExecutionProofError> {
+    let latest_finalized_slot = chain
+        .head()
+        .finalized_checkpoint()
+        .epoch
+        .start_slot(T::EthSpec::slots_per_epoch());
+
+    if proof_slot <= latest_finalized_slot {
+        return Err(GossipExecutionProofError::PastFinalizedSlot {
+            proof_slot,
+            finalized_slot: latest_finalized_slot,
+        });
+    }
+
+    Ok(())
+}
+
+/// Verify the zkVM proof.
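+///
+/// Verification is delegated to the data availability checker's
+/// `verify_execution_proof_for_gossip`, which returns `Ok(true)` for a valid proof and
+/// `Ok(false)` for an invalid one; any other error is surfaced as a `BeaconChainError`.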
+/// +/// Note: This is expensive +fn verify_zkvm_proof( + execution_proof: &ExecutionProof, + chain: &BeaconChain, +) -> Result<(), GossipExecutionProofError> { + let block_root = execution_proof.block_root; + let subnet_id = execution_proof.proof_id; + + match chain + .data_availability_checker + .verify_execution_proof_for_gossip(execution_proof) + { + Ok(true) => { + debug!(%block_root, %subnet_id, "Proof verification succeeded"); + Ok(()) + } + Ok(false) => { + warn!(%block_root, %subnet_id, "Proof verification failed: proof is invalid"); + Err(GossipExecutionProofError::ProofVerificationFailed(format!( + "zkVM proof verification failed for block_root={}, subnet_id={}", + block_root, subnet_id + ))) + } + Err(e) => { + error!(%block_root, %subnet_id, ?e, "Proof verification error"); + Err(GossipExecutionProofError::BeaconChainError(Box::new( + e.into(), + ))) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; + use types::{ExecutionBlockHash, ForkName, MainnetEthSpec}; + + type E = MainnetEthSpec; + + /// Helper to create a test execution proof + fn create_test_execution_proof( + subnet_id: ExecutionProofId, + slot: Slot, + block_root: Hash256, + ) -> ExecutionProof { + let block_hash = ExecutionBlockHash::zero(); + let proof_data = vec![0u8; 32]; // Dummy proof data + ExecutionProof::new(subnet_id, slot, block_hash, block_root, proof_data) + .expect("Valid test proof") + } + + #[tokio::test] + async fn test_reject_future_slot() { + let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.into()) + .deterministic_keypairs(64) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + let current_slot = harness.get_current_slot(); + let future_slot = current_slot + 100; + let proof_id = ExecutionProofId::new(0).expect("Valid proof id"); + let proof = create_test_execution_proof(proof_id, future_slot, Hash256::random()); + + let result = + validate_execution_proof_for_gossip::<_, Observe>(Arc::new(proof), &harness.chain); + + assert!(matches!( + result.err(), + Some(GossipExecutionProofError::FutureSlot { .. }) + )); + } + + #[tokio::test] + async fn test_reject_past_finalized_slot() { + let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.into()) + .deterministic_keypairs(64) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + // Advance to slot 1 first + harness.advance_slot(); + + // Advance chain to create finalized slot + harness + .extend_chain( + 32, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let finalized_slot = harness + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()); + // Create proof for slot before finalized + let old_slot = finalized_slot.saturating_sub(1u64); + let proof_id = ExecutionProofId::new(0).expect("Valid proof id"); + let proof = create_test_execution_proof(proof_id, old_slot, Hash256::random()); + + let result = + validate_execution_proof_for_gossip::<_, Observe>(Arc::new(proof), &harness.chain); + + assert!(matches!( + result.err(), + Some(GossipExecutionProofError::PastFinalizedSlot { .. 
}) + )); + } + + #[tokio::test] + async fn test_successful_validation() { + let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.into()) + .deterministic_keypairs(64) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + harness.advance_slot(); + let current_slot = harness.get_current_slot(); + let proof_id = ExecutionProofId::new(0).expect("Valid subnet id"); + + // Use a realistic block root from the chain + let block_root = harness.chain.head_beacon_block_root(); + let proof = create_test_execution_proof(proof_id, current_slot, block_root); + + let result = + validate_execution_proof_for_gossip::<_, Observe>(Arc::new(proof), &harness.chain); + + match result { + Ok(_) => {} + Err(GossipExecutionProofError::FutureSlot { .. }) + | Err(GossipExecutionProofError::PastFinalizedSlot { .. }) => { + panic!("Should not fail basic validation checks"); + } + Err(_) => {} + } + } + + /// This test verifies that: + /// 1. First gossip proof is accepted and marked as observed + /// 2. Duplicate gossip proof is rejected with PriorKnown + /// 3. DoS protection: Expensive verification only happens once + #[tokio::test] + async fn test_gossip_duplicate_proof_rejected() { + let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.into()) + .deterministic_keypairs(64) + .fresh_ephemeral_store() + .mock_execution_layer() + .zkvm_with_dummy_verifiers() + .build(); + + harness.advance_slot(); + let current_slot = harness.get_current_slot(); + let proof_id = ExecutionProofId::new(0).expect("Valid proof id"); + let block_root = Hash256::random(); + let proof = Arc::new(create_test_execution_proof( + proof_id, + current_slot, + block_root, + )); + + let result1 = + validate_execution_proof_for_gossip::<_, Observe>(proof.clone(), &harness.chain); + assert!(result1.is_ok()); + + // Should now be rejected as duplicate + let result2 = + validate_execution_proof_for_gossip::<_, Observe>(proof.clone(), &harness.chain); + + assert!( + matches!( + result2.err(), + Some(GossipExecutionProofError::PriorKnown { slot, block_root: br, proof_id: sid }) + if slot == current_slot && br == block_root && sid == proof_id + ), + "Duplicate proof must be rejected with PriorKnown error" + ); + + assert!( + harness + .chain + .observed_execution_proofs + .read() + .is_known(current_slot, block_root, proof_id) + .unwrap(), + "Proof should be marked as observed" + ); + } + + /// Test that proofs in the DA checker cache are detected and marked as observed. + /// + /// When a proof arrives via gossip but is already in the DA checker cache (from RPC), + /// we should: + /// 1. Accept it for gossip propagation + /// 2. Mark it as observed to prevent reprocessing + /// 3. 
Return PriorKnownUnpublished
+    #[tokio::test]
+    async fn test_da_cached_proof_accepted_and_observed() {
+        let spec = ForkName::Fulu.make_genesis_spec(E::default_spec());
+        let harness = BeaconChainHarness::builder(E::default())
+            .spec(spec.into())
+            .deterministic_keypairs(64)
+            .fresh_ephemeral_store()
+            .mock_execution_layer()
+            .build();
+
+        harness.advance_slot();
+        let subnet_id = ExecutionProofId::new(0).expect("Valid subnet id");
+        let current_slot = harness.get_current_slot();
+        let block_root = Hash256::random();
+
+        let proof = Arc::new(create_test_execution_proof(
+            subnet_id,
+            current_slot,
+            block_root,
+        ));
+
+        // Put the proof directly into the DA checker cache (this can happen if it arrives via RPC)
+        harness
+            .chain
+            .data_availability_checker
+            .put_rpc_execution_proofs(block_root, vec![proof.clone()])
+            .expect("Should put proof in DA cache");
+
+        // Verify it's in the cache
+        assert!(
+            harness
+                .chain
+                .data_availability_checker
+                .is_execution_proof_cached(&block_root, &proof),
+            "Proof should be in DA cache"
+        );
+
+        // Verify it's NOT in observed cache yet
+        assert!(
+            !harness
+                .chain
+                .observed_execution_proofs
+                .read()
+                .is_known(current_slot, block_root, subnet_id)
+                .unwrap(),
+            "Proof should not be in observed cache initially"
+        );
+
+        // Now it arrives via gossip
+        let result =
+            validate_execution_proof_for_gossip::<_, Observe>(proof.clone(), &harness.chain);
+
+        // Should be rejected with PriorKnownUnpublished (safe to propagate)
+        assert!(
+            matches!(
+                result.as_ref().err(),
+                Some(GossipExecutionProofError::PriorKnownUnpublished)
+            ),
+            "DA cached proof should return PriorKnownUnpublished, got: {:?}",
+            result
+        );
+
+        // Should now be marked as observed
+        assert!(
+            harness
+                .chain
+                .observed_execution_proofs
+                .read()
+                .is_known(current_slot, block_root, subnet_id)
+                .unwrap(),
+            "Proof should be marked as observed after DA cache check"
+        );
+
+        // Second gossip attempt should be rejected as PriorKnown (not PriorKnownUnpublished)
+        let result2 =
+            validate_execution_proof_for_gossip::<_, Observe>(proof.clone(), &harness.chain);
+
+        assert!(
+            matches!(
+                result2.err(),
+                Some(GossipExecutionProofError::PriorKnown { ..
}) + ), + "Second gossip should be rejected as PriorKnown (already observed)" + ); + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 4ac3e54742d..4e310c4556d 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -24,6 +24,7 @@ mod early_attester_cache; mod errors; pub mod events; pub mod execution_payload; +pub mod execution_proof_verification; pub mod fetch_blobs; pub mod fork_choice_signal; pub mod fork_revert; @@ -41,6 +42,7 @@ pub mod observed_aggregates; mod observed_attesters; pub mod observed_block_producers; pub mod observed_data_sidecars; +pub mod observed_execution_proofs; pub mod observed_operations; mod observed_slashable; pub mod persisted_beacon_chain; diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index bd232f2e8a2..e290cf510f0 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -120,6 +120,7 @@ pub enum Notification { Finalization(FinalizationNotification), Reconstruction, PruneBlobs(Epoch), + PruneExecutionProofs(Epoch), ManualFinalization(ManualFinalizationNotification), ManualCompaction, } @@ -251,6 +252,28 @@ impl, Cold: ItemStore> BackgroundMigrator>, + execution_proof_boundary: Epoch, + ) { + if let Err(e) = db.try_prune_execution_proofs(false, execution_proof_boundary) { + error!( + error = ?e, + "Execution proof pruning failed" + ); + } + } + /// If configured to run in the background, send `notif` to the background thread. /// /// Return `None` if the message was sent to the background thread, `Some(notif)` otherwise. @@ -440,11 +463,15 @@ impl, Cold: ItemStore> BackgroundMigrator reconstruction_notif = Some(notif), Notification::Finalization(fin) => finalization_notif = Some(fin), Notification::ManualFinalization(fin) => manual_finalization_notif = Some(fin), Notification::PruneBlobs(dab) => prune_blobs_notif = Some(dab), + Notification::PruneExecutionProofs(epb) => { + prune_execution_proofs_notif = Some(epb) + } Notification::ManualCompaction => manual_compaction_notif = Some(notif), } // Read the rest of the messages in the channel, taking the best of each type. @@ -475,6 +502,10 @@ impl, Cold: ItemStore> BackgroundMigrator { prune_blobs_notif = std::cmp::max(prune_blobs_notif, Some(dab)); } + Notification::PruneExecutionProofs(epb) => { + prune_execution_proofs_notif = + std::cmp::max(prune_execution_proofs_notif, Some(epb)); + } } } // Run finalization and blob pruning migrations first, then a reconstruction batch. @@ -489,6 +520,9 @@ impl, Cold: ItemStore> BackgroundMigrator Self { + Self { slot, block_root } + } +} + +/// Maintains a cache of seen execution proofs that were received over gossip. +/// +/// The cache tracks (slot, block_root, proof_id) tuples and prunes entries from finalized slots. +/// +/// ## DoS Resistance +/// +/// This cache is critical for preventing DoS attacks where an attacker repeatedly gossips +/// the same execution proof. zkVM verification is expensive (50-100ms), so we must avoid +/// re-verifying proofs we've already seen. +/// +/// ## Pruning +/// +/// Call `prune` on finalization to remove entries from finalized slots. This basically matches the +/// pattern used for observed blobs and data columns. +pub struct ObservedExecutionProofs { + /// The finalized slot. Proofs at or below this slot are rejected. + finalized_slot: Slot, + /// Map from (slot, block_root) to the set of subnet IDs we've seen for that block. 
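+    ///
+    /// Size stays bounded because `prune` drops every key at or below the finalized slot, so only
+    /// non-finalized (slot, block_root) pairs are tracked at any time.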
+ items: HashMap>, +} + +impl ObservedExecutionProofs { + /// Create a new cache with the given finalized slot. + /// + /// Proofs at or below `finalized_slot` will be rejected. + pub fn new(finalized_slot: Slot) -> Self { + Self { + finalized_slot, + items: HashMap::new(), + } + } + + /// Observe an execution proof from gossip. + /// + /// Returns `true` if the proof was already observed (duplicate), `false` if it's new. + /// + /// Returns an error if the proof's slot is at or below the finalized slot. + /// Note: This shouldn't happen because it means we've received a proof for + /// a finalized block + pub fn observe_proof( + &mut self, + slot: Slot, + block_root: Hash256, + proof_id: ExecutionProofId, + ) -> Result { + // Reject finalized proofs + if self.finalized_slot > 0 && slot <= self.finalized_slot { + return Err(Error::FinalizedExecutionProof { + slot, + finalized_slot: self.finalized_slot, + }); + } + + let key = ProofKey::new(slot, block_root); + let proof_ids = self.items.entry(key).or_default(); + + let was_duplicate = !proof_ids.insert(proof_id); + + Ok(was_duplicate) + } + + /// Check if we have already observed this proof. + /// + /// Returns `true` if the proof has been seen, `false` if it's new. + /// + /// Returns an error if the proof's slot is at or below the finalized slot. + pub fn is_known( + &self, + slot: Slot, + block_root: Hash256, + proof_id: ExecutionProofId, + ) -> Result { + // Reject finalized proofs + if self.finalized_slot > 0 && slot <= self.finalized_slot { + return Err(Error::FinalizedExecutionProof { + slot, + finalized_slot: self.finalized_slot, + }); + } + + let key = ProofKey::new(slot, block_root); + let is_known = self + .items + .get(&key) + .is_some_and(|proof_ids| proof_ids.contains(&proof_id)); + + Ok(is_known) + } + + /// Prune execution proof observations for slots less than or equal to the given slot. + /// + /// This matches the pruning behavior of observed blobs and data columns. + pub fn prune(&mut self, finalized_slot: Slot) { + if finalized_slot == 0 { + return; + } + + self.finalized_slot = finalized_slot; + self.items.retain(|key, _| key.slot > finalized_slot); + } + + /// Get the current finalized slot boundary. + /// + /// Proofs at or below this slot will be rejected. + pub fn finalized_slot(&self) -> Slot { + self.finalized_slot + } + + /// Get the number of unique (slot, block_root) keys being tracked. + pub fn len(&self) -> usize { + self.items.len() + } + + /// Check if the cache is empty. + pub fn is_empty(&self) -> bool { + self.items.is_empty() + } + + /// Clear all entries from the cache. 
+ #[cfg(test)] + pub fn clear(&mut self) { + self.items.clear(); + } +} + +impl Default for ObservedExecutionProofs { + fn default() -> Self { + Self::new(Slot::new(0)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use bls::FixedBytesExtended; + + fn test_proof_key(slot: u64) -> (Slot, Hash256, ExecutionProofId) { + ( + Slot::new(slot), + Hash256::from_low_u64_be(slot), + ExecutionProofId::new(0).unwrap(), + ) + } + + #[test] + fn test_observe_new_proof() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + let (slot, block_root, subnet_id) = test_proof_key(10); + + // First observation should return false (not a duplicate) + assert_eq!( + cache.observe_proof(slot, block_root, subnet_id), + Ok(false), + "first observation should not be duplicate" + ); + + // Second observation should return true (is a duplicate) + assert_eq!( + cache.observe_proof(slot, block_root, subnet_id), + Ok(true), + "second observation should be duplicate" + ); + } + + #[test] + fn test_observe_different_subnets() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + let slot = Slot::new(10); + let block_root = Hash256::from_low_u64_be(10); + let proof_0 = ExecutionProofId::new(0).unwrap(); + let proof_1 = ExecutionProofId::new(1).unwrap(); + + assert_eq!( + cache.observe_proof(slot, block_root, proof_0), + Ok(false), + "proof 0 is new" + ); + + // Observe proof from subnet 1 (same block, different proofID) + assert_eq!( + cache.observe_proof(slot, block_root, proof_1), + Ok(false), + "proof 1 is new" + ); + + // Re-observe proof 0 + assert_eq!( + cache.observe_proof(slot, block_root, proof_0), + Ok(true), + "proof 0 is duplicate" + ); + + assert!(cache.is_known(slot, block_root, proof_0).unwrap()); + assert!(cache.is_known(slot, block_root, proof_1).unwrap()); + } + + #[test] + fn test_is_known() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + let (slot, block_root, proof_id) = test_proof_key(10); + + // Before observation + assert_eq!( + cache.is_known(slot, block_root, proof_id), + Ok(false), + "not yet observed" + ); + + // After observation + cache.observe_proof(slot, block_root, proof_id).unwrap(); + assert_eq!( + cache.is_known(slot, block_root, proof_id), + Ok(true), + "now observed" + ); + } + + #[test] + fn test_reject_finalized_proofs() { + let finalized_slot = Slot::new(100); + let mut cache = ObservedExecutionProofs::new(finalized_slot); + + let old_slot = Slot::new(100); + let block_root = Hash256::from_low_u64_be(100); + let proof_id = ExecutionProofId::new(0).unwrap(); + + // Observing finalized proof should error + assert_eq!( + cache.observe_proof(old_slot, block_root, proof_id), + Err(Error::FinalizedExecutionProof { + slot: old_slot, + finalized_slot, + }), + "finalized proofs should be rejected" + ); + + // Checking finalized proof should error + assert_eq!( + cache.is_known(old_slot, block_root, proof_id), + Err(Error::FinalizedExecutionProof { + slot: old_slot, + finalized_slot, + }), + "finalized proofs should be rejected in is_known" + ); + } + + #[test] + fn test_pruning() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + + // Add proofs at different slots + for slot in 0..100 { + let (s, br, pid) = test_proof_key(slot); + cache.observe_proof(s, br, pid).unwrap(); + } + + assert_eq!(cache.len(), 100, "should have 100 entries"); + + // Prune at finalized_slot = 50 + // Should remove slots <= 50, keep slots > 50 + let finalized_slot = Slot::new(50); + cache.prune(finalized_slot); + + assert_eq!( + cache.finalized_slot(), + 
finalized_slot, + "finalized slot should be updated" + ); + + // Check that finalized entries were removed + let old_slot = Slot::new(50); + let old_block_root = Hash256::from_low_u64_be(50); + let proof_id = ExecutionProofId::new(0).unwrap(); + + assert!( + cache.is_known(old_slot, old_block_root, proof_id).is_err(), + "finalized entries should be rejected after pruning" + ); + + // Check that non-finalized entries are still present + let recent_slot = Slot::new(51); + let recent_block_root = Hash256::from_low_u64_be(51); + assert!( + cache + .is_known(recent_slot, recent_block_root, proof_id) + .unwrap(), + "non-finalized entries should still be present" + ); + } + + #[test] + fn test_prune_removes_exact_boundary() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + + // Add proofs at slots 50, 51, 52 + for slot in 50..=52 { + let (s, br, pid) = test_proof_key(slot); + cache.observe_proof(s, br, pid).unwrap(); + } + + // Prune at finalized_slot = 50 + // Should remove slots <= 50, keep slots > 50 + cache.prune(Slot::new(50)); + + assert_eq!(cache.finalized_slot(), Slot::new(50)); + + let proof_id = ExecutionProofId::new(0).unwrap(); + + // Slot 50 should be rejected (finalized) + assert!( + cache + .is_known(Slot::new(50), Hash256::from_low_u64_be(50), proof_id) + .is_err() + ); + + // Slot 51 should still be present (> finalized) + assert!( + cache + .is_known(Slot::new(51), Hash256::from_low_u64_be(51), proof_id) + .unwrap() + ); + + // Slot 52 should still be present + assert!( + cache + .is_known(Slot::new(52), Hash256::from_low_u64_be(52), proof_id) + .unwrap() + ); + } + + #[test] + fn test_different_blocks_same_slot() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + let slot = Slot::new(10); + let block_root_a = Hash256::from_low_u64_be(100); + let block_root_b = Hash256::from_low_u64_be(200); + let proof_id = ExecutionProofId::new(0).unwrap(); + + // Observe proof for block A + cache.observe_proof(slot, block_root_a, proof_id).unwrap(); + + // Proof for block B should be new (different block_root) + assert_eq!( + cache.observe_proof(slot, block_root_b, proof_id), + Ok(false), + "different block_root should not be duplicate" + ); + + assert!(cache.is_known(slot, block_root_a, proof_id).unwrap()); + assert!(cache.is_known(slot, block_root_b, proof_id).unwrap()); + } + + #[test] + fn test_len_counts_blocks_not_subnets() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + let slot = Slot::new(10); + let block_root = Hash256::from_low_u64_be(10); + + // Add multiple proof IDs for same block + for i in 0..8 { + let proof_id = ExecutionProofId::new(i).unwrap(); + cache.observe_proof(slot, block_root, proof_id).unwrap(); + } + + // Length should be 1 (one unique (slot, block_root) key) + assert_eq!(cache.len(), 1, "len counts unique keys, not proofIDs"); + } +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 6d17d6d85c5..e5aa6da52ed 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -229,6 +229,7 @@ pub struct Builder { testing_slot_clock: Option, validator_monitor_config: Option, genesis_state_builder: Option>, + zkvm_execution_layer_config: Option, node_custody_type: NodeCustodyType, runtime: TestRuntime, } @@ -375,6 +376,7 @@ where testing_slot_clock: None, validator_monitor_config: None, genesis_state_builder: None, + zkvm_execution_layer_config: None, node_custody_type: NodeCustodyType::Fullnode, runtime, } @@ -549,6 +551,13 
@@ where self } + /// Enable zkVM execution proof verification with dummy verifiers for testing. + pub fn zkvm_with_dummy_verifiers(mut self) -> Self { + self.zkvm_execution_layer_config = + Some(zkvm_execution_layer::ZKVMExecutionLayerConfig::default()); + self + } + pub fn with_genesis_state_builder( mut self, f: impl FnOnce(InteropGenesisBuilder) -> InteropGenesisBuilder, @@ -590,6 +599,12 @@ where .validator_monitor_config(validator_monitor_config) .rng(Box::new(StdRng::seed_from_u64(42))); + builder = if let Some(zkvm_config) = self.zkvm_execution_layer_config { + builder.zkvm_execution_layer_config(Some(zkvm_config)) + } else { + builder + }; + builder = if let Some(mutator) = self.initial_mutator { mutator(builder) } else { diff --git a/beacon_node/beacon_chain/tests/schema_stability.rs b/beacon_node/beacon_chain/tests/schema_stability.rs index db7f7dbdbbd..bb44869971d 100644 --- a/beacon_node/beacon_chain/tests/schema_stability.rs +++ b/beacon_node/beacon_chain/tests/schema_stability.rs @@ -105,9 +105,9 @@ async fn schema_stability() { fn check_db_columns() { let current_columns: Vec<&'static str> = DBColumn::iter().map(|c| c.as_str()).collect(); let expected_columns = vec![ - "bma", "blk", "blb", "bdc", "bdi", "ste", "hsd", "hsn", "bsn", "bsd", "bss", "bs3", "bcs", - "bst", "exp", "bch", "opo", "etc", "frk", "pkc", "brp", "bsx", "bsr", "bbx", "bbr", "bhr", - "brm", "dht", "cus", "otb", "bhs", "olc", "lcu", "scb", "scm", "dmy", + "bma", "blk", "blb", "bdc", "bdi", "bep", "ste", "hsd", "hsn", "bsn", "bsd", "bss", "bs3", + "bcs", "bst", "exp", "bch", "opo", "etc", "frk", "pkc", "brp", "bsx", "bsr", "bbx", "bbr", + "bhr", "brm", "dht", "cus", "otb", "bhs", "olc", "lcu", "scb", "scm", "dmy", ]; assert_eq!(expected_columns, current_columns); } diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 1cdf3693ff2..f98d57e5cb6 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -114,6 +114,7 @@ pub struct BeaconProcessorQueueLengths { unknown_light_client_update_queue: usize, rpc_block_queue: usize, rpc_blob_queue: usize, + rpc_execution_proof_queue: usize, rpc_custody_column_queue: usize, column_reconstruction_queue: usize, chain_segment_queue: usize, @@ -121,11 +122,14 @@ pub struct BeaconProcessorQueueLengths { gossip_block_queue: usize, gossip_blob_queue: usize, gossip_data_column_queue: usize, + gossip_execution_proof_queue: usize, delayed_block_queue: usize, status_queue: usize, block_brange_queue: usize, block_broots_queue: usize, blob_broots_queue: usize, + execution_proof_broots_queue: usize, + execution_proof_brange_queue: usize, blob_brange_queue: usize, dcbroots_queue: usize, dcbrange_queue: usize, @@ -178,6 +182,7 @@ impl BeaconProcessorQueueLengths { unknown_light_client_update_queue: 128, rpc_block_queue: 1024, rpc_blob_queue: 1024, + rpc_execution_proof_queue: 1024, // We don't request more than `PARENT_DEPTH_TOLERANCE` (32) lookups, so we can limit // this queue size. With 48 max blobs per block, each column sidecar list could be up to 12MB. 
rpc_custody_column_queue: 64, @@ -187,11 +192,14 @@ impl BeaconProcessorQueueLengths { gossip_block_queue: 1024, gossip_blob_queue: 1024, gossip_data_column_queue: 1024, + gossip_execution_proof_queue: 1024, delayed_block_queue: 1024, status_queue: 1024, block_brange_queue: 1024, block_broots_queue: 1024, blob_broots_queue: 1024, + execution_proof_broots_queue: 1024, + execution_proof_brange_queue: 1024, blob_brange_queue: 1024, dcbroots_queue: 1024, dcbrange_queue: 1024, @@ -579,6 +587,7 @@ pub enum Work { GossipBlock(AsyncFn), GossipBlobSidecar(AsyncFn), GossipDataColumnSidecar(AsyncFn), + GossipExecutionProof(AsyncFn), DelayedImportBlock { beacon_block_slot: Slot, beacon_block_root: Hash256, @@ -597,6 +606,9 @@ pub enum Work { RpcBlobs { process_fn: AsyncFn, }, + RpcExecutionProofs { + process_fn: AsyncFn, + }, RpcCustodyColumn(AsyncFn), ColumnReconstruction(AsyncFn), IgnoredRpcBlock { @@ -609,6 +621,8 @@ pub enum Work { BlocksByRootsRequest(AsyncFn), BlobsByRangeRequest(BlockingFn), BlobsByRootsRequest(BlockingFn), + ExecutionProofsByRootsRequest(BlockingFn), + ExecutionProofsByRangeRequest(BlockingFn), DataColumnsByRootsRequest(BlockingFn), DataColumnsByRangeRequest(BlockingFn), GossipBlsToExecutionChange(BlockingFn), @@ -641,6 +655,7 @@ pub enum WorkType { GossipBlock, GossipBlobSidecar, GossipDataColumnSidecar, + GossipExecutionProof, DelayedImportBlock, GossipVoluntaryExit, GossipProposerSlashing, @@ -651,6 +666,7 @@ pub enum WorkType { GossipLightClientOptimisticUpdate, RpcBlock, RpcBlobs, + RpcExecutionProofs, RpcCustodyColumn, ColumnReconstruction, IgnoredRpcBlock, @@ -661,6 +677,8 @@ pub enum WorkType { BlocksByRootsRequest, BlobsByRangeRequest, BlobsByRootsRequest, + ExecutionProofsByRootsRequest, + ExecutionProofsByRangeRequest, DataColumnsByRootsRequest, DataColumnsByRangeRequest, GossipBlsToExecutionChange, @@ -688,6 +706,7 @@ impl Work { Work::GossipBlock(_) => WorkType::GossipBlock, Work::GossipBlobSidecar(_) => WorkType::GossipBlobSidecar, Work::GossipDataColumnSidecar(_) => WorkType::GossipDataColumnSidecar, + Work::GossipExecutionProof(_) => WorkType::GossipExecutionProof, Work::DelayedImportBlock { .. } => WorkType::DelayedImportBlock, Work::GossipVoluntaryExit(_) => WorkType::GossipVoluntaryExit, Work::GossipProposerSlashing(_) => WorkType::GossipProposerSlashing, @@ -701,6 +720,7 @@ impl Work { Work::GossipBlsToExecutionChange(_) => WorkType::GossipBlsToExecutionChange, Work::RpcBlock { .. } => WorkType::RpcBlock, Work::RpcBlobs { .. } => WorkType::RpcBlobs, + Work::RpcExecutionProofs { .. } => WorkType::RpcExecutionProofs, Work::RpcCustodyColumn { .. } => WorkType::RpcCustodyColumn, Work::ColumnReconstruction(_) => WorkType::ColumnReconstruction, Work::IgnoredRpcBlock { .. } => WorkType::IgnoredRpcBlock, @@ -711,6 +731,8 @@ impl Work { Work::BlocksByRootsRequest(_) => WorkType::BlocksByRootsRequest, Work::BlobsByRangeRequest(_) => WorkType::BlobsByRangeRequest, Work::BlobsByRootsRequest(_) => WorkType::BlobsByRootsRequest, + Work::ExecutionProofsByRootsRequest(_) => WorkType::ExecutionProofsByRootsRequest, + Work::ExecutionProofsByRangeRequest(_) => WorkType::ExecutionProofsByRangeRequest, Work::DataColumnsByRootsRequest(_) => WorkType::DataColumnsByRootsRequest, Work::DataColumnsByRangeRequest(_) => WorkType::DataColumnsByRangeRequest, Work::LightClientBootstrapRequest(_) => WorkType::LightClientBootstrapRequest, @@ -865,6 +887,7 @@ impl BeaconProcessor { // Using a FIFO queue since blocks need to be imported sequentially. 
let mut rpc_block_queue = FifoQueue::new(queue_lengths.rpc_block_queue); let mut rpc_blob_queue = FifoQueue::new(queue_lengths.rpc_blob_queue); + let mut rpc_execution_proof_queue = FifoQueue::new(queue_lengths.rpc_execution_proof_queue); let mut rpc_custody_column_queue = FifoQueue::new(queue_lengths.rpc_custody_column_queue); let mut column_reconstruction_queue = LifoQueue::new(queue_lengths.column_reconstruction_queue); @@ -873,12 +896,18 @@ impl BeaconProcessor { let mut gossip_block_queue = FifoQueue::new(queue_lengths.gossip_block_queue); let mut gossip_blob_queue = FifoQueue::new(queue_lengths.gossip_blob_queue); let mut gossip_data_column_queue = FifoQueue::new(queue_lengths.gossip_data_column_queue); + let mut gossip_execution_proof_queue = + FifoQueue::new(queue_lengths.gossip_execution_proof_queue); let mut delayed_block_queue = FifoQueue::new(queue_lengths.delayed_block_queue); let mut status_queue = FifoQueue::new(queue_lengths.status_queue); let mut block_brange_queue = FifoQueue::new(queue_lengths.block_brange_queue); let mut block_broots_queue = FifoQueue::new(queue_lengths.block_broots_queue); let mut blob_broots_queue = FifoQueue::new(queue_lengths.blob_broots_queue); + let mut execution_proof_broots_queue = + FifoQueue::new(queue_lengths.execution_proof_broots_queue); + let mut execution_proof_brange_queue = + FifoQueue::new(queue_lengths.execution_proof_brange_queue); let mut blob_brange_queue = FifoQueue::new(queue_lengths.blob_brange_queue); let mut dcbroots_queue = FifoQueue::new(queue_lengths.dcbroots_queue); let mut dcbrange_queue = FifoQueue::new(queue_lengths.dcbrange_queue); @@ -1039,6 +1068,8 @@ impl BeaconProcessor { Some(item) } else if let Some(item) = rpc_blob_queue.pop() { Some(item) + } else if let Some(item) = rpc_execution_proof_queue.pop() { + Some(item) } else if let Some(item) = rpc_custody_column_queue.pop() { Some(item) } else if let Some(item) = rpc_custody_column_queue.pop() { @@ -1055,6 +1086,8 @@ impl BeaconProcessor { Some(item) } else if let Some(item) = gossip_data_column_queue.pop() { Some(item) + } else if let Some(item) = gossip_execution_proof_queue.pop() { + Some(item) } else if let Some(item) = column_reconstruction_queue.pop() { Some(item) // Check the priority 0 API requests after blocks and blobs, but before attestations. @@ -1198,6 +1231,10 @@ impl BeaconProcessor { Some(item) } else if let Some(item) = blob_broots_queue.pop() { Some(item) + } else if let Some(item) = execution_proof_broots_queue.pop() { + Some(item) + } else if let Some(item) = execution_proof_brange_queue.pop() { + Some(item) } else if let Some(item) = dcbroots_queue.pop() { Some(item) } else if let Some(item) = dcbrange_queue.pop() { @@ -1325,6 +1362,9 @@ impl BeaconProcessor { Work::GossipDataColumnSidecar { .. } => { gossip_data_column_queue.push(work, work_id) } + Work::GossipExecutionProof { .. } => { + gossip_execution_proof_queue.push(work, work_id) + } Work::DelayedImportBlock { .. } => { delayed_block_queue.push(work, work_id) } @@ -1351,6 +1391,9 @@ impl BeaconProcessor { rpc_block_queue.push(work, work_id) } Work::RpcBlobs { .. } => rpc_blob_queue.push(work, work_id), + Work::RpcExecutionProofs { .. } => { + rpc_execution_proof_queue.push(work, work_id) + } Work::RpcCustodyColumn { .. } => { rpc_custody_column_queue.push(work, work_id) } @@ -1393,6 +1436,12 @@ impl BeaconProcessor { Work::BlobsByRootsRequest { .. } => { blob_broots_queue.push(work, work_id) } + Work::ExecutionProofsByRootsRequest { .. 
} => { + execution_proof_broots_queue.push(work, work_id) + } + Work::ExecutionProofsByRangeRequest { .. } => { + execution_proof_brange_queue.push(work, work_id) + } Work::DataColumnsByRootsRequest { .. } => { dcbroots_queue.push(work, work_id) } @@ -1424,6 +1473,7 @@ impl BeaconProcessor { WorkType::GossipBlock => gossip_block_queue.len(), WorkType::GossipBlobSidecar => gossip_blob_queue.len(), WorkType::GossipDataColumnSidecar => gossip_data_column_queue.len(), + WorkType::GossipExecutionProof => gossip_execution_proof_queue.len(), WorkType::DelayedImportBlock => delayed_block_queue.len(), WorkType::GossipVoluntaryExit => gossip_voluntary_exit_queue.len(), WorkType::GossipProposerSlashing => gossip_proposer_slashing_queue.len(), @@ -1438,6 +1488,7 @@ impl BeaconProcessor { } WorkType::RpcBlock => rpc_block_queue.len(), WorkType::RpcBlobs | WorkType::IgnoredRpcBlock => rpc_blob_queue.len(), + WorkType::RpcExecutionProofs => rpc_execution_proof_queue.len(), WorkType::RpcCustodyColumn => rpc_custody_column_queue.len(), WorkType::ColumnReconstruction => column_reconstruction_queue.len(), WorkType::ChainSegment => chain_segment_queue.len(), @@ -1447,6 +1498,12 @@ impl BeaconProcessor { WorkType::BlocksByRootsRequest => block_broots_queue.len(), WorkType::BlobsByRangeRequest => blob_brange_queue.len(), WorkType::BlobsByRootsRequest => blob_broots_queue.len(), + WorkType::ExecutionProofsByRootsRequest => { + execution_proof_broots_queue.len() + } + WorkType::ExecutionProofsByRangeRequest => { + execution_proof_brange_queue.len() + } WorkType::DataColumnsByRootsRequest => dcbroots_queue.len(), WorkType::DataColumnsByRangeRequest => dcbrange_queue.len(), WorkType::GossipBlsToExecutionChange => { @@ -1594,16 +1651,20 @@ impl BeaconProcessor { } => task_spawner.spawn_async(process_fn), Work::RpcBlock { process_fn } | Work::RpcBlobs { process_fn } + | Work::RpcExecutionProofs { process_fn } | Work::RpcCustodyColumn(process_fn) | Work::ColumnReconstruction(process_fn) => task_spawner.spawn_async(process_fn), Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn), Work::GossipBlock(work) | Work::GossipBlobSidecar(work) - | Work::GossipDataColumnSidecar(work) => task_spawner.spawn_async(async move { + | Work::GossipDataColumnSidecar(work) + | Work::GossipExecutionProof(work) => task_spawner.spawn_async(async move { work.await; }), Work::BlobsByRangeRequest(process_fn) | Work::BlobsByRootsRequest(process_fn) + | Work::ExecutionProofsByRootsRequest(process_fn) + | Work::ExecutionProofsByRangeRequest(process_fn) | Work::DataColumnsByRootsRequest(process_fn) | Work::DataColumnsByRangeRequest(process_fn) => { task_spawner.spawn_blocking(process_fn) diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 3c4b2572c9a..e6f50b4e232 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -39,6 +39,8 @@ tokio = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } types = { workspace = true } +# TODO(zkproofs): add as a workspace dependency +zkvm_execution_layer = { path = "../../zkvm_execution_layer" } [dev-dependencies] operation_pool = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index c48021e45d4..41f8a8f6c8f 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -194,6 +194,18 @@ where Kzg::new_from_trusted_setup_no_precomp(&config.trusted_setup).map_err(kzg_err_msg)? 
}; + // Modify spec if zkvm mode is enabled via CLI + let spec = if let Some(zkvm_config) = &config.zkvm_execution_layer { + let mut modified_spec = (*spec).clone(); + + modified_spec.zkvm_enabled = true; + modified_spec.zkvm_min_proofs_required = zkvm_config.min_proofs_required; + + Arc::new(modified_spec) + } else { + spec + }; + let ordered_custody_column_indices = compute_ordered_custody_column_indices::(node_id, &spec).map_err(|e| { format!("Failed to compute ordered custody column indices: {:?}", e) @@ -210,6 +222,7 @@ where .beacon_graffiti(beacon_graffiti) .event_handler(event_handler) .execution_layer(execution_layer) + .zkvm_execution_layer_config(config.zkvm_execution_layer.clone()) .node_custody_type(config.chain.node_custody_type) .ordered_custody_column_indices(ordered_custody_column_indices) .validator_monitor_config(config.validator_monitor.clone()) diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index aeaa196df86..c62e3afb2e2 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -69,6 +69,7 @@ pub struct Config { pub network: network::NetworkConfig, pub chain: beacon_chain::ChainConfig, pub execution_layer: Option, + pub zkvm_execution_layer: Option, pub trusted_setup: Vec, pub http_api: http_api::Config, pub http_metrics: http_metrics::Config, @@ -94,6 +95,7 @@ impl Default for Config { network: NetworkConfig::default(), chain: <_>::default(), execution_layer: None, + zkvm_execution_layer: None, trusted_setup: get_trusted_setup(), beacon_graffiti: GraffitiOrigin::default(), http_api: <_>::default(), diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 571dab10273..c559b98edb9 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -39,6 +39,7 @@ sensitive_url = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } slot_clock = { workspace = true } +ssz_types = { workspace = true } state_processing = { workspace = true } store = { workspace = true } sysinfo = { workspace = true } diff --git a/beacon_node/http_api/src/beacon/pool.rs b/beacon_node/http_api/src/beacon/pool.rs index 059573c3175..3c4021fa611 100644 --- a/beacon_node/http_api/src/beacon/pool.rs +++ b/beacon_node/http_api/src/beacon/pool.rs @@ -1,15 +1,21 @@ use crate::task_spawner::{Priority, TaskSpawner}; -use crate::utils::{NetworkTxFilter, OptionalConsensusVersionHeaderFilter, ResponseFilter}; +use crate::utils::{ + NetworkTxFilter, OptionalConsensusVersionHeaderFilter, ResponseFilter, SyncTxFilter, +}; use crate::version::{ ResponseIncludesVersion, V1, V2, add_consensus_version_header, beacon_response, unsupported_version_rejection, }; use crate::{sync_committees, utils}; +use beacon_chain::execution_proof_verification::{ + GossipExecutionProofError, GossipVerifiedExecutionProof, +}; +use beacon_chain::observed_data_sidecars::Observe; use beacon_chain::observed_operations::ObservationOutcome; -use beacon_chain::{BeaconChain, BeaconChainTypes}; +use beacon_chain::{AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes}; use eth2::types::{AttestationPoolQuery, EndpointVersion, Failure, GenericResponse}; use lighthouse_network::PubsubMessage; -use network::NetworkMessage; +use network::{NetworkMessage, SyncMessage}; use operation_pool::ReceivedPreCapella; use slot_clock::SlotClock; use std::collections::HashSet; @@ -17,7 +23,7 @@ use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use tracing::{debug, info, warn}; use types::{ - Attestation, 
AttestationData, AttesterSlashing, ForkName, ProposerSlashing, + Attestation, AttestationData, AttesterSlashing, ExecutionProof, ForkName, ProposerSlashing, SignedBlsToExecutionChange, SignedVoluntaryExit, SingleAttestation, SyncCommitteeMessage, }; use warp::filters::BoxedFilter; @@ -520,3 +526,127 @@ pub fn post_beacon_pool_attestations_v2( ) .boxed() } + +/// POST beacon/pool/execution_proofs +/// +/// Submits an execution proof to the beacon node. +/// The proof will be validated and stored in the data availability checker. +/// If valid, the proof will be published to the gossip network. +/// If the proof makes a block available, the block will be imported. +pub fn post_beacon_pool_execution_proofs( + network_tx_filter: &NetworkTxFilter, + sync_tx_filter: &SyncTxFilter, + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("execution_proofs")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .and(sync_tx_filter.clone()) + .then( + |_task_spawner: TaskSpawner, + chain: Arc>, + proof: ExecutionProof, + network_tx_filter: UnboundedSender>, + sync_tx_filter: UnboundedSender>| async move { + let result = + publish_execution_proof(chain, proof, network_tx_filter, sync_tx_filter).await; + convert_rejection(result.map(|()| warp::reply::json(&()))).await + }, + ) + .boxed() +} + +/// Validate, publish, and process an execution proof. +async fn publish_execution_proof( + chain: Arc>, + proof: ExecutionProof, + network_tx: UnboundedSender>, + sync_tx: UnboundedSender>, +) -> Result<(), warp::Rejection> { + let proof = Arc::new(proof); + + // Validate the proof using the same logic as gossip validation + let verified_proof: GossipVerifiedExecutionProof = + GossipVerifiedExecutionProof::new(proof.clone(), &chain).map_err(|e| match e { + GossipExecutionProofError::PriorKnown { + slot, + block_root, + proof_id, + } => { + debug!( + %slot, + %block_root, + %proof_id, + "Execution proof already known" + ); + warp_utils::reject::custom_bad_request(format!( + "proof already known for slot {} block_root {} proof_id {}", + slot, block_root, proof_id + )) + } + GossipExecutionProofError::PriorKnownUnpublished => { + // Proof is valid but was received via non-gossip source + // It's in the DA checker, so we should publish it to gossip + warp_utils::reject::custom_bad_request( + "proof already received but not yet published".to_string(), + ) + } + _ => warp_utils::reject::object_invalid(format!("proof verification failed: {:?}", e)), + })?; + + let slot = verified_proof.slot(); + let block_root = verified_proof.block_root(); + let proof_id = verified_proof.subnet_id(); + + // Publish the proof to the gossip network + utils::publish_pubsub_message( + &network_tx, + PubsubMessage::ExecutionProof(verified_proof.clone().into_inner()), + )?; + + // Store the proof in the data availability checker and check if block is now available. + // This properly triggers block import if all components are now available. 
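+    // If the result is `Imported`, this proof was the last missing piece: recompute the head and
+    // notify sync that the block landed. Any other outcome simply leaves the proof cached in the
+    // DA checker until the block (or the remaining proofs) arrives.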
+ match chain + .process_rpc_execution_proofs(slot, block_root, vec![verified_proof.into_inner()]) + .await + { + Ok(status) => { + info!( + %slot, + %block_root, + %proof_id, + ?status, + "Execution proof submitted and published" + ); + + if let AvailabilityProcessingStatus::Imported(_) = status { + chain.recompute_head_at_current_slot().await; + + // Notify that block was imported via HTTP API + if let Err(e) = sync_tx.send(SyncMessage::GossipBlockProcessResult { + block_root, + imported: true, + }) { + debug!(error = %e, "Could not send message to the sync service") + }; + } + } + Err(e) => { + // Log the error but don't fail the request - the proof was already + // published to gossip and stored in the DA checker. The error is + // likely due to the block already being imported or similar. + debug!( + %slot, + %block_root, + %proof_id, + error = ?e, + "Error processing execution proof availability (proof was still published)" + ); + } + } + + Ok(()) +} diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index ea8b47f91ef..382eb329a85 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -5,14 +5,17 @@ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkip use eth2::beacon_response::{ExecutionOptimisticFinalizedMetadata, UnversionedResponse}; use eth2::types::BlockId as CoreBlockId; use eth2::types::DataColumnIndicesQuery; -use eth2::types::{BlobIndicesQuery, BlobWrapper, BlobsVersionedHashesQuery}; +use eth2::types::{ + BlobIndicesQuery, BlobWrapper, BlobsVersionedHashesQuery, ExecutionProofIdsQuery, +}; use fixed_bytes::FixedBytesExtended; +use ssz_types::RuntimeVariableList; use std::fmt; use std::str::FromStr; use std::sync::Arc; use types::{ - BlobSidecarList, DataColumnSidecarList, EthSpec, ForkName, Hash256, SignedBeaconBlock, - SignedBlindedBeaconBlock, Slot, + BlobSidecarList, DataColumnSidecarList, EthSpec, ExecutionProof, ExecutionProofId, ForkName, + Hash256, MAX_PROOFS, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot, }; use warp::Rejection; @@ -30,6 +33,12 @@ type DataColumnsResponse = ( Finalized, ); +type ExecutionProofsResponse = ( + RuntimeVariableList>, + ExecutionOptimistic, + Finalized, +); + impl BlockId { pub fn from_slot(slot: Slot) -> Self { Self(CoreBlockId::Slot(slot)) @@ -312,6 +321,53 @@ impl BlockId { )) } + pub fn get_execution_proofs( + &self, + query: ExecutionProofIdsQuery, + chain: &BeaconChain, + ) -> Result { + if !chain.spec.is_zkvm_enabled() { + return Err(warp_utils::reject::custom_bad_request( + "zkvm is not enabled for this node".to_string(), + )); + } + + let (root, execution_optimistic, finalized) = self.root(chain)?; + let _block = BlockId::blinded_block_by_root(&root, chain)?.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!("beacon block with root {}", root)) + })?; + + let mut proofs = chain + .store + .get_execution_proofs(&root) + .map_err(warp_utils::reject::unhandled_error)?; + + if proofs.is_empty() { + return Err(warp_utils::reject::custom_not_found(format!( + "no execution proofs stored for block {root}" + ))); + } + + let proof_ids = query + .proof_ids + .map(|ids| { + ids.into_iter() + .map(ExecutionProofId::new) + .collect::, _>>() + }) + .transpose() + .map_err(warp_utils::reject::custom_bad_request)?; + + if let Some(proof_ids) = proof_ids { + proofs.retain(|proof| proof_ids.contains(&proof.proof_id)); + } + + let proof_list = RuntimeVariableList::new(proofs, MAX_PROOFS) + .map_err(|e| 
warp_utils::reject::custom_server_error(format!("{:?}", e)))?; + + Ok((proof_list, execution_optimistic, finalized)) + } + #[allow(clippy::type_complexity)] pub fn get_blinded_block_and_blob_list_filtered( &self, diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 58cd2a3bdbc..2395a5235c4 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -210,6 +210,7 @@ pub fn prometheus_metrics() -> warp::filters::log::Log( }) .boxed(); + // Create a `warp` filter that provides access to the sync sender channel. + let sync_tx = ctx + .network_senders + .as_ref() + .map(|senders| senders.sync_send()); + let sync_tx_filter = warp::any() + .map(move || sync_tx.clone()) + .and_then(|sync_tx| async move { + match sync_tx { + Some(sync_tx) => Ok(sync_tx), + None => Err(warp_utils::reject::custom_not_found( + "The networking stack has not yet started (sync_tx).".to_string(), + )), + } + }) + .boxed(); + // Create a `warp` filter that rejects requests whilst the node is syncing. let not_while_syncing_filter = warp::any() @@ -1394,6 +1412,53 @@ pub fn serve( }, ); + // GET beacon/execution_proofs/{block_id} + let get_execution_proofs = eth_v1 + .clone() + .and(warp::path("beacon")) + .and(warp::path("execution_proofs")) + .and(block_id_or_err) + .and(warp::path::end()) + .and(multi_key_query::()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp::header::optional::("accept")) + .then( + |block_id: BlockId, + proof_ids_res: Result, + task_spawner: TaskSpawner, + chain: Arc>, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let proof_ids = proof_ids_res?; + let (proofs, execution_optimistic, finalized) = + block_id.get_execution_proofs(proof_ids, &chain)?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(proofs.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => { + let res = execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::No, + execution_optimistic, + finalized, + &proofs, + )?; + Ok(warp::reply::json(&res).into_response()) + } + } + }) + }, + ); + /* * beacon/pool */ @@ -1465,6 +1530,10 @@ pub fn serve( let post_beacon_pool_bls_to_execution_changes = post_beacon_pool_bls_to_execution_changes(&network_tx_filter, &beacon_pool_path); + // POST beacon/pool/execution_proofs + let post_beacon_pool_execution_proofs = + post_beacon_pool_execution_proofs(&network_tx_filter, &sync_tx_filter, &beacon_pool_path); + let beacon_rewards_path = eth_v1 .clone() .and(warp::path("beacon")) @@ -3283,6 +3352,7 @@ pub fn serve( .uor(get_beacon_block_root) .uor(get_blob_sidecars) .uor(get_blobs) + .uor(get_execution_proofs) .uor(get_beacon_pool_attestations) .uor(get_beacon_pool_attester_slashings) .uor(get_beacon_pool_proposer_slashings) @@ -3356,6 +3426,7 @@ pub fn serve( .uor(post_beacon_pool_voluntary_exits) .uor(post_beacon_pool_sync_committees) .uor(post_beacon_pool_bls_to_execution_changes) + .uor(post_beacon_pool_execution_proofs) .uor(post_beacon_state_validators) .uor(post_beacon_state_validator_balances) .uor(post_beacon_state_validator_identities) diff --git a/beacon_node/http_api/src/utils.rs b/beacon_node/http_api/src/utils.rs index f2b859ebe59..4dfba8a8636 100644 --- a/beacon_node/http_api/src/utils.rs +++ b/beacon_node/http_api/src/utils.rs @@ -3,7 +3,7 
@@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2::types::EndpointVersion; use lighthouse_network::PubsubMessage; use lighthouse_network::rpc::methods::MetaData; -use network::{NetworkMessage, ValidatorSubscriptionMessage}; +use network::{NetworkMessage, SyncMessage, ValidatorSubscriptionMessage}; use parking_lot::RwLock; use std::sync::Arc; use tokio::sync::mpsc::{Sender, UnboundedSender}; @@ -20,6 +20,8 @@ pub type TaskSpawnerFilter = BoxedFilter<(TaskSpawner< pub type ValidatorSubscriptionTxFilter = BoxedFilter<(Sender,)>; pub type NetworkTxFilter = BoxedFilter<(UnboundedSender::EthSpec>>,)>; +pub type SyncTxFilter = + BoxedFilter<(UnboundedSender::EthSpec>>,)>; pub type OptionalConsensusVersionHeaderFilter = BoxedFilter<(Option,)>; pub fn from_meta_data( diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index ed7abead18a..ac4f42c07e9 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -46,8 +46,9 @@ use tokio::time::Duration; use tree_hash::TreeHash; use types::application_domain::ApplicationDomain; use types::{ - Domain, EthSpec, ExecutionBlockHash, Hash256, MainnetEthSpec, RelativeEpoch, SelectionProof, - SignedRoot, SingleAttestation, Slot, attestation::AttestationBase, + Domain, EthSpec, ExecutionBlockHash, ExecutionProof, ExecutionProofId, Hash256, MainnetEthSpec, + RelativeEpoch, SelectionProof, SignedRoot, SingleAttestation, Slot, + attestation::AttestationBase, }; type E = MainnetEthSpec; @@ -94,6 +95,7 @@ struct ApiTesterConfig { spec: ChainSpec, retain_historic_states: bool, node_custody_type: NodeCustodyType, + enable_zkvm: bool, } impl Default for ApiTesterConfig { @@ -104,6 +106,7 @@ impl Default for ApiTesterConfig { spec, retain_historic_states: false, node_custody_type: NodeCustodyType::Fullnode, + enable_zkvm: false, } } } @@ -113,6 +116,13 @@ impl ApiTesterConfig { self.retain_historic_states = true; self } + + fn with_zkvm(mut self) -> Self { + // TODO(zkproofs): shouldn't need both of these to be enabled + self.enable_zkvm = true; + self.spec.zkvm_enabled = true; + self + } } impl ApiTester { @@ -129,10 +139,15 @@ impl ApiTester { Self::new_from_config(config).await } + pub async fn new_with_zkvm() -> Self { + let config = ApiTesterConfig::default().with_zkvm(); + Self::new_from_config(config).await + } + pub async fn new_from_config(config: ApiTesterConfig) -> Self { let spec = Arc::new(config.spec); - let mut harness = BeaconChainHarness::builder(MainnetEthSpec) + let mut builder = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) .chain_config(ChainConfig { reconstruct_historic_states: config.retain_historic_states, @@ -142,8 +157,13 @@ impl ApiTester { .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .mock_execution_layer() - .node_custody_type(config.node_custody_type) - .build(); + .node_custody_type(config.node_custody_type); + + if config.enable_zkvm { + builder = builder.zkvm_with_dummy_verifiers(); + } + + let mut harness = builder.build(); harness .mock_execution_layer @@ -1944,6 +1964,60 @@ impl ApiTester { self } + pub async fn test_get_execution_proofs(self, filter: bool) -> Self { + let head = self.chain.head_snapshot(); + let block_root = head.beacon_block_root; + + let proof_ids = [ + ExecutionProofId::new(0).expect("Valid proof id"), + ExecutionProofId::new(1).expect("Valid proof id"), + ]; + let proofs = proof_ids + .iter() + .map(|proof_id| self.create_test_execution_proof_with_id(*proof_id)) + .collect::>(); + + 
self.chain + .store + .put_execution_proofs(&block_root, &proofs) + .unwrap(); + + let filter_ids = filter.then(|| vec![proof_ids[1].as_u8()]); + let result = match self + .client + .get_execution_proofs(CoreBlockId::Root(block_root), filter_ids.as_deref()) + .await + { + Ok(result) => result.unwrap().into_data(), + Err(e) => panic!("query failed incorrectly: {e:?}"), + }; + + if filter { + assert_eq!(result.len(), 1); + assert_eq!(result[0].proof_id, proof_ids[1]); + } else { + assert_eq!(result.len(), proofs.len()); + } + + self + } + + pub async fn test_get_execution_proofs_zkvm_disabled(self) -> Self { + let block_id = BlockId(CoreBlockId::Head); + let (block_root, _, _) = block_id.root(&self.chain).unwrap(); + let result = self + .client + .get_execution_proofs(CoreBlockId::Root(block_root), None) + .await; + + match result { + Ok(response) => panic!("query should fail: {response:?}"), + Err(e) => assert_eq!(e.status().unwrap(), 400), + } + + self + } + pub async fn test_get_blobs_post_fulu_full_node(self, versioned_hashes: bool) -> Self { let block_id = BlockId(CoreBlockId::Head); let (block_root, _, _) = block_id.root(&self.chain).unwrap(); @@ -2732,6 +2806,90 @@ impl ApiTester { self } + /// Helper to create a test execution proof for the head block + fn create_test_execution_proof(&self) -> ExecutionProof { + let proof_id = ExecutionProofId::new(0).expect("Valid proof id"); + self.create_test_execution_proof_with_id(proof_id) + } + + fn create_test_execution_proof_with_id(&self, proof_id: ExecutionProofId) -> ExecutionProof { + let head = self.chain.head_snapshot(); + let block_root = head.beacon_block_root; + let slot = head.beacon_block.slot(); + let block_hash = head + .beacon_block + .message() + .body() + .execution_payload() + .map(|p| p.block_hash()) + .unwrap_or_else(|_| ExecutionBlockHash::zero()); + + let proof_data = vec![0u8; 32]; // Dummy proof data + + ExecutionProof::new(proof_id, slot, block_hash, block_root, proof_data) + .expect("Valid test proof") + } + + pub async fn test_post_beacon_pool_execution_proofs_valid(mut self) -> Self { + let proof = self.create_test_execution_proof(); + + self.client + .post_beacon_pool_execution_proofs(&proof) + .await + .unwrap(); + + assert!( + self.network_rx.network_recv.recv().await.is_some(), + "valid proof should be sent to network" + ); + + self + } + + pub async fn test_post_beacon_pool_execution_proofs_invalid_duplicate(mut self) -> Self { + let proof = self.create_test_execution_proof(); + + // First submission should succeed + self.client + .post_beacon_pool_execution_proofs(&proof) + .await + .unwrap(); + + // Consume the network message + self.network_rx.network_recv.recv().await; + + // Duplicate submission should fail + let result = self.client.post_beacon_pool_execution_proofs(&proof).await; + + assert!(result.is_err(), "duplicate proof should be rejected"); + + assert!( + self.network_rx.network_recv.recv().now_or_never().is_none(), + "duplicate proof should not be sent to network" + ); + + self + } + + pub async fn test_post_beacon_pool_execution_proofs_invalid_future_slot(self) -> Self { + let head = self.chain.head_snapshot(); + let block_root = head.beacon_block_root; + let future_slot = self.chain.slot().unwrap() + 100u64; + let block_hash = ExecutionBlockHash::zero(); + + let proof_id = ExecutionProofId::new(0).expect("Valid proof id"); + let proof_data = vec![0u8; 32]; + + let proof = ExecutionProof::new(proof_id, future_slot, block_hash, block_root, proof_data) + .expect("Valid test proof"); + + let result 
= self.client.post_beacon_pool_execution_proofs(&proof).await; + + assert!(result.is_err(), "future slot proof should be rejected"); + + self + } + pub async fn test_get_config_fork_schedule(self) -> Self { let result = self.client.get_config_fork_schedule().await.unwrap().data; @@ -7262,6 +7420,30 @@ async fn beacon_pools_post_voluntary_exits_invalid() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_pools_post_execution_proofs_valid() { + ApiTester::new_with_zkvm() + .await + .test_post_beacon_pool_execution_proofs_valid() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_pools_post_execution_proofs_invalid_duplicate() { + ApiTester::new_with_zkvm() + .await + .test_post_beacon_pool_execution_proofs_invalid_duplicate() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_pools_post_execution_proofs_invalid_future_slot() { + ApiTester::new_with_zkvm() + .await + .test_post_beacon_pool_execution_proofs_invalid_future_slot() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn config_get() { ApiTester::new() @@ -7975,6 +8157,7 @@ async fn get_blobs_post_fulu_supernode() { retain_historic_states: false, spec: E::default_spec(), node_custody_type: NodeCustodyType::Supernode, + enable_zkvm: false, }; config.spec.altair_fork_epoch = Some(Epoch::new(0)); config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); @@ -8045,6 +8228,24 @@ async fn get_blob_sidecars_pre_deneb() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_execution_proofs() { + ApiTester::new_with_zkvm() + .await + .test_get_execution_proofs(false) + .await + .test_get_execution_proofs(true) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_execution_proofs_zkvm_disabled() { + ApiTester::new() + .await + .test_get_execution_proofs_zkvm_disabled() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn post_validator_liveness_epoch() { ApiTester::new() diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 4c285ea86c8..f2268b39b26 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -29,6 +29,8 @@ pub const ATTESTATION_BITFIELD_ENR_KEY: &str = "attnets"; pub const SYNC_COMMITTEE_BITFIELD_ENR_KEY: &str = "syncnets"; /// The ENR field specifying the peerdas custody group count. pub const PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY: &str = "cgc"; +/// The ENR field specifying whether zkVM execution proofs are enabled. +pub const ZKVM_ENABLED_ENR_KEY: &str = "zkvm"; /// Extension trait for ENR's within Eth2. pub trait Eth2Enr { @@ -43,6 +45,9 @@ pub trait Eth2Enr { /// The peerdas custody group count associated with the ENR. fn custody_group_count(&self, spec: &ChainSpec) -> Result; + /// Whether zkVM execution proofs are enabled for this node. + fn zkvm_enabled(&self) -> bool; + /// The next fork digest associated with the ENR. 
 fn next_fork_digest(&self) -> Result<[u8; 4], &'static str>;
@@ -85,6 +90,13 @@ impl Eth2Enr for Enr {
         }
     }

+    fn zkvm_enabled(&self) -> bool {
+        // If the key exists and decodes to `true`, zkVM is enabled; otherwise false.
+        self.get_decodable::<bool>(ZKVM_ENABLED_ENR_KEY)
+            .and_then(|result| result.ok())
+            .unwrap_or(false)
+    }
+
     fn next_fork_digest(&self) -> Result<[u8; 4], &'static str> {
         self.get_decodable::<[u8; 4]>(NEXT_FORK_DIGEST_ENR_KEY)
             .ok_or("ENR next fork digest non-existent")?
@@ -278,6 +290,10 @@ pub fn build_enr(
         &bitfield.as_ssz_bytes().into(),
     );

+    if spec.is_zkvm_enabled() {
+        builder.add_value(ZKVM_ENABLED_ENR_KEY, &true);
+    }
+
     // only set `cgc` and `nfd` if PeerDAS fork (Fulu) epoch has been scheduled
     if spec.is_peer_das_scheduled() {
         builder.add_value(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY, &custody_group_count);
@@ -308,11 +324,12 @@ fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool {
         && (local_enr.udp4().is_none() || local_enr.udp4() == disk_enr.udp4())
         && (local_enr.udp6().is_none() || local_enr.udp6() == disk_enr.udp6())
         // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY and
-        // PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY key to match, otherwise we use a new ENR. This will
-        // likely only be true for non-validating nodes.
+        // PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY and ZKVM_ENABLED_ENR_KEY keys to match,
+        // otherwise we use a new ENR. This will likely only be true for non-validating nodes.
         && local_enr.get_decodable::(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get_decodable(ATTESTATION_BITFIELD_ENR_KEY)
         && local_enr.get_decodable::(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get_decodable(SYNC_COMMITTEE_BITFIELD_ENR_KEY)
         && local_enr.get_decodable::(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY) == disk_enr.get_decodable(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY)
+        && local_enr.get_decodable::<bool>(ZKVM_ENABLED_ENR_KEY) == disk_enr.get_decodable(ZKVM_ENABLED_ENR_KEY)
 }

 /// Loads enr from the given directory
diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs
index a8c87523a54..df6e0740bb5 100644
--- a/beacon_node/lighthouse_network/src/discovery/mod.rs
+++ b/beacon_node/lighthouse_network/src/discovery/mod.rs
@@ -15,7 +15,10 @@ pub use libp2p::identity::{Keypair, PublicKey};
 use network_utils::enr_ext::{CombinedKeyExt, EnrExt, peer_id_to_node_id};

 use alloy_rlp::bytes::Bytes;
-use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY};
+use enr::{
+    ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY,
+    ZKVM_ENABLED_ENR_KEY,
+};
 use futures::prelude::*;
 use futures::stream::FuturesUnordered;
 use libp2p::core::transport::PortUse;
@@ -560,6 +563,12 @@ impl Discovery {
             }
             // Data column subnets are computed from node ID. No subnet bitfield in the ENR.
Subnet::DataColumn(_) => return Ok(()), + // Execution proof uses a simple boolean flag in the ENR + Subnet::ExecutionProof => { + self.discv5 + .enr_insert(ZKVM_ENABLED_ENR_KEY, &value) + .map_err(|e| format!("{:?}", e))?; + } } // replace the global version @@ -904,6 +913,7 @@ impl Discovery { Subnet::Attestation(_) => "attestation", Subnet::SyncCommittee(_) => "sync_committee", Subnet::DataColumn(_) => "data_column", + Subnet::ExecutionProof => "execution_proof", }; if let Some(v) = metrics::get_int_counter( diff --git a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs index 6e841c25a50..dc1ac54e97b 100644 --- a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs +++ b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs @@ -41,6 +41,10 @@ where false } } + Subnet::ExecutionProof => { + // Check if ENR advertises zkVM support + enr.zkvm_enabled() + } }); if !predicate { diff --git a/beacon_node/lighthouse_network/src/peer_manager/config.rs b/beacon_node/lighthouse_network/src/peer_manager/config.rs index b2ed6524861..57a5fa68a23 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/config.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/config.rs @@ -20,6 +20,8 @@ pub struct Config { pub metrics_enabled: bool, /// Whether quic is enabled. pub quic_enabled: bool, + /// Whether execution proofs are enabled. + pub execution_proof_enabled: bool, /// Target number of peers to connect to. pub target_peer_count: usize, @@ -40,6 +42,7 @@ impl Default for Config { discovery_enabled: true, metrics_enabled: false, quic_enabled: true, + execution_proof_enabled: false, target_peer_count: DEFAULT_TARGET_PEERS, status_interval: DEFAULT_STATUS_INTERVAL, ping_interval_inbound: DEFAULT_PING_INTERVAL_INBOUND, diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 3cfe2b3c3b7..52b98d4d3c7 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -1,7 +1,7 @@ //! Implementation of Lighthouse's peer management system. use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RpcErrorResponse}; -use crate::service::TARGET_SUBNET_PEERS; +use crate::service::{TARGET_EXECUTION_PROOF_PEERS, TARGET_SUBNET_PEERS}; use crate::{Gossipsub, NetworkGlobals, PeerId, Subnet, SubnetDiscovery, metrics}; use delay_map::HashSetDelay; use discv5::Enr; @@ -113,6 +113,8 @@ pub struct PeerManager { /// discovery queries for subnet peers if we disconnect from existing sync /// committee subnet peers. sync_committee_subnets: HashMap, + /// Keeps track of whether this node has zkVM execution proof support enabled. + execution_proof_enabled: bool, /// A mapping of all custody groups to column subnets to avoid re-computation. subnets_by_custody_group: HashMap>, /// The heartbeat interval to perform routine maintenance. 
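Taken together, the ENR accessor and the subnet predicate above reduce peer selection for Subnet::ExecutionProof to a single boolean lookup: a discovered peer is only considered for execution proofs if its ENR carries a truthy `zkvm` entry, and a missing or undecodable entry is treated as `false`. The stand-alone sketch below illustrates that decision only; `has_zkvm_flag` and the plain `HashMap` standing in for the real `Enr` type are illustrative assumptions, not code from this change.

use std::collections::HashMap;

/// Hypothetical stand-in for the `zkvm_enabled` ENR accessor: a missing or
/// undecodable `zkvm` entry is treated the same as `false`.
fn has_zkvm_flag(enr_entries: &HashMap<&str, bool>) -> bool {
    enr_entries.get("zkvm").copied().unwrap_or(false)
}

fn main() {
    let mut peer = HashMap::new();
    // No `zkvm` key: the peer is not considered for execution proof discovery.
    assert!(!has_zkvm_flag(&peer));
    peer.insert("zkvm", true);
    // Key present and true: the subnet predicate would accept this peer.
    assert!(has_zkvm_flag(&peer));
}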
@@ -162,6 +164,7 @@ impl PeerManager { let config::Config { discovery_enabled, metrics_enabled, + execution_proof_enabled, target_peer_count, status_interval, ping_interval_inbound, @@ -199,6 +202,7 @@ impl PeerManager { target_peers: target_peer_count, temporary_banned_peers: LRUTimeCache::new(PEER_RECONNECTION_TIMEOUT), sync_committee_subnets: Default::default(), + execution_proof_enabled, subnets_by_custody_group, heartbeat, discovery_enabled, @@ -601,6 +605,8 @@ impl PeerManager { Protocol::BlobsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRange => PeerAction::MidToleranceError, + Protocol::ExecutionProofsByRoot => PeerAction::MidToleranceError, + Protocol::ExecutionProofsByRange => PeerAction::MidToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, @@ -621,6 +627,8 @@ impl PeerManager { Protocol::BlobsByRoot => return, Protocol::DataColumnsByRoot => return, Protocol::DataColumnsByRange => return, + Protocol::ExecutionProofsByRoot => return, + Protocol::ExecutionProofsByRange => return, Protocol::Goodbye => return, Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, @@ -644,6 +652,8 @@ impl PeerManager { Protocol::BlobsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRange => PeerAction::MidToleranceError, + Protocol::ExecutionProofsByRoot => PeerAction::MidToleranceError, + Protocol::ExecutionProofsByRange => PeerAction::MidToleranceError, Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, @@ -1004,6 +1014,46 @@ impl PeerManager { } } + /// Run discovery query for zkVM-enabled peers if we fall below `TARGET_EXECUTION_PROOF_PEERS`. + fn maintain_execution_proof_peers(&mut self) { + // Only maintain peers if zkVM is enabled + if !self.execution_proof_enabled { + return; + } + + // Check if we have enough zkVM-enabled peers + // Count peers subscribed to the execution_proof gossip topic + // TODO(zkproofs): Note that since peers do not advertise whether + // they are proof generating, we cannot favour them. This is + // fine for optional proofs and mandatory proofs will imply + // that the builder who is well connected will propagate it + // to most of the network. 
+ let zkvm_peer_count = self + .network_globals + .peers + .read() + .connected_peers() + .filter(|(_, info)| { + // Check if peer is subscribed to ExecutionProof gossip topic + info.on_subnet_gossipsub(&Subnet::ExecutionProof) + }) + .count(); + + if zkvm_peer_count < TARGET_EXECUTION_PROOF_PEERS { + debug!( + current_peers = zkvm_peer_count, + target = TARGET_EXECUTION_PROOF_PEERS, + "Making discovery query for zkVM-enabled peers" + ); + self.events.push(PeerManagerEvent::DiscoverSubnetPeers(vec![ + SubnetDiscovery { + subnet: Subnet::ExecutionProof, + min_ttl: None, + }, + ])); + } + } + fn maintain_trusted_peers(&mut self) { let trusted_peers = self.trusted_peers.clone(); for trusted_peer in trusted_peers { @@ -1081,6 +1131,10 @@ impl PeerManager { Subnet::DataColumn(id) => { peer_info.custody_subnets.insert(id); } + Subnet::ExecutionProof => { + // ExecutionProof uses a single topic, not subnet-based + // So there is no subnet assignment to track + } } } @@ -1449,6 +1503,9 @@ impl PeerManager { // Maintain minimum count for sync committee peers. self.maintain_sync_committee_peers(); + // Maintain minimum count for zkVM-enabled peers (if zkVM is enabled). + self.maintain_execution_proof_peers(); + // Prune any excess peers back to our target in such a way that incentivises good scores and // a uniform distribution of subnets. self.prune_excess_peers(); diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 87337cafcf5..142c62c966e 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -1,5 +1,5 @@ use crate::discovery::CombinedKey; -use crate::discovery::enr::PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY; +use crate::discovery::enr::{PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY, ZKVM_ENABLED_ENR_KEY}; use crate::{Enr, Gossipsub, PeerId, SyncInfo, metrics, multiaddr::Multiaddr, types::Subnet}; use itertools::Itertools; use logging::crit; @@ -799,6 +799,26 @@ impl PeerDB { supernode: bool, spec: &ChainSpec, enr_key: CombinedKey, + ) -> PeerId { + self.__add_connected_peer_with_opts_testing_only(supernode, false, spec, enr_key) + } + + /// Updates the connection state with zkvm option. MUST ONLY BE USED IN TESTS. + pub fn __add_connected_zkvm_peer_testing_only( + &mut self, + spec: &ChainSpec, + enr_key: CombinedKey, + ) -> PeerId { + self.__add_connected_peer_with_opts_testing_only(false, true, spec, enr_key) + } + + /// Updates the connection state with options. MUST ONLY BE USED IN TESTS. + fn __add_connected_peer_with_opts_testing_only( + &mut self, + supernode: bool, + zkvm_enabled: bool, + spec: &ChainSpec, + enr_key: CombinedKey, ) -> PeerId { let mut enr = Enr::builder().build(&enr_key).unwrap(); let peer_id = enr.peer_id(); @@ -812,6 +832,11 @@ impl PeerDB { .expect("u64 can be encoded"); } + if zkvm_enabled { + enr.insert(ZKVM_ENABLED_ENR_KEY, &true, &enr_key) + .expect("bool can be encoded"); + } + self.update_connection_state( &peer_id, NewConnectionState::Connected { diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index c289cb9a69c..38cbd6e7782 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -92,6 +92,15 @@ impl PeerInfo { /// Returns if the peer is subscribed to a given `Subnet` from the metadata attnets/syncnets field. 
/// Also returns true if the peer is assigned to custody a given data column `Subnet` computed from the metadata `custody_group_count` field or ENR `cgc` field. pub fn on_subnet_metadata(&self, subnet: &Subnet) -> bool { + // ExecutionProof capability is advertised via ENR zkvm flag, not metadata. + // Check this separately since it doesn't depend on metadata presence. + if let Subnet::ExecutionProof = subnet { + if let Some(enr) = self.enr.as_ref() { + return enr.zkvm_enabled(); + } + return false; + } + if let Some(meta_data) = &self.meta_data { match subnet { Subnet::Attestation(id) => { @@ -105,6 +114,9 @@ impl PeerInfo { Subnet::DataColumn(subnet_id) => { return self.is_assigned_to_custody_subnet(subnet_id); } + Subnet::ExecutionProof => { + unreachable!("zkvm flag is only in the ENR") + } } } false @@ -272,6 +284,11 @@ impl PeerInfo { return true; } + // Check if the peer has zkVM enabled (execution proof support) + if let Some(enr) = self.enr.as_ref() { + return enr.zkvm_enabled(); + } + false } diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 48a29699c8f..aa0fe8a3d9d 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -16,12 +16,12 @@ use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; use types::{ - BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EthSpec, ForkContext, - ForkName, Hash256, LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, - SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, - SignedBeaconBlockGloas, + BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EthSpec, + ExecutionProof, ForkContext, ForkName, Hash256, LightClientBootstrap, + LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, + SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, + SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, + SignedBeaconBlockFulu, SignedBeaconBlockGloas, }; use unsigned_varint::codec::Uvi; @@ -80,6 +80,8 @@ impl SSZSnappyInboundCodec { RpcSuccessResponse::BlobsByRoot(res) => res.as_ssz_bytes(), RpcSuccessResponse::DataColumnsByRoot(res) => res.as_ssz_bytes(), RpcSuccessResponse::DataColumnsByRange(res) => res.as_ssz_bytes(), + RpcSuccessResponse::ExecutionProofsByRoot(res) => res.as_ssz_bytes(), + RpcSuccessResponse::ExecutionProofsByRange(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientOptimisticUpdate(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientFinalityUpdate(res) => res.as_ssz_bytes(), @@ -360,6 +362,8 @@ impl Encoder> for SSZSnappyOutboundCodec { RequestType::BlobsByRoot(req) => req.blob_ids.as_ssz_bytes(), RequestType::DataColumnsByRange(req) => req.as_ssz_bytes(), RequestType::DataColumnsByRoot(req) => req.data_column_ids.as_ssz_bytes(), + RequestType::ExecutionProofsByRoot(req) => req.as_ssz_bytes(), + RequestType::ExecutionProofsByRange(req) => req.as_ssz_bytes(), RequestType::Ping(req) => req.as_ssz_bytes(), RequestType::LightClientBootstrap(req) => req.as_ssz_bytes(), RequestType::LightClientUpdatesByRange(req) => req.as_ssz_bytes(), @@ -568,6 +572,19 @@ fn handle_rpc_request( )?, }, 
))), + SupportedProtocol::ExecutionProofsByRootV1 => { + let request = ExecutionProofsByRootRequest::from_ssz_bytes(decoded_buffer) + .map_err(RPCError::SSZDecodeError)?; + + request.validate(spec).map_err(RPCError::InvalidData)?; + + Ok(Some(RequestType::ExecutionProofsByRoot(request))) + } + SupportedProtocol::ExecutionProofsByRangeV1 => { + Ok(Some(RequestType::ExecutionProofsByRange( + ExecutionProofsByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))) + } SupportedProtocol::PingV1 => Ok(Some(RequestType::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -731,6 +748,16 @@ fn handle_rpc_response( ), )), }, + SupportedProtocol::ExecutionProofsByRootV1 => { + Ok(Some(RpcSuccessResponse::ExecutionProofsByRoot(Arc::new( + ExecutionProof::from_ssz_bytes(decoded_buffer)?, + )))) + } + SupportedProtocol::ExecutionProofsByRangeV1 => { + Ok(Some(RpcSuccessResponse::ExecutionProofsByRange(Arc::new( + ExecutionProof::from_ssz_bytes(decoded_buffer)?, + )))) + } SupportedProtocol::PingV1 => Ok(Some(RpcSuccessResponse::Pong(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -912,8 +939,9 @@ mod tests { use fixed_bytes::FixedBytesExtended; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, - DataColumnsByRootIdentifier, EmptyBlock, Epoch, FullPayload, KzgCommitment, KzgProof, - SignedBeaconBlockHeader, Slot, blob_sidecar::BlobIdentifier, data_column_sidecar::Cell, + DataColumnsByRootIdentifier, EmptyBlock, Epoch, ExecutionProofId, FullPayload, + KzgCommitment, KzgProof, SignedBeaconBlockHeader, Slot, blob_sidecar::BlobIdentifier, + data_column_sidecar::Cell, }; type Spec = types::MainnetEthSpec; @@ -1110,6 +1138,18 @@ mod tests { .unwrap() } + fn execution_proofs_by_root_request( + _fork_name: ForkName, + _spec: &ChainSpec, + ) -> ExecutionProofsByRootRequest { + ExecutionProofsByRootRequest::new( + Hash256::zero(), + vec![ExecutionProofId::new(0).unwrap()], + 2, + ) + .unwrap() + } + fn ping_message() -> Ping { Ping { data: 1 } } @@ -1264,6 +1304,15 @@ mod tests { RequestType::DataColumnsByRange(dcbrange) => { assert_eq!(decoded, RequestType::DataColumnsByRange(dcbrange)) } + RequestType::ExecutionProofsByRoot(exec_proofs) => { + assert_eq!(decoded, RequestType::ExecutionProofsByRoot(exec_proofs)) + } + RequestType::ExecutionProofsByRange(exec_proofs_range) => { + assert_eq!( + decoded, + RequestType::ExecutionProofsByRange(exec_proofs_range) + ) + } RequestType::Ping(ping) => { assert_eq!(decoded, RequestType::Ping(ping)) } @@ -2005,6 +2054,10 @@ mod tests { RequestType::BlocksByRoot(bbroot_request_v1(fork_name, &chain_spec)), RequestType::BlocksByRoot(bbroot_request_v2(fork_name, &chain_spec)), RequestType::DataColumnsByRoot(dcbroot_request(fork_name, &chain_spec)), + RequestType::ExecutionProofsByRoot(execution_proofs_by_root_request( + fork_name, + &chain_spec, + )), ] }; for fork_name in ForkName::list_all() { diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index b0ee6fea64b..99c0f33da31 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -93,6 +93,8 @@ pub struct RateLimiterConfig { pub(super) blobs_by_root_quota: Quota, pub(super) data_columns_by_root_quota: Quota, pub(super) data_columns_by_range_quota: Quota, + pub(super) execution_proofs_by_root_quota: Quota, + pub(super) execution_proofs_by_range_quota: Quota, pub(super) light_client_bootstrap_quota: Quota, pub(super) 
light_client_optimistic_update_quota: Quota, pub(super) light_client_finality_update_quota: Quota, @@ -122,6 +124,12 @@ impl RateLimiterConfig { Quota::n_every(NonZeroU64::new(16384).unwrap(), 10); pub const DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA: Quota = Quota::n_every(NonZeroU64::new(16384).unwrap(), 10); + // TODO(zkproofs): Configure this to be less arbitrary + pub const DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(128).unwrap(), 10); + // TODO(zkproofs): Configure this to be less arbitrary + pub const DEFAULT_EXECUTION_PROOFS_BY_RANGE_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(128).unwrap(), 10); pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA: Quota = Quota::one_every(10); @@ -141,6 +149,8 @@ impl Default for RateLimiterConfig { blobs_by_root_quota: Self::DEFAULT_BLOBS_BY_ROOT_QUOTA, data_columns_by_root_quota: Self::DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA, data_columns_by_range_quota: Self::DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA, + execution_proofs_by_root_quota: Self::DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA, + execution_proofs_by_range_quota: Self::DEFAULT_EXECUTION_PROOFS_BY_RANGE_QUOTA, light_client_bootstrap_quota: Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA, light_client_optimistic_update_quota: Self::DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA, @@ -179,6 +189,14 @@ impl Debug for RateLimiterConfig { "data_columns_by_root", fmt_q!(&self.data_columns_by_root_quota), ) + .field( + "execution_proofs_by_root", + fmt_q!(&self.execution_proofs_by_root_quota), + ) + .field( + "execution_proofs_by_range", + fmt_q!(&self.execution_proofs_by_range_quota), + ) .finish() } } @@ -201,6 +219,8 @@ impl FromStr for RateLimiterConfig { let mut blobs_by_root_quota = None; let mut data_columns_by_root_quota = None; let mut data_columns_by_range_quota = None; + let mut execution_proofs_by_root_quota = None; + let mut execution_proofs_by_range_quota = None; let mut light_client_bootstrap_quota = None; let mut light_client_optimistic_update_quota = None; let mut light_client_finality_update_quota = None; @@ -222,6 +242,12 @@ impl FromStr for RateLimiterConfig { Protocol::DataColumnsByRange => { data_columns_by_range_quota = data_columns_by_range_quota.or(quota) } + Protocol::ExecutionProofsByRoot => { + execution_proofs_by_root_quota = execution_proofs_by_root_quota.or(quota) + } + Protocol::ExecutionProofsByRange => { + execution_proofs_by_range_quota = execution_proofs_by_range_quota.or(quota) + } Protocol::Ping => ping_quota = ping_quota.or(quota), Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota), Protocol::LightClientBootstrap => { @@ -257,6 +283,10 @@ impl FromStr for RateLimiterConfig { .unwrap_or(Self::DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA), data_columns_by_range_quota: data_columns_by_range_quota .unwrap_or(Self::DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA), + execution_proofs_by_root_quota: execution_proofs_by_root_quota + .unwrap_or(Self::DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA), + execution_proofs_by_range_quota: execution_proofs_by_range_quota + .unwrap_or(Self::DEFAULT_EXECUTION_PROOFS_BY_RANGE_QUOTA), light_client_bootstrap_quota: light_client_bootstrap_quota .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA), light_client_optimistic_update_quota: light_client_optimistic_update_quota diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs 
b/beacon_node/lighthouse_network/src/rpc/methods.rs index a9b4aa2fbad..966106b6f69 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -16,9 +16,9 @@ use types::blob_sidecar::BlobIdentifier; use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnsByRootIdentifier, Epoch, EthSpec, - ForkContext, Hash256, LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, Slot, - blob_sidecar::BlobSidecar, + ExecutionProof, ExecutionProofId, ForkContext, Hash256, LightClientBootstrap, + LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, + Slot, blob_sidecar::BlobSidecar, }; /// Maximum length of error message. @@ -546,6 +546,93 @@ impl DataColumnsByRootRequest { } } +/// Request execution proofs by block root and proof IDs. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct ExecutionProofsByRootRequest { + /// The block root we need proofs for + pub block_root: Hash256, + /// How many additional proofs we need + /// TODO(zkproofs): Remove. This can be inferred since `MIN_PROOFS_REQUIRED` + /// is a global value + pub count_needed: u64, + /// Proof IDs we already have (responder should exclude these) + pub already_have: Vec, +} + +impl ExecutionProofsByRootRequest { + pub fn new( + block_root: Hash256, + already_have: Vec, + count_needed: usize, + ) -> Result { + if already_have.len() > types::execution_proof::MAX_PROOFS { + return Err("Too many proof IDs in already_have"); + } + if count_needed == 0 { + return Err("count_needed must be > 0"); + } + if count_needed > types::execution_proof::MAX_PROOFS { + return Err("count_needed too large"); + } + Ok(Self { + block_root, + count_needed: count_needed as u64, + already_have, + }) + } + + pub fn validate(&self, _spec: &ChainSpec) -> Result<(), String> { + if self.already_have.len() > types::execution_proof::MAX_PROOFS { + return Err("Too many proof IDs in already_have".to_string()); + } + if self.count_needed == 0 { + return Err("count_needed must be > 0".to_string()); + } + if self.count_needed > types::execution_proof::MAX_PROOFS as u64 { + return Err(format!( + "count_needed too large: {} > {}", + self.count_needed, + types::execution_proof::MAX_PROOFS + )); + } + Ok(()) + } + + pub fn max_requested(&self) -> usize { + self.count_needed as usize + } +} + +/// Request execution proofs for a range of slots. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct ExecutionProofsByRangeRequest { + /// The starting slot to request execution proofs. + pub start_slot: u64, + /// The number of slots from the start slot. + pub count: u64, +} + +impl ExecutionProofsByRangeRequest { + pub fn max_proofs_requested(&self) -> u64 { + // Each slot could have up to MAX_PROOFS execution proofs + self.count + .saturating_mul(types::execution_proof::MAX_PROOFS as u64) + } + + pub fn ssz_min_len() -> usize { + ExecutionProofsByRangeRequest { + start_slot: 0, + count: 0, + } + .as_ssz_bytes() + .len() + } + + pub fn ssz_max_len() -> usize { + Self::ssz_min_len() + } +} + /// Request a number of beacon data columns from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] pub struct LightClientUpdatesByRangeRequest { @@ -613,6 +700,12 @@ pub enum RpcSuccessResponse { /// A response to a get DATA_COLUMN_SIDECARS_BY_RANGE request. 
DataColumnsByRange(Arc>), + /// A response to a get EXECUTION_PROOFS_BY_ROOT request. + ExecutionProofsByRoot(Arc), + + /// A response to a get EXECUTION_PROOFS_BY_RANGE request. + ExecutionProofsByRange(Arc), + /// A PONG response to a PING request. Pong(Ping), @@ -641,6 +734,12 @@ pub enum ResponseTermination { /// Data column sidecars by range stream termination. DataColumnsByRange, + /// Execution proofs by root stream termination. + ExecutionProofsByRoot, + + /// Execution proofs by range stream termination. + ExecutionProofsByRange, + /// Light client updates by range stream termination. LightClientUpdatesByRange, } @@ -654,6 +753,8 @@ impl ResponseTermination { ResponseTermination::BlobsByRoot => Protocol::BlobsByRoot, ResponseTermination::DataColumnsByRoot => Protocol::DataColumnsByRoot, ResponseTermination::DataColumnsByRange => Protocol::DataColumnsByRange, + ResponseTermination::ExecutionProofsByRoot => Protocol::ExecutionProofsByRoot, + ResponseTermination::ExecutionProofsByRange => Protocol::ExecutionProofsByRange, ResponseTermination::LightClientUpdatesByRange => Protocol::LightClientUpdatesByRange, } } @@ -749,6 +850,8 @@ impl RpcSuccessResponse { RpcSuccessResponse::BlobsByRoot(_) => Protocol::BlobsByRoot, RpcSuccessResponse::DataColumnsByRoot(_) => Protocol::DataColumnsByRoot, RpcSuccessResponse::DataColumnsByRange(_) => Protocol::DataColumnsByRange, + RpcSuccessResponse::ExecutionProofsByRoot(_) => Protocol::ExecutionProofsByRoot, + RpcSuccessResponse::ExecutionProofsByRange(_) => Protocol::ExecutionProofsByRange, RpcSuccessResponse::Pong(_) => Protocol::Ping, RpcSuccessResponse::MetaData(_) => Protocol::MetaData, RpcSuccessResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, @@ -773,7 +876,12 @@ impl RpcSuccessResponse { Self::LightClientFinalityUpdate(r) => Some(r.get_attested_header_slot()), Self::LightClientOptimisticUpdate(r) => Some(r.get_slot()), Self::LightClientUpdatesByRange(r) => Some(r.attested_header_slot()), - Self::MetaData(_) | Self::Status(_) | Self::Pong(_) => None, + // TODO(zkproofs): Change this when we add Slot to ExecutionProof + Self::ExecutionProofsByRoot(_) + | Self::ExecutionProofsByRange(_) + | Self::MetaData(_) + | Self::Status(_) + | Self::Pong(_) => None, } } } @@ -833,6 +941,16 @@ impl std::fmt::Display for RpcSuccessResponse { sidecar.slot() ) } + RpcSuccessResponse::ExecutionProofsByRoot(proof) => { + write!(f, "ExecutionProofsByRoot: Block root: {}", proof.block_root) + } + RpcSuccessResponse::ExecutionProofsByRange(proof) => { + write!( + f, + "ExecutionProofsByRange: Block root: {}", + proof.block_root + ) + } RpcSuccessResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RpcSuccessResponse::MetaData(metadata) => { write!(f, "Metadata: {}", metadata.seq_number()) @@ -943,3 +1061,25 @@ impl std::fmt::Display for DataColumnsByRootRequest { ) } } + +impl std::fmt::Display for ExecutionProofsByRootRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Request: ExecutionProofsByRoot: Block Root: {}, Already Have: {}, Count Needed: {}", + self.block_root, + self.already_have.len(), + self.count_needed + ) + } +} + +impl std::fmt::Display for ExecutionProofsByRangeRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Request: ExecutionProofsByRange: Start Slot: {}, Count: {}", + self.start_slot, self.count + ) + } +} diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs 
index 366515d42f6..0a37db0d210 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -18,10 +18,11 @@ use tokio_util::{ }; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BlobSidecar, ChainSpec, DataColumnSidecar, - EmptyBlock, Epoch, EthSpec, EthSpecId, ForkContext, ForkName, LightClientBootstrap, - LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, - LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientUpdate, - MainnetEthSpec, MinimalEthSpec, SignedBeaconBlock, + EmptyBlock, Epoch, EthSpec, EthSpecId, ExecutionProof, ForkContext, ForkName, + LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, + LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, + LightClientOptimisticUpdateAltair, LightClientUpdate, MainnetEthSpec, MinimalEthSpec, + SignedBeaconBlock, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is @@ -250,6 +251,12 @@ pub enum Protocol { /// The `DataColumnSidecarsByRange` protocol name. #[strum(serialize = "data_column_sidecars_by_range")] DataColumnsByRange, + /// The `ExecutionProofsByRoot` protocol name. + #[strum(serialize = "execution_proofs_by_root")] + ExecutionProofsByRoot, + /// The `ExecutionProofsByRange` protocol name. + #[strum(serialize = "execution_proofs_by_range")] + ExecutionProofsByRange, /// The `Ping` protocol name. Ping, /// The `MetaData` protocol name. @@ -280,6 +287,8 @@ impl Protocol { Protocol::BlobsByRoot => Some(ResponseTermination::BlobsByRoot), Protocol::DataColumnsByRoot => Some(ResponseTermination::DataColumnsByRoot), Protocol::DataColumnsByRange => Some(ResponseTermination::DataColumnsByRange), + Protocol::ExecutionProofsByRoot => Some(ResponseTermination::ExecutionProofsByRoot), + Protocol::ExecutionProofsByRange => Some(ResponseTermination::ExecutionProofsByRange), Protocol::Ping => None, Protocol::MetaData => None, Protocol::LightClientBootstrap => None, @@ -310,6 +319,8 @@ pub enum SupportedProtocol { BlobsByRootV1, DataColumnsByRootV1, DataColumnsByRangeV1, + ExecutionProofsByRootV1, + ExecutionProofsByRangeV1, PingV1, MetaDataV1, MetaDataV2, @@ -334,6 +345,8 @@ impl SupportedProtocol { SupportedProtocol::BlobsByRootV1 => "1", SupportedProtocol::DataColumnsByRootV1 => "1", SupportedProtocol::DataColumnsByRangeV1 => "1", + SupportedProtocol::ExecutionProofsByRootV1 => "1", + SupportedProtocol::ExecutionProofsByRangeV1 => "1", SupportedProtocol::PingV1 => "1", SupportedProtocol::MetaDataV1 => "1", SupportedProtocol::MetaDataV2 => "2", @@ -358,6 +371,8 @@ impl SupportedProtocol { SupportedProtocol::BlobsByRootV1 => Protocol::BlobsByRoot, SupportedProtocol::DataColumnsByRootV1 => Protocol::DataColumnsByRoot, SupportedProtocol::DataColumnsByRangeV1 => Protocol::DataColumnsByRange, + SupportedProtocol::ExecutionProofsByRootV1 => Protocol::ExecutionProofsByRoot, + SupportedProtocol::ExecutionProofsByRangeV1 => Protocol::ExecutionProofsByRange, SupportedProtocol::PingV1 => Protocol::Ping, SupportedProtocol::MetaDataV1 => Protocol::MetaData, SupportedProtocol::MetaDataV2 => Protocol::MetaData, @@ -408,6 +423,18 @@ impl SupportedProtocol { ProtocolId::new(SupportedProtocol::DataColumnsByRangeV1, Encoding::SSZSnappy), ]); } + if fork_context.spec.is_zkvm_enabled() { + supported.extend_from_slice(&[ + ProtocolId::new( + SupportedProtocol::ExecutionProofsByRootV1, + Encoding::SSZSnappy, + ), + ProtocolId::new( + 
SupportedProtocol::ExecutionProofsByRangeV1, + Encoding::SSZSnappy, + ), + ]); + } supported } } @@ -520,6 +547,11 @@ impl ProtocolId { DataColumnsByRangeRequest::ssz_min_len(), DataColumnsByRangeRequest::ssz_max_len::(), ), + Protocol::ExecutionProofsByRoot => RpcLimits::new(0, spec.max_blocks_by_root_request), + Protocol::ExecutionProofsByRange => RpcLimits::new( + ExecutionProofsByRangeRequest::ssz_min_len(), + ExecutionProofsByRangeRequest::ssz_max_len(), + ), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -556,6 +588,8 @@ impl ProtocolId { Protocol::DataColumnsByRange => { rpc_data_column_limits::(fork_context.current_fork_epoch(), &fork_context.spec) } + Protocol::ExecutionProofsByRoot => rpc_execution_proof_limits(), + Protocol::ExecutionProofsByRange => rpc_execution_proof_limits(), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -597,6 +631,8 @@ impl ProtocolId { | SupportedProtocol::StatusV2 | SupportedProtocol::BlocksByRootV1 | SupportedProtocol::BlocksByRangeV1 + | SupportedProtocol::ExecutionProofsByRootV1 + | SupportedProtocol::ExecutionProofsByRangeV1 | SupportedProtocol::PingV1 | SupportedProtocol::MetaDataV1 | SupportedProtocol::MetaDataV2 @@ -646,6 +682,11 @@ pub fn rpc_data_column_limits( ) } +pub fn rpc_execution_proof_limits() -> RpcLimits { + // TODO(zkproofs): Can max proof size change over hardforks? + RpcLimits::new(ExecutionProof::min_size(), ExecutionProof::max_size()) +} + /* Inbound upgrade */ // The inbound protocol reads the request, decodes it and returns the stream to the protocol @@ -725,6 +766,8 @@ pub enum RequestType { BlobsByRoot(BlobsByRootRequest), DataColumnsByRoot(DataColumnsByRootRequest), DataColumnsByRange(DataColumnsByRangeRequest), + ExecutionProofsByRoot(ExecutionProofsByRootRequest), + ExecutionProofsByRange(ExecutionProofsByRangeRequest), LightClientBootstrap(LightClientBootstrapRequest), LightClientOptimisticUpdate, LightClientFinalityUpdate, @@ -748,6 +791,8 @@ impl RequestType { RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64, RequestType::DataColumnsByRoot(req) => req.max_requested() as u64, RequestType::DataColumnsByRange(req) => req.max_requested::(), + RequestType::ExecutionProofsByRoot(req) => req.max_requested() as u64, + RequestType::ExecutionProofsByRange(req) => req.max_proofs_requested(), RequestType::Ping(_) => 1, RequestType::MetaData(_) => 1, RequestType::LightClientBootstrap(_) => 1, @@ -777,6 +822,8 @@ impl RequestType { RequestType::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, RequestType::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, RequestType::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, + RequestType::ExecutionProofsByRoot(_) => SupportedProtocol::ExecutionProofsByRootV1, + RequestType::ExecutionProofsByRange(_) => SupportedProtocol::ExecutionProofsByRangeV1, RequestType::Ping(_) => SupportedProtocol::PingV1, RequestType::MetaData(req) => match req { MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, @@ -808,6 +855,8 @@ impl RequestType { RequestType::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, RequestType::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, RequestType::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, + RequestType::ExecutionProofsByRoot(_) => ResponseTermination::ExecutionProofsByRoot, + RequestType::ExecutionProofsByRange(_) => ResponseTermination::ExecutionProofsByRange, RequestType::Status(_) => unreachable!(), RequestType::Goodbye(_) => 
unreachable!(), RequestType::Ping(_) => unreachable!(), @@ -854,6 +903,14 @@ impl RequestType { SupportedProtocol::DataColumnsByRangeV1, Encoding::SSZSnappy, )], + RequestType::ExecutionProofsByRoot(_) => vec![ProtocolId::new( + SupportedProtocol::ExecutionProofsByRootV1, + Encoding::SSZSnappy, + )], + RequestType::ExecutionProofsByRange(_) => vec![ProtocolId::new( + SupportedProtocol::ExecutionProofsByRangeV1, + Encoding::SSZSnappy, + )], RequestType::Ping(_) => vec![ProtocolId::new( SupportedProtocol::PingV1, Encoding::SSZSnappy, @@ -892,6 +949,8 @@ impl RequestType { RequestType::BlobsByRoot(_) => false, RequestType::DataColumnsByRoot(_) => false, RequestType::DataColumnsByRange(_) => false, + RequestType::ExecutionProofsByRoot(_) => false, + RequestType::ExecutionProofsByRange(_) => false, RequestType::Ping(_) => true, RequestType::MetaData(_) => true, RequestType::LightClientBootstrap(_) => true, @@ -1005,6 +1064,12 @@ impl std::fmt::Display for RequestType { RequestType::DataColumnsByRange(req) => { write!(f, "Data columns by range: {:?}", req) } + RequestType::ExecutionProofsByRoot(req) => { + write!(f, "Execution proofs by root: {:?}", req) + } + RequestType::ExecutionProofsByRange(req) => { + write!(f, "Execution proofs by range: {:?}", req) + } RequestType::Ping(ping) => write!(f, "Ping: {}", ping.data), RequestType::MetaData(_) => write!(f, "MetaData request"), RequestType::LightClientBootstrap(bootstrap) => { diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 8b364f506cc..9dfbc668c89 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -105,6 +105,10 @@ pub struct RPCRateLimiter { dcbroot_rl: Limiter, /// DataColumnsByRange rate limiter. dcbrange_rl: Limiter, + /// ExecutionProofsByRoot rate limiter. + execution_proofs_by_root_rl: Limiter, + /// ExecutionProofsByRange rate limiter. + execution_proofs_by_range_rl: Limiter, /// LightClientBootstrap rate limiter. lc_bootstrap_rl: Limiter, /// LightClientOptimisticUpdate rate limiter. @@ -148,6 +152,10 @@ pub struct RPCRateLimiterBuilder { dcbroot_quota: Option, /// Quota for the DataColumnsByRange protocol. dcbrange_quota: Option, + /// Quota for the ExecutionProofsByRoot protocol. + execution_proofs_by_root_quota: Option, + /// Quota for the ExecutionProofsByRange protocol. + execution_proofs_by_range_quota: Option, /// Quota for the LightClientBootstrap protocol. lcbootstrap_quota: Option, /// Quota for the LightClientOptimisticUpdate protocol. 
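For intuition, the two new quotas above follow the same shape as the existing ones: `Quota::n_every(NonZeroU64::new(128).unwrap(), 10)` allows roughly 128 requests (or requested items) per peer per 10-second period. The sketch below is a hypothetical fixed-window counter that approximates this behaviour; the real `Limiter` uses a different (token-bucket style) accounting, so this is an illustration of the quota's meaning, not its implementation.

use std::time::{Duration, Instant};

/// Hypothetical fixed-window counter approximating "n tokens every t seconds".
struct SimpleQuota {
    max: u64,
    window: Duration,
    used: u64,
    window_start: Instant,
}

impl SimpleQuota {
    fn new(max: u64, window_secs: u64) -> Self {
        Self {
            max,
            window: Duration::from_secs(window_secs),
            used: 0,
            window_start: Instant::now(),
        }
    }

    /// Returns true if a request costing `cost` tokens is allowed right now.
    fn allow(&mut self, cost: u64) -> bool {
        if self.window_start.elapsed() >= self.window {
            self.window_start = Instant::now();
            self.used = 0;
        }
        if self.used + cost <= self.max {
            self.used += cost;
            true
        } else {
            false
        }
    }
}

fn main() {
    // Mirrors DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA: 128 tokens per 10 seconds.
    let mut quota = SimpleQuota::new(128, 10);
    assert!(quota.allow(100));
    assert!(!quota.allow(50)); // would exceed 128 within the same window
}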
@@ -173,6 +181,8 @@ impl RPCRateLimiterBuilder { Protocol::BlobsByRoot => self.blbroot_quota = q, Protocol::DataColumnsByRoot => self.dcbroot_quota = q, Protocol::DataColumnsByRange => self.dcbrange_quota = q, + Protocol::ExecutionProofsByRoot => self.execution_proofs_by_root_quota = q, + Protocol::ExecutionProofsByRange => self.execution_proofs_by_range_quota = q, Protocol::LightClientBootstrap => self.lcbootstrap_quota = q, Protocol::LightClientOptimisticUpdate => self.lc_optimistic_update_quota = q, Protocol::LightClientFinalityUpdate => self.lc_finality_update_quota = q, @@ -221,6 +231,14 @@ impl RPCRateLimiterBuilder { .dcbrange_quota .ok_or("DataColumnsByRange quota not specified")?; + let execution_proofs_by_root_quota = self + .execution_proofs_by_root_quota + .ok_or("ExecutionProofsByRoot quota not specified")?; + + let execution_proofs_by_range_quota = self + .execution_proofs_by_range_quota + .ok_or("ExecutionProofsByRange quota not specified")?; + // create the rate limiters let ping_rl = Limiter::from_quota(ping_quota)?; let metadata_rl = Limiter::from_quota(metadata_quota)?; @@ -232,6 +250,8 @@ impl RPCRateLimiterBuilder { let blbroot_rl = Limiter::from_quota(blbroots_quota)?; let dcbroot_rl = Limiter::from_quota(dcbroot_quota)?; let dcbrange_rl = Limiter::from_quota(dcbrange_quota)?; + let execution_proofs_by_root_rl = Limiter::from_quota(execution_proofs_by_root_quota)?; + let execution_proofs_by_range_rl = Limiter::from_quota(execution_proofs_by_range_quota)?; let lc_bootstrap_rl = Limiter::from_quota(lc_bootstrap_quota)?; let lc_optimistic_update_rl = Limiter::from_quota(lc_optimistic_update_quota)?; let lc_finality_update_rl = Limiter::from_quota(lc_finality_update_quota)?; @@ -255,6 +275,8 @@ impl RPCRateLimiterBuilder { blbroot_rl, dcbroot_rl, dcbrange_rl, + execution_proofs_by_root_rl, + execution_proofs_by_range_rl, lc_bootstrap_rl, lc_optimistic_update_rl, lc_finality_update_rl, @@ -308,6 +330,8 @@ impl RPCRateLimiter { blobs_by_root_quota, data_columns_by_root_quota, data_columns_by_range_quota, + execution_proofs_by_root_quota, + execution_proofs_by_range_quota, light_client_bootstrap_quota, light_client_optimistic_update_quota, light_client_finality_update_quota, @@ -325,6 +349,14 @@ impl RPCRateLimiter { .set_quota(Protocol::BlobsByRoot, blobs_by_root_quota) .set_quota(Protocol::DataColumnsByRoot, data_columns_by_root_quota) .set_quota(Protocol::DataColumnsByRange, data_columns_by_range_quota) + .set_quota( + Protocol::ExecutionProofsByRoot, + execution_proofs_by_root_quota, + ) + .set_quota( + Protocol::ExecutionProofsByRange, + execution_proofs_by_range_quota, + ) .set_quota(Protocol::LightClientBootstrap, light_client_bootstrap_quota) .set_quota( Protocol::LightClientOptimisticUpdate, @@ -372,6 +404,8 @@ impl RPCRateLimiter { Protocol::BlobsByRoot => &mut self.blbroot_rl, Protocol::DataColumnsByRoot => &mut self.dcbroot_rl, Protocol::DataColumnsByRange => &mut self.dcbrange_rl, + Protocol::ExecutionProofsByRoot => &mut self.execution_proofs_by_root_rl, + Protocol::ExecutionProofsByRange => &mut self.execution_proofs_by_range_rl, Protocol::LightClientBootstrap => &mut self.lc_bootstrap_rl, Protocol::LightClientOptimisticUpdate => &mut self.lc_optimistic_update_rl, Protocol::LightClientFinalityUpdate => &mut self.lc_finality_update_rl, @@ -396,6 +430,8 @@ impl RPCRateLimiter { blbroot_rl, dcbroot_rl, dcbrange_rl, + execution_proofs_by_root_rl, + execution_proofs_by_range_rl, lc_bootstrap_rl, lc_optimistic_update_rl, lc_finality_update_rl, @@ -413,6 +449,8 @@ 
impl RPCRateLimiter { blbroot_rl.prune(time_since_start); dcbrange_rl.prune(time_since_start); dcbroot_rl.prune(time_since_start); + execution_proofs_by_root_rl.prune(time_since_start); + execution_proofs_by_range_rl.prune(time_since_start); lc_bootstrap_rl.prune(time_since_start); lc_optimistic_update_rl.prune(time_since_start); lc_finality_update_rl.prune(time_since_start); diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index f1a4d87de76..ca3a5b78bd9 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -3,7 +3,7 @@ use libp2p::PeerId; use std::fmt::{Display, Formatter}; use std::sync::Arc; use types::{ - BlobSidecar, DataColumnSidecar, Epoch, EthSpec, LightClientBootstrap, + BlobSidecar, DataColumnSidecar, Epoch, EthSpec, ExecutionProof, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, }; @@ -22,6 +22,8 @@ pub enum SyncRequestId { SingleBlock { id: SingleLookupReqId }, /// Request searching for a set of blobs given a hash. SingleBlob { id: SingleLookupReqId }, + /// Request searching for execution proofs given a block hash and proof IDs. + SingleExecutionProof { id: SingleLookupReqId }, /// Request searching for a set of data columns given a hash and list of column indices. DataColumnsByRoot(DataColumnsByRootRequestId), /// Blocks by range request @@ -30,6 +32,8 @@ pub enum SyncRequestId { BlobsByRange(BlobsByRangeRequestId), /// Data columns by range request DataColumnsByRange(DataColumnsByRangeRequestId), + /// Execution proofs by range request + ExecutionProofsByRange(ExecutionProofsByRangeRequestId), } /// Request ID for data_columns_by_root requests. Block lookups do not issue this request directly. @@ -75,6 +79,17 @@ pub enum DataColumnsByRangeRequester { CustodyBackfillSync(CustodyBackFillBatchRequestId), } +/// Request ID for execution_proofs_by_range requests during range sync. +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct ExecutionProofsByRangeRequestId { + /// Id to identify this attempt at an execution_proofs_by_range request for `parent_request_id` + pub id: Id, + /// The Id of the overall By Range request. + pub parent_request_id: ComponentsByRangeRequestId, + /// The peer id associated with the request. + pub peer: PeerId, +} + /// Block components by range request for range sync. Includes an ID for downstream consumers to /// handle retries and tie all their sub requests together. #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] @@ -164,6 +179,10 @@ pub enum Response { BlobsByRoot(Option>>), /// A response to a get DATA_COLUMN_SIDECARS_BY_ROOT request. DataColumnsByRoot(Option>>), + /// A response to a get EXECUTION_PROOFS_BY_ROOT request. + ExecutionProofsByRoot(Option>), + /// A response to a get EXECUTION_PROOFS_BY_RANGE request. + ExecutionProofsByRange(Option>), /// A response to a LightClientUpdate request. LightClientBootstrap(Arc>), /// A response to a LightClientOptimisticUpdate request. 
@@ -201,6 +220,14 @@ impl std::convert::From> for RpcResponse { Some(d) => RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange(d)), None => RpcResponse::StreamTermination(ResponseTermination::DataColumnsByRange), }, + Response::ExecutionProofsByRoot(r) => match r { + Some(p) => RpcResponse::Success(RpcSuccessResponse::ExecutionProofsByRoot(p)), + None => RpcResponse::StreamTermination(ResponseTermination::ExecutionProofsByRoot), + }, + Response::ExecutionProofsByRange(r) => match r { + Some(p) => RpcResponse::Success(RpcSuccessResponse::ExecutionProofsByRange(p)), + None => RpcResponse::StreamTermination(ResponseTermination::ExecutionProofsByRange), + }, Response::Status(s) => RpcResponse::Success(RpcSuccessResponse::Status(s)), Response::LightClientBootstrap(b) => { RpcResponse::Success(RpcSuccessResponse::LightClientBootstrap(b)) @@ -237,6 +264,12 @@ macro_rules! impl_display { impl_display!(BlocksByRangeRequestId, "{}/{}", id, parent_request_id); impl_display!(BlobsByRangeRequestId, "{}/{}", id, parent_request_id); impl_display!(DataColumnsByRangeRequestId, "{}/{}", id, parent_request_id); +impl_display!( + ExecutionProofsByRangeRequestId, + "{}/{}", + id, + parent_request_id +); impl_display!(ComponentsByRangeRequestId, "{}/{}", id, requester); impl_display!(DataColumnsByRootRequestId, "{}/{}", id, requester); impl_display!(SingleLookupReqId, "{}/Lookup/{}", req_id, lookup_id); diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index 120b9e6c245..227317f79ea 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -44,6 +44,8 @@ pub struct GossipCache { light_client_finality_update: Option, /// Timeout for light client optimistic updates. light_client_optimistic_update: Option, + /// Timeout for execution proofs. + execution_proof: Option, } #[derive(Default)] @@ -75,6 +77,8 @@ pub struct GossipCacheBuilder { light_client_finality_update: Option, /// Timeout for light client optimistic updates. light_client_optimistic_update: Option, + /// Timeout for execution proofs. + execution_proof: Option, } #[allow(dead_code)] @@ -151,6 +155,12 @@ impl GossipCacheBuilder { self } + /// Timeout for execution proof messages. 
+ pub fn execution_proof_timeout(mut self, timeout: Duration) -> Self { + self.execution_proof = Some(timeout); + self + } + pub fn build(self) -> GossipCache { let GossipCacheBuilder { default_timeout, @@ -167,6 +177,7 @@ impl GossipCacheBuilder { bls_to_execution_change, light_client_finality_update, light_client_optimistic_update, + execution_proof, } = self; GossipCache { expirations: DelayQueue::default(), @@ -184,6 +195,7 @@ impl GossipCacheBuilder { bls_to_execution_change: bls_to_execution_change.or(default_timeout), light_client_finality_update: light_client_finality_update.or(default_timeout), light_client_optimistic_update: light_client_optimistic_update.or(default_timeout), + execution_proof: execution_proof.or(default_timeout), } } } @@ -211,6 +223,7 @@ impl GossipCache { GossipKind::BlsToExecutionChange => self.bls_to_execution_change, GossipKind::LightClientFinalityUpdate => self.light_client_finality_update, GossipKind::LightClientOptimisticUpdate => self.light_client_optimistic_update, + GossipKind::ExecutionProof => self.execution_proof, }; let Some(expire_timeout) = expire_timeout else { return; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 4eebda1decb..4e8be98a509 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -53,6 +53,10 @@ pub mod utils; /// The number of peers we target per subnet for discovery queries. pub const TARGET_SUBNET_PEERS: usize = 3; +/// The number of peers we target for execution proof peer discovery. +/// Set to 1 since we don't expect many nodes to run it +pub const TARGET_EXECUTION_PROOF_PEERS: usize = 1; + const MAX_IDENTIFY_ADDRESSES: usize = 10; /// The types of events than can be obtained from polling the behaviour. @@ -255,6 +259,7 @@ impl Network { // .signed_contribution_and_proof_timeout(timeout) // Do not retry // .sync_committee_message_timeout(timeout) // Do not retry .bls_to_execution_change_timeout(half_epoch * 2) + .execution_proof_timeout(slot_duration) .build() }; @@ -406,13 +411,23 @@ impl Network { }; let peer_manager = { - let peer_manager_cfg = PeerManagerCfg { + let mut peer_manager_cfg = PeerManagerCfg { discovery_enabled: !config.disable_discovery, quic_enabled: !config.disable_quic_support, metrics_enabled: config.metrics_enabled, target_peer_count: config.target_peers, + execution_proof_enabled: ctx.chain_spec.is_zkvm_enabled(), ..Default::default() }; + // TODO(zkproofs): We decrease the slot time, so we want to + // correspondingly decrease the status interval at which a node will + // check if it needs to sync with others. + let epoch_secs = ctx + .chain_spec + .seconds_per_slot + .saturating_mul(E::slots_per_epoch()) + .max(1); + peer_manager_cfg.status_interval = peer_manager_cfg.status_interval.min(epoch_secs); PeerManager::new(peer_manager_cfg, network_globals.clone())? 
}; @@ -1563,6 +1578,28 @@ impl Network { request_type, }) } + RequestType::ExecutionProofsByRoot(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["execution_proofs_by_root"], + ); + Some(NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + }) + } + RequestType::ExecutionProofsByRange(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["execution_proofs_by_range"], + ); + Some(NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + }) + } RequestType::LightClientBootstrap(_) => { metrics::inc_counter_vec( &metrics::TOTAL_RPC_REQUESTS, @@ -1648,6 +1685,16 @@ impl Network { RpcSuccessResponse::DataColumnsByRange(resp) => { self.build_response(id, peer_id, Response::DataColumnsByRange(Some(resp))) } + RpcSuccessResponse::ExecutionProofsByRoot(resp) => self.build_response( + id, + peer_id, + Response::ExecutionProofsByRoot(Some(resp)), + ), + RpcSuccessResponse::ExecutionProofsByRange(resp) => self.build_response( + id, + peer_id, + Response::ExecutionProofsByRange(Some(resp)), + ), // Should never be reached RpcSuccessResponse::LightClientBootstrap(bootstrap) => { self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap)) @@ -1677,6 +1724,12 @@ impl Network { ResponseTermination::BlobsByRoot => Response::BlobsByRoot(None), ResponseTermination::DataColumnsByRoot => Response::DataColumnsByRoot(None), ResponseTermination::DataColumnsByRange => Response::DataColumnsByRange(None), + ResponseTermination::ExecutionProofsByRoot => { + Response::ExecutionProofsByRoot(None) + } + ResponseTermination::ExecutionProofsByRange => { + Response::ExecutionProofsByRange(None) + } ResponseTermination::LightClientUpdatesByRange => { Response::LightClientUpdatesByRange(None) } diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 63f22be5e2c..4f331d0e16d 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -288,6 +288,8 @@ pub(crate) fn create_whitelist_filter( for id in 0..spec.data_column_sidecar_subnet_count { add(DataColumnSidecar(DataColumnSubnetId::new(id))); } + // Add ExecutionProof topic + add(ExecutionProof); } gossipsub::WhitelistSubscriptionFilter(possible_hashes) } diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 72f2873def9..1cd46a2a723 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -8,7 +8,7 @@ use std::io::{Error, ErrorKind}; use std::sync::Arc; use types::{ AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, BlobSidecar, - DataColumnSidecar, DataColumnSubnetId, EthSpec, ForkContext, ForkName, + DataColumnSidecar, DataColumnSubnetId, EthSpec, ExecutionProof, ForkContext, ForkName, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, @@ -26,6 +26,8 @@ pub enum PubsubMessage { BlobSidecar(Box<(u64, Arc>)>), /// Gossipsub message providing notification of a [`DataColumnSidecar`] along with the subnet id where it was received. DataColumnSidecar(Box<(DataColumnSubnetId, Arc>)>), + /// Gossipsub message providing notification of an [`ExecutionProof`]. 
+ ExecutionProof(Arc), /// Gossipsub message providing notification of a Aggregate attestation and associated proof. AggregateAndProofAttestation(Box>), /// Gossipsub message providing notification of a `SingleAttestation` with its subnet id. @@ -135,6 +137,7 @@ impl PubsubMessage { PubsubMessage::DataColumnSidecar(column_sidecar_data) => { GossipKind::DataColumnSidecar(column_sidecar_data.0) } + PubsubMessage::ExecutionProof(_) => GossipKind::ExecutionProof, PubsubMessage::AggregateAndProofAttestation(_) => GossipKind::BeaconAggregateAndProof, PubsubMessage::Attestation(attestation_data) => { GossipKind::Attestation(attestation_data.0) @@ -290,6 +293,23 @@ impl PubsubMessage { )), } } + GossipKind::ExecutionProof => { + match fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest) { + // TODO(zkproofs): we don't have the ChainSpec here, so if we change this to + // be for gloas, then we should change it here too + Some(fork) if fork.fulu_enabled() => { + let execution_proof = Arc::new( + ExecutionProof::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ); + Ok(PubsubMessage::ExecutionProof(execution_proof)) + } + Some(_) | None => Err(format!( + "execution_proof topic invalid for given fork digest {:?}", + gossip_topic.fork_digest + )), + } + } GossipKind::VoluntaryExit => { let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?; @@ -403,6 +423,7 @@ impl PubsubMessage { PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(), PubsubMessage::BlobSidecar(data) => data.1.as_ssz_bytes(), PubsubMessage::DataColumnSidecar(data) => data.1.as_ssz_bytes(), + PubsubMessage::ExecutionProof(data) => data.as_ssz_bytes(), PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(), PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(), PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(), @@ -438,6 +459,12 @@ impl std::fmt::Display for PubsubMessage { data.1.slot(), data.1.index, ), + PubsubMessage::ExecutionProof(data) => write!( + f, + "ExecutionProof: block_root: {}, proof_id: {}", + data.block_root, + data.proof_id.as_u8(), + ), PubsubMessage::AggregateAndProofAttestation(att) => write!( f, "Aggregate and Proof: slot: {}, index: {:?}, aggregator_index: {}", diff --git a/beacon_node/lighthouse_network/src/types/subnet.rs b/beacon_node/lighthouse_network/src/types/subnet.rs index 1892dcc83af..2d5ca95bf50 100644 --- a/beacon_node/lighthouse_network/src/types/subnet.rs +++ b/beacon_node/lighthouse_network/src/types/subnet.rs @@ -14,6 +14,13 @@ pub enum Subnet { SyncCommittee(SyncSubnetId), /// Represents a gossipsub data column subnet. DataColumn(DataColumnSubnetId), + /// Represents execution proof support. + // + /// Note: ExecutionProof uses a single gossip topic (not multiple topics), + /// but we track it here for ENR-based peer discovery to find zkVM-enabled peers. + /// TODO(zkproofs): Is there a way to have peer discovery without adding the global topic + /// into Subnet? + ExecutionProof, } /// A subnet to discover peers on along with the instant after which it's no longer useful. 
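Note: following the topic conventions in `topics.rs` below, `GossipKind::ExecutionProof` should render as the standard `/eth2/<fork_digest>/execution_proof/ssz_snappy` topic string. A minimal sketch of that expected format is shown here as an illustration only (it is not part of the changes); it assumes the usual lower-case hex fork digest, and the real string is produced by the `Display` impl for `GossipTopic` in the next hunk.

// Illustration only; mirrors the consensus-spec topic layout used by the other gossip kinds.
fn execution_proof_topic_string(fork_digest: [u8; 4]) -> String {
    let digest_hex: String = fork_digest.iter().map(|b| format!("{:02x}", b)).collect();
    // e.g. [0x01, 0x02, 0x03, 0x04] -> "/eth2/01020304/execution_proof/ssz_snappy"
    format!("/eth2/{}/execution_proof/ssz_snappy", digest_hex)
}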
diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 0c988f35c39..a3524ec5763 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -17,6 +17,7 @@ pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof"; pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_"; pub const BLOB_SIDECAR_PREFIX: &str = "blob_sidecar_"; pub const DATA_COLUMN_SIDECAR_PREFIX: &str = "data_column_sidecar_"; +pub const EXECUTION_PROOF_TOPIC: &str = "execution_proof"; pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; @@ -85,6 +86,14 @@ pub fn core_topics_to_subscribe( } } + // Subscribe to execution proof topic if zkVM mode is enabled for this fork. + // TODO(zkproofs): this looks different than the other checks because + // there is no official zkvm_fork and we enable this alongside a current fork + let zkvm_check = spec.is_zkvm_enabled_for_fork(fork_name); + if zkvm_check { + topics.push(GossipKind::ExecutionProof); + } + topics } @@ -103,6 +112,7 @@ pub fn is_fork_non_core_topic(topic: &GossipTopic, _fork_name: ForkName) -> bool | GossipKind::BeaconAggregateAndProof | GossipKind::BlobSidecar(_) | GossipKind::DataColumnSidecar(_) + | GossipKind::ExecutionProof | GossipKind::VoluntaryExit | GossipKind::ProposerSlashing | GossipKind::AttesterSlashing @@ -149,6 +159,8 @@ pub enum GossipKind { BlobSidecar(u64), /// Topic for publishing DataColumnSidecars. DataColumnSidecar(DataColumnSubnetId), + /// Topic for publishing ExecutionProofs + ExecutionProof, /// Topic for publishing raw attestations on a particular subnet. 
#[strum(serialize = "beacon_attestation")] Attestation(SubnetId), @@ -249,6 +261,7 @@ impl GossipTopic { PROPOSER_SLASHING_TOPIC => GossipKind::ProposerSlashing, ATTESTER_SLASHING_TOPIC => GossipKind::AttesterSlashing, BLS_TO_EXECUTION_CHANGE_TOPIC => GossipKind::BlsToExecutionChange, + EXECUTION_PROOF_TOPIC => GossipKind::ExecutionProof, LIGHT_CLIENT_FINALITY_UPDATE => GossipKind::LightClientFinalityUpdate, LIGHT_CLIENT_OPTIMISTIC_UPDATE => GossipKind::LightClientOptimisticUpdate, topic => match subnet_topic_index(topic) { @@ -313,6 +326,7 @@ impl std::fmt::Display for GossipTopic { GossipKind::DataColumnSidecar(column_subnet_id) => { format!("{}{}", DATA_COLUMN_SIDECAR_PREFIX, *column_subnet_id) } + GossipKind::ExecutionProof => EXECUTION_PROOF_TOPIC.into(), GossipKind::BlsToExecutionChange => BLS_TO_EXECUTION_CHANGE_TOPIC.into(), GossipKind::LightClientFinalityUpdate => LIGHT_CLIENT_FINALITY_UPDATE.into(), GossipKind::LightClientOptimisticUpdate => LIGHT_CLIENT_OPTIMISTIC_UPDATE.into(), @@ -334,6 +348,7 @@ impl From for GossipKind { Subnet::Attestation(s) => GossipKind::Attestation(s), Subnet::SyncCommittee(s) => GossipKind::SyncCommitteeMessage(s), Subnet::DataColumn(s) => GossipKind::DataColumnSidecar(s), + Subnet::ExecutionProof => GossipKind::ExecutionProof, } } } diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 412ee5aca5a..3f53fa9c314 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -28,6 +28,8 @@ pub fn spec_with_all_forks_enabled() -> ChainSpec { chain_spec.electra_fork_epoch = Some(Epoch::new(5)); chain_spec.fulu_fork_epoch = Some(Epoch::new(6)); chain_spec.gloas_fork_epoch = Some(Epoch::new(7)); + // Enable zkVM + chain_spec.zkvm_enabled = true; // check that we have all forks covered assert!(chain_spec.fork_epoch(ForkName::latest()).is_some()); diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 599fcd242bf..2327184eeea 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -18,8 +18,8 @@ use tracing::{Instrument, debug, error, info_span, warn}; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EmptyBlock, Epoch, - EthSpec, ForkName, Hash256, KzgCommitment, KzgProof, MinimalEthSpec, SignedBeaconBlock, - SignedBeaconBlockHeader, Slot, + EthSpec, ExecutionBlockHash, ExecutionProof, ExecutionProofId, ForkName, Hash256, + KzgCommitment, KzgProof, MinimalEthSpec, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; type E = MinimalEthSpec; @@ -1739,3 +1739,368 @@ fn test_active_requests() { } }) } + +// Tests ExecutionProofsByRoot RPC - basic single proof request +#[test] +#[allow(clippy::single_match)] +fn test_tcp_execution_proofs_by_root_single() { + // Set up the logging. 
+ let log_level = "debug"; + let enable_logging = true; + let _subscriber = build_tracing_subscriber(log_level, enable_logging); + + let spec = Arc::new(spec_with_all_forks_enabled()); + let current_fork_name = ForkName::Fulu; + + let rt = Arc::new(Runtime::new().unwrap()); + rt.block_on(async { + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + current_fork_name, + spec.clone(), + Protocol::Tcp, + false, + None, + ) + .await; + + let block_root = Hash256::random(); + let block_hash = ExecutionBlockHash::from_root(Hash256::random()); + let subnet_id = ExecutionProofId::new(0).unwrap(); + + // ExecutionProofsByRoot Request + let rpc_request = RequestType::ExecutionProofsByRoot( + ExecutionProofsByRootRequest::new( + block_root, + vec![], // No proofs already have + 2, // Request 2 proofs + ) + .unwrap(), + ); + + // ExecutionProofsByRoot Response + let proof = Arc::new( + ExecutionProof::new( + subnet_id, + Slot::new(100), + block_hash, + block_root, + vec![1, 2, 3, 4], + ) + .unwrap(), + ); + let rpc_response = Response::ExecutionProofsByRoot(Some(proof.clone())); + + // Build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { + debug!("Sending RPC"); + sender + .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) + .unwrap(); + } + NetworkEvent::ResponseReceived { + peer_id: _, + app_request_id: AppRequestId::Router, + response, + } => match response { + Response::ExecutionProofsByRoot(Some(received_proof)) => { + debug!("Proof received"); + assert_eq!(received_proof.block_root, block_root); + assert_eq!(received_proof.block_hash, block_hash); + assert_eq!(received_proof.proof_id, subnet_id); + } + Response::ExecutionProofsByRoot(None) => { + debug!("Stream terminated"); + return; + } + _ => {} + }, + _ => {} + } + } + } + .instrument(info_span!("Sender")); + + // Build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + } => { + if request_type == rpc_request { + debug!("Receiver got request"); + // Send the proof + receiver.send_response( + peer_id, + inbound_request_id, + rpc_response.clone(), + ); + // Send stream termination + receiver.send_response( + peer_id, + inbound_request_id, + Response::ExecutionProofsByRoot(None), + ); + debug!("Sent proof and termination"); + } + } + _ => {} + } + } + } + .instrument(info_span!("Receiver")); + + tokio::select! { + _ = sender_future => {} + _ = receiver_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); + } + } + }) +} + +// Tests ExecutionProofsByRoot RPC - multiple proofs chunked response +#[test] +#[allow(clippy::single_match)] +fn test_tcp_execution_proofs_by_root_chunked() { + // Set up the logging. 
+ let log_level = "debug"; + let enable_logging = true; + let _subscriber = build_tracing_subscriber(log_level, enable_logging); + + let spec = Arc::new(spec_with_all_forks_enabled()); + let current_fork_name = ForkName::Deneb; + + let messages_to_send = 3; + + let rt = Arc::new(Runtime::new().unwrap()); + rt.block_on(async { + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + current_fork_name, + spec.clone(), + Protocol::Tcp, + false, + None, + ) + .await; + + let block_root = Hash256::random(); + let block_hash = ExecutionBlockHash::from_root(Hash256::random()); + let proof_ids = [ + ExecutionProofId::new(0).unwrap(), + ExecutionProofId::new(1).unwrap(), + ExecutionProofId::new(2).unwrap(), + ]; + assert_eq!(proof_ids.len(), messages_to_send); + + // ExecutionProofsByRoot Request for multiple proofs + let rpc_request = RequestType::ExecutionProofsByRoot( + ExecutionProofsByRootRequest::new(block_root, vec![], proof_ids.len()).unwrap(), + ); + + // Create proofs for each proof ID + let proofs: Vec> = proof_ids + .iter() + .map(|subnet_id| { + Arc::new( + ExecutionProof::new( + *subnet_id, + Slot::new(100), + block_hash, + block_root, + vec![1, 2, 3, 4], + ) + .unwrap(), + ) + }) + .collect(); + + let mut messages_received = 0; + + // Build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { + debug!("Sending RPC"); + sender + .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) + .unwrap(); + } + NetworkEvent::ResponseReceived { + peer_id: _, + app_request_id: AppRequestId::Router, + response, + } => match response { + Response::ExecutionProofsByRoot(Some(received_proof)) => { + debug!("Chunk received"); + assert_eq!(received_proof.block_root, block_root); + assert_eq!(received_proof.block_hash, block_hash); + messages_received += 1; + } + Response::ExecutionProofsByRoot(None) => { + debug!("Stream terminated"); + assert_eq!(messages_received, messages_to_send); + return; + } + _ => {} + }, + _ => {} + } + } + } + .instrument(info_span!("Sender")); + + // Build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + } => { + if request_type == rpc_request { + debug!("Receiver got request"); + // Send all proofs + for proof in &proofs { + receiver.send_response( + peer_id, + inbound_request_id, + Response::ExecutionProofsByRoot(Some(proof.clone())), + ); + debug!("Sent proof chunk"); + } + // Send stream termination + receiver.send_response( + peer_id, + inbound_request_id, + Response::ExecutionProofsByRoot(None), + ); + debug!("Sent termination"); + } + } + _ => {} + } + } + } + .instrument(info_span!("Receiver")); + + tokio::select! { + _ = sender_future => {} + _ = receiver_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); + } + } + }) +} + +// Tests ExecutionProofsByRoot RPC - empty response (peer has no proofs) +#[test] +#[allow(clippy::single_match)] +fn test_tcp_execution_proofs_by_root_empty_response() { + // Set up the logging. 
+ let log_level = "debug"; + let enable_logging = true; + let _subscriber = build_tracing_subscriber(log_level, enable_logging); + + let spec = Arc::new(spec_with_all_forks_enabled()); + let current_fork_name = ForkName::Fulu; + + let rt = Arc::new(Runtime::new().unwrap()); + rt.block_on(async { + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + current_fork_name, + spec.clone(), + Protocol::Tcp, + false, + None, + ) + .await; + + let block_root = Hash256::random(); + + let rpc_request = RequestType::ExecutionProofsByRoot( + ExecutionProofsByRootRequest::new(block_root, vec![], 2).unwrap(), + ); + + let mut received_termination = false; + + // Build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { + debug!("Sending RPC"); + sender + .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) + .unwrap(); + } + NetworkEvent::ResponseReceived { + peer_id: _, + app_request_id: AppRequestId::Router, + response, + } => match response { + Response::ExecutionProofsByRoot(Some(_)) => { + panic!("Should not receive any proofs in empty response test"); + } + Response::ExecutionProofsByRoot(None) => { + debug!("Stream terminated (empty response)"); + received_termination = true; + return; + } + _ => {} + }, + _ => {} + } + } + } + .instrument(info_span!("Sender")); + + // Build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + } => { + if request_type == rpc_request { + debug!("Receiver got request"); + // Send only stream termination (no proofs) + receiver.send_response( + peer_id, + inbound_request_id, + Response::ExecutionProofsByRoot(None), + ); + debug!("Sent empty response (termination only)"); + } + } + _ => {} + } + } + } + .instrument(info_span!("Receiver")); + + tokio::select! 
{ + _ = sender_future => { + assert!(received_termination, "Should have received stream termination"); + } + _ = receiver_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); + } + } + }) +} diff --git a/beacon_node/lighthouse_tracing/src/lib.rs b/beacon_node/lighthouse_tracing/src/lib.rs index 56dccadaa94..9ca5afbcf9c 100644 --- a/beacon_node/lighthouse_tracing/src/lib.rs +++ b/beacon_node/lighthouse_tracing/src/lib.rs @@ -39,6 +39,10 @@ pub const SPAN_HANDLE_BLOBS_BY_RANGE_REQUEST: &str = "handle_blobs_by_range_requ pub const SPAN_HANDLE_DATA_COLUMNS_BY_RANGE_REQUEST: &str = "handle_data_columns_by_range_request"; pub const SPAN_HANDLE_BLOCKS_BY_ROOT_REQUEST: &str = "handle_blocks_by_root_request"; pub const SPAN_HANDLE_BLOBS_BY_ROOT_REQUEST: &str = "handle_blobs_by_root_request"; +pub const SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST: &str = + "handle_execution_proofs_by_root_request"; +pub const SPAN_HANDLE_EXECUTION_PROOFS_BY_RANGE_REQUEST: &str = + "handle_execution_proofs_by_range_request"; pub const SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST: &str = "handle_data_columns_by_root_request"; pub const SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE: &str = "handle_light_client_updates_by_range"; pub const SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP: &str = "handle_light_client_bootstrap"; @@ -70,6 +74,8 @@ pub const LH_BN_ROOT_SPAN_NAMES: &[&str] = &[ SPAN_HANDLE_DATA_COLUMNS_BY_RANGE_REQUEST, SPAN_HANDLE_BLOCKS_BY_ROOT_REQUEST, SPAN_HANDLE_BLOBS_BY_ROOT_REQUEST, + SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST, + SPAN_HANDLE_EXECUTION_PROOFS_BY_RANGE_REQUEST, SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST, SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE, SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP, diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index 2a7fedb53e9..d6b4303d04f 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -14,3 +14,4 @@ pub use lighthouse_network::NetworkConfig; pub use service::{ NetworkMessage, NetworkReceivers, NetworkSenders, NetworkService, ValidatorSubscriptionMessage, }; +pub use sync::manager::SyncMessage; diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index eb70147c6ef..5d2203ee380 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -7,6 +7,9 @@ use crate::{ use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; +use beacon_chain::execution_proof_verification::{ + GossipExecutionProofError, GossipVerifiedExecutionProof, +}; use beacon_chain::store::Error; use beacon_chain::{ AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, @@ -37,10 +40,11 @@ use store::hot_cold_store::HotColdDBError; use tracing::{Instrument, Span, debug, error, info, instrument, trace, warn}; use types::{ Attestation, AttestationData, AttestationRef, AttesterSlashing, BlobSidecar, DataColumnSidecar, - DataColumnSubnetId, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, - LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, - SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SingleAttestation, - Slot, SubnetId, SyncCommitteeMessage, 
SyncSubnetId, beacon_block::BlockImportSource, + DataColumnSubnetId, EthSpec, ExecutionProof, Hash256, IndexedAttestation, + LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedVoluntaryExit, SingleAttestation, Slot, SubnetId, + SyncCommitteeMessage, SyncSubnetId, beacon_block::BlockImportSource, }; use beacon_processor::work_reprocessing_queue::QueuedColumnReconstruction; @@ -767,6 +771,237 @@ impl NetworkBeaconProcessor { } } + /// Process a gossip execution proof. + /// + /// Validates the execution proof according to the gossip spec and processes it + /// through the DataAvailabilityChecker if valid. + pub async fn process_gossip_execution_proof( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + execution_proof: Arc, + _seen_timestamp: Duration, + ) { + let block_root = execution_proof.block_root; + let proof_id = execution_proof.proof_id; + + debug!( + %peer_id, + %proof_id, + %block_root, + "Received execution proof via gossip" + ); + + // Verify the execution proof for gossip + match self + .chain + .verify_execution_proof_for_gossip(execution_proof.clone()) + { + Ok(gossip_verified_proof) => { + debug!( + %block_root, + subnet_id = %gossip_verified_proof.subnet_id(), + "Successfully verified gossip execution proof" + ); + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + + // Process the verified proof through DA checker + self.process_gossip_verified_execution_proof( + peer_id, + gossip_verified_proof, + _seen_timestamp, + ) + .await + } + Err(err) => { + match err { + GossipExecutionProofError::PriorKnownUnpublished => { + debug!( + %block_root, + %proof_id, + "Gossip execution proof already processed via the EL. Checking availability." + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Accept, + ); + + // The proof is already in the DA checker (from HTTP API). + // Check if this makes any pending blocks complete and import them. + let slot = execution_proof.slot; + if let Err(e) = self + .chain + .process_rpc_execution_proofs(slot, block_root, vec![execution_proof]) + .await + { + debug!( + %block_root, + %proof_id, + error = ?e, + "Failed to process availability for prior known execution proof" + ); + } + } + GossipExecutionProofError::PriorKnown { + block_root, + proof_id, + .. + } => { + // Proof already known via gossip. No penalty, gossip filter should + // filter duplicates. + debug!( + %block_root, + %proof_id, + "Received already known execution proof. Ignoring the proof" + ); + } + GossipExecutionProofError::ParentUnknown { parent_root } => { + debug!( + action = "requesting parent", + %block_root, + %parent_root, + "Unknown parent hash for execution proof" + ); + // TODO(zkproofs): Implement parent lookup for execution proofs + // This might require creating a new SyncMessage variant + // For now, we just ignore the proof + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + GossipExecutionProofError::BeaconChainError(_) => { + crit!( + error = ?err, + "Internal error when verifying execution proof" + ) + } + GossipExecutionProofError::ProofVerificationFailed(ref reason) => { + warn!( + error = ?err, + %block_root, + %proof_id, + %reason, + "Execution proof verification failed. 
Rejecting the proof" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_execution_proof_verification_failed", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Reject, + ); + } + GossipExecutionProofError::ProofTooLarge { size, max_size } => { + warn!( + error = ?err, + %block_root, + %proof_id, + %size, + %max_size, + "Execution proof exceeds maximum size. Rejecting the proof" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_execution_proof_too_large", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Reject, + ); + } + GossipExecutionProofError::BlockNotAvailable { block_root } => { + debug!( + error = ?err, + %block_root, + %proof_id, + "Block for execution proof not yet available. Ignoring the proof" + ); + // Block might arrive later, so don't penalize heavily + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + GossipExecutionProofError::NotFinalizedDescendant { block_parent_root } => { + debug!( + error = ?err, + %block_root, + %block_parent_root, + %proof_id, + "Execution proof conflicts with finality. Rejecting the proof" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_execution_proof_not_finalized_descendant", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Reject, + ); + } + GossipExecutionProofError::FutureSlot { + message_slot, + latest_permissible_slot, + } => { + debug!( + error = ?err, + %block_root, + %proof_id, + %message_slot, + %latest_permissible_slot, + "Execution proof from future slot. Ignoring the proof" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "gossip_execution_proof_future_slot", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + GossipExecutionProofError::PastFinalizedSlot { + proof_slot, + finalized_slot, + } => { + debug!( + error = ?err, + %block_root, + %proof_id, + %proof_slot, + %finalized_slot, + "Execution proof from past finalized slot. 
Ignoring the proof" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_execution_proof_past_finalized", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + } + } + } + } + #[allow(clippy::too_many_arguments)] #[instrument( name = SPAN_PROCESS_GOSSIP_BLOB, @@ -1126,6 +1361,83 @@ impl NetworkBeaconProcessor { } } + async fn process_gossip_verified_execution_proof( + self: &Arc, + peer_id: PeerId, + verified_proof: GossipVerifiedExecutionProof, + _seen_duration: Duration, + ) { + let processing_start_time = Instant::now(); + let block_root = verified_proof.block_root(); + let proof_slot = verified_proof.slot(); + let subnet_id = verified_proof.subnet_id(); + + let result = self + .chain + .process_gossip_execution_proof(verified_proof, || Ok(())) + .await; + register_process_result_metrics(&result, metrics::BlockSource::Gossip, "execution_proof"); + + match &result { + Ok(availability) => match availability { + AvailabilityProcessingStatus::Imported(block_root) => { + info!( + %block_root, + %subnet_id, + "Gossipsub execution proof processed, imported fully available block" + ); + self.chain.recompute_head_at_current_slot().await; + + debug!( + processing_time_ms = processing_start_time.elapsed().as_millis(), + "Execution proof full verification complete" + ); + } + AvailabilityProcessingStatus::MissingComponents(slot, block_root) => { + trace!( + %slot, + %subnet_id, + "Execution proof cached, block still needs more components" + ); + debug!( + %block_root, + %proof_slot, + %subnet_id, + "Execution proof cached for pending block" + ); + } + }, + Err(BlockError::DuplicateFullyImported(_)) => { + debug!( + ?block_root, + %subnet_id, + "Ignoring gossip execution proof for already imported block" + ); + } + Err(err) => { + debug!( + outcome = ?err, + ?block_root, + block_slot = %proof_slot, + %subnet_id, + "Invalid gossip execution proof" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "bad_gossip_execution_proof", + ); + } + } + + if matches!(result, Ok(AvailabilityProcessingStatus::Imported(_))) { + self.send_sync_message(SyncMessage::GossipBlockProcessResult { + block_root, + imported: true, + }); + } + } + /// Process the beacon block received from the gossip network and: /// /// - If it passes gossip propagation criteria, tell the network thread to forward it. diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index bebda36d71c..ffac53e522a 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -14,7 +14,7 @@ use beacon_processor::{ use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, - LightClientUpdatesByRangeRequest, + ExecutionProofsByRangeRequest, ExecutionProofsByRootRequest, LightClientUpdatesByRangeRequest, }; use lighthouse_network::service::api_types::CustodyBackfillBatchId; use lighthouse_network::{ @@ -249,6 +249,32 @@ impl NetworkBeaconProcessor { }) } + /// Create a new `Work` event for some execution proof. 
+ pub fn send_gossip_execution_proof( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + execution_proof: Arc, + seen_timestamp: Duration, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = async move { + processor + .process_gossip_execution_proof( + message_id, + peer_id, + execution_proof, + seen_timestamp, + ) + .await + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipExecutionProof(Box::pin(process_fn)), + }) + } + /// Create a new `Work` event for some sync committee signature. pub fn send_gossip_sync_signature( self: &Arc, @@ -469,6 +495,30 @@ impl NetworkBeaconProcessor { }) } + /// Create a new `Work` event for some execution proofs. `process_rpc_execution_proofs` reports + /// the result back to sync. + pub fn send_rpc_execution_proofs( + self: &Arc, + block_root: Hash256, + proofs: Vec>, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> Result<(), Error> { + if proofs.is_empty() { + return Ok(()); + } + let process_fn = self.clone().generate_rpc_execution_proofs_process_fn( + block_root, + proofs, + seen_timestamp, + process_type, + ); + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::RpcExecutionProofs { process_fn }, + }) + } + /// Create a new `Work` event for some custody columns. `process_rpc_custody_columns` reports /// the result back to sync. pub fn send_rpc_custody_columns( @@ -631,6 +681,42 @@ impl NetworkBeaconProcessor { }) } + /// Create a new work event to process `ExecutionProofsByRootRequest`s from the RPC network. + pub fn send_execution_proofs_by_roots_request( + self: &Arc, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + request: ExecutionProofsByRootRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.handle_execution_proofs_by_root_request(peer_id, inbound_request_id, request) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::ExecutionProofsByRootsRequest(Box::new(process_fn)), + }) + } + + /// Create a new work event to process `ExecutionProofsByRangeRequest`s from the RPC network. + pub fn send_execution_proofs_by_range_request( + self: &Arc, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + request: ExecutionProofsByRangeRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.handle_execution_proofs_by_range_request(peer_id, inbound_request_id, request) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::ExecutionProofsByRangeRequest(Box::new(process_fn)), + }) + } + /// Create a new work event to process `DataColumnsByRootRequest`s from the RPC network. 
pub fn send_data_columns_by_roots_request( self: &Arc, diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index ac24b648e05..17ee4076731 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -7,6 +7,7 @@ use beacon_chain::{BeaconChainError, BeaconChainTypes, BlockProcessStatus, WhenS use itertools::{Itertools, process_results}; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, + ExecutionProofsByRangeRequest, ExecutionProofsByRootRequest, }; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, ReportSource, Response, SyncInfo}; @@ -14,6 +15,7 @@ use lighthouse_tracing::{ SPAN_HANDLE_BLOBS_BY_RANGE_REQUEST, SPAN_HANDLE_BLOBS_BY_ROOT_REQUEST, SPAN_HANDLE_BLOCKS_BY_RANGE_REQUEST, SPAN_HANDLE_BLOCKS_BY_ROOT_REQUEST, SPAN_HANDLE_DATA_COLUMNS_BY_RANGE_REQUEST, SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST, + SPAN_HANDLE_EXECUTION_PROOFS_BY_RANGE_REQUEST, SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST, SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP, SPAN_HANDLE_LIGHT_CLIENT_FINALITY_UPDATE, SPAN_HANDLE_LIGHT_CLIENT_OPTIMISTIC_UPDATE, SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE, }; @@ -390,6 +392,251 @@ impl NetworkBeaconProcessor { Ok(()) } + /// Handle an `ExecutionProofsByRoot` request from the peer. + #[instrument( + name = SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST, + parent = None, + level = "debug", + skip_all, + fields( + peer_id = %peer_id, + client = tracing::field::Empty, + ) + )] + pub fn handle_execution_proofs_by_root_request( + self: Arc, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + request: ExecutionProofsByRootRequest, + ) { + let client = self.network_globals.client(&peer_id); + Span::current().record("client", field::display(client.kind)); + + self.terminate_response_stream( + peer_id, + inbound_request_id, + self.handle_execution_proofs_by_root_request_inner( + peer_id, + inbound_request_id, + request, + ), + Response::ExecutionProofsByRoot, + ); + } + + /// Handle an `ExecutionProofsByRoot` request from the peer. + fn handle_execution_proofs_by_root_request_inner( + &self, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + request: ExecutionProofsByRootRequest, + ) -> Result<(), (RpcErrorResponse, &'static str)> { + let block_root = request.block_root; + let already_have_set: std::collections::HashSet<_> = + request.already_have.iter().copied().collect(); + let count_needed = request.count_needed as usize; + + // Get all execution proofs we have for this block from the DA checker, falling back to the + // store (which checks the store cache/DB). 
+ let available_proofs = match self + .chain + .data_availability_checker + .get_execution_proofs(&block_root) + { + Some(proofs) => proofs, + None => match self.chain.store.get_execution_proofs(&block_root) { + Ok(proofs) => { + if proofs.is_empty() { + debug!( + %peer_id, + %block_root, + "No execution proofs available for peer" + ); + return Ok(()); + } + proofs + } + Err(e) => { + error!( + %peer_id, + %block_root, + error = ?e, + "Error fetching execution proofs for block root" + ); + return Err(( + RpcErrorResponse::ServerError, + "Error fetching execution proofs", + )); + } + }, + }; + + // Filter out proofs the peer already has and send up to count_needed + let mut sent_count = 0; + for proof in available_proofs { + // Skip proofs the peer already has + if already_have_set.contains(&proof.proof_id) { + continue; + } + + // Send the proof + self.send_response( + peer_id, + inbound_request_id, + Response::ExecutionProofsByRoot(Some(proof)), + ); + + sent_count += 1; + + // Stop when we've sent the requested count + if sent_count >= count_needed { + break; + } + } + + debug!( + %peer_id, + %block_root, + requested = count_needed, + already_have = already_have_set.len(), + sent = sent_count, + "ExecutionProofsByRoot outgoing response processed" + ); + + Ok(()) + } + + /// Handle an `ExecutionProofsByRange` request from the peer. + #[instrument( + name = SPAN_HANDLE_EXECUTION_PROOFS_BY_RANGE_REQUEST, + parent = None, + level = "debug", + skip_all, + fields(peer_id = %peer_id, client = tracing::field::Empty) + )] + pub fn handle_execution_proofs_by_range_request( + &self, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + req: ExecutionProofsByRangeRequest, + ) { + let client = self.network_globals.client(&peer_id); + Span::current().record("client", field::display(client.kind)); + + self.terminate_response_stream( + peer_id, + inbound_request_id, + self.handle_execution_proofs_by_range_request_inner(peer_id, inbound_request_id, req), + Response::ExecutionProofsByRange, + ); + } + + /// Handle an `ExecutionProofsByRange` request from the peer. + fn handle_execution_proofs_by_range_request_inner( + &self, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + req: ExecutionProofsByRangeRequest, + ) -> Result<(), (RpcErrorResponse, &'static str)> { + debug!( + %peer_id, + count = req.count, + start_slot = req.start_slot, + "Received ExecutionProofsByRange Request" + ); + + let request_start_slot = Slot::from(req.start_slot); + + // Check if zkvm is enabled and get the execution proof boundary + let execution_proof_boundary_slot = match self.chain.execution_proof_boundary() { + Some(boundary) => boundary.start_slot(T::EthSpec::slots_per_epoch()), + None => { + debug!("ZKVM fork is disabled"); + return Err((RpcErrorResponse::InvalidRequest, "ZKVM fork is disabled")); + } + }; + + // Get the oldest execution proof slot from the store + let oldest_execution_proof_slot = self + .chain + .store + .get_execution_proof_info() + .oldest_execution_proof_slot + .unwrap_or(execution_proof_boundary_slot); + + if request_start_slot < oldest_execution_proof_slot { + debug!( + %request_start_slot, + %oldest_execution_proof_slot, + %execution_proof_boundary_slot, + "Range request start slot is older than the oldest execution proof slot." 
+ ); + + return if execution_proof_boundary_slot < oldest_execution_proof_slot { + Err(( + RpcErrorResponse::ResourceUnavailable, + "execution proofs pruned within boundary", + )) + } else { + Err(( + RpcErrorResponse::InvalidRequest, + "Req outside availability period", + )) + }; + } + + let block_roots = self.get_block_roots_for_slot_range( + req.start_slot, + req.count, + "ExecutionProofsByRange", + )?; + let mut proofs_sent = 0; + + for root in block_roots { + // Get execution proofs from the database (like BlobsByRange does for blobs) + match self.chain.store.get_execution_proofs(&root) { + Ok(proofs) => { + for proof in proofs { + // Due to skip slots, proofs could be out of the range + if proof.slot >= request_start_slot + && proof.slot < request_start_slot + req.count + { + proofs_sent += 1; + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + inbound_request_id, + response: Response::ExecutionProofsByRange(Some(proof)), + }); + } + } + } + Err(e) => { + error!( + request = ?req, + %peer_id, + block_root = ?root, + error = ?e, + "Error fetching execution proofs for block root" + ); + return Err(( + RpcErrorResponse::ServerError, + "Failed fetching execution proofs from database", + )); + } + } + } + + debug!( + %peer_id, + start_slot = req.start_slot, + count = req.count, + sent = proofs_sent, + "ExecutionProofsByRange outgoing response processed" + ); + + Ok(()) + } + /// Handle a `DataColumnsByRoot` request from the peer. #[instrument( name = SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST, diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index e49ae134fe4..6c260c9bb16 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -259,6 +259,21 @@ impl NetworkBeaconProcessor { Box::pin(process_fn) } + pub fn generate_rpc_execution_proofs_process_fn( + self: Arc, + block_root: Hash256, + proofs: Vec>, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> AsyncFn { + let process_fn = async move { + self.clone() + .process_rpc_execution_proofs(block_root, proofs, seen_timestamp, process_type) + .await; + }; + Box::pin(process_fn) + } + /// Attempt to process a list of blobs received from a direct RPC request. #[instrument( name = SPAN_PROCESS_RPC_BLOBS, @@ -997,4 +1012,79 @@ impl NetworkBeaconProcessor { } } } + + /// Process execution proofs received via RPC. + pub async fn process_rpc_execution_proofs( + self: Arc>, + block_root: Hash256, + proofs: Vec>, + _seen_timestamp: Duration, + process_type: BlockProcessType, + ) { + // Get slot directly from the first proof. All proofs should be for the same block. 
+ let slot = match proofs.first() { + Some(proof) => proof.slot, + None => { + debug!(?block_root, "No execution proofs to process"); + return; + } + }; + + let proof_ids: Vec<_> = proofs.iter().map(|p| p.proof_id).collect(); + + debug!( + ?proof_ids, + %block_root, + %slot, + proof_count = proofs.len(), + "RPC execution proofs received" + ); + + if let Ok(current_slot) = self.chain.slot() + && current_slot == slot + { + // let delay = get_slot_delay_ms(seen_timestamp, slot, &self.chain.slot_clock); + // TODO(zkproofs): Add dedicated metrics for execution proofs + } + + let result = self + .chain + .process_rpc_execution_proofs(slot, block_root, proofs) + .await; + + // TODO(zkproofs): Add dedicated metrics for execution proof processing + // register_process_result_metrics(&result, metrics::BlockSource::Rpc, "execution_proofs"); + + match &result { + Ok(AvailabilityProcessingStatus::Imported(hash)) => { + debug!( + result = "imported block with execution proofs", + %slot, + block_hash = %hash, + "Block components retrieved" + ); + self.chain.recompute_head_at_current_slot().await; + } + Ok(AvailabilityProcessingStatus::MissingComponents(_, _)) => { + debug!( + block_hash = %block_root, + %slot, + "Missing components over rpc (still need more proofs or other components)" + ); + } + Err(BlockError::DuplicateFullyImported(_)) => { + debug!( + block_hash = %block_root, + %slot, + "Execution proofs have already been imported" + ); + } + Err(_) => {} + } + + self.send_sync_message(SyncMessage::BlockComponentProcessed { + process_type, + result: result.into(), + }); + } } diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 60fe094bb7c..6ccfb55ddee 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -24,7 +24,9 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, error, trace, warn}; -use types::{BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, SignedBeaconBlock}; +use types::{ + BlobSidecar, DataColumnSidecar, EthSpec, ExecutionProof, ForkContext, SignedBeaconBlock, +}; /// Handles messages from the network and routes them to the appropriate service to be handled. 
pub struct Router { @@ -86,14 +88,13 @@ impl Router { invalid_block_storage: InvalidBlockStorage, beacon_processor_send: BeaconProcessorSend, fork_context: Arc, + sync_send: mpsc::UnboundedSender>, + sync_recv: mpsc::UnboundedReceiver>, ) -> Result>, String> { trace!("Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); - // generate the message channel - let (sync_send, sync_recv) = mpsc::unbounded_channel::>(); - let network_beacon_processor = NetworkBeaconProcessor { beacon_processor_send, duplicate_cache: DuplicateCache::default(), @@ -272,6 +273,24 @@ impl Router { request, ), ), + RequestType::ExecutionProofsByRoot(request) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor + .send_execution_proofs_by_roots_request( + peer_id, + inbound_request_id, + request, + ), + ), + RequestType::ExecutionProofsByRange(request) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor + .send_execution_proofs_by_range_request( + peer_id, + inbound_request_id, + request, + ), + ), _ => {} } } @@ -309,6 +328,16 @@ impl Router { Response::DataColumnsByRange(data_column) => { self.on_data_columns_by_range_response(peer_id, app_request_id, data_column); } + Response::ExecutionProofsByRoot(execution_proof) => { + self.on_execution_proofs_by_root_response(peer_id, app_request_id, execution_proof); + } + Response::ExecutionProofsByRange(execution_proof) => { + self.on_execution_proofs_by_range_response( + peer_id, + app_request_id, + execution_proof, + ); + } // Light client responses should not be received Response::LightClientBootstrap(_) | Response::LightClientOptimisticUpdate(_) @@ -384,6 +413,15 @@ impl Router { ), ) } + PubsubMessage::ExecutionProof(execution_proof) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor.send_gossip_execution_proof( + message_id, + peer_id, + execution_proof, + timestamp_now(), + ), + ), PubsubMessage::VoluntaryExit(exit) => { debug!(%peer_id, "Received a voluntary exit"); self.handle_beacon_processor_send_result( @@ -670,6 +708,64 @@ impl Router { }); } + /// Handle an `ExecutionProofsByRoot` response from the peer. + pub fn on_execution_proofs_by_root_response( + &mut self, + peer_id: PeerId, + app_request_id: AppRequestId, + execution_proof: Option>, + ) { + let sync_request_id = match app_request_id { + AppRequestId::Sync(sync_id) => match sync_id { + id @ SyncRequestId::SingleExecutionProof { .. } => id, + other => { + crit!(request = ?other, "ExecutionProofsByRoot response on incorrect request"); + return; + } + }, + AppRequestId::Router => { + crit!(%peer_id, "All ExecutionProofsByRoot requests belong to sync"); + return; + } + AppRequestId::Internal => unreachable!("Handled internally"), + }; + + trace!( + %peer_id, + "Received ExecutionProofsByRoot Response" + ); + self.send_to_sync(SyncMessage::RpcExecutionProof { + sync_request_id, + peer_id, + execution_proof, + seen_timestamp: timestamp_now(), + }); + } + + /// Handle an `ExecutionProofsByRange` response from the peer. 
+ pub fn on_execution_proofs_by_range_response( + &mut self, + peer_id: PeerId, + app_request_id: AppRequestId, + execution_proof: Option>, + ) { + trace!( + %peer_id, + "Received ExecutionProofsByRange Response" + ); + + if let AppRequestId::Sync(sync_request_id) = app_request_id { + self.send_to_sync(SyncMessage::RpcExecutionProof { + peer_id, + sync_request_id, + execution_proof, + seen_timestamp: timestamp_now(), + }); + } else { + crit!("All execution proofs by range responses should belong to sync"); + } + } + /// Handle a `DataColumnsByRoot` response from the peer. pub fn on_data_columns_by_root_response( &mut self, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 0869b442aec..dcb1fd5a507 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -5,6 +5,7 @@ use crate::network_beacon_processor::InvalidBlockStorage; use crate::persisted_dht::{clear_dht, load_dht, persist_dht}; use crate::router::{Router, RouterMessage}; use crate::subnet_service::{SubnetService, SubnetServiceMessage, Subscription}; +use crate::sync::manager::SyncMessage; use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_processor::BeaconProcessorSend; use futures::channel::mpsc::Sender; @@ -138,11 +139,13 @@ pub enum ValidatorSubscriptionMessage { pub struct NetworkSenders { network_send: mpsc::UnboundedSender>, validator_subscription_send: mpsc::Sender, + sync_send: mpsc::UnboundedSender>, } pub struct NetworkReceivers { pub network_recv: mpsc::UnboundedReceiver>, pub validator_subscription_recv: mpsc::Receiver, + pub sync_recv: mpsc::UnboundedReceiver>, } impl NetworkSenders { @@ -150,13 +153,16 @@ impl NetworkSenders { let (network_send, network_recv) = mpsc::unbounded_channel::>(); let (validator_subscription_send, validator_subscription_recv) = mpsc::channel(VALIDATOR_SUBSCRIPTION_MESSAGE_QUEUE_SIZE); + let (sync_send, sync_recv) = mpsc::unbounded_channel::>(); let senders = Self { network_send, validator_subscription_send, + sync_send, }; let receivers = NetworkReceivers { network_recv, validator_subscription_recv, + sync_recv, }; (senders, receivers) } @@ -168,6 +174,10 @@ impl NetworkSenders { pub fn validator_subscription_send(&self) -> mpsc::Sender { self.validator_subscription_send.clone() } + + pub fn sync_send(&self) -> mpsc::UnboundedSender> { + self.sync_send.clone() + } } /// Service that handles communication between internal services and the `lighthouse_network` network service. 
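Note: with the sync channel now owned by `NetworkSenders` (see the `service.rs` hunks that follow) and `SyncMessage` re-exported from the `network` crate, components other than the router can hand messages straight to the sync manager. The following is a minimal usage sketch, not code from the diff; it assumes the caller already holds the senders and a constructed message, and that the hypothetical helper name `forward_to_sync` is purely illustrative.

// Sketch only: forwards a SyncMessage over the unbounded channel exposed by
// NetworkSenders::sync_send().
use network::{NetworkSenders, SyncMessage};
use types::EthSpec;

fn forward_to_sync<E: EthSpec>(senders: &NetworkSenders<E>, msg: SyncMessage<E>) {
    // The send only fails if the sync manager has shut down; in that case the
    // message is simply dropped.
    let _ = senders.sync_send().send(msg);
}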
@@ -320,6 +330,8 @@ impl NetworkService { invalid_block_storage, beacon_processor_send, fork_context.clone(), + network_senders.sync_send(), + network_receivers.sync_recv, )?; // attestation and sync committee subnet service @@ -338,6 +350,7 @@ impl NetworkService { let NetworkReceivers { network_recv, validator_subscription_recv, + sync_recv: _, } = network_receivers; // create the network service and spawn the task diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 6c0cbd7e554..441e9b0a6d9 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -348,6 +348,23 @@ impl BackFillSync { CouplingError::BlobPeerFailure(msg) => { tracing::debug!(?batch_id, msg, "Blob peer failure"); } + CouplingError::ExecutionProofPeerFailure { + error, + peer, + exceeded_retries, + } => { + tracing::debug!(?batch_id, ?peer, error, "Execution proof peer failure"); + if !*exceeded_retries { + let mut failed_peers = HashSet::new(); + failed_peers.insert(*peer); + return self.retry_execution_proof_batch( + network, + batch_id, + request_id, + failed_peers, + ); + } + } CouplingError::InternalError(msg) => { error!(?batch_id, msg, "Block components coupling internal error"); } @@ -1001,6 +1018,46 @@ impl BackFillSync { Ok(()) } + /// Retries execution proof requests within the batch by creating a new proofs request. + pub fn retry_execution_proof_batch( + &mut self, + network: &mut SyncNetworkContext, + batch_id: BatchId, + id: Id, + mut failed_peers: HashSet, + ) -> Result<(), BackFillError> { + if let Some(batch) = self.batches.get_mut(&batch_id) { + failed_peers.extend(&batch.failed_peers()); + let req = batch.to_blocks_by_range_request().0; + + let synced_peers = network + .network_globals() + .peers + .read() + .synced_peers_for_epoch(batch_id) + .cloned() + .collect::>(); + + match network.retry_execution_proofs_by_range(id, &synced_peers, &failed_peers, req) { + Ok(()) => { + debug!( + ?batch_id, + id, "Retried execution proof requests from different peers" + ); + return Ok(()); + } + Err(e) => { + debug!(?batch_id, id, e, "Failed to retry execution proof batch"); + } + } + } else { + return Err(BackFillError::InvalidSyncState( + "Batch should exist to be retried".to_string(), + )); + } + Ok(()) + } + /// When resuming a chain, this function searches for batches that need to be re-downloaded and /// transitions their state to redownload the batch. 
fn resume_batches(&mut self, network: &mut SyncNetworkContext) -> Result<(), BackFillError> { diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index c6b05190871..64da1ae61fc 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -2,7 +2,7 @@ use crate::sync::block_lookups::single_block_lookup::{ LookupRequestError, SingleBlockLookup, SingleLookupRequestState, }; use crate::sync::block_lookups::{ - BlobRequestState, BlockRequestState, CustodyRequestState, PeerId, + BlobRequestState, BlockRequestState, CustodyRequestState, PeerId, ProofRequestState, }; use crate::sync::manager::BlockProcessType; use crate::sync::network_context::{LookupRequestResult, SyncNetworkContext}; @@ -12,7 +12,7 @@ use parking_lot::RwLock; use std::collections::HashSet; use std::sync::Arc; use types::blob_sidecar::FixedBlobSidecarList; -use types::{DataColumnSidecarList, SignedBeaconBlock}; +use types::{DataColumnSidecarList, ExecutionProof, SignedBeaconBlock}; use super::SingleLookupId; use super::single_block_lookup::{ComponentRequests, DownloadResult}; @@ -22,6 +22,7 @@ pub enum ResponseType { Block, Blob, CustodyColumn, + ExecutionProof, } /// This trait unifies common single block lookup functionality across blocks and blobs. This @@ -215,3 +216,57 @@ impl RequestState for CustodyRequestState { &mut self.state } } + +impl RequestState for ProofRequestState { + type VerifiedResponseType = Vec>; + + fn make_request( + &self, + id: Id, + lookup_peers: Arc>>, + _min_proofs: usize, + cx: &mut SyncNetworkContext, + ) -> Result { + cx.execution_proof_lookup_request( + id, + lookup_peers, + self.block_root, + self.min_proofs_required, + ) + .map_err(LookupRequestError::SendFailedNetwork) + } + + fn send_for_processing( + id: Id, + download_result: DownloadResult, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + let DownloadResult { + value, + block_root, + seen_timestamp, + .. 
+ } = download_result; + cx.send_execution_proofs_for_processing(id, block_root, value, seen_timestamp) + .map_err(LookupRequestError::SendFailedProcessor) + } + + fn response_type() -> ResponseType { + ResponseType::ExecutionProof + } + + fn request_state_mut(request: &mut SingleBlockLookup) -> Result<&mut Self, &'static str> { + request + .proof_request + .as_mut() + .ok_or("no active proof request") + } + + fn get_state(&self) -> &SingleLookupRequestState { + &self.state + } + + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { + &mut self.state + } +} diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index f8ffd298caf..6212c63a119 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -39,7 +39,9 @@ use fnv::FnvHashMap; use lighthouse_network::service::api_types::SingleLookupReqId; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; -pub use single_block_lookup::{BlobRequestState, BlockRequestState, CustodyRequestState}; +pub use single_block_lookup::{ + BlobRequestState, BlockRequestState, CustodyRequestState, ProofRequestState, +}; use std::collections::hash_map::Entry; use std::sync::Arc; use std::time::Duration; @@ -532,6 +534,9 @@ impl BlockLookups { BlockProcessType::SingleCustodyColumn(id) => { self.on_processing_result_inner::>(id, result, cx) } + BlockProcessType::SingleExecutionProof { id } => { + self.on_processing_result_inner::(id, result, cx) + } }; self.on_lookup_result(process_type.id(), lookup_result, "processing_result", cx); } @@ -673,6 +678,9 @@ impl BlockLookups { ResponseType::CustodyColumn => { "lookup_custody_column_processing_failure" } + ResponseType::ExecutionProof => { + "lookup_execution_proof_processing_failure" + } }, ); } diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 46897b2283b..6c326e84d4d 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -17,7 +17,7 @@ use store::Hash256; use strum::IntoStaticStr; use tracing::{Span, debug_span}; use types::blob_sidecar::FixedBlobSidecarList; -use types::{DataColumnSidecarList, EthSpec, SignedBeaconBlock, Slot}; +use types::{DataColumnSidecarList, EthSpec, ExecutionProof, SignedBeaconBlock, Slot}; // Dedicated enum for LookupResult to force its usage #[must_use = "LookupResult must be handled with on_lookup_result"] @@ -63,6 +63,7 @@ pub struct SingleBlockLookup { pub id: Id, pub block_request_state: BlockRequestState, pub component_requests: ComponentRequests, + pub proof_request: Option, /// Peers that claim to have imported this set of block components. This state is shared with /// the custody request to have an updated view of the peers that claim to have imported the /// block associated with this lookup. The peer set of a lookup can change rapidly, and faster @@ -102,6 +103,7 @@ impl SingleBlockLookup { id, block_request_state: BlockRequestState::new(requested_block_root), component_requests: ComponentRequests::WaitingForBlock, + proof_request: None, peers: Arc::new(RwLock::new(HashSet::from_iter(peers.iter().copied()))), block_root: requested_block_root, awaiting_parent, @@ -168,32 +170,51 @@ impl SingleBlockLookup { /// Returns true if the block has already been downloaded. 
pub fn all_components_processed(&self) -> bool { - self.block_request_state.state.is_processed() - && match &self.component_requests { - ComponentRequests::WaitingForBlock => false, - ComponentRequests::ActiveBlobRequest(request, _) => request.state.is_processed(), - ComponentRequests::ActiveCustodyRequest(request) => request.state.is_processed(), - ComponentRequests::NotNeeded { .. } => true, - } + let block_processed = self.block_request_state.state.is_processed(); + + let da_component_processed = match &self.component_requests { + ComponentRequests::WaitingForBlock => false, + ComponentRequests::ActiveBlobRequest(request, _) => request.state.is_processed(), + ComponentRequests::ActiveCustodyRequest(request) => request.state.is_processed(), + ComponentRequests::NotNeeded { .. } => true, + }; + + let proof_processed = self + .proof_request + .as_ref() + .map(|request| request.state.is_processed()) + .unwrap_or(true); // If no proof request, consider it processed + + block_processed && da_component_processed && proof_processed } /// Returns true if this request is expecting some event to make progress pub fn is_awaiting_event(&self) -> bool { - self.awaiting_parent.is_some() - || self.block_request_state.state.is_awaiting_event() - || match &self.component_requests { - // If components are waiting for the block request to complete, here we should - // check if the`block_request_state.state.is_awaiting_event(). However we already - // checked that above, so `WaitingForBlock => false` is equivalent. - ComponentRequests::WaitingForBlock => false, - ComponentRequests::ActiveBlobRequest(request, _) => { - request.state.is_awaiting_event() - } - ComponentRequests::ActiveCustodyRequest(request) => { - request.state.is_awaiting_event() - } - ComponentRequests::NotNeeded { .. } => false, - } + if self.awaiting_parent.is_some() { + return true; + } + + if self.block_request_state.state.is_awaiting_event() { + return true; + } + + let da_awaiting = match &self.component_requests { + // If components are waiting for the block request to complete, here we should + // check if the`block_request_state.state.is_awaiting_event(). However we already + // checked that above, so `WaitingForBlock => false` is equivalent. + ComponentRequests::WaitingForBlock => false, + ComponentRequests::ActiveBlobRequest(request, _) => request.state.is_awaiting_event(), + ComponentRequests::ActiveCustodyRequest(request) => request.state.is_awaiting_event(), + ComponentRequests::NotNeeded { .. } => false, + }; + + let proof_awaiting = self + .proof_request + .as_ref() + .map(|request| request.state.is_awaiting_event()) + .unwrap_or(false); + + da_awaiting || proof_awaiting } /// Makes progress on all requests of this lookup. Any error is not recoverable and must result @@ -239,6 +260,13 @@ impl SingleBlockLookup { } else { self.component_requests = ComponentRequests::NotNeeded("outside da window"); } + + if cx.chain.should_fetch_execution_proofs(block_epoch) { + self.proof_request = cx + .chain + .min_execution_proofs_required() + .map(|min_proofs| ProofRequestState::new(self.block_root, min_proofs)); + } } else { // Wait to download the block before downloading blobs. 
Then we can be sure that the // block has data, so there's no need to do "blind" requests for all possible blobs and @@ -253,6 +281,7 @@ impl SingleBlockLookup { } } + // Progress DA component requests match &self.component_requests { ComponentRequests::WaitingForBlock => {} // do nothing ComponentRequests::ActiveBlobRequest(_, expected_blobs) => { @@ -264,6 +293,11 @@ impl SingleBlockLookup { ComponentRequests::NotNeeded { .. } => {} // do nothing } + // Progress proof request (separate from DA components) + if let Some(request) = &self.proof_request { + self.continue_request::(cx, request.min_proofs_required)?; + } + // If all components of this lookup are already processed, there will be no future events // that can make progress so it must be dropped. Consider the lookup completed. // This case can happen if we receive the components from gossip during a retry. @@ -404,6 +438,26 @@ impl CustodyRequestState { } } +/// The state of the execution proof request component of a `SingleBlockLookup`. +#[derive(Educe)] +#[educe(Debug)] +pub struct ProofRequestState { + #[educe(Debug(ignore))] + pub block_root: Hash256, + pub state: SingleLookupRequestState>>, + pub min_proofs_required: usize, +} + +impl ProofRequestState { + pub fn new(block_root: Hash256, min_proofs_required: usize) -> Self { + Self { + block_root, + state: SingleLookupRequestState::new(), + min_proofs_required, + } + } +} + /// The state of the block request component of a `SingleBlockLookup`. #[derive(Educe)] #[educe(Debug)] diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index ed9a11a03de..faa2fac949c 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -5,6 +5,7 @@ use lighthouse_network::{ PeerId, service::api_types::{ BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, + ExecutionProofsByRangeRequestId, }, }; use ssz_types::RuntimeVariableList; @@ -12,7 +13,7 @@ use std::{collections::HashMap, sync::Arc}; use tracing::{Span, debug}; use types::{ BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, - Hash256, SignedBeaconBlock, + ExecutionProof, Hash256, SignedBeaconBlock, }; use crate::sync::network_context::MAX_COLUMN_RETRIES; @@ -24,6 +25,7 @@ use crate::sync::network_context::MAX_COLUMN_RETRIES; /// - Blocks themselves (always required) /// - Blob sidecars (pre-Fulu fork) /// - Data columns (Fulu fork and later) +/// - Execution proofs (for zkvm-enabled nodes) /// /// It accumulates responses until all expected components are received, then couples /// them together and returns complete `RpcBlock`s ready for processing. Handles validation @@ -33,10 +35,25 @@ pub struct RangeBlockComponentsRequest { blocks_request: ByRangeRequest>>>, /// Sidecars we have received awaiting for their corresponding block. block_data_request: RangeBlockDataRequest, + /// Execution proofs request (for zkvm-enabled nodes). + execution_proofs_request: Option>, /// Span to track the range request and all children range requests. pub(crate) request_span: Span, } +/// Tracks execution proofs requests during range sync. +struct ExecutionProofsRequest { + /// The request tracking state. + request: ByRangeRequest>>, + /// The peer we requested proofs from. + peer: PeerId, + /// Number of proofs required per block. + min_proofs_required: usize, + /// Number of proof attempts completed for this batch. 
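+    /// Compared against `MAX_EXECUTION_PROOF_RETRIES` when deciding whether a failed
+    /// proofs request may be retried from another peer.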
+ attempt: usize, + _phantom: std::marker::PhantomData, +} + pub enum ByRangeRequest { Active(I), Complete(T), @@ -67,6 +84,12 @@ pub(crate) enum CouplingError { exceeded_retries: bool, }, BlobPeerFailure(String), + /// The peer we requested execution proofs from was faulty/malicious + ExecutionProofPeerFailure { + error: String, + peer: PeerId, + exceeded_retries: bool, + }, } impl RangeBlockComponentsRequest { @@ -76,6 +99,7 @@ impl RangeBlockComponentsRequest { /// * `blocks_req_id` - Request ID for the blocks /// * `blobs_req_id` - Optional request ID for blobs (pre-Fulu fork) /// * `data_columns` - Optional tuple of (request_id->column_indices pairs, expected_custody_columns) for Fulu fork + /// * `execution_proofs` - Optional tuple of (request_id, peer, min_proofs_required) for zkvm-enabled nodes #[allow(clippy::type_complexity)] pub fn new( blocks_req_id: BlocksByRangeRequestId, @@ -84,6 +108,7 @@ impl RangeBlockComponentsRequest { Vec<(DataColumnsByRangeRequestId, Vec)>, Vec, )>, + execution_proofs: Option<(ExecutionProofsByRangeRequestId, usize)>, request_span: Span, ) -> Self { let block_data_request = if let Some(blobs_req_id) = blobs_req_id { @@ -103,9 +128,19 @@ impl RangeBlockComponentsRequest { RangeBlockDataRequest::NoData }; + let execution_proofs_request = + execution_proofs.map(|(req_id, min_proofs_required)| ExecutionProofsRequest { + request: ByRangeRequest::Active(req_id), + peer: req_id.peer, + min_proofs_required, + attempt: 0, + _phantom: std::marker::PhantomData, + }); + Self { blocks_request: ByRangeRequest::Active(blocks_req_id), block_data_request, + execution_proofs_request, request_span, } } @@ -187,6 +222,30 @@ impl RangeBlockComponentsRequest { } } + /// Adds received execution proofs to the request. + /// + /// Returns an error if this request doesn't expect execution proofs, + /// or if the request ID doesn't match. + pub fn add_execution_proofs( + &mut self, + req_id: ExecutionProofsByRangeRequestId, + proofs: Vec>, + ) -> Result<(), String> { + match &mut self.execution_proofs_request { + Some(exec_proofs_req) => { + exec_proofs_req.request.finish(req_id, proofs)?; + exec_proofs_req.attempt += 1; + Ok(()) + } + None => Err("received execution proofs but none were expected".to_owned()), + } + } + + /// Returns true if this request expects execution proofs. + pub fn expects_execution_proofs(&self) -> bool { + self.execution_proofs_request.is_some() + } + /// Attempts to construct RPC blocks from all received components. /// /// Returns `None` if not all expected requests have completed. @@ -200,6 +259,13 @@ impl RangeBlockComponentsRequest { return None; }; + // Check if execution proofs are required but not yet complete + if let Some(exec_proofs_req) = &self.execution_proofs_request + && exec_proofs_req.request.to_finished().is_none() + { + return None; + } + // Increment the attempt once this function returns the response or errors match &mut self.block_data_request { RangeBlockDataRequest::NoData => { @@ -269,6 +335,50 @@ impl RangeBlockComponentsRequest { } } + /// Returns the collected execution proofs if available. + /// This should be called after `responses()` returns `Some`. + pub fn get_execution_proofs(&self) -> Option>> { + self.execution_proofs_request + .as_ref() + .and_then(|req| req.request.to_finished().cloned()) + } + + /// Returns the peer that was responsible for providing execution proofs. 
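+    /// Returns `None` if execution proofs were not requested for this batch.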
+ pub fn execution_proofs_peer(&self) -> Option { + self.execution_proofs_request.as_ref().map(|req| req.peer) + } + + /// Returns the minimum number of execution proofs required per block, if any. + pub fn min_execution_proofs_required(&self) -> Option { + self.execution_proofs_request + .as_ref() + .map(|req| req.min_proofs_required) + } + + /// Returns the number of completed proof attempts for this batch, if any. + pub fn execution_proofs_attempt(&self) -> Option { + self.execution_proofs_request + .as_ref() + .map(|req| req.attempt) + } + + /// Resets the execution proofs request to retry with a new peer. + pub fn reinsert_execution_proofs_request( + &mut self, + req_id: ExecutionProofsByRangeRequestId, + min_proofs_required: usize, + ) -> Result<(), String> { + match &mut self.execution_proofs_request { + Some(exec_proofs_req) => { + exec_proofs_req.request = ByRangeRequest::Active(req_id); + exec_proofs_req.peer = req_id.peer; + exec_proofs_req.min_proofs_required = min_proofs_required; + Ok(()) + } + None => Err("execution proofs request not present".to_owned()), + } + } + fn responses_with_blobs( blocks: Vec>>, blobs: Vec>>, @@ -529,7 +639,7 @@ mod tests { let blocks_req_id = blocks_id(components_id()); let mut info = - RangeBlockComponentsRequest::::new(blocks_req_id, None, None, Span::none()); + RangeBlockComponentsRequest::::new(blocks_req_id, None, None, None, Span::none()); // Send blocks and complete terminate response info.add_blocks(blocks_req_id, blocks).unwrap(); @@ -557,6 +667,7 @@ mod tests { blocks_req_id, Some(blobs_req_id), None, + None, Span::none(), ); @@ -606,6 +717,7 @@ mod tests { blocks_req_id, None, Some((columns_req_id.clone(), expects_custody_columns.clone())), + None, Span::none(), ); // Send blocks and complete terminate response @@ -674,6 +786,7 @@ mod tests { blocks_req_id, None, Some((columns_req_id.clone(), expects_custody_columns.clone())), + None, Span::none(), ); @@ -762,6 +875,7 @@ mod tests { blocks_req_id, None, Some((columns_req_id.clone(), expected_custody_columns.clone())), + None, Span::none(), ); @@ -848,6 +962,7 @@ mod tests { blocks_req_id, None, Some((columns_req_id.clone(), expected_custody_columns.clone())), + None, Span::none(), ); @@ -941,6 +1056,7 @@ mod tests { blocks_req_id, None, Some((columns_req_id.clone(), expected_custody_columns.clone())), + None, Span::none(), ); diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 338f21ce987..6c41d3d9c75 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -45,6 +45,7 @@ use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::block_lookups::{ BlobRequestState, BlockComponent, BlockRequestState, CustodyRequestState, DownloadResult, + ProofRequestState, }; use crate::sync::custody_backfill_sync::CustodyBackFillSync; use crate::sync::network_context::{PeerGroup, RpcResponseResult}; @@ -60,7 +61,8 @@ use lighthouse_network::service::api_types::{ BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId, CustodyBackFillBatchRequestId, CustodyBackfillBatchId, CustodyRequester, DataColumnsByRangeRequestId, DataColumnsByRangeRequester, DataColumnsByRootRequestId, - DataColumnsByRootRequester, Id, SingleLookupReqId, SyncRequestId, + DataColumnsByRootRequester, ExecutionProofsByRangeRequestId, Id, SingleLookupReqId, + SyncRequestId, }; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::{PeerAction, PeerId}; @@ -73,7 +75,8 
@@ use std::time::Duration; use tokio::sync::mpsc; use tracing::{debug, error, info, trace}; use types::{ - BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, Hash256, SignedBeaconBlock, Slot, + BlobSidecar, DataColumnSidecar, EthSpec, ExecutionProof, ForkContext, Hash256, + SignedBeaconBlock, Slot, }; /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync @@ -132,6 +135,14 @@ pub enum SyncMessage { seen_timestamp: Duration, }, + /// An execution proof has been received from the RPC + RpcExecutionProof { + sync_request_id: SyncRequestId, + peer_id: PeerId, + execution_proof: Option>, + seen_timestamp: Duration, + }, + /// A block with an unknown parent has been received. UnknownParentBlock(PeerId, Arc>, Hash256), @@ -183,6 +194,7 @@ pub enum BlockProcessType { SingleBlock { id: Id }, SingleBlob { id: Id }, SingleCustodyColumn(Id), + SingleExecutionProof { id: Id }, } impl BlockProcessType { @@ -190,7 +202,8 @@ impl BlockProcessType { match self { BlockProcessType::SingleBlock { id } | BlockProcessType::SingleBlob { id } - | BlockProcessType::SingleCustodyColumn(id) => *id, + | BlockProcessType::SingleCustodyColumn(id) + | BlockProcessType::SingleExecutionProof { id } => *id, } } } @@ -491,6 +504,9 @@ impl SyncManager { SyncRequestId::SingleBlob { id } => { self.on_single_blob_response(id, peer_id, RpcEvent::RPCError(error)) } + SyncRequestId::SingleExecutionProof { id } => { + self.on_single_execution_proof_response(id, peer_id, RpcEvent::RPCError(error)) + } SyncRequestId::DataColumnsByRoot(req_id) => { self.on_data_columns_by_root_response(req_id, peer_id, RpcEvent::RPCError(error)) } @@ -503,6 +519,8 @@ impl SyncManager { SyncRequestId::DataColumnsByRange(req_id) => { self.on_data_columns_by_range_response(req_id, peer_id, RpcEvent::RPCError(error)) } + SyncRequestId::ExecutionProofsByRange(req_id) => self + .on_execution_proofs_by_range_response(req_id, peer_id, RpcEvent::RPCError(error)), } } @@ -833,6 +851,17 @@ impl SyncManager { } => { self.rpc_data_column_received(sync_request_id, peer_id, data_column, seen_timestamp) } + SyncMessage::RpcExecutionProof { + sync_request_id, + peer_id, + execution_proof, + seen_timestamp, + } => self.rpc_execution_proof_received( + sync_request_id, + peer_id, + execution_proof, + seen_timestamp, + ), SyncMessage::UnknownParentBlock(peer_id, block, block_root) => { let block_slot = block.slot(); let parent_root = block.parent_root(); @@ -1186,6 +1215,31 @@ impl SyncManager { } } + fn rpc_execution_proof_received( + &mut self, + sync_request_id: SyncRequestId, + peer_id: PeerId, + execution_proof: Option>, + seen_timestamp: Duration, + ) { + match sync_request_id { + SyncRequestId::SingleExecutionProof { id } => self.on_single_execution_proof_response( + id, + peer_id, + RpcEvent::from_chunk(execution_proof, seen_timestamp), + ), + SyncRequestId::ExecutionProofsByRange(req_id) => self + .on_execution_proofs_by_range_response( + req_id, + peer_id, + RpcEvent::from_chunk(execution_proof, seen_timestamp), + ), + _ => { + crit!(%peer_id, "bad request id for execution_proof"); + } + } + } + fn on_single_blob_response( &mut self, id: SingleLookupReqId, @@ -1204,6 +1258,27 @@ impl SyncManager { } } + fn on_single_execution_proof_response( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + execution_proof: RpcEvent>, + ) { + if let Some(resp) = + self.network + .on_single_execution_proof_response(id, peer_id, execution_proof) + { + self.block_lookups + .on_download_response::( + id, + resp.map(|(value, 
seen_timestamp)| { + (value, PeerGroup::from_single(peer_id), seen_timestamp) + }), + &mut self.network, + ) + } + } + fn on_data_columns_by_root_response( &mut self, req_id: DataColumnsByRootRequestId, @@ -1286,6 +1361,28 @@ impl SyncManager { } } + /// Handles a response for an execution proofs by range request. + /// + /// Note: This is currently a stub. Execution proofs by range requests are not yet issued + /// during range sync. + fn on_execution_proofs_by_range_response( + &mut self, + id: ExecutionProofsByRangeRequestId, + peer_id: PeerId, + proof: RpcEvent>, + ) { + if let Some(resp) = self + .network + .on_execution_proofs_by_range_response(id, peer_id, proof) + { + self.on_range_components_response( + id.parent_request_id, + peer_id, + RangeBlockComponent::ExecutionProofs(id, resp), + ); + } + } + fn on_custody_by_root_result( &mut self, requester: CustodyRequester, diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 2e0c56db23f..65ae25bfd3c 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -17,26 +17,31 @@ use crate::sync::block_lookups::SingleLookupId; use crate::sync::block_sidecar_coupling::CouplingError; use crate::sync::network_context::requests::BlobsByRootSingleBlockRequest; use crate::sync::range_data_column_batch_request::RangeDataColumnBatchRequest; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; use custody::CustodyRequestResult; use fnv::FnvHashMap; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, DataColumnsByRangeRequest}; +use lighthouse_network::rpc::methods::{ + BlobsByRangeRequest, DataColumnsByRangeRequest, ExecutionProofsByRangeRequest, +}; use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError, RequestType}; pub use lighthouse_network::service::api_types::RangeRequestId; use lighthouse_network::service::api_types::{ AppRequestId, BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId, CustodyBackFillBatchRequestId, CustodyBackfillBatchId, CustodyId, CustodyRequester, DataColumnsByRangeRequestId, DataColumnsByRangeRequester, DataColumnsByRootRequestId, - DataColumnsByRootRequester, Id, SingleLookupReqId, SyncRequestId, + DataColumnsByRootRequester, ExecutionProofsByRangeRequestId, Id, SingleLookupReqId, + SyncRequestId, }; -use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource}; +use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Subnet}; use lighthouse_tracing::{SPAN_OUTGOING_BLOCK_BY_ROOT_REQUEST, SPAN_OUTGOING_RANGE_REQUEST}; use parking_lot::RwLock; pub use requests::LookupVerifyError; use requests::{ ActiveRequests, BlobsByRangeRequestItems, BlobsByRootRequestItems, BlocksByRangeRequestItems, BlocksByRootRequestItems, DataColumnsByRangeRequestItems, DataColumnsByRootRequestItems, + ExecutionProofsByRangeRequestItems, ExecutionProofsByRootRequestItems, + ExecutionProofsByRootSingleBlockRequest, }; #[cfg(test)] use slot_clock::SlotClock; @@ -52,7 +57,7 @@ use tracing::{Span, debug, debug_span, error, warn}; use types::blob_sidecar::FixedBlobSidecarList; use types::{ BlobSidecar, BlockImportSource, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, - ForkContext, Hash256, SignedBeaconBlock, Slot, + ExecutionProof, ForkContext, Hash256, 
SignedBeaconBlock, Slot, }; pub mod custody; @@ -72,6 +77,8 @@ macro_rules! new_range_request_span { /// Max retries for block components after which we fail the batch. pub const MAX_COLUMN_RETRIES: usize = 3; +/// Max retries for execution proofs after which we fail the batch. +pub const MAX_EXECUTION_PROOF_RETRIES: usize = 3; #[derive(Debug)] pub enum RpcEvent { @@ -117,6 +124,7 @@ pub enum RpcRequestSendError { pub enum NoPeerError { BlockPeer, CustodyPeer(ColumnIndex), + ExecutionProofPeer, } #[derive(Debug, PartialEq, Eq)] @@ -204,6 +212,9 @@ pub struct SyncNetworkContext { /// A mapping of active DataColumnsByRoot requests data_columns_by_root_requests: ActiveRequests>, + /// A mapping of active ExecutionProofsByRoot requests + execution_proofs_by_root_requests: + ActiveRequests>, /// A mapping of active BlocksByRange requests blocks_by_range_requests: ActiveRequests>, @@ -213,6 +224,9 @@ pub struct SyncNetworkContext { /// A mapping of active DataColumnsByRange requests data_columns_by_range_requests: ActiveRequests>, + /// A mapping of active ExecutionProofsByRange requests + execution_proofs_by_range_requests: + ActiveRequests, /// Mapping of active custody column requests for a block root custody_by_root_requests: FnvHashMap>, @@ -250,6 +264,17 @@ pub enum RangeBlockComponent { DataColumnsByRangeRequestId, RpcResponseResult>>>, ), + ExecutionProofs( + ExecutionProofsByRangeRequestId, + RpcResponseResult>>, + ), +} + +struct RangeExecutionProofInputs { + min_proofs_required: usize, + proofs_peer: PeerId, + proofs: Vec>, + attempt: usize, } #[cfg(test)] @@ -295,9 +320,11 @@ impl SyncNetworkContext { blocks_by_root_requests: ActiveRequests::new("blocks_by_root"), blobs_by_root_requests: ActiveRequests::new("blobs_by_root"), data_columns_by_root_requests: ActiveRequests::new("data_columns_by_root"), + execution_proofs_by_root_requests: ActiveRequests::new("execution_proofs_by_root"), blocks_by_range_requests: ActiveRequests::new("blocks_by_range"), blobs_by_range_requests: ActiveRequests::new("blobs_by_range"), data_columns_by_range_requests: ActiveRequests::new("data_columns_by_range"), + execution_proofs_by_range_requests: ActiveRequests::new("execution_proofs_by_range"), custody_by_root_requests: <_>::default(), components_by_range_requests: FnvHashMap::default(), custody_backfill_data_column_batch_requests: FnvHashMap::default(), @@ -323,9 +350,11 @@ impl SyncNetworkContext { blocks_by_root_requests, blobs_by_root_requests, data_columns_by_root_requests, + execution_proofs_by_root_requests, blocks_by_range_requests, blobs_by_range_requests, data_columns_by_range_requests, + execution_proofs_by_range_requests, // custody_by_root_requests is a meta request of data_columns_by_root_requests custody_by_root_requests: _, // components_by_range_requests is a meta request of various _by_range requests @@ -349,6 +378,10 @@ impl SyncNetworkContext { .active_requests_of_peer(peer_id) .into_iter() .map(|req_id| SyncRequestId::DataColumnsByRoot(*req_id)); + let execution_proofs_by_root_ids = execution_proofs_by_root_requests + .active_requests_of_peer(peer_id) + .into_iter() + .map(|id| SyncRequestId::SingleExecutionProof { id: *id }); let blocks_by_range_ids = blocks_by_range_requests .active_requests_of_peer(peer_id) .into_iter() @@ -361,12 +394,18 @@ impl SyncNetworkContext { .active_requests_of_peer(peer_id) .into_iter() .map(|req_id| SyncRequestId::DataColumnsByRange(*req_id)); + let execution_proofs_by_range_ids = execution_proofs_by_range_requests + .active_requests_of_peer(peer_id) + 
.into_iter() + .map(|req_id| SyncRequestId::ExecutionProofsByRange(*req_id)); blocks_by_root_ids .chain(blobs_by_root_ids) .chain(data_column_by_root_ids) + .chain(execution_proofs_by_root_ids) .chain(blocks_by_range_ids) .chain(blobs_by_range_ids) .chain(data_column_by_range_ids) + .chain(execution_proofs_by_range_ids) .collect() } @@ -420,9 +459,11 @@ impl SyncNetworkContext { blocks_by_root_requests, blobs_by_root_requests, data_columns_by_root_requests, + execution_proofs_by_root_requests, blocks_by_range_requests, blobs_by_range_requests, data_columns_by_range_requests, + execution_proofs_by_range_requests, // custody_by_root_requests is a meta request of data_columns_by_root_requests custody_by_root_requests: _, // components_by_range_requests is a meta request of various _by_range requests @@ -442,9 +483,11 @@ impl SyncNetworkContext { .iter_request_peers() .chain(blobs_by_root_requests.iter_request_peers()) .chain(data_columns_by_root_requests.iter_request_peers()) + .chain(execution_proofs_by_root_requests.iter_request_peers()) .chain(blocks_by_range_requests.iter_request_peers()) .chain(blobs_by_range_requests.iter_request_peers()) .chain(data_columns_by_range_requests.iter_request_peers()) + .chain(execution_proofs_by_range_requests.iter_request_peers()) { *active_request_count_by_peer.entry(peer_id).or_default() += 1; } @@ -534,6 +577,83 @@ impl SyncNetworkContext { Ok(()) } + /// Retries execution proofs by range by requesting the proofs again from a different peer. + pub fn retry_execution_proofs_by_range( + &mut self, + id: Id, + peers: &HashSet, + peers_to_deprioritize: &HashSet, + request: BlocksByRangeRequest, + ) -> Result<(), String> { + let Some((requester, parent_request_span)) = self + .components_by_range_requests + .iter() + .find_map(|(key, value)| { + if key.id == id { + Some((key.requester, value.request_span.clone())) + } else { + None + } + }) + else { + return Err("request id not present".to_string()); + }; + + let active_request_count_by_peer = self.active_request_count_by_peer(); + + let proof_peer = self + .select_execution_proofs_peer( + peers, + &active_request_count_by_peer, + peers_to_deprioritize, + ) + .ok_or_else(|| "no zkvm-enabled peer available for execution proofs".to_string())?; + + let epoch = Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch()); + let min_proofs_required = self.chain.spec.zkvm_min_proofs_required().ok_or_else(|| { + "zkvm enabled but min proofs requirement is not configured".to_string() + })?; + if !self.chain.spec.is_zkvm_enabled_for_epoch(epoch) { + return Err("execution proofs retry requested for pre-zkvm epoch".to_string()); + } + + debug!( + id, + ?requester, + ?proof_peer, + "Retrying execution proofs by range from a different peer" + ); + + let id = ComponentsByRangeRequestId { id, requester }; + let req_id = self + .send_execution_proofs_by_range_request( + proof_peer, + ExecutionProofsByRangeRequest { + start_slot: *request.start_slot(), + count: *request.count(), + }, + id, + new_range_request_span!( + self, + "outgoing_proofs_by_range_retry", + parent_request_span.clone(), + proof_peer + ), + ) + .map_err(|e| format!("{:?}", e))?; + + let Some(range_request) = self.components_by_range_requests.get_mut(&id) else { + return Err( + "retrying execution proofs for range request that does not exist".to_string(), + ); + }; + + range_request + .reinsert_execution_proofs_request(req_id, min_proofs_required) + .map_err(|e| format!("{e:?}"))?; + Ok(()) + } + /// A blocks by range request sent by the range 
sync algorithm pub fn block_components_by_range_request( &mut self, @@ -659,6 +779,43 @@ impl SyncNetworkContext { .transpose()?; let epoch = Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch()); + + // Request execution proofs only if zkvm is enabled for this epoch. + let execution_proofs_request = if self.chain.spec.is_zkvm_enabled_for_epoch(epoch) { + let min_proofs_required = + self.chain.spec.zkvm_min_proofs_required().ok_or_else(|| { + RpcRequestSendError::InternalError( + "zkvm enabled but min proofs requirement is not configured".to_string(), + ) + })?; + + // Find a zkvm-enabled peer from block_peers or column_peers + let zkvm_peer = self.find_zkvm_enabled_peer(block_peers, column_peers); + + if let Some(proofs_peer) = zkvm_peer { + let proofs_request = ExecutionProofsByRangeRequest { + start_slot: *request.start_slot(), + count: *request.count(), + }; + let req_id = self.send_execution_proofs_by_range_request( + proofs_peer, + proofs_request, + id, + new_range_request_span!( + self, + "outgoing_proofs_by_range", + range_request_span.clone(), + proofs_peer + ), + )?; + Some((req_id, min_proofs_required)) + } else { + return Err(RpcRequestSendError::NoPeer(NoPeerError::ExecutionProofPeer)); + } + } else { + None + }; + let info = RangeBlockComponentsRequest::new( blocks_req_id, blobs_req_id, @@ -668,6 +825,7 @@ impl SyncNetworkContext { self.chain.sampling_columns_for_epoch(epoch).to_vec(), ) }), + execution_proofs_request, range_request_span, ); self.components_by_range_requests.insert(id, info); @@ -730,6 +888,33 @@ impl SyncNetworkContext { Ok(columns_to_request_by_peer) } + fn select_execution_proofs_peer( + &self, + peers: &HashSet, + active_request_count_by_peer: &HashMap, + peers_to_deprioritize: &HashSet, + ) -> Option { + let peers_db = self.network_globals().peers.read(); + peers + .iter() + .filter(|peer| { + peers_db + .peer_info(peer) + .map(|info| info.on_subnet_metadata(&Subnet::ExecutionProof)) + .unwrap_or(false) + }) + .map(|peer| { + ( + peers_to_deprioritize.contains(peer), + active_request_count_by_peer.get(peer).copied().unwrap_or(0), + rand::random::(), + peer, + ) + }) + .min() + .map(|(_, _, _, peer)| *peer) + } + /// Received a blocks by range or blobs by range response for a request that couples blocks ' /// and blobs. pub fn range_block_component_response( @@ -737,13 +922,14 @@ impl SyncNetworkContext { id: ComponentsByRangeRequestId, range_block_component: RangeBlockComponent, ) -> Option>, RpcResponseError>> { - let Entry::Occupied(mut entry) = self.components_by_range_requests.entry(id) else { - metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["range_blocks"]); - return None; - }; - - if let Err(e) = { - let request = entry.get_mut(); + let add_result = { + let Some(request) = self.components_by_range_requests.get_mut(&id) else { + metrics::inc_counter_vec( + &metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, + &["range_blocks"], + ); + return None; + }; match range_block_component { RangeBlockComponent::Block(req_id, resp) => resp.and_then(|(blocks, _)| { request.add_blocks(req_id, blocks).map_err(|e| { @@ -770,14 +956,104 @@ impl SyncNetworkContext { }) }) } + RangeBlockComponent::ExecutionProofs(req_id, resp) => { + let expects_execution_proofs = request.expects_execution_proofs(); + // Handle execution proofs response, treating UnsupportedProtocol as an error + // if proofs are required. 
+ let proofs = match resp { + Ok((proofs, _)) => proofs, + Err(RpcResponseError::RpcError(RPCError::UnsupportedProtocol)) + if expects_execution_proofs => + { + return Some(Err(RpcResponseError::BlockComponentCouplingError( + CouplingError::ExecutionProofPeerFailure { + error: "Peer doesn't support execution_proofs_by_range" + .to_string(), + peer: req_id.peer, + exceeded_retries: false, + }, + ))); + } + Err(RpcResponseError::RpcError(RPCError::UnsupportedProtocol)) => { + debug!( + req_id = ?req_id, + "Peer doesn't support execution_proofs_by_range, treating as empty response" + ); + vec![] + } + Err(e) => return Some(Err(e)), + }; + request.add_execution_proofs(req_id, proofs).map_err(|e| { + RpcResponseError::BlockComponentCouplingError(CouplingError::InternalError( + e, + )) + }) + } } - } { - entry.remove(); + }; + + if let Err(e) = add_result { + self.components_by_range_requests.remove(&id); return Some(Err(e)); } - let range_req = entry.get_mut(); - if let Some(blocks_result) = range_req.responses(&self.chain.spec) { + let (blocks_result, min_proofs_required, proofs_peer, proofs, proofs_attempt) = { + let Some(range_req) = self.components_by_range_requests.get_mut(&id) else { + metrics::inc_counter_vec( + &metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, + &["range_blocks"], + ); + return None; + }; + let blocks_result = range_req.responses(&self.chain.spec); + let min_proofs_required = range_req.min_execution_proofs_required(); + let proofs_peer = range_req.execution_proofs_peer(); + let proofs = range_req.get_execution_proofs().unwrap_or_default(); + let proofs_attempt = range_req.execution_proofs_attempt().unwrap_or(0); + ( + blocks_result, + min_proofs_required, + proofs_peer, + proofs, + proofs_attempt, + ) + }; + + if let Some(Ok(blocks)) = &blocks_result + && let Some(min_proofs_required) = min_proofs_required + { + let Some(proofs_peer) = proofs_peer else { + self.components_by_range_requests.remove(&id); + return Some(Err(RpcResponseError::BlockComponentCouplingError( + CouplingError::InternalError( + "execution proofs request completed without a peer".to_string(), + ), + ))); + }; + let proof_inputs = RangeExecutionProofInputs { + min_proofs_required, + proofs_peer, + proofs, + attempt: proofs_attempt, + }; + if let Err(err) = self.process_range_execution_proofs(proof_inputs, blocks) { + let remove_entry = !matches!( + err, + RpcResponseError::BlockComponentCouplingError( + CouplingError::ExecutionProofPeerFailure { + exceeded_retries: false, + .. + } + ) + ); + if remove_entry { + self.components_by_range_requests.remove(&id); + } + return Some(Err(err)); + } + } + + if let Some(blocks_result) = blocks_result { if let Err(CouplingError::DataColumnPeerFailure { error, faulty_peers: _, @@ -787,16 +1063,16 @@ impl SyncNetworkContext { // Remove the entry if it's a peer failure **and** retry counter is exceeded if *exceeded_retries { debug!( - entry=?entry.key(), + entry = ?id, msg = error, "Request exceeded max retries, failing batch" ); - entry.remove(); - }; + self.components_by_range_requests.remove(&id); + } } else { - // also remove the entry only if it coupled successfully + // Also remove the entry only if it coupled successfully // or if it isn't a column peer failure. 
- entry.remove(); + self.components_by_range_requests.remove(&id); } // If the request is finished, dequeue everything Some(blocks_result.map_err(RpcResponseError::BlockComponentCouplingError)) @@ -805,6 +1081,138 @@ impl SyncNetworkContext { } } + fn process_range_execution_proofs( + &self, + inputs: RangeExecutionProofInputs, + blocks: &[RpcBlock], + ) -> Result<(), RpcResponseError> { + let RangeExecutionProofInputs { + min_proofs_required, + proofs_peer, + proofs, + attempt, + } = inputs; + let exceeded_retries = attempt >= MAX_EXECUTION_PROOF_RETRIES; + let mut proofs_by_root: HashMap>> = HashMap::new(); + for proof in proofs { + proofs_by_root + .entry(proof.block_root) + .or_default() + .push(proof); + } + + let proof_error = |error: String| { + RpcResponseError::BlockComponentCouplingError( + CouplingError::ExecutionProofPeerFailure { + error, + peer: proofs_peer, + exceeded_retries, + }, + ) + }; + + for block in blocks { + let block_root = block.block_root(); + if !self.chain.spec.is_zkvm_enabled_for_epoch(block.epoch()) { + proofs_by_root.remove(&block_root); + continue; + } + let existing_count = self + .chain + .data_availability_checker + .get_existing_proof_ids(&block_root) + .map(|ids| ids.len()) + .unwrap_or(0); + + let proofs_for_block = proofs_by_root.remove(&block_root).unwrap_or_default(); + if existing_count >= min_proofs_required { + if !proofs_for_block.is_empty() { + debug!( + ?block_root, + existing_count, + min_proofs_required, + "Ignoring execution proofs because cache already satisfies requirement" + ); + } + continue; + } + + let payload_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|payload| payload.execution_payload_ref().block_hash()) + .ok_or_else(|| { + RpcResponseError::BlockComponentCouplingError(CouplingError::InternalError( + "execution payload missing for zkvm proofs".to_string(), + )) + })?; + + let mut verified_proofs = Vec::new(); + for proof in proofs_for_block { + if proof.block_root != block_root { + return Err(proof_error(format!( + "proof block_root mismatch: expected {block_root:?} got {:?}", + proof.block_root + ))); + } + if proof.block_hash != payload_hash { + return Err(proof_error(format!( + "proof execution payload hash mismatch for {block_root:?}" + ))); + } + match self + .chain + .data_availability_checker + .verify_execution_proof_for_gossip(&proof) + { + Ok(true) => verified_proofs.push((*proof).clone()), + Ok(false) => { + return Err(proof_error(format!( + "execution proof verification failed for {block_root:?}" + ))); + } + Err(e) => { + return Err(proof_error(format!( + "execution proof verification error for {block_root:?}: {e:?}" + ))); + } + } + } + + if !verified_proofs.is_empty() + && let Err(e) = self + .chain + .data_availability_checker + .put_verified_execution_proofs(block_root, verified_proofs) + { + return Err(proof_error(format!( + "failed to store execution proofs for {block_root:?}: {e:?}" + ))); + } + + let updated_count = self + .chain + .data_availability_checker + .get_existing_proof_ids(&block_root) + .map(|ids| ids.len()) + .unwrap_or(0); + if updated_count < min_proofs_required { + return Err(proof_error(format!( + "missing execution proofs for {block_root:?}: have {updated_count}, need {min_proofs_required}" + ))); + } + } + + if !proofs_by_root.is_empty() { + let unknown_roots: Vec<_> = proofs_by_root.keys().collect(); + debug!(?unknown_roots, "Execution proofs for unknown block roots"); + } + + Ok(()) + } + /// Request block of `block_root` if necessary by checking: /// - If 
the da_checker has a pending block from gossip or a previous request /// @@ -1026,6 +1434,111 @@ impl SyncNetworkContext { Ok(LookupRequestResult::RequestSent(id.req_id)) } + /// Request execution proofs for `block_root` + pub fn execution_proof_lookup_request( + &mut self, + lookup_id: SingleLookupId, + lookup_peers: Arc>>, + block_root: Hash256, + min_proofs_required: usize, + ) -> Result { + let active_request_count_by_peer = self.active_request_count_by_peer(); + let peers_db = self.network_globals().peers.read(); + + // Filter to only zkvm-enabled peers + let Some(peer_id) = lookup_peers + .read() + .iter() + .filter(|peer| { + peers_db + .peer_info(peer) + .map(|info| info.on_subnet_metadata(&Subnet::ExecutionProof)) + .unwrap_or(false) + }) + .map(|peer| { + ( + // Prefer peers with less overall requests + active_request_count_by_peer.get(peer).copied().unwrap_or(0), + // Random factor to break ties, otherwise the PeerID breaks ties + rand::random::(), + peer, + ) + }) + .min() + .map(|(_, _, peer)| *peer) + else { + return Ok(LookupRequestResult::Pending("no zkvm-enabled peers")); + }; + + drop(peers_db); + + // Query DA checker for proofs we already have + let already_have = self + .chain + .data_availability_checker + .get_existing_proof_ids(&block_root) + .unwrap_or_default(); + + let current_count = already_have.len(); + + // Calculate how many more proofs we need + if current_count >= min_proofs_required { + // Already have enough proofs, no request needed + return Ok(LookupRequestResult::NoRequestNeeded( + "already have minimum proofs", + )); + } + + let count_needed = min_proofs_required - current_count; + + let id = SingleLookupReqId { + lookup_id, + req_id: self.next_id(), + }; + + let request = ExecutionProofsByRootSingleBlockRequest { + block_root, + already_have: already_have.clone(), + count_needed, + }; + + let network_request = RequestType::ExecutionProofsByRoot( + request + .clone() + .into_request() + .map_err(RpcRequestSendError::InternalError)?, + ); + + self.network_send + .send(NetworkMessage::SendRequest { + peer_id, + request: network_request, + app_request_id: AppRequestId::Sync(SyncRequestId::SingleExecutionProof { id }), + }) + .map_err(|_| RpcRequestSendError::InternalError("network send error".to_owned()))?; + + debug!( + method = "ExecutionProofsByRoot", + ?block_root, + already_have_count = already_have.len(), + count_needed, + peer = %peer_id, + %id, + "Sync RPC request sent" + ); + + self.execution_proofs_by_root_requests.insert( + id, + peer_id, + // Expect peer to provide all requested proofs - if they can't, penalize + true, + ExecutionProofsByRootRequestItems::new(request), + Span::none(), + ); + + Ok(LookupRequestResult::RequestSent(id.req_id)) + } + /// Request to send a single `data_columns_by_root` request to the network. pub fn data_column_lookup_request( &mut self, @@ -1284,6 +1797,75 @@ impl SyncNetworkContext { Ok((id, requested_columns)) } + /// Find a zkvm-enabled peer from the given peer sets. + /// + /// Peers advertise zkvm support via their ENR's zkvm flag. This function + /// checks both block_peers and column_peers to find any peer that supports + /// the execution_proofs_by_range protocol. 
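+    /// Returns `None` if no connected peer advertises execution proof support, in which case
+    /// the caller fails the range request with `NoPeerError::ExecutionProofPeer`.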
+ fn find_zkvm_enabled_peer( + &self, + block_peers: &HashSet, + column_peers: &HashSet, + ) -> Option { + let peers_db = self.network_globals().peers.read(); + + // First try block_peers, then column_peers + let all_peers = block_peers.iter().chain(column_peers.iter()); + + for peer in all_peers { + if peers_db + .peer_info(peer) + .map(|info| info.on_subnet_metadata(&Subnet::ExecutionProof)) + .unwrap_or(false) + { + return Some(*peer); + } + } + + None + } + + fn send_execution_proofs_by_range_request( + &mut self, + peer_id: PeerId, + request: ExecutionProofsByRangeRequest, + parent_request_id: ComponentsByRangeRequestId, + request_span: Span, + ) -> Result { + let id = ExecutionProofsByRangeRequestId { + id: self.next_id(), + parent_request_id, + peer: peer_id, + }; + + self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request: RequestType::ExecutionProofsByRange(request.clone()), + app_request_id: AppRequestId::Sync(SyncRequestId::ExecutionProofsByRange(id)), + }) + .map_err(|_| RpcRequestSendError::InternalError("network send error".to_owned()))?; + + debug!( + method = "ExecutionProofsByRange", + slots = request.count, + epoch = %Slot::new(request.start_slot).epoch(T::EthSpec::slots_per_epoch()), + peer = %peer_id, + %id, + "Sync RPC request sent" + ); + + self.execution_proofs_by_range_requests.insert( + id, + peer_id, + // false = do not enforce max_requests are returned for *_by_range methods. We don't + // know how many proofs to expect per block. + false, + ExecutionProofsByRangeRequestItems::new(request), + request_span, + ); + Ok(id) + } + pub fn is_execution_engine_online(&self) -> bool { self.execution_engine_state == EngineState::Online } @@ -1460,6 +2042,20 @@ impl SyncNetworkContext { self.on_rpc_response_result(id, "BlobsByRoot", resp, peer_id, |_| 1) } + pub(crate) fn on_single_execution_proof_response( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + rpc_event: RpcEvent>, + ) -> Option>>> { + let resp = self + .execution_proofs_by_root_requests + .on_response(id, rpc_event); + self.on_rpc_response_result(id, "ExecutionProofsByRoot", resp, peer_id, |proofs| { + proofs.len() + }) + } + #[allow(clippy::type_complexity)] pub(crate) fn on_data_columns_by_root_response( &mut self, @@ -1508,6 +2104,20 @@ impl SyncNetworkContext { self.on_rpc_response_result(id, "DataColumnsByRange", resp, peer_id, |d| d.len()) } + /// Handles a response for an execution proofs by range request. 
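+    /// Returns `Some` once the underlying request has completed (with the accumulated proofs
+    /// or an error) and `None` while further responses are still expected.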
+ #[allow(clippy::type_complexity)] + pub(crate) fn on_execution_proofs_by_range_response( + &mut self, + id: ExecutionProofsByRangeRequestId, + peer_id: PeerId, + rpc_event: RpcEvent>, + ) -> Option>>> { + let resp = self + .execution_proofs_by_range_requests + .on_response(id, rpc_event); + self.on_rpc_response_result(id, "ExecutionProofsByRange", resp, peer_id, |p| p.len()) + } + fn on_rpc_response_result usize>( &mut self, id: I, @@ -1657,6 +2267,36 @@ impl SyncNetworkContext { }) } + pub fn send_execution_proofs_for_processing( + &self, + id: Id, + block_root: Hash256, + proofs: Vec>, + seen_timestamp: Duration, + ) -> Result<(), SendErrorProcessor> { + let beacon_processor = self + .beacon_processor_if_enabled() + .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; + + debug!(?block_root, ?id, "Sending execution proofs for processing"); + // Lookup sync event safety: If `beacon_processor.send_rpc_execution_proofs` returns Ok() sync + // must receive a single `SyncMessage::BlockComponentProcessed` event with this process type + beacon_processor + .send_rpc_execution_proofs( + block_root, + proofs, + seen_timestamp, + BlockProcessType::SingleExecutionProof { id }, + ) + .map_err(|e| { + error!( + error = ?e, + "Failed to send sync execution proofs to processor" + ); + SendErrorProcessor::SendError + }) + } + pub fn send_custody_columns_for_processing( &self, _id: Id, diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 3183c06d762..238e551659d 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -5,7 +5,7 @@ use fnv::FnvHashMap; use lighthouse_network::PeerId; use strum::IntoStaticStr; use tracing::Span; -use types::{Hash256, Slot}; +use types::{ExecutionProofId, Hash256, Slot}; pub use blobs_by_range::BlobsByRangeRequestItems; pub use blobs_by_root::{BlobsByRootRequestItems, BlobsByRootSingleBlockRequest}; @@ -15,6 +15,10 @@ pub use data_columns_by_range::DataColumnsByRangeRequestItems; pub use data_columns_by_root::{ DataColumnsByRootRequestItems, DataColumnsByRootSingleBlockRequest, }; +pub use execution_proofs_by_range::ExecutionProofsByRangeRequestItems; +pub use execution_proofs_by_root::{ + ExecutionProofsByRootRequestItems, ExecutionProofsByRootSingleBlockRequest, +}; use crate::metrics; @@ -26,6 +30,8 @@ mod blocks_by_range; mod blocks_by_root; mod data_columns_by_range; mod data_columns_by_root; +mod execution_proofs_by_range; +mod execution_proofs_by_root; #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupVerifyError { @@ -34,8 +40,10 @@ pub enum LookupVerifyError { UnrequestedBlockRoot(Hash256), UnrequestedIndex(u64), UnrequestedSlot(Slot), + UnrequestedProof(ExecutionProofId), InvalidInclusionProof, DuplicatedData(Slot, u64), + DuplicatedProofIDs(ExecutionProofId), InternalError(String), } diff --git a/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_range.rs b/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_range.rs new file mode 100644 index 00000000000..179f08a6547 --- /dev/null +++ b/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_range.rs @@ -0,0 +1,54 @@ +use super::{ActiveRequestItems, LookupVerifyError}; +use lighthouse_network::rpc::methods::ExecutionProofsByRangeRequest; +use std::sync::Arc; +use types::{ExecutionProof, Slot}; + +/// Accumulates results of an execution_proofs_by_range request. 
Only returns items after receiving +/// the stream termination. +pub struct ExecutionProofsByRangeRequestItems { + request: ExecutionProofsByRangeRequest, + items: Vec>, +} + +impl ExecutionProofsByRangeRequestItems { + pub fn new(request: ExecutionProofsByRangeRequest) -> Self { + Self { + request, + items: vec![], + } + } +} + +impl ActiveRequestItems for ExecutionProofsByRangeRequestItems { + type Item = Arc; + + fn add(&mut self, proof: Self::Item) -> Result { + let proof_slot = proof.slot; + + // Verify the proof is within the requested slot range + if proof_slot < Slot::new(self.request.start_slot) + || proof_slot >= Slot::new(self.request.start_slot + self.request.count) + { + return Err(LookupVerifyError::UnrequestedSlot(proof_slot)); + } + + // Check for duplicate proofs (same slot and proof_id) + if self + .items + .iter() + .any(|existing| existing.slot == proof_slot && existing.proof_id == proof.proof_id) + { + return Err(LookupVerifyError::DuplicatedProofIDs(proof.proof_id)); + } + + self.items.push(proof); + + // We don't know exactly how many proofs to expect (depends on block content), + // so we never return true here - rely on stream termination + Ok(false) + } + + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) + } +} diff --git a/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs b/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs new file mode 100644 index 00000000000..257d6e1a311 --- /dev/null +++ b/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs @@ -0,0 +1,68 @@ +use lighthouse_network::rpc::methods::ExecutionProofsByRootRequest; +use std::sync::Arc; +use types::{EthSpec, ExecutionProof, ExecutionProofId, Hash256}; + +use super::{ActiveRequestItems, LookupVerifyError}; + +#[derive(Debug, Clone)] +pub struct ExecutionProofsByRootSingleBlockRequest { + pub block_root: Hash256, + pub already_have: Vec, + pub count_needed: usize, +} + +impl ExecutionProofsByRootSingleBlockRequest { + pub fn into_request(self) -> Result { + ExecutionProofsByRootRequest::new(self.block_root, self.already_have, self.count_needed) + .map_err(|e| e.to_string()) + } +} + +pub struct ExecutionProofsByRootRequestItems { + request: ExecutionProofsByRootSingleBlockRequest, + items: Vec>, + _phantom: std::marker::PhantomData, +} + +impl ExecutionProofsByRootRequestItems { + pub fn new(request: ExecutionProofsByRootSingleBlockRequest) -> Self { + Self { + request, + items: vec![], + _phantom: std::marker::PhantomData, + } + } +} + +impl ActiveRequestItems for ExecutionProofsByRootRequestItems { + type Item = Arc; + + /// Appends a proof to this multi-item request. 
+ /// Note: This is very similar to `DataColumnsByRootSingleBlockRequest` + fn add(&mut self, proof: Self::Item) -> Result { + let block_root = proof.block_root; + if self.request.block_root != block_root { + return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); + } + + // Verify proof is not in the already_have list + // We should not receive proofs we already have + if self.request.already_have.contains(&proof.proof_id) { + return Err(LookupVerifyError::UnrequestedProof(proof.proof_id)); + } + + // Check for duplicate proof IDs + if self.items.iter().any(|p| p.proof_id == proof.proof_id) { + return Err(LookupVerifyError::DuplicatedProofIDs(proof.proof_id)); + } + + self.items.push(proof); + + // We've received all requested proofs when we have count_needed proofs + Ok(self.items.len() >= self.request.count_needed) + } + + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) + } +} diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 4ce10e23ca1..251dff9ffb7 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -6,12 +6,14 @@ use crate::sync::batch::{ BatchConfig, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState, }; use crate::sync::block_sidecar_coupling::CouplingError; -use crate::sync::network_context::{RangeRequestId, RpcRequestSendError, RpcResponseError}; +use crate::sync::network_context::{ + NoPeerError, RangeRequestId, RpcRequestSendError, RpcResponseError, +}; use crate::sync::{BatchProcessResult, network_context::SyncNetworkContext}; use beacon_chain::BeaconChainTypes; use beacon_chain::block_verification_types::RpcBlock; use lighthouse_network::service::api_types::Id; -use lighthouse_network::{PeerAction, PeerId}; +use lighthouse_network::{PeerAction, PeerId, Subnet}; use lighthouse_tracing::SPAN_SYNCING_CHAIN; use logging::crit; use std::collections::{BTreeMap, HashSet, btree_map::Entry}; @@ -463,6 +465,12 @@ impl SyncingChain { // target when there is no sampling peers available. This is a valid state and should not // return an error. return Ok(KeepChain); + } else if !self.good_peers_on_execution_proof_subnet(self.processing_target, network) { + debug!( + src = "process_completed_batches", + "Waiting for zkvm-enabled peers for execution proofs" + ); + return Ok(KeepChain); } else { // NOTE: It is possible that the batch doesn't exist for the processing id. This can happen // when we complete a batch and attempt to download a new batch but there are: @@ -944,6 +952,31 @@ impl SyncingChain { CouplingError::BlobPeerFailure(msg) => { tracing::debug!(?batch_id, msg, "Blob peer failure"); } + CouplingError::ExecutionProofPeerFailure { + error, + peer, + exceeded_retries, + } => { + tracing::debug!(?batch_id, ?peer, error, "Execution proof peer failure"); + if !*exceeded_retries { + if let BatchOperationOutcome::Failed { blacklist } = + batch.downloading_to_awaiting_download()? 
+ { + return Err(RemoveChain::ChainFailed { + blacklist, + failing_batch: batch_id, + }); + } + let mut failed_peers = HashSet::new(); + failed_peers.insert(*peer); + return self.retry_execution_proof_batch( + network, + batch_id, + request_id, + failed_peers, + ); + } + } CouplingError::InternalError(msg) => { tracing::error!(?batch_id, msg, "Block components coupling internal error"); } @@ -1020,6 +1053,13 @@ impl SyncingChain { for batch_id in awaiting_downloads { if self.good_peers_on_sampling_subnets(batch_id, network) { + if !self.good_peers_on_execution_proof_subnet(batch_id, network) { + debug!( + src = "attempt_send_awaiting_download_batches", + "Waiting for zkvm-enabled peers for execution proofs" + ); + continue; + } self.send_batch(network, batch_id)?; } else { debug!( @@ -1083,6 +1123,13 @@ impl SyncingChain { return Ok(KeepChain); } Err(e) => match e { + RpcRequestSendError::NoPeer(NoPeerError::ExecutionProofPeer) => { + debug!( + %batch_id, + "Waiting for zkvm-enabled peers for execution proofs" + ); + return Ok(KeepChain); + } // TODO(das): Handle the NoPeer case explicitly and don't drop the batch. For // sync to work properly it must be okay to have "stalled" batches in // AwaitingDownload state. Currently it will error with invalid state if @@ -1163,6 +1210,45 @@ impl SyncingChain { Ok(KeepChain) } + /// Retries execution proof requests within the batch by creating a new proofs request. + fn retry_execution_proof_batch( + &mut self, + network: &mut SyncNetworkContext, + batch_id: BatchId, + id: Id, + mut failed_peers: HashSet, + ) -> ProcessingResult { + let _guard = self.span.clone().entered(); + debug!(%batch_id, %id, ?failed_peers, "Retrying execution proof requests"); + if let Some(batch) = self.batches.get_mut(&batch_id) { + failed_peers.extend(&batch.failed_peers()); + let req = batch.to_blocks_by_range_request().0; + + let synced_peers = network + .network_globals() + .peers + .read() + .synced_peers_for_epoch(batch_id) + .cloned() + .collect::>(); + + match network.retry_execution_proofs_by_range(id, &synced_peers, &failed_peers, req) { + Ok(()) => { + batch.start_downloading(id)?; + debug!( + ?batch_id, + id, "Retried execution proof requests from other peers" + ); + return Ok(KeepChain); + } + Err(e) => { + debug!(?batch_id, id, e, "Failed to retry execution proof batch"); + } + } + } + Ok(KeepChain) + } + /// Returns true if this chain is currently syncing. pub fn is_syncing(&self) -> bool { match self.state { @@ -1206,6 +1292,13 @@ impl SyncingChain { ); return Ok(KeepChain); } + if !self.good_peers_on_execution_proof_subnet(epoch, network) { + debug!( + src = "request_batches_optimistic", + "Waiting for zkvm-enabled peers for execution proofs" + ); + return Ok(KeepChain); + } if let Entry::Vacant(entry) = self.batches.entry(epoch) { let batch_type = network.batch_type(epoch); @@ -1252,6 +1345,27 @@ impl SyncingChain { } } + /// Returns true if there is at least one zkvm-enabled peer for execution proofs. 
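+    /// Always returns true for epochs where zkvm is not enabled, since no execution proofs
+    /// are required for those batches.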
+ fn good_peers_on_execution_proof_subnet( + &self, + epoch: Epoch, + network: &SyncNetworkContext, + ) -> bool { + if !network.chain.spec.is_zkvm_enabled_for_epoch(epoch) { + return true; + } + + let peers_db = network.network_globals().peers.read(); + let synced_peers: HashSet<_> = peers_db.synced_peers_for_epoch(epoch).cloned().collect(); + + self.peers.iter().chain(synced_peers.iter()).any(|peer| { + peers_db + .peer_info(peer) + .map(|info| info.on_subnet_metadata(&Subnet::ExecutionProof)) + .unwrap_or(false) + }) + } + /// Creates the next required batch from the chain. If there are no more batches required, /// `false` is returned. fn include_next_batch(&mut self, network: &mut SyncNetworkContext) -> Option { @@ -1294,6 +1408,13 @@ impl SyncingChain { ); return None; } + if !self.good_peers_on_execution_proof_subnet(self.to_be_downloaded, network) { + debug!( + src = "include_next_batch", + "Waiting for zkvm-enabled peers for execution proofs" + ); + return None; + } // If no batch needs a retry, attempt to send the batch of the next epoch to download let next_batch_id = self.to_be_downloaded; diff --git a/beacon_node/network/src/sync/tests/execution_proof_tests.rs b/beacon_node/network/src/sync/tests/execution_proof_tests.rs new file mode 100644 index 00000000000..32f251adccc --- /dev/null +++ b/beacon_node/network/src/sync/tests/execution_proof_tests.rs @@ -0,0 +1,458 @@ +use super::*; +use crate::sync::block_lookups::common::ResponseType; +use lighthouse_network::rpc::{RPCError, RpcErrorResponse}; +use lighthouse_network::service::api_types::SyncRequestId; +use types::{ExecutionBlockHash, ExecutionProof, ExecutionProofId, Hash256, Slot}; + +/// Test successful execution proof fetch and verification +#[test] +fn test_proof_lookup_happy_path() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_zkvm_peer(); + + // Get execution payload hash from the block + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + + // Trigger the unknown block (which should trigger proof request) + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + + // Expect block request + let block_id = rig.expect_block_lookup_request(block_root); + + // Send the block + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + // Now expect proof request + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Send all requested proofs (minimal spec requires 2) + let proof_ids = vec![ + ExecutionProofId::new(0).unwrap(), + ExecutionProofId::new(1).unwrap(), + ]; + rig.complete_single_lookup_proof_download(proof_id, peer_id, block_root, block_hash, proof_ids); + + // Proofs should be processed + rig.expect_block_process(ResponseType::ExecutionProof); + + // Block should be imported + rig.proof_component_processed_imported(block_root); + rig.expect_empty_network(); + rig.expect_no_active_lookups(); +} + +/// Test that empty proof response results in peer penalization +#[test] +fn test_proof_lookup_empty_response() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_zkvm_peer(); + + // Trigger lookup + 
rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Peer sends stream terminator with no proofs + rig.single_lookup_proof_response(proof_id, peer_id, None); + + // Peer should be penalized for not providing proofs + rig.expect_penalty(peer_id, "NotEnoughResponsesReturned"); + + // Should retry with different peer + let _new_peer = rig.new_connected_zkvm_peer(); + rig.expect_proof_lookup_request(block_root); +} + +/// Test partial proof response (peer doesn't have all requested proofs) +#[test] +fn test_proof_lookup_partial_response() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_zkvm_peer(); + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Requested 2 proofs but peer only sends 1 + let proof_0 = Arc::new( + ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(0), + block_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap(), + ); + + rig.single_lookup_proof_response(proof_id, peer_id, Some(proof_0)); + rig.single_lookup_proof_response(proof_id, peer_id, None); // End stream early + + // Should penalize peer for not providing all requested proofs + rig.expect_penalty(peer_id, "NotEnoughResponsesReturned"); + + // Should retry with another peer + let new_peer = rig.new_connected_zkvm_peer(); + let retry_proof_id = rig.expect_proof_lookup_request(block_root); + + // Complete with all proofs + rig.complete_single_lookup_proof_download( + retry_proof_id, + new_peer, + block_root, + block_hash, + vec![ + ExecutionProofId::new(0).unwrap(), + ExecutionProofId::new(1).unwrap(), + ], + ); + + rig.expect_block_process(ResponseType::ExecutionProof); + rig.proof_component_processed_imported(block_root); + rig.expect_no_active_lookups(); +} + +/// Test duplicate proofs triggers penalization +#[test] +fn test_proof_lookup_duplicate_proof() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_zkvm_peer(); + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Send proof 0 twice + let proof_0_a = Arc::new( + ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(0), + block_hash, + block_root, + 
vec![1, 2, 3], + ) + .unwrap(), + ); + // TODO(zkproofs): In this case we have the same proofID but different proof_data + // zkVMs should be deterministic, so if this happens there is likely an issue somewhere + let proof_0_b = Arc::new( + ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(0), + block_hash, + block_root, + vec![4, 5, 6], // Different data + ) + .unwrap(), + ); + + rig.single_lookup_proof_response(proof_id, peer_id, Some(proof_0_a)); + rig.single_lookup_proof_response(proof_id, peer_id, Some(proof_0_b)); + + // Should penalize peer for duplicate proof + rig.expect_penalty(peer_id, "DuplicatedProofIDs"); + + // Should retry + let _new_peer = rig.new_connected_zkvm_peer(); + rig.expect_proof_lookup_request(block_root); +} + +/// Test wrong block root in proof triggers penalization +#[test] +fn test_proof_lookup_wrong_block_root() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let wrong_root = Hash256::random(); + let peer_id = rig.new_connected_zkvm_peer(); + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Send proof with wrong block_root + let wrong_proof = Arc::new( + ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(0), + block_hash, + wrong_root, + vec![1, 2, 3], + ) + .unwrap(), + ); + + rig.single_lookup_proof_response(proof_id, peer_id, Some(wrong_proof)); + + // Should penalize peer + rig.expect_penalty(peer_id, "UnrequestedBlockRoot"); + + // Should retry + let _new_peer = rig.new_connected_zkvm_peer(); + rig.expect_proof_lookup_request(block_root); +} + +/// Test proof request timeout +#[test] +fn test_proof_lookup_timeout() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_zkvm_peer(); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Simulate timeout by sending error + rig.send_sync_message(SyncMessage::RpcError { + sync_request_id: SyncRequestId::SingleExecutionProof { id: proof_id }, + peer_id, + error: RPCError::ErrorResponse(RpcErrorResponse::ServerError, "timeout".to_string()), + }); + + // RPC errors trigger retry without necessarily penalizing the peer + // Should retry with different peer + let _new_peer = rig.new_connected_zkvm_peer(); + rig.expect_proof_lookup_request(block_root); +} + +/// Test peer disconnection during proof request +#[test] +fn test_proof_lookup_peer_disconnected() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_zkvm_peer(); + + 
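+    // Proof lookups only target peers that advertise zkVM support, so a dedicated zkVM peer is connected here.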
// Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Peer disconnects + rig.send_sync_message(SyncMessage::RpcError { + sync_request_id: SyncRequestId::SingleExecutionProof { id: proof_id }, + peer_id, + error: RPCError::Disconnected, + }); + + // Should retry with different peer (no penalty for disconnect) + let _new_peer = rig.new_connected_zkvm_peer(); + rig.expect_proof_lookup_request(block_root); +} + +/// Test multiple retries on failure +#[test] +fn test_proof_lookup_multiple_retries() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + + let peer_id = rig.new_connected_zkvm_peer(); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + // First attempt - empty response + let proof_id_1 = rig.expect_proof_lookup_request(block_root); + rig.single_lookup_proof_response(proof_id_1, peer_id, None); + rig.expect_penalty(peer_id, "NotEnoughResponsesReturned"); + + // Second attempt - different peer, also fails + let peer_id_2 = rig.new_connected_zkvm_peer(); + let proof_id_2 = rig.expect_proof_lookup_request(block_root); + rig.single_lookup_proof_response(proof_id_2, peer_id_2, None); + rig.expect_penalty(peer_id_2, "NotEnoughResponsesReturned"); + + // Third attempt - succeeds + let peer_id_3 = rig.new_connected_zkvm_peer(); + let proof_id_3 = rig.expect_proof_lookup_request(block_root); + rig.complete_single_lookup_proof_download( + proof_id_3, + peer_id_3, + block_root, + block_hash, + vec![ + ExecutionProofId::new(0).unwrap(), + ExecutionProofId::new(1).unwrap(), + ], + ); + + rig.expect_block_process(ResponseType::ExecutionProof); + rig.proof_component_processed_imported(block_root); + rig.expect_no_active_lookups(); +} + +/// Test proof lookup with no peers available +#[test] +fn test_proof_lookup_no_peers() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_zkvm_peer(); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Peer fails and disconnects + rig.send_sync_message(SyncMessage::RpcError { + sync_request_id: SyncRequestId::SingleExecutionProof { id: proof_id }, + peer_id, + error: RPCError::Disconnected, + }); + + // Disconnect the peer + rig.peer_disconnected(peer_id); + + // Should not be able to find another peer immediately + // The lookup should remain active waiting for peers + assert_eq!(rig.active_single_lookups_count(), 1); +} + +/// Test 
successful proof verification after block already has blobs +#[test] +fn test_proof_lookup_with_existing_blobs() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + let peer_id = rig.new_connected_zkvm_peer(); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + + // Get block + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.clone().into())); + rig.expect_block_process(ResponseType::Block); + + // Block might still be missing proofs even if blobs present + // Proofs are an additional requirement + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Send proofs + rig.complete_single_lookup_proof_download( + proof_id, + peer_id, + block_root, + block_hash, + vec![ + ExecutionProofId::new(0).unwrap(), + ExecutionProofId::new(1).unwrap(), + ], + ); + + rig.expect_block_process(ResponseType::ExecutionProof); + rig.proof_component_processed_imported(block_root); + rig.expect_no_active_lookups(); +} diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index ef52f896785..fb4adbcee65 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -41,8 +41,9 @@ use slot_clock::{SlotClock, TestingSlotClock}; use tokio::sync::mpsc; use tracing::info; use types::{ - BeaconState, BeaconStateBase, BlobSidecar, BlockImportSource, DataColumnSidecar, EthSpec, - ForkContext, ForkName, Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot, + BeaconState, BeaconStateBase, BlobSidecar, BlockImportSource, ChainSpec, DataColumnSidecar, + EthSpec, ExecutionBlockHash, ExecutionProof, ExecutionProofId, ForkContext, ForkName, Hash256, + MinimalEthSpec as E, SignedBeaconBlock, Slot, data_column_sidecar::ColumnIndex, test_utils::{SeedableRng, TestRandom, XorShiftRng}, }; @@ -56,9 +57,20 @@ impl TestRig { pub fn test_setup() -> Self { // Use `fork_from_env` logic to set correct fork epochs let spec = test_spec::(); + Self::test_setup_with_spec(spec) + } + + fn test_setup_with_spec(spec: ChainSpec) -> Self { + Self::test_setup_with_spec_and_zkvm(spec, false, None) + } + fn test_setup_with_spec_and_zkvm( + spec: ChainSpec, + zkvm_dummy_verifiers: bool, + anchor_oldest_slot: Option, + ) -> Self { // Initialise a new beacon chain - let harness = BeaconChainHarness::>::builder(E) + let mut builder = BeaconChainHarness::>::builder(E) .spec(Arc::new(spec)) .deterministic_keypairs(1) .fresh_ephemeral_store() @@ -67,8 +79,23 @@ impl TestRig { Slot::new(0), Duration::from_secs(0), Duration::from_secs(12), - )) - .build(); + )); + + if zkvm_dummy_verifiers { + // TODO(zkproofs): For unit tests, we likely always want dummy verifiers + builder = builder.zkvm_with_dummy_verifiers(); + } + + let harness = builder.build(); + if let Some(oldest_slot) = anchor_oldest_slot { + let store = &harness.chain.store; + let prev_anchor = store.get_anchor_info(); + let mut new_anchor = prev_anchor.clone(); + new_anchor.oldest_block_slot = oldest_slot; + store + .compare_and_set_anchor_info_with_write(prev_anchor, new_anchor) + .expect("anchor info updated"); + } let chain = harness.chain.clone(); let fork_context = 
Arc::new(ForkContext::new::( @@ -150,6 +177,33 @@ impl TestRig { } } + /// Setup test rig for Fulu with zkvm enabled. + /// This is needed for execution proof tests since proof requests are only made + /// when zkvm mode is enabled in the chain spec. + pub fn test_setup_after_fulu_with_zkvm() -> Option { + let mut spec = test_spec::(); + spec.zkvm_enabled = true; + let r = Self::test_setup_with_spec_and_zkvm(spec, true, None); + if r.fork_name.fulu_enabled() { + Some(r) + } else { + None + } + } + + /// Setup test rig for Fulu with zkvm enabled and backfill required. + pub fn test_setup_after_fulu_with_zkvm_backfill() -> Option { + let mut spec = test_spec::(); + spec.zkvm_enabled = true; + let backfill_start_slot = Slot::new(E::slots_per_epoch()); + let r = Self::test_setup_with_spec_and_zkvm(spec, true, Some(backfill_start_slot)); + if r.fork_name.fulu_enabled() { + Some(r) + } else { + None + } + } + pub fn log(&self, msg: &str) { info!(msg, "TEST_RIG"); } @@ -171,7 +225,11 @@ impl TestRig { self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob.into())); } - fn trigger_unknown_block_from_attestation(&mut self, block_root: Hash256, peer_id: PeerId) { + pub(super) fn trigger_unknown_block_from_attestation( + &mut self, + block_root: Hash256, + peer_id: PeerId, + ) { self.send_sync_message(SyncMessage::UnknownBlockHashFromAttestation( peer_id, block_root, )); @@ -184,7 +242,7 @@ impl TestRig { } } - fn rand_block(&mut self) -> SignedBeaconBlock { + pub(super) fn rand_block(&mut self) -> SignedBeaconBlock { self.rand_block_and_blobs(NumBlobs::None).0 } @@ -228,7 +286,7 @@ impl TestRig { self.sync_manager.active_single_lookups() } - fn active_single_lookups_count(&self) -> usize { + pub(super) fn active_single_lookups_count(&self) -> usize { self.sync_manager.active_single_lookups().len() } @@ -321,7 +379,7 @@ impl TestRig { } #[track_caller] - fn expect_no_active_lookups(&self) { + pub(super) fn expect_no_active_lookups(&self) { self.expect_no_active_single_lookups(); } @@ -349,6 +407,18 @@ impl TestRig { .__add_connected_peer_testing_only(true, &self.harness.spec, key) } + /// Create a new connected peer with zkvm enabled (advertises zkvm=true in ENR) + pub fn new_connected_zkvm_peer(&mut self) -> PeerId { + let key = self.determinstic_key(); + let peer_id = self + .network_globals + .peers + .write() + .__add_connected_zkvm_peer_testing_only(&self.harness.spec, key); + self.log(&format!("Added new zkvm peer for testing {peer_id:?}")); + peer_id + } + fn determinstic_key(&mut self) -> CombinedKey { k256::ecdsa::SigningKey::random(&mut self.rng_08).into() } @@ -445,7 +515,7 @@ impl TestRig { }); } - fn single_lookup_block_response( + pub(super) fn single_lookup_block_response( &mut self, id: SingleLookupReqId, peer_id: PeerId, @@ -527,6 +597,69 @@ impl TestRig { ); } + /// Send a single execution proof response + pub(super) fn single_lookup_proof_response( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + proof: Option>, + ) { + self.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::SingleExecutionProof { id }, + peer_id, + execution_proof: proof, + seen_timestamp: D, + }); + } + + /// Complete execution proof download by sending all requested proofs + pub(super) fn complete_single_lookup_proof_download( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + block_root: Hash256, + block_hash: ExecutionBlockHash, + subnet_ids: Vec, + ) { + for subnet_id in subnet_ids { + let proof = Arc::new( + ExecutionProof::new( + subnet_id, + 
types::Slot::new(0), + block_hash, + block_root, + vec![1, 2, 3, 4], + ) + .unwrap(), + ); + self.single_lookup_proof_response(id, peer_id, Some(proof)); + } + // Send stream terminator + self.single_lookup_proof_response(id, peer_id, None); + } + + /// Expect an execution proof request for a specific block + pub(super) fn expect_proof_lookup_request(&mut self, block_root: Hash256) -> SingleLookupReqId { + self.pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + request: RequestType::ExecutionProofsByRoot(req), + app_request_id: AppRequestId::Sync(SyncRequestId::SingleExecutionProof { id }), + .. + } if req.block_root == block_root => Some(*id), + _ => None, + }) + .unwrap_or_else(|_| panic!("Expected proof request for {block_root}")) + } + + /// Send a processing result indicating proofs were processed and block imported + pub(super) fn proof_component_processed_imported(&mut self, block_root: Hash256) { + let id = self.find_single_lookup_for(block_root); + self.send_sync_message(SyncMessage::BlockComponentProcessed { + process_type: BlockProcessType::SingleBlock { id }, + result: BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)), + }); + } + fn complete_lookup_block_download(&mut self, block: SignedBeaconBlock) { let block_root = block.canonical_root(); let id = self.expect_block_lookup_request(block_root); @@ -786,7 +919,7 @@ impl TestRig { } #[track_caller] - fn expect_block_lookup_request(&mut self, for_block: Hash256) -> SingleLookupReqId { + pub(super) fn expect_block_lookup_request(&mut self, for_block: Hash256) -> SingleLookupReqId { self.find_block_lookup_request(for_block) .unwrap_or_else(|e| panic!("Expected block request for {for_block:?}: {e}")) } @@ -910,7 +1043,7 @@ impl TestRig { } #[track_caller] - fn expect_block_process(&mut self, response_type: ResponseType) { + pub(super) fn expect_block_process(&mut self, response_type: ResponseType) { match response_type { ResponseType::Block => self .pop_received_processor_event(|ev| { @@ -927,6 +1060,11 @@ impl TestRig { (ev.work_type() == beacon_processor::WorkType::RpcCustodyColumn).then_some(()) }) .unwrap_or_else(|e| panic!("Expected column work event: {e}")), + ResponseType::ExecutionProof => self + .pop_received_processor_event(|ev| { + (ev.work_type() == beacon_processor::WorkType::RpcExecutionProofs).then_some(()) + }) + .unwrap_or_else(|e| panic!("Expected execution proofs work event: {e}")), } } diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs index 23c14ff63ef..9b82f830bcb 100644 --- a/beacon_node/network/src/sync/tests/mod.rs +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -18,6 +18,7 @@ use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; use types::{ChainSpec, ForkName, MinimalEthSpec as E}; +mod execution_proof_tests; mod lookups; mod range; diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index cb728a90c1b..f122087ae3c 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -3,26 +3,26 @@ use crate::network_beacon_processor::ChainSegmentProcessId; use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use crate::sync::manager::SLOT_IMPORT_TOLERANCE; -use crate::sync::network_context::RangeRequestId; +use crate::sync::network_context::{MAX_EXECUTION_PROOF_RETRIES, RangeRequestId}; use crate::sync::range_sync::RangeSyncType; use 
beacon_chain::data_column_verification::CustodyDataColumn; use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; use beacon_chain::{EngineState, NotifyExecutionLayer, block_verification_types::RpcBlock}; use beacon_processor::WorkType; -use lighthouse_network::rpc::RequestType; use lighthouse_network::rpc::methods::{ - BlobsByRangeRequest, DataColumnsByRangeRequest, OldBlocksByRangeRequest, - OldBlocksByRangeRequestV2, StatusMessageV2, + BlobsByRangeRequest, DataColumnsByRangeRequest, ExecutionProofsByRangeRequest, + OldBlocksByRangeRequest, OldBlocksByRangeRequestV2, StatusMessageV2, }; +use lighthouse_network::rpc::{RPCError, RequestType}; use lighthouse_network::service::api_types::{ AppRequestId, BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, - SyncRequestId, + ExecutionProofsByRangeRequestId, SyncRequestId, }; use lighthouse_network::{PeerId, SyncInfo}; use std::time::Duration; use types::{ - BlobSidecarList, BlockImportSource, Epoch, EthSpec, Hash256, MinimalEthSpec as E, - SignedBeaconBlock, SignedBeaconBlockHash, Slot, + BlobSidecarList, BlockImportSource, Epoch, EthSpec, ExecutionBlockHash, ExecutionProof, + ExecutionProofId, Hash256, MinimalEthSpec as E, SignedBeaconBlock, SignedBeaconBlockHash, Slot, }; const D: Duration = Duration::new(0, 0); @@ -38,6 +38,13 @@ enum ByRangeDataRequestIds { PostPeerDAS(Vec<(DataColumnsByRangeRequestId, PeerId)>), } +struct BlocksByRangeRequestMeta { + id: BlocksByRangeRequestId, + peer: PeerId, + start_slot: u64, + count: u64, +} + /// Sync tests are usually written in the form: /// - Do some action /// - Expect a request to be sent @@ -84,6 +91,20 @@ impl TestRig { }) } + fn add_head_zkvm_peer_with_root(&mut self, head_root: Hash256) -> PeerId { + let local_info = self.local_info(); + let peer_id = self.new_connected_zkvm_peer(); + self.send_sync_message(SyncMessage::AddPeer( + peer_id, + SyncInfo { + head_root, + head_slot: local_info.head_slot + 1 + Slot::new(SLOT_IMPORT_TOLERANCE as u64), + ..local_info + }, + )); + peer_id + } + // Produce a finalized peer with an advanced finalized epoch fn add_finalized_peer(&mut self) -> PeerId { self.add_finalized_peer_with_root(Hash256::random()) @@ -155,6 +176,13 @@ impl TestRig { } } + fn add_synced_zkvm_peer(&mut self) -> PeerId { + let peer_id = self.new_connected_zkvm_peer(); + let local_info = self.local_info(); + self.send_sync_message(SyncMessage::AddPeer(peer_id, local_info)); + peer_id + } + fn assert_state(&self, state: RangeSyncType) { assert_eq!( self.sync_manager @@ -200,6 +228,16 @@ impl TestRig { &mut self, request_filter: RequestFilter, ) -> ((BlocksByRangeRequestId, PeerId), ByRangeDataRequestIds) { + let (meta, by_range_data_requests) = + self.find_blocks_by_range_request_with_meta(request_filter); + + ((meta.id, meta.peer), by_range_data_requests) + } + + fn find_blocks_by_range_request_with_meta( + &mut self, + request_filter: RequestFilter, + ) -> (BlocksByRangeRequestMeta, ByRangeDataRequestIds) { let filter_f = |peer: PeerId, start_slot: u64| { if let Some(expected_epoch) = request_filter.epoch { let epoch = Slot::new(start_slot).epoch(E::slots_per_epoch()).as_u64(); @@ -222,10 +260,17 @@ impl TestRig { peer_id, request: RequestType::BlocksByRange(OldBlocksByRangeRequest::V2( - OldBlocksByRangeRequestV2 { start_slot, .. }, + OldBlocksByRangeRequestV2 { + start_slot, count, .. 
+ }, )), app_request_id: AppRequestId::Sync(SyncRequestId::BlocksByRange(id)), - } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), + } if filter_f(*peer_id, *start_slot) => Some(BlocksByRangeRequestMeta { + id: *id, + peer: *peer_id, + start_slot: *start_slot, + count: *count, + }), _ => None, }) .unwrap_or_else(|e| { @@ -272,6 +317,45 @@ impl TestRig { (block_req, by_range_data_requests) } + fn find_execution_proofs_by_range_request( + &mut self, + request_filter: RequestFilter, + ) -> (ExecutionProofsByRangeRequestId, PeerId, u64, u64) { + let filter_f = |peer: PeerId, start_slot: u64| { + if let Some(expected_epoch) = request_filter.epoch { + let epoch = Slot::new(start_slot).epoch(E::slots_per_epoch()).as_u64(); + if epoch != expected_epoch { + return false; + } + } + if let Some(expected_peer) = request_filter.peer + && peer != expected_peer + { + return false; + } + + true + }; + + self.pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + peer_id, + request: + RequestType::ExecutionProofsByRange(ExecutionProofsByRangeRequest { + start_slot, + count, + }), + app_request_id: AppRequestId::Sync(SyncRequestId::ExecutionProofsByRange(id)), + } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id, *start_slot, *count)), + _ => None, + }) + .unwrap_or_else(|e| { + panic!( + "Should have an ExecutionProofsByRange request, filter {request_filter:?}: {e:?}" + ) + }) + } + fn find_and_complete_blocks_by_range_request( &mut self, request_filter: RequestFilter, @@ -290,6 +374,15 @@ impl TestRig { seen_timestamp: D, }); + self.complete_by_range_data_requests(by_range_data_request_ids); + + blocks_req_id.parent_request_id.requester + } + + fn complete_by_range_data_requests( + &mut self, + by_range_data_request_ids: ByRangeDataRequestIds, + ) { match by_range_data_request_ids { ByRangeDataRequestIds::PreDeneb => {} ByRangeDataRequestIds::PrePeerDAS(id, peer_id) => { @@ -319,8 +412,6 @@ impl TestRig { } } } - - blocks_req_id.parent_request_id.requester } fn find_and_complete_processing_chain_segment(&mut self, id: ChainSegmentProcessId) { @@ -601,3 +692,611 @@ fn finalized_sync_not_enough_custody_peers_on_start() { let last_epoch = advanced_epochs + EXTRA_SYNCED_EPOCHS; r.complete_and_process_range_sync_until(last_epoch, filter()); } + +#[test] +fn range_sync_requests_execution_proofs_for_zkvm() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let head_root = Hash256::random(); + let _supernode_peer = rig.add_head_peer_with_root(head_root); + + rig.assert_state(RangeSyncType::Head); + rig.expect_empty_network(); + + let zkvm_peer = rig.add_head_zkvm_peer_with_root(head_root); + let _ = rig.find_blocks_by_range_request(filter()); + let (_, proof_peer, _, _) = + rig.find_execution_proofs_by_range_request(filter().peer(zkvm_peer)); + assert_eq!(proof_peer, zkvm_peer); +} + +#[test] +fn range_sync_uses_cached_execution_proofs() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let head_root = Hash256::random(); + let zkvm_peer = rig.add_head_zkvm_peer_with_root(head_root); + let _supernode_peer = rig.add_head_peer_with_root(head_root); + + let (block_meta, by_range_data) = rig.find_blocks_by_range_request_with_meta(filter()); + let (proof_req_id, proof_peer, proof_start_slot, proof_count) = + rig.find_execution_proofs_by_range_request(filter().peer(zkvm_peer)); + + assert_eq!(proof_start_slot, block_meta.start_slot); + assert_eq!(proof_count, block_meta.count); + 
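+    // The proofs-by-range request must target the zkVM-capable peer rather than the supernode.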
assert_eq!(proof_peer, zkvm_peer); + + let mut block = rig.rand_block(); + *block.message_mut().slot_mut() = Slot::new(block_meta.start_slot); + + let block_root = block.canonical_root(); + let block_hash = block + .message() + .body() + .execution_payload() + .expect("execution payload should exist") + .execution_payload_ref() + .block_hash(); + + let min_proofs = rig + .harness + .chain + .spec + .zkvm_min_proofs_required() + .expect("zkvm enabled"); + + let proofs = (0..min_proofs) + .map(|i| { + ExecutionProof::new( + ExecutionProofId::new(u8::try_from(i).expect("proof id fits")).unwrap(), + block.slot(), + block_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap() + }) + .collect::>(); + + rig.harness + .chain + .data_availability_checker + .put_verified_execution_proofs(block_root, proofs) + .unwrap(); + + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: Some(Arc::new(block)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: None, + seen_timestamp: D, + }); + + rig.complete_by_range_data_requests(by_range_data); + + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + peer_id: proof_peer, + execution_proof: None, + seen_timestamp: D, + }); + + let request_id = block_meta.id.parent_request_id.requester; + let process_id = match request_id { + RangeRequestId::RangeSync { chain_id, batch_id } => { + ChainSegmentProcessId::RangeBatchId(chain_id, batch_id) + } + RangeRequestId::BackfillSync { batch_id } => { + ChainSegmentProcessId::BackSyncBatchId(batch_id) + } + }; + rig.find_and_complete_processing_chain_segment(process_id); +} + +#[test] +fn range_sync_retries_execution_proofs_without_block_retry() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let head_root = Hash256::random(); + let zkvm_peer_1 = rig.add_head_zkvm_peer_with_root(head_root); + let zkvm_peer_2 = rig.add_head_zkvm_peer_with_root(head_root); + let _supernode_peer = rig.add_head_peer_with_root(head_root); + + let (block_meta, by_range_data) = rig.find_blocks_by_range_request_with_meta(filter()); + let epoch = Slot::new(block_meta.start_slot) + .epoch(E::slots_per_epoch()) + .as_u64(); + let (proof_req_id, proof_peer, proof_start_slot, _) = + rig.find_execution_proofs_by_range_request(filter().epoch(epoch)); + + assert_eq!(proof_start_slot, block_meta.start_slot); + assert!(proof_peer == zkvm_peer_1 || proof_peer == zkvm_peer_2); + + let mut block = rig.rand_block(); + *block.message_mut().slot_mut() = Slot::new(block_meta.start_slot); + + let block_root = block.canonical_root(); + let block_hash = block + .message() + .body() + .execution_payload() + .expect("execution payload should exist") + .execution_payload_ref() + .block_hash(); + + let wrong_hash = if block_hash == ExecutionBlockHash::zero() { + ExecutionBlockHash::repeat_byte(0x11) + } else { + ExecutionBlockHash::zero() + }; + + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: Some(Arc::new(block)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: None, + seen_timestamp: D, 
+ }); + + rig.complete_by_range_data_requests(by_range_data); + + let bad_proof = ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(block_meta.start_slot), + wrong_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap(); + + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + peer_id: proof_peer, + execution_proof: Some(Arc::new(bad_proof)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + peer_id: proof_peer, + execution_proof: None, + seen_timestamp: D, + }); + + if rig + .pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + request: + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2( + OldBlocksByRangeRequestV2 { start_slot, .. }, + )), + .. + } if *start_slot == block_meta.start_slot => Some(()), + _ => None, + }) + .is_ok() + { + panic!("unexpected BlocksByRange retry for execution proof failure"); + } + + let (_, retry_peer, retry_start_slot, _) = + rig.find_execution_proofs_by_range_request(filter().epoch(epoch)); + assert_eq!(retry_start_slot, block_meta.start_slot); + assert_ne!(retry_peer, proof_peer); +} + +#[test] +fn backfill_retries_execution_proofs_without_block_retry() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm_backfill() else { + return; + }; + + let zkvm_peer_1 = rig.add_synced_zkvm_peer(); + let zkvm_peer_2 = rig.add_synced_zkvm_peer(); + let local_info = rig.local_info(); + let _supernode_peer = rig.add_supernode_peer(local_info); + + let backfill_epoch = Slot::new(E::slots_per_epoch()) + .epoch(E::slots_per_epoch()) + .as_u64(); + let (block_meta, by_range_data) = + rig.find_blocks_by_range_request_with_meta(filter().epoch(backfill_epoch)); + let (proof_req_id, proof_peer, proof_start_slot, _) = + rig.find_execution_proofs_by_range_request(filter().epoch(backfill_epoch)); + + assert_eq!(proof_start_slot, block_meta.start_slot); + assert!(proof_peer == zkvm_peer_1 || proof_peer == zkvm_peer_2); + + let mut block = rig.rand_block(); + *block.message_mut().slot_mut() = Slot::new(block_meta.start_slot); + + let block_root = block.canonical_root(); + let block_hash = block + .message() + .body() + .execution_payload() + .expect("execution payload should exist") + .execution_payload_ref() + .block_hash(); + + let wrong_hash = if block_hash == ExecutionBlockHash::zero() { + ExecutionBlockHash::repeat_byte(0x11) + } else { + ExecutionBlockHash::zero() + }; + + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: Some(Arc::new(block)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: None, + seen_timestamp: D, + }); + + rig.complete_by_range_data_requests(by_range_data); + + let bad_proof = ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(block_meta.start_slot), + wrong_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap(); + + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + peer_id: proof_peer, + execution_proof: Some(Arc::new(bad_proof)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + 
peer_id: proof_peer, + execution_proof: None, + seen_timestamp: D, + }); + + if rig + .pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + request: + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2( + OldBlocksByRangeRequestV2 { start_slot, .. }, + )), + .. + } if *start_slot == block_meta.start_slot => Some(()), + _ => None, + }) + .is_ok() + { + panic!("unexpected BlocksByRange retry for execution proof failure"); + } + + let (_, retry_peer, retry_start_slot, _) = + rig.find_execution_proofs_by_range_request(filter().epoch(backfill_epoch)); + assert_eq!(retry_start_slot, block_meta.start_slot); + assert_ne!(retry_peer, proof_peer); +} + +#[test] +fn range_sync_execution_proof_retries_exhaust_then_block_retry() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let head_root = Hash256::random(); + let zkvm_peer_1 = rig.add_head_zkvm_peer_with_root(head_root); + let zkvm_peer_2 = rig.add_head_zkvm_peer_with_root(head_root); + let zkvm_peer_3 = rig.add_head_zkvm_peer_with_root(head_root); + let _supernode_peer = rig.add_head_peer_with_root(head_root); + + let (block_meta, by_range_data) = rig.find_blocks_by_range_request_with_meta(filter()); + let epoch = Slot::new(block_meta.start_slot) + .epoch(E::slots_per_epoch()) + .as_u64(); + let (mut proof_req_id, mut proof_peer, proof_start_slot, _) = + rig.find_execution_proofs_by_range_request(filter().epoch(epoch)); + + assert_eq!(proof_start_slot, block_meta.start_slot); + assert!(proof_peer == zkvm_peer_1 || proof_peer == zkvm_peer_2 || proof_peer == zkvm_peer_3); + + let mut block = rig.rand_block(); + *block.message_mut().slot_mut() = Slot::new(block_meta.start_slot); + + let block_root = block.canonical_root(); + let block_hash = block + .message() + .body() + .execution_payload() + .expect("execution payload should exist") + .execution_payload_ref() + .block_hash(); + + let wrong_hash = if block_hash == ExecutionBlockHash::zero() { + ExecutionBlockHash::repeat_byte(0x11) + } else { + ExecutionBlockHash::zero() + }; + + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: Some(Arc::new(block)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: None, + seen_timestamp: D, + }); + rig.complete_by_range_data_requests(by_range_data); + + for attempt in 1..=MAX_EXECUTION_PROOF_RETRIES { + let bad_proof = ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(block_meta.start_slot), + wrong_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap(); + + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + peer_id: proof_peer, + execution_proof: Some(Arc::new(bad_proof)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + peer_id: proof_peer, + execution_proof: None, + seen_timestamp: D, + }); + + if attempt < MAX_EXECUTION_PROOF_RETRIES { + if rig + .pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + request: + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2( + OldBlocksByRangeRequestV2 { start_slot, .. }, + )), + .. 
+ } if *start_slot == block_meta.start_slot => Some(()), + _ => None, + }) + .is_ok() + { + panic!("unexpected BlocksByRange retry before proof retries are exhausted"); + } + + let (next_req_id, next_peer, retry_start_slot, _) = + rig.find_execution_proofs_by_range_request(filter().epoch(epoch)); + assert_eq!(retry_start_slot, block_meta.start_slot); + assert_ne!(next_peer, proof_peer); + proof_req_id = next_req_id; + proof_peer = next_peer; + } + } + + rig.pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + request: + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { + start_slot, + .. + })), + .. + } if *start_slot == block_meta.start_slot => Some(()), + _ => None, + }) + .unwrap_or_else(|e| panic!("Expected BlocksByRange retry after exhausted proofs: {e}")); +} + +#[test] +fn range_sync_proof_retry_on_unsupported_protocol() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let head_root = Hash256::random(); + let zkvm_peer_1 = rig.add_head_zkvm_peer_with_root(head_root); + let zkvm_peer_2 = rig.add_head_zkvm_peer_with_root(head_root); + let _supernode_peer = rig.add_head_peer_with_root(head_root); + + let (block_meta, by_range_data) = rig.find_blocks_by_range_request_with_meta(filter()); + let epoch = Slot::new(block_meta.start_slot) + .epoch(E::slots_per_epoch()) + .as_u64(); + let (proof_req_id, proof_peer, proof_start_slot, _) = + rig.find_execution_proofs_by_range_request(filter().epoch(epoch)); + + assert_eq!(proof_start_slot, block_meta.start_slot); + assert!(proof_peer == zkvm_peer_1 || proof_peer == zkvm_peer_2); + + let mut block = rig.rand_block(); + *block.message_mut().slot_mut() = Slot::new(block_meta.start_slot); + + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: Some(Arc::new(block)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: None, + seen_timestamp: D, + }); + rig.complete_by_range_data_requests(by_range_data); + + rig.send_sync_message(SyncMessage::RpcError { + peer_id: proof_peer, + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + error: RPCError::UnsupportedProtocol, + }); + + if rig + .pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + request: + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2( + OldBlocksByRangeRequestV2 { start_slot, .. }, + )), + .. 
+ } if *start_slot == block_meta.start_slot => Some(()), + _ => None, + }) + .is_ok() + { + panic!("unexpected BlocksByRange retry on unsupported protocol"); + } + + let (_, retry_peer, retry_start_slot, _) = + rig.find_execution_proofs_by_range_request(filter().epoch(epoch)); + assert_eq!(retry_start_slot, block_meta.start_slot); + assert_ne!(retry_peer, proof_peer); +} + +#[test] +fn range_sync_ignores_bad_proofs_when_cached() { + let Some(mut rig) = TestRig::test_setup_after_fulu_with_zkvm() else { + return; + }; + + let head_root = Hash256::random(); + let zkvm_peer = rig.add_head_zkvm_peer_with_root(head_root); + let _supernode_peer = rig.add_head_peer_with_root(head_root); + + let (block_meta, by_range_data) = rig.find_blocks_by_range_request_with_meta(filter()); + let epoch = Slot::new(block_meta.start_slot) + .epoch(E::slots_per_epoch()) + .as_u64(); + let (proof_req_id, proof_peer, proof_start_slot, _) = + rig.find_execution_proofs_by_range_request(filter().epoch(epoch)); + + assert_eq!(proof_start_slot, block_meta.start_slot); + assert_eq!(proof_peer, zkvm_peer); + + let mut block = rig.rand_block(); + *block.message_mut().slot_mut() = Slot::new(block_meta.start_slot); + + let block_root = block.canonical_root(); + let block_hash = block + .message() + .body() + .execution_payload() + .expect("execution payload should exist") + .execution_payload_ref() + .block_hash(); + + let min_proofs = rig + .harness + .chain + .spec + .zkvm_min_proofs_required() + .expect("zkvm enabled"); + + let proofs = (0..min_proofs) + .map(|i| { + ExecutionProof::new( + ExecutionProofId::new(u8::try_from(i).expect("proof id fits")).unwrap(), + block.slot(), + block_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap() + }) + .collect::>(); + + rig.harness + .chain + .data_availability_checker + .put_verified_execution_proofs(block_root, proofs) + .unwrap(); + + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: Some(Arc::new(block)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcBlock { + sync_request_id: SyncRequestId::BlocksByRange(block_meta.id), + peer_id: block_meta.peer, + beacon_block: None, + seen_timestamp: D, + }); + + rig.complete_by_range_data_requests(by_range_data); + + let wrong_hash = if block_hash == ExecutionBlockHash::zero() { + ExecutionBlockHash::repeat_byte(0x11) + } else { + ExecutionBlockHash::zero() + }; + + let bad_proof = ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(block_meta.start_slot), + wrong_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap(); + + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + peer_id: proof_peer, + execution_proof: Some(Arc::new(bad_proof)), + seen_timestamp: D, + }); + rig.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::ExecutionProofsByRange(proof_req_id), + peer_id: proof_peer, + execution_proof: None, + seen_timestamp: D, + }); + + if rig + .pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + request: + RequestType::ExecutionProofsByRange(ExecutionProofsByRangeRequest { + start_slot, + .. + }), + .. 
+ } if *start_slot == block_meta.start_slot => Some(()), + _ => None, + }) + .is_ok() + { + panic!("unexpected execution proof retry when cache already satisfies requirement"); + } +} diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index e4c7c6ff1fe..48a7fb3e4f3 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -933,6 +933,16 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) + /* ZK-VM Execution Layer settings */ + .arg( + Arg::new("activate-zkvm") + .long("activate-zkvm") + .help("Activates ZKVM execution proof mode. Enables the node to subscribe to the \ + execution_proof gossip topic, receive and verify execution proofs from peers, \ + and advertise zkVM support in its ENR for peer discovery.") + .action(ArgAction::SetTrue) + .display_order(0) + ) /* Deneb settings */ .arg( Arg::new("trusted-setup-file-override") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 26dd3b6642e..f58ca5d12da 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -31,6 +31,7 @@ use std::time::Duration; use tracing::{error, info, warn}; use types::graffiti::GraffitiString; use types::{Checkpoint, Epoch, EthSpec, Hash256}; +use zkvm_execution_layer::ZKVMExecutionLayerConfig; const PURGE_DB_CONFIRMATION: &str = "confirm"; @@ -337,6 +338,17 @@ pub fn get_config( // Store the EL config in the client config. client_config.execution_layer = Some(el_config); + // Parse ZK-VM execution layer config if provided + if cli_args.get_flag("activate-zkvm") { + let zkvm_config = ZKVMExecutionLayerConfig::builder() + .build() + .map_err(|e| format!("Invalid ZK-VM configuration: {}", e))?; + + client_config.zkvm_execution_layer = Some(zkvm_config); + + info!("ZKVM mode activated"); + } + // Override default trusted setup file if required if let Some(trusted_setup_file_path) = cli_args.get_one::("trusted-setup-file-override") { diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index a07cc838863..9cb6620816e 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -32,6 +32,8 @@ pub enum Error { BlobInfoConcurrentMutation, /// The store's `data_column_info` was mutated concurrently, the latest modification wasn't applied. DataColumnInfoConcurrentMutation, + /// The store's `execution_proof_info` was mutated concurrently, the latest modification wasn't applied. + ExecutionProofInfoConcurrentMutation, /// The block or state is unavailable due to weak subjectivity sync. HistoryUnavailable, /// State reconstruction cannot commence because not all historic blocks are known. 
@@ -92,6 +94,7 @@ pub enum Error { LoadSplit(Box), LoadBlobInfo(Box), LoadDataColumnInfo(Box), + LoadExecutionProofInfo(Box), LoadConfig(Box), LoadHotStateSummary(Hash256, Box), LoadHotStateSummaryForSplit(Box), diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index c4137191744..a05d915795f 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -9,12 +9,13 @@ use crate::metadata::{ ANCHOR_INFO_KEY, ANCHOR_UNINITIALIZED, AnchorInfo, BLOB_INFO_KEY, BlobInfo, COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION, CompactionTimestamp, DATA_COLUMN_CUSTODY_INFO_KEY, DATA_COLUMN_INFO_KEY, DataColumnCustodyInfo, DataColumnInfo, - SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN, SchemaVersion, + EXECUTION_PROOF_INFO_KEY, ExecutionProofInfo, SCHEMA_VERSION_KEY, SPLIT_KEY, + STATE_UPPER_LIMIT_NO_RETAIN, SchemaVersion, }; use crate::state_cache::{PutStateOutcome, StateCache}; use crate::{ BlobSidecarListFromRoot, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp, StoreItem, - StoreOp, get_data_column_key, + StoreOp, get_data_column_key, get_execution_proof_key, metrics::{self, COLD_METRIC, HOT_METRIC}, parse_data_column_key, }; @@ -61,6 +62,8 @@ pub struct HotColdDB, Cold: ItemStore> { blob_info: RwLock, /// The starting slots for the range of data columns stored in the database. data_column_info: RwLock, + /// The starting slots for the range of execution proofs stored in the database. + execution_proof_info: RwLock, pub(crate) config: StoreConfig, pub hierarchy: HierarchyModuli, /// Cold database containing compact historical data. @@ -93,6 +96,7 @@ struct BlockCache { block_cache: LruCache>, blob_cache: LruCache>, data_column_cache: LruCache>>>, + execution_proof_cache: LruCache>>, data_column_custody_info_cache: Option, } @@ -102,6 +106,7 @@ impl BlockCache { block_cache: LruCache::new(size), blob_cache: LruCache::new(size), data_column_cache: LruCache::new(size), + execution_proof_cache: LruCache::new(size), data_column_custody_info_cache: None, } } @@ -116,6 +121,9 @@ impl BlockCache { .get_or_insert_mut(block_root, Default::default) .insert(data_column.index, data_column); } + pub fn put_execution_proofs(&mut self, block_root: Hash256, proofs: Vec>) { + self.execution_proof_cache.put(block_root, proofs); + } pub fn put_data_column_custody_info( &mut self, data_column_custody_info: Option, @@ -139,6 +147,12 @@ impl BlockCache { .get(block_root) .and_then(|map| map.get(column_index).cloned()) } + pub fn get_execution_proofs( + &mut self, + block_root: &Hash256, + ) -> Option>> { + self.execution_proof_cache.get(block_root).cloned() + } pub fn get_data_column_custody_info(&self) -> Option { self.data_column_custody_info_cache.clone() } @@ -151,10 +165,14 @@ impl BlockCache { pub fn delete_data_columns(&mut self, block_root: &Hash256) { let _ = self.data_column_cache.pop(block_root); } + pub fn delete_execution_proofs(&mut self, block_root: &Hash256) { + let _ = self.execution_proof_cache.pop(block_root); + } pub fn delete(&mut self, block_root: &Hash256) { self.delete_block(block_root); self.delete_blobs(block_root); self.delete_data_columns(block_root); + self.delete_execution_proofs(block_root); } } @@ -232,6 +250,7 @@ impl HotColdDB, MemoryStore> { anchor_info: RwLock::new(ANCHOR_UNINITIALIZED), blob_info: RwLock::new(BlobInfo::default()), data_column_info: RwLock::new(DataColumnInfo::default()), + execution_proof_info: RwLock::new(ExecutionProofInfo::default()), cold_db: 
MemoryStore::open(), blobs_db: MemoryStore::open(), hot_db: MemoryStore::open(), @@ -286,6 +305,7 @@ impl HotColdDB, BeaconNodeBackend> { anchor_info, blob_info: RwLock::new(BlobInfo::default()), data_column_info: RwLock::new(DataColumnInfo::default()), + execution_proof_info: RwLock::new(ExecutionProofInfo::default()), blobs_db: BeaconNodeBackend::open(&config, blobs_db_path)?, cold_db: BeaconNodeBackend::open(&config, cold_path)?, hot_db, @@ -395,10 +415,38 @@ impl HotColdDB, BeaconNodeBackend> { new_data_column_info.clone(), )?; + // Initialize execution proof info + let execution_proof_info = db.load_execution_proof_info()?; + let zkvm_fork_slot = db + .spec + .zkvm_fork_epoch() + .map(|epoch| epoch.start_slot(E::slots_per_epoch())); + let new_execution_proof_info = match &execution_proof_info { + Some(execution_proof_info) => { + // Set the oldest execution proof slot to the fork slot if it is not yet set. + let oldest_execution_proof_slot = execution_proof_info + .oldest_execution_proof_slot + .or(zkvm_fork_slot); + ExecutionProofInfo { + oldest_execution_proof_slot, + } + } + // First start. + None => ExecutionProofInfo { + // Set the oldest execution proof slot to the fork slot if it is not yet set. + oldest_execution_proof_slot: zkvm_fork_slot, + }, + }; + db.compare_and_set_execution_proof_info_with_write( + <_>::default(), + new_execution_proof_info.clone(), + )?; + info!( path = ?blobs_db_path, oldest_blob_slot = ?new_blob_info.oldest_blob_slot, oldest_data_column_slot = ?new_data_column_info.oldest_data_column_slot, + oldest_execution_proof_slot = ?new_execution_proof_info.oldest_execution_proof_slot, "Blob DB initialized" ); @@ -1027,6 +1075,47 @@ impl, Cold: ItemStore> HotColdDB } } + /// Store execution proofs for a block. + pub fn put_execution_proofs( + &self, + block_root: &Hash256, + proofs: &[ExecutionProof], + ) -> Result<(), Error> { + for proof in proofs { + self.blobs_db.put_bytes( + DBColumn::BeaconExecutionProof, + &get_execution_proof_key(block_root, proof.proof_id.as_u8()), + &proof.as_ssz_bytes(), + )?; + } + if !proofs.is_empty() { + let cached = proofs + .iter() + .map(|proof| Arc::new(proof.clone())) + .collect::>(); + self.block_cache + .as_ref() + .inspect(|cache| cache.lock().put_execution_proofs(*block_root, cached)); + } + Ok(()) + } + + /// Create key-value store operations for storing execution proofs. + pub fn execution_proofs_as_kv_store_ops( + &self, + block_root: &Hash256, + proofs: &[ExecutionProof], + ops: &mut Vec, + ) { + for proof in proofs { + ops.push(KeyValueStoreOp::PutKeyValue( + DBColumn::BeaconExecutionProof, + get_execution_proof_key(block_root, proof.proof_id.as_u8()), + proof.as_ssz_bytes(), + )); + } + } + /// Store a state in the store. pub fn put_state(&self, state_root: &Hash256, state: &BeaconState) -> Result<(), Error> { let mut ops: Vec = Vec::new(); @@ -2558,6 +2647,47 @@ impl, Cold: ItemStore> HotColdDB } } + /// Fetch all execution proofs for a given block from the store. 
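+    /// Checks the in-memory block cache first, then falls back to iterating the
+    /// `BeaconExecutionProof` column in the blobs DB, using the block root as the key prefix.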
+ pub fn get_execution_proofs( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + if let Some(proofs) = self + .block_cache + .as_ref() + .and_then(|cache| cache.lock().get_execution_proofs(block_root)) + { + return Ok(proofs); + } + + let mut proofs = Vec::new(); + let prefix = block_root.as_slice(); + + for result in self + .blobs_db + .iter_column_from::>(DBColumn::BeaconExecutionProof, prefix) + { + let (key, value) = result?; + // Check if key starts with our block_root prefix + if !key.starts_with(prefix) { + // We've moved past this block's proofs + break; + } + let proof = Arc::new(ExecutionProof::from_ssz_bytes(&value)?); + proofs.push(proof); + } + + if !proofs.is_empty() { + self.block_cache.as_ref().inspect(|cache| { + cache + .lock() + .put_execution_proofs(*block_root, proofs.clone()) + }); + } + + Ok(proofs) + } + /// Fetch all keys in the data_column column with prefix `block_root` pub fn get_data_column_keys(&self, block_root: Hash256) -> Result, Error> { self.blobs_db @@ -2877,6 +3007,77 @@ impl, Cold: ItemStore> HotColdDB data_column_info.as_kv_store_op(DATA_COLUMN_INFO_KEY) } + /// Get a clone of the store's execution proof info. + /// + /// To do mutations, use `compare_and_set_execution_proof_info`. + pub fn get_execution_proof_info(&self) -> ExecutionProofInfo { + self.execution_proof_info.read_recursive().clone() + } + + /// Initialize the `ExecutionProofInfo` when starting from genesis or a checkpoint. + pub fn init_execution_proof_info(&self, anchor_slot: Slot) -> Result { + let oldest_execution_proof_slot = self.spec.zkvm_fork_epoch().map(|fork_epoch| { + std::cmp::max(anchor_slot, fork_epoch.start_slot(E::slots_per_epoch())) + }); + let execution_proof_info = ExecutionProofInfo { + oldest_execution_proof_slot, + }; + self.compare_and_set_execution_proof_info( + self.get_execution_proof_info(), + execution_proof_info, + ) + } + + /// Atomically update the execution proof info from `prev_value` to `new_value`. + /// + /// Return a `KeyValueStoreOp` which should be written to disk, possibly atomically with other + /// values. + /// + /// Return an `ExecutionProofInfoConcurrentMutation` error if the `prev_value` provided + /// is not correct. + pub fn compare_and_set_execution_proof_info( + &self, + prev_value: ExecutionProofInfo, + new_value: ExecutionProofInfo, + ) -> Result { + let mut execution_proof_info = self.execution_proof_info.write(); + if *execution_proof_info == prev_value { + let kv_op = self.store_execution_proof_info_in_batch(&new_value); + *execution_proof_info = new_value; + Ok(kv_op) + } else { + Err(Error::ExecutionProofInfoConcurrentMutation) + } + } + + /// As for `compare_and_set_execution_proof_info`, but also writes to disk immediately. + pub fn compare_and_set_execution_proof_info_with_write( + &self, + prev_value: ExecutionProofInfo, + new_value: ExecutionProofInfo, + ) -> Result<(), Error> { + let kv_store_op = self.compare_and_set_execution_proof_info(prev_value, new_value)?; + self.hot_db.do_atomically(vec![kv_store_op]) + } + + /// Load the execution proof info from disk, but do not set `self.execution_proof_info`. + fn load_execution_proof_info(&self) -> Result, Error> { + self.hot_db + .get(&EXECUTION_PROOF_INFO_KEY) + .map_err(|e| Error::LoadExecutionProofInfo(e.into())) + } + + /// Store the given `execution_proof_info` to disk. + /// + /// The argument is intended to be `self.execution_proof_info`, but is passed manually to avoid + /// issues with recursive locking. 
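+    /// The returned op writes the value under `EXECUTION_PROOF_INFO_KEY` in the metadata column.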
+ fn store_execution_proof_info_in_batch( + &self, + execution_proof_info: &ExecutionProofInfo, + ) -> KeyValueStoreOp { + execution_proof_info.as_kv_store_op(EXECUTION_PROOF_INFO_KEY) + } + /// Return the slot-window describing the available historic states. /// /// Returns `(lower_limit, upper_limit)`. @@ -3395,6 +3596,178 @@ impl, Cold: ItemStore> HotColdDB Ok(()) } + /// Try to prune execution proofs older than the execution proof boundary. + /// + /// Proofs from the epoch `execution_proof_boundary` are retained. + /// This epoch is an _exclusive_ endpoint for the pruning process. + /// + /// This function only supports pruning execution proofs older than the split point, + /// which is older than (or equal to) finalization. + pub fn try_prune_execution_proofs( + &self, + force: bool, + execution_proof_boundary: Epoch, + ) -> Result<(), Error> { + // Check if zkvm fork is enabled + if self.spec.zkvm_fork_epoch().is_none() { + debug!("ZKVM fork is disabled"); + return Ok(()); + } + + let pruning_enabled = self.get_config().prune_blobs; // Use same config as blobs for now + if !force && !pruning_enabled { + debug!( + prune_blobs = pruning_enabled, + "Execution proof pruning is disabled" + ); + return Ok(()); + } + + let execution_proof_info = self.get_execution_proof_info(); + let Some(oldest_execution_proof_slot) = execution_proof_info.oldest_execution_proof_slot + else { + debug!("No execution proofs stored yet"); + return Ok(()); + }; + + let start_epoch = oldest_execution_proof_slot.epoch(E::slots_per_epoch()); + + // Prune execution proofs up until the `execution_proof_boundary - 1` or the split + // slot's epoch, whichever is older. + let split = self.get_split_info(); + let end_epoch = std::cmp::min( + execution_proof_boundary.saturating_sub(1u64), + split.slot.epoch(E::slots_per_epoch()).saturating_sub(1u64), + ); + let end_slot = end_epoch.end_slot(E::slots_per_epoch()); + + let can_prune = end_epoch != Epoch::new(0) && start_epoch <= end_epoch; + if !can_prune { + debug!( + %oldest_execution_proof_slot, + %execution_proof_boundary, + %split.slot, + %end_epoch, + %start_epoch, + "Execution proofs are pruned" + ); + return Ok(()); + } + + debug!( + %end_epoch, + %execution_proof_boundary, + "Pruning execution proofs" + ); + + // Iterate blocks backwards from the `end_epoch`. + let Some((end_block_root, _)) = self + .forwards_block_roots_iterator_until(end_slot, end_slot, || { + self.get_hot_state(&split.state_root, true)? + .ok_or(HotColdDBError::MissingSplitState( + split.state_root, + split.slot, + )) + .map(|state| (state, split.state_root)) + .map_err(Into::into) + })? + .next() + .transpose()? + else { + debug!( + %end_epoch, + %execution_proof_boundary, + "No execution proofs to prune" + ); + return Ok(()); + }; + + let mut db_ops = vec![]; + let mut removed_block_roots = vec![]; + let mut new_oldest_slot: Option = None; + + // Iterate blocks backwards until we reach blocks older than the boundary. + for tuple in ParentRootBlockIterator::new(self, end_block_root) { + let (block_root, blinded_block) = tuple?; + let slot = blinded_block.slot(); + + // Get all execution proof keys for this block + let keys = self.get_all_execution_proof_keys(&block_root); + + // Check if any proofs exist for this block + let mut block_has_proofs = false; + for key in keys { + if self + .blobs_db + .key_exists(DBColumn::BeaconExecutionProof, &key)? 
+ { + block_has_proofs = true; + db_ops.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconExecutionProof, + key, + )); + } + } + + if block_has_proofs { + debug!( + ?block_root, + %slot, + "Pruning execution proofs for block" + ); + removed_block_roots.push(block_root); + new_oldest_slot = Some(slot); + } + // Continue iterating even if this block has no proofs - proofs may be sparse + } + + // Commit deletions + if !db_ops.is_empty() { + debug!( + num_deleted = db_ops.len(), + "Deleting execution proofs from disk" + ); + self.blobs_db.do_atomically(db_ops)?; + } + + // TODO(zkproofs): Fix this to make it more readable + if !removed_block_roots.is_empty() + && let Some(mut block_cache) = self.block_cache.as_ref().map(|cache| cache.lock()) + { + for block_root in removed_block_roots { + block_cache.delete_execution_proofs(&block_root); + } + } + + // Update the execution proof info with the new oldest slot + if let Some(new_slot) = new_oldest_slot { + let new_oldest = end_slot + 1; + self.compare_and_set_execution_proof_info_with_write( + execution_proof_info.clone(), + ExecutionProofInfo { + oldest_execution_proof_slot: Some(new_oldest), + }, + )?; + debug!( + old_oldest = %new_slot, + new_oldest = %new_oldest, + "Updated execution proof info" + ); + } + + debug!("Execution proof pruning complete"); + + Ok(()) + } + + /// Get all possible execution proof keys for a given block root. + /// Returns keys for proof_ids 0 to MAX_PROOFS-1. + fn get_all_execution_proof_keys(&self, block_root: &Hash256) -> Vec> { + (0..types::MAX_PROOFS as u8) + .map(|proof_id| get_execution_proof_key(block_root, proof_id)) + .collect() + } + /// Delete *all* states from the freezer database and update the anchor accordingly. /// /// WARNING: this method deletes the genesis state and replaces it with the provided diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index ae5b2e1e571..516e858e581 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -40,6 +40,7 @@ use strum::{EnumIter, EnumString, IntoStaticStr}; pub use types::*; const DATA_COLUMN_DB_KEY_SIZE: usize = 32 + 8; +const EXECUTION_PROOF_DB_KEY_SIZE: usize = 32 + 1; // block_root + proof_id pub type ColumnIter<'a, K> = Box), Error>> + 'a>; pub type ColumnKeyIter<'a, K> = Box> + 'a>; @@ -171,6 +172,25 @@ pub fn parse_data_column_key(data: Vec) -> Result<(Hash256, ColumnIndex), Er Ok((block_root, column_index)) } +pub fn get_execution_proof_key(block_root: &Hash256, proof_id: u8) -> Vec { + let mut result = block_root.as_slice().to_vec(); + result.push(proof_id); + result +} + +pub fn parse_execution_proof_key(data: Vec) -> Result<(Hash256, u8), Error> { + if data.len() != EXECUTION_PROOF_DB_KEY_SIZE { + return Err(Error::InvalidKey(format!( + "Unexpected BeaconExecutionProof key len {}", + data.len() + ))); + } + let (block_root_bytes, proof_id_bytes) = data.split_at(32); + let block_root = Hash256::from_slice(block_root_bytes); + let proof_id = proof_id_bytes[0]; + Ok((block_root, proof_id)) +} + #[must_use] #[derive(Clone)] pub enum KeyValueStoreOp { @@ -263,6 +283,12 @@ pub enum DBColumn { BeaconDataColumn, #[strum(serialize = "bdi")] BeaconDataColumnCustodyInfo, + /// For storing execution proofs (zkVM proofs) in the blob database. + /// + /// - Key: `Hash256` block root + `u8` proof_id (33 bytes total). + /// - Value: SSZ-encoded ExecutionProof. + #[strum(serialize = "bep")] + BeaconExecutionProof, /// For full `BeaconState`s in the hot database (finalized or fork-boundary states). /// /// DEPRECATED. 
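The fixed 33-byte layout above keeps every proof for a block under the same 32-byte block-root prefix, which is what the prefix scan in `get_execution_proofs` and the per-block key deletes during pruning rely on. A minimal sketch of the key round-trip, assuming it sits alongside the two helpers so the store's `Hash256` and `Error` types are in scope (illustrative only, not part of the patch):

```rust
// Illustrative only: encode a BeaconExecutionProof key and parse it back.
// All proofs for one block share the 32-byte block-root prefix, so iterating
// the column from that prefix yields them contiguously.
fn execution_proof_key_roundtrip(block_root: Hash256) -> Result<(), Error> {
    let proof_id: u8 = 3;

    let key = get_execution_proof_key(&block_root, proof_id);
    assert_eq!(key.len(), 32 + 1); // block_root + proof_id
    assert!(key.starts_with(block_root.as_slice()));
    assert_eq!(key[32], proof_id);

    let (parsed_root, parsed_id) = parse_execution_proof_key(key)?;
    assert_eq!(parsed_root, block_root);
    assert_eq!(parsed_id, proof_id);
    Ok(())
}
```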
@@ -437,6 +463,7 @@ impl DBColumn { | Self::LightClientUpdate | Self::Dummy => 8, Self::BeaconDataColumn => DATA_COLUMN_DB_KEY_SIZE, + Self::BeaconExecutionProof => EXECUTION_PROOF_DB_KEY_SIZE, } } } diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index cf494684515..7a5979481fe 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -19,6 +19,7 @@ pub const ANCHOR_INFO_KEY: Hash256 = Hash256::repeat_byte(5); pub const BLOB_INFO_KEY: Hash256 = Hash256::repeat_byte(6); pub const DATA_COLUMN_INFO_KEY: Hash256 = Hash256::repeat_byte(7); pub const DATA_COLUMN_CUSTODY_INFO_KEY: Hash256 = Hash256::repeat_byte(8); +pub const EXECUTION_PROOF_INFO_KEY: Hash256 = Hash256::repeat_byte(9); /// State upper limit value used to indicate that a node is not storing historic states. pub const STATE_UPPER_LIMIT_NO_RETAIN: Slot = Slot::new(u64::MAX); @@ -255,3 +256,30 @@ impl StoreItem for DataColumnInfo { Ok(Self::from_ssz_bytes(bytes)?) } } + +/// Database parameters relevant to execution proof sync. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Default)] +pub struct ExecutionProofInfo { + /// The slot after which execution proofs are or *will be* available (>=). + /// + /// If this slot is in the future, then it is the first slot of the ZKVM fork, from which + /// execution proofs will be available. + /// + /// If the `oldest_execution_proof_slot` is `None` then this means that the ZKVM fork epoch + /// is not yet known. + pub oldest_execution_proof_slot: Option, +} + +impl StoreItem for ExecutionProofInfo { + fn db_column() -> DBColumn { + DBColumn::BeaconMeta + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(Self::from_ssz_bytes(bytes)?) + } +} diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 5f3c43a7e42..208667e8c1a 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -8,6 +8,10 @@ beacon chain and publishing messages to the network. Usage: lighthouse beacon_node [OPTIONS] --execution-endpoint Options: + --activate-zkvm + Activates ZKVM execution proof mode. Enables the node to subscribe to + the execution_proof gossip topic, receive and verify execution proofs + from peers, and advertise zkVM support in its ENR for peer discovery. --auto-compact-db Enable or disable automatic compaction of the database on finalization. [default: true] diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 8746e3c063c..b3ad86a1c93 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1297,6 +1297,17 @@ impl BeaconNodeHttpClient { Ok(path) } + /// Path for `v1/beacon/execution_proofs/{block_id}` + pub fn get_execution_proofs_path(&self, block_id: BlockId) -> Result { + let mut path = self.eth_path(V1)?; + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("execution_proofs") + .push(&block_id.to_string()); + Ok(path) + } + /// Path for `v1/beacon/blinded_blocks/{block_id}` pub fn get_beacon_blinded_blocks_path(&self, block_id: BlockId) -> Result { let mut path = self.eth_path(V1)?; @@ -1376,6 +1387,30 @@ impl BeaconNodeHttpClient { .map(|opt| opt.map(BeaconResponse::Unversioned)) } + /// `GET v1/beacon/execution_proofs/{block_id}` + /// + /// Returns `Ok(None)` on a 404 error. 
+ pub async fn get_execution_proofs( + &self, + block_id: BlockId, + proof_ids: Option<&[u8]>, + ) -> Result>>, Error> + { + let mut path = self.get_execution_proofs_path(block_id)?; + if let Some(proof_ids) = proof_ids { + let ids_string = proof_ids + .iter() + .map(|id| id.to_string()) + .collect::>() + .join(","); + path.query_pairs_mut().append_pair("proof_ids", &ids_string); + } + + self.get_opt(path) + .await + .map(|opt| opt.map(BeaconResponse::Unversioned)) + } + /// `GET v1/beacon/blinded_blocks/{block_id}` /// /// Returns `Ok(None)` on a 404 error. @@ -1755,6 +1790,24 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST beacon/pool/execution_proofs` + pub async fn post_beacon_pool_execution_proofs( + &self, + proof: &ExecutionProof, + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("execution_proofs"); + + self.post(path, proof).await?; + + Ok(()) + } + /// `POST beacon/rewards/sync_committee` pub async fn post_beacon_rewards_sync_committee( &self, diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index b1a61ce00cc..b96ae7dd7a0 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -688,6 +688,13 @@ pub struct BlobIndicesQuery { pub indices: Option>, } +#[derive(Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct ExecutionProofIdsQuery { + #[serde(default, deserialize_with = "option_query_vec")] + pub proof_ids: Option>, +} + #[derive(Clone, Deserialize)] #[serde(deny_unknown_fields)] pub struct BlobsVersionedHashesQuery { diff --git a/consensus/types/src/core/chain_spec.rs b/consensus/types/src/core/chain_spec.rs index da3f9b90ccc..611122823da 100644 --- a/consensus/types/src/core/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -236,6 +236,17 @@ pub struct ChainSpec { pub builder_payment_threshold_numerator: u64, pub builder_payment_threshold_denominator: u64, + /* + * zkVM execution proof params + */ + /// Whether zkVM mode is enabled via CLI flag --activate-zkvm. + /// When true, the node will subscribe to execution proof gossip, verify proofs, + /// and optionally generate proofs. zkVM activates at the Fulu fork. + /// Unlike other forks, this is not a network-wide activation but a per-node opt-in. + pub zkvm_enabled: bool, + /// Minimum number of execution proofs required from different subnets. + /// Only used when zkvm_enabled is true. + pub zkvm_min_proofs_required: usize, /* * Networking */ @@ -278,6 +289,11 @@ pub struct ChainSpec { pub(crate) blob_schedule: BlobSchedule, pub min_epochs_for_data_column_sidecars_requests: u64, + /* + * Networking zkvm + */ + pub min_epochs_for_execution_proof_requests: u64, + /* * Networking Gloas */ @@ -494,6 +510,44 @@ impl ChainSpec { .is_some_and(|gloas_fork_epoch| gloas_fork_epoch != self.far_future_epoch) } + /// Returns true if zkVM mode is enabled via CLI flag. + /// Unlike other forks, this is set via CLI and indicates per-node opt-in. + pub fn is_zkvm_enabled(&self) -> bool { + self.zkvm_enabled + } + + /// Returns the epoch at which zkVM activates. + /// Currently uses Fulu fork epoch. + /// Returns None if zkVM is disabled or Fulu is not scheduled. + pub fn zkvm_fork_epoch(&self) -> Option { + if self.zkvm_enabled { + self.fulu_fork_epoch + } else { + None + } + } + + /// Returns true if zkVM mode is enabled for the given epoch. 
+ pub fn is_zkvm_enabled_for_epoch(&self, epoch: Epoch) -> bool { + self.zkvm_fork_epoch() + .is_some_and(|zkvm_fork_epoch| epoch >= zkvm_fork_epoch) + } + + /// Returns true if zkVM mode can be used at the given fork. + pub fn is_zkvm_enabled_for_fork(&self, fork_name: ForkName) -> bool { + self.is_zkvm_enabled() && fork_name.fulu_enabled() + } + + /// Returns the minimum number of execution proofs required. + /// Only meaningful when zkVM is enabled. + pub fn zkvm_min_proofs_required(&self) -> Option { + if self.is_zkvm_enabled() { + Some(self.zkvm_min_proofs_required) + } else { + None + } + } + /// Returns a full `Fork` struct for a given epoch. pub fn fork_at_epoch(&self, epoch: Epoch) -> Fork { let current_fork_name = self.fork_name_at_epoch(epoch); @@ -1146,6 +1200,12 @@ impl ChainSpec { builder_payment_threshold_numerator: 6, builder_payment_threshold_denominator: 10, + /* + * zkVM execution proof params + */ + zkvm_enabled: false, + zkvm_min_proofs_required: default_zkvm_min_proofs_required(), + /* * Network specific */ @@ -1206,6 +1266,12 @@ impl ChainSpec { default_min_epochs_for_data_column_sidecars_requests(), max_data_columns_by_root_request: default_data_columns_by_root_request(), + /* + * Networking zkvm specific + */ + min_epochs_for_execution_proof_requests: + default_min_epochs_for_execution_proof_requests(), + /* * Application specific */ @@ -1281,6 +1347,10 @@ impl ChainSpec { // Gloas gloas_fork_version: [0x07, 0x00, 0x00, 0x00], gloas_fork_epoch: None, + // zkVM + zkvm_enabled: false, + zkvm_min_proofs_required: 2, + min_epochs_for_execution_proof_requests: 2, // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, @@ -1511,6 +1581,12 @@ impl ChainSpec { builder_payment_threshold_numerator: 6, builder_payment_threshold_denominator: 10, + /* + * zkVM execution proof params + */ + zkvm_enabled: false, + zkvm_min_proofs_required: default_zkvm_min_proofs_required(), + /* * Network specific */ @@ -1562,6 +1638,12 @@ impl ChainSpec { default_min_epochs_for_data_column_sidecars_requests(), max_data_columns_by_root_request: default_data_columns_by_root_request(), + /* + * Networking zkvm specific + */ + min_epochs_for_execution_proof_requests: + default_min_epochs_for_execution_proof_requests(), + /* * Application specific */ @@ -2022,6 +2104,11 @@ const fn default_min_epochs_for_blob_sidecars_requests() -> u64 { 4096 } +const fn default_min_epochs_for_execution_proof_requests() -> u64 { + // TODO(zkproofs): add into consensus-specs with rational + 2 +} + const fn default_blob_sidecar_subnet_count() -> u64 { 6 } @@ -2052,6 +2139,12 @@ const fn default_max_blobs_per_block_electra() -> u64 { 9 } +/// Minimum number of execution proofs required from different subnets +/// before marking an execution payload as available in ZK-VM mode. 
+pub const fn default_zkvm_min_proofs_required() -> usize { + crate::execution_proof::DEFAULT_MIN_PROOFS_REQUIRED +} + const fn default_attestation_propagation_slot_range() -> u64 { 32 } diff --git a/consensus/types/src/execution_proof.rs b/consensus/types/src/execution_proof.rs new file mode 100644 index 00000000000..1ae7fa8dffe --- /dev/null +++ b/consensus/types/src/execution_proof.rs @@ -0,0 +1,178 @@ +use crate::{ExecutionBlockHash, Hash256, Slot}; +use serde::{Deserialize, Serialize}; +use ssz::Encode; +use ssz_derive::{Decode, Encode as DeriveEncode}; +use ssz_types::VariableList; +use ssz_types::typenum; +use std::fmt::{self, Debug}; +use tree_hash_derive::TreeHash; + +use super::ExecutionProofId; + +/// Maximum size of proof data in bytes +/// +/// Note: Most proofs will fit within 300KB. Some zkVMs have 1MB proofs (currently) +/// and so this number was set to accommodate for the most zkVMs. +pub const MAX_PROOF_DATA_BYTES: usize = 1_048_576; + +/// Minimum number of execution proofs required from different proof types +/// before marking an execution payload as available in ZK-VM mode. +/// +/// This provides client diversity - nodes wait for proofs from K different +/// zkVM+EL combinations before considering an execution payload available. +pub const DEFAULT_MIN_PROOFS_REQUIRED: usize = 2; + +/// Maximum number of execution proofs that can be requested or stored. +/// This corresponds to the maximum number of proof types (zkVM+EL combinations) +/// that can be supported, which is currently 8 (ExecutionProofId is 0-7). +pub const MAX_PROOFS: usize = 8; + +type ProofData = VariableList; + +/// ExecutionProof represents a cryptographic `proof of execution` that +/// an execution payload is valid. +/// +/// In short, it is proof that if we were to run a particular execution layer client +/// with the given execution payload, they would return the output values that are attached +/// to the proof. +/// +/// Each proof is associated with a specific proof_id, which identifies the +/// zkVM and EL combination used to generate it. Multiple proofs from different +/// proof IDs can exist for the same execution payload, providing both zkVM and EL diversity. +#[derive(Clone, Serialize, Deserialize, DeriveEncode, Decode, TreeHash, PartialEq, Eq)] +pub struct ExecutionProof { + /// Which proof type (zkVM+EL combination) this proof belongs to + /// Examples: 0=SP1+Reth, 1=Risc0+Geth, 2=SP1+Geth, etc. + pub proof_id: ExecutionProofId, + + /// The slot of the beacon block this proof validates + pub slot: Slot, + + /// The block hash of the execution payload this proof validates + pub block_hash: ExecutionBlockHash, + + /// The beacon block root corresponding to the beacon block + /// with the execution payload, that this proof attests to. 
+ pub block_root: Hash256, + + /// The actual proof data + pub proof_data: ProofData, +} + +impl ExecutionProof { + pub fn new( + proof_id: ExecutionProofId, + slot: Slot, + block_hash: ExecutionBlockHash, + block_root: Hash256, + proof_data: Vec, + ) -> Result { + let proof_data = ProofData::new(proof_data) + .map_err(|e| format!("Failed to create proof data: {:?}", e))?; + + Ok(Self { + proof_id, + slot, + block_hash, + block_root, + proof_data, + }) + } + + /// Returns the size of the proof data in bytes + pub fn proof_data_size(&self) -> usize { + self.proof_data.len() + } + + /// Get a reference to the proof data as a slice + pub fn proof_data_slice(&self) -> &[u8] { + &self.proof_data + } + + /// Check if this proof is for a specific execution block hash + pub fn is_for_block(&self, block_hash: &ExecutionBlockHash) -> bool { + &self.block_hash == block_hash + } + + /// Check if this proof is from a specific proof type + pub fn is_from_proof_type(&self, proof_id: ExecutionProofId) -> bool { + self.proof_id == proof_id + } + + /// Get the proof type ID + pub fn proof_id(&self) -> ExecutionProofId { + self.proof_id + } + + /// Minimum size of an ExecutionProof in SSZ bytes (with empty proof_data) + /// TODO(zkproofs): If the proof_data is empty, then that is an invalid proof + pub fn min_size() -> usize { + use bls::FixedBytesExtended; + Self { + proof_id: ExecutionProofId::new(0).unwrap(), + slot: Slot::new(0), + block_hash: ExecutionBlockHash::zero(), + block_root: Hash256::zero(), + proof_data: ProofData::new(vec![]).unwrap(), + } + .as_ssz_bytes() + .len() + } + + /// Maximum size of an ExecutionProof in SSZ bytes (with max proof_data) + pub fn max_size() -> usize { + use bls::FixedBytesExtended; + Self { + proof_id: ExecutionProofId::new(0).unwrap(), + slot: Slot::new(0), + block_hash: ExecutionBlockHash::zero(), + block_root: Hash256::zero(), + proof_data: ProofData::new(vec![0u8; MAX_PROOF_DATA_BYTES]).unwrap(), + } + .as_ssz_bytes() + .len() + } +} + +impl Debug for ExecutionProof { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ExecutionProof") + .field("proof_id", &self.proof_id) + .field("slot", &self.slot) + .field("block_hash", &self.block_hash) + .field("block_root", &self.block_root) + .field("proof_data_size", &self.proof_data.len()) + .finish() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use bls::FixedBytesExtended; + + #[test] + fn test_execution_proof_too_large() { + let subnet_id = ExecutionProofId::new(0).unwrap(); + let slot = Slot::new(100); + let block_hash = ExecutionBlockHash::zero(); + let block_root = Hash256::zero(); + let proof_data = vec![0u8; MAX_PROOF_DATA_BYTES + 1]; + + let result = ExecutionProof::new(subnet_id, slot, block_hash, block_root, proof_data); + assert!(result.is_err()); + assert!(result.unwrap_err().contains("Failed to create proof data")); + } + + #[test] + fn test_execution_proof_max_size() { + let subnet_id = ExecutionProofId::new(0).unwrap(); + let slot = Slot::new(100); + let block_hash = ExecutionBlockHash::zero(); + let block_root = Hash256::zero(); + let proof_data = vec![0u8; MAX_PROOF_DATA_BYTES]; + + let result = ExecutionProof::new(subnet_id, slot, block_hash, block_root, proof_data); + assert!(result.is_ok()); + } +} diff --git a/consensus/types/src/execution_proof_id.rs b/consensus/types/src/execution_proof_id.rs new file mode 100644 index 00000000000..c180f5e0412 --- /dev/null +++ b/consensus/types/src/execution_proof_id.rs @@ -0,0 +1,144 @@ +use serde::{Deserialize, Serialize}; +use 
ssz::{Decode, DecodeError, Encode}; +use std::fmt::{self, Display}; +use tree_hash::TreeHash; + +/// Number of execution proofs +/// Each proof represents a different zkVM+EL combination +/// +/// TODO(zkproofs): The number 8 is a parameter that we will want to configure in the future +pub const EXECUTION_PROOF_TYPE_COUNT: u8 = 8; + +/// ExecutionProofId identifies which zkVM/proof system a proof belongs to. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)] +pub struct ExecutionProofId(u8); + +impl Encode for ExecutionProofId { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.0.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.0.ssz_append(buf) + } + + fn as_ssz_bytes(&self) -> Vec { + self.0.as_ssz_bytes() + } +} + +impl Decode for ExecutionProofId { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let value = u8::from_ssz_bytes(bytes)?; + Self::new(value).map_err(DecodeError::BytesInvalid) + } +} + +impl TreeHash for ExecutionProofId { + fn tree_hash_type() -> tree_hash::TreeHashType { + ::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { + self.0.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + ::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.0.tree_hash_root() + } +} + +impl ExecutionProofId { + /// Creates a new ExecutionProofId if the value is valid + pub fn new(id: u8) -> Result { + if id < EXECUTION_PROOF_TYPE_COUNT { + Ok(Self(id)) + } else { + Err(format!( + "Invalid ExecutionProofId: {}, must be < {}", + id, EXECUTION_PROOF_TYPE_COUNT + )) + } + } + + /// Returns the inner u8 value + pub fn as_u8(&self) -> u8 { + self.0 + } + + /// Returns the subnet ID as a usize + pub fn as_usize(&self) -> usize { + self.0 as usize + } + + /// Returns all valid subnet IDs + pub fn all() -> Vec { + (0..EXECUTION_PROOF_TYPE_COUNT).map(Self).collect() + } +} + +impl Display for ExecutionProofId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl From for u8 { + fn from(subnet_id: ExecutionProofId) -> u8 { + subnet_id.0 + } +} + +impl TryFrom for ExecutionProofId { + type Error = String; + + fn try_from(value: u8) -> Result { + Self::new(value) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_valid_proof_ids() { + for id in 0..EXECUTION_PROOF_TYPE_COUNT { + assert!(ExecutionProofId::new(id).is_ok()); + } + } + + #[test] + fn test_invalid_proof_ids() { + assert!(ExecutionProofId::new(EXECUTION_PROOF_TYPE_COUNT).is_err()); + } + + #[test] + fn test_all_proof_ids() { + let all = ExecutionProofId::all(); + assert_eq!(all.len(), EXECUTION_PROOF_TYPE_COUNT as usize); + for (idx, proof_id) in all.iter().enumerate() { + assert_eq!(proof_id.as_usize(), idx); + } + } +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 5a89fcb1d48..21eb5e1b8c7 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -20,6 +20,8 @@ pub mod core; pub mod data; pub mod deposit; pub mod execution; +pub mod execution_proof; +pub mod execution_proof_id; pub mod exit; pub mod fork; pub mod kzg_ext; @@ -58,6 +60,10 @@ pub mod chain_spec { pub use crate::core::ChainSpec; } +// Re-export execution_proof types 
for backwards compatibility +pub use crate::execution_proof::{ExecutionProof, MAX_PROOF_DATA_BYTES, MAX_PROOFS}; +pub use crate::execution_proof_id::{EXECUTION_PROOF_TYPE_COUNT, ExecutionProofId}; + pub mod beacon_block { pub use crate::block::{BlindedBeaconBlock, BlockImportSource}; } diff --git a/dummy_el/Cargo.toml b/dummy_el/Cargo.toml new file mode 100644 index 00000000000..788a52a3fb7 --- /dev/null +++ b/dummy_el/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "dummy_el" +version = "0.1.0" +edition = "2024" + +[[bin]] +name = "dummy_el" +path = "src/main.rs" + +[dependencies] +anyhow = { workspace = true } +axum = { workspace = true } +clap = { workspace = true } +hex = { workspace = true } +jsonwebtoken = "9" +serde = { workspace = true } +serde_json = { workspace = true } +tokio = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true, features = ["env-filter", "json"] } diff --git a/dummy_el/Dockerfile b/dummy_el/Dockerfile new file mode 100644 index 00000000000..1ece25c7225 --- /dev/null +++ b/dummy_el/Dockerfile @@ -0,0 +1,32 @@ +# Multi-stage build for dummy_el +FROM rust:1.88.0-bullseye AS builder + +WORKDIR /build + +# Copy the entire workspace (needed for workspace structure) +COPY . . + +# Build only dummy_el in release mode +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/build/target \ + cargo build --release -p dummy_el && \ + cp target/release/dummy_el /dummy_el + +# Runtime stage with minimal Ubuntu image +FROM ubuntu:22.04 + +RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \ + ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# Copy the binary from builder +COPY --from=builder /dummy_el /usr/local/bin/dummy_el + +# Create a fake 'geth' binary that runs dummy_el instead +# Kurtosis will call "geth init ..." and "geth --..." but we'll run dummy_el +COPY --from=builder /build/dummy_el/geth-wrapper.sh /usr/local/bin/geth +RUN chmod +x /usr/local/bin/geth + +# Expose default Engine API port +EXPOSE 8551 diff --git a/dummy_el/README.md b/dummy_el/README.md new file mode 100644 index 00000000000..0c3361a4a94 --- /dev/null +++ b/dummy_el/README.md @@ -0,0 +1,24 @@ +# Using Dummy EL + +This is a dummy EL that can be used with proof verification nodes. These nodes do not require an EL to function since they just take in proofs. + +## Quick Start + +### 1. Build the Docker Image + +From the lighthouse repository root: + +```bash +docker build -f dummy_el/Dockerfile -t dummy_el:local . +``` + +### 2. Adding to Kurtosis + +In Kurtosis, you can add the following: + +```yaml + - el_type: geth + el_image: dummy_el:local +``` + +Note that we need to use el_type `geth` as kurtosis will be looking for a binary named geth. We wrap calls to the Geth binary so that they are processed by our dummy_el. 
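The participant snippet above is abbreviated; the mixed proof-gen/verify network params file added later in this change shows the full shape of a verifier node that pairs the dummy EL with `--activate-zkvm`. A trimmed example for reference (image tags and counts are illustrative):

```yaml
participants:
  - el_type: geth             # must be geth: Kurtosis looks for a binary named geth
    el_image: dummy_el:local  # the wrapper script forwards every call to dummy_el
    cl_type: lighthouse
    cl_image: lighthouse:local
    cl_extra_params:
      - --activate-zkvm       # verify execution proofs instead of querying a real EL
    count: 3
```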
\ No newline at end of file diff --git a/dummy_el/geth-wrapper.sh b/dummy_el/geth-wrapper.sh new file mode 100644 index 00000000000..5705888b49c --- /dev/null +++ b/dummy_el/geth-wrapper.sh @@ -0,0 +1,107 @@ +#!/bin/sh +set -e + +# This is a wrapper that pretends to be geth but actually runs dummy_el +# Kurtosis may call various geth commands - we handle them all appropriately + +echo "[dummy_el geth-wrapper] Called with: $@" + +# Check if this is the "geth init" command - ignore it +if echo "$@" | grep -q "init"; then + echo "[dummy_el geth-wrapper] Ignoring 'geth init' command" + exit 0 +fi + +# Check for version/help commands +if echo "$@" | grep -qE "^(version|--version|-v|help|--help|-h)$"; then + echo "Dummy-EL/v0.1.0 (geth-compatible wrapper)" + exit 0 +fi + +# Filter out flags that we don't need for dummy_el +# These are geth-specific flags that kurtosis may pass +FILTERED_ARGS="" +for arg in "$@"; do + case "$arg" in + --override.*|--override*|-override.*|-override*) + echo "[dummy_el geth-wrapper] Ignoring geth flag: $arg" + ;; + --datadir=*|--datadir) + echo "[dummy_el geth-wrapper] Ignoring geth flag: $arg" + ;; + --syncmode=*|--syncmode) + echo "[dummy_el geth-wrapper] Ignoring geth flag: $arg" + ;; + --gcmode=*|--gcmode) + echo "[dummy_el geth-wrapper] Ignoring geth flag: $arg" + ;; + --networkid=*|--networkid) + echo "[dummy_el geth-wrapper] Ignoring geth flag: $arg" + ;; + *) + FILTERED_ARGS="$FILTERED_ARGS $arg" + ;; + esac +done + +# For any other command, we start dummy_el +# Parse geth arguments to extract what we need + +JWT_PATH="" +ENGINE_PORT="8551" +RPC_PORT="8545" +WS_PORT="8546" +METRICS_PORT="9001" +P2P_PORT="30303" +HOST="0.0.0.0" + +# Parse arguments to find JWT secret and ports +for arg in "$@"; do + case "$arg" in + --authrpc.jwtsecret=*) + JWT_PATH="${arg#*=}" + ;; + --authrpc.port=*) + ENGINE_PORT="${arg#*=}" + ;; + --http.port=*) + RPC_PORT="${arg#*=}" + ;; + --ws.port=*) + WS_PORT="${arg#*=}" + ;; + --metrics.port=*) + METRICS_PORT="${arg#*=}" + ;; + --port=*) + P2P_PORT="${arg#*=}" + ;; + --discovery.port=*) + # Use discovery port for P2P if specified + P2P_PORT="${arg#*=}" + ;; + esac +done + +# Fallback to default JWT location if not parsed +if [ -z "$JWT_PATH" ] && [ -f "/jwt/jwtsecret" ]; then + JWT_PATH="/jwt/jwtsecret" +fi + +echo "[dummy_el geth-wrapper] Starting dummy_el instead of geth" +echo "[dummy_el geth-wrapper] Engine port: $ENGINE_PORT, RPC port: $RPC_PORT, WS port: $WS_PORT" +echo "[dummy_el geth-wrapper] Metrics port: $METRICS_PORT, P2P port: $P2P_PORT" + +# Build dummy_el command +DUMMY_EL_CMD="/usr/local/bin/dummy_el --host $HOST --port $ENGINE_PORT --rpc-port $RPC_PORT --ws-port $WS_PORT --metrics-port $METRICS_PORT --p2p-port $P2P_PORT" + +# Add JWT if available +if [ -n "$JWT_PATH" ] && [ -f "$JWT_PATH" ]; then + echo "[dummy_el geth-wrapper] Using JWT from $JWT_PATH" + DUMMY_EL_CMD="$DUMMY_EL_CMD --jwt-secret $JWT_PATH" +else + echo "[dummy_el geth-wrapper] WARNING: No JWT file found" +fi + +echo "[dummy_el geth-wrapper] Executing: $DUMMY_EL_CMD" +exec $DUMMY_EL_CMD diff --git a/dummy_el/src/main.rs b/dummy_el/src/main.rs new file mode 100644 index 00000000000..b1c22030293 --- /dev/null +++ b/dummy_el/src/main.rs @@ -0,0 +1,461 @@ +use axum::{ + Json, Router, + extract::State, + http::{Request, StatusCode}, + middleware::{self, Next}, + response::Response, + routing::post, +}; +use clap::Parser; +use jsonwebtoken::{Algorithm, DecodingKey, Validation}; +use serde::{Deserialize, Serialize}; +use serde_json::{Value as JsonValue, 
json}; +use std::net::SocketAddr; +use std::path::PathBuf; +use std::sync::Arc; +use tracing::{debug, error, info, warn}; + +const JSONRPC_VERSION: &str = "2.0"; +const JWT_SECRET_LENGTH: usize = 32; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + #[arg(long, default_value = "8551", help = "Engine API port")] + port: u16, + + #[arg(long, default_value = "127.0.0.1")] + host: String, + + #[arg(long, help = "Path to JWT secret file (hex encoded)")] + jwt_secret: Option, + + #[arg(long, default_value = "8545", help = "HTTP RPC port")] + rpc_port: u16, + + #[arg(long, default_value = "8546", help = "WebSocket port")] + ws_port: u16, + + #[arg(long, default_value = "9001", help = "Metrics port")] + metrics_port: u16, + + #[arg(long, default_value = "30303", help = "P2P discovery port (TCP/UDP)")] + p2p_port: u16, +} + +#[derive(Debug, Clone)] +struct AppState { + jwt_secret: Option>, +} + +#[derive(Debug, Serialize, Deserialize)] +struct JwtClaims { + iat: u64, + #[serde(skip_serializing_if = "Option::is_none")] + id: Option, + #[serde(skip_serializing_if = "Option::is_none")] + clv: Option, +} + +#[derive(Debug, Serialize, Deserialize)] +struct JsonRpcRequest { + jsonrpc: String, + method: String, + params: JsonValue, + id: JsonValue, +} + +#[derive(Debug, Serialize, Deserialize)] +struct JsonRpcResponse { + jsonrpc: String, + #[serde(skip_serializing_if = "Option::is_none")] + result: Option, + #[serde(skip_serializing_if = "Option::is_none")] + error: Option, + id: JsonValue, +} + +#[derive(Debug, Serialize, Deserialize)] +struct JsonRpcError { + code: i64, + message: String, +} + +async fn auth_middleware( + State(state): State>, + request: Request, + next: Next, +) -> Result { + // If no JWT secret is configured, skip auth + if state.jwt_secret.is_none() { + return Ok(next.run(request).await); + } + + let jwt_secret = state.jwt_secret.as_ref().unwrap(); + + // Check for Authorization header + let auth_header = request + .headers() + .get("Authorization") + .and_then(|h| h.to_str().ok()); + + match auth_header { + Some(auth) if auth.starts_with("Bearer ") => { + let token = &auth[7..]; // Skip "Bearer " + + // Validate JWT token + let mut validation = Validation::new(Algorithm::HS256); + validation.validate_exp = false; + validation.required_spec_claims.remove("exp"); + + match jsonwebtoken::decode::( + token, + &DecodingKey::from_secret(jwt_secret), + &validation, + ) { + Ok(_) => { + debug!("JWT authentication successful"); + Ok(next.run(request).await) + } + Err(e) => { + warn!("JWT validation failed: {:?}", e); + Err((StatusCode::UNAUTHORIZED, "Invalid JWT token".to_string())) + } + } + } + Some(_) => { + warn!("Authorization header present but not in Bearer format"); + Err(( + StatusCode::UNAUTHORIZED, + "Authorization header must be in format: Bearer ".to_string(), + )) + } + None => { + warn!("Missing Authorization header"); + Err(( + StatusCode::UNAUTHORIZED, + "Missing Authorization header".to_string(), + )) + } + } +} + +async fn handle_rpc( + State(_state): State>, + Json(request): Json, +) -> (StatusCode, Json) { + info!( + method = %request.method, + params = ?request.params, + "Received RPC request" + ); + + let result = match request.method.as_str() { + "eth_syncing" => { + debug!("eth_syncing: returning false (not syncing)"); + Ok(json!(false)) + } + "eth_getBlockByNumber" => { + debug!("eth_getBlockByNumber: returning null"); + Ok(json!(null)) + } + "eth_getBlockByHash" => { + debug!("eth_getBlockByHash: returning null"); 
+ Ok(json!(null)) + } + "engine_newPayloadV1" + | "engine_newPayloadV2" + | "engine_newPayloadV3" + | "engine_newPayloadV4" => { + debug!("{}: returning SYNCING status", request.method); + Ok(json!({ + "status": "SYNCING", + "latestValidHash": null, + "validationError": null + })) + } + "engine_forkchoiceUpdatedV1" + | "engine_forkchoiceUpdatedV2" + | "engine_forkchoiceUpdatedV3" => { + debug!("{}: returning SYNCING status", request.method); + Ok(json!({ + "payloadStatus": { + "status": "SYNCING", + "latestValidHash": null, + "validationError": null + }, + "payloadId": null + })) + } + "engine_getPayloadV1" + | "engine_getPayloadV2" + | "engine_getPayloadV3" + | "engine_getPayloadV4" + | "engine_getPayloadV5" => { + debug!( + "{}: returning error (payload not available)", + request.method + ); + Err(JsonRpcError { + code: -38001, + message: "Unknown payload".to_string(), + }) + } + "engine_getPayloadBodiesByHashV1" => { + debug!("engine_getPayloadBodiesByHashV1: returning empty array"); + Ok(json!([])) + } + "engine_getPayloadBodiesByRangeV1" => { + debug!("engine_getPayloadBodiesByRangeV1: returning empty array"); + Ok(json!([])) + } + "engine_exchangeCapabilities" => { + let capabilities = vec![ + "engine_newPayloadV1", + "engine_newPayloadV2", + "engine_newPayloadV3", + "engine_newPayloadV4", + "engine_getPayloadV1", + "engine_getPayloadV2", + "engine_getPayloadV3", + "engine_getPayloadV4", + "engine_getPayloadV5", + "engine_forkchoiceUpdatedV1", + "engine_forkchoiceUpdatedV2", + "engine_forkchoiceUpdatedV3", + "engine_getPayloadBodiesByHashV1", + "engine_getPayloadBodiesByRangeV1", + "engine_getClientVersionV1", + "engine_getBlobsV1", + "engine_getBlobsV2", + ]; + debug!( + "engine_exchangeCapabilities: returning {} capabilities", + capabilities.len() + ); + Ok(json!(capabilities)) + } + "engine_getClientVersionV1" => { + debug!("engine_getClientVersionV1: returning client info"); + Ok(json!([{ + "code": "DM", + "name": "Dummy-EL", + "version": "v0.1.0", + "commit": "00000000" + }])) + } + "engine_getBlobsV1" | "engine_getBlobsV2" => { + debug!("{}: returning empty array", request.method); + Ok(json!([])) + } + _ => { + info!(method = %request.method, "Method not found"); + Err(JsonRpcError { + code: -32601, + message: format!("Method not found: {}", request.method), + }) + } + }; + + let response = match result { + Ok(result) => JsonRpcResponse { + jsonrpc: JSONRPC_VERSION.to_string(), + result: Some(result), + error: None, + id: request.id, + }, + Err(error) => JsonRpcResponse { + jsonrpc: JSONRPC_VERSION.to_string(), + result: None, + error: Some(error), + id: request.id, + }, + }; + + info!(method = %request.method, success = response.error.is_none(), "RPC response sent"); + (StatusCode::OK, Json(response)) +} + +// Simple RPC handler without JWT auth for non-Engine API ports +async fn handle_simple_rpc( + Json(request): Json, +) -> (StatusCode, Json) { + debug!(method = %request.method, "Received simple RPC request"); + + let result: Result = match request.method.as_str() { + "admin_nodeInfo" => Ok(json!({ + "id": "0ecd4a2c5f7c2a304e3acbec67efea275510d31c304fe47f4e626a2ebd5fb101", + "name": "Dummy-EL/v0.1.0", + "enode": "enode://dummy@127.0.0.1:30303", + "enr": "enr:-Iq4QDummy0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", + "ip": "127.0.0.1", + "ports": { + "discovery": 30303, + "listener": 30303 + } + })), + _ => { + // For any other method, just return a success response + Ok(json!(null)) + 
} + }; + + let response = JsonRpcResponse { + jsonrpc: JSONRPC_VERSION.to_string(), + result: Some(result.unwrap_or(json!(null))), + error: None, + id: request.id, + }; + + (StatusCode::OK, Json(response)) +} + +fn strip_prefix(s: &str) -> &str { + s.strip_prefix("0x").unwrap_or(s) +} + +fn read_jwt_secret(path: &PathBuf) -> anyhow::Result> { + let contents = std::fs::read_to_string(path)?; + let hex_str = strip_prefix(contents.trim()); + let bytes = hex::decode(hex_str)?; + + if bytes.len() != JWT_SECRET_LENGTH { + anyhow::bail!( + "Invalid JWT secret length. Expected {} bytes, got {}", + JWT_SECRET_LENGTH, + bytes.len() + ); + } + + Ok(bytes) +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + tracing_subscriber::fmt() + .with_env_filter( + tracing_subscriber::EnvFilter::try_from_default_env() + .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")), + ) + .init(); + + let args = Args::parse(); + + // Read JWT secret if provided + let jwt_secret = match &args.jwt_secret { + Some(path) => match read_jwt_secret(path) { + Ok(secret) => { + info!("JWT secret loaded from {:?}", path); + Some(secret) + } + Err(e) => { + error!("Failed to read JWT secret from {:?}: {}", path, e); + return Err(e); + } + }, + None => { + warn!("No JWT secret provided - authentication disabled!"); + warn!("This is insecure and should only be used for testing"); + None + } + }; + + info!( + host = %args.host, + engine_port = args.port, + rpc_port = args.rpc_port, + ws_port = args.ws_port, + metrics_port = args.metrics_port, + p2p_port = args.p2p_port, + jwt_auth = jwt_secret.is_some(), + "Starting Dummy Execution Layer" + ); + + let state = Arc::new(AppState { jwt_secret }); + + // Engine API server (port 8551) with JWT auth + let engine_app = Router::new() + .route("/", post(handle_rpc)) + .layer(middleware::from_fn_with_state( + state.clone(), + auth_middleware, + )) + .with_state(state.clone()); + + let engine_addr = format!("{}:{}", args.host, args.port) + .parse::() + .expect("Invalid engine address"); + + info!("Engine API listening on http://{}", engine_addr); + + // Simple RPC server for HTTP RPC (port 8545) - no JWT auth + let rpc_app = Router::new().route("/", post(handle_simple_rpc)); + let rpc_addr = format!("{}:{}", args.host, args.rpc_port) + .parse::() + .expect("Invalid RPC address"); + info!("HTTP RPC listening on http://{}", rpc_addr); + + // Simple RPC server for WebSocket (port 8546) - no JWT auth + let ws_app = Router::new().route("/", post(handle_simple_rpc)); + let ws_addr = format!("{}:{}", args.host, args.ws_port) + .parse::() + .expect("Invalid WebSocket address"); + info!("WebSocket RPC listening on http://{}", ws_addr); + + // Simple server for metrics (port 9001) + let metrics_app = Router::new().route("/", post(handle_simple_rpc)); + let metrics_addr = format!("{}:{}", args.host, args.metrics_port) + .parse::() + .expect("Invalid metrics address"); + info!("Metrics listening on http://{}", metrics_addr); + + // Bind P2P discovery ports (TCP and UDP) - just to satisfy Kurtosis port checks + let p2p_tcp_addr = format!("{}:{}", args.host, args.p2p_port) + .parse::() + .expect("Invalid P2P TCP address"); + let p2p_udp_addr = format!("{}:{}", args.host, args.p2p_port) + .parse::() + .expect("Invalid P2P UDP address"); + + // Spawn P2P TCP listener in a task to keep it alive + let p2p_tcp_listener = tokio::net::TcpListener::bind(p2p_tcp_addr).await?; + info!("P2P TCP listening on {}", p2p_tcp_addr); + let p2p_tcp_task = tokio::spawn(async move { + loop { + // Accept 
connections but do nothing with them + if let Ok((_socket, _addr)) = p2p_tcp_listener.accept().await { + // Connection accepted, just drop it + } + } + }); + + // Spawn P2P UDP listener in a task to keep it alive + let p2p_udp_socket = tokio::net::UdpSocket::bind(p2p_udp_addr).await?; + info!("P2P UDP listening on {}", p2p_udp_addr); + let p2p_udp_task = tokio::spawn(async move { + let mut buf = [0u8; 1024]; + loop { + // Receive packets but do nothing with them + let _ = p2p_udp_socket.recv(&mut buf).await; + } + }); + + info!("Ready to accept requests on all ports"); + + // Spawn all servers concurrently + let engine_listener = tokio::net::TcpListener::bind(engine_addr).await?; + let rpc_listener = tokio::net::TcpListener::bind(rpc_addr).await?; + let ws_listener = tokio::net::TcpListener::bind(ws_addr).await?; + let metrics_listener = tokio::net::TcpListener::bind(metrics_addr).await?; + + tokio::select! { + result = axum::serve(engine_listener, engine_app) => result?, + result = axum::serve(rpc_listener, rpc_app) => result?, + result = axum::serve(ws_listener, ws_app) => result?, + result = axum::serve(metrics_listener, metrics_app) => result?, + _ = p2p_tcp_task => {}, + _ = p2p_udp_task => {}, + } + + Ok(()) +} diff --git a/scripts/local_testnet/network_params.yaml b/scripts/local_testnet/network_params.yaml index a048674e630..79ff7df07b6 100644 --- a/scripts/local_testnet/network_params.yaml +++ b/scripts/local_testnet/network_params.yaml @@ -21,11 +21,7 @@ network_params: seconds_per_slot: 6 snooper_enabled: false global_log_level: debug -additional_services: - - dora - - spamoor - - prometheus_grafana - - tempo +additional_services: [] spamoor_params: image: ethpandaops/spamoor:master spammers: @@ -34,4 +30,4 @@ spamoor_params: throughput: 200 - scenario: blobs config: - throughput: 20 \ No newline at end of file + throughput: 20 diff --git a/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml b/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml new file mode 100644 index 00000000000..b7d7e7f62ea --- /dev/null +++ b/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml @@ -0,0 +1,36 @@ +# Mixed configuration: 3 normal nodes with reth, 3 zkvm nodes with dummy_el +participants: + # Normal nodes with real EL (nodes 1-3) + - el_type: reth + el_image: ghcr.io/paradigmxyz/reth:latest + cl_type: lighthouse + cl_image: lighthouse:local + cl_extra_params: + - --target-peers=5 + count: 3 + # ZKVM nodes with dummy EL (nodes 4-6) + # Uses dummy_el wrapped as geth - returns SYNCING for all engine calls + - el_type: geth + el_image: dummy_el:local + cl_type: lighthouse + cl_image: lighthouse:local + cl_extra_params: + - --activate-zkvm + - --target-peers=5 + count: 3 +network_params: + electra_fork_epoch: 0 + fulu_fork_epoch: 1 + seconds_per_slot: 2 +global_log_level: debug +snooper_enabled: false +additional_services: + - dora + - prometheus_grafana +port_publisher: + el: + enabled: true + public_port_start: 32000 + cl: + enabled: true + public_port_start: 33000 \ No newline at end of file diff --git a/scripts/local_testnet/network_params_simple.yaml b/scripts/local_testnet/network_params_simple.yaml new file mode 100644 index 00000000000..3f2ca40f371 --- /dev/null +++ b/scripts/local_testnet/network_params_simple.yaml @@ -0,0 +1,19 @@ +# Simple testnet config for testing EL listener with 2 nodes using Reth +participants: + - cl_type: lighthouse + cl_image: lighthouse:local + el_type: reth + count: 2 +network_params: + electra_fork_epoch: 0 + 
seconds_per_slot: 6 +snooper_enabled: false +global_log_level: info +additional_services: [] +port_publisher: + el: + enabled: true + public_port_start: 32000 + cl: + enabled: true + public_port_start: 33000 \ No newline at end of file diff --git a/scripts/local_testnet/start_dummy_prover.sh b/scripts/local_testnet/start_dummy_prover.sh new file mode 100755 index 00000000000..3504cb95ae3 --- /dev/null +++ b/scripts/local_testnet/start_dummy_prover.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash + +set -Eeuo pipefail + +SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +ROOT_DIR="$( cd -- "$SCRIPT_DIR/../.." &> /dev/null && pwd )" + +ENCLAVE_NAME="${ENCLAVE_NAME:-local-testnet}" +CL_SERVICE="${CL_SERVICE:-cl-1-lighthouse-geth}" +BEACON_NODE_URL="${BEACON_NODE_URL:-}" +SOURCE_BEACON_NODE_URL="${SOURCE_BEACON_NODE_URL:-}" +PROOFS_PER_BLOCK="${PROOFS_PER_BLOCK:-1}" +PROOF_DELAY_MS="${PROOF_DELAY_MS:-1000}" +BACKFILL_THRESHOLD_SLOTS="${BACKFILL_THRESHOLD_SLOTS:-32}" +BACKFILL_INTERVAL_SECS="${BACKFILL_INTERVAL_SECS:-10}" + +while getopts "e:s:b:S:p:d:t:i:h" flag; do + case "${flag}" in + e) ENCLAVE_NAME=${OPTARG};; + s) CL_SERVICE=${OPTARG};; + b) BEACON_NODE_URL=${OPTARG};; + S) SOURCE_BEACON_NODE_URL=${OPTARG};; + p) PROOFS_PER_BLOCK=${OPTARG};; + d) PROOF_DELAY_MS=${OPTARG};; + t) BACKFILL_THRESHOLD_SLOTS=${OPTARG};; + i) BACKFILL_INTERVAL_SECS=${OPTARG};; + h) + echo "Start the dummy prover against a local testnet." + echo "Note: Run this after the testnet is up so the beacon node endpoint exists." + echo + echo "Usage: $0 [options]" + echo + echo "Options:" + echo " -e ENCLAVE_NAME Kurtosis enclave name (default: $ENCLAVE_NAME)" + echo " -s CL_SERVICE Kurtosis CL service name (default: $CL_SERVICE)" + echo " -b BEACON_NODE_URL Target beacon node URL (default: from kurtosis)" + echo " -S SOURCE_BEACON_NODE_URL Source beacon node URL (default: target URL)" + echo " -p PROOFS_PER_BLOCK Proof IDs to submit per block (default: $PROOFS_PER_BLOCK)" + echo " -d PROOF_DELAY_MS Proof generation delay in ms (default: $PROOF_DELAY_MS)" + echo " -t BACKFILL_THRESHOLD Backfill threshold in slots (default: $BACKFILL_THRESHOLD_SLOTS)" + echo " -i BACKFILL_INTERVAL Backfill interval in seconds (default: $BACKFILL_INTERVAL_SECS)" + echo " -h Show this help" + echo + echo "Example:" + echo " $0 -e local-testnet -s cl-1-lighthouse-geth -p 2 -d 1000 -t 64 -i 5" + exit + ;; + esac +done + +if [ -z "$BEACON_NODE_URL" ]; then + if command -v kurtosis &> /dev/null; then + if BEACON_NODE_URL=$(kurtosis port print "$ENCLAVE_NAME" "$CL_SERVICE" http 2>/dev/null); then + echo "Using beacon node from kurtosis: $BEACON_NODE_URL" + else + echo "Failed to detect beacon node URL via kurtosis. Set -b or BEACON_NODE_URL." >&2 + exit 1 + fi + else + BEACON_NODE_URL="http://localhost:5052" + echo "kurtosis not found, defaulting to $BEACON_NODE_URL" + fi +fi + +if [ -z "$SOURCE_BEACON_NODE_URL" ]; then + SOURCE_BEACON_NODE_URL="$BEACON_NODE_URL" +fi + +echo "Starting dummy prover..." 
+echo " target: $BEACON_NODE_URL" +echo " source: $SOURCE_BEACON_NODE_URL" +echo " proofs: $PROOFS_PER_BLOCK" +echo " delay: ${PROOF_DELAY_MS}ms" +echo " backfill threshold: ${BACKFILL_THRESHOLD_SLOTS} slots" +echo " backfill interval: ${BACKFILL_INTERVAL_SECS}s" + +exec cargo run --manifest-path "$ROOT_DIR/Cargo.toml" -p zkvm_execution_layer --bin dummy-prover -- \ + --beacon-node "$BEACON_NODE_URL" \ + --source-beacon-node "$SOURCE_BEACON_NODE_URL" \ + --proofs-per-block "$PROOFS_PER_BLOCK" \ + --proof-delay-ms "$PROOF_DELAY_MS" \ + --backfill-threshold-slots "$BACKFILL_THRESHOLD_SLOTS" \ + --backfill-interval-secs "$BACKFILL_INTERVAL_SECS" diff --git a/zkvm_execution_layer/Cargo.toml b/zkvm_execution_layer/Cargo.toml new file mode 100644 index 00000000000..202a3f6933e --- /dev/null +++ b/zkvm_execution_layer/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "zkvm_execution_layer" +version = "0.1.0" +edition = "2021" + +[dependencies] +async-trait = "0.1" #TODO(zkproofs): Remove +clap = { workspace = true } +eth2 = { workspace = true, features = ["events"] } +execution_layer = { path = "../beacon_node/execution_layer" } +futures = { workspace = true } +hashbrown = "0.15" +lru = "0.12" +sensitive_url = { workspace = true } +serde = { version = "1.0", features = ["derive"] } +thiserror = "2" +tokio = { version = "1", features = ["full"] } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } +types = { path = "../consensus/types" } + +[dev-dependencies] +fixed_bytes = { workspace = true } diff --git a/zkvm_execution_layer/src/bin/dummy-prover.rs b/zkvm_execution_layer/src/bin/dummy-prover.rs new file mode 100644 index 00000000000..32e0e0124f3 --- /dev/null +++ b/zkvm_execution_layer/src/bin/dummy-prover.rs @@ -0,0 +1,287 @@ +use clap::Parser; +use eth2::types::{BlockId, EventKind, EventTopic}; +use eth2::{BeaconNodeHttpClient, Timeouts}; +use futures::StreamExt; +use sensitive_url::SensitiveUrl; +use std::time::Duration; +use tokio::time::{interval, MissedTickBehavior}; +use tracing::{debug, info, warn}; +use types::{ExecPayload, ExecutionBlockHash, ExecutionProofId, Hash256, MainnetEthSpec, Slot}; +use zkvm_execution_layer::dummy_proof_gen::DummyProofGenerator; +use zkvm_execution_layer::proof_generation::ProofGenerator; + +const DEFAULT_TIMEOUT_SECS: u64 = 12; + +/// Generate and submit dummy execution proofs to a beacon node. +#[derive(Parser, Debug)] +#[command(name = "dummy-prover")] +struct Cli { + /// Beacon node HTTP endpoint to submit proofs to. + #[arg(long, default_value = "http://localhost:5052")] + beacon_node: String, + + /// Beacon node HTTP endpoint to source blocks from (defaults to --beacon-node). + #[arg(long)] + source_beacon_node: Option, + + /// Number of proof IDs to submit per block (max 8). + #[arg(long, default_value_t = 1)] + proofs_per_block: usize, + + /// Delay in milliseconds to simulate proof generation time. + #[arg(long, default_value_t = 1000)] + proof_delay_ms: u64, + + /// Start backfill when sync_distance is >= this many slots. + #[arg(long, default_value_t = 32)] + backfill_threshold_slots: u64, + + /// Backfill check interval in seconds. 
+ #[arg(long, default_value_t = 10)] + backfill_interval_secs: u64, +} + +#[derive(Clone, Copy)] +struct BlockProofInputs { + slot: Slot, + block_root: Hash256, + block_hash: ExecutionBlockHash, +} + +struct Prover { + source: BeaconNodeHttpClient, + target: BeaconNodeHttpClient, + proof_ids: Vec, + proof_delay: Duration, + backfill_threshold_slots: u64, +} + +impl Prover { + async fn handle_block_gossip(&self, block_root: Hash256, slot: Slot) { + let Some(inputs) = self + .fetch_block_for_proofs(BlockId::Root(block_root), Some(slot)) + .await + else { + return; + }; + + if inputs.block_root != block_root { + debug!( + expected = ?block_root, + actual = ?inputs.block_root, + "Block root mismatch" + ); + } + + self.submit_dummy_proofs(inputs).await; + } + + async fn backfill_missing_slots(&self) { + if self.backfill_threshold_slots == 0 { + return; + } + + let syncing = match self.target.get_node_syncing().await { + Ok(response) => response.data, + Err(err) => { + warn!(error = ?err, "Failed to query target sync status"); + return; + } + }; + + let sync_distance = syncing.sync_distance.as_u64(); + if sync_distance < self.backfill_threshold_slots { + return; + } + + let slots_to_backfill = sync_distance.min(self.backfill_threshold_slots); + let head_slot = syncing.head_slot; + info!( + ?head_slot, + sync_distance, slots_to_backfill, "Backfilling dummy proofs" + ); + + for offset in 1..=slots_to_backfill { + let slot = head_slot.saturating_add(Slot::new(offset)); + let Some(inputs) = self + .fetch_block_for_proofs(BlockId::Slot(slot), Some(slot)) + .await + else { + continue; + }; + + self.submit_dummy_proofs(inputs).await; + } + } + + async fn submit_dummy_proofs(&self, inputs: BlockProofInputs) { + let BlockProofInputs { + slot, + block_root, + block_hash, + } = inputs; + + for proof_id in &self.proof_ids { + let generator = DummyProofGenerator::with_delay(*proof_id, self.proof_delay); + let proof = match generator.generate(slot, &block_hash, &block_root).await { + Ok(proof) => proof, + Err(err) => { + warn!( + ?block_root, + ?slot, + ?proof_id, + error = ?err, + "Failed to build dummy proof" + ); + continue; + } + }; + + if let Err(err) = self.target.post_beacon_pool_execution_proofs(&proof).await { + debug!( + ?block_root, + ?slot, + ?proof_id, + error = ?err, + "Failed to submit dummy proof" + ); + } else { + debug!(?block_root, ?slot, ?proof_id, "Submitted dummy proof"); + } + } + } + + async fn fetch_block_for_proofs( + &self, + block_id: BlockId, + slot_hint: Option, + ) -> Option { + let block = match self + .source + .get_beacon_blinded_blocks::(block_id) + .await + { + Ok(Some(response)) => response, + Ok(None) => { + debug!(?block_id, ?slot_hint, "Block not found in source node"); + return None; + } + Err(err) => { + warn!(?block_id, ?slot_hint, error = ?err, "Failed to fetch block"); + return None; + } + }; + + let block = block.data(); + let slot = block.slot(); + if let Some(expected_slot) = slot_hint { + if slot != expected_slot { + debug!( + ?block_id, + expected = ?expected_slot, + actual = ?slot, + "Block slot mismatch" + ); + } + } + + let Ok(payload) = block.message().body().execution_payload() else { + debug!(?block_id, ?slot, "Block has no execution payload"); + return None; + }; + + Some(BlockProofInputs { + slot, + block_root: block.canonical_root(), + block_hash: payload.block_hash(), + }) + } +} + +fn build_proof_ids(count: usize) -> Vec { + let mut ids = ExecutionProofId::all(); + ids.truncate(count.min(ids.len())); + ids +} + +#[tokio::main] +async fn main() -> 
Result<(), Box> { + tracing_subscriber::fmt() + .with_env_filter( + tracing_subscriber::EnvFilter::from_default_env() + .add_directive("dummy_prover=info".parse().unwrap()), + ) + .init(); + + let cli = Cli::parse(); + let target_url = SensitiveUrl::parse(&cli.beacon_node)?; + let source_url = SensitiveUrl::parse( + cli.source_beacon_node + .as_deref() + .unwrap_or(&cli.beacon_node), + )?; + + let timeouts = Timeouts::set_all(Duration::from_secs(DEFAULT_TIMEOUT_SECS)); + let target = BeaconNodeHttpClient::new(target_url, timeouts.clone()); + let source = BeaconNodeHttpClient::new(source_url, timeouts); + + let proof_ids = build_proof_ids(cli.proofs_per_block); + if proof_ids.is_empty() { + warn!("No proof IDs configured, exiting"); + return Ok(()); + } + + let prover = Prover { + source, + target, + proof_ids, + proof_delay: Duration::from_millis(cli.proof_delay_ms), + backfill_threshold_slots: cli.backfill_threshold_slots, + }; + + info!( + target = %cli.beacon_node, + source = %prover.source.server(), + proofs_per_block = prover.proof_ids.len(), + proof_delay_ms = cli.proof_delay_ms, + backfill_threshold_slots = prover.backfill_threshold_slots, + "Starting dummy prover" + ); + + let mut events = prover + .source + .get_events::(&[EventTopic::BlockGossip]) + .await + .map_err(|e| format!("Failed to subscribe to events: {:?}", e))?; + let mut backfill_interval = interval(Duration::from_secs(cli.backfill_interval_secs)); + backfill_interval.set_missed_tick_behavior(MissedTickBehavior::Delay); + + loop { + tokio::select! { + _ = tokio::signal::ctrl_c() => { + info!("Shutdown requested"); + break; + } + _ = backfill_interval.tick() => { + prover.backfill_missing_slots().await; + } + maybe_event = events.next() => { + let Some(event) = maybe_event else { + warn!("Event stream ended"); + break; + }; + match event { + Ok(EventKind::BlockGossip(gossip)) => { + prover.handle_block_gossip(gossip.block, gossip.slot).await; + } + Ok(_) => {} + Err(err) => { + warn!(error = ?err, "Event stream error"); + } + } + } + } + } + + Ok(()) +} diff --git a/zkvm_execution_layer/src/config.rs b/zkvm_execution_layer/src/config.rs new file mode 100644 index 00000000000..7ed6f6125c1 --- /dev/null +++ b/zkvm_execution_layer/src/config.rs @@ -0,0 +1,176 @@ +use serde::{Deserialize, Serialize}; +use std::collections::HashSet; +use types::{execution_proof::DEFAULT_MIN_PROOFS_REQUIRED, ExecutionProofId}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ZKVMExecutionLayerConfig { + /// Minimum number of proofs required from _different_ proof types (proof_ids) + /// in order for the node to mark an execution payload as VALID. + /// + /// Note: All nodes receive ALL proof types via the single execution_proof gossip topic. + pub min_proofs_required: usize, + + /// Which proof types to generate (empty if not generating proofs) + /// The proof ID identifies the zkVM+EL combination (e.g., 0=SP1+Reth, 1=Risc0+Geth) + pub generation_proof_types: HashSet, + + /// Proof cache size (number of execution block hashes to cache proofs for) + /// TODO(zkproofs): remove since we use da_checker for proof caches + pub proof_cache_size: usize, +} + +impl Default for ZKVMExecutionLayerConfig { + fn default() -> Self { + Self { + min_proofs_required: DEFAULT_MIN_PROOFS_REQUIRED, + generation_proof_types: HashSet::new(), + // TODO(zkproofs): This is somewhat arbitrary. 
The number was computed + // by NUMBER_OF_BLOCKS_BEFORE_FINALIZATION * NUM_PROOFS_PER_BLOCK = 64 * 8 + // We can change it to be more rigorous/scientific + proof_cache_size: 64 * 8, + } + } +} + +impl ZKVMExecutionLayerConfig { + pub fn validate(&self) -> Result<(), String> { + if self.min_proofs_required == 0 { + return Err("min_proofs_required must be at least 1".to_string()); + } + + if self.proof_cache_size == 0 { + return Err("proof_cache_size must be at least 1".to_string()); + } + + // Note: We do NOT validate that generation_proof_types.len() >= min_proofs_required + // because proof-generating nodes validate via their execution layer, not via proofs. + // Only lightweight verifier nodes (without EL) need to wait for min_proofs_required. + + Ok(()) + } + + /// Create a builder for the config + /// TODO(zkproofs): I think we can remove this + pub fn builder() -> ZKVMExecutionLayerConfigBuilder { + ZKVMExecutionLayerConfigBuilder::default() + } +} + +#[derive(Default)] +pub struct ZKVMExecutionLayerConfigBuilder { + min_proofs_required: Option, + generation_proof_types: HashSet, + proof_cache_size: Option, +} + +impl ZKVMExecutionLayerConfigBuilder { + pub fn min_proofs_required(mut self, min: usize) -> Self { + self.min_proofs_required = Some(min); + self + } + + pub fn generation_proof_types(mut self, proof_types: HashSet) -> Self { + self.generation_proof_types = proof_types; + self + } + + pub fn add_generation_proof_type(mut self, proof_type: ExecutionProofId) -> Self { + self.generation_proof_types.insert(proof_type); + self + } + + pub fn proof_cache_size(mut self, size: usize) -> Self { + self.proof_cache_size = Some(size); + self + } + + /// Build the configuration + pub fn build(self) -> Result { + let config = ZKVMExecutionLayerConfig { + min_proofs_required: self + .min_proofs_required + .unwrap_or(DEFAULT_MIN_PROOFS_REQUIRED), + generation_proof_types: self.generation_proof_types, + proof_cache_size: self.proof_cache_size.unwrap_or(1024), + }; + + config.validate()?; + Ok(config) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_valid_config() { + let proof_type_0 = ExecutionProofId::new(0).unwrap(); + let proof_type_1 = ExecutionProofId::new(1).unwrap(); + + let config = ZKVMExecutionLayerConfig::builder() + .add_generation_proof_type(proof_type_0) + .add_generation_proof_type(proof_type_1) + .min_proofs_required(2) + .build(); + + assert!(config.is_ok()); + } + + #[test] + fn test_valid_config_with_generation() { + let proof_type_0 = ExecutionProofId::new(0).unwrap(); + let proof_type_1 = ExecutionProofId::new(1).unwrap(); + + let config = ZKVMExecutionLayerConfig::builder() + .add_generation_proof_type(proof_type_0) + .add_generation_proof_type(proof_type_1) + .min_proofs_required(1) + .proof_cache_size(512) + .build(); + + assert!(config.is_ok()); + let config = config.unwrap(); + assert_eq!(config.generation_proof_types.len(), 2); + assert_eq!(config.min_proofs_required, 1); + assert_eq!(config.proof_cache_size, 512); + } + + #[test] + fn test_min_proofs_required_zero() { + let config = ZKVMExecutionLayerConfig::builder() + .min_proofs_required(0) // Invalid: must be > 0 + .build(); + + assert!(config.is_err()); + } + + #[test] + fn test_no_generation_proof_types() { + // Node can receive and verify proofs without generating any + let config = ZKVMExecutionLayerConfig::builder() + .min_proofs_required(2) + .build(); + + assert!(config.is_ok()); + let config = config.unwrap(); + assert!(config.generation_proof_types.is_empty()); + } + + 
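    // A minimal sketch (using only the builder API above) of how a proof-generating
    // node might assemble its config. The proof-id choices follow the example mapping
    // in the ExecutionProof docs (0 = SP1+Reth, 2 = SP1+Geth) and are placeholders for
    // whichever zkVM+EL combinations the node actually runs.
    fn example_generating_node_config() -> ZKVMExecutionLayerConfig {
        ZKVMExecutionLayerConfig::builder()
            .add_generation_proof_type(ExecutionProofId::new(0).unwrap()) // e.g. SP1+Reth
            .add_generation_proof_type(ExecutionProofId::new(2).unwrap()) // e.g. SP1+Geth
            .min_proofs_required(2)   // verification still waits for two distinct proof ids
            .proof_cache_size(64 * 8) // mirrors the default sizing note above
            .build()
            .expect("config passes validate()")
    }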
#[test] + fn test_generation_proof_types_less_than_min() { + // Proof-generating nodes validate via EL, not proofs + // They can generate any number of proof types regardless of min_proofs_required + let proof_type_0 = ExecutionProofId::new(0).unwrap(); + + let config = ZKVMExecutionLayerConfig::builder() + .add_generation_proof_type(proof_type_0) + .min_proofs_required(2) + .build(); + + assert!(config.is_ok()); + let config = config.unwrap(); + assert_eq!(config.generation_proof_types.len(), 1); + assert_eq!(config.min_proofs_required, 2); + } +} diff --git a/zkvm_execution_layer/src/dummy_proof_gen.rs b/zkvm_execution_layer/src/dummy_proof_gen.rs new file mode 100644 index 00000000000..596dd90f99d --- /dev/null +++ b/zkvm_execution_layer/src/dummy_proof_gen.rs @@ -0,0 +1,127 @@ +use crate::proof_generation::{ProofGenerationError, ProofGenerationResult, ProofGenerator}; +use async_trait::async_trait; +use std::time::Duration; +use tokio::time::sleep; +use types::{ExecutionBlockHash, ExecutionProof, ExecutionProofId, Hash256, Slot}; + +/// Dummy proof generator for testing +/// +/// This generator simulates the proof generation process with a configurable delay +/// and creates dummy proofs. +pub struct DummyProofGenerator { + proof_id: ExecutionProofId, + generation_delay: Duration, +} + +impl DummyProofGenerator { + /// Create a new dummy generator for the specified proof ID + pub fn new(proof_id: ExecutionProofId) -> Self { + Self { + proof_id, + generation_delay: Duration::from_millis(50), + } + } + + /// Create a new dummy generator with custom generation delay + pub fn with_delay(proof_id: ExecutionProofId, delay: Duration) -> Self { + Self { + proof_id, + generation_delay: delay, + } + } +} + +#[async_trait] +impl ProofGenerator for DummyProofGenerator { + async fn generate( + &self, + slot: Slot, + payload_hash: &ExecutionBlockHash, + block_root: &Hash256, + ) -> ProofGenerationResult { + // Simulate proof generation work + if !self.generation_delay.is_zero() { + sleep(self.generation_delay).await; + } + + let proof_data = vec![ + 0xFF, + self.proof_id.as_u8(), + payload_hash.0[0], + payload_hash.0[1], + payload_hash.0[2], + payload_hash.0[3], + ]; + + ExecutionProof::new(self.proof_id, slot, *payload_hash, *block_root, proof_data) + .map_err(ProofGenerationError::ProofGenerationFailed) + } + + fn proof_id(&self) -> ExecutionProofId { + self.proof_id + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_dummy_generator_success() { + let subnet = ExecutionProofId::new(0).unwrap(); + let generator = DummyProofGenerator::new(subnet); + let slot = Slot::new(100); + let block_hash = ExecutionBlockHash::repeat_byte(1); + let block_root = Hash256::repeat_byte(2); + + let result = generator.generate(slot, &block_hash, &block_root).await; + assert!(result.is_ok()); + + let proof = result.unwrap(); + assert_eq!(proof.proof_id, subnet); + assert_eq!(proof.slot, slot); + assert_eq!(proof.block_hash, block_hash); + assert_eq!(proof.block_root, block_root); + assert!(proof.proof_data_size() > 0); + } + + #[tokio::test] + async fn test_dummy_generator_deterministic() { + let subnet = ExecutionProofId::new(1).unwrap(); + let generator = DummyProofGenerator::new(subnet); + let slot = Slot::new(200); + let block_hash = ExecutionBlockHash::repeat_byte(42); + let block_root = Hash256::repeat_byte(99); + + // Generate twice + let proof1 = generator + .generate(slot, &block_hash, &block_root) + .await + .unwrap(); + let proof2 = generator + .generate(slot, &block_hash, 
&block_root) + .await + .unwrap(); + + // Should be identical + assert_eq!(proof1.proof_data_slice(), proof2.proof_data_slice()); + } + + #[tokio::test] + async fn test_dummy_generator_custom_delay() { + // TODO(zkproofs): Maybe remove, mainly need it as a temp check + let subnet = ExecutionProofId::new(0).unwrap(); + let delay = Duration::from_millis(1); + let generator = DummyProofGenerator::with_delay(subnet, delay); + let slot = Slot::new(100); + let block_hash = ExecutionBlockHash::repeat_byte(1); + let block_root = Hash256::repeat_byte(2); + + let start = tokio::time::Instant::now(); + let result = generator.generate(slot, &block_hash, &block_root).await; + let elapsed = start.elapsed(); + + assert!(result.is_ok()); + assert!(elapsed >= delay); + } +} diff --git a/zkvm_execution_layer/src/dummy_proof_verifier.rs b/zkvm_execution_layer/src/dummy_proof_verifier.rs new file mode 100644 index 00000000000..9f322ed7551 --- /dev/null +++ b/zkvm_execution_layer/src/dummy_proof_verifier.rs @@ -0,0 +1,103 @@ +use crate::proof_verification::{ProofVerificationResult, ProofVerifier, VerificationError}; +use std::time::Duration; +use types::{ExecutionProof, ExecutionProofId}; + +/// Dummy proof verifier for testing +/// +/// This verifier simulates the verification process with a configurable delay +/// and always returns successful verification. +pub struct DummyVerifier { + proof_id: ExecutionProofId, + verification_delay: Duration, +} + +impl DummyVerifier { + /// Create a new dummy verifier for the specified proof ID + pub fn new(proof_id: ExecutionProofId) -> Self { + Self { + proof_id, + verification_delay: Duration::from_millis(10), + } + } + + /// Create a new dummy verifier with custom verification delay + pub fn with_delay(proof_id: ExecutionProofId, delay: Duration) -> Self { + Self { + proof_id, + verification_delay: delay, + } + } +} + +impl ProofVerifier for DummyVerifier { + fn verify(&self, proof: &ExecutionProof) -> ProofVerificationResult { + // Check that the proof is for the correct subnet + if proof.proof_id != self.proof_id { + return Err(VerificationError::UnsupportedProofID(proof.proof_id)); + } + + // Simulate verification work + if !self.verification_delay.is_zero() { + std::thread::sleep(self.verification_delay); + } + + // Dummy verifier always succeeds + // In a real implementation, this would cryptographically verify that + // proof.proof_data is a valid zkVM proof for proof.block_hash + Ok(true) + } + + fn proof_id(&self) -> ExecutionProofId { + self.proof_id + } +} + +#[cfg(test)] +mod tests { + use super::*; + use fixed_bytes::FixedBytesExtended; + use types::ExecutionBlockHash; + + fn create_test_proof( + subnet_id: ExecutionProofId, + block_hash: types::ExecutionBlockHash, + ) -> ExecutionProof { + use types::{Hash256, Slot}; + ExecutionProof::new( + subnet_id, + Slot::new(100), + block_hash, + Hash256::zero(), + vec![1, 2, 3, 4], + ) + .unwrap() + } + + #[tokio::test] + async fn test_dummy_verifier_success() { + let subnet = ExecutionProofId::new(0).unwrap(); + let verifier = DummyVerifier::new(subnet); + let block_hash = ExecutionBlockHash::zero(); + let proof = create_test_proof(subnet, block_hash); + + let result = verifier.verify(&proof); + assert!(result.is_ok()); + assert!(result.unwrap()); + } + + #[tokio::test] + async fn test_dummy_verifier_wrong_subnet() { + let subnet_0 = ExecutionProofId::new(0).unwrap(); + let subnet_1 = ExecutionProofId::new(1).unwrap(); + let verifier = DummyVerifier::new(subnet_0); + let block_hash = ExecutionBlockHash::zero(); + 
let proof = create_test_proof(subnet_1, block_hash);
+
+        let result = verifier.verify(&proof);
+        assert!(result.is_err());
+        assert!(matches!(
+            result.unwrap_err(),
+            VerificationError::UnsupportedProofID(_)
+        ));
+    }
+}
diff --git a/zkvm_execution_layer/src/engine_api.rs b/zkvm_execution_layer/src/engine_api.rs
new file mode 100644
index 00000000000..c0f7c4ebde2
--- /dev/null
+++ b/zkvm_execution_layer/src/engine_api.rs
@@ -0,0 +1,50 @@
+use execution_layer::{BlockProposalContentsType, Error as ExecutionLayerError, PayloadStatus};
+use types::{EthSpec, ExecPayload, ExecutionBlockHash};
+
+type PayloadId = [u8; 8];
+
+pub struct ZKVMEngineApi<E: EthSpec> {
+    _phantom: std::marker::PhantomData<E>,
+}
+
+impl<E: EthSpec> Default for ZKVMEngineApi<E> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<E: EthSpec> ZKVMEngineApi<E> {
+    pub fn new() -> Self {
+        Self {
+            _phantom: std::marker::PhantomData,
+        }
+    }
+
+    /// Verify a new execution payload using a ZK proof
+    pub async fn new_payload(
+        &self,
+        _execution_payload: &impl ExecPayload<E>,
+    ) -> Result<PayloadStatus, ExecutionLayerError> {
+        // TODO(zkproofs): There are some engine_api checks that should be made, but these
+        // should be done when we have the proof; check the EL newPayload method to see what they are.
+        Ok(PayloadStatus::Syncing)
+    }
+
+    /// Update fork choice state
+    pub async fn forkchoice_updated(
+        &self,
+        _head_block_hash: ExecutionBlockHash,
+    ) -> Result<PayloadStatus, ExecutionLayerError> {
+        // For now, just return Valid status
+        Ok(PayloadStatus::Valid)
+    }
+
+    /// Get a payload for block production
+    pub async fn get_payload(
+        &self,
+        _payload_id: PayloadId,
+    ) -> Result<BlockProposalContentsType<E>, ExecutionLayerError> {
+        // TODO(zkproofs): use mev-boost
+        Err(ExecutionLayerError::CannotProduceHeader)
+    }
+}
diff --git a/zkvm_execution_layer/src/lib.rs b/zkvm_execution_layer/src/lib.rs
new file mode 100644
index 00000000000..d8a6ec74af7
--- /dev/null
+++ b/zkvm_execution_layer/src/lib.rs
@@ -0,0 +1,18 @@
+pub mod config;
+
+pub mod proof_generation;
+pub mod proof_verification;
+
+pub mod registry_proof_gen;
+pub mod registry_proof_verification;
+
+pub mod dummy_proof_gen;
+pub mod dummy_proof_verifier;
+
+/// Engine API implementation for ZK-VM execution
+pub mod engine_api;
+
+/// Re-export the main ZK-VM engine API and config
+pub use config::ZKVMExecutionLayerConfig;
+pub use engine_api::ZKVMEngineApi;
+pub use registry_proof_gen::GeneratorRegistry;
diff --git a/zkvm_execution_layer/src/proof_generation.rs b/zkvm_execution_layer/src/proof_generation.rs
new file mode 100644
index 00000000000..9254d5fe560
--- /dev/null
+++ b/zkvm_execution_layer/src/proof_generation.rs
@@ -0,0 +1,51 @@
+use async_trait::async_trait;
+use std::sync::Arc;
+use thiserror::Error;
+use types::{ExecutionProof, ExecutionProofId};
+
+/// Result type for proof generation operations
+pub type ProofGenerationResult = Result<ExecutionProof, ProofGenerationError>;
+
+/// Errors that can occur during proof generation
+#[derive(Debug, Error)]
+pub enum ProofGenerationError {
+    #[error("Proof generation failed: {0}")]
+    ProofGenerationFailed(String),
+
+    #[error("Missing execution witness data: {0}")]
+    MissingWitnessData(String),
+
+    #[error("Invalid execution witness: {0}")]
+    InvalidWitness(String),
+
+    #[error("Proof generation timeout")]
+    Timeout,
+
+    #[error("Insufficient resources: {0}")]
+    InsufficientResources(String),
+
+    #[error("Internal error: {0}")]
+    Internal(String),
+}
+
+/// Trait for proof generation (one implementation per zkVM+EL combo)
+///
+/// Each proof system (RISC Zero, SP1, etc.) + EL combination implements this trait
+/// to generate proofs for execution payloads from their subnet.
+#[async_trait]
+pub trait ProofGenerator: Send + Sync {
+    /// Generate a proof for the given execution payload
+    async fn generate(
+        &self,
+        slot: types::Slot,
+        payload_hash: &types::ExecutionBlockHash,
+        block_root: &types::Hash256,
+    ) -> ProofGenerationResult;
+
+    /// Get the proof ID this generator produces proofs for
+    fn proof_id(&self) -> ExecutionProofId;
+}
+
+/// Type-erased proof generator, mainly for convenience
+/// TODO(zkproofs): Check if we can remove this
+pub type DynProofGenerator = Arc<dyn ProofGenerator>;
diff --git a/zkvm_execution_layer/src/proof_verification.rs b/zkvm_execution_layer/src/proof_verification.rs
new file mode 100644
index 00000000000..164f56bd1ef
--- /dev/null
+++ b/zkvm_execution_layer/src/proof_verification.rs
@@ -0,0 +1,43 @@
+use std::sync::Arc;
+use thiserror::Error;
+use types::{ExecutionProof, ExecutionProofId};
+
+/// Result type for proof verification operations
+pub type ProofVerificationResult = Result<bool, VerificationError>;
+
+/// Errors that can occur during proof verification
+#[derive(Debug, Error)]
+pub enum VerificationError {
+    #[error("Proof verification failed: {0}")]
+    VerificationFailed(String),
+
+    #[error("Invalid proof format: {0}")]
+    InvalidProofFormat(String),
+
+    #[error("Unsupported proof ID: {0}")]
+    UnsupportedProofID(ExecutionProofId),
+
+    #[error("Proof size mismatch: expected {expected}, got {actual}")]
+    ProofSizeMismatch { expected: usize, actual: usize },
+
+    #[error("Internal error: {0}")]
+    Internal(String),
+}
+
+/// Trait for proof verification (one implementation per zkVM+EL combination)
+pub trait ProofVerifier: Send + Sync {
+    /// Verify that the proof is valid.
+    ///
+    /// TODO(zkproofs): we can probably collapse Ok(false) and Err, or make Ok(false) an enum variant
+    ///
+    /// Returns:
+    /// - Ok(true) if the proof is valid,
+    /// - Ok(false) if the proof is invalid (but well-formed),
+    /// - Err if the proof is malformed or verification cannot be performed.
+    fn verify(&self, proof: &ExecutionProof) -> ProofVerificationResult;
+
+    fn proof_id(&self) -> ExecutionProofId;
+}
+
+/// Type-erased proof verifier
+pub type DynProofVerifier = Arc<dyn ProofVerifier>;
diff --git a/zkvm_execution_layer/src/registry_proof_gen.rs b/zkvm_execution_layer/src/registry_proof_gen.rs
new file mode 100644
index 00000000000..01ded0af454
--- /dev/null
+++ b/zkvm_execution_layer/src/registry_proof_gen.rs
@@ -0,0 +1,132 @@
+use crate::dummy_proof_gen::DummyProofGenerator;
+use crate::proof_generation::{DynProofGenerator, ProofGenerator};
+use hashbrown::HashMap;
+use std::collections::HashSet;
+use std::sync::Arc;
+use types::ExecutionProofId;
+
+/// Registry mapping proof IDs to proof generators
+///
+/// Each proof ID represents a different zkVM/proof system, and this registry
+/// maintains the mapping from proof ID to the appropriate generator implementation.
+#[derive(Clone)]
+pub struct GeneratorRegistry {
+    generators: HashMap<ExecutionProofId, DynProofGenerator>,
+}
+
+impl GeneratorRegistry {
+    /// Create a new empty generator registry
+    pub fn new() -> Self {
+        Self {
+            generators: HashMap::new(),
+        }
+    }
+
+    /// Create a registry with dummy generators for the specified proof IDs
+    pub fn new_with_dummy_generators(enabled_subnets: HashSet<ExecutionProofId>) -> Self {
+        let mut generators = HashMap::new();
+
+        for subnet_id in enabled_subnets {
+            generators.insert(
+                subnet_id,
+                Arc::new(DummyProofGenerator::new(subnet_id)) as DynProofGenerator,
+            );
+        }
+
+        Self { generators }
+    }
+
+    pub fn register_generator(&mut self, generator: DynProofGenerator) {
+        let proof_id = generator.proof_id();
+        self.generators.insert(proof_id, generator);
+    }
+
+    pub fn get_generator(&self, proof_id: ExecutionProofId) -> Option<DynProofGenerator> {
+        self.generators.get(&proof_id).cloned()
+    }
+
+    /// Check if a generator is registered for a proof ID
+    pub fn has_generator(&self, proof_id: ExecutionProofId) -> bool {
+        self.generators.contains_key(&proof_id)
+    }
+
+    /// Get the number of registered generators
+    pub fn len(&self) -> usize {
+        self.generators.len()
+    }
+
+    /// Check if the registry is empty
+    pub fn is_empty(&self) -> bool {
+        self.generators.is_empty()
+    }
+
+    pub fn proof_ids(&self) -> Vec<ExecutionProofId> {
+        self.generators.keys().copied().collect()
+    }
+}
+
+impl Default for GeneratorRegistry {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_dummy_generators_registry() {
+        let mut enabled_subnets = HashSet::new();
+        enabled_subnets.insert(ExecutionProofId::new(0).unwrap());
+        enabled_subnets.insert(ExecutionProofId::new(1).unwrap());
+
+        let registry = GeneratorRegistry::new_with_dummy_generators(enabled_subnets);
+        assert!(!registry.is_empty());
+        assert_eq!(registry.len(), 2);
+
+        assert!(registry.has_generator(ExecutionProofId::new(0).unwrap()));
+        assert!(registry.has_generator(ExecutionProofId::new(1).unwrap()));
+        assert!(!registry.has_generator(ExecutionProofId::new(2).unwrap()));
+    }
+
+    #[test]
+    fn test_register_generator() {
+        let mut registry = GeneratorRegistry::new();
+        let subnet_id = ExecutionProofId::new(0).unwrap();
+        let generator = Arc::new(DummyProofGenerator::new(subnet_id));
+
+        registry.register_generator(generator);
+
+        assert_eq!(registry.len(), 1);
+        assert!(registry.has_generator(subnet_id));
+    }
+
+    #[test]
+    fn test_get_generator() {
+        let mut enabled_subnets = HashSet::new();
+        enabled_subnets.insert(ExecutionProofId::new(3).unwrap());
+
+        let registry = GeneratorRegistry::new_with_dummy_generators(enabled_subnets);
+        let subnet_id = ExecutionProofId::new(3).unwrap();
+
+        let generator = registry.get_generator(subnet_id);
+        assert!(generator.is_some());
+        assert_eq!(generator.unwrap().proof_id(), subnet_id);
+    }
+
+    #[test]
+    fn test_subnet_ids() {
+        let mut enabled_subnets = HashSet::new();
+        enabled_subnets.insert(ExecutionProofId::new(0).unwrap());
+        enabled_subnets.insert(ExecutionProofId::new(5).unwrap());
+
+        let registry = GeneratorRegistry::new_with_dummy_generators(enabled_subnets.clone());
+        let subnet_ids = registry.proof_ids();
+
+        assert_eq!(subnet_ids.len(), 2);
+        for subnet_id in enabled_subnets {
+            assert!(subnet_ids.contains(&subnet_id));
+        }
+    }
+}
diff --git a/zkvm_execution_layer/src/registry_proof_verification.rs b/zkvm_execution_layer/src/registry_proof_verification.rs
new file mode 100644
index 00000000000..e2f914e1965
--- /dev/null
+++ b/zkvm_execution_layer/src/registry_proof_verification.rs
@@ -0,0 +1,138 @@
+use crate::dummy_proof_verifier::DummyVerifier;
+use crate::proof_verification::{DynProofVerifier, ProofVerifier};
+use hashbrown::HashMap;
+use std::sync::Arc;
+use types::ExecutionProofId;
+
+/// Registry mapping subnet IDs to proof verifiers
+///
+/// Each subnet can have a different zkVM/proof system, and this registry
+/// maintains the mapping from subnet ID to the appropriate verifier implementation.
+#[derive(Clone)]
+pub struct VerifierRegistry {
+    verifiers: HashMap<ExecutionProofId, DynProofVerifier>,
+}
+
+impl VerifierRegistry {
+    /// Create a new empty verifier registry
+    pub fn new() -> Self {
+        Self {
+            verifiers: HashMap::new(),
+        }
+    }
+
+    /// Create a registry with dummy verifiers for all subnets
+    /// This is useful for Phase 1 testing
+    pub fn new_with_dummy_verifiers() -> Self {
+        let mut verifiers = HashMap::new();
+
+        // Register dummy verifiers for all 8 subnets
+        for id in 0..types::EXECUTION_PROOF_TYPE_COUNT {
+            if let Ok(proof_id) = ExecutionProofId::new(id) {
+                verifiers.insert(
+                    proof_id,
+                    Arc::new(DummyVerifier::new(proof_id)) as DynProofVerifier,
+                );
+            }
+        }
+
+        Self { verifiers }
+    }
+
+    /// Register a verifier for a specific subnet
+    pub fn register_verifier(&mut self, verifier: DynProofVerifier) {
+        let subnet_id = verifier.proof_id();
+        self.verifiers.insert(subnet_id, verifier);
+    }
+
+    /// Get a verifier for a specific proof ID
+    pub fn get_verifier(&self, proof_id: ExecutionProofId) -> Option<DynProofVerifier> {
+        self.verifiers.get(&proof_id).cloned()
+    }
+
+    /// Check if a verifier is registered for a proof ID
+    pub fn has_verifier(&self, proof_id: ExecutionProofId) -> bool {
+        self.verifiers.contains_key(&proof_id)
+    }
+
+    /// Get the number of registered verifiers
+    pub fn len(&self) -> usize {
+        self.verifiers.len()
+    }
+
+    /// Check if the registry is empty
+    pub fn is_empty(&self) -> bool {
+        self.verifiers.is_empty()
+    }
+
+    /// Get all registered subnet IDs
+    pub fn proof_ids(&self) -> Vec<ExecutionProofId> {
+        self.verifiers.keys().copied().collect()
+    }
+}
+
+impl Default for VerifierRegistry {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_empty_registry() {
+        let registry = VerifierRegistry::new();
+        assert!(registry.is_empty());
+        assert_eq!(registry.len(), 0);
+    }
+
+    #[test]
+    fn test_dummy_verifiers_registry() {
+        let registry = VerifierRegistry::new_with_dummy_verifiers();
+        assert!(!registry.is_empty());
+        assert_eq!(registry.len(), 8); // All 8 subnets
+
+        // Check all proof IDs are registered
+        for id in 0..8 {
+            let proof_id = ExecutionProofId::new(id).unwrap();
+            assert!(registry.has_verifier(proof_id));
+            assert!(registry.get_verifier(proof_id).is_some());
+        }
+    }
+
+    #[test]
+    fn test_register_verifier() {
+        let mut registry = VerifierRegistry::new();
+        let proof_id = ExecutionProofId::new(0).unwrap();
+        let verifier = Arc::new(DummyVerifier::new(proof_id));
+
+        registry.register_verifier(verifier);
+
+        assert_eq!(registry.len(), 1);
+        assert!(registry.has_verifier(proof_id));
+    }
+
+    #[test]
+    fn test_get_verifier() {
+        let registry = VerifierRegistry::new_with_dummy_verifiers();
+        let proof_id = ExecutionProofId::new(3).unwrap();
+
+        let verifier = registry.get_verifier(proof_id);
+        assert!(verifier.is_some());
+        assert_eq!(verifier.unwrap().proof_id(), proof_id);
+    }
+
+    #[test]
+    fn test_proof_ids() {
+        let registry = VerifierRegistry::new_with_dummy_verifiers();
+        let proof_ids = registry.proof_ids();
+
+        assert_eq!(proof_ids.len(), 8);
+        for id in 0..8 {
+            let proof_id = ExecutionProofId::new(id).unwrap();
+            assert!(proof_ids.contains(&proof_id));
+        }
+    }
+}
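To close out the new crate, a short end-to-end sketch (not part of the patch) of how the generator and verifier registries above compose: a prover-side registry produces dummy proofs for two proof IDs, and a verifier-side registry checks them and counts distinct valid proof types against `min_proofs_required`. The module paths, the `tokio` runtime, and the hard-coded `min_proofs_required = 2` are illustrative assumptions.

```rust
use std::collections::HashSet;
use types::{ExecutionBlockHash, ExecutionProofId, Hash256, Slot};
use zkvm_execution_layer::proof_generation::ProofGenerator;
use zkvm_execution_layer::proof_verification::ProofVerifier;
use zkvm_execution_layer::registry_proof_verification::VerifierRegistry;
use zkvm_execution_layer::GeneratorRegistry;

#[tokio::main]
async fn main() {
    // Prover side: dummy generators for two proof types (zkVM+EL combos).
    let mut enabled = HashSet::new();
    enabled.insert(ExecutionProofId::new(0).unwrap());
    enabled.insert(ExecutionProofId::new(1).unwrap());
    let generators = GeneratorRegistry::new_with_dummy_generators(enabled);

    let slot = Slot::new(100);
    let block_hash = ExecutionBlockHash::repeat_byte(1);
    let block_root = Hash256::repeat_byte(2);

    // Verifier side: one dummy verifier per proof ID; count distinct valid
    // proof types against min_proofs_required before treating the payload
    // as VALID (the threshold a lightweight verifier node would apply).
    let verifiers = VerifierRegistry::new_with_dummy_verifiers();
    let min_proofs_required = 2;
    let mut valid_proof_ids = HashSet::new();

    for proof_id in generators.proof_ids() {
        let generator = generators.get_generator(proof_id).unwrap();
        let proof = generator
            .generate(slot, &block_hash, &block_root)
            .await
            .unwrap();

        let verifier = verifiers.get_verifier(proof.proof_id).unwrap();
        if verifier.verify(&proof).unwrap() {
            valid_proof_ids.insert(proof.proof_id);
        }
    }

    assert!(valid_proof_ids.len() >= min_proofs_required);
}
```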