diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 415f4db0e67..de17cc00e77 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -3,6 +3,7 @@ name: docker
 on:
     push:
         branches:
+            - optional-proofs
             - unstable
             - stable
         tags:
@@ -13,10 +14,9 @@ concurrency:
     cancel-in-progress: true
 
 env:
-    DOCKER_PASSWORD: ${{ secrets.DH_KEY }}
-    DOCKER_USERNAME: ${{ secrets.DH_ORG }}
     # Enable self-hosted runners for the sigp repo only.
     SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }}
+    REGISTRY: ghcr.io
 
 jobs:
     # Extract the VERSION which is either `latest` or `vX.Y.Z`, and the VERSION_SUFFIX
@@ -38,6 +38,11 @@ jobs:
               run: |
                  echo "VERSION=latest" >> $GITHUB_ENV
                  echo "VERSION_SUFFIX=-unstable" >> $GITHUB_ENV
+            - name: Extract version (if optional-proofs)
+              if: github.event.ref == 'refs/heads/optional-proofs'
+              run: |
+                  echo "VERSION=latest" >> $GITHUB_ENV
+                  echo "VERSION_SUFFIX=-optional-proofs" >> $GITHUB_ENV
             - name: Extract version (if tagged release)
               if: startsWith(github.event.ref, 'refs/tags')
               run: |
@@ -52,8 +57,7 @@ jobs:
         runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release"]') || 'ubuntu-22.04' }}
         strategy:
             matrix:
-                binary: [lighthouse,
-                         lcli]
+                binary: [lighthouse]
                 cpu_arch: [aarch64, x86_64]
 
                 include:
@@ -68,9 +72,12 @@ jobs:
             - name: Update Rust
              if: env.SELF_HOSTED_RUNNERS == 'false'
              run: rustup update stable
-            - name: Dockerhub login
-              run: |
-                  echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin
+            - name: Login to GitHub Container Registry
+              uses: docker/login-action@v3
+              with:
+                  registry: ${{ env.REGISTRY }}
+                  username: ${{ github.actor }}
+                  password: ${{ secrets.GITHUB_TOKEN }}
 
             - name: Sets env vars for Lighthouse
               if: startsWith(matrix.binary, 'lighthouse')
@@ -82,11 +89,6 @@ jobs:
               run: |
                   echo "MAKE_CMD=build-${{ matrix.cpu_arch }}" >> $GITHUB_ENV
 
-            - name: Set `make` command for lcli
-              if: startsWith(matrix.binary, 'lcli')
-              run: |
-                  echo "MAKE_CMD=build-lcli-${{ matrix.cpu_arch }}" >> $GITHUB_ENV
-
             - name: Cross build binaries
               run: |
                   cargo install cross
@@ -123,28 +125,14 @@ jobs:
                   platforms: linux/${{ env.SHORT_ARCH }}
                   push: true
                   tags: |
-                      ${{ github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}
-
-            - name: Build and push (lcli)
-              if: startsWith(matrix.binary, 'lcli')
-              uses: docker/build-push-action@v5
-              with:
-                  file: ./lcli/Dockerfile.cross
-                  context: .
-                  platforms: linux/${{ env.SHORT_ARCH }}
-                  push: true
-
-                  tags: |
-                      ${{ github.repository_owner}}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}
-
+                      ${{ env.REGISTRY }}/${{ github.repository }}/${{ matrix.binary }}:${{ env.VERSION }}-${{ env.SHORT_ARCH }}${{ env.VERSION_SUFFIX }}
 
     build-docker-multiarch:
         name: build-docker-${{ matrix.binary }}-multiarch
         runs-on: ubuntu-22.04
         strategy:
             matrix:
-                binary: [lighthouse,
-                         lcli]
+                binary: [lighthouse]
         needs: [build-docker-single-arch, extract-version]
         env:
             VERSION: ${{ needs.extract-version.outputs.VERSION }}
@@ -153,13 +141,16 @@ jobs:
 
             - name: Set up Docker Buildx
               uses: docker/setup-buildx-action@v3
 
-            - name: Dockerhub login
-              run: |
-                  echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin
+            - name: Login to GitHub Container Registry
+              uses: docker/login-action@v3
+              with:
+                  registry: ${{ env.REGISTRY }}
+                  username: ${{ github.actor }}
+                  password: ${{ secrets.GITHUB_TOKEN }}
 
             - name: Create and push multiarch manifests
               run: |
-                  docker buildx imagetools create -t ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}${VERSION_SUFFIX} \
-                      ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}-arm64${VERSION_SUFFIX} \
-                      ${{ github.repository_owner}}/${{ matrix.binary }}:${VERSION}-amd64${VERSION_SUFFIX};
+                  docker buildx imagetools create -t ${{ env.REGISTRY }}/${{ github.repository }}/${{ matrix.binary }}:${VERSION}${VERSION_SUFFIX} \
+                      ${{ env.REGISTRY }}/${{ github.repository }}/${{ matrix.binary }}:${VERSION}-arm64${VERSION_SUFFIX} \
+                      ${{ env.REGISTRY }}/${{ github.repository }}/${{ matrix.binary }}:${VERSION}-amd64${VERSION_SUFFIX};
diff --git a/beacon_node/http_api/src/beacon/pool.rs b/beacon_node/http_api/src/beacon/pool.rs
index 50a257db01b..3c4021fa611 100644
--- a/beacon_node/http_api/src/beacon/pool.rs
+++ b/beacon_node/http_api/src/beacon/pool.rs
@@ -1,5 +1,7 @@
 use crate::task_spawner::{Priority, TaskSpawner};
-use crate::utils::{NetworkTxFilter, OptionalConsensusVersionHeaderFilter, ResponseFilter};
+use crate::utils::{
+    NetworkTxFilter, OptionalConsensusVersionHeaderFilter, ResponseFilter, SyncTxFilter,
+};
 use crate::version::{
     ResponseIncludesVersion, V1, V2, add_consensus_version_header, beacon_response,
     unsupported_version_rejection,
@@ -10,10 +12,10 @@ use beacon_chain::execution_proof_verification::{
 };
 use beacon_chain::observed_data_sidecars::Observe;
 use beacon_chain::observed_operations::ObservationOutcome;
-use beacon_chain::{BeaconChain, BeaconChainTypes};
+use beacon_chain::{AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes};
 use eth2::types::{AttestationPoolQuery, EndpointVersion, Failure, GenericResponse};
 use lighthouse_network::PubsubMessage;
-use network::NetworkMessage;
+use network::{NetworkMessage, SyncMessage};
 use operation_pool::ReceivedPreCapella;
 use slot_clock::SlotClock;
 use std::collections::HashSet;
@@ -533,6 +535,7 @@ pub fn post_beacon_pool_attestations_v2<T: BeaconChainTypes>(
 /// If the proof makes a block available, the block will be imported.
 pub fn post_beacon_pool_execution_proofs<T: BeaconChainTypes>(
     network_tx_filter: &NetworkTxFilter<T>,
+    sync_tx_filter: &SyncTxFilter<T>,
     beacon_pool_path: &BeaconPoolPathFilter<T>,
 ) -> ResponseFilter {
     beacon_pool_path
@@ -541,12 +544,15 @@ pub fn post_beacon_pool_execution_proofs<T: BeaconChainTypes>(
         .and(warp::path::end())
         .and(warp_utils::json::json())
         .and(network_tx_filter.clone())
+        .and(sync_tx_filter.clone())
         .then(
             |_task_spawner: TaskSpawner<T::EthSpec>,
              chain: Arc<BeaconChain<T>>,
              proof: ExecutionProof,
-             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| async move {
-                let result = publish_execution_proof(chain, proof, network_tx).await;
+             network_tx_filter: UnboundedSender<NetworkMessage<T::EthSpec>>,
+             sync_tx_filter: UnboundedSender<SyncMessage<T::EthSpec>>| async move {
+                let result =
+                    publish_execution_proof(chain, proof, network_tx_filter, sync_tx_filter).await;
                 convert_rejection(result.map(|()| warp::reply::json(&()))).await
             },
         )
@@ -558,6 +564,7 @@ async fn publish_execution_proof<T: BeaconChainTypes>(
     chain: Arc<BeaconChain<T>>,
     proof: ExecutionProof,
     network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
+    sync_tx: UnboundedSender<SyncMessage<T::EthSpec>>,
 ) -> Result<(), warp::Rejection> {
     let proof = Arc::new(proof);
 
@@ -614,6 +621,18 @@ async fn publish_execution_proof<T: BeaconChainTypes>(
                 ?status,
                 "Execution proof submitted and published"
             );
+
+            if let AvailabilityProcessingStatus::Imported(_) = status {
+                chain.recompute_head_at_current_slot().await;
+
+                // Notify that block was imported via HTTP API
+                if let Err(e) = sync_tx.send(SyncMessage::GossipBlockProcessResult {
+                    block_root,
+                    imported: true,
+                }) {
+                    debug!(error = %e, "Could not send message to the sync service")
+                };
+            }
         }
         Err(e) => {
             // Log the error but don't fail the request - the proof was already
diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs
index 01c5314b1de..2395a5235c4 100644
--- a/beacon_node/http_api/src/lib.rs
+++ b/beacon_node/http_api/src/lib.rs
@@ -447,6 +447,23 @@ pub fn serve(
         })
         .boxed();
 
+    // Create a `warp` filter that provides access to the sync sender channel.
+    let sync_tx = ctx
+        .network_senders
+        .as_ref()
+        .map(|senders| senders.sync_send());
+    let sync_tx_filter = warp::any()
+        .map(move || sync_tx.clone())
+        .and_then(|sync_tx| async move {
+            match sync_tx {
+                Some(sync_tx) => Ok(sync_tx),
+                None => Err(warp_utils::reject::custom_not_found(
+                    "The networking stack has not yet started (sync_tx).".to_string(),
+                )),
+            }
+        })
+        .boxed();
+
     // Create a `warp` filter that rejects requests whilst the node is syncing.
     let not_while_syncing_filter = warp::any()
@@ -1515,7 +1532,7 @@ pub fn serve(
 
     // POST beacon/pool/execution_proofs
     let post_beacon_pool_execution_proofs =
-        post_beacon_pool_execution_proofs(&network_tx_filter, &beacon_pool_path);
+        post_beacon_pool_execution_proofs(&network_tx_filter, &sync_tx_filter, &beacon_pool_path);
 
     let beacon_rewards_path = eth_v1
         .clone()
diff --git a/beacon_node/http_api/src/utils.rs b/beacon_node/http_api/src/utils.rs
index f2b859ebe59..4dfba8a8636 100644
--- a/beacon_node/http_api/src/utils.rs
+++ b/beacon_node/http_api/src/utils.rs
@@ -3,7 +3,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes};
 use eth2::types::EndpointVersion;
 use lighthouse_network::PubsubMessage;
 use lighthouse_network::rpc::methods::MetaData;
-use network::{NetworkMessage, ValidatorSubscriptionMessage};
+use network::{NetworkMessage, SyncMessage, ValidatorSubscriptionMessage};
 use parking_lot::RwLock;
 use std::sync::Arc;
 use tokio::sync::mpsc::{Sender, UnboundedSender};
@@ -20,6 +20,8 @@ pub type TaskSpawnerFilter<T> = BoxedFilter<(TaskSpawner<
 pub type ValidatorSubscriptionTxFilter = BoxedFilter<(Sender<ValidatorSubscriptionMessage>,)>;
 pub type NetworkTxFilter<T> =
     BoxedFilter<(UnboundedSender<NetworkMessage<<T as BeaconChainTypes>::EthSpec>>,)>;
+pub type SyncTxFilter<T> =
+    BoxedFilter<(UnboundedSender<SyncMessage<<T as BeaconChainTypes>::EthSpec>>,)>;
 pub type OptionalConsensusVersionHeaderFilter = BoxedFilter<(Option<ForkName>,)>;
 
 pub fn from_meta_data(
diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs
index 2a7fedb53e9..d6b4303d04f 100644
--- a/beacon_node/network/src/lib.rs
+++ b/beacon_node/network/src/lib.rs
@@ -14,3 +14,4 @@ pub use lighthouse_network::NetworkConfig;
 pub use service::{
     NetworkMessage, NetworkReceivers, NetworkSenders, NetworkService, ValidatorSubscriptionMessage,
 };
+pub use sync::manager::SyncMessage;
diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs
index f5bf65c9777..6ccfb55ddee 100644
--- a/beacon_node/network/src/router.rs
+++ b/beacon_node/network/src/router.rs
@@ -88,14 +88,13 @@ impl<T: BeaconChainTypes> Router<T> {
         invalid_block_storage: InvalidBlockStorage,
         beacon_processor_send: BeaconProcessorSend<T::EthSpec>,
         fork_context: Arc<ForkContext>,
+        sync_send: mpsc::UnboundedSender<SyncMessage<T::EthSpec>>,
+        sync_recv: mpsc::UnboundedReceiver<SyncMessage<T::EthSpec>>,
     ) -> Result<mpsc::UnboundedSender<RouterMessage<T::EthSpec>>, String> {
         trace!("Service starting");
 
         let (handler_send, handler_recv) = mpsc::unbounded_channel();
 
-        // generate the message channel
-        let (sync_send, sync_recv) = mpsc::unbounded_channel::<SyncMessage<T::EthSpec>>();
-
         let network_beacon_processor = NetworkBeaconProcessor {
             beacon_processor_send,
             duplicate_cache: DuplicateCache::default(),
diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs
index 0869b442aec..dcb1fd5a507 100644
--- a/beacon_node/network/src/service.rs
+++ b/beacon_node/network/src/service.rs
@@ -5,6 +5,7 @@ use crate::network_beacon_processor::InvalidBlockStorage;
 use crate::persisted_dht::{clear_dht, load_dht, persist_dht};
 use crate::router::{Router, RouterMessage};
 use crate::subnet_service::{SubnetService, SubnetServiceMessage, Subscription};
+use crate::sync::manager::SyncMessage;
 use beacon_chain::{BeaconChain, BeaconChainTypes};
 use beacon_processor::BeaconProcessorSend;
 use futures::channel::mpsc::Sender;
@@ -138,11 +139,13 @@ pub enum ValidatorSubscriptionMessage {
 pub struct NetworkSenders<E: EthSpec> {
     network_send: mpsc::UnboundedSender<NetworkMessage<E>>,
     validator_subscription_send: mpsc::Sender<ValidatorSubscriptionMessage>,
+    sync_send: mpsc::UnboundedSender<SyncMessage<E>>,
 }
 
 pub struct NetworkReceivers<E: EthSpec> {
     pub network_recv: mpsc::UnboundedReceiver<NetworkMessage<E>>,
     pub validator_subscription_recv: mpsc::Receiver<ValidatorSubscriptionMessage>,
+    sync_recv: mpsc::UnboundedReceiver<SyncMessage<E>>,
 }
 
 impl<E: EthSpec> NetworkSenders<E> {
@@ -150,13 +153,16 @@ impl<E: EthSpec> NetworkSenders<E> {
         let (network_send, network_recv) = mpsc::unbounded_channel::<NetworkMessage<E>>();
         let (validator_subscription_send, validator_subscription_recv) =
             mpsc::channel(VALIDATOR_SUBSCRIPTION_MESSAGE_QUEUE_SIZE);
+        let (sync_send, sync_recv) = mpsc::unbounded_channel::<SyncMessage<E>>();
         let senders = Self {
             network_send,
             validator_subscription_send,
+            sync_send,
         };
         let receivers = NetworkReceivers {
             network_recv,
             validator_subscription_recv,
+            sync_recv,
         };
         (senders, receivers)
     }
@@ -168,6 +174,10 @@ impl<E: EthSpec> NetworkSenders<E> {
     pub fn validator_subscription_send(&self) -> mpsc::Sender<ValidatorSubscriptionMessage> {
         self.validator_subscription_send.clone()
     }
+
+    pub fn sync_send(&self) -> mpsc::UnboundedSender<SyncMessage<E>> {
+        self.sync_send.clone()
+    }
 }
 
 /// Service that handles communication between internal services and the `lighthouse_network` network service.
@@ -320,6 +330,8 @@ impl<T: BeaconChainTypes> NetworkService<T> {
             invalid_block_storage,
             beacon_processor_send,
             fork_context.clone(),
+            network_senders.sync_send(),
+            network_receivers.sync_recv,
         )?;
 
         // attestation and sync committee subnet service
@@ -338,6 +350,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
         let NetworkReceivers {
             network_recv,
             validator_subscription_recv,
+            sync_recv: _,
         } = network_receivers;
 
         // create the network service and spawn the task
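Review note: the sketch below is one rough way to exercise the new `POST beacon/pool/execution_proofs` route wired up in this diff from outside the node. It is illustrative only; the `/eth/v1` route prefix, the `localhost:5052` address, the shape of the `proof.json` payload, and the `reqwest`/`serde_json`/`tokio` dependencies are assumptions and are not part of this change.

```rust
// Minimal sketch of submitting an execution proof over HTTP.
// Assumptions (not taken from this diff): the route is served under `/eth/v1`,
// the beacon node HTTP API listens on localhost:5052, and `proof.json` already
// holds a JSON-encoded `ExecutionProof` in whatever shape the server expects.
use std::error::Error;

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    // Load a pre-built proof; its schema is defined by the server-side
    // `ExecutionProof` type, not by this example.
    let proof: serde_json::Value =
        serde_json::from_str(&std::fs::read_to_string("proof.json")?)?;

    let url = "http://localhost:5052/eth/v1/beacon/pool/execution_proofs";
    let response = reqwest::Client::new().post(url).json(&proof).send().await?;

    // The handler replies with an empty JSON body on success. If the proof made
    // a block available, the server also recomputes the head and notifies the
    // sync service (see the `pool.rs` hunk above).
    println!("status: {}", response.status());
    response.error_for_status()?;
    Ok(())
}
```

If the networking stack has not started yet, the request is rejected with the "The networking stack has not yet started (sync_tx)." not-found error added in `lib.rs`.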