diff --git a/Cargo.lock b/Cargo.lock index 55f5cd08..62584f04 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -126,7 +126,9 @@ checksum = "dd208e8a87fbc2ca1a3822dd1ea03b0a7a4a841e6fa70db2c236dd30ae2e7018" dependencies = [ "alloy-primitives 1.5.2", "alloy-rlp", + "arbitrary", "num_enum", + "proptest", "serde", "strum 0.27.2", ] @@ -478,6 +480,7 @@ dependencies = [ "alloy-hardforks", "alloy-primitives 1.5.2", "auto_impl", + "serde", ] [[package]] @@ -1940,7 +1943,7 @@ dependencies = [ "reth-optimism-chainspec", "reth-optimism-node", "reth-optimism-primitives", - "reth-optimism-rpc", + "reth-optimism-rpc 1.9.3", "reth-primitives-traits", "reth-provider", "reth-rpc-layer", @@ -1996,7 +1999,7 @@ dependencies = [ "reth-optimism-evm", "reth-optimism-node", "reth-optimism-primitives", - "reth-optimism-rpc", + "reth-optimism-rpc 1.9.3", "reth-primitives", "reth-primitives-traits", "reth-provider", @@ -2094,7 +2097,8 @@ dependencies = [ "base-txpool", "clap", "reth-cli-util", - "reth-optimism-cli", + "reth-optimism-cli 0.2.1", + "reth-optimism-exex", "reth-optimism-node", ] @@ -3921,6 +3925,12 @@ version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + [[package]] name = "dtoa" version = "1.0.11" @@ -4436,6 +4446,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fragile" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dd6caf6059519a65843af8fe2a3ae298b14b80179855aeb4adc2c1934ee619" + [[package]] name = "fs_extra" version = "1.3.0" @@ -6782,6 +6798,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", + "serde", "simd-adler32", ] @@ -6797,6 +6814,32 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "mockall" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2" +dependencies = [ + "cfg-if", + "downcast", + "fragile", + "mockall_derive", + "predicates", + "predicates-tree", +] + +[[package]] +name = "mockall_derive" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "modular-bitfield" version = "0.11.2" @@ -7476,14 +7519,14 @@ dependencies = [ "reth-node-core", "reth-node-ethereum", "reth-optimism-chainspec", - "reth-optimism-cli", + "reth-optimism-cli 1.9.3", "reth-optimism-consensus", "reth-optimism-evm", "reth-optimism-forks", "reth-optimism-node", "reth-optimism-payload-builder", "reth-optimism-primitives", - "reth-optimism-rpc", + "reth-optimism-rpc 1.9.3", "reth-optimism-txpool", "reth-payload-builder", "reth-payload-builder-primitives", @@ -8121,6 +8164,32 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "predicates" +version = "3.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5d19ee57562043d37e82899fade9a22ebab7be9cef5026b07fda9cdd4293573" +dependencies = [ + "anstyle", + "predicates-core", +] + +[[package]] +name = "predicates-core" 
+version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" + +[[package]] +name = "predicates-tree" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" +dependencies = [ + "predicates-core", + "termtree", +] + [[package]] name = "pretty_assertions" version = "1.4.1" @@ -8971,6 +9040,7 @@ dependencies = [ "alloy-eips", "alloy-primitives 1.5.2", "alloy-rlp", + "arbitrary", "backon", "clap", "comfy-table", @@ -8983,6 +9053,8 @@ dependencies = [ "itertools 0.14.0", "lz4", "metrics", + "proptest", + "proptest-arbitrary-interop", "ratatui", "reqwest", "reth-chainspec", @@ -9003,6 +9075,7 @@ dependencies = [ "reth-era-downloader", "reth-era-utils", "reth-eth-wire", + "reth-ethereum-primitives", "reth-etl", "reth-evm", "reth-exex", @@ -9019,8 +9092,10 @@ dependencies = [ "reth-primitives-traits", "reth-provider", "reth-prune", + "reth-prune-types", "reth-revm", "reth-stages", + "reth-stages-types", "reth-static-file", "reth-static-file-types", "reth-tasks", @@ -9685,6 +9760,7 @@ dependencies = [ "alloy-chains", "alloy-primitives 1.5.2", "alloy-rlp", + "arbitrary", "bytes", "derive_more", "futures", @@ -9716,8 +9792,11 @@ dependencies = [ "alloy-hardforks", "alloy-primitives 1.5.2", "alloy-rlp", + "arbitrary", "bytes", "derive_more", + "proptest", + "proptest-arbitrary-interop", "reth-chainspec", "reth-codecs-derive", "reth-ethereum-primitives", @@ -10554,6 +10633,56 @@ dependencies = [ "thiserror 2.0.17", ] +[[package]] +name = "reth-optimism-cli" +version = "0.2.1" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives 1.5.2", + "alloy-rlp", + "clap", + "derive_more", + "eyre", + "futures-util", + "op-alloy-consensus", + "proptest", + "reth-chainspec", + "reth-cli", + "reth-cli-commands", + "reth-cli-runner", + "reth-consensus", + "reth-db", + "reth-db-api", + "reth-db-common", + "reth-downloaders", + "reth-execution-types", + "reth-fs-util", + "reth-node-builder", + "reth-node-core", + "reth-node-events", + "reth-node-metrics", + "reth-optimism-chainspec", + "reth-optimism-consensus", + "reth-optimism-evm", + "reth-optimism-node", + "reth-optimism-primitives", + "reth-optimism-trie", + "reth-primitives-traits", + "reth-provider", + "reth-prune", + "reth-rpc-server-types", + "reth-stages", + "reth-static-file", + "reth-static-file-types", + "reth-tracing", + "serde", + "tempfile", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "reth-optimism-cli" version = "1.9.3" @@ -10655,6 +10784,37 @@ dependencies = [ "thiserror 2.0.17", ] +[[package]] +name = "reth-optimism-exex" +version = "0.2.1" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "base-client-node", + "clap", + "derive_more", + "eyre", + "futures", + "futures-util", + "humantime", + "reth-db", + "reth-execution-types", + "reth-exex", + "reth-node-api", + "reth-node-builder", + "reth-node-types", + "reth-optimism-chainspec", + "reth-optimism-node", + "reth-optimism-rpc 0.2.1", + "reth-optimism-trie", + "reth-provider", + "reth-tasks", + "reth-trie", + "tempfile", + "tokio", + "tracing", +] + [[package]] name = "reth-optimism-flashblocks" version = "1.9.3" @@ -10733,7 +10893,7 @@ dependencies = [ "reth-optimism-forks", "reth-optimism-payload-builder", "reth-optimism-primitives", - "reth-optimism-rpc", + "reth-optimism-rpc 1.9.3", "reth-optimism-storage", "reth-optimism-txpool", 
"reth-payload-builder", @@ -10808,6 +10968,77 @@ dependencies = [ "serde_with", ] +[[package]] +name = "reth-optimism-rpc" +version = "0.2.1" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-op-hardforks", + "alloy-primitives 1.5.2", + "alloy-rlp", + "alloy-rpc-client", + "alloy-rpc-types-debug", + "alloy-rpc-types-engine", + "alloy-rpc-types-eth", + "alloy-serde", + "alloy-transport", + "alloy-transport-http", + "async-trait", + "derive_more", + "eyre", + "futures", + "jsonrpsee", + "jsonrpsee-core", + "jsonrpsee-types", + "metrics", + "op-alloy-consensus", + "op-alloy-network", + "op-alloy-rpc-jsonrpsee", + "op-alloy-rpc-types", + "op-alloy-rpc-types-engine", + "op-revm", + "reqwest", + "reth-basic-payload-builder", + "reth-chain-state", + "reth-chainspec", + "reth-evm", + "reth-metrics", + "reth-node-api", + "reth-node-builder", + "reth-optimism-chainspec", + "reth-optimism-evm", + "reth-optimism-flashblocks", + "reth-optimism-forks", + "reth-optimism-payload-builder", + "reth-optimism-primitives", + "reth-optimism-trie", + "reth-optimism-txpool", + "reth-payload-util", + "reth-primitives-traits", + "reth-provider", + "reth-revm", + "reth-rpc", + "reth-rpc-api", + "reth-rpc-engine-api", + "reth-rpc-eth-api", + "reth-rpc-eth-types", + "reth-rpc-server-types", + "reth-storage-api", + "reth-tasks", + "reth-transaction-pool", + "revm", + "serde", + "serde_json", + "strum 0.27.2", + "thiserror 2.0.17", + "tokio", + "tokio-stream", + "tower", + "tracing", +] + [[package]] name = "reth-optimism-rpc" version = "1.9.3" @@ -10879,6 +11110,50 @@ dependencies = [ "reth-storage-api", ] +[[package]] +name = "reth-optimism-trie" +version = "0.2.1" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-genesis", + "alloy-primitives 1.5.2", + "auto_impl", + "bincode", + "bytes", + "derive_more", + "eyre", + "metrics", + "mockall", + "reth-chainspec", + "reth-codecs", + "reth-db", + "reth-db-api", + "reth-db-common", + "reth-ethereum-primitives", + "reth-evm", + "reth-evm-ethereum", + "reth-execution-errors", + "reth-metrics", + "reth-node-api", + "reth-primitives-traits", + "reth-provider", + "reth-revm", + "reth-storage-errors", + "reth-tasks", + "reth-testing-utils", + "reth-trie", + "secp256k1 0.30.0", + "serde", + "serial_test", + "strum 0.27.2", + "tempfile", + "test-case", + "thiserror 2.0.17", + "tokio", + "tracing", +] + [[package]] name = "reth-optimism-txpool" version = "1.9.3" @@ -12559,6 +12834,15 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "scc" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46e6f046b7fef48e2660c57ed794263155d713de679057f2d0c169bfc6e756cc" +dependencies = [ + "sdd", +] + [[package]] name = "schannel" version = "0.1.28" @@ -12609,6 +12893,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sdd" +version = "3.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "490dcfcbfef26be6800d11870ff2df8774fa6e86d047e3e8c8a76b25655e41ca" + [[package]] name = "sec1" version = "0.7.3" @@ -12935,6 +13225,32 @@ dependencies = [ "serde", ] +[[package]] +name = "serial_test" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d0b343e184fc3b7bb44dff0705fffcf4b3756ba6aff420dddd8b24ca145e555" +dependencies = [ + "futures-executor", + "futures-util", + "log", + "once_cell", + 
"parking_lot", + "scc", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f50427f258fb77356e4cd4aa0e87e2bd2c66dbcee41dc405282cae2bfc26c83" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "sha1" version = "0.10.6" @@ -13526,6 +13842,45 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "termtree" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" + +[[package]] +name = "test-case" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb2550dd13afcd286853192af8601920d959b14c401fcece38071d53bf0768a8" +dependencies = [ + "test-case-macros", +] + +[[package]] +name = "test-case-core" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adcb7fd841cd518e279be3d5a3eb0636409487998a4aff22f3de87b81e88384f" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn 2.0.114", +] + +[[package]] +name = "test-case-macros" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", + "test-case-core", +] + [[package]] name = "testcontainers" version = "0.24.0" diff --git a/Cargo.toml b/Cargo.toml index 3564297e..cba005e1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,7 @@ exclude = [".github/"] [workspace] resolver = "2" -members = ["bin/*", "crates/client/*", "crates/shared/*", "crates/builder/*"] +members = ["bin/*", "crates/client/*", "crates/shared/*", "crates/builder/*", "crates/optimism/*"] default-members = ["bin/node"] [workspace.metadata.cargo-udeps.ignore] @@ -65,6 +65,9 @@ base-metering = { path = "crates/client/metering" } base-txpool = { path = "crates/client/txpool" } base-flashblocks = { path = "crates/client/flashblocks" } +reth-optimism-exex = { path = "crates/optimism/exex" } +reth-optimism-trie = { path = "crates/optimism/trie" } + # reth reth = { git = "https://github.com/paradigmxyz/reth", rev = "6d952f39fe47411164ec0b3978c43fdc7eefe3a8" } reth-ipc = { git = "https://github.com/paradigmxyz/reth", rev = "6d952f39fe47411164ec0b3978c43fdc7eefe3a8" } @@ -99,6 +102,16 @@ reth-optimism-primitives = { git = "https://github.com/paradigmxyz/reth", rev = reth-db = { git = "https://github.com/paradigmxyz/reth", rev = "6d952f39fe47411164ec0b3978c43fdc7eefe3a8", features = [ "op", ] } +reth-cli-runner = { git = "https://github.com/paradigmxyz/reth", rev = "6d952f39fe47411164ec0b3978c43fdc7eefe3a8" } +reth-consensus = { git = "https://github.com/paradigmxyz/reth", rev = "6d952f39fe47411164ec0b3978c43fdc7eefe3a8" } +reth-downloaders = { git = "https://github.com/paradigmxyz/reth", rev = "6d952f39fe47411164ec0b3978c43fdc7eefe3a8" } +reth-fs-util = { git = "https://github.com/paradigmxyz/reth", rev = "6d952f39fe47411164ec0b3978c43fdc7eefe3a8" } +reth-node-events = { git = "https://github.com/paradigmxyz/reth", rev = "6d952f39fe47411164ec0b3978c43fdc7eefe3a8" } +reth-node-metrics = { git = "https://github.com/paradigmxyz/reth", rev = "6d952f39fe47411164ec0b3978c43fdc7eefe3a8" } +reth-prune = { git = "https://github.com/paradigmxyz/reth", rev = "6d952f39fe47411164ec0b3978c43fdc7eefe3a8" } +reth-stages = { git = "https://github.com/paradigmxyz/reth", rev = 
"6d952f39fe47411164ec0b3978c43fdc7eefe3a8" } +reth-static-file = { git = "https://github.com/paradigmxyz/reth", rev = "6d952f39fe47411164ec0b3978c43fdc7eefe3a8" } +reth-static-file-types = { git = "https://github.com/paradigmxyz/reth", rev = "6d952f39fe47411164ec0b3978c43fdc7eefe3a8" } reth-chain-state = { git = "https://github.com/paradigmxyz/reth", rev = "6d952f39fe47411164ec0b3978c43fdc7eefe3a8" } reth-errors = { git = "https://github.com/paradigmxyz/reth", rev = "6d952f39fe47411164ec0b3978c43fdc7eefe3a8" } reth-payload-builder = { git = "https://github.com/paradigmxyz/reth", rev = "6d952f39fe47411164ec0b3978c43fdc7eefe3a8" } @@ -185,6 +198,8 @@ alloy-network-primitives = "1.2.1" alloy-transport = "1.2.1" alloy-node-bindings = "1.2.1" alloy-rpc-types-beacon = { version = "1.2.1", features = ["ssz"] } +alloy-rpc-types-debug = { version = "1.2.1", default-features = false } + # op-alloy op-alloy-flz = { version = "0.13.1", default-features = false } @@ -264,6 +279,16 @@ opentelemetry = { version = "0.31", features = ["trace"] } jsonrpsee-core = "0.26.0" ethereum_ssz = "0.9.0" ethereum_ssz_derive = "0.9.0" +strum = { version = "0.27", default-features = false } +tower = "0.5" +bincode = "1.3" +mockall = "0.13.1" +serial_test = "3.2.0" +tempfile = "3.20" +test-case = "3" +humantime = "2.1" +proptest = "1.7" + # base concurrent-queue = "2.5.0" diff --git a/bin/node/Cargo.toml b/bin/node/Cargo.toml index 43f2ce23..30abcc64 100644 --- a/bin/node/Cargo.toml +++ b/bin/node/Cargo.toml @@ -22,8 +22,9 @@ base-txpool.workspace = true # reth reth-optimism-node.workspace = true -reth-optimism-cli.workspace = true +reth-optimism-cli = { path = "../../crates/optimism/cli" } reth-cli-util.workspace = true +reth-optimism-exex.workspace = true # misc clap.workspace = true diff --git a/bin/node/src/cli.rs b/bin/node/src/cli.rs index 36aaf70c..aced2a7f 100644 --- a/bin/node/src/cli.rs +++ b/bin/node/src/cli.rs @@ -2,6 +2,7 @@ use base_flashblocks::FlashblocksConfig; use base_txpool::TxpoolConfig; +use reth_optimism_exex::ProofsHistoryConfig; use reth_optimism_node::args::RollupArgs; /// CLI Arguments @@ -12,6 +13,10 @@ pub struct Args { #[command(flatten)] pub rollup_args: RollupArgs, + /// Proofs history arguments + #[command(flatten)] + pub proofs_history_args: ProofsHistoryConfig, + /// The websocket url used for flashblocks. 
#[arg(long = "websocket-url", value_name = "WEBSOCKET_URL")] pub websocket_url: Option, diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs index 10b591ee..9392f6d0 100644 --- a/bin/node/src/main.rs +++ b/bin/node/src/main.rs @@ -9,6 +9,7 @@ use base_client_node::BaseNodeRunner; use base_flashblocks::FlashblocksExtension; use base_metering::MeteringExtension; use base_txpool::TxPoolExtension; +use reth_optimism_exex::ProofsHistoryExtension; #[global_allocator] static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator(); @@ -29,7 +30,8 @@ fn main() { // Feature extensions (FlashblocksExtension must be last - uses replace_configured) runner.install_ext::(args.clone().into()); runner.install_ext::(args.enable_metering); - runner.install_ext::(args.into()); + runner.install_ext::(args.clone().into()); + runner.install_ext::(args.proofs_history_args.clone()); let handle = runner.run(builder); handle.await diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml new file mode 100644 index 00000000..bfc5a81c --- /dev/null +++ b/crates/optimism/cli/Cargo.toml @@ -0,0 +1,103 @@ +[package] +name = "reth-optimism-cli" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +reth-static-file-types = { workspace = true, features = ["clap"] } +reth-cli.workspace = true +reth-cli-commands.workspace = true +reth-consensus.workspace = true +reth-rpc-server-types.workspace = true +reth-primitives-traits.workspace = true +reth-db = { workspace = true, features = ["mdbx", "op"] } +reth-db-api.workspace = true +reth-db-common.workspace = true +reth-downloaders.workspace = true +reth-provider.workspace = true +reth-prune.workspace = true +reth-stages.workspace = true +reth-static-file.workspace = true +reth-execution-types.workspace = true +reth-node-core.workspace = true +reth-optimism-node.workspace = true +reth-fs-util.workspace = true + +# so jemalloc metrics can be included +reth-node-metrics.workspace = true + +## optimism +reth-optimism-primitives.workspace = true +reth-optimism-chainspec = { workspace = true, features = ["superchain-configs"] } +reth-optimism-consensus.workspace = true +reth-optimism-trie.workspace = true + +reth-chainspec.workspace = true +reth-node-events.workspace = true +reth-optimism-evm.workspace = true +reth-cli-runner.workspace = true +reth-node-builder = { workspace = true, features = ["op"] } +reth-tracing.workspace = true + +# eth +alloy-eips.workspace = true +alloy-consensus.workspace = true +alloy-primitives.workspace = true +alloy-rlp.workspace = true + +# misc +futures-util.workspace = true +derive_more.workspace = true +serde.workspace = true +clap = { workspace = true, features = ["derive", "env"] } + +tokio = { workspace = true, features = ["sync", "macros", "time", "rt-multi-thread"] } +tokio-util = { workspace = true, features = ["codec"] } +tracing.workspace = true +eyre.workspace = true + +# reth test-vectors +proptest = { workspace = true, optional = true } +op-alloy-consensus.workspace = true + +[dev-dependencies] +tempfile.workspace = true +reth-stages = { workspace = true, features = ["test-utils"] } + +[build-dependencies] +reth-optimism-chainspec = { workspace = true, features = ["std", "superchain-configs"] } + +[features] +default = [] + +# Opentelemtry feature to activate metrics export +otlp = ["reth-tracing/otlp", "reth-node-core/otlp"] + 
+asm-keccak = [
+    "alloy-primitives/asm-keccak",
+    "reth-node-core/asm-keccak",
+    "reth-optimism-node/asm-keccak",
+]
+
+# Jemalloc feature for vergen to generate correct env vars
+jemalloc = ["reth-node-core/jemalloc", "reth-node-metrics/jemalloc"]
+
+dev = ["dep:proptest", "reth-cli-commands/arbitrary"]
+
+serde = [
+    "alloy-consensus/serde",
+    "alloy-eips/serde",
+    "alloy-primitives/serde",
+    "op-alloy-consensus/serde",
+    "reth-execution-types/serde",
+    "reth-optimism-primitives/serde",
+    "reth-primitives-traits/serde",
+    "reth-optimism-chainspec/serde",
+]
diff --git a/crates/optimism/cli/src/app.rs b/crates/optimism/cli/src/app.rs
new file mode 100644
index 00000000..e405dcc8
--- /dev/null
+++ b/crates/optimism/cli/src/app.rs
@@ -0,0 +1,145 @@
+use crate::{Cli, Commands};
+use eyre::{eyre, Result};
+use reth_cli::chainspec::ChainSpecParser;
+use reth_cli_commands::launcher::Launcher;
+use reth_cli_runner::CliRunner;
+use reth_node_core::args::OtlpInitStatus;
+use reth_node_metrics::recorder::install_prometheus_recorder;
+use reth_optimism_chainspec::OpChainSpec;
+use reth_optimism_consensus::OpBeaconConsensus;
+use reth_optimism_node::{OpExecutorProvider, OpNode};
+use reth_rpc_server_types::RpcModuleValidator;
+use reth_tracing::{FileWorkerGuard, Layers};
+use std::{fmt, sync::Arc};
+use tracing::{info, warn};
+
+/// A wrapper around a parsed CLI that handles command execution.
+#[derive(Debug)]
+pub struct CliApp {
+    cli: Cli,
+    runner: Option,
+    layers: Option,
+    guard: Option,
+}
+
+impl CliApp
+where
+    C: ChainSpecParser,
+    Ext: clap::Args + fmt::Debug,
+    Rpc: RpcModuleValidator,
+{
+    pub(crate) fn new(cli: Cli) -> Self {
+        Self { cli, runner: None, layers: Some(Layers::new()), guard: None }
+    }
+
+    /// Sets the runner for the CLI commander.
+    ///
+    /// This replaces any existing runner with the provided one.
+    pub fn set_runner(&mut self, runner: CliRunner) {
+        self.runner = Some(runner);
+    }
+
+    /// Access to tracing layers.
+    ///
+    /// Returns a mutable reference to the tracing layers, or an error if tracing
+    /// has already been initialized and the layers have been detached.
+    pub fn access_tracing_layers(&mut self) -> Result<&mut Layers> {
+        self.layers.as_mut().ok_or_else(|| eyre!("Tracing already initialized"))
+    }
+
+    /// Execute the configured cli command.
+    ///
+    /// This accepts a closure that is used to launch the node via the
+    /// [`NodeCommand`](reth_cli_commands::node::NodeCommand).
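+    ///
+    /// Only `Commands::Node` is launched through the provided closure; every other
+    /// subcommand is dispatched directly on the runner.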
+    pub fn run(mut self, launcher: impl Launcher) -> Result<()> {
+        let runner = match self.runner.take() {
+            Some(runner) => runner,
+            None => CliRunner::try_default_runtime()?,
+        };
+
+        // Add network name (if available) to the logs dir
+        if let Some(chain_spec) = self.cli.command.chain_spec() {
+            self.cli.logs.log_file_directory =
+                self.cli.logs.log_file_directory.join(chain_spec.chain.to_string());
+        }
+
+        self.init_tracing(&runner)?;
+
+        // Install the prometheus recorder to be sure to record all metrics
+        let _ = install_prometheus_recorder();
+
+        let components = |spec: Arc| {
+            (OpExecutorProvider::optimism(spec.clone()), Arc::new(OpBeaconConsensus::new(spec)))
+        };
+
+        match self.cli.command {
+            Commands::Node(command) => {
+                // Validate RPC modules using the configured validator
+                if let Some(http_api) = &command.rpc.http_api {
+                    Rpc::validate_selection(http_api, "http.api").map_err(|e| eyre!("{e}"))?;
+                }
+                if let Some(ws_api) = &command.rpc.ws_api {
+                    Rpc::validate_selection(ws_api, "ws.api").map_err(|e| eyre!("{e}"))?;
+                }
+
+                runner.run_command_until_exit(|ctx| command.execute(ctx, launcher))
+            }
+            Commands::Init(command) => {
+                runner.run_blocking_until_ctrl_c(command.execute::())
+            }
+            Commands::InitState(command) => {
+                runner.run_blocking_until_ctrl_c(command.execute::())
+            }
+            Commands::ImportOp(command) => {
+                runner.run_blocking_until_ctrl_c(command.execute::())
+            }
+            Commands::ImportReceiptsOp(command) => {
+                runner.run_blocking_until_ctrl_c(command.execute::())
+            }
+            Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()),
+            Commands::Db(command) => {
+                runner.run_blocking_command_until_exit(|ctx| command.execute::(ctx))
+            }
+            Commands::Stage(command) => {
+                runner.run_command_until_exit(|ctx| command.execute::(ctx, components))
+            }
+            Commands::P2P(command) => runner.run_until_ctrl_c(command.execute::()),
+            Commands::Config(command) => runner.run_until_ctrl_c(command.execute()),
+            Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::()),
+            #[cfg(feature = "dev")]
+            Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()),
+            Commands::ReExecute(command) => {
+                runner.run_until_ctrl_c(command.execute::(components))
+            }
+            Commands::OpProofs(command) => {
+                runner.run_blocking_until_ctrl_c(command.execute::())
+            }
+        }
+    }
+
+    /// Initializes tracing with the configured options.
+    ///
+    /// If file logging is enabled, this function stores the guard in the struct.
+    /// For gRPC OTLP, initialization requires a Tokio runtime context.
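+    /// Subsequent calls are no-ops once a guard has been installed.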
+ pub fn init_tracing(&mut self, runner: &CliRunner) -> Result<()> { + if self.guard.is_none() { + let mut layers = self.layers.take().unwrap_or_default(); + + let otlp_status = runner.block_on(self.cli.traces.init_otlp_tracing(&mut layers))?; + + self.guard = self.cli.logs.init_tracing_with_layers(layers)?; + info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.cli.logs.log_file_directory); + match otlp_status { + OtlpInitStatus::Started(endpoint) => { + info!(target: "reth::cli", "Started OTLP {:?} tracing export to {endpoint}", self.cli.traces.protocol); + } + OtlpInitStatus::NoFeature => { + warn!(target: "reth::cli", "Provided OTLP tracing arguments do not have effect, compile with the `otlp` feature") + } + OtlpInitStatus::Disabled => {} + } + } + Ok(()) + } +} diff --git a/crates/optimism/cli/src/chainspec.rs b/crates/optimism/cli/src/chainspec.rs new file mode 100644 index 00000000..14e7450c --- /dev/null +++ b/crates/optimism/cli/src/chainspec.rs @@ -0,0 +1,45 @@ +use reth_cli::chainspec::{parse_genesis, ChainSpecParser}; +use reth_optimism_chainspec::{generated_chain_value_parser, OpChainSpec, SUPPORTED_CHAINS}; +use std::sync::Arc; + +/// Optimism chain specification parser. +#[derive(Debug, Clone, Default)] +#[non_exhaustive] +pub struct OpChainSpecParser; + +impl ChainSpecParser for OpChainSpecParser { + type ChainSpec = OpChainSpec; + + const SUPPORTED_CHAINS: &'static [&'static str] = SUPPORTED_CHAINS; + + fn parse(s: &str) -> eyre::Result> { + chain_value_parser(s) + } +} + +/// Clap value parser for [`OpChainSpec`]s. +/// +/// The value parser matches either a known chain, the path +/// to a json file, or a json formatted string in-memory. The json needs to be a Genesis struct. +pub fn chain_value_parser(s: &str) -> eyre::Result, eyre::Error> { + if let Some(op_chain_spec) = generated_chain_value_parser(s) { + Ok(op_chain_spec) + } else { + Ok(Arc::new(parse_genesis(s)?.into())) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_known_chain_spec() { + for &chain in OpChainSpecParser::SUPPORTED_CHAINS { + assert!( + ::parse(chain).is_ok(), + "Failed to parse {chain}" + ); + } + } +} diff --git a/crates/optimism/cli/src/commands/import.rs b/crates/optimism/cli/src/commands/import.rs new file mode 100644 index 00000000..74656511 --- /dev/null +++ b/crates/optimism/cli/src/commands/import.rs @@ -0,0 +1,169 @@ +//! Command that initializes the node by importing OP Mainnet chain segment below Bedrock, from a +//! file. +use clap::Parser; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::{ + common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}, + import::build_import_pipeline, +}; +use reth_consensus::noop::NoopConsensus; +use reth_db_api::{tables, transaction::DbTx}; +use reth_downloaders::file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}; +use reth_node_builder::BlockTy; +use reth_node_core::version::version_metadata; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_evm::OpExecutorProvider; +use reth_optimism_primitives::{bedrock::is_dup_tx, OpPrimitives}; +use reth_provider::{BlockNumReader, ChainSpecProvider, HeaderProvider, StageCheckpointReader}; +use reth_prune::PruneModes; +use reth_stages::StageId; +use reth_static_file::StaticFileProducer; +use std::{path::PathBuf, sync::Arc}; +use tracing::{debug, error, info}; + +/// Syncs RLP encoded blocks from a file. 
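+///
+/// Invocation sketch (binary name and file path are assumptions):
+/// `<bin> import-op --chain optimism ./bedrock-blocks.rlp`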
+#[derive(Debug, Parser)] +pub struct ImportOpCommand { + #[command(flatten)] + env: EnvironmentArgs, + + /// Chunk byte length to read from file. + #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] + chunk_len: Option, + + /// The path to a block file for import. + /// + /// The online stages (headers and bodies) are replaced by a file import, after which the + /// remaining stages are executed. + #[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)] + path: PathBuf, +} + +impl> ImportOpCommand { + /// Execute `import` command + pub async fn execute>( + self, + ) -> eyre::Result<()> { + info!(target: "reth::cli", "reth {} starting", version_metadata().short_version); + + info!(target: "reth::cli", + "Disabled stages requiring state, since cannot execute OVM state changes" + ); + + debug!(target: "reth::cli", + chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), + "Chunking chain import" + ); + + let Environment { provider_factory, config, .. } = self.env.init::(AccessRights::RW)?; + + // we use noop here because we expect the inputs to be valid + let consensus = Arc::new(NoopConsensus::default()); + + // open file + let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?; + + let mut total_decoded_blocks = 0; + let mut total_decoded_txns = 0; + let mut total_filtered_out_dup_txns = 0; + + let mut sealed_header = provider_factory + .sealed_header(provider_factory.last_block_number()?)? + .expect("should have genesis"); + + let static_file_producer = + StaticFileProducer::new(provider_factory.clone(), PruneModes::default()); + + while let Some(mut file_client) = + reader.next_chunk::>(consensus.clone(), Some(sealed_header)).await? + { + // create a new FileClient from chunk read from file + info!(target: "reth::cli", + "Importing chain file chunk" + ); + + let tip = file_client.tip().ok_or_else(|| eyre::eyre!("file client has no tip"))?; + info!(target: "reth::cli", "Chain file chunk read"); + + total_decoded_blocks += file_client.headers_len(); + total_decoded_txns += file_client.total_transactions(); + + for (block_number, body) in file_client.bodies_iter_mut() { + body.transactions.retain(|_| { + if is_dup_tx(block_number) { + total_filtered_out_dup_txns += 1; + return false + } + true + }) + } + + let (mut pipeline, events) = build_import_pipeline( + &config, + provider_factory.clone(), + &consensus, + Arc::new(file_client), + static_file_producer.clone(), + true, + OpExecutorProvider::optimism(provider_factory.chain_spec()), + )?; + + // override the tip + pipeline.set_tip(tip); + debug!(target: "reth::cli", ?tip, "Tip manually set"); + + let provider = provider_factory.provider()?; + + let latest_block_number = + provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); + tokio::spawn(reth_node_events::node::handle_events(None, latest_block_number, events)); + + // Run pipeline + info!(target: "reth::cli", "Starting sync pipeline"); + tokio::select! { + res = pipeline.run() => res?, + _ = tokio::signal::ctrl_c() => {}, + } + + sealed_header = provider_factory + .sealed_header(provider_factory.last_block_number()?)? 
+ .expect("should have genesis"); + } + + let provider = provider_factory.provider()?; + + let total_imported_blocks = provider.tx_ref().entries::()?; + let total_imported_txns = provider.tx_ref().entries::()?; + + if total_decoded_blocks != total_imported_blocks || + total_decoded_txns != total_imported_txns + total_filtered_out_dup_txns + { + error!(target: "reth::cli", + total_decoded_blocks, + total_imported_blocks, + total_decoded_txns, + total_filtered_out_dup_txns, + total_imported_txns, + "Chain was partially imported" + ); + } + + info!(target: "reth::cli", + total_imported_blocks, + total_imported_txns, + total_decoded_blocks, + total_decoded_txns, + total_filtered_out_dup_txns, + "Chain file imported" + ); + + Ok(()) + } +} + +impl ImportOpCommand { + /// Returns the underlying chain being used to run this command + pub const fn chain_spec(&self) -> Option<&Arc> { + Some(&self.env.chain) + } +} diff --git a/crates/optimism/cli/src/commands/import_receipts.rs b/crates/optimism/cli/src/commands/import_receipts.rs new file mode 100644 index 00000000..db25afe9 --- /dev/null +++ b/crates/optimism/cli/src/commands/import_receipts.rs @@ -0,0 +1,319 @@ +//! Command that imports OP mainnet receipts from Bedrock datadir, exported via +//! . + +use crate::receipt_file_codec::OpGethReceiptFileCodec; +use clap::Parser; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; +use reth_db_api::tables; +use reth_downloaders::{ + file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, + receipt_file_client::ReceiptFileClient, +}; +use reth_execution_types::ExecutionOutcome; +use reth_node_builder::ReceiptTy; +use reth_node_core::version::version_metadata; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_primitives::{bedrock::is_dup_tx, OpPrimitives, OpReceipt}; +use reth_primitives_traits::NodePrimitives; +use reth_provider::{ + providers::ProviderNodeTypes, DBProvider, DatabaseProviderFactory, OriginalValuesKnown, + ProviderFactory, StageCheckpointReader, StageCheckpointWriter, StateWriter, + StaticFileProviderFactory, StatsReader, +}; +use reth_stages::{StageCheckpoint, StageId}; +use reth_static_file_types::StaticFileSegment; +use std::{ + path::{Path, PathBuf}, + sync::Arc, +}; +use tracing::{debug, info, trace, warn}; + +/// Initializes the database with the genesis block. +#[derive(Debug, Parser)] +pub struct ImportReceiptsOpCommand { + #[command(flatten)] + env: EnvironmentArgs, + + /// Chunk byte length to read from file. + #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] + chunk_len: Option, + + /// The path to a receipts file for import. File must use `OpGethReceiptFileCodec` (used for + /// exporting OP chain segment below Bedrock block via testinprod/op-geth). + /// + /// + #[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)] + path: PathBuf, +} + +impl> ImportReceiptsOpCommand { + /// Execute `import` command + pub async fn execute>( + self, + ) -> eyre::Result<()> { + info!(target: "reth::cli", "reth {} starting", version_metadata().short_version); + + debug!(target: "reth::cli", + chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), + "Chunking receipts import" + ); + + let Environment { provider_factory, .. 
} = self.env.init::(AccessRights::RW)?; + + import_receipts_from_file( + provider_factory, + self.path, + self.chunk_len, + |first_block, receipts| { + let mut total_filtered_out_dup_txns = 0; + for (index, receipts_for_block) in receipts.iter_mut().enumerate() { + if is_dup_tx(first_block + index as u64) { + receipts_for_block.clear(); + total_filtered_out_dup_txns += 1; + } + } + + total_filtered_out_dup_txns + }, + ) + .await + } +} + +impl ImportReceiptsOpCommand { + /// Returns the underlying chain being used to run this command + pub const fn chain_spec(&self) -> Option<&Arc> { + Some(&self.env.chain) + } +} + +/// Imports receipts to static files from file in chunks. See [`import_receipts_from_reader`]. +pub async fn import_receipts_from_file( + provider_factory: ProviderFactory, + path: P, + chunk_len: Option, + filter: F, +) -> eyre::Result<()> +where + N: ProviderNodeTypes>, + P: AsRef, + F: FnMut(u64, &mut Vec>) -> usize, +{ + for stage in StageId::ALL { + let checkpoint = provider_factory.database_provider_ro()?.get_stage_checkpoint(stage)?; + trace!(target: "reth::cli", + ?stage, + ?checkpoint, + "Read stage checkpoints from db" + ); + } + + // open file + let reader = ChunkedFileReader::new(&path, chunk_len).await?; + + // import receipts + let _ = import_receipts_from_reader(&provider_factory, reader, filter).await?; + + info!(target: "reth::cli", + "Receipt file imported" + ); + + Ok(()) +} + +/// Imports receipts to static files. Takes a filter callback as parameter, that returns the total +/// number of filtered out receipts. +/// +/// Caution! Filter callback must replace completely filtered out receipts for a block, with empty +/// vectors, rather than `vec!(None)`. This is since the code for writing to static files, expects +/// indices in the receipts list, to map to sequential block numbers. +pub async fn import_receipts_from_reader( + provider_factory: &ProviderFactory, + mut reader: ChunkedFileReader, + mut filter: F, +) -> eyre::Result +where + N: ProviderNodeTypes>, + F: FnMut(u64, &mut Vec>>) -> usize, +{ + let static_file_provider = provider_factory.static_file_provider(); + + // Ensure that receipts hasn't been initialized apart from `init_genesis`. + if let Some(num_receipts) = + static_file_provider.get_highest_static_file_tx(StaticFileSegment::Receipts) && + num_receipts > 0 + { + eyre::bail!("Expected no receipts in storage, but found {num_receipts}."); + } + match static_file_provider.get_highest_static_file_block(StaticFileSegment::Receipts) { + Some(receipts_block) => { + if receipts_block > 0 { + eyre::bail!("Expected highest receipt block to be 0, but found {receipts_block}."); + } + } + None => { + eyre::bail!( + "Receipts was not initialized. Please import blocks and transactions before calling this command." + ); + } + } + + let provider = provider_factory.database_provider_rw()?; + let mut total_decoded_receipts = 0; + let mut total_receipts = 0; + let mut total_filtered_out_dup_txns = 0; + let mut highest_block_receipts = 0; + + let highest_block_transactions = static_file_provider + .get_highest_static_file_block(StaticFileSegment::Transactions) + .expect("transaction static files must exist before importing receipts"); + + while let Some(file_client) = + reader.next_receipts_chunk::>>().await? 
+ { + if highest_block_receipts == highest_block_transactions { + warn!(target: "reth::cli", highest_block_receipts, highest_block_transactions, "Ignoring all other blocks in the file since we have reached the desired height"); + break + } + + // create a new file client from chunk read from file + let ReceiptFileClient { + mut receipts, + mut first_block, + total_receipts: total_receipts_chunk, + .. + } = file_client; + + // mark these as decoded + total_decoded_receipts += total_receipts_chunk; + + total_filtered_out_dup_txns += filter(first_block, &mut receipts); + + info!(target: "reth::cli", + first_receipts_block=?first_block, + total_receipts_chunk, + "Importing receipt file chunk" + ); + + // It is possible for the first receipt returned by the file client to be the genesis + // block. In this case, we just prepend empty receipts to the current list of receipts. + // When initially writing to static files, the provider expects the first block to be block + // one. So, if the first block returned by the file client is the genesis block, we remove + // those receipts. + if first_block == 0 { + // remove the first empty receipts + let genesis_receipts = receipts.remove(0); + debug_assert!(genesis_receipts.is_empty()); + // this ensures the execution outcome and static file producer start at block 1 + first_block = 1; + } + highest_block_receipts = first_block + receipts.len() as u64 - 1; + + // RLP file may have too many blocks. We ignore the excess, but warn the user. + if highest_block_receipts > highest_block_transactions { + let excess = highest_block_receipts - highest_block_transactions; + highest_block_receipts -= excess; + + // Remove the last `excess` blocks + receipts.truncate(receipts.len() - excess as usize); + + warn!(target: "reth::cli", highest_block_receipts, "Too many decoded blocks, ignoring the last {excess}."); + } + + // Update total_receipts after all filtering + total_receipts += receipts.iter().map(|v| v.len()).sum::(); + + let execution_outcome = + ExecutionOutcome::new(Default::default(), receipts, first_block, Default::default()); + + // finally, write the receipts + provider.write_state(&execution_outcome, OriginalValuesKnown::Yes)?; + } + + // Only commit if we have imported as many receipts as the number of transactions. + let total_imported_txns = static_file_provider + .count_entries::() + .expect("transaction static files must exist before importing receipts"); + + if total_receipts != total_imported_txns { + eyre::bail!( + "Number of receipts ({total_receipts}) inconsistent with transactions {total_imported_txns}" + ) + } + + // Only commit if the receipt block height matches the one from transactions. + if highest_block_receipts != highest_block_transactions { + eyre::bail!( + "Receipt block height ({highest_block_receipts}) inconsistent with transactions' {highest_block_transactions}" + ) + } + + // Required or any access-write provider factory will attempt to unwind to 0. + provider + .save_stage_checkpoint(StageId::Execution, StageCheckpoint::new(highest_block_receipts))?; + + provider.commit()?; + + Ok(ImportReceiptsResult { total_decoded_receipts, total_filtered_out_dup_txns }) +} + +/// Result of importing receipts in chunks. +#[derive(Debug)] +pub struct ImportReceiptsResult { + /// Total decoded receipts. + pub total_decoded_receipts: usize, + /// Total filtered out receipts. 
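+    /// (i.e., entries removed by the caller-supplied filter callback)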
+    pub total_filtered_out_dup_txns: usize,
+}
+
+#[cfg(test)]
+mod test {
+    use alloy_primitives::hex;
+    use reth_db_common::init::init_genesis;
+    use reth_optimism_chainspec::OP_MAINNET;
+    use reth_optimism_node::OpNode;
+    use reth_provider::test_utils::create_test_provider_factory_with_node_types;
+    use reth_stages::test_utils::TestStageDB;
+    use tempfile::tempfile;
+    use tokio::{
+        fs::File,
+        io::{AsyncSeekExt, AsyncWriteExt, SeekFrom},
+    };
+
+    use crate::receipt_file_codec::test::{
+        HACK_RECEIPT_ENCODED_BLOCK_1, HACK_RECEIPT_ENCODED_BLOCK_2, HACK_RECEIPT_ENCODED_BLOCK_3,
+    };
+
+    use super::*;
+
+    /// No receipts for genesis block
+    const EMPTY_RECEIPTS_GENESIS_BLOCK: &[u8] = &hex!("c0");
+
+    #[ignore]
+    #[tokio::test]
+    async fn filter_out_genesis_block_receipts() {
+        let mut f: File = tempfile().unwrap().into();
+        f.write_all(EMPTY_RECEIPTS_GENESIS_BLOCK).await.unwrap();
+        f.write_all(HACK_RECEIPT_ENCODED_BLOCK_1).await.unwrap();
+        f.write_all(HACK_RECEIPT_ENCODED_BLOCK_2).await.unwrap();
+        f.write_all(HACK_RECEIPT_ENCODED_BLOCK_3).await.unwrap();
+        f.flush().await.unwrap();
+        f.seek(SeekFrom::Start(0)).await.unwrap();
+
+        let reader = ChunkedFileReader::from_file(f, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE, false)
+            .await
+            .unwrap();
+
+        let db = TestStageDB::default();
+        init_genesis(&db.factory).unwrap();
+
+        let provider_factory =
+            create_test_provider_factory_with_node_types::(OP_MAINNET.clone());
+        let ImportReceiptsResult { total_decoded_receipts, total_filtered_out_dup_txns } =
+            import_receipts_from_reader(&provider_factory, reader, |_, _| 0).await.unwrap();
+
+        assert_eq!(total_decoded_receipts, 3);
+        assert_eq!(total_filtered_out_dup_txns, 0);
+    }
+}
diff --git a/crates/optimism/cli/src/commands/init_state.rs b/crates/optimism/cli/src/commands/init_state.rs
new file mode 100644
index 00000000..93de3986
--- /dev/null
+++ b/crates/optimism/cli/src/commands/init_state.rs
@@ -0,0 +1,114 @@
+//! Command that initializes the node from a genesis file.
+
+use alloy_consensus::Header;
+use clap::Parser;
+use reth_cli::chainspec::ChainSpecParser;
+use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment};
+use reth_db_common::init::init_from_state_dump;
+use reth_optimism_chainspec::OpChainSpec;
+use reth_optimism_primitives::{
+    bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH},
+    OpPrimitives,
+};
+use reth_primitives_traits::{header::HeaderMut, SealedHeader};
+use reth_provider::{
+    BlockNumReader, DBProvider, DatabaseProviderFactory, StaticFileProviderFactory,
+    StaticFileWriter,
+};
+use std::{io::BufReader, sync::Arc};
+use tracing::info;
+
+/// Initializes the database with the genesis block.
+#[derive(Debug, Parser)]
+pub struct InitStateCommandOp {
+    #[command(flatten)]
+    init_state: reth_cli_commands::init_state::InitStateCommand,
+
+    /// Specifies whether to initialize the state without relying on OVM or EVM historical data.
+    ///
+    /// When enabled, and before inserting the state, it creates a dummy chain up to the last OVM
+    /// block (#105235062) (14GB / 90 seconds). It then appends the Bedrock block. This is
+    /// hardcoded for OP mainnet; for other OP chains you will need to pass in a header.
+    ///
+    /// - **Note**: **Do not** import receipts and blocks beforehand, or this will fail or be
+    ///   ignored.
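+    ///
+    /// Invocation sketch (binary name and state dump path are assumptions):
+    /// `<bin> init-state --without-ovm --chain optimism <STATE_DUMP>`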
+ #[arg(long, default_value = "false")] + without_ovm: bool, +} + +impl> InitStateCommandOp { + /// Execute the `init` command + pub async fn execute>( + mut self, + ) -> eyre::Result<()> { + // If using --without-ovm for OP mainnet, handle the special case with hardcoded Bedrock + // header. Otherwise delegate to the base InitStateCommand implementation. + if self.without_ovm { + if self.init_state.env.chain.is_optimism_mainnet() { + return self.execute_with_bedrock_header::(); + } + + // For non-mainnet OP chains with --without-ovm, use the base implementation + // by setting the without_evm flag + self.init_state.without_evm = true; + } + + self.init_state.execute::().await + } + + /// Execute init-state with hardcoded Bedrock header for OP mainnet. + fn execute_with_bedrock_header< + N: CliNodeTypes, + >( + self, + ) -> eyre::Result<()> { + info!(target: "reth::cli", "Reth init-state starting for OP mainnet"); + let env = self.init_state.env.init::(AccessRights::RW)?; + + let Environment { config, provider_factory, .. } = env; + let static_file_provider = provider_factory.static_file_provider(); + let provider_rw = provider_factory.database_provider_rw()?; + + let last_block_number = provider_rw.last_block_number()?; + + if last_block_number == 0 { + reth_cli_commands::init_state::without_evm::setup_without_evm( + &provider_rw, + SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), + |number| { + let mut header = Header::default(); + header.set_number(number); + header + }, + )?; + + // SAFETY: it's safe to commit static files, since in the event of a crash, they + // will be unwound according to database checkpoints. + // + // Necessary to commit, so the BEDROCK_HEADER is accessible to provider_rw and + // init_state_dump + static_file_provider.commit()?; + } else if last_block_number > 0 && last_block_number < BEDROCK_HEADER.number { + return Err(eyre::eyre!( + "Data directory should be empty when calling init-state with --without-ovm." + )) + } + + info!(target: "reth::cli", "Initiating state dump"); + + let reader = BufReader::new(reth_fs_util::open(self.init_state.state)?); + let hash = init_from_state_dump(reader, &provider_rw, config.stages.etl)?; + + provider_rw.commit()?; + + info!(target: "reth::cli", hash = ?hash, "Genesis block written"); + Ok(()) + } +} + +impl InitStateCommandOp { + /// Returns the underlying chain being used to run this command + pub fn chain_spec(&self) -> Option<&Arc> { + self.init_state.chain_spec() + } +} diff --git a/crates/optimism/cli/src/commands/mod.rs b/crates/optimism/cli/src/commands/mod.rs new file mode 100644 index 00000000..24efdd89 --- /dev/null +++ b/crates/optimism/cli/src/commands/mod.rs @@ -0,0 +1,95 @@ +use crate::chainspec::OpChainSpecParser; +use clap::Subcommand; +use import::ImportOpCommand; +use import_receipts::ImportReceiptsOpCommand; +use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::{ + config_cmd, db, dump_genesis, init_cmd, + node::{self, NoArgs}, + p2p, prune, re_execute, stage, +}; +use std::{fmt, sync::Arc}; + +pub mod import; +pub mod import_receipts; +pub mod init_state; +pub mod op_proofs; + +#[cfg(feature = "dev")] +pub mod test_vectors; + +/// Commands to be executed +#[derive(Debug, Subcommand)] +pub enum Commands +{ + /// Start the node + #[command(name = "node")] + Node(Box>), + /// Initialize the database from a genesis file. 
+ #[command(name = "init")] + Init(init_cmd::InitCommand), + /// Initialize the database from a state dump file. + #[command(name = "init-state")] + InitState(init_state::InitStateCommandOp), + /// This syncs RLP encoded OP blocks below Bedrock from a file, without executing. + #[command(name = "import-op")] + ImportOp(ImportOpCommand), + /// This imports RLP encoded receipts from a file. + #[command(name = "import-receipts-op")] + ImportReceiptsOp(ImportReceiptsOpCommand), + /// Dumps genesis block JSON configuration to stdout. + DumpGenesis(dump_genesis::DumpGenesisCommand), + /// Database debugging utilities + #[command(name = "db")] + Db(db::Command), + /// Manipulate individual stages. + #[command(name = "stage")] + Stage(Box>), + /// P2P Debugging utilities + #[command(name = "p2p")] + P2P(Box>), + /// Write config to stdout + #[command(name = "config")] + Config(config_cmd::Command), + /// Prune according to the configuration without any limits + #[command(name = "prune")] + Prune(prune::PruneCommand), + /// Generate Test Vectors + #[cfg(feature = "dev")] + #[command(name = "test-vectors")] + TestVectors(test_vectors::Command), + /// Re-execute blocks in parallel to verify historical sync correctness. + #[command(name = "re-execute")] + ReExecute(re_execute::Command), + /// Manage storage of historical proofs in expanded trie db in fault proof window. + #[command(name = "proofs")] + OpProofs(op_proofs::Command), +} + +impl< + C: ChainSpecParser, + Ext: clap::Args + fmt::Debug, + > Commands +{ + /// Returns the underlying chain being used for commands + pub fn chain_spec(&self) -> Option<&Arc> { + match self { + Self::Node(cmd) => cmd.chain_spec(), + Self::Init(cmd) => cmd.chain_spec(), + Self::InitState(cmd) => cmd.chain_spec(), + Self::DumpGenesis(cmd) => cmd.chain_spec(), + Self::Db(cmd) => cmd.chain_spec(), + Self::Stage(cmd) => cmd.chain_spec(), + Self::P2P(cmd) => cmd.chain_spec(), + Self::Config(_) => None, + Self::Prune(cmd) => cmd.chain_spec(), + Self::ImportOp(cmd) => cmd.chain_spec(), + Self::ImportReceiptsOp(cmd) => cmd.chain_spec(), + #[cfg(feature = "dev")] + Self::TestVectors(_) => None, + Self::ReExecute(cmd) => cmd.chain_spec(), + Self::OpProofs(cmd) => cmd.chain_spec(), + } + } +} diff --git a/crates/optimism/cli/src/commands/op_proofs/init.rs b/crates/optimism/cli/src/commands/op_proofs/init.rs new file mode 100644 index 00000000..a9aba66d --- /dev/null +++ b/crates/optimism/cli/src/commands/op_proofs/init.rs @@ -0,0 +1,100 @@ +//! Command that initializes the OP proofs storage with the current state of the chain. + +use clap::Parser; +use reth_chainspec::ChainInfo; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; +use reth_node_core::version::version_metadata; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_primitives::OpPrimitives; +use reth_optimism_trie::{db::MdbxProofsStorage, BackfillJob, OpProofsStorage, OpProofsStore}; +use reth_provider::{BlockNumReader, DBProvider, DatabaseProviderFactory}; +use std::{path::PathBuf, sync::Arc}; +use tracing::info; + +/// Initializes the proofs storage with the current state of the chain. +/// +/// This command must be run before starting the node with proofs history enabled. +/// It backfills the proofs storage with trie nodes from the current chain state. +#[derive(Debug, Parser)] +pub struct InitCommand { + #[command(flatten)] + env: EnvironmentArgs, + + /// The path to the storage DB for proofs history. 
+ /// + /// This should match the path used when starting the node with + /// `--proofs-history.storage-path`. + #[arg( + long = "proofs-history.storage-path", + value_name = "PROOFS_HISTORY_STORAGE_PATH", + required = true + )] + pub storage_path: PathBuf, +} + +impl> InitCommand { + /// Execute `initialize-op-proofs` command + pub async fn execute>( + self, + ) -> eyre::Result<()> { + info!(target: "reth::cli", "reth {} starting", version_metadata().short_version); + info!(target: "reth::cli", "Initializing OP proofs storage at: {:?}", self.storage_path); + + // Initialize the environment with read-only access + let Environment { provider_factory, .. } = self.env.init::(AccessRights::RO)?; + + // Create the proofs storage + let storage: OpProofsStorage> = Arc::new( + MdbxProofsStorage::new(&self.storage_path) + .map_err(|e| eyre::eyre!("Failed to create MdbxProofsStorage: {e}"))?, + ) + .into(); + + // Check if already initialized + if let Some((block_number, block_hash)) = storage.get_earliest_block_number().await? { + info!( + target: "reth::cli", + block_number = block_number, + block_hash = ?block_hash, + "Proofs storage already initialized" + ); + return Ok(()); + } + + // Get the current chain state + let ChainInfo { best_number, best_hash, .. } = provider_factory.chain_info()?; + + info!( + target: "reth::cli", + best_number = best_number, + best_hash = ?best_hash, + "Starting backfill job for current chain state" + ); + + // Run the backfill job + { + let db_provider = + provider_factory.database_provider_ro()?.disable_long_read_transaction_safety(); + let db_tx = db_provider.into_tx(); + + BackfillJob::new(storage.clone(), &db_tx).run(best_number, best_hash).await?; + } + + info!( + target: "reth::cli", + best_number = best_number, + best_hash = ?best_hash, + "Proofs storage initialized successfully" + ); + + Ok(()) + } +} + +impl InitCommand { + /// Returns the underlying chain being used to run this command + pub const fn chain_spec(&self) -> Option<&Arc> { + Some(&self.env.chain) + } +} diff --git a/crates/optimism/cli/src/commands/op_proofs/mod.rs b/crates/optimism/cli/src/commands/op_proofs/mod.rs new file mode 100644 index 00000000..44ac1fc3 --- /dev/null +++ b/crates/optimism/cli/src/commands/op_proofs/mod.rs @@ -0,0 +1,61 @@ +//! 
OP Proofs management commands + +use clap::{Parser, Subcommand}; +use reth_cli::chainspec::ChainSpecParser; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_primitives::OpPrimitives; +use std::sync::Arc; + +pub mod init; +pub mod prune; +pub mod unwind; + +/// `op-reth op-proofs` command +#[derive(Debug, Parser)] +pub struct Command { + #[command(subcommand)] + command: Subcommands, +} + +impl> Command { + /// Execute `op-proofs` command + pub async fn execute< + N: reth_cli_commands::common::CliNodeTypes< + ChainSpec = C::ChainSpec, + Primitives = OpPrimitives, + >, + >( + self, + ) -> eyre::Result<()> { + match self.command { + Subcommands::Init(cmd) => cmd.execute::().await, + Subcommands::Prune(cmd) => cmd.execute::().await, + Subcommands::Unwind(cmd) => cmd.execute::().await, + } + } +} + +impl Command { + /// Returns the underlying chain being used to run this command + pub const fn chain_spec(&self) -> Option<&Arc> { + match &self.command { + Subcommands::Init(cmd) => cmd.chain_spec(), + Subcommands::Prune(cmd) => cmd.chain_spec(), + Subcommands::Unwind(cmd) => cmd.chain_spec(), + } + } +} + +/// `op-reth op-proofs` subcommands +#[derive(Debug, Subcommand)] +pub enum Subcommands { + /// Initialize the proofs storage with the current state of the chain + #[command(name = "init")] + Init(init::InitCommand), + /// Prune old proof history to reclaim space + #[command(name = "prune")] + Prune(prune::PruneCommand), + /// Unwind the proofs storage to a specific block + #[command(name = "unwind")] + Unwind(unwind::UnwindCommand), +} diff --git a/crates/optimism/cli/src/commands/op_proofs/prune.rs b/crates/optimism/cli/src/commands/op_proofs/prune.rs new file mode 100644 index 00000000..48d30e1a --- /dev/null +++ b/crates/optimism/cli/src/commands/op_proofs/prune.rs @@ -0,0 +1,90 @@ +//! Command that prunes the OP proofs storage. + +use clap::Parser; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; +use reth_node_core::version::version_metadata; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_primitives::OpPrimitives; +use reth_optimism_trie::{ + db::MdbxProofsStorage, OpProofStoragePruner, OpProofsStorage, OpProofsStore, +}; +use std::{path::PathBuf, sync::Arc}; +use tracing::info; + +/// Prunes the proofs storage by removing old proof history and state updates. +#[derive(Debug, Parser)] +pub struct PruneCommand { + #[command(flatten)] + env: EnvironmentArgs, + + /// The path to the storage DB for proofs history. + #[arg( + long = "proofs-history.storage-path", + value_name = "PROOFS_HISTORY_STORAGE_PATH", + required = true + )] + pub storage_path: PathBuf, + + /// The window to span blocks for proofs history. Value is the number of blocks. + /// Default is 1 month of blocks based on 2 seconds block time. + /// 30 * 24 * 60 * 60 / 2 = `1_296_000` + #[arg( + long = "proofs-history.window", + default_value_t = 1_296_000, + value_name = "PROOFS_HISTORY_WINDOW" + )] + pub proofs_history_window: u64, + + /// The batch size for pruning operations. + #[arg( + long = "proofs-history.prune-batch-size", + default_value_t = 1000, + value_name = "PROOFS_HISTORY_PRUNE_BATCH_SIZE" + )] + pub proofs_history_prune_batch_size: u64, +} + +impl> PruneCommand { + /// Execute [`PruneCommand`]. 
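+    ///
+    /// Opens the proofs database at `storage_path` and runs a single prune pass
+    /// using the configured history window and batch size.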
+    pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec, Primitives = OpPrimitives>>(
+        self,
+    ) -> eyre::Result<()> {
+        info!(target: "reth::cli", "reth {} starting", version_metadata().short_version);
+        info!(target: "reth::cli", "Pruning OP proofs storage at: {:?}", self.storage_path);
+
+        // Initialize the environment with read-only access
+        let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RO)?;
+
+        let storage: OpProofsStorage<Arc<MdbxProofsStorage>> = Arc::new(
+            MdbxProofsStorage::new(&self.storage_path)
+                .map_err(|e| eyre::eyre!("Failed to create MdbxProofsStorage: {e}"))?,
+        )
+        .into();
+
+        let earliest_block = storage.get_earliest_block_number().await?;
+        let latest_block = storage.get_latest_block_number().await?;
+        info!(
+            target: "reth::cli",
+            ?earliest_block,
+            ?latest_block,
+            "Current proofs storage block range"
+        );
+
+        let pruner = OpProofStoragePruner::new(
+            storage,
+            provider_factory,
+            self.proofs_history_window,
+            self.proofs_history_prune_batch_size,
+        );
+        pruner.run().await;
+        Ok(())
+    }
+}
+
+impl<C: ChainSpecParser> PruneCommand<C> {
+    /// Returns the underlying chain being used to run this command
+    pub const fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
+        Some(&self.env.chain)
+    }
+}
diff --git a/crates/optimism/cli/src/commands/op_proofs/unwind.rs b/crates/optimism/cli/src/commands/op_proofs/unwind.rs
new file mode 100644
index 00000000..0d4e67bf
--- /dev/null
+++ b/crates/optimism/cli/src/commands/op_proofs/unwind.rs
@@ -0,0 +1,106 @@
+//! Command that unwinds the OP proofs storage to a specific block number.
+
+use clap::Parser;
+use reth_cli::chainspec::ChainSpecParser;
+use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
+use reth_node_core::version::version_metadata;
+use reth_optimism_chainspec::OpChainSpec;
+use reth_optimism_primitives::OpPrimitives;
+use reth_optimism_trie::{db::MdbxProofsStorage, OpProofsStorage, OpProofsStore};
+use reth_provider::{BlockReader, TransactionVariant};
+use std::{path::PathBuf, sync::Arc};
+use tracing::{info, warn};
+
+/// Unwinds the proofs storage to a specific block number.
+///
+/// This command removes all proof history and state updates after the target block number.
+#[derive(Debug, Parser)]
+pub struct UnwindCommand<C: ChainSpecParser> {
+    #[command(flatten)]
+    env: EnvironmentArgs<C>,
+
+    /// The path to the storage DB for proofs history.
+    #[arg(
+        long = "proofs-history.storage-path",
+        value_name = "PROOFS_HISTORY_STORAGE_PATH",
+        required = true
+    )]
+    pub storage_path: PathBuf,
+
+    /// The target block number to unwind to.
+    ///
+    /// All history *after* this block will be removed.
+    #[arg(long, value_name = "TARGET_BLOCK")]
+    pub target: u64,
+}
+
+impl<C: ChainSpecParser> UnwindCommand<C> {
+    /// Validates that the target block number is within a valid range for unwinding.
+    async fn validate_unwind_range(
+        &self,
+        storage: &OpProofsStorage<Arc<MdbxProofsStorage>>,
+    ) -> eyre::Result<bool> {
+        let (Some((earliest, _)), Some((latest, _))) =
+            (storage.get_earliest_block_number().await?, storage.get_latest_block_number().await?)
+        else {
+            warn!(target: "reth::cli", "No blocks found in proofs storage. Nothing to unwind.");
+            return Ok(false);
+        };
+
+        if self.target <= earliest {
+            warn!(target: "reth::cli", unwind_target = ?self.target, ?earliest, "Target block is less than the earliest block in proofs storage. Nothing to unwind.");
+            return Ok(false);
+        }
+
+        if self.target > latest {
+            warn!(target: "reth::cli", unwind_target = ?self.target, ?latest, "Target block is not less than the latest block in proofs storage.
Nothing to unwind."); + return Ok(false); + } + + Ok(true) + } +} + +impl> UnwindCommand { + /// Execute [`UnwindCommand`]. + pub async fn execute>( + self, + ) -> eyre::Result<()> { + info!(target: "reth::cli", "reth {} starting", version_metadata().short_version); + info!(target: "reth::cli", "Unwinding OP proofs storage at: {:?}", self.storage_path); + + // Initialize the environment with read-only access + let Environment { provider_factory, .. } = self.env.init::(AccessRights::RO)?; + + // Create the proofs storage + let storage: OpProofsStorage> = Arc::new( + MdbxProofsStorage::new(&self.storage_path) + .map_err(|e| eyre::eyre!("Failed to create MdbxProofsStorage: {e}"))?, + ) + .into(); + + // Validate that the target block is within a valid range for unwinding + if !self.validate_unwind_range(&storage).await? { + return Ok(()); + } + + // Get the target block from the main database + let block = provider_factory + .recovered_block(self.target.into(), TransactionVariant::NoHash)? + .ok_or_else(|| { + eyre::eyre!("Target block {} not found in the main database", self.target) + })?; + + info!(target: "reth::cli", block_number = block.number, block_hash = %block.hash(), "Unwinding to target block"); + storage.unwind_history(block.block_with_parent()).await?; + + Ok(()) + } +} + +impl UnwindCommand { + /// Returns the underlying chain being used to run this command + pub const fn chain_spec(&self) -> Option<&Arc> { + Some(&self.env.chain) + } +} diff --git a/crates/optimism/cli/src/commands/test_vectors.rs b/crates/optimism/cli/src/commands/test_vectors.rs new file mode 100644 index 00000000..c018dafe --- /dev/null +++ b/crates/optimism/cli/src/commands/test_vectors.rs @@ -0,0 +1,78 @@ +//! Command for generating test vectors. + +use clap::{Parser, Subcommand}; +use op_alloy_consensus::TxDeposit; +use proptest::test_runner::TestRunner; +use reth_chainspec::ChainSpec; +use reth_cli_commands::{ + compact_types, + test_vectors::{ + compact, + compact::{ + generate_vector, read_vector, GENERATE_VECTORS as ETH_GENERATE_VECTORS, + READ_VECTORS as ETH_READ_VECTORS, + }, + tables, + }, +}; +use std::sync::Arc; + +/// Generate test-vectors for different data types. +#[derive(Debug, Parser)] +pub struct Command { + #[command(subcommand)] + command: Subcommands, +} + +#[derive(Subcommand, Debug)] +/// `reth test-vectors` subcommands +pub enum Subcommands { + /// Generates test vectors for specified tables. If no table is specified, generate for all. + Tables { + /// List of table names. Case-sensitive. + names: Vec, + }, + /// Generates test vectors for `Compact` types with `--write`. Reads and checks generated + /// vectors with `--read`. + #[group(multiple = false, required = true)] + Compact { + /// Write test vectors to a file. + #[arg(long)] + write: bool, + + /// Read test vectors from a file. + #[arg(long)] + read: bool, + }, +} + +impl Command { + /// Execute the command + pub async fn execute(self) -> eyre::Result<()> { + match self.command { + Subcommands::Tables { names } => { + tables::generate_vectors(names)?; + } + Subcommands::Compact { write, .. 
} => { + compact_types!( + regular: [ + TxDeposit + ], identifier: [] + ); + + if write { + compact::generate_vectors_with(ETH_GENERATE_VECTORS)?; + compact::generate_vectors_with(GENERATE_VECTORS)?; + } else { + compact::read_vectors_with(ETH_READ_VECTORS)?; + compact::read_vectors_with(READ_VECTORS)?; + } + } + } + Ok(()) + } + /// Returns the underlying chain being used to run this command + pub const fn chain_spec(&self) -> Option<&Arc> { + None + } +} diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs new file mode 100644 index 00000000..52fdcc2d --- /dev/null +++ b/crates/optimism/cli/src/lib.rs @@ -0,0 +1,215 @@ +//! OP-Reth CLI implementation. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg))] + +/// A configurable App on top of the cli parser. +pub mod app; +/// Optimism chain specification parser. +pub mod chainspec; +/// Optimism CLI commands. +pub mod commands; +/// Module with a codec for reading and encoding receipts in files. +/// +/// Enables decoding and encoding `OpGethReceipt` type. See . +/// +/// Currently configured to use codec [`OpGethReceipt`](receipt_file_codec::OpGethReceipt) based on +/// export of below Bedrock data using . Codec can +/// be replaced with regular encoding of receipts for export. +/// +/// NOTE: receipts can be exported using regular op-geth encoding for `Receipt` type, to fit +/// reth's needs for importing. However, this would require patching the diff in to export the `Receipt` and not `OpGethReceipt` type (originally +/// made for op-erigon's import needs). +pub mod receipt_file_codec; + +/// OVM block, same as EVM block at bedrock, except for signature of deposit transaction +/// not having a signature back then. +/// Enables decoding and encoding `Block` types within file contexts. +pub mod ovm_file_codec; + +pub use app::CliApp; +pub use commands::{import::ImportOpCommand, import_receipts::ImportReceiptsOpCommand}; +use reth_optimism_chainspec::OpChainSpec; +use reth_rpc_server_types::{DefaultRpcModuleValidator, RpcModuleValidator}; + +use std::{ffi::OsString, fmt, marker::PhantomData, sync::Arc}; + +use chainspec::OpChainSpecParser; +use clap::Parser; +use commands::Commands; +use futures_util::Future; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::launcher::FnLauncher; +use reth_cli_runner::CliRunner; +use reth_db::DatabaseEnv; +use reth_node_builder::{NodeBuilder, WithLaunchContext}; +use reth_node_core::{ + args::{LogArgs, TraceArgs}, + version::version_metadata, +}; +use reth_optimism_node::args::RollupArgs; + +// This allows us to manually enable node metrics features, required for proper jemalloc metric +// reporting +use reth_node_metrics as _; + +/// The main op-reth cli interface. +/// +/// This is the entrypoint to the executable. 
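+///
+/// # Example
+///
+/// A minimal launch sketch (marked `ignore`, so it is not compiled here); the `OpNode`
+/// component set and the launcher closure are illustrative of how a binary wires this up:
+///
+/// ```ignore
+/// use reth_optimism_cli::Cli;
+/// use reth_optimism_node::{args::RollupArgs, OpNode};
+///
+/// fn main() -> eyre::Result<()> {
+///     Cli::parse_args().run(async move |builder, rollup_args: RollupArgs| {
+///         // Build and launch a regular OP node from the parsed CLI arguments.
+///         let handle = builder.node(OpNode::new(rollup_args)).launch().await?;
+///         handle.wait_for_node_exit().await
+///     })
+/// }
+/// ```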
+#[derive(Debug, Parser)]
+#[command(author, name = version_metadata().name_client.as_ref(), version = version_metadata().short_version.as_ref(), long_version = version_metadata().long_version.as_ref(), about = "Reth", long_about = None)]
+pub struct Cli<
+    Spec: ChainSpecParser = OpChainSpecParser,
+    Ext: clap::Args + fmt::Debug = RollupArgs,
+    Rpc: RpcModuleValidator = DefaultRpcModuleValidator,
+> {
+    /// The command to run
+    #[command(subcommand)]
+    pub command: Commands<Spec, Ext>,
+
+    /// The logging configuration for the CLI.
+    #[command(flatten)]
+    pub logs: LogArgs,
+
+    /// The metrics configuration for the CLI.
+    #[command(flatten)]
+    pub traces: TraceArgs,
+
+    /// Type marker for the RPC module validator
+    #[arg(skip)]
+    _phantom: PhantomData<Rpc>,
+}
+
+impl Cli {
+    /// Parses only the default CLI arguments
+    pub fn parse_args() -> Self {
+        Self::parse()
+    }
+
+    /// Parses only the default CLI arguments from the given iterator
+    pub fn try_parse_args_from<I, T>(itr: I) -> Result<Self, clap::error::Error>
+    where
+        I: IntoIterator<Item = T>,
+        T: Into<OsString> + Clone,
+    {
+        Self::try_parse_from(itr)
+    }
+}
+
+impl<C, Ext, Rpc> Cli<C, Ext, Rpc>
+where
+    C: ChainSpecParser,
+    Ext: clap::Args + fmt::Debug,
+    Rpc: RpcModuleValidator,
+{
+    /// Configures the CLI and returns a [`CliApp`] instance.
+    ///
+    /// This method is used to prepare the CLI for execution by wrapping it in a
+    /// [`CliApp`] that can be further configured before running.
+    pub fn configure(self) -> CliApp<C, Ext, Rpc> {
+        CliApp::new(self)
+    }
+
+    /// Execute the configured cli command.
+    ///
+    /// This accepts a closure that is used to launch the node via the
+    /// [`NodeCommand`](reth_cli_commands::node::NodeCommand).
+    pub fn run<L, Fut>(self, launcher: L) -> eyre::Result<()>
+    where
+        L: FnOnce(WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, C::ChainSpec>>, Ext) -> Fut,
+        Fut: Future<Output = eyre::Result<()>>,
+    {
+        self.with_runner(CliRunner::try_default_runtime()?, launcher)
+    }
+
+    /// Execute the configured cli command with the provided [`CliRunner`].
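+    ///
+    /// Unlike [`Self::run`], which constructs a default runtime via
+    /// [`CliRunner::try_default_runtime`], this lets the caller supply an existing runner.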
+ pub fn with_runner(self, runner: CliRunner, launcher: L) -> eyre::Result<()> + where + L: FnOnce(WithLaunchContext, C::ChainSpec>>, Ext) -> Fut, + Fut: Future>, + { + let mut this = self.configure(); + this.set_runner(runner); + this.run(FnLauncher::new::(async move |builder, chain_spec| { + launcher(builder, chain_spec).await + })) + } +} + +#[cfg(test)] +mod test { + use crate::{chainspec::OpChainSpecParser, commands::Commands, Cli}; + use clap::Parser; + use reth_cli_commands::{node::NoArgs, NodeCommand}; + use reth_optimism_chainspec::{BASE_MAINNET, OP_DEV}; + use reth_optimism_node::args::RollupArgs; + + #[test] + fn parse_dev() { + let cmd = NodeCommand::::parse_from(["op-reth", "--dev"]); + let chain = OP_DEV.clone(); + assert_eq!(cmd.chain.chain, chain.chain); + assert_eq!(cmd.chain.genesis_hash(), chain.genesis_hash()); + assert_eq!( + cmd.chain.paris_block_and_final_difficulty, + chain.paris_block_and_final_difficulty + ); + assert_eq!(cmd.chain.hardforks, chain.hardforks); + + assert!(cmd.rpc.http); + assert!(cmd.network.discovery.disable_discovery); + + assert!(cmd.dev.dev); + } + + #[test] + fn parse_node() { + let cmd = Cli::::parse_from([ + "op-reth", + "node", + "--chain", + "base", + "--datadir", + "/mnt/datadirs/base", + "--instance", + "2", + "--http", + "--http.addr", + "0.0.0.0", + "--ws", + "--ws.addr", + "0.0.0.0", + "--http.api", + "admin,debug,eth,net,trace,txpool,web3,rpc,reth,ots", + "--rollup.sequencer-http", + "https://mainnet-sequencer.base.org", + "--rpc-max-tracing-requests", + "1000000", + "--rpc.gascap", + "18446744073709551615", + "--rpc.max-connections", + "429496729", + "--rpc.max-logs-per-response", + "0", + "--rpc.max-subscriptions-per-connection", + "10000", + "--metrics", + "9003", + "--tracing-otlp=http://localhost:4318/v1/traces", + "--log.file.max-size", + "100", + ]); + + match cmd.command { + Commands::Node(command) => { + assert_eq!(command.chain.as_ref(), BASE_MAINNET.as_ref()); + } + _ => panic!("unexpected command"), + } + } +} diff --git a/crates/optimism/cli/src/ovm_file_codec.rs b/crates/optimism/cli/src/ovm_file_codec.rs new file mode 100644 index 00000000..83f3e487 --- /dev/null +++ b/crates/optimism/cli/src/ovm_file_codec.rs @@ -0,0 +1,383 @@ +use alloy_consensus::{ + transaction::{from_eip155_value, RlpEcdsaDecodableTx, RlpEcdsaEncodableTx}, + Header, TxEip1559, TxEip2930, TxEip7702, TxLegacy, +}; +use alloy_eips::{ + eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, + eip4895::Withdrawals, + Typed2718, +}; +use alloy_primitives::{ + bytes::{Buf, BytesMut}, + keccak256, Signature, TxHash, B256, U256, +}; +use alloy_rlp::{Decodable, Error as RlpError, RlpDecodable}; +use derive_more::{AsRef, Deref}; +use op_alloy_consensus::{OpTxType, OpTypedTransaction, TxDeposit}; +use reth_downloaders::file_client::FileClientError; +use serde::{Deserialize, Serialize}; +use tokio_util::codec::Decoder; + +#[expect(dead_code)] +/// Specific codec for reading raw block bodies from a file +/// with optimism-specific signature handling +pub(crate) struct OvmBlockFileCodec; + +impl Decoder for OvmBlockFileCodec { + type Item = OvmBlock; + type Error = FileClientError; + + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + if src.is_empty() { + return Ok(None); + } + + let buf_slice = &mut src.as_ref(); + let body = + OvmBlock::decode(buf_slice).map_err(|err| FileClientError::Rlp(err, src.to_vec()))?; + src.advance(src.len() - buf_slice.len()); + + Ok(Some(body)) + } +} + +/// OVM block, same as EVM block but with 
different transaction signature handling.
+///
+/// Pre-bedrock system transactions on Optimism were sent from the zero address
+/// with an empty signature.
+#[derive(Debug, Clone, PartialEq, Eq, RlpDecodable)]
+pub struct OvmBlock {
+    /// Block header
+    pub header: Header,
+    /// Block body
+    pub body: OvmBlockBody,
+}
+
+impl OvmBlock {
+    /// Decodes a `Block` from the given byte slice.
+    pub fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
+        let header = Header::decode(buf)?;
+        let body = OvmBlockBody::decode(buf)?;
+        Ok(Self { header, body })
+    }
+}
+
+/// The body of a block for OVM
+#[derive(Debug, Clone, PartialEq, Eq, Default, RlpDecodable)]
+#[rlp(trailing)]
+pub struct OvmBlockBody {
+    /// Transactions in the block
+    pub transactions: Vec<OvmTransactionSigned>,
+    /// Uncle headers for the given block
+    pub ommers: Vec<Header>
, + /// Withdrawals in the block. + pub withdrawals: Option, +} + +/// Signed transaction pre bedrock. +#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)] +pub struct OvmTransactionSigned { + /// Transaction hash + pub hash: TxHash, + /// The transaction signature values + pub signature: Signature, + /// Raw transaction info + #[deref] + #[as_ref] + pub transaction: OpTypedTransaction, +} + +impl AsRef for OvmTransactionSigned { + fn as_ref(&self) -> &Self { + self + } +} + +impl OvmTransactionSigned { + /// Calculate transaction hash, eip2728 transaction does not contain rlp header and start with + /// tx type. + pub fn recalculate_hash(&self) -> B256 { + keccak256(self.encoded_2718()) + } + + /// Create a new signed transaction from a transaction and its signature. + /// + /// This will also calculate the transaction hash using its encoding. + pub fn from_transaction_and_signature( + transaction: OpTypedTransaction, + signature: Signature, + ) -> Self { + let mut initial_tx = Self { transaction, hash: Default::default(), signature }; + initial_tx.hash = initial_tx.recalculate_hash(); + initial_tx + } + + /// Decodes legacy transaction from the data buffer into a tuple. + /// + /// This expects `rlp(legacy_tx)` + /// + /// Refer to the docs for [`Self::decode_rlp_legacy_transaction`] for details on the exact + /// format expected. + pub(crate) fn decode_rlp_legacy_transaction_tuple( + data: &mut &[u8], + ) -> alloy_rlp::Result<(TxLegacy, TxHash, Signature)> { + let original_encoding = *data; + + let header = alloy_rlp::Header::decode(data)?; + let remaining_len = data.len(); + + let transaction_payload_len = header.payload_length; + + if transaction_payload_len > remaining_len { + return Err(RlpError::InputTooShort); + } + + let mut transaction = TxLegacy { + nonce: Decodable::decode(data)?, + gas_price: Decodable::decode(data)?, + gas_limit: Decodable::decode(data)?, + to: Decodable::decode(data)?, + value: Decodable::decode(data)?, + input: Decodable::decode(data)?, + chain_id: None, + }; + + let v = Decodable::decode(data)?; + let r: U256 = Decodable::decode(data)?; + let s: U256 = Decodable::decode(data)?; + + let tx_length = header.payload_length + header.length(); + let hash = keccak256(&original_encoding[..tx_length]); + + // Handle both pre-bedrock and regular cases + let (signature, chain_id) = if v == 0 && r.is_zero() && s.is_zero() { + // Pre-bedrock system transactions case + (Signature::new(r, s, false), None) + } else { + // Regular transaction case + let (parity, chain_id) = from_eip155_value(v) + .ok_or(alloy_rlp::Error::Custom("invalid parity for legacy transaction"))?; + (Signature::new(r, s, parity), chain_id) + }; + + // Set chain ID and verify length + transaction.chain_id = chain_id; + let decoded = remaining_len - data.len(); + if decoded != transaction_payload_len { + return Err(RlpError::UnexpectedLength); + } + + Ok((transaction, hash, signature)) + } + + /// Decodes legacy transaction from the data buffer. + /// + /// This should be used _only_ be used in general transaction decoding methods, which have + /// already ensured that the input is a legacy transaction with the following format: + /// `rlp(legacy_tx)` + /// + /// Legacy transactions are encoded as lists, so the input should start with a RLP list header. 
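+    ///
+    /// Concretely, the payload decodes as the nine-field list
+    /// `rlp([nonce, gas_price, gas_limit, to, value, input, v, r, s])`, matching the read
+    /// order in [`Self::decode_rlp_legacy_transaction_tuple`].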
+ /// + /// This expects `rlp(legacy_tx)` + // TODO: make buf advancement semantics consistent with `decode_enveloped_typed_transaction`, + // so decoding methods do not need to manually advance the buffer + pub fn decode_rlp_legacy_transaction(data: &mut &[u8]) -> alloy_rlp::Result { + let (transaction, hash, signature) = Self::decode_rlp_legacy_transaction_tuple(data)?; + let signed = Self { transaction: OpTypedTransaction::Legacy(transaction), hash, signature }; + Ok(signed) + } +} + +impl Decodable for OvmTransactionSigned { + /// This `Decodable` implementation only supports decoding rlp encoded transactions as it's used + /// by p2p. + /// + /// The p2p encoding format always includes an RLP header, although the type RLP header depends + /// on whether or not the transaction is a legacy transaction. + /// + /// If the transaction is a legacy transaction, it is just encoded as a RLP list: + /// `rlp(tx-data)`. + /// + /// If the transaction is a typed transaction, it is encoded as a RLP string: + /// `rlp(tx-type || rlp(tx-data))` + /// + /// This can be used for decoding all signed transactions in p2p `BlockBodies` responses. + /// + /// This cannot be used for decoding EIP-4844 transactions in p2p `PooledTransactions`, since + /// the EIP-4844 variant of [`OvmTransactionSigned`] does not include the blob sidecar. + /// + /// For a method suitable for decoding pooled transactions, see \[`PooledTransaction`\]. + /// + /// CAUTION: Due to a quirk in [`Header::decode`], this method will succeed even if a typed + /// transaction is encoded in this format, and does not start with a RLP header: + /// `tx-type || rlp(tx-data)`. + /// + /// This is because [`Header::decode`] does not advance the buffer, and returns a length-1 + /// string header if the first byte is less than `0xf7`. + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + Self::network_decode(buf).map_err(Into::into) + } +} + +impl Typed2718 for OvmTransactionSigned { + fn ty(&self) -> u8 { + self.transaction.tx_type() as u8 + } +} + +impl Encodable2718 for OvmTransactionSigned { + fn type_flag(&self) -> Option { + match self.transaction.tx_type() { + OpTxType::Legacy => None, + tx_type => Some(tx_type as u8), + } + } + + fn encode_2718_len(&self) -> usize { + match &self.transaction { + OpTypedTransaction::Legacy(legacy_tx) => { + legacy_tx.eip2718_encoded_length(&self.signature) + } + OpTypedTransaction::Eip2930(access_list_tx) => { + access_list_tx.eip2718_encoded_length(&self.signature) + } + OpTypedTransaction::Eip1559(dynamic_fee_tx) => { + dynamic_fee_tx.eip2718_encoded_length(&self.signature) + } + OpTypedTransaction::Eip7702(set_code_tx) => { + set_code_tx.eip2718_encoded_length(&self.signature) + } + OpTypedTransaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(), + } + } + + fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { + self.transaction.eip2718_encode(&self.signature, out) + } +} + +impl Decodable2718 for OvmTransactionSigned { + fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result { + match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))? 
{ + OpTxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), + OpTxType::Eip2930 => { + let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: OpTypedTransaction::Eip2930(tx), signature, hash }) + } + OpTxType::Eip1559 => { + let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: OpTypedTransaction::Eip1559(tx), signature, hash }) + } + OpTxType::Eip7702 => { + let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: OpTypedTransaction::Eip7702(tx), signature, hash }) + } + OpTxType::Deposit => Ok(Self::from_transaction_and_signature( + OpTypedTransaction::Deposit(TxDeposit::rlp_decode(buf)?), + TxDeposit::signature(), + )), + } + } + + fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result { + Ok(Self::decode_rlp_legacy_transaction(buf)?) + } +} + +#[cfg(test)] +mod tests { + use crate::ovm_file_codec::OvmTransactionSigned; + use alloy_consensus::Typed2718; + use alloy_primitives::{address, b256, hex, TxKind, U256}; + use op_alloy_consensus::OpTypedTransaction; + const DEPOSIT_FUNCTION_SELECTOR: [u8; 4] = [0xb6, 0xb5, 0x5f, 0x25]; + use alloy_rlp::Decodable; + + #[test] + fn test_decode_legacy_transactions() { + // Test Case 1: contract deposit - regular L2 transaction calling deposit() function + // tx: https://optimistic.etherscan.io/getRawTx?tx=0x7860252963a2df21113344f323035ef59648638a571eef742e33d789602c7a1c + let deposit_tx_bytes = hex!( + "f88881f0830f481c830c6e4594a75127121d28a9bf848f3b70e7eea26570aa770080a4b6b55f2500000000000000000000000000000000000000000000000000000000000710b238a0d5c622d92ddf37f9c18a3465a572f74d8b1aeaf50c1cfb10b3833242781fd45fa02c4f1d5819bf8b70bf651e7a063b7db63c55bd336799c6ae3e5bc72ad6ef3def" + ); + let deposit_decoded = OvmTransactionSigned::decode(&mut &deposit_tx_bytes[..]).unwrap(); + + // Verify deposit transaction + let deposit_tx = match &deposit_decoded.transaction { + OpTypedTransaction::Legacy(tx) => tx, + _ => panic!("Expected legacy transaction for NFT deposit"), + }; + + assert_eq!( + deposit_tx.to, + TxKind::Call(address!("0xa75127121d28a9bf848f3b70e7eea26570aa7700")) + ); + assert_eq!(deposit_tx.nonce, 240); + assert_eq!(deposit_tx.gas_price, 1001500); + assert_eq!(deposit_tx.gas_limit, 814661); + assert_eq!(deposit_tx.value, U256::ZERO); + assert_eq!(&deposit_tx.input.as_ref()[0..4], DEPOSIT_FUNCTION_SELECTOR); + assert_eq!(deposit_tx.chain_id, Some(10)); + assert_eq!( + deposit_decoded.signature.r(), + U256::from_str_radix( + "d5c622d92ddf37f9c18a3465a572f74d8b1aeaf50c1cfb10b3833242781fd45f", + 16 + ) + .unwrap() + ); + assert_eq!( + deposit_decoded.signature.s(), + U256::from_str_radix( + "2c4f1d5819bf8b70bf651e7a063b7db63c55bd336799c6ae3e5bc72ad6ef3def", + 16 + ) + .unwrap() + ); + + // Test Case 2: pre-bedrock system transaction from block 105235052 + // tx: https://optimistic.etherscan.io/getRawTx?tx=0xe20b11349681dd049f8df32f5cdbb4c68d46b537685defcd86c7fa42cfe75b9e + let system_tx_bytes = hex!( + 
"f9026c830d899383124f808302a77e94a0cc33dd6f4819d473226257792afe230ec3c67f80b902046c459a280000000000000000000000004d73adb72bc3dd368966edd0f0b2148401a178e2000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000647fac7f00000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000000084704316e5000000000000000000000000000000000000000000000000000000000000006e10975631049de3c008989b0d8c19fc720dc556ca01abfbd794c6eb5075dd000d000000000000000000000000000000000000000000000000000000000000001410975631049de3c008989b0d8c19fc720dc556ca01abfbd794c6eb5075dd000d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082a39325251d44e11f3b6d92f9382438eb6c8b5068d4a488d4f177b26f2ca20db34ae53467322852afcc779f25eafd124c5586f54b9026497ba934403d4c578e3c1b5aa754c918ee2ecd25402df656c2419717e4017a7aecb84af3914fd3c7bf6930369c4e6ff76950246b98e354821775f02d33cdbee5ef6aed06c15b75691692d31c00000000000000000000000000000000000000000000000000000000000038a0e8991e95e66d809f4b6fb0af27c31368ca0f30e657165c428aa681ec5ea25bbea013ed325bd97365087ec713e9817d252b59113ea18430b71a5890c4eeb6b9efc4" + ); + let system_decoded = OvmTransactionSigned::decode(&mut &system_tx_bytes[..]).unwrap(); + + // Verify system transaction + assert!(system_decoded.is_legacy()); + + let system_tx = match &system_decoded.transaction { + OpTypedTransaction::Legacy(tx) => tx, + _ => panic!("Expected Legacy transaction"), + }; + + assert_eq!(system_tx.nonce, 887187); + assert_eq!(system_tx.gas_price, 1200000); + assert_eq!(system_tx.gas_limit, 173950); + assert_eq!( + system_tx.to, + TxKind::Call(address!("0xa0cc33dd6f4819d473226257792afe230ec3c67f")) + ); + assert_eq!(system_tx.value, U256::ZERO); + assert_eq!(system_tx.chain_id, Some(10)); + + assert_eq!( + system_decoded.signature.r(), + U256::from_str_radix( + "e8991e95e66d809f4b6fb0af27c31368ca0f30e657165c428aa681ec5ea25bbe", + 16 + ) + .unwrap() + ); + assert_eq!( + system_decoded.signature.s(), + U256::from_str_radix( + "13ed325bd97365087ec713e9817d252b59113ea18430b71a5890c4eeb6b9efc4", + 16 + ) + .unwrap() + ); + assert_eq!( + system_decoded.hash, + b256!("0xe20b11349681dd049f8df32f5cdbb4c68d46b537685defcd86c7fa42cfe75b9e") + ); + } +} diff --git a/crates/optimism/cli/src/receipt_file_codec.rs b/crates/optimism/cli/src/receipt_file_codec.rs new file mode 100644 index 00000000..e12af039 --- /dev/null +++ b/crates/optimism/cli/src/receipt_file_codec.rs @@ -0,0 +1,324 @@ +//! Codec for reading raw receipts from a file. + +use alloy_consensus::Receipt; +use alloy_primitives::{ + bytes::{Buf, BytesMut}, + Address, Bloom, Bytes, Log, B256, +}; +use alloy_rlp::{Decodable, RlpDecodable}; +use op_alloy_consensus::{OpDepositReceipt, OpTxType}; +use reth_optimism_primitives::OpReceipt; +use tokio_util::codec::Decoder; + +use reth_downloaders::{file_client::FileClientError, receipt_file_client::ReceiptWithBlockNumber}; + +/// Codec for reading raw receipts from a file. +/// +/// If using with [`FramedRead`](tokio_util::codec::FramedRead), the user should make sure the +/// framed reader has capacity for the entire receipts file. Otherwise, the decoder will return +/// [`InputTooShort`](alloy_rlp::Error::InputTooShort), because RLP receipts can only be +/// decoded if the internal buffer is large enough to contain the entire receipt. 
+/// +/// Without ensuring the framed reader has capacity for the entire file, a receipt is likely to +/// fall across two read buffers, the decoder will not be able to decode the receipt, which will +/// cause it to fail. +/// +/// It's recommended to use [`with_capacity`](tokio_util::codec::FramedRead::with_capacity) to set +/// the capacity of the framed reader to the size of the file. +#[derive(Debug)] +pub struct OpGethReceiptFileCodec(core::marker::PhantomData); + +impl Default for OpGethReceiptFileCodec { + fn default() -> Self { + Self(Default::default()) + } +} + +impl Decoder for OpGethReceiptFileCodec +where + R: TryFrom>, +{ + type Item = Option>; + type Error = FileClientError; + + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + if src.is_empty() { + return Ok(None) + } + + let buf_slice = &mut src.as_ref(); + let receipt = OpGethReceiptContainer::decode(buf_slice) + .map_err(|err| Self::Error::Rlp(err, src.to_vec()))? + .0; + src.advance(src.len() - buf_slice.len()); + + Ok(Some( + receipt + .map(|receipt| { + let number = receipt.block_number; + receipt + .try_into() + .map_err(Into::into) + .map(|receipt| ReceiptWithBlockNumber { receipt, number }) + }) + .transpose()?, + )) + } +} + +/// See +#[derive(Debug, PartialEq, Eq, RlpDecodable)] +pub struct OpGethReceipt { + tx_type: u8, + post_state: Bytes, + status: u64, + cumulative_gas_used: u64, + bloom: Bloom, + /// + logs: Vec, + tx_hash: B256, + contract_address: Address, + gas_used: u64, + block_hash: B256, + block_number: u64, + transaction_index: u32, + l1_gas_price: u64, + l1_gas_used: u64, + l1_fee: u64, + fee_scalar: String, +} + +#[derive(Debug, PartialEq, Eq, RlpDecodable)] +#[rlp(trailing)] +struct OpGethReceiptContainer(Option); + +impl TryFrom for OpReceipt { + type Error = FileClientError; + + fn try_from(exported_receipt: OpGethReceipt) -> Result { + let OpGethReceipt { tx_type, status, cumulative_gas_used, logs, .. 
} = exported_receipt; + + let tx_type = OpTxType::try_from(tx_type.to_be_bytes()[0]) + .map_err(|e| FileClientError::Rlp(e.into(), vec![tx_type]))?; + + let receipt = + alloy_consensus::Receipt { status: (status != 0).into(), cumulative_gas_used, logs }; + + match tx_type { + OpTxType::Legacy => Ok(Self::Legacy(receipt)), + OpTxType::Eip2930 => Ok(Self::Eip2930(receipt)), + OpTxType::Eip1559 => Ok(Self::Eip1559(receipt)), + OpTxType::Eip7702 => Ok(Self::Eip7702(receipt)), + OpTxType::Deposit => Ok(Self::Deposit(OpDepositReceipt { + inner: receipt, + deposit_nonce: None, + deposit_receipt_version: None, + })), + } + } +} + +#[cfg(test)] +pub(crate) mod test { + use alloy_consensus::{Receipt, TxReceipt}; + use alloy_primitives::{address, b256, hex, LogData}; + + use super::*; + + pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_1: &[u8] = &hex!( + "f9030ff9030c8080018303183db9010000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000400000000000100000000000000200000000002000000000000001000000000000000000004000000000000000000000000000040000400000100400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000001000000000000000000000100000000000000000000000000000000000000000000000000000000000000088000000080000000000010000000000000000000000000000800008000120000000000000000000000000000000002000f90197f89b948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff863a00109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271a00000000000000000000000000000000000000000000000000000000000014218a000000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2da000000000000000000000000000000000000000000000000000000000618d8837f89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d0e3ebf0a00000000000000000000000000000000000000000000000000000000000014218a000000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d80f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007edc6ca0bb6834800080a05e77a04531c7c107af1882d76cbff9486d0a9aa53701c30888509d4f5f2b003a9400000000000000000000000000000000000000008303183da0bee7192e575af30420cae0c7776304ac196077ee72b048970549e4f08e8754530180018212c2821c2383312e35" + ); + + pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_2: &[u8] = &hex!( + 
"f90271f9026e8080018301c60db9010000080000000200000000000000000008000000000000000000000100008000000000000000000000000000000000000000000000000000000000400000000000100000000000000000000000020000000000000000000000000000000000004000000000000000000000000000000000400000000400000000000000100000000000000000000000000000020000000000000000000000000000000000000000100000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000008400000000000000000010000000000000000020000000020000000000000000000000000000000000000000000002000f8faf89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d0ea0e40a00000000000000000000000000000000000000000000000000000000000014218a0000000000000000000000000e5e7492282fd1e3bfac337a0beccd29b15b7b24080f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007eda7867e0c7d4800080a0af6ed8a6864d44989adc47c84f6fe0aeb1819817505c42cde6cbbcd5e14dd3179400000000000000000000000000000000000000008301c60da045fd6ce41bb8ebb2bccdaa92dd1619e287704cb07722039901a7eba63dea1d130280018212c2821c2383312e35" + ); + + pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_3: &[u8] = &hex!( + "f90271f9026e8080018301c60db9010000000000000000000000000000000000000000400000000000000000008000000000000000000000000000000000004000000000000000000000400004000000100000000000000000000000000000000000000000000000000000000000004000000000000000000000040000000000400080000400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000008100000000000000000000000000000000000004000000000000000000000000008000000000000000000010000000000000000000000000000400000000000000001000000000000000000000000002000f8faf89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d101e54ba00000000000000000000000000000000000000000000000000000000000014218a0000000000000000000000000fa011d8d6c26f13abe2cefed38226e401b2b8a9980f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007ed8842f062774800080a08fab01dcec1da547e90a77597999e9153ff788fa6451d1cc942064427bd995019400000000000000000000000000000000000000008301c60da0da4509fe0ca03202ddbe4f68692c132d689ee098433691040ece18c3a45d44c50380018212c2821c2383312e35" + ); + + fn hack_receipt_1() -> OpGethReceipt { + let receipt = receipt_block_1(); + + OpGethReceipt { + tx_type: receipt.receipt.tx_type() as u8, + post_state: Bytes::default(), + status: receipt.receipt.status() as u64, + cumulative_gas_used: receipt.receipt.cumulative_gas_used(), + bloom: Bloom::from(hex!( + "00000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000400000000000100000000000000200000000002000000000000001000000000000000000004000000000000000000000000000040000400000100400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000001000000000000000000000100000000000000000000000000000000000000000000000000000000000000088000000080000000000010000000000000000000000000000800008000120000000000000000000000000000000002000" + )), + logs: receipt.receipt.into_logs(), + tx_hash: b256!("0x5e77a04531c7c107af1882d76cbff9486d0a9aa53701c30888509d4f5f2b003a"), 
contract_address: Address::ZERO, gas_used: 202813, + block_hash: b256!("0xbee7192e575af30420cae0c7776304ac196077ee72b048970549e4f08e875453"), + block_number: receipt.number, + transaction_index: 0, + l1_gas_price: 1, + l1_gas_used: 4802, + l1_fee: 7203, + fee_scalar: String::from("1.5"), + } + } + + pub(crate) fn receipt_block_1() -> ReceiptWithBlockNumber { + let log_1 = Log { + address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"), + data: LogData::new( + vec![ + b256!("0x0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271"), + b256!("0x0000000000000000000000000000000000000000000000000000000000014218"), + b256!("0x00000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d"), + ], + Bytes::from(hex!( + "00000000000000000000000000000000000000000000000000000000618d8837" + )), + ) + .unwrap(), + }; + + let log_2 = Log { + address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"), + data: LogData::new( + vec![ + b256!("0x92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c"), + b256!("0x00000000000000000000000000000000000000000000000000000000d0e3ebf0"), + b256!("0x0000000000000000000000000000000000000000000000000000000000014218"), + b256!("0x00000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d"), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let log_3 = Log { + address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"), + data: LogData::new( + vec![ + b256!("0xfe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f"), + b256!("0x00000000000000000000000000000000000000000000007edc6ca0bb68348000"), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let receipt = OpReceipt::Legacy(Receipt { + status: true.into(), + cumulative_gas_used: 202813, + logs: vec![log_1, log_2, log_3], + }); + + ReceiptWithBlockNumber { receipt, number: 1 } + } + + pub(crate) fn receipt_block_2() -> ReceiptWithBlockNumber { + let log_1 = Log { + address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"), + data: LogData::new( + vec![ + b256!("0x92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c"), + b256!("0x00000000000000000000000000000000000000000000000000000000d0ea0e40"), + b256!("0x0000000000000000000000000000000000000000000000000000000000014218"), + b256!("0x000000000000000000000000e5e7492282fd1e3bfac337a0beccd29b15b7b240"), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let log_2 = Log { + address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"), + data: LogData::new( + vec![ + b256!("0xfe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f"), + b256!("0x00000000000000000000000000000000000000000000007eda7867e0c7d48000"), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let receipt = OpReceipt::Legacy(Receipt { + status: true.into(), + cumulative_gas_used: 116237, + logs: vec![log_1, log_2], + }); + + ReceiptWithBlockNumber { receipt, number: 2 } + } + + pub(crate) fn receipt_block_3() -> ReceiptWithBlockNumber { + let log_1 = Log { + address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"), + data: LogData::new( + vec![ + b256!("0x92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c"), + b256!("0x00000000000000000000000000000000000000000000000000000000d101e54b"), + b256!("0x0000000000000000000000000000000000000000000000000000000000014218"), + b256!("0x000000000000000000000000fa011d8d6c26f13abe2cefed38226e401b2b8a99"), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let log_2 = Log { + address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"), + data: LogData::new( + 
vec![ + b256!("0xfe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f"), + b256!("0x00000000000000000000000000000000000000000000007ed8842f0627748000"), + ], + Bytes::default(), + ) + .unwrap(), + }; + + let receipt = OpReceipt::Legacy(Receipt { + status: true.into(), + cumulative_gas_used: 116237, + logs: vec![log_1, log_2], + }); + + ReceiptWithBlockNumber { receipt, number: 3 } + } + + #[test] + fn decode_hack_receipt() { + let receipt = hack_receipt_1(); + + let decoded = OpGethReceiptContainer::decode(&mut &HACK_RECEIPT_ENCODED_BLOCK_1[..]) + .unwrap() + .0 + .unwrap(); + + assert_eq!(receipt, decoded); + } + + #[test] + fn receipts_codec() { + // rig + + let mut receipt_1_to_3 = HACK_RECEIPT_ENCODED_BLOCK_1.to_vec(); + receipt_1_to_3.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_2); + receipt_1_to_3.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_3); + + let encoded = &mut BytesMut::from(&receipt_1_to_3[..]); + + let mut codec = OpGethReceiptFileCodec::default(); + + // test + + let first_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap(); + + assert_eq!(receipt_block_1(), first_decoded_receipt); + + let second_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap(); + + assert_eq!(receipt_block_2(), second_decoded_receipt); + + let third_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap(); + + assert_eq!(receipt_block_3(), third_decoded_receipt); + } +} diff --git a/crates/optimism/exex/Cargo.toml b/crates/optimism/exex/Cargo.toml new file mode 100644 index 00000000..8190b6a1 --- /dev/null +++ b/crates/optimism/exex/Cargo.toml @@ -0,0 +1,71 @@ +[package] +name = "reth-optimism-exex" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Execution extensions for OP-Reth" + +[lints] +workspace = true + +[dependencies] +# reth +reth-exex.workspace = true +reth-execution-types.workspace = true +reth-node-types.workspace = true +reth-node-api.workspace = true +reth-trie.workspace = true +reth-provider.workspace = true + +# op-reth +# proofs exex handles `TrieUpdates` in notifications +reth-optimism-trie = { workspace = true, features = ["serde-bincode-compat", "metrics"] } +reth-optimism-rpc = { path = "../rpc" } + +reth-tasks.workspace = true +tokio.workspace = true + + +# alloy +alloy-consensus.workspace = true +alloy-eips.workspace = true + +# misc +eyre.workspace = true +futures-util.workspace = true +derive_more.workspace = true +tracing.workspace = true +clap.workspace = true +base-client-node.workspace = true +humantime.workspace = true +reth-db.workspace = true + + +[dev-dependencies] +tokio = { workspace = true, features = ["test-util", "rt-multi-thread", "macros"] } +futures.workspace = true +reth-db = { workspace = true, features = ["op", "test-utils"] } +reth-node-builder.workspace = true +reth-optimism-node.workspace = true +reth-optimism-chainspec.workspace = true +tempfile.workspace = true + +[features] +test-utils = [ + "reth-db/test-utils", + "reth-trie/test-utils", + "reth-node-builder/test-utils", + "reth-optimism-node/test-utils", + "reth-provider/test-utils", +] + +[package.metadata.cargo-udeps.ignore] +development = [ + "reth-node-builder", + "reth-optimism-node", + "reth-optimism-chainspec", + "tempfile.workspace", +] diff --git a/crates/optimism/exex/src/extension.rs b/crates/optimism/exex/src/extension.rs new file mode 100644 index 00000000..370ffccf --- /dev/null +++ 
b/crates/optimism/exex/src/extension.rs
@@ -0,0 +1,176 @@
+use crate::OpProofsExEx;
+use base_client_node::{BaseNodeExtension, FromExtensionConfig, OpBuilder};
+use clap::builder::ArgPredicate;
+use reth_db::database_metrics::DatabaseMetrics;
+use reth_node_api::FullNodeComponents;
+use reth_optimism_rpc::{
+    debug::{DebugApiExt, DebugApiOverrideServer},
+    eth::proofs::{EthApiExt, EthApiOverrideServer},
+};
+use reth_optimism_trie::{MdbxProofsStorage, OpProofsStorage};
+use reth_tasks::TaskExecutor;
+use std::{path::PathBuf, sync::Arc, time::Duration};
+use tokio::time::sleep;
+use tracing::{error, info};
+
+/// Proofs history configuration.
+#[derive(Debug, Clone, clap::Args, Eq, PartialEq)]
+pub struct ProofsHistoryConfig {
+    /// If true, initialize external-proofs exex to save and serve trie nodes to provide proofs
+    /// faster.
+    #[arg(
+        long = "proofs-history",
+        value_name = "PROOFS_HISTORY",
+        default_value_ifs([
+            ("proofs-history.storage-path", ArgPredicate::IsPresent, "true")
+        ])
+    )]
+    pub proofs_history: bool,
+
+    /// The path to the storage DB for proofs history.
+    #[arg(long = "proofs-history.storage-path", value_name = "PROOFS_HISTORY_STORAGE_PATH")]
+    pub proofs_history_storage_path: Option<PathBuf>,
+
+    /// The window to span blocks for proofs history. Value is the number of blocks.
+    /// Default is 1 month of blocks based on 2 seconds block time.
+    /// 30 * 24 * 60 * 60 / 2 = `1_296_000`
+    #[arg(
+        long = "proofs-history.window",
+        default_value_t = 1_296_000,
+        value_name = "PROOFS_HISTORY_WINDOW"
+    )]
+    pub proofs_history_window: u64,
+
+    /// Interval between proof-storage prune runs. Accepts human-friendly durations
+    /// like "100s", "5m", "1h". Defaults to 15s.
+    ///
+    /// - Shorter intervals prune smaller batches more often, so each prune run tends to be faster
+    ///   and the blocking pause for writes is shorter, at the cost of more frequent pauses.
+    /// - Longer intervals prune larger batches less often, which reduces how often pruning runs,
+    ///   but each run can take longer and block writes for longer.
+    ///
+    /// A shorter interval is preferred so that prune runs stay small and don't stall writes
+    /// for too long.
+    ///
+    /// CLI: `--proofs-history.prune-interval 10m`
+    #[arg(
+        long = "proofs-history.prune-interval",
+        value_name = "PROOFS_HISTORY_PRUNE_INTERVAL",
+        default_value = "15s",
+        value_parser = humantime::parse_duration
+    )]
+    pub proofs_history_prune_interval: Duration,
+}
+
+/// Helper struct that wires the proofs history features into the node builder.
+#[derive(Debug, Clone)]
+pub struct ProofsHistoryExtension {
+    /// Proofs history configuration.
+    config: ProofsHistoryConfig,
+}
+
+impl ProofsHistoryExtension {
+    /// Creates a new proofs history extension helper.
+    pub const fn new(config: ProofsHistoryConfig) -> Self {
+        Self { config }
+    }
+}
+
+impl BaseNodeExtension for ProofsHistoryExtension {
+    /// Applies the extension to the supplied builder.
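+    ///
+    /// When `--proofs-history` is set this opens (or creates) the MDBX proofs store, spawns a
+    /// metrics reporting task for it, installs the `proofs-history` ExEx, and replaces the
+    /// configured `eth`/`debug` RPC modules with the storage-backed overrides.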
+ fn apply(self: Box, mut builder: OpBuilder) -> OpBuilder { + let args = self.config; + let proofs_history_enabled = args.proofs_history; + let proofs_history_window = args.proofs_history_window; + let proofs_history_prune_interval = args.proofs_history_prune_interval; + + if proofs_history_enabled { + let path = args + .proofs_history_storage_path + .clone() + .expect("Path must be provided if not using in-memory storage"); + info!(target: "reth::cli", "Using on-disk storage for proofs history"); + + + let mdbx = match MdbxProofsStorage::new(&path).map_err(|e| eyre::eyre!("Failed to create MdbxProofsStorage: {e}")) { + Ok(mdbx) => mdbx, + Err(e) => { + error!(target: "reth::cli", "Failed to create MdbxProofsStorage: {:?}, continuing without proofs history", e); + return builder; + } + }; + let mdbx = Arc::new( + mdbx, + ); + let storage: OpProofsStorage<_> = mdbx.clone().into(); + + let storage_exec = storage.clone(); + + builder = builder + .on_node_started(move |node| { + spawn_proofs_db_metrics( + node.task_executor, + mdbx, + node.config.metrics.push_gateway_interval, + ); + Ok(()) + }) + .install_exex("proofs-history", async move |exex_context| { + Ok(OpProofsExEx::new( + exex_context, + storage_exec, + proofs_history_window, + proofs_history_prune_interval, + ) + .run()) + }) + .extend_rpc_modules(move |ctx| { + let api_ext = EthApiExt::new(ctx.registry.eth_api().clone(), storage.clone()); + let debug_ext = DebugApiExt::new( + ctx.node().provider().clone(), + ctx.registry.eth_api().clone(), + storage, + Box::new(ctx.node().task_executor().clone()), + ctx.node().evm_config().clone(), + ); + ctx.modules.replace_configured(api_ext.into_rpc())?; + ctx.modules.replace_configured(debug_ext.into_rpc())?; + Ok(()) + }); + } + builder + } +} + +impl FromExtensionConfig for ProofsHistoryExtension { + type Config = ProofsHistoryConfig; + + fn from_config(config: Self::Config) -> Self { + Self::new(config) + } +} + + +/// Spawns a task that periodically reports metrics for the proofs DB. +fn spawn_proofs_db_metrics( + executor: TaskExecutor, + storage: Arc, + metrics_report_interval: Duration, +) { + executor.spawn_critical("op-proofs-storage-metrics", async move { + info!( + target: "reth::cli", + ?metrics_report_interval, + "Starting op-proofs-storage metrics task" + ); + + loop { + sleep(metrics_report_interval).await; + storage.report_metrics(); + } + }); +} \ No newline at end of file diff --git a/crates/optimism/exex/src/lib.rs b/crates/optimism/exex/src/lib.rs new file mode 100644 index 00000000..11b910ae --- /dev/null +++ b/crates/optimism/exex/src/lib.rs @@ -0,0 +1,402 @@ +//! ExEx unique for OP-Reth. See also [`reth_exex`] for more op-reth execution extensions. 
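+//!
+//! The main entry points are [`OpProofsExEx`], which follows chain notifications and keeps the
+//! proofs store in sync, and [`ProofsHistoryExtension`] / [`ProofsHistoryConfig`], which wire
+//! the ExEx and its `--proofs-history.*` CLI flags into a node builder.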
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +use alloy_consensus::BlockHeader; +use alloy_eips::eip1898::BlockWithParent; +use derive_more::Constructor; +use futures_util::TryStreamExt; +use reth_execution_types::Chain; +use reth_exex::{ExExContext, ExExEvent, ExExNotification}; +use reth_node_api::{FullNodeComponents, NodePrimitives}; +use reth_node_types::NodeTypes; +use reth_optimism_trie::{ + live::LiveTrieCollector, OpProofStoragePrunerTask, OpProofsStorage, OpProofsStore, +}; +use reth_provider::{BlockReader, TransactionVariant}; +use reth_trie::{updates::TrieUpdatesSorted, HashedPostStateSorted}; +use std::{sync::Arc, time::Duration}; +use tracing::{debug, info}; + +// Safety threshold for maximum blocks to prune automatically on startup. +// If the required prune exceeds this, the node will error out and require manual pruning. +const MAX_PRUNE_BLOCKS_STARTUP: u64 = 1000; + +mod extension; +pub use extension::{ProofsHistoryConfig, ProofsHistoryExtension}; + +/// OP Proofs ExEx - processes blocks and tracks state changes within fault proof window. +/// +/// Saves and serves trie nodes to make proofs faster. This handles the process of +/// saving the current state, new blocks as they're added, and serving proof RPCs +/// based on the saved data. +/// +/// # Examples +/// +/// The following example shows how to install the ExEx with either in-memory or persistent storage. +/// This can be used when launching an OP-Reth node via a binary. +/// We are currently using it in optimism/bin/src/main.rs. 
+/// +/// ``` +/// use futures_util::FutureExt; +/// use reth_db::test_utils::create_test_rw_db; +/// use reth_node_api::NodeTypesWithDBAdapter; +/// use reth_node_builder::{NodeBuilder, NodeConfig}; +/// use reth_optimism_chainspec::BASE_MAINNET; +/// use reth_optimism_exex::OpProofsExEx; +/// use reth_optimism_node::{args::RollupArgs, OpNode}; +/// use reth_optimism_trie::{db::MdbxProofsStorage, InMemoryProofsStorage, OpProofsStorage}; +/// use reth_provider::providers::BlockchainProvider; +/// use std::{sync::Arc, time::Duration}; +/// +/// let config = NodeConfig::new(BASE_MAINNET.clone()); +/// let db = create_test_rw_db(); +/// let args = RollupArgs::default(); +/// let op_node = OpNode::new(args); +/// +/// // Create in-memory or persistent storage +/// let storage: OpProofsStorage> = +/// Arc::new(InMemoryProofsStorage::new()).into(); +/// +/// // Example for creating persistent storage +/// # let temp_dir = tempfile::tempdir().expect("Failed to create temp dir"); +/// # let storage_path = temp_dir.path().join("proofs_storage"); +/// +/// # let storage: OpProofsStorage> = Arc::new( +/// # MdbxProofsStorage::new(&storage_path).expect("Failed to create MdbxProofsStorage"), +/// # ).into(); +/// +/// let storage_exec = storage.clone(); +/// let proofs_history_window = 1_296_000u64; +/// let proofs_history_prune_interval = Duration::from_secs(3600); +/// // Can also use install_exex_if along with a boolean flag +/// // Set this based on your configuration or CLI args +/// let _builder = NodeBuilder::new(config) +/// .with_database(db) +/// .with_types_and_provider::>>() +/// .with_components(op_node.components()) +/// .install_exex("proofs-history", move |exex_context| async move { +/// Ok(OpProofsExEx::new( +/// exex_context, +/// storage_exec, +/// proofs_history_window, +/// proofs_history_prune_interval, +/// ) +/// .run() +/// .boxed()) +/// }) +/// .on_node_started(|_full_node| Ok(())) +/// .check_launch(); +/// ``` +#[derive(Debug, Constructor)] +pub struct OpProofsExEx +where + Node: FullNodeComponents, +{ + /// The ExEx context containing the node related utilities e.g. provider, notifications, + /// events. + ctx: ExExContext, + /// The type of storage DB. + storage: OpProofsStorage, + /// The window to span blocks for proofs history. Value is the number of blocks, received as + /// cli arg. + proofs_history_window: u64, + /// Interval between proof-storage prune runs + proofs_history_prune_interval: Duration, +} + +impl OpProofsExEx +where + Node: FullNodeComponents>, + Primitives: NodePrimitives, + Storage: OpProofsStore + Clone + 'static, +{ + /// Main execution loop for the ExEx + pub async fn run(mut self) -> eyre::Result<()> { + self.ensure_initialized().await?; + + let prune_task = OpProofStoragePrunerTask::new( + self.storage.clone(), + self.ctx.provider().clone(), + self.proofs_history_window, + self.proofs_history_prune_interval, + ); + self.ctx + .task_executor() + .spawn_with_graceful_shutdown_signal(|signal| Box::pin(prune_task.run(signal))); + + let collector = LiveTrieCollector::new( + self.ctx.evm_config().clone(), + self.ctx.provider().clone(), + &self.storage, + ); + + while let Some(notification) = self.ctx.notifications.try_next().await? { + self.handle_notification(notification, &collector).await?; + } + + Ok(()) + } + + /// Ensure proofs storage is initialized + async fn ensure_initialized(&self) -> eyre::Result<()> { + // Check if proofs storage is initialized + let earliest_block_number = match self.storage.get_earliest_block_number().await? 
{ + Some((n, _)) => n, + None => { + return Err(eyre::eyre!( + "Proofs storage not initialized. Please run 'op-reth initialize-op-proofs --proofs-history.storage-path ' first." + )); + } + }; + + let latest_block_number = match self.storage.get_latest_block_number().await? { + Some((n, _)) => n, + None => { + return Err(eyre::eyre!( + "Proofs storage not initialized. Please run 'op-reth initialize-op-proofs --proofs-history.storage-path ' first." + )); + } + }; + + // Check if we have accumulated too much history for the configured window. + // If the gap between what we have and what we want to keep is too large, the auto-pruner + // will stall the node. + let target_earliest = latest_block_number.saturating_sub(self.proofs_history_window); + if target_earliest > earliest_block_number { + let blocks_to_prune = target_earliest - earliest_block_number; + if blocks_to_prune > MAX_PRUNE_BLOCKS_STARTUP { + return Err(eyre::eyre!( + "Configuration requires pruning {} blocks, which exceeds the safety threshold of {}. \ + Huge prune operations can stall the node. \ + Please run 'op-reth proofs prune' manually before starting the node.", + blocks_to_prune, + MAX_PRUNE_BLOCKS_STARTUP + )); + } + } + + // Need to update the earliest block metric on startup as this is not called frequently and + // can show outdated info. When metrics are disabled, this is a no-op. + self.storage + .metrics() + .block_metrics() + .earliest_number + .set(earliest_block_number as f64); + + Ok(()) + } + + async fn handle_notification( + &self, + notification: ExExNotification, + collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, + ) -> eyre::Result<()> { + let latest_stored = match self.storage.get_latest_block_number().await? { + Some((n, _)) => n, + None => { + return Err(eyre::eyre!("No blocks stored in proofs storage")); + } + }; + + match ¬ification { + ExExNotification::ChainCommitted { new } => { + self.handle_chain_committed(new.clone(), latest_stored, collector).await? + } + ExExNotification::ChainReorged { old, new } => { + self.handle_chain_reorged(old.clone(), new.clone(), latest_stored, collector) + .await? + } + ExExNotification::ChainReverted { old } => { + self.handle_chain_reverted(old.clone(), latest_stored, collector).await? + } + } + + if let Some(committed_chain) = notification.committed_chain() { + self.ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; + } + + Ok(()) + } + + async fn handle_chain_committed( + &self, + new: Arc>, + latest_stored: u64, + collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, + ) -> eyre::Result<()> { + debug!( + target: "optimism::exex", + block_number = new.tip().number(), + block_hash = ?new.tip().hash(), + "ChainCommitted notification received", + ); + + // If tip is not newer than what we have, nothing to do. 
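+        // (This happens e.g. when the ExEx restarts and the first notification replays a
+        // chain segment that was already persisted before shutdown.)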
+ if new.tip().number() <= latest_stored { + debug!( + target: "optimism::exex", + block_number = new.tip().number(), + latest_stored, + "Already processed, skipping" + ); + return Ok(()); + } + + // Process each block from latest_stored + 1 to tip + let start = latest_stored.saturating_add(1); + for block_number in start..=new.tip().number() { + self.process_block(block_number, &new, collector).await?; + } + + Ok(()) + } + + /// Process a single block - either from chain or provider + async fn process_block( + &self, + block_number: u64, + chain: &Chain, + collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, + ) -> eyre::Result<()> { + // Try to get block data from the chain first + // 1. Fast Path: Try to use pre-computed state from the notification + if let Some(block) = chain.blocks().get(&block_number) { + // Check if we have BOTH trie updates and hashed state. + // If either is missing, we fall back to execution to ensure data integrity. + if let (Some(trie_updates), Some(hashed_state)) = + (chain.trie_updates_at(block_number), chain.hashed_state_at(block_number)) + { + debug!( + target: "optimism::exex", + block_number, + "Using pre-computed state updates from notification" + ); + + collector + .store_block_updates( + block.block_with_parent(), + (**trie_updates).clone(), + (**hashed_state).clone(), + ) + .await?; + + return Ok(()); + } + + debug!( + target: "optimism::exex", + block_number, + "Block present in notification but state updates missing, falling back to execution" + ); + } + + // 2. Slow Path: Block not in chain (or state missing), fetch from provider and execute + debug!( + target: "optimism::exex", + block_number, + "Fetching block from provider for execution", + ); + + let block = self + .ctx + .provider() + .recovered_block(block_number.into(), TransactionVariant::NoHash)? 
+ .ok_or_else(|| eyre::eyre!("Missing block {} in provider", block_number))?; + + collector.execute_and_store_block_updates(&block).await?; + Ok(()) + } + + async fn handle_chain_reorged( + &self, + old: Arc>, + new: Arc>, + latest_stored: u64, + collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, + ) -> eyre::Result<()> { + info!( + old_block_number = old.tip().number(), + old_block_hash = ?old.tip().hash(), + new_block_number = new.tip().number(), + new_block_hash = ?new.tip().hash(), + "ChainReorged notification received", + ); + + if old.first().number() > latest_stored { + debug!(target: "optimism::exex", "Reorg beyond stored blocks, skipping"); + return Ok(()); + } + + // find the common ancestor + let mut block_updates: Vec<( + BlockWithParent, + Arc, + Arc, + )> = Vec::with_capacity(new.len()); + for block_number in new.blocks().keys() { + // verify if the fork point matches + if old.fork_block() != new.fork_block() { + return Err(eyre::eyre!( + "Fork blocks do not match: old fork block {:?}, new fork block {:?}", + old.fork_block(), + new.fork_block() + )); + } + + let block = new + .blocks() + .get(block_number) + .ok_or_else(|| eyre::eyre!("Missing block {} in new chain", block_number))?; + let trie_updates = new.trie_updates_at(*block_number).ok_or_else(|| { + eyre::eyre!("Missing Trie updates for block {} in new chain", block_number) + })?; + let hashed_state = new.hashed_state_at(*block_number).ok_or_else(|| { + eyre::eyre!("Missing Hashed state for block {} in new chain", block_number) + })?; + + block_updates.push(( + block.block_with_parent(), + trie_updates.clone(), + hashed_state.clone(), + )); + } + + collector.unwind_and_store_block_updates(block_updates).await?; + + Ok(()) + } + + async fn handle_chain_reverted( + &self, + old: Arc>, + latest_stored: u64, + collector: &LiveTrieCollector<'_, Node::Evm, Node::Provider, Storage>, + ) -> eyre::Result<()> { + info!( + target: "optimism::exex", + old_block_number = old.tip().number(), + old_block_hash = ?old.tip().hash(), + "ChainReverted notification received", + ); + + if old.first().number() > latest_stored { + debug!( + target: "optimism::exex", + first_block_number = old.first().number(), + latest_stored = latest_stored, + "Fork block number is greater than latest stored, skipping", + ); + return Ok(()); + } + + collector.unwind_history(old.first().block_with_parent()).await?; + Ok(()) + } +} diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml new file mode 100644 index 00000000..d9e6aa8f --- /dev/null +++ b/crates/optimism/rpc/Cargo.toml @@ -0,0 +1,104 @@ +[package] +name = "reth-optimism-rpc" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Ethereum RPC implementation for optimism." 
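The `process_block` flow above reduces to a small pattern: take the pre-computed state from the notification only when it is complete, otherwise re-derive it. A simplified, self-contained analogue with stand-in types (`Precomputed` and the `String` payloads are illustrative only):

```rust
use std::collections::BTreeMap;

// Stand-in for the (trie_updates, hashed_state) pair a notification may carry.
struct Precomputed {
    trie_updates: Option<String>,
    hashed_state: Option<String>,
}

fn state_for_block(
    block_number: u64,
    chain: &BTreeMap<u64, Precomputed>,
    recompute: impl Fn(u64) -> String,
) -> String {
    if let Some(pre) = chain.get(&block_number) {
        // Fast path: only when BOTH pieces are present; a partial notification
        // falls through to re-execution, mirroring the integrity check above.
        if let (Some(trie), Some(hashed)) = (&pre.trie_updates, &pre.hashed_state) {
            return format!("{trie}+{hashed}");
        }
    }
    // Slow path: block absent from the notification (or state incomplete).
    recompute(block_number)
}

fn main() {
    let mut chain = BTreeMap::new();
    chain.insert(7, Precomputed { trie_updates: Some("t7".into()), hashed_state: None });
    // hashed_state is missing, so the slow path re-derives the state.
    assert_eq!(state_for_block(7, &chain, |n| format!("exec{n}")), "exec7");
}
```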
+ +[lints] +workspace = true + +[dependencies] +# reth +reth-basic-payload-builder.workspace = true +reth-evm.workspace = true +reth-primitives-traits = { workspace = true, features = ["op"] } +reth-storage-api.workspace = true +reth-rpc-eth-api = { workspace = true, features = ["op"] } +reth-rpc-eth-types.workspace = true +reth-rpc-server-types.workspace = true +reth-tasks = { workspace = true, features = ["rayon"] } +reth-transaction-pool.workspace = true +reth-revm.workspace = true +reth-rpc.workspace = true +reth-rpc-api.workspace = true +reth-node-api.workspace = true +reth-node-builder.workspace = true +reth-chainspec.workspace = true +reth-chain-state.workspace = true +reth-rpc-engine-api.workspace = true +reth-payload-util.workspace = true +reth-provider.workspace = true + +# op-reth +reth-optimism-evm.workspace = true +reth-optimism-flashblocks.workspace = true +reth-optimism-payload-builder.workspace = true +reth-optimism-txpool.workspace = true +# TODO remove node-builder import +reth-optimism-primitives = { workspace = true, features = ["reth-codec", "serde-bincode-compat", "serde"] } +reth-optimism-forks.workspace = true +reth-optimism-trie.workspace = true + +# ethereum +alloy-eips.workspace = true +alloy-json-rpc.workspace = true +alloy-primitives.workspace = true +alloy-rlp.workspace = true +alloy-rpc-client.workspace = true +alloy-rpc-types-eth.workspace = true +alloy-rpc-types-debug.workspace = true +alloy-serde.workspace = true +alloy-transport.workspace = true +alloy-transport-http.workspace = true +alloy-consensus.workspace = true +alloy-rpc-types-engine.workspace = true +op-alloy-network.workspace = true +op-alloy-rpc-types.workspace = true +op-alloy-rpc-types-engine.workspace = true +op-alloy-rpc-jsonrpsee.workspace = true +op-alloy-consensus.workspace = true +revm.workspace = true +op-revm.workspace = true + +# async +serde.workspace = true +tokio.workspace = true +futures.workspace = true +tokio-stream.workspace = true +reqwest = { workspace = true, features = ["rustls-tls-native-roots"] } +async-trait.workspace = true +tower.workspace = true + +# rpc +jsonrpsee-core.workspace = true +jsonrpsee-types.workspace = true +jsonrpsee.workspace = true +serde_json.workspace = true + +# misc +eyre.workspace = true +thiserror.workspace = true +tracing.workspace = true +derive_more = { workspace = true, features = ["constructor"] } + +# metrics +reth-metrics.workspace = true +metrics.workspace = true + +# enum +strum.workspace = true + +[dev-dependencies] +reth-optimism-chainspec.workspace = true +alloy-op-hardforks.workspace = true + +[features] +client = [ + "jsonrpsee/client", + "jsonrpsee/async-client", + "reth-rpc-eth-api/client", +] diff --git a/crates/optimism/rpc/src/debug.rs b/crates/optimism/rpc/src/debug.rs new file mode 100644 index 00000000..48dcb75d --- /dev/null +++ b/crates/optimism/rpc/src/debug.rs @@ -0,0 +1,337 @@ +//! Historical proofs RPC server implementation for `debug_` namespace. 
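For orientation, the three overrides declared below (`debug_executePayload`, `debug_executionWitness`, `debug_proofsSyncStatus`) are ordinary JSON-RPC methods. A hedged sketch of querying the sync status with a raw jsonrpsee client, assuming the `http-client` and `tokio` features and a placeholder URL:

```rust
use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder endpoint for a node running the proofs ExEx.
    let client = HttpClientBuilder::default().build("http://localhost:8545")?;
    // Decoding into serde_json::Value avoids depending on the crate's types;
    // the response mirrors ProofsSyncStatus { earliest, latest }.
    let status: serde_json::Value =
        client.request("debug_proofsSyncStatus", rpc_params![]).await?;
    println!("proofs sync status: {status}");
    Ok(())
}
```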
+ +use crate::{ + metrics::{DebugApiExtMetrics, DebugApis}, + state::OpStateProviderFactory, +}; +use alloy_consensus::BlockHeader; +use alloy_eips::{BlockId, BlockNumberOrTag}; +use alloy_primitives::B256; +use alloy_rlp::Encodable; +use alloy_rpc_types_debug::ExecutionWitness; +use async_trait::async_trait; +use jsonrpsee::proc_macros::rpc; +use jsonrpsee_core::RpcResult; +use jsonrpsee_types::error::ErrorObject; +use reth_basic_payload_builder::PayloadConfig; +use reth_evm::{execute::Executor, ConfigureEvm}; +use reth_node_api::{BuildNextEnv, NodePrimitives, PayloadBuilderError}; +use reth_optimism_forks::OpHardforks; +use reth_optimism_payload_builder::{ + builder::{OpBuilder, OpPayloadBuilderCtx}, + OpAttributes, OpPayloadPrimitives, +}; +use reth_optimism_trie::{OpProofsStorage, OpProofsStore}; +use reth_optimism_txpool::OpPooledTransaction as OpPooledTx2; +use reth_payload_util::NoopPayloadTransactions; +use reth_primitives_traits::{SealedHeader, TxTy}; +use reth_provider::{ + BlockReaderIdExt, ChainSpecProvider, HeaderProvider, NodePrimitivesProvider, ProviderError, + ProviderResult, StateProviderFactory, +}; +use reth_revm::{database::StateProviderDatabase, witness::ExecutionWitnessRecord, State}; +use reth_rpc_api::eth::helpers::FullEthApi; +use reth_rpc_eth_types::EthApiError; +use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; +use reth_tasks::TaskSpawner; +use serde::{Deserialize, Serialize}; +use std::{marker::PhantomData, sync::Arc}; +use tokio::sync::{oneshot, Semaphore}; + +/// Represents the current proofs sync status. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] +pub struct ProofsSyncStatus { + /// The earliest block number for which proofs are available. + earliest: Option, + /// The latest block number for which proofs are available. + latest: Option, +} + +#[cfg_attr(not(test), rpc(server, namespace = "debug"))] +#[cfg_attr(test, rpc(server, client, namespace = "debug"))] +pub trait DebugApiOverride { + /// Executes a payload and returns the execution witness. + #[method(name = "executePayload")] + async fn execute_payload( + &self, + parent_block_hash: B256, + attributes: Attributes, + ) -> RpcResult; + + /// Returns the execution witness for a given block. + #[method(name = "executionWitness")] + async fn execution_witness(&self, block: BlockNumberOrTag) -> RpcResult; + + /// Returns the current proofs sync status. + #[method(name = "proofsSyncStatus")] + async fn proofs_sync_status(&self) -> RpcResult; +} + +#[derive(Debug)] +/// Overrides applied to the `debug_` namespace of the RPC API for the OP Proofs ExEx. +pub struct DebugApiExt { + inner: Arc>, +} + +impl DebugApiExt +where + Eth: FullEthApi + Send + Sync + 'static, + ErrorObject<'static>: From, + Storage: OpProofsStore + Clone + 'static, + Provider: BlockReaderIdExt + NodePrimitivesProvider, + EvmConfig: ConfigureEvm + 'static, +{ + /// Creates a new instance of the `DebugApiExt`. + pub fn new( + provider: Provider, + eth_api: Eth, + preimage_store: OpProofsStorage, + task_spawner: Box, + evm_config: EvmConfig, + ) -> Self { + Self { + inner: Arc::new(DebugApiExtInner::new( + provider, + eth_api, + preimage_store, + task_spawner, + evm_config, + )), + } + } +} + +#[derive(Debug)] +/// Overrides applied to the `debug_` namespace of the RPC API for historical proofs ExEx. 
+pub struct DebugApiExtInner { + provider: Provider, + eth_api: Eth, + storage: OpProofsStorage, + state_provider_factory: OpStateProviderFactory, + evm_config: EvmConfig, + task_spawner: Box, + semaphore: Semaphore, + _attrs: PhantomData, + metrics: DebugApiExtMetrics, +} + +impl DebugApiExtInner +where + Eth: FullEthApi + Send + Sync + 'static, + ErrorObject<'static>: From, + P: OpProofsStore + Clone + 'static, + Provider: NodePrimitivesProvider, +{ + fn new( + provider: Provider, + eth_api: Eth, + storage: OpProofsStorage
<P>
, + task_spawner: Box, + evm_config: EvmConfig, + ) -> Self { + Self { + provider, + storage: storage.clone(), + state_provider_factory: OpStateProviderFactory::new(eth_api.clone(), storage), + eth_api, + evm_config, + task_spawner, + semaphore: Semaphore::new(3), + _attrs: PhantomData, + metrics: DebugApiExtMetrics::new(), + } + } +} + +impl DebugApiExt +where + Eth: FullEthApi + Send + Sync + 'static, + ErrorObject<'static>: From, + P: OpProofsStore + Clone + 'static, + Provider: BlockReaderIdExt + + NodePrimitivesProvider + + HeaderProvider
<Header = <Provider::Primitives as NodePrimitives>
::BlockHeader>, +{ + fn parent_header( + &self, + parent_block_hash: B256, + ) -> ProviderResult> { + self.inner + .provider + .sealed_header_by_hash(parent_block_hash)? + .ok_or_else(|| ProviderError::HeaderNotFound(parent_block_hash.into())) + } +} + +#[async_trait] +impl DebugApiOverrideServer + for DebugApiExt +where + Eth: FullEthApi + Send + Sync + 'static, + ErrorObject<'static>: From, + P: OpProofsStore + Clone + 'static, + Attrs: OpAttributes>, + N: OpPayloadPrimitives, + EvmConfig: ConfigureEvm< + Primitives = N, + NextBlockEnvCtx: BuildNextEnv, + > + 'static, + Provider: BlockReaderIdExt
+ + StateProviderFactory + + ChainSpecProvider + + NodePrimitivesProvider + + HeaderProvider
+ + Clone + + 'static, + op_alloy_consensus::OpPooledTransaction: + TryFrom<::_TX, Error: core::error::Error>, + ::_TX: From, +{ + async fn execute_payload( + &self, + parent_block_hash: B256, + attributes: Attrs::RpcPayloadAttributes, + ) -> RpcResult { + self.inner + .metrics + .record_operation_async(DebugApis::DebugExecutePayload, async { + let _permit = self.inner.semaphore.acquire().await; + + let parent_header = self.parent_header(parent_block_hash).to_rpc_result()?; + + let (tx, rx) = oneshot::channel(); + let this = self.inner.clone(); + self.inner.task_spawner.spawn_blocking(Box::pin(async move { + let result = async { + let parent_hash = parent_header.hash(); + let attributes = Attrs::try_new(parent_hash, attributes, 3) + .map_err(PayloadBuilderError::other)?; + + let config = + PayloadConfig { parent_header: Arc::new(parent_header), attributes }; + let ctx = OpPayloadBuilderCtx { + evm_config: this.evm_config.clone(), + chain_spec: this.provider.chain_spec(), + config, + cancel: Default::default(), + best_payload: Default::default(), + builder_config: Default::default(), + }; + + let state_provider = this + .state_provider_factory + .state_provider(Some(BlockId::Hash(parent_hash.into()))) + .await + .map_err(PayloadBuilderError::other)?; + + let builder = OpBuilder::new(|_| { + NoopPayloadTransactions::< + OpPooledTx2< + ::_TX, + op_alloy_consensus::OpPooledTransaction, + >, + >::default() + }); + + builder.witness(state_provider, &ctx).map_err(PayloadBuilderError::other) + }; + + let _ = tx.send(result.await); + })); + + rx.await + .map_err(|err| internal_rpc_err(err.to_string()))? + .map_err(|err| internal_rpc_err(err.to_string())) + }) + .await + } + + async fn execution_witness(&self, block_id: BlockNumberOrTag) -> RpcResult { + self.inner + .metrics + .record_operation_async(DebugApis::DebugExecutionWitness, async { + let _permit = self.inner.semaphore.acquire().await; + + let block = self + .inner + .eth_api + .recovered_block(block_id.into()) + .await? + .ok_or(EthApiError::HeaderNotFound(block_id.into()))?; + + let this = self.inner.clone(); + let block_number = block.header().number(); + + let state_provider = this + .state_provider_factory + .state_provider(Some(BlockId::Number(block.parent_num_hash().number.into()))) + .await + .map_err(EthApiError::from)?; + let db = StateProviderDatabase::new(&state_provider); + let block_executor = this.eth_api.evm_config().executor(db); + + let mut witness_record = ExecutionWitnessRecord::default(); + + let _ = block_executor + .execute_with_state_closure(&block, |statedb: &State<_>| { + witness_record.record_executed_state(statedb); + }) + .map_err(EthApiError::from)?; + + let ExecutionWitnessRecord { hashed_state, codes, keys, lowest_block_number } = + witness_record; + + let state = state_provider + .witness(Default::default(), hashed_state) + .map_err(EthApiError::from)?; + let mut exec_witness = + ExecutionWitness { state, codes, keys, ..Default::default() }; + + let smallest = match lowest_block_number { + Some(smallest) => smallest, + None => { + // Return only the parent header, if there were no calls to the + // BLOCKHASH opcode. + block_number.saturating_sub(1) + } + }; + + let range = smallest..block_number; + exec_witness.headers = self + .inner + .provider + .headers_range(range) + .map_err(EthApiError::from)? 
+ .into_iter() + .map(|header| { + let mut serialized_header = Vec::new(); + header.encode(&mut serialized_header); + serialized_header.into() + }) + .collect(); + + Ok(exec_witness) + }) + .await + } + + async fn proofs_sync_status(&self) -> RpcResult { + let earliest = self + .inner + .storage + .get_earliest_block_number() + .await + .map_err(|err| internal_rpc_err(err.to_string()))?; + let latest = self + .inner + .storage + .get_latest_block_number() + .await + .map_err(|err| internal_rpc_err(err.to_string()))?; + + Ok(ProofsSyncStatus { + earliest: earliest.map(|(block_number, _)| block_number), + latest: latest.map(|(block_number, _)| block_number), + }) + } +} diff --git a/crates/optimism/rpc/src/engine.rs b/crates/optimism/rpc/src/engine.rs new file mode 100644 index 00000000..a31a64da --- /dev/null +++ b/crates/optimism/rpc/src/engine.rs @@ -0,0 +1,412 @@ +//! Implements the Optimism engine API RPC methods. + +use alloy_eips::eip7685::Requests; +use alloy_primitives::{BlockHash, B256, B64, U64}; +use alloy_rpc_types_engine::{ + ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadV3, + ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, +}; +use derive_more::Constructor; +use jsonrpsee::proc_macros::rpc; +use jsonrpsee_core::{server::RpcModule, RpcResult}; +use op_alloy_rpc_types_engine::{ + OpExecutionData, OpExecutionPayloadV4, ProtocolVersion, ProtocolVersionFormatV0, + SuperchainSignal, +}; +use reth_chainspec::EthereumHardforks; +use reth_node_api::{EngineApiValidator, EngineTypes}; +use reth_rpc_api::IntoEngineApiRpcModule; +use reth_rpc_engine_api::EngineApi; +use reth_storage_api::{BlockReader, HeaderProvider, StateProviderFactory}; +use reth_transaction_pool::TransactionPool; +use tracing::{debug, info, trace}; + +/// The list of all supported Engine capabilities available over the engine endpoint. +/// +/// Spec: +pub const OP_ENGINE_CAPABILITIES: &[&str] = &[ + "engine_forkchoiceUpdatedV1", + "engine_forkchoiceUpdatedV2", + "engine_forkchoiceUpdatedV3", + "engine_getClientVersionV1", + "engine_getPayloadV2", + "engine_getPayloadV3", + "engine_getPayloadV4", + "engine_newPayloadV2", + "engine_newPayloadV3", + "engine_newPayloadV4", + "engine_getPayloadBodiesByHashV1", + "engine_getPayloadBodiesByRangeV1", + "engine_signalSuperchainV1", +]; + +/// OP Stack protocol version +/// See also: +pub const OP_STACK_SUPPORT: ProtocolVersion = ProtocolVersion::V0(ProtocolVersionFormatV0 { + build: B64::ZERO, + major: 9, + minor: 0, + patch: 0, + pre_release: 0, +}); + +/// Extension trait that gives access to Optimism engine API RPC methods. +/// +/// Note: +/// > The provider should use a JWT authentication layer. +/// +/// This follows the Optimism specs that can be found at: +/// +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "engine"), server_bounds(Engine::PayloadAttributes: jsonrpsee::core::DeserializeOwned))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "engine", client_bounds(Engine::PayloadAttributes: jsonrpsee::core::Serialize + Clone), server_bounds(Engine::PayloadAttributes: jsonrpsee::core::DeserializeOwned)))] +pub trait OpEngineApi { + /// Sends the given payload to the execution layer client, as specified for the Shanghai fork. + /// + /// See also + /// + /// No modifications needed for OP compatibility. 
+ #[method(name = "newPayloadV2")] + async fn new_payload_v2(&self, payload: ExecutionPayloadInputV2) -> RpcResult; + + /// Sends the given payload to the execution layer client, as specified for the Cancun fork. + /// + /// See also + /// + /// OP modifications: + /// - expected versioned hashes MUST be an empty array: therefore the `versioned_hashes` + /// parameter is removed. + /// - parent beacon block root MUST be the parent beacon block root from the L1 origin block of + /// the L2 block. + /// - blob versioned hashes MUST be empty list. + #[method(name = "newPayloadV3")] + async fn new_payload_v3( + &self, + payload: ExecutionPayloadV3, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + ) -> RpcResult; + + /// Sends the given payload to the execution layer client, as specified for the Prague fork. + /// + /// See also + /// + /// - blob versioned hashes MUST be empty list. + /// - execution layer requests MUST be empty list. + #[method(name = "newPayloadV4")] + async fn new_payload_v4( + &self, + payload: OpExecutionPayloadV4, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + execution_requests: Requests, + ) -> RpcResult; + + /// See also + /// + /// This exists because it is used by op-node: + /// + /// Caution: This should not accept the `withdrawals` field in the payload attributes. + #[method(name = "forkchoiceUpdatedV1")] + async fn fork_choice_updated_v1( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> RpcResult; + + /// Updates the execution layer client with the given fork choice, as specified for the Shanghai + /// fork. + /// + /// Caution: This should not accept the `parentBeaconBlockRoot` field in the payload attributes. + /// + /// See also + /// + /// OP modifications: + /// - The `payload_attributes` parameter is extended with the [`EngineTypes::PayloadAttributes`](EngineTypes) type as described in + #[method(name = "forkchoiceUpdatedV2")] + async fn fork_choice_updated_v2( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> RpcResult; + + /// Updates the execution layer client with the given fork choice, as specified for the Cancun + /// fork. + /// + /// See also + /// + /// OP modifications: + /// - Must be called with an Ecotone payload + /// - Attributes must contain the parent beacon block root field + /// - The `payload_attributes` parameter is extended with the [`EngineTypes::PayloadAttributes`](EngineTypes) type as described in + #[method(name = "forkchoiceUpdatedV3")] + async fn fork_choice_updated_v3( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> RpcResult; + + /// Retrieves an execution payload from a previously started build process, as specified for the + /// Shanghai fork. + /// + /// See also + /// + /// Note: + /// > Provider software MAY stop the corresponding build process after serving this call. + /// + /// No modifications needed for OP compatibility. + #[method(name = "getPayloadV2")] + async fn get_payload_v2( + &self, + payload_id: PayloadId, + ) -> RpcResult; + + /// Retrieves an execution payload from a previously started build process, as specified for the + /// Cancun fork. + /// + /// See also + /// + /// Note: + /// > Provider software MAY stop the corresponding build process after serving this call. + /// + /// OP modifications: + /// - the response type is extended to [`EngineTypes::ExecutionPayloadEnvelopeV3`]. 
+ #[method(name = "getPayloadV3")] + async fn get_payload_v3( + &self, + payload_id: PayloadId, + ) -> RpcResult; + + /// Returns the most recent version of the payload that is available in the corresponding + /// payload build process at the time of receiving this call. + /// + /// See also + /// + /// Note: + /// > Provider software MAY stop the corresponding build process after serving this call. + /// + /// OP modifications: + /// - the response type is extended to [`EngineTypes::ExecutionPayloadEnvelopeV4`]. + #[method(name = "getPayloadV4")] + async fn get_payload_v4( + &self, + payload_id: PayloadId, + ) -> RpcResult; + + /// Returns the execution payload bodies by the given hash. + /// + /// See also + #[method(name = "getPayloadBodiesByHashV1")] + async fn get_payload_bodies_by_hash_v1( + &self, + block_hashes: Vec, + ) -> RpcResult; + + /// Returns the execution payload bodies by the range starting at `start`, containing `count` + /// blocks. + /// + /// WARNING: This method is associated with the `BeaconBlocksByRange` message in the consensus + /// layer p2p specification, meaning the input should be treated as untrusted or potentially + /// adversarial. + /// + /// Implementers should take care when acting on the input to this method, specifically + /// ensuring that the range is limited properly, and that the range boundaries are computed + /// correctly and without panics. + /// + /// See also + #[method(name = "getPayloadBodiesByRangeV1")] + async fn get_payload_bodies_by_range_v1( + &self, + start: U64, + count: U64, + ) -> RpcResult; + + /// Signals superchain information to the Engine. + /// Returns the latest supported OP-Stack protocol version of the execution engine. + /// See also + #[method(name = "engine_signalSuperchainV1")] + async fn signal_superchain_v1(&self, _signal: SuperchainSignal) -> RpcResult; + + /// Returns the execution client version information. + /// + /// Note: + /// > The `client_version` parameter identifies the consensus client. + /// + /// See also + #[method(name = "getClientVersionV1")] + async fn get_client_version_v1( + &self, + client_version: ClientVersionV1, + ) -> RpcResult>; + + /// Returns the list of Engine API methods supported by the execution layer client software. + /// + /// See also + #[method(name = "exchangeCapabilities")] + async fn exchange_capabilities(&self, capabilities: Vec) -> RpcResult>; +} + +/// The Engine API implementation that grants the Consensus layer access to data and +/// functions in the Execution layer that are crucial for the consensus process. +#[derive(Debug, Constructor)] +pub struct OpEngineApi { + inner: EngineApi, +} + +impl Clone + for OpEngineApi +where + PayloadT: EngineTypes, +{ + fn clone(&self) -> Self { + Self { inner: self.inner.clone() } + } +} + +#[async_trait::async_trait] +impl OpEngineApiServer + for OpEngineApi +where + Provider: HeaderProvider + BlockReader + StateProviderFactory + 'static, + EngineT: EngineTypes, + Pool: TransactionPool + 'static, + Validator: EngineApiValidator, + ChainSpec: EthereumHardforks + Send + Sync + 'static, +{ + async fn new_payload_v2(&self, payload: ExecutionPayloadInputV2) -> RpcResult { + trace!(target: "rpc::engine", "Serving engine_newPayloadV2"); + let payload = OpExecutionData::v2(payload); + Ok(self.inner.new_payload_v2_metered(payload).await?) 
+ } + + async fn new_payload_v3( + &self, + payload: ExecutionPayloadV3, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + ) -> RpcResult { + trace!(target: "rpc::engine", "Serving engine_newPayloadV3"); + let payload = OpExecutionData::v3(payload, versioned_hashes, parent_beacon_block_root); + + Ok(self.inner.new_payload_v3_metered(payload).await?) + } + + async fn new_payload_v4( + &self, + payload: OpExecutionPayloadV4, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + execution_requests: Requests, + ) -> RpcResult { + trace!(target: "rpc::engine", "Serving engine_newPayloadV4"); + let payload = OpExecutionData::v4( + payload, + versioned_hashes, + parent_beacon_block_root, + execution_requests, + ); + + Ok(self.inner.new_payload_v4_metered(payload).await?) + } + + async fn fork_choice_updated_v1( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> RpcResult { + Ok(self.inner.fork_choice_updated_v1_metered(fork_choice_state, payload_attributes).await?) + } + + async fn fork_choice_updated_v2( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> RpcResult { + trace!(target: "rpc::engine", "Serving engine_forkchoiceUpdatedV2"); + Ok(self.inner.fork_choice_updated_v2_metered(fork_choice_state, payload_attributes).await?) + } + + async fn fork_choice_updated_v3( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> RpcResult { + trace!(target: "rpc::engine", "Serving engine_forkchoiceUpdatedV3"); + Ok(self.inner.fork_choice_updated_v3_metered(fork_choice_state, payload_attributes).await?) + } + + async fn get_payload_v2( + &self, + payload_id: PayloadId, + ) -> RpcResult { + debug!(target: "rpc::engine", id = %payload_id, "Serving engine_getPayloadV2"); + Ok(self.inner.get_payload_v2_metered(payload_id).await?) + } + + async fn get_payload_v3( + &self, + payload_id: PayloadId, + ) -> RpcResult { + trace!(target: "rpc::engine", "Serving engine_getPayloadV3"); + Ok(self.inner.get_payload_v3_metered(payload_id).await?) + } + + async fn get_payload_v4( + &self, + payload_id: PayloadId, + ) -> RpcResult { + trace!(target: "rpc::engine", "Serving engine_getPayloadV4"); + Ok(self.inner.get_payload_v4_metered(payload_id).await?) + } + + async fn get_payload_bodies_by_hash_v1( + &self, + block_hashes: Vec, + ) -> RpcResult { + trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByHashV1"); + Ok(self.inner.get_payload_bodies_by_hash_v1_metered(block_hashes).await?) + } + + async fn get_payload_bodies_by_range_v1( + &self, + start: U64, + count: U64, + ) -> RpcResult { + trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByRangeV1"); + Ok(self.inner.get_payload_bodies_by_range_v1_metered(start.to(), count.to()).await?) + } + + async fn signal_superchain_v1(&self, signal: SuperchainSignal) -> RpcResult { + trace!(target: "rpc::engine", "Serving signal_superchain_v1"); + info!( + target: "rpc::engine", + "Received superchain version signal local={:?} required={:?} recommended={:?}", + OP_STACK_SUPPORT, + signal.required, + signal.recommended + ); + Ok(OP_STACK_SUPPORT) + } + + async fn get_client_version_v1( + &self, + client: ClientVersionV1, + ) -> RpcResult> { + trace!(target: "rpc::engine", "Serving engine_getClientVersionV1"); + Ok(self.inner.get_client_version_v1(client)?) 
+ } + + async fn exchange_capabilities(&self, _capabilities: Vec) -> RpcResult> { + Ok(self.inner.capabilities().list()) + } +} + +impl IntoEngineApiRpcModule + for OpEngineApi +where + EngineT: EngineTypes, + Self: OpEngineApiServer, +{ + fn into_rpc_module(self) -> RpcModule<()> { + self.into_rpc().remove_context() + } +} diff --git a/crates/optimism/rpc/src/error.rs b/crates/optimism/rpc/src/error.rs new file mode 100644 index 00000000..b457ce9d --- /dev/null +++ b/crates/optimism/rpc/src/error.rs @@ -0,0 +1,235 @@ +//! RPC errors specific to OP. + +use alloy_json_rpc::ErrorPayload; +use alloy_primitives::Bytes; +use alloy_rpc_types_eth::{error::EthRpcErrorCode, BlockError}; +use alloy_transport::{RpcError, TransportErrorKind}; +use jsonrpsee_types::error::{INTERNAL_ERROR_CODE, INVALID_PARAMS_CODE}; +use op_revm::{OpHaltReason, OpTransactionError}; +use reth_evm::execute::ProviderError; +use reth_optimism_evm::OpBlockExecutionError; +use reth_rpc_eth_api::{AsEthApiError, EthTxEnvError, TransactionConversionError}; +use reth_rpc_eth_types::{ + error::api::{FromEvmHalt, FromRevert}, + EthApiError, +}; +use reth_rpc_server_types::result::{internal_rpc_err, rpc_err}; +use revm::context_interface::result::{EVMError, InvalidTransaction}; +use std::{convert::Infallible, fmt::Display}; + +/// Optimism specific errors, that extend [`EthApiError`]. +#[derive(Debug, thiserror::Error)] +pub enum OpEthApiError { + /// L1 ethereum error. + #[error(transparent)] + Eth(#[from] EthApiError), + /// EVM error originating from invalid optimism data. + #[error(transparent)] + Evm(#[from] OpBlockExecutionError), + /// Thrown when calculating L1 gas fee. + #[error("failed to calculate l1 gas fee")] + L1BlockFeeError, + /// Thrown when calculating L1 gas used + #[error("failed to calculate l1 gas used")] + L1BlockGasError, + /// Wrapper for [`revm_primitives::InvalidTransaction`](InvalidTransaction). + #[error(transparent)] + InvalidTransaction(#[from] OpInvalidTransactionError), + /// Sequencer client error. + #[error(transparent)] + Sequencer(#[from] SequencerClientError), +} + +impl AsEthApiError for OpEthApiError { + fn as_err(&self) -> Option<&EthApiError> { + match self { + Self::Eth(err) => Some(err), + _ => None, + } + } +} + +impl From for jsonrpsee_types::error::ErrorObject<'static> { + fn from(err: OpEthApiError) -> Self { + match err { + OpEthApiError::Eth(err) => err.into(), + OpEthApiError::InvalidTransaction(err) => err.into(), + OpEthApiError::Evm(_) | + OpEthApiError::L1BlockFeeError | + OpEthApiError::L1BlockGasError => internal_rpc_err(err.to_string()), + OpEthApiError::Sequencer(err) => err.into(), + } + } +} + +/// Optimism specific invalid transaction errors +#[derive(thiserror::Error, Debug)] +pub enum OpInvalidTransactionError { + /// A deposit transaction was submitted as a system transaction post-regolith. + #[error("no system transactions allowed after regolith")] + DepositSystemTxPostRegolith, + /// A deposit transaction halted post-regolith + #[error("deposit transaction halted after regolith")] + HaltedDepositPostRegolith, + /// The encoded transaction was missing during evm execution. + #[error("missing enveloped transaction bytes")] + MissingEnvelopedTx, + /// Transaction conditional errors. 
+ #[error(transparent)] + TxConditionalErr(#[from] TxConditionalErr), +} + +impl From for jsonrpsee_types::error::ErrorObject<'static> { + fn from(err: OpInvalidTransactionError) -> Self { + match err { + OpInvalidTransactionError::DepositSystemTxPostRegolith | + OpInvalidTransactionError::HaltedDepositPostRegolith | + OpInvalidTransactionError::MissingEnvelopedTx => { + rpc_err(EthRpcErrorCode::TransactionRejected.code(), err.to_string(), None) + } + OpInvalidTransactionError::TxConditionalErr(_) => err.into(), + } + } +} + +impl TryFrom for OpInvalidTransactionError { + type Error = InvalidTransaction; + + fn try_from(err: OpTransactionError) -> Result { + match err { + OpTransactionError::DepositSystemTxPostRegolith => { + Ok(Self::DepositSystemTxPostRegolith) + } + OpTransactionError::HaltedDepositPostRegolith => Ok(Self::HaltedDepositPostRegolith), + OpTransactionError::MissingEnvelopedTx => Ok(Self::MissingEnvelopedTx), + OpTransactionError::Base(err) => Err(err), + } + } +} + +/// Transaction conditional related errors. +#[derive(Debug, thiserror::Error)] +pub enum TxConditionalErr { + /// Transaction conditional cost exceeded maximum allowed + #[error("conditional cost exceeded maximum allowed")] + ConditionalCostExceeded, + /// Invalid conditional parameters + #[error("invalid conditional parameters")] + InvalidCondition, + /// Internal error + #[error("internal error: {0}")] + Internal(String), + /// Thrown if the conditional's storage value doesn't match the latest state's. + #[error("storage value mismatch")] + StorageValueMismatch, + /// Thrown when the conditional's storage root doesn't match the latest state's root. + #[error("storage root mismatch")] + StorageRootMismatch, +} + +impl TxConditionalErr { + /// Creates an internal error variant + pub fn internal(err: E) -> Self { + Self::Internal(err.to_string()) + } +} + +impl From for jsonrpsee_types::error::ErrorObject<'static> { + fn from(err: TxConditionalErr) -> Self { + let code = match &err { + TxConditionalErr::Internal(_) => INTERNAL_ERROR_CODE, + _ => INVALID_PARAMS_CODE, + }; + + jsonrpsee_types::error::ErrorObject::owned(code, err.to_string(), None::) + } +} + +/// Error type when interacting with the Sequencer +#[derive(Debug, thiserror::Error)] +pub enum SequencerClientError { + /// Wrapper around an [`RpcError`]. 
+ #[error(transparent)] + HttpError(#[from] RpcError), +} + +impl From for jsonrpsee_types::error::ErrorObject<'static> { + fn from(err: SequencerClientError) -> Self { + match err { + SequencerClientError::HttpError(RpcError::ErrorResp(ErrorPayload { + code, + message, + data, + })) => jsonrpsee_types::error::ErrorObject::owned(code as i32, message, data), + err => jsonrpsee_types::error::ErrorObject::owned( + INTERNAL_ERROR_CODE, + err.to_string(), + None::, + ), + } + } +} + +impl From> for OpEthApiError +where + T: Into, +{ + fn from(error: EVMError) -> Self { + match error { + EVMError::Transaction(err) => match err.try_into() { + Ok(err) => Self::InvalidTransaction(err), + Err(err) => Self::Eth(EthApiError::InvalidTransaction(err.into())), + }, + EVMError::Database(err) => Self::Eth(err.into()), + EVMError::Header(err) => Self::Eth(err.into()), + EVMError::Custom(err) => Self::Eth(EthApiError::EvmCustom(err)), + } + } +} + +impl FromEvmHalt for OpEthApiError { + fn from_evm_halt(halt: OpHaltReason, gas_limit: u64) -> Self { + match halt { + OpHaltReason::FailedDeposit => { + OpInvalidTransactionError::HaltedDepositPostRegolith.into() + } + OpHaltReason::Base(halt) => EthApiError::from_evm_halt(halt, gas_limit).into(), + } + } +} + +impl FromRevert for OpEthApiError { + fn from_revert(output: Bytes) -> Self { + Self::Eth(EthApiError::from_revert(output)) + } +} + +impl From for OpEthApiError { + fn from(value: TransactionConversionError) -> Self { + Self::Eth(EthApiError::from(value)) + } +} + +impl From for OpEthApiError { + fn from(value: EthTxEnvError) -> Self { + Self::Eth(EthApiError::from(value)) + } +} + +impl From for OpEthApiError { + fn from(value: ProviderError) -> Self { + Self::Eth(EthApiError::from(value)) + } +} + +impl From for OpEthApiError { + fn from(value: BlockError) -> Self { + Self::Eth(EthApiError::from(value)) + } +} + +impl From for OpEthApiError { + fn from(value: Infallible) -> Self { + match value {} + } +} diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs new file mode 100644 index 00000000..0efd9aea --- /dev/null +++ b/crates/optimism/rpc/src/eth/block.rs @@ -0,0 +1,23 @@ +//! Loads and formats OP block RPC response. 
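Stepping back to the engine server defined earlier: under the `client` feature the `#[rpc]` macro also generates a typed client, but the methods can equally be driven with raw requests. A hedged sketch of the op-node-style sequence (submit a payload, then update fork choice); the URL and all hashes are placeholders, and a real deployment needs the JWT auth layer the trait docs call for:

```rust
use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params};
use serde_json::{json, Value};

const ZERO_HASH: &str = "0x0000000000000000000000000000000000000000000000000000000000000000";

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder endpoint; the real engine port sits behind JWT auth.
    let client = HttpClientBuilder::default().build("http://localhost:8551")?;

    // engine_newPayloadV3: on OP, versioned hashes must be an empty array and the
    // parent beacon block root comes from the L1 origin block (see trait docs above).
    let payload: Value = json!({}); // ExecutionPayloadV3 fields elided
    let status: Value = client
        .request("engine_newPayloadV3", rpc_params![payload, Vec::<String>::new(), ZERO_HASH])
        .await?;

    // engine_forkchoiceUpdatedV3 with no payload attributes only advances the head.
    let fcu: Value = client
        .request(
            "engine_forkchoiceUpdatedV3",
            rpc_params![
                json!({
                    "headBlockHash": ZERO_HASH,
                    "safeBlockHash": ZERO_HASH,
                    "finalizedBlockHash": ZERO_HASH
                }),
                Value::Null
            ],
        )
        .await?;
    println!("newPayload: {status}\nforkchoiceUpdated: {fcu}");
    Ok(())
}
```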
+ +use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError}; +use reth_rpc_eth_api::{ + helpers::{EthBlocks, LoadBlock}, + FromEvmError, RpcConvert, +}; + +impl EthBlocks for OpEthApi +where + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, +{ +} + +impl LoadBlock for OpEthApi +where + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, +{ +} diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs new file mode 100644 index 00000000..db96bda8 --- /dev/null +++ b/crates/optimism/rpc/src/eth/call.rs @@ -0,0 +1,43 @@ +use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError}; +use reth_rpc_eth_api::{ + helpers::{estimate::EstimateCall, Call, EthCall}, + FromEvmError, RpcConvert, +}; + +impl EthCall for OpEthApi +where + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, +{ +} + +impl EstimateCall for OpEthApi +where + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, +{ +} + +impl Call for OpEthApi +where + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, +{ + #[inline] + fn call_gas_limit(&self) -> u64 { + self.inner.eth_api.gas_cap() + } + + #[inline] + fn max_simulate_blocks(&self) -> u64 { + self.inner.eth_api.max_simulate_blocks() + } + + #[inline] + fn evm_memory_limit(&self) -> u64 { + self.inner.eth_api.evm_memory_limit() + } +} diff --git a/crates/optimism/rpc/src/eth/ext.rs b/crates/optimism/rpc/src/eth/ext.rs new file mode 100644 index 00000000..6c4e1bc7 --- /dev/null +++ b/crates/optimism/rpc/src/eth/ext.rs @@ -0,0 +1,200 @@ +//! Eth API extension. + +use crate::{error::TxConditionalErr, OpEthApiError, SequencerClient}; +use alloy_consensus::BlockHeader; +use alloy_eips::BlockNumberOrTag; +use alloy_primitives::{Bytes, StorageKey, B256, U256}; +use alloy_rpc_types_eth::erc4337::{AccountStorage, TransactionConditional}; +use jsonrpsee_core::RpcResult; +use reth_optimism_txpool::conditional::MaybeConditionalTransaction; +use reth_rpc_eth_api::L2EthApiExtServer; +use reth_rpc_eth_types::utils::recover_raw_transaction; +use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; +use reth_transaction_pool::{ + AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, +}; +use std::sync::Arc; +use tokio::sync::Semaphore; + +/// Maximum execution const for conditional transactions. +const MAX_CONDITIONAL_EXECUTION_COST: u64 = 5000; + +const MAX_CONCURRENT_CONDITIONAL_VALIDATIONS: usize = 3; + +/// OP-Reth `Eth` API extensions implementation. +/// +/// Separate from [`super::OpEthApi`] to allow to enable it conditionally, +#[derive(Clone, Debug)] +pub struct OpEthExtApi { + /// Sequencer client, configured to forward submitted transactions to sequencer of given OP + /// network. + sequencer_client: Option, + inner: Arc>, +} + +impl OpEthExtApi +where + Provider: BlockReaderIdExt + StateProviderFactory + Clone + 'static, +{ + /// Creates a new [`OpEthExtApi`]. + pub fn new(sequencer_client: Option, pool: Pool, provider: Provider) -> Self { + let inner = Arc::new(OpEthExtApiInner::new(pool, provider)); + Self { sequencer_client, inner } + } + + /// Returns the configured sequencer client, if any. + const fn sequencer_client(&self) -> Option<&SequencerClient> { + self.sequencer_client.as_ref() + } + + #[inline] + fn pool(&self) -> &Pool { + self.inner.pool() + } + + #[inline] + fn provider(&self) -> &Provider { + self.inner.provider() + } + + /// Validates the conditional's `known accounts` settings against the current state. 
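`validate_known_accounts` below checks the `known_accounts` side of a `TransactionConditional`. A hedged sketch of building such a conditional on the client side; the `block_number_min`/`block_number_max` field names and the collection types are assumptions based on alloy's ERC-4337 types and may differ across versions:

```rust
use alloy_primitives::{address, B256};
use alloy_rpc_types_eth::erc4337::{AccountStorage, TransactionConditional};

fn example_conditional() -> TransactionConditional {
    let mut cond = TransactionConditional::default();
    // Only valid while blocks 1_000..=1_010 are being built (assumed field names).
    cond.block_number_min = Some(1_000);
    cond.block_number_max = Some(1_010);
    // And only if this slot still holds the expected value (placeholder address/slot).
    cond.known_accounts.insert(
        address!("d8dA6BF26964aF9D7eEd9e03E53415D37aA96045"),
        AccountStorage::Slots([(B256::ZERO, B256::ZERO)].into_iter().collect()),
    );
    cond
}
```

Server-side, `send_raw_transaction_conditional` rejects the request up front when `condition.cost()` exceeds `MAX_CONDITIONAL_EXECUTION_COST`, so conditionals should check as few slots as possible.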
+ async fn validate_known_accounts( + &self, + condition: &TransactionConditional, + ) -> Result<(), TxConditionalErr> { + if condition.known_accounts.is_empty() { + return Ok(()); + } + + let _permit = + self.inner.validation_semaphore.acquire().await.map_err(TxConditionalErr::internal)?; + + let state = self + .provider() + .state_by_block_number_or_tag(BlockNumberOrTag::Latest) + .map_err(TxConditionalErr::internal)?; + + for (address, storage) in &condition.known_accounts { + match storage { + AccountStorage::Slots(slots) => { + for (slot, expected_value) in slots { + let current = state + .storage(*address, StorageKey::from(*slot)) + .map_err(TxConditionalErr::internal)? + .unwrap_or_default(); + + if current != U256::from_be_bytes(**expected_value) { + return Err(TxConditionalErr::StorageValueMismatch); + } + } + } + AccountStorage::RootHash(expected_root) => { + let actual_root = state + .storage_root(*address, Default::default()) + .map_err(TxConditionalErr::internal)?; + + if *expected_root != actual_root { + return Err(TxConditionalErr::StorageRootMismatch); + } + } + } + } + + Ok(()) + } +} + +#[async_trait::async_trait] +impl L2EthApiExtServer for OpEthExtApi +where + Provider: BlockReaderIdExt + StateProviderFactory + Clone + 'static, + Pool: TransactionPool + 'static, +{ + async fn send_raw_transaction_conditional( + &self, + bytes: Bytes, + condition: TransactionConditional, + ) -> RpcResult { + // calculate and validate cost + let cost = condition.cost(); + if cost > MAX_CONDITIONAL_EXECUTION_COST { + return Err(TxConditionalErr::ConditionalCostExceeded.into()); + } + + let recovered_tx = recover_raw_transaction(&bytes).map_err(|_| { + OpEthApiError::Eth(reth_rpc_eth_types::EthApiError::FailedToDecodeSignedTransaction) + })?; + + let mut tx = ::Transaction::from_pooled(recovered_tx); + + // get current header + let header_not_found = || { + OpEthApiError::Eth(reth_rpc_eth_types::EthApiError::HeaderNotFound( + alloy_eips::BlockId::Number(BlockNumberOrTag::Latest), + )) + }; + let header = self + .provider() + .latest_header() + .map_err(|_| header_not_found())? + .ok_or_else(header_not_found)?; + + // Ensure that the condition can still be met by checking the max bounds + if condition.has_exceeded_block_number(header.header().number()) || + condition.has_exceeded_timestamp(header.header().timestamp()) + { + return Err(TxConditionalErr::InvalidCondition.into()); + } + + // Validate Account + self.validate_known_accounts(&condition).await?; + + if let Some(sequencer) = self.sequencer_client() { + // If we have a sequencer client, forward the transaction + let _ = sequencer + .forward_raw_transaction_conditional(bytes.as_ref(), condition) + .await + .map_err(OpEthApiError::Sequencer)?; + Ok(*tx.hash()) + } else { + // otherwise, add to pool with the appended conditional + tx.set_conditional(condition); + let AddedTransactionOutcome { hash, .. } = + self.pool().add_transaction(TransactionOrigin::Private, tx).await.map_err(|e| { + OpEthApiError::Eth(reth_rpc_eth_types::EthApiError::PoolError(e.into())) + })?; + + Ok(hash) + } + } +} + +#[derive(Debug)] +struct OpEthExtApiInner { + /// The transaction pool of the node. + pool: Pool, + /// The provider type used to interact with the node. + provider: Provider, + /// The semaphore used to limit the number of concurrent conditional validations. 
+ validation_semaphore: Semaphore, +} + +impl OpEthExtApiInner { + fn new(pool: Pool, provider: Provider) -> Self { + Self { + pool, + provider, + validation_semaphore: Semaphore::new(MAX_CONCURRENT_CONDITIONAL_VALIDATIONS), + } + } + + #[inline] + const fn pool(&self) -> &Pool { + &self.pool + } + + #[inline] + const fn provider(&self) -> &Provider { + &self.provider + } +} diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs new file mode 100644 index 00000000..3c51afca --- /dev/null +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -0,0 +1,571 @@ +//! OP-Reth `eth_` endpoint implementation. + +pub mod ext; +pub mod proofs; +pub mod receipt; +pub mod transaction; + +mod block; +mod call; +mod pending_block; + +use crate::{ + eth::{receipt::OpReceiptConverter, transaction::OpTxInfoMapper}, + OpEthApiError, SequencerClient, +}; +use alloy_consensus::BlockHeader; +use alloy_primitives::{B256, U256}; +use eyre::WrapErr; +use op_alloy_network::Optimism; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; +pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; +use reqwest::Url; +use reth_chainspec::{EthereumHardforks, Hardforks}; +use reth_evm::ConfigureEvm; +use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy, NodeTypes}; +use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; +use reth_optimism_flashblocks::{ + FlashBlockBuildInfo, FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, + FlashBlockConsensusClient, FlashBlockRx, FlashBlockService, FlashblocksListeners, + PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, +}; +use reth_rpc::eth::core::EthApiInner; +use reth_rpc_eth_api::{ + helpers::{ + pending_block::BuildPendingEnv, EthApiSpec, EthFees, EthState, LoadFee, LoadPendingBlock, + LoadState, SpawnBlocking, Trace, + }, + EthApiTypes, FromEvmError, FullEthApiServer, RpcConvert, RpcConverter, RpcNodeCore, + RpcNodeCoreExt, RpcTypes, +}; +use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle, PendingBlock}; +use reth_storage_api::{BlockReaderIdExt, ProviderHeader}; +use reth_tasks::{ + pool::{BlockingTaskGuard, BlockingTaskPool}, + TaskSpawner, +}; +use std::{ + fmt::{self, Formatter}, + marker::PhantomData, + sync::Arc, + time::Duration, +}; +use tokio::{sync::watch, time}; +use tracing::info; + +/// Maximum duration to wait for a fresh flashblock when one is being built. +const MAX_FLASHBLOCK_WAIT_DURATION: Duration = Duration::from_millis(50); + +/// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API. +pub type EthApiNodeBackend = EthApiInner; + +/// OP-Reth `Eth` API implementation. +/// +/// This type provides the functionality for handling `eth_` related requests. +/// +/// This wraps a default `Eth` implementation, and provides additional functionality where the +/// optimism spec deviates from the default (ethereum) spec, e.g. transaction forwarding to the +/// sequencer, receipts, additional RPC fields for transaction receipts. +/// +/// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented +/// all the `Eth` helper traits and prerequisite traits. +pub struct OpEthApi { + /// Gateway to node's core components. + inner: Arc>, +} + +impl Clone for OpEthApi { + fn clone(&self) -> Self { + Self { inner: self.inner.clone() } + } +} + +impl OpEthApi { + /// Creates a new `OpEthApi`. 
+ pub fn new( + eth_api: EthApiNodeBackend, + sequencer_client: Option, + min_suggested_priority_fee: U256, + flashblocks: Option>, + ) -> Self { + let inner = Arc::new(OpEthApiInner { + eth_api, + sequencer_client, + min_suggested_priority_fee, + flashblocks, + }); + Self { inner } + } + + /// Build a [`OpEthApi`] using [`OpEthApiBuilder`]. + pub const fn builder() -> OpEthApiBuilder { + OpEthApiBuilder::new() + } + + /// Returns a reference to the [`EthApiNodeBackend`]. + pub fn eth_api(&self) -> &EthApiNodeBackend { + self.inner.eth_api() + } + /// Returns the configured sequencer client, if any. + pub fn sequencer_client(&self) -> Option<&SequencerClient> { + self.inner.sequencer_client() + } + + /// Returns a cloned pending block receiver, if any. + pub fn pending_block_rx(&self) -> Option> { + self.inner.flashblocks.as_ref().map(|f| f.pending_block_rx.clone()) + } + + /// Returns a new subscription to received flashblocks. + pub fn subscribe_received_flashblocks(&self) -> Option { + self.inner.flashblocks.as_ref().map(|f| f.received_flashblocks.subscribe()) + } + + /// Returns a new subscription to flashblock sequences. + pub fn subscribe_flashblock_sequence(&self) -> Option { + self.inner.flashblocks.as_ref().map(|f| f.flashblocks_sequence.subscribe()) + } + + /// Returns information about the flashblock currently being built, if any. + fn flashblock_build_info(&self) -> Option { + self.inner.flashblocks.as_ref().and_then(|f| *f.in_progress_rx.borrow()) + } + + /// Extracts pending block if it matches the expected parent hash. + fn extract_matching_block( + &self, + block: Option<&PendingFlashBlock>, + parent_hash: B256, + ) -> Option> { + block.filter(|b| b.block().parent_hash() == parent_hash).map(|b| b.pending.clone()) + } + + /// Awaits a fresh flashblock if one is being built, otherwise returns current. + async fn flashblock( + &self, + parent_hash: B256, + ) -> eyre::Result>> { + let Some(rx) = self.inner.flashblocks.as_ref().map(|f| &f.pending_block_rx) else { + return Ok(None) + }; + + // Check if a flashblock is being built + if let Some(build_info) = self.flashblock_build_info() { + let current_index = rx.borrow().as_ref().map(|b| b.last_flashblock_index); + + // Check if this is the first flashblock or the next consecutive index + let is_next_index = current_index.is_none_or(|idx| build_info.index == idx + 1); + + // Wait only for relevant flashblocks: matching parent and next in sequence + if build_info.parent_hash == parent_hash && is_next_index { + let mut rx_clone = rx.clone(); + // Wait up to MAX_FLASHBLOCK_WAIT_DURATION for a new flashblock to arrive + let _ = time::timeout(MAX_FLASHBLOCK_WAIT_DURATION, rx_clone.changed()).await; + } + } + + // Fall back to current block + Ok(self.extract_matching_block(rx.borrow().as_ref(), parent_hash)) + } + + /// Returns a [`PendingBlock`] that is built out of flashblocks. + /// + /// If flashblocks receiver is not set, then it always returns `None`. + /// + /// It may wait up to 50ms for a fresh flashblock if one is currently being built. + pub async fn pending_flashblock(&self) -> eyre::Result>> + where + OpEthApiError: FromEvmError, + Rpc: RpcConvert, + { + let Some(latest) = self.provider().latest_header()? 
else { + return Ok(None); + }; + + self.flashblock(latest.hash()).await + } +} + +impl EthApiTypes for OpEthApi +where + N: RpcNodeCore, + Rpc: RpcConvert, +{ + type Error = OpEthApiError; + type NetworkTypes = Rpc::Network; + type RpcConvert = Rpc; + + fn converter(&self) -> &Self::RpcConvert { + self.inner.eth_api.converter() + } +} + +impl RpcNodeCore for OpEthApi +where + N: RpcNodeCore, + Rpc: RpcConvert, +{ + type Primitives = N::Primitives; + type Provider = N::Provider; + type Pool = N::Pool; + type Evm = N::Evm; + type Network = N::Network; + + #[inline] + fn pool(&self) -> &Self::Pool { + self.inner.eth_api.pool() + } + + #[inline] + fn evm_config(&self) -> &Self::Evm { + self.inner.eth_api.evm_config() + } + + #[inline] + fn network(&self) -> &Self::Network { + self.inner.eth_api.network() + } + + #[inline] + fn provider(&self) -> &Self::Provider { + self.inner.eth_api.provider() + } +} + +impl RpcNodeCoreExt for OpEthApi +where + N: RpcNodeCore, + Rpc: RpcConvert, +{ + #[inline] + fn cache(&self) -> &EthStateCache { + self.inner.eth_api.cache() + } +} + +impl EthApiSpec for OpEthApi +where + N: RpcNodeCore, + Rpc: RpcConvert, +{ + #[inline] + fn starting_block(&self) -> U256 { + self.inner.eth_api.starting_block() + } +} + +impl SpawnBlocking for OpEthApi +where + N: RpcNodeCore, + Rpc: RpcConvert, +{ + #[inline] + fn io_task_spawner(&self) -> impl TaskSpawner { + self.inner.eth_api.task_spawner() + } + + #[inline] + fn tracing_task_pool(&self) -> &BlockingTaskPool { + self.inner.eth_api.blocking_task_pool() + } + + #[inline] + fn tracing_task_guard(&self) -> &BlockingTaskGuard { + self.inner.eth_api.blocking_task_guard() + } + + #[inline] + fn blocking_io_task_guard(&self) -> &Arc { + self.inner.eth_api.blocking_io_request_semaphore() + } +} + +impl LoadFee for OpEthApi +where + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, +{ + #[inline] + fn gas_oracle(&self) -> &GasPriceOracle { + self.inner.eth_api.gas_oracle() + } + + #[inline] + fn fee_history_cache(&self) -> &FeeHistoryCache> { + self.inner.eth_api.fee_history_cache() + } + + async fn suggested_priority_fee(&self) -> Result { + self.inner + .eth_api + .gas_oracle() + .op_suggest_tip_cap(self.inner.min_suggested_priority_fee) + .await + .map_err(Into::into) + } +} + +impl LoadState for OpEthApi +where + N: RpcNodeCore, + Rpc: RpcConvert, + Self: LoadPendingBlock, +{ +} + +impl EthState for OpEthApi +where + N: RpcNodeCore, + Rpc: RpcConvert, + Self: LoadPendingBlock, +{ + #[inline] + fn max_proof_window(&self) -> u64 { + self.inner.eth_api.eth_proof_window() + } +} + +impl EthFees for OpEthApi +where + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, +{ +} + +impl Trace for OpEthApi +where + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, +{ +} + +impl fmt::Debug for OpEthApi { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("OpEthApi").finish_non_exhaustive() + } +} + +/// Container type `OpEthApi` +pub struct OpEthApiInner { + /// Gateway to node's core components. + eth_api: EthApiNodeBackend, + /// Sequencer client, configured to forward submitted transactions to sequencer of given OP + /// network. + sequencer_client: Option, + /// Minimum priority fee enforced by OP-specific logic. + /// + /// See also + min_suggested_priority_fee: U256, + /// Flashblocks listeners. + /// + /// If set, provides receivers for pending blocks, flashblock sequences, and build status. 
+ flashblocks: Option>, +} + +impl fmt::Debug for OpEthApiInner { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("OpEthApiInner").finish() + } +} + +impl OpEthApiInner { + /// Returns a reference to the [`EthApiNodeBackend`]. + const fn eth_api(&self) -> &EthApiNodeBackend { + &self.eth_api + } + + /// Returns the configured sequencer client, if any. + const fn sequencer_client(&self) -> Option<&SequencerClient> { + self.sequencer_client.as_ref() + } +} + +/// Converter for OP RPC types. +pub type OpRpcConvert = RpcConverter< + NetworkT, + ::Evm, + OpReceiptConverter<::Provider>, + (), + OpTxInfoMapper<::Provider>, +>; + +/// Builds [`OpEthApi`] for Optimism. +#[derive(Debug)] +pub struct OpEthApiBuilder { + /// Sequencer client, configured to forward submitted transactions to sequencer of given OP + /// network. + sequencer_url: Option, + /// Headers to use for the sequencer client requests. + sequencer_headers: Vec, + /// Minimum suggested priority fee (tip) + min_suggested_priority_fee: u64, + /// A URL pointing to a secure websocket connection (wss) that streams out [flashblocks]. + /// + /// [flashblocks]: reth_optimism_flashblocks + flashblocks_url: Option, + /// Enable flashblock consensus client to drive the chain forward. + /// + /// When enabled, flashblock sequences are submitted to the engine API via + /// `newPayload` and `forkchoiceUpdated` calls, advancing the canonical chain state. + /// Requires `flashblocks_url` to be set. + flashblock_consensus: bool, + /// Marker for network types. + _nt: PhantomData, +} + +impl Default for OpEthApiBuilder { + fn default() -> Self { + Self { + sequencer_url: None, + sequencer_headers: Vec::new(), + min_suggested_priority_fee: 1_000_000, + flashblocks_url: None, + flashblock_consensus: false, + _nt: PhantomData, + } + } +} + +impl OpEthApiBuilder { + /// Creates a [`OpEthApiBuilder`] instance from core components. + pub const fn new() -> Self { + Self { + sequencer_url: None, + sequencer_headers: Vec::new(), + min_suggested_priority_fee: 1_000_000, + flashblocks_url: None, + flashblock_consensus: false, + _nt: PhantomData, + } + } + + /// With a [`SequencerClient`]. + pub fn with_sequencer(mut self, sequencer_url: Option) -> Self { + self.sequencer_url = sequencer_url; + self + } + + /// With headers to use for the sequencer client requests. + pub fn with_sequencer_headers(mut self, sequencer_headers: Vec) -> Self { + self.sequencer_headers = sequencer_headers; + self + } + + /// With minimum suggested priority fee (tip). + pub const fn with_min_suggested_priority_fee(mut self, min: u64) -> Self { + self.min_suggested_priority_fee = min; + self + } + + /// With a subscription to flashblocks secure websocket connection. 
+ pub fn with_flashblocks(mut self, flashblocks_url: Option) -> Self { + self.flashblocks_url = flashblocks_url; + self + } + + /// With flashblock consensus client enabled to drive chain forward + pub const fn with_flashblock_consensus(mut self, flashblock_consensus: bool) -> Self { + self.flashblock_consensus = flashblock_consensus; + self + } +} + +impl EthApiBuilder for OpEthApiBuilder +where + N: FullNodeComponents< + Evm: ConfigureEvm< + NextBlockEnvCtx: BuildPendingEnv> + + From + + Unpin, + >, + Types: NodeTypes< + ChainSpec: Hardforks + EthereumHardforks, + Payload: reth_node_api::PayloadTypes< + ExecutionData: for<'a> TryFrom< + &'a FlashBlockCompleteSequence, + Error: std::fmt::Display, + >, + >, + >, + >, + NetworkT: RpcTypes, + OpRpcConvert: RpcConvert, + OpEthApi>: + FullEthApiServer, +{ + type EthApi = OpEthApi>; + + async fn build_eth_api(self, ctx: EthApiCtx<'_, N>) -> eyre::Result { + let Self { + sequencer_url, + sequencer_headers, + min_suggested_priority_fee, + flashblocks_url, + flashblock_consensus, + .. + } = self; + let rpc_converter = + RpcConverter::new(OpReceiptConverter::new(ctx.components.provider().clone())) + .with_mapper(OpTxInfoMapper::new(ctx.components.provider().clone())); + + let sequencer_client = if let Some(url) = sequencer_url { + Some( + SequencerClient::new_with_headers(&url, sequencer_headers) + .await + .wrap_err_with(|| format!("Failed to init sequencer client with: {url}"))?, + ) + } else { + None + }; + + let flashblocks = if let Some(ws_url) = flashblocks_url { + info!(target: "reth:cli", %ws_url, "Launching flashblocks service"); + + let (tx, pending_rx) = watch::channel(None); + let stream = WsFlashBlockStream::new(ws_url); + let service = FlashBlockService::new( + stream, + ctx.components.evm_config().clone(), + ctx.components.provider().clone(), + ctx.components.task_executor().clone(), + // enable state root calculation if flashblock_consensus is enabled. + flashblock_consensus, + ); + + let flashblocks_sequence = service.block_sequence_broadcaster().clone(); + let received_flashblocks = service.flashblocks_broadcaster().clone(); + let in_progress_rx = service.subscribe_in_progress(); + ctx.components.task_executor().spawn(Box::pin(service.run(tx))); + + if flashblock_consensus { + info!(target: "reth::cli", "Launching FlashBlockConsensusClient"); + let flashblock_client = FlashBlockConsensusClient::new( + ctx.engine_handle.clone(), + flashblocks_sequence.subscribe(), + )?; + ctx.components.task_executor().spawn(Box::pin(flashblock_client.run())); + } + + Some(FlashblocksListeners::new( + pending_rx, + flashblocks_sequence, + in_progress_rx, + received_flashblocks, + )) + } else { + None + }; + + let eth_api = ctx.eth_api_builder().with_rpc_converter(rpc_converter).build_inner(); + + Ok(OpEthApi::new( + eth_api, + sequencer_client, + U256::from(min_suggested_priority_fee), + flashblocks, + )) + } +} diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs new file mode 100644 index 00000000..bf351d7d --- /dev/null +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -0,0 +1,79 @@ +//! Loads OP pending block for a RPC response. 
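+//!
+//! When a flashblock-built pending block is available it is preferred; otherwise the
+//! implementation falls back to the latest canonical block and its receipts.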
+ +use crate::{OpEthApi, OpEthApiError}; +use alloy_consensus::BlockHeader; +use alloy_eips::BlockNumberOrTag; +use reth_chain_state::BlockState; +use reth_rpc_eth_api::{ + helpers::{pending_block::PendingEnvBuilder, LoadPendingBlock, SpawnBlocking}, + FromEvmError, RpcConvert, RpcNodeCore, RpcNodeCoreExt, +}; +use reth_rpc_eth_types::{ + block::BlockAndReceipts, builder::config::PendingBlockKind, error::FromEthApiError, + EthApiError, PendingBlock, +}; +use reth_storage_api::{BlockReaderIdExt, StateProviderBox, StateProviderFactory}; + +impl LoadPendingBlock for OpEthApi +where + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, +{ + #[inline] + fn pending_block(&self) -> &tokio::sync::Mutex>> { + self.inner.eth_api.pending_block() + } + + #[inline] + fn pending_env_builder(&self) -> &dyn PendingEnvBuilder { + self.inner.eth_api.pending_env_builder() + } + + #[inline] + fn pending_block_kind(&self) -> PendingBlockKind { + self.inner.eth_api.pending_block_kind() + } + + /// Returns a [`StateProviderBox`] on a mem-pool built pending block overlaying latest. + async fn local_pending_state(&self) -> Result, Self::Error> + where + Self: SpawnBlocking, + { + let Ok(Some(pending_block)) = self.pending_flashblock().await else { + return Ok(None); + }; + + let latest_historical = self + .provider() + .history_by_block_hash(pending_block.block().parent_hash()) + .map_err(Self::Error::from_eth_err)?; + + let state = BlockState::from(pending_block); + + Ok(Some(Box::new(state.state_provider(latest_historical)) as StateProviderBox)) + } + + /// Returns the locally built pending block + async fn local_pending_block( + &self, + ) -> Result>, Self::Error> { + if let Ok(Some(pending)) = self.pending_flashblock().await { + return Ok(Some(pending.into_block_and_receipts())); + } + + // See: + let latest = self + .provider() + .latest_header()? + .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; + + let latest = self + .cache() + .get_block_and_receipts(latest.hash()) + .await + .map_err(Self::Error::from_eth_err)? + .map(|(block, receipts)| BlockAndReceipts { block, receipts }); + Ok(latest) + } +} diff --git a/crates/optimism/rpc/src/eth/proofs.rs b/crates/optimism/rpc/src/eth/proofs.rs new file mode 100644 index 00000000..07522c96 --- /dev/null +++ b/crates/optimism/rpc/src/eth/proofs.rs @@ -0,0 +1,95 @@ +//! Historical proofs RPC server implementation. + +use crate::{metrics::EthApiExtMetrics, state::OpStateProviderFactory}; +use alloy_eips::BlockId; +use alloy_primitives::Address; +use alloy_rpc_types_eth::EIP1186AccountProofResponse; +use alloy_serde::JsonStorageKey; +use async_trait::async_trait; +use jsonrpsee::proc_macros::rpc; +use jsonrpsee_core::RpcResult; +use jsonrpsee_types::error::ErrorObject; +use reth_optimism_trie::{OpProofsStorage, OpProofsStore}; +use reth_provider::StateProofProvider; +use reth_rpc_api::eth::helpers::FullEthApi; +use std::time::Instant; + +#[cfg_attr(not(test), rpc(server, namespace = "eth"))] +#[cfg_attr(test, rpc(server, client, namespace = "eth"))] +pub trait EthApiOverride { + /// Returns the account and storage values of the specified account including the Merkle-proof. + /// This call can be used to verify that the data you are pulling from is not tampered with. + #[method(name = "getProof")] + async fn get_proof( + &self, + address: Address, + keys: Vec, + block_number: Option, + ) -> RpcResult; +} + +#[derive(Debug)] +/// Overrides applied to the `eth_` namespace of the RPC API for historical proofs ExEx. 
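+///
+/// A request served by this override looks like (illustrative values):
+/// `{"jsonrpc":"2.0","id":1,"method":"eth_getProof","params":["0x...address", [], "0x1"]}`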
+pub struct EthApiExt<Eth, P> {
+    state_provider_factory: OpStateProviderFactory<Eth, P>,
+    metrics: EthApiExtMetrics,
+}
+
+impl<Eth, P> EthApiExt<Eth, P>
+where
+    Eth: FullEthApi + Send + Sync + 'static,
+    ErrorObject<'static>: From<Eth::Error>,
+    P: OpProofsStore + Clone + 'static,
+{
+    /// Creates a new instance of the `EthApiExt`.
+    pub fn new(eth_api: Eth, preimage_store: OpProofsStorage<P>
) -> Self { + let metrics = EthApiExtMetrics::default(); + Self { + state_provider_factory: OpStateProviderFactory::new(eth_api, preimage_store), + metrics, + } + } +} + +#[async_trait] +impl EthApiOverrideServer for EthApiExt +where + Eth: FullEthApi + Send + Sync + 'static, + ErrorObject<'static>: From, + P: OpProofsStore + Clone + 'static, +{ + async fn get_proof( + &self, + address: Address, + keys: Vec, + block_number: Option, + ) -> RpcResult { + let start = Instant::now(); + self.metrics.get_proof_requests.increment(1); + + let storage_keys = keys.iter().map(|key| key.as_b256()).collect::>(); + + let result = async { + let proof = self + .state_provider_factory + .state_provider(block_number) + .await + .map_err(Into::into)? + .proof(Default::default(), address, &storage_keys) + .map_err(Into::into)?; + + Ok(proof.into_eip1186_response(keys)) + } + .await; + + match &result { + Ok(_) => { + self.metrics.get_proof_latency.record(start.elapsed().as_secs_f64()); + self.metrics.get_proof_successful_responses.increment(1); + } + Err(_) => self.metrics.get_proof_failures.increment(1), + } + + result + } +} diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs new file mode 100644 index 00000000..e86aa615 --- /dev/null +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -0,0 +1,723 @@ +//! Loads and formats OP receipt RPC response. + +use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError}; +use alloy_consensus::{BlockHeader, Receipt, ReceiptWithBloom, TxReceipt}; +use alloy_eips::eip2718::Encodable2718; +use alloy_rpc_types_eth::{Log, TransactionReceipt}; +use op_alloy_consensus::{OpReceipt, OpTransaction}; +use op_alloy_rpc_types::{L1BlockInfo, OpTransactionReceipt, OpTransactionReceiptFields}; +use op_revm::estimate_tx_compressed_size; +use reth_chainspec::{ChainSpecProvider, EthChainSpec}; +use reth_node_api::NodePrimitives; +use reth_optimism_evm::RethL1BlockInfo; +use reth_optimism_forks::OpHardforks; +use reth_primitives_traits::SealedBlock; +use reth_rpc_eth_api::{ + helpers::LoadReceipt, + transaction::{ConvertReceiptInput, ReceiptConverter}, + RpcConvert, +}; +use reth_rpc_eth_types::{receipt::build_receipt, EthApiError}; +use reth_storage_api::BlockReader; +use std::fmt::Debug; + +impl LoadReceipt for OpEthApi +where + N: RpcNodeCore, + Rpc: RpcConvert, +{ +} + +/// Converter for OP receipts. +#[derive(Debug, Clone)] +pub struct OpReceiptConverter { + provider: Provider, +} + +impl OpReceiptConverter { + /// Creates a new [`OpReceiptConverter`]. + pub const fn new(provider: Provider) -> Self { + Self { provider } + } +} + +impl ReceiptConverter for OpReceiptConverter +where + N: NodePrimitives, + Provider: + BlockReader + ChainSpecProvider + Debug + 'static, +{ + type RpcReceipt = OpTransactionReceipt; + type Error = OpEthApiError; + + fn convert_receipts( + &self, + inputs: Vec>, + ) -> Result, Self::Error> { + let Some(block_number) = inputs.first().map(|r| r.meta.block_number) else { + return Ok(Vec::new()); + }; + + let block = self + .provider + .block_by_number(block_number)? 
+ .ok_or(EthApiError::HeaderNotFound(block_number.into()))?; + + self.convert_receipts_with_block(inputs, &SealedBlock::new_unhashed(block)) + } + + fn convert_receipts_with_block( + &self, + inputs: Vec>, + block: &SealedBlock, + ) -> Result, Self::Error> { + let mut l1_block_info = match reth_optimism_evm::extract_l1_info(block.body()) { + Ok(l1_block_info) => l1_block_info, + Err(err) => { + let genesis_number = + self.provider.chain_spec().genesis().number.unwrap_or_default(); + // If it is the genesis block (i.e. block number is 0), there is no L1 info, so + // we return an empty l1_block_info. + if block.header().number() == genesis_number { + return Ok(vec![]); + } + return Err(err.into()); + } + }; + + let mut receipts = Vec::with_capacity(inputs.len()); + + for input in inputs { + // We must clear this cache as different L2 transactions can have different + // L1 costs. A potential improvement here is to only clear the cache if the + // new transaction input has changed, since otherwise the L1 cost wouldn't. + l1_block_info.clear_tx_l1_cost(); + + receipts.push( + OpReceiptBuilder::new(&self.provider.chain_spec(), input, &mut l1_block_info)? + .build(), + ); + } + + Ok(receipts) + } +} + +/// L1 fee and data gas for a non-deposit transaction, or deposit nonce and receipt version for a +/// deposit transaction. +#[derive(Debug, Clone)] +pub struct OpReceiptFieldsBuilder { + /// Block number. + pub block_number: u64, + /// Block timestamp. + pub block_timestamp: u64, + /// The L1 fee for transaction. + pub l1_fee: Option, + /// L1 gas used by transaction. + pub l1_data_gas: Option, + /// L1 fee scalar. + pub l1_fee_scalar: Option, + /* ---------------------------------------- Bedrock ---------------------------------------- */ + /// The base fee of the L1 origin block. + pub l1_base_fee: Option, + /* --------------------------------------- Regolith ---------------------------------------- */ + /// Deposit nonce, if this is a deposit transaction. + pub deposit_nonce: Option, + /* ---------------------------------------- Canyon ----------------------------------------- */ + /// Deposit receipt version, if this is a deposit transaction. + pub deposit_receipt_version: Option, + /* ---------------------------------------- Ecotone ---------------------------------------- */ + /// The current L1 fee scalar. + pub l1_base_fee_scalar: Option, + /// The current L1 blob base fee. + pub l1_blob_base_fee: Option, + /// The current L1 blob base fee scalar. + pub l1_blob_base_fee_scalar: Option, + /* ---------------------------------------- Isthmus ---------------------------------------- */ + /// The current operator fee scalar. + pub operator_fee_scalar: Option, + /// The current L1 blob base fee scalar. + pub operator_fee_constant: Option, + /* ---------------------------------------- Jovian ----------------------------------------- */ + /// The current DA footprint gas scalar. + pub da_footprint_gas_scalar: Option, +} + +impl OpReceiptFieldsBuilder { + /// Returns a new builder. + pub const fn new(block_timestamp: u64, block_number: u64) -> Self { + Self { + block_number, + block_timestamp, + l1_fee: None, + l1_data_gas: None, + l1_fee_scalar: None, + l1_base_fee: None, + deposit_nonce: None, + deposit_receipt_version: None, + l1_base_fee_scalar: None, + l1_blob_base_fee: None, + l1_blob_base_fee_scalar: None, + operator_fee_scalar: None, + operator_fee_constant: None, + da_footprint_gas_scalar: None, + } + } + + /// Applies [`L1BlockInfo`](op_revm::L1BlockInfo). 
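+    ///
+    /// Pre-Ecotone the reported `l1FeeScalar` is the stored integer scalar divided by 1e6
+    /// (e.g. 684_000 surfaces as 0.684); from Ecotone onwards that field is omitted and the
+    /// raw base-fee and blob-base-fee scalars are reported instead.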
+ pub fn l1_block_info( + mut self, + chain_spec: &impl OpHardforks, + tx: &T, + l1_block_info: &mut op_revm::L1BlockInfo, + ) -> Result { + let raw_tx = tx.encoded_2718(); + let timestamp = self.block_timestamp; + + self.l1_fee = Some( + l1_block_info + .l1_tx_data_fee(chain_spec, timestamp, &raw_tx, tx.is_deposit()) + .map_err(|_| OpEthApiError::L1BlockFeeError)? + .saturating_to(), + ); + + self.l1_data_gas = Some( + l1_block_info + .l1_data_gas(chain_spec, timestamp, &raw_tx) + .map_err(|_| OpEthApiError::L1BlockGasError)? + .saturating_add(l1_block_info.l1_fee_overhead.unwrap_or_default()) + .saturating_to(), + ); + + self.l1_fee_scalar = (!chain_spec.is_ecotone_active_at_timestamp(timestamp)) + .then_some(f64::from(l1_block_info.l1_base_fee_scalar) / 1_000_000.0); + + self.l1_base_fee = Some(l1_block_info.l1_base_fee.saturating_to()); + self.l1_base_fee_scalar = Some(l1_block_info.l1_base_fee_scalar.saturating_to()); + self.l1_blob_base_fee = l1_block_info.l1_blob_base_fee.map(|fee| fee.saturating_to()); + self.l1_blob_base_fee_scalar = + l1_block_info.l1_blob_base_fee_scalar.map(|scalar| scalar.saturating_to()); + + // If the operator fee params are both set to 0, we don't add them to the receipt. + let operator_fee_scalar_has_non_zero_value: bool = + l1_block_info.operator_fee_scalar.is_some_and(|scalar| !scalar.is_zero()); + + let operator_fee_constant_has_non_zero_value = + l1_block_info.operator_fee_constant.is_some_and(|constant| !constant.is_zero()); + + if operator_fee_scalar_has_non_zero_value || operator_fee_constant_has_non_zero_value { + self.operator_fee_scalar = + l1_block_info.operator_fee_scalar.map(|scalar| scalar.saturating_to()); + self.operator_fee_constant = + l1_block_info.operator_fee_constant.map(|constant| constant.saturating_to()); + } + + self.da_footprint_gas_scalar = l1_block_info.da_footprint_gas_scalar; + + Ok(self) + } + + /// Applies deposit transaction metadata: deposit nonce. + pub const fn deposit_nonce(mut self, nonce: Option) -> Self { + self.deposit_nonce = nonce; + self + } + + /// Applies deposit transaction metadata: deposit receipt version. + pub const fn deposit_version(mut self, version: Option) -> Self { + self.deposit_receipt_version = version; + self + } + + /// Builds the [`OpTransactionReceiptFields`] object. + pub const fn build(self) -> OpTransactionReceiptFields { + let Self { + block_number: _, // used to compute other fields + block_timestamp: _, // used to compute other fields + l1_fee, + l1_data_gas: l1_gas_used, + l1_fee_scalar, + l1_base_fee: l1_gas_price, + deposit_nonce, + deposit_receipt_version, + l1_base_fee_scalar, + l1_blob_base_fee, + l1_blob_base_fee_scalar, + operator_fee_scalar, + operator_fee_constant, + da_footprint_gas_scalar, + } = self; + + OpTransactionReceiptFields { + l1_block_info: L1BlockInfo { + l1_gas_price, + l1_gas_used, + l1_fee, + l1_fee_scalar, + l1_base_fee_scalar, + l1_blob_base_fee, + l1_blob_base_fee_scalar, + operator_fee_scalar, + operator_fee_constant, + da_footprint_gas_scalar, + }, + deposit_nonce, + deposit_receipt_version, + } + } +} + +/// Builds an [`OpTransactionReceipt`]. +#[derive(Debug)] +pub struct OpReceiptBuilder { + /// Core receipt, has all the fields of an L1 receipt and is the basis for the OP receipt. + pub core_receipt: TransactionReceipt>>, + /// Additional OP receipt fields. + pub op_receipt_fields: OpTransactionReceiptFields, +} + +impl OpReceiptBuilder { + /// Returns a new builder. 
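+    ///
+    /// Post-Jovian, the receipt's `blob_gas_used` carries the DA footprint instead: the
+    /// compressed-size estimate (fixed-point, scaled by 1e6 per the division below) is
+    /// divided by 1_000_000 and multiplied by the DA footprint gas scalar, so e.g. an
+    /// estimate of 4_000_000 with a scalar of 100 yields 400.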
+ pub fn new( + chain_spec: &impl OpHardforks, + input: ConvertReceiptInput<'_, N>, + l1_block_info: &mut op_revm::L1BlockInfo, + ) -> Result + where + N: NodePrimitives, + { + let timestamp = input.meta.timestamp; + let block_number = input.meta.block_number; + let tx_signed = *input.tx.inner(); + let mut core_receipt = build_receipt(input, None, |receipt, next_log_index, meta| { + let map_logs = move |receipt: alloy_consensus::Receipt| { + let Receipt { status, cumulative_gas_used, logs } = receipt; + let logs = Log::collect_for_receipt(next_log_index, meta, logs); + Receipt { status, cumulative_gas_used, logs } + }; + let mapped_receipt: OpReceipt = match receipt { + OpReceipt::Legacy(receipt) => OpReceipt::Legacy(map_logs(receipt)), + OpReceipt::Eip2930(receipt) => OpReceipt::Eip2930(map_logs(receipt)), + OpReceipt::Eip1559(receipt) => OpReceipt::Eip1559(map_logs(receipt)), + OpReceipt::Eip7702(receipt) => OpReceipt::Eip7702(map_logs(receipt)), + OpReceipt::Deposit(receipt) => OpReceipt::Deposit(receipt.map_inner(map_logs)), + }; + mapped_receipt.into_with_bloom() + }); + + // In jovian, we're using the blob gas used field to store the current da + // footprint's value. + // We're computing the jovian blob gas used before building the receipt since the inputs get + // consumed by the `build_receipt` function. + chain_spec.is_jovian_active_at_timestamp(timestamp).then(|| { + // Estimate the size of the transaction in bytes and multiply by the DA + // footprint gas scalar. + // Jovian specs: `https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/jovian/exec-engine.md#da-footprint-block-limit` + let da_size = estimate_tx_compressed_size(tx_signed.encoded_2718().as_slice()) + .saturating_div(1_000_000) + .saturating_mul(l1_block_info.da_footprint_gas_scalar.unwrap_or_default().into()); + + core_receipt.blob_gas_used = Some(da_size); + }); + + let op_receipt_fields = OpReceiptFieldsBuilder::new(timestamp, block_number) + .l1_block_info(chain_spec, tx_signed, l1_block_info)? + .build(); + + Ok(Self { core_receipt, op_receipt_fields }) + } + + /// Builds [`OpTransactionReceipt`] by combining core (l1) receipt fields and additional OP + /// receipt fields. + pub fn build(self) -> OpTransactionReceipt { + let Self { core_receipt: inner, op_receipt_fields } = self; + + let OpTransactionReceiptFields { l1_block_info, .. } = op_receipt_fields; + + OpTransactionReceipt { inner, l1_block_info } + } +} + +#[cfg(test)] +mod test { + use super::*; + use alloy_consensus::{transaction::TransactionMeta, Block, BlockBody, Eip658Value, TxEip7702}; + use alloy_op_hardforks::{ + OpChainHardforks, OP_MAINNET_ISTHMUS_TIMESTAMP, OP_MAINNET_JOVIAN_TIMESTAMP, + }; + use alloy_primitives::{hex, Address, Bytes, Signature, U256}; + use op_alloy_consensus::OpTypedTransaction; + use op_alloy_network::eip2718::Decodable2718; + use reth_optimism_chainspec::{BASE_MAINNET, OP_MAINNET}; + use reth_optimism_primitives::{OpPrimitives, OpTransactionSigned}; + use reth_primitives_traits::Recovered; + + /// OP Mainnet transaction at index 0 in block 124665056. 
+ /// + /// + const TX_SET_L1_BLOCK_OP_MAINNET_BLOCK_124665056: [u8; 251] = hex!( + "7ef8f8a0683079df94aa5b9cf86687d739a60a9b4f0835e520ec4d664e2e415dca17a6df94deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e200000146b000f79c500000000000000040000000066d052e700000000013ad8a3000000000000000000000000000000000000000000000000000000003ef1278700000000000000000000000000000000000000000000000000000000000000012fdf87b89884a61e74b322bbcf60386f543bfae7827725efaaf0ab1de2294a590000000000000000000000006887246668a3b87f54deb3b94ba47a6f63f32985" + ); + + /// OP Mainnet transaction at index 1 in block 124665056. + /// + /// + const TX_1_OP_MAINNET_BLOCK_124665056: [u8; 1176] = hex!( + "02f904940a8303fba78401d6d2798401db2b6d830493e0943e6f4f7866654c18f536170780344aa8772950b680b904246a761202000000000000000000000000087000a300de7200382b55d40045000000e5d60e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000022482ad56cb0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000120000000000000000000000000dc6ff44d5d932cbd77b52e5612ba0529dc6226f1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000044095ea7b300000000000000000000000021c4928109acb0659a88ae5329b5374a3024694c0000000000000000000000000000000000000000000000049b9ca9a6943400000000000000000000000000000000000000000000000000000000000000000000000000000000000021c4928109acb0659a88ae5329b5374a3024694c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000024b6b55f250000000000000000000000000000000000000000000000049b9ca9a694340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000415ec214a3950bea839a7e6fbb0ba1540ac2076acd50820e2d5ef83d0902cdffb24a47aff7de5190290769c4f0a9c6fabf63012986a0d590b1b571547a8c7050ea1b00000000000000000000000000000000000000000000000000000000000000c080a06db770e6e25a617fe9652f0958bd9bd6e49281a53036906386ed39ec48eadf63a07f47cf51a4a40b4494cf26efc686709a9b03939e20ee27e59682f5faa536667e" + ); + + /// Timestamp of OP mainnet block 124665056. + /// + /// + const BLOCK_124665056_TIMESTAMP: u64 = 1724928889; + + /// L1 block info for transaction at index 1 in block 124665056. 
+ /// + /// + const TX_META_TX_1_OP_MAINNET_BLOCK_124665056: OpTransactionReceiptFields = + OpTransactionReceiptFields { + l1_block_info: L1BlockInfo { + l1_gas_price: Some(1055991687), // since bedrock l1 base fee + l1_gas_used: Some(4471), + l1_fee: Some(24681034813), + l1_fee_scalar: None, + l1_base_fee_scalar: Some(5227), + l1_blob_base_fee: Some(1), + l1_blob_base_fee_scalar: Some(1014213), + operator_fee_scalar: None, + operator_fee_constant: None, + da_footprint_gas_scalar: None, + }, + deposit_nonce: None, + deposit_receipt_version: None, + }; + + #[test] + fn op_receipt_fields_from_block_and_tx() { + // rig + let tx_0 = OpTransactionSigned::decode_2718( + &mut TX_SET_L1_BLOCK_OP_MAINNET_BLOCK_124665056.as_slice(), + ) + .unwrap(); + + let tx_1 = + OpTransactionSigned::decode_2718(&mut TX_1_OP_MAINNET_BLOCK_124665056.as_slice()) + .unwrap(); + + let block: Block = Block { + body: BlockBody { transactions: [tx_0, tx_1.clone()].to_vec(), ..Default::default() }, + ..Default::default() + }; + + let mut l1_block_info = + reth_optimism_evm::extract_l1_info(&block.body).expect("should extract l1 info"); + + // test + assert!(OP_MAINNET.is_fjord_active_at_timestamp(BLOCK_124665056_TIMESTAMP)); + + let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP, 124665056) + .l1_block_info(&*OP_MAINNET, &tx_1, &mut l1_block_info) + .expect("should parse revm l1 info") + .build(); + + let L1BlockInfo { + l1_gas_price, + l1_gas_used, + l1_fee, + l1_fee_scalar, + l1_base_fee_scalar, + l1_blob_base_fee, + l1_blob_base_fee_scalar, + operator_fee_scalar, + operator_fee_constant, + da_footprint_gas_scalar, + } = receipt_meta.l1_block_info; + + assert_eq!( + l1_gas_price, TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_gas_price, + "incorrect l1 base fee (former gas price)" + ); + assert_eq!( + l1_gas_used, TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_gas_used, + "incorrect l1 gas used" + ); + assert_eq!( + l1_fee, TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_fee, + "incorrect l1 fee" + ); + assert_eq!( + l1_fee_scalar, TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_fee_scalar, + "incorrect l1 fee scalar" + ); + assert_eq!( + l1_base_fee_scalar, + TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_base_fee_scalar, + "incorrect l1 base fee scalar" + ); + assert_eq!( + l1_blob_base_fee, + TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_blob_base_fee, + "incorrect l1 blob base fee" + ); + assert_eq!( + l1_blob_base_fee_scalar, + TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_blob_base_fee_scalar, + "incorrect l1 blob base fee scalar" + ); + assert_eq!( + operator_fee_scalar, + TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.operator_fee_scalar, + "incorrect operator fee scalar" + ); + assert_eq!( + operator_fee_constant, + TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.operator_fee_constant, + "incorrect operator fee constant" + ); + assert_eq!( + da_footprint_gas_scalar, + TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.da_footprint_gas_scalar, + "incorrect da footprint gas scalar" + ); + } + + #[test] + fn op_non_zero_operator_fee_params_included_in_receipt() { + let tx_1 = + OpTransactionSigned::decode_2718(&mut TX_1_OP_MAINNET_BLOCK_124665056.as_slice()) + .unwrap(); + + let mut l1_block_info = op_revm::L1BlockInfo { + operator_fee_scalar: Some(U256::ZERO), + operator_fee_constant: Some(U256::from(2)), + ..Default::default() + }; + + let receipt_meta = 
OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP, 124665056) + .l1_block_info(&*OP_MAINNET, &tx_1, &mut l1_block_info) + .expect("should parse revm l1 info") + .build(); + + let L1BlockInfo { operator_fee_scalar, operator_fee_constant, .. } = + receipt_meta.l1_block_info; + + assert_eq!(operator_fee_scalar, Some(0), "incorrect operator fee scalar"); + assert_eq!(operator_fee_constant, Some(2), "incorrect operator fee constant"); + } + + #[test] + fn op_zero_operator_fee_params_not_included_in_receipt() { + let tx_1 = + OpTransactionSigned::decode_2718(&mut TX_1_OP_MAINNET_BLOCK_124665056.as_slice()) + .unwrap(); + + let mut l1_block_info = op_revm::L1BlockInfo { + operator_fee_scalar: Some(U256::ZERO), + operator_fee_constant: Some(U256::ZERO), + ..Default::default() + }; + + let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP, 124665056) + .l1_block_info(&*OP_MAINNET, &tx_1, &mut l1_block_info) + .expect("should parse revm l1 info") + .build(); + + let L1BlockInfo { operator_fee_scalar, operator_fee_constant, .. } = + receipt_meta.l1_block_info; + + assert_eq!(operator_fee_scalar, None, "incorrect operator fee scalar"); + assert_eq!(operator_fee_constant, None, "incorrect operator fee constant"); + } + + // + #[test] + fn base_receipt_gas_fields() { + // https://basescan.org/tx/0x510fd4c47d78ba9f97c91b0f2ace954d5384c169c9545a77a373cf3ef8254e6e + let system = hex!( + "7ef8f8a0389e292420bcbf9330741f72074e39562a09ff5a00fd22e4e9eee7e34b81bca494deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e20000008dd00101c120000000000000004000000006721035b00000000014189960000000000000000000000000000000000000000000000000000000349b4dcdc000000000000000000000000000000000000000000000000000000004ef9325cc5991ce750960f636ca2ffbb6e209bb3ba91412f21dd78c14ff154d1930f1f9a0000000000000000000000005050f69a9786f081509234f1a7f4684b5e5b76c9" + ); + let tx_0 = OpTransactionSigned::decode_2718(&mut &system[..]).unwrap(); + + let block: alloy_consensus::Block = Block { + body: BlockBody { transactions: vec![tx_0], ..Default::default() }, + ..Default::default() + }; + let mut l1_block_info = + reth_optimism_evm::extract_l1_info(&block.body).expect("should extract l1 info"); + + // https://basescan.org/tx/0xf9420cbaf66a2dda75a015488d37262cbfd4abd0aad7bb2be8a63e14b1fa7a94 + let tx = hex!( + "02f86c8221058034839a4ae283021528942f16386bb37709016023232523ff6d9daf444be380841249c58bc080a001b927eda2af9b00b52a57be0885e0303c39dd2831732e14051c2336470fd468a0681bf120baf562915841a48601c2b54a6742511e535cf8f71c95115af7ff63bd" + ); + let tx_1 = OpTransactionSigned::decode_2718(&mut &tx[..]).unwrap(); + + let receipt_meta = OpReceiptFieldsBuilder::new(1730216981, 21713817) + .l1_block_info(&*BASE_MAINNET, &tx_1, &mut l1_block_info) + .expect("should parse revm l1 info") + .build(); + + let L1BlockInfo { + l1_gas_price, + l1_gas_used, + l1_fee, + l1_fee_scalar, + l1_base_fee_scalar, + l1_blob_base_fee, + l1_blob_base_fee_scalar, + operator_fee_scalar, + operator_fee_constant, + da_footprint_gas_scalar, + } = receipt_meta.l1_block_info; + + assert_eq!(l1_gas_price, Some(14121491676), "incorrect l1 base fee (former gas price)"); + assert_eq!(l1_gas_used, Some(1600), "incorrect l1 gas used"); + assert_eq!(l1_fee, Some(191150293412), "incorrect l1 fee"); + assert!(l1_fee_scalar.is_none(), "incorrect l1 fee scalar"); + assert_eq!(l1_base_fee_scalar, Some(2269), "incorrect l1 base fee scalar"); + assert_eq!(l1_blob_base_fee, Some(1324954204), "incorrect l1 blob base fee"); 
+ assert_eq!(l1_blob_base_fee_scalar, Some(1055762), "incorrect l1 blob base fee scalar"); + assert_eq!(operator_fee_scalar, None, "incorrect operator fee scalar"); + assert_eq!(operator_fee_constant, None, "incorrect operator fee constant"); + assert_eq!(da_footprint_gas_scalar, None, "incorrect da footprint gas scalar"); + } + + #[test] + fn da_footprint_gas_scalar_included_in_receipt_post_jovian() { + const DA_FOOTPRINT_GAS_SCALAR: u16 = 10; + + let tx = TxEip7702 { + chain_id: 1u64, + nonce: 0, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + gas_limit: 10, + to: Address::default(), + value: U256::from(3_u64), + input: Bytes::from(vec![1, 2]), + access_list: Default::default(), + authorization_list: Default::default(), + }; + + let signature = Signature::new(U256::default(), U256::default(), true); + + let tx = OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature); + + let mut l1_block_info = op_revm::L1BlockInfo { + da_footprint_gas_scalar: Some(DA_FOOTPRINT_GAS_SCALAR), + ..Default::default() + }; + + let op_hardforks = OpChainHardforks::op_mainnet(); + + let receipt = OpReceiptFieldsBuilder::new(OP_MAINNET_JOVIAN_TIMESTAMP, u64::MAX) + .l1_block_info(&op_hardforks, &tx, &mut l1_block_info) + .expect("should parse revm l1 info") + .build(); + + assert_eq!(receipt.l1_block_info.da_footprint_gas_scalar, Some(DA_FOOTPRINT_GAS_SCALAR)); + } + + #[test] + fn blob_gas_used_included_in_receipt_post_jovian() { + const DA_FOOTPRINT_GAS_SCALAR: u16 = 100; + let tx = TxEip7702 { + chain_id: 1u64, + nonce: 0, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + gas_limit: 10, + to: Address::default(), + value: U256::from(3_u64), + access_list: Default::default(), + authorization_list: Default::default(), + input: Bytes::from(vec![0; 1_000_000]), + }; + + let signature = Signature::new(U256::default(), U256::default(), true); + + let tx = OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature); + + let mut l1_block_info = op_revm::L1BlockInfo { + da_footprint_gas_scalar: Some(DA_FOOTPRINT_GAS_SCALAR), + ..Default::default() + }; + + let op_hardforks = OpChainHardforks::op_mainnet(); + + let op_receipt = OpReceiptBuilder::new( + &op_hardforks, + ConvertReceiptInput:: { + tx: Recovered::new_unchecked(&tx, Address::default()), + receipt: OpReceipt::Eip7702(Receipt { + status: Eip658Value::Eip658(true), + cumulative_gas_used: 100, + logs: vec![], + }), + gas_used: 100, + next_log_index: 0, + meta: TransactionMeta { + timestamp: OP_MAINNET_JOVIAN_TIMESTAMP, + ..Default::default() + }, + }, + &mut l1_block_info, + ) + .unwrap(); + + let expected_blob_gas_used = estimate_tx_compressed_size(tx.encoded_2718().as_slice()) + .saturating_div(1_000_000) + .saturating_mul(DA_FOOTPRINT_GAS_SCALAR.into()); + + assert_eq!(op_receipt.core_receipt.blob_gas_used, Some(expected_blob_gas_used)); + } + + #[test] + fn blob_gas_used_not_included_in_receipt_post_isthmus() { + const DA_FOOTPRINT_GAS_SCALAR: u16 = 100; + let tx = TxEip7702 { + chain_id: 1u64, + nonce: 0, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + gas_limit: 10, + to: Address::default(), + value: U256::from(3_u64), + access_list: Default::default(), + authorization_list: Default::default(), + input: Bytes::from(vec![0; 1_000_000]), + }; + + let signature = Signature::new(U256::default(), U256::default(), true); + + let tx = OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature); + + let mut l1_block_info = 
op_revm::L1BlockInfo { + da_footprint_gas_scalar: Some(DA_FOOTPRINT_GAS_SCALAR), + ..Default::default() + }; + + let op_hardforks = OpChainHardforks::op_mainnet(); + + let op_receipt = OpReceiptBuilder::new( + &op_hardforks, + ConvertReceiptInput:: { + tx: Recovered::new_unchecked(&tx, Address::default()), + receipt: OpReceipt::Eip7702(Receipt { + status: Eip658Value::Eip658(true), + cumulative_gas_used: 100, + logs: vec![], + }), + gas_used: 100, + next_log_index: 0, + meta: TransactionMeta { + timestamp: OP_MAINNET_ISTHMUS_TIMESTAMP, + ..Default::default() + }, + }, + &mut l1_block_info, + ) + .unwrap(); + + assert_eq!(op_receipt.core_receipt.blob_gas_used, None); + } +} diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs new file mode 100644 index 00000000..5dee6e14 --- /dev/null +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -0,0 +1,282 @@ +//! Loads and formats OP transaction RPC response. + +use crate::{OpEthApi, OpEthApiError, SequencerClient}; +use alloy_primitives::{Bytes, B256}; +use alloy_rpc_types_eth::TransactionInfo; +use futures::StreamExt; +use op_alloy_consensus::{transaction::OpTransactionInfo, OpTransaction}; +use reth_chain_state::CanonStateSubscriptions; +use reth_optimism_primitives::DepositReceipt; +use reth_primitives_traits::{ + BlockBody, Recovered, SignedTransaction, SignerRecoverable, WithEncoded, +}; +use reth_rpc_eth_api::{ + helpers::{spec::SignersForRpc, EthTransactions, LoadReceipt, LoadTransaction, SpawnBlocking}, + try_into_op_tx_info, EthApiTypes as _, FromEthApiError, FromEvmError, RpcConvert, RpcNodeCore, + RpcReceipt, TxInfoMapper, +}; +use reth_rpc_eth_types::{EthApiError, TransactionSource}; +use reth_storage_api::{errors::ProviderError, ProviderTx, ReceiptProvider, TransactionsProvider}; +use reth_transaction_pool::{ + AddedTransactionOutcome, PoolPooledTx, PoolTransaction, TransactionOrigin, TransactionPool, +}; +use std::{ + fmt::{Debug, Formatter}, + future::Future, + time::Duration, +}; +use tokio_stream::wrappers::WatchStream; + +impl EthTransactions for OpEthApi +where + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, +{ + fn signers(&self) -> &SignersForRpc { + self.inner.eth_api.signers() + } + + fn send_raw_transaction_sync_timeout(&self) -> Duration { + self.inner.eth_api.send_raw_transaction_sync_timeout() + } + + async fn send_transaction( + &self, + tx: WithEncoded>>, + ) -> Result { + let (tx, recovered) = tx.split(); + + // broadcast raw transaction to subscribers if there is any. + self.eth_api().broadcast_raw_transaction(tx.clone()); + + let pool_transaction = ::Transaction::from_pooled(recovered); + + // On optimism, transactions are forwarded directly to the sequencer to be included in + // blocks that it builds. + if let Some(client) = self.raw_tx_forwarder().as_ref() { + tracing::debug!(target: "rpc::eth", hash = %pool_transaction.hash(), "forwarding raw transaction to sequencer"); + let hash = client.forward_raw_transaction(&tx).await.inspect_err(|err| { + tracing::debug!(target: "rpc::eth", %err, hash=% *pool_transaction.hash(), "failed to forward raw transaction"); + })?; + + // Retain tx in local tx pool after forwarding, for local RPC usage. 
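+            // A failed insertion is only logged: the sequencer has already accepted the
+            // transaction, so the submitted hash is still returned to the caller.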
+ let _ = self.inner.eth_api.add_pool_transaction(pool_transaction).await.inspect_err(|err| { + tracing::warn!(target: "rpc::eth", %err, %hash, "successfully sent tx to sequencer, but failed to persist in local tx pool"); + }); + + return Ok(hash) + } + + // submit the transaction to the pool with a `Local` origin + let AddedTransactionOutcome { hash, .. } = self + .pool() + .add_transaction(TransactionOrigin::Local, pool_transaction) + .await + .map_err(Self::Error::from_eth_err)?; + + Ok(hash) + } + + /// Decodes and recovers the transaction and submits it to the pool. + /// + /// And awaits the receipt, checking both canonical blocks and flashblocks for faster + /// confirmation. + fn send_raw_transaction_sync( + &self, + tx: Bytes, + ) -> impl Future, Self::Error>> + Send { + let this = self.clone(); + let timeout_duration = self.send_raw_transaction_sync_timeout(); + async move { + let mut canonical_stream = this.provider().canonical_state_stream(); + let hash = EthTransactions::send_raw_transaction(&this, tx).await?; + let mut flashblock_stream = this.pending_block_rx().map(WatchStream::new); + + tokio::time::timeout(timeout_duration, async { + loop { + tokio::select! { + biased; + // check if the tx was preconfirmed in a new flashblock + flashblock = async { + if let Some(stream) = &mut flashblock_stream { + stream.next().await + } else { + futures::future::pending().await + } + } => { + if let Some(flashblock) = flashblock.flatten() { + // if flashblocks are supported, attempt to find id from the pending block + if let Some(receipt) = flashblock + .find_and_convert_transaction_receipt(hash, this.converter()) + { + return receipt; + } + } + } + // Listen for regular canonical block updates for inclusion + canonical_notification = canonical_stream.next() => { + if let Some(notification) = canonical_notification { + let chain = notification.committed(); + for block in chain.blocks_iter() { + if block.body().contains_transaction(&hash) + && let Some(receipt) = this.transaction_receipt(hash).await? { + return Ok(receipt); + } + } + } else { + // Canonical stream ended + break; + } + } + } + } + Err(Self::Error::from_eth_err(EthApiError::TransactionConfirmationTimeout { + hash, + duration: timeout_duration, + })) + }) + .await + .unwrap_or_else(|_elapsed| { + Err(Self::Error::from_eth_err(EthApiError::TransactionConfirmationTimeout { + hash, + duration: timeout_duration, + })) + }) + } + } + + /// Returns the transaction receipt for the given hash. + /// + /// With flashblocks, we should also lookup the pending block for the transaction + /// because this is considered confirmed/mined. 
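+    ///
+    /// Mined receipts take precedence: the flashblock pending block is only consulted when
+    /// no canonical receipt is found for the hash.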
+ fn transaction_receipt( + &self, + hash: B256, + ) -> impl Future>, Self::Error>> + Send + { + let this = self.clone(); + async move { + // first attempt to fetch the mined transaction receipt data + let tx_receipt = this.load_transaction_and_receipt(hash).await?; + + if tx_receipt.is_none() { + // if flashblocks are supported, attempt to find id from the pending block + if let Ok(Some(pending_block)) = this.pending_flashblock().await && + let Some(Ok(receipt)) = pending_block + .find_and_convert_transaction_receipt(hash, this.converter()) + { + return Ok(Some(receipt)); + } + } + let Some((tx, meta, receipt)) = tx_receipt else { return Ok(None) }; + self.build_transaction_receipt(tx, meta, receipt).await.map(Some) + } + } +} + +impl LoadTransaction for OpEthApi +where + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, +{ + async fn transaction_by_hash( + &self, + hash: B256, + ) -> Result>>, Self::Error> { + // 1. Try to find the transaction on disk (historical blocks) + if let Some((tx, meta)) = self + .spawn_blocking_io(move |this| { + this.provider() + .transaction_by_hash_with_meta(hash) + .map_err(Self::Error::from_eth_err) + }) + .await? + { + let transaction = tx + .try_into_recovered_unchecked() + .map_err(|_| EthApiError::InvalidTransactionSignature)?; + + return Ok(Some(TransactionSource::Block { + transaction, + index: meta.index, + block_hash: meta.block_hash, + block_number: meta.block_number, + base_fee: meta.base_fee, + })); + } + + // 2. check flashblocks (sequencer preconfirmations) + if let Ok(Some(pending_block)) = self.pending_flashblock().await && + let Some(indexed_tx) = pending_block.block().find_indexed(hash) + { + let meta = indexed_tx.meta(); + return Ok(Some(TransactionSource::Block { + transaction: indexed_tx.recovered_tx().cloned(), + index: meta.index, + block_hash: meta.block_hash, + block_number: meta.block_number, + base_fee: meta.base_fee, + })); + } + + // 3. check local pool + if let Some(tx) = self.pool().get(&hash).map(|tx| tx.transaction.clone_into_consensus()) { + return Ok(Some(TransactionSource::Pool(tx))); + } + + Ok(None) + } +} + +impl OpEthApi +where + N: RpcNodeCore, + Rpc: RpcConvert, +{ + /// Returns the [`SequencerClient`] if one is set. + pub fn raw_tx_forwarder(&self) -> Option { + self.inner.sequencer_client.clone() + } +} + +/// Optimism implementation of [`TxInfoMapper`]. +/// +/// For deposits, receipt is fetched to extract `deposit_nonce` and `deposit_receipt_version`. +/// Otherwise, it works like regular Ethereum implementation, i.e. uses [`TransactionInfo`]. +pub struct OpTxInfoMapper { + provider: Provider, +} + +impl Clone for OpTxInfoMapper { + fn clone(&self) -> Self { + Self { provider: self.provider.clone() } + } +} + +impl Debug for OpTxInfoMapper { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("OpTxInfoMapper").finish() + } +} + +impl OpTxInfoMapper { + /// Creates [`OpTxInfoMapper`] that uses [`ReceiptProvider`] borrowed from given `eth_api`. 
+ pub const fn new(provider: Provider) -> Self { + Self { provider } + } +} + +impl TxInfoMapper for OpTxInfoMapper +where + T: OpTransaction + SignedTransaction, + Provider: ReceiptProvider, +{ + type Out = OpTransactionInfo; + type Err = ProviderError; + + fn try_map(&self, tx: &T, tx_info: TransactionInfo) -> Result { + try_into_op_tx_info(&self.provider, tx, tx_info) + } +} diff --git a/crates/optimism/rpc/src/historical.rs b/crates/optimism/rpc/src/historical.rs new file mode 100644 index 00000000..6037da4f --- /dev/null +++ b/crates/optimism/rpc/src/historical.rs @@ -0,0 +1,482 @@ +//! Client support for optimism historical RPC requests. + +use crate::sequencer::Error; +use alloy_eips::BlockId; +use alloy_json_rpc::{RpcRecv, RpcSend}; +use alloy_primitives::{BlockNumber, B256}; +use alloy_rpc_client::RpcClient; +use jsonrpsee::BatchResponseBuilder; +use jsonrpsee_core::{ + middleware::{Batch, BatchEntry, Notification, RpcServiceT}, + server::MethodResponse, +}; +use jsonrpsee_types::{Params, Request}; +use reth_storage_api::{BlockReaderIdExt, TransactionsProvider}; +use std::{future::Future, sync::Arc}; +use tracing::{debug, warn}; + +/// A client that can be used to forward RPC requests for historical data to an endpoint. +/// +/// This is intended to be used for OP-Mainnet pre-bedrock data, allowing users to query historical +/// state. +#[derive(Debug, Clone)] +pub struct HistoricalRpcClient { + inner: Arc, +} + +impl HistoricalRpcClient { + /// Constructs a new historical RPC client with the given endpoint URL. + pub fn new(endpoint: &str) -> Result { + let client = RpcClient::new_http( + endpoint.parse::().map_err(|err| Error::InvalidUrl(err.to_string()))?, + ); + + Ok(Self { + inner: Arc::new(HistoricalRpcClientInner { + historical_endpoint: endpoint.to_string(), + client, + }), + }) + } + + /// Returns a reference to the underlying RPC client + fn client(&self) -> &RpcClient { + &self.inner.client + } + + /// Forwards a JSON-RPC request to the historical endpoint + pub async fn request( + &self, + method: &str, + params: Params, + ) -> Result { + let resp = + self.client().request::(method.to_string(), params).await.inspect_err( + |err| { + warn!( + target: "rpc::historical", + %err, + "HTTP request to historical endpoint failed" + ); + }, + )?; + + Ok(resp) + } + + /// Returns the configured historical endpoint URL + pub fn endpoint(&self) -> &str { + &self.inner.historical_endpoint + } +} + +#[derive(Debug)] +struct HistoricalRpcClientInner { + historical_endpoint: String, + client: RpcClient, +} + +/// A layer that provides historical RPC forwarding functionality for a given service. +#[derive(Debug, Clone)] +pub struct HistoricalRpc
<P> {
+    inner: Arc<HistoricalRpcInner<P>>,
+}
+
+impl<P> HistoricalRpc<P>
{ + /// Constructs a new historical RPC layer with the given provider, client and bedrock block + /// number. + pub fn new(provider: P, client: HistoricalRpcClient, bedrock_block: BlockNumber) -> Self { + let inner = Arc::new(HistoricalRpcInner { provider, client, bedrock_block }); + + Self { inner } + } +} + +impl tower::Layer for HistoricalRpc
<P>
{ + type Service = HistoricalRpcService; + + fn layer(&self, inner: S) -> Self::Service { + HistoricalRpcService::new(inner, self.inner.clone()) + } +} + +/// A service that intercepts RPC calls and forwards pre-bedrock historical requests +/// to a dedicated endpoint. +/// +/// This checks if the request is for a pre-bedrock block and forwards it via the configured +/// historical RPC client. +#[derive(Debug, Clone)] +pub struct HistoricalRpcService { + /// The inner service that handles regular RPC requests + inner: S, + /// The context required to forward historical requests. + historical: Arc>, +} + +impl HistoricalRpcService { + /// Constructs a new historical RPC service with the given inner service, historical client, + /// provider, and bedrock block number. + const fn new(inner: S, historical: Arc>) -> Self { + Self { inner, historical } + } +} + +impl RpcServiceT for HistoricalRpcService +where + S: RpcServiceT< + MethodResponse = MethodResponse, + BatchResponse = MethodResponse, + NotificationResponse = MethodResponse, + > + Send + + Sync + + Clone + + 'static, + P: BlockReaderIdExt + TransactionsProvider + Send + Sync + Clone + 'static, +{ + type MethodResponse = S::MethodResponse; + type NotificationResponse = S::NotificationResponse; + type BatchResponse = S::BatchResponse; + + fn call<'a>(&self, req: Request<'a>) -> impl Future + Send + 'a { + let inner_service = self.inner.clone(); + let historical = self.historical.clone(); + + Box::pin(async move { + // Check if request should be forwarded to historical endpoint + if let Some(response) = historical.maybe_forward_request(&req).await { + return response + } + + // Handle the request with the inner service + inner_service.call(req).await + }) + } + + fn batch<'a>( + &self, + mut req: Batch<'a>, + ) -> impl Future + Send + 'a { + let this = self.clone(); + let historical = self.historical.clone(); + + async move { + let mut needs_forwarding = false; + for entry in req.iter_mut() { + if let Ok(BatchEntry::Call(call)) = entry && + historical.should_forward_request(call) + { + needs_forwarding = true; + break; + } + } + + if !needs_forwarding { + // no call needs to be forwarded and we can simply perform this batch request + return this.inner.batch(req).await; + } + + // the entire response is checked above so we can assume that these don't exceed + let mut batch_rp = BatchResponseBuilder::new_with_limit(usize::MAX); + let mut got_notification = false; + + for batch_entry in req { + match batch_entry { + Ok(BatchEntry::Call(req)) => { + let rp = this.call(req).await; + if let Err(err) = batch_rp.append(rp) { + return err; + } + } + Ok(BatchEntry::Notification(n)) => { + got_notification = true; + this.notification(n).await; + } + Err(err) => { + let (err, id) = err.into_parts(); + let rp = MethodResponse::error(id, err); + if let Err(err) = batch_rp.append(rp) { + return err; + } + } + } + } + + // If the batch is empty and we got a notification, we return an empty response. + if batch_rp.is_empty() && got_notification { + MethodResponse::notification() + } + // An empty batch is regarded as an invalid request here. + else { + MethodResponse::from_batch(batch_rp.finish()) + } + } + } + + fn notification<'a>( + &self, + n: Notification<'a>, + ) -> impl Future + Send + 'a { + self.inner.notification(n) + } +} + +#[derive(Debug)] +struct HistoricalRpcInner
<P>
{ + /// Provider used to determine if a block is pre-bedrock + provider: P, + /// Client used to forward historical requests + client: HistoricalRpcClient, + /// Bedrock transition block number + bedrock_block: BlockNumber, +} + +impl
<P> HistoricalRpcInner<P>
+where + P: BlockReaderIdExt + TransactionsProvider + Send + Sync + Clone, +{ + /// Checks if a request should be forwarded to the historical endpoint (synchronous check). + fn should_forward_request(&self, req: &Request<'_>) -> bool { + match req.method_name() { + "debug_traceTransaction" | + "eth_getTransactionByHash" | + "eth_getTransactionReceipt" | + "eth_getRawTransactionByHash" => self.should_forward_transaction(req), + method => self.should_forward_block_request(method, req), + } + } + + /// Checks if a request should be forwarded to the historical endpoint and returns + /// the response if it was forwarded. + async fn maybe_forward_request(&self, req: &Request<'_>) -> Option { + if self.should_forward_request(req) { + return self.forward_to_historical(req).await + } + None + } + + /// Determines if a transaction request should be forwarded + fn should_forward_transaction(&self, req: &Request<'_>) -> bool { + parse_transaction_hash_from_params(&req.params()) + .ok() + .map(|tx_hash| { + // Check if we can find the transaction locally and get its metadata + match self.provider.transaction_by_hash_with_meta(tx_hash) { + Ok(Some((_, meta))) => { + // Transaction found - check if it's pre-bedrock based on block number + let is_pre_bedrock = meta.block_number < self.bedrock_block; + if is_pre_bedrock { + debug!( + target: "rpc::historical", + ?tx_hash, + block_num = meta.block_number, + bedrock = self.bedrock_block, + "transaction found in pre-bedrock block, forwarding to historical endpoint" + ); + } + is_pre_bedrock + } + _ => { + // Transaction not found locally, optimistically forward to historical endpoint + debug!( + target: "rpc::historical", + ?tx_hash, + "transaction not found locally, forwarding to historical endpoint" + ); + true + } + } + }) + .unwrap_or(false) + } + + /// Determines if a block-based request should be forwarded + fn should_forward_block_request(&self, method: &str, req: &Request<'_>) -> bool { + let maybe_block_id = extract_block_id_for_method(method, &req.params()); + + maybe_block_id.map(|block_id| self.is_pre_bedrock(block_id)).unwrap_or(false) + } + + /// Checks if a block ID refers to a pre-bedrock block + fn is_pre_bedrock(&self, block_id: BlockId) -> bool { + match self.provider.block_number_for_id(block_id) { + Ok(Some(num)) => { + debug!( + target: "rpc::historical", + ?block_id, + block_num=num, + bedrock=self.bedrock_block, + "found block number" + ); + num < self.bedrock_block + } + Ok(None) if block_id.is_hash() => { + debug!( + target: "rpc::historical", + ?block_id, + "block hash not found locally, assuming pre-bedrock" + ); + true + } + _ => { + debug!( + target: "rpc::historical", + ?block_id, + "could not determine block number; not forwarding" + ); + false + } + } + } + + /// Forwards a request to the historical endpoint + async fn forward_to_historical(&self, req: &Request<'_>) -> Option { + debug!( + target: "rpc::historical", + method = %req.method_name(), + params=?req.params(), + "forwarding request to historical endpoint" + ); + + let params = req.params(); + let params_str = params.as_str().unwrap_or("[]"); + + let params = serde_json::from_str::(params_str).ok()?; + + let raw = + self.client.request::<_, serde_json::Value>(req.method_name(), params).await.ok()?; + + let payload = jsonrpsee_types::ResponsePayload::success(raw).into(); + Some(MethodResponse::response(req.id.clone(), payload, usize::MAX)) + } +} + +/// Error type for parameter parsing +#[derive(Debug)] +enum ParseError { + InvalidFormat, + MissingParameter, +} + +/// 
Extracts the block ID from request parameters based on the method name +fn extract_block_id_for_method(method: &str, params: &Params<'_>) -> Option { + match method { + "eth_getBlockByNumber" | + "eth_getBlockByHash" | + "debug_traceBlockByNumber" | + "debug_traceBlockByHash" => parse_block_id_from_params(params, 0), + "eth_getBalance" | + "eth_getCode" | + "eth_getTransactionCount" | + "eth_call" | + "eth_estimateGas" | + "eth_createAccessList" | + "debug_traceCall" => parse_block_id_from_params(params, 1), + "eth_getStorageAt" | "eth_getProof" => parse_block_id_from_params(params, 2), + _ => None, + } +} + +/// Parses a `BlockId` from the given parameters at the specified position. +fn parse_block_id_from_params(params: &Params<'_>, position: usize) -> Option { + let values: Vec = params.parse().ok()?; + let val = values.into_iter().nth(position)?; + serde_json::from_value::(val).ok() +} + +/// Parses a transaction hash from the first parameter. +fn parse_transaction_hash_from_params(params: &Params<'_>) -> Result { + let values: Vec = params.parse().map_err(|_| ParseError::InvalidFormat)?; + let val = values.into_iter().next().ok_or(ParseError::MissingParameter)?; + serde_json::from_value::(val).map_err(|_| ParseError::InvalidFormat) +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_eips::{BlockId, BlockNumberOrTag}; + use jsonrpsee::types::Params; + use jsonrpsee_core::middleware::layer::Either; + use reth_node_builder::rpc::RethRpcMiddleware; + use reth_storage_api::noop::NoopProvider; + use tower::layer::util::Identity; + + #[test] + fn check_historical_rpc() { + fn assert_historical_rpc() {} + assert_historical_rpc::>(); + assert_historical_rpc::, Identity>>(); + } + + /// Tests that various valid id types can be parsed from the first parameter. + #[test] + fn parses_block_id_from_first_param() { + // Test with a block number + let params_num = Params::new(Some(r#"["0x64"]"#)); // 100 + assert_eq!( + parse_block_id_from_params(¶ms_num, 0).unwrap(), + BlockId::Number(BlockNumberOrTag::Number(100)) + ); + + // Test with the "earliest" tag + let params_tag = Params::new(Some(r#"["earliest"]"#)); + assert_eq!( + parse_block_id_from_params(¶ms_tag, 0).unwrap(), + BlockId::Number(BlockNumberOrTag::Earliest) + ); + } + + /// Tests that the function correctly parses from a position other than 0. + #[test] + fn parses_block_id_from_second_param() { + let params = + Params::new(Some(r#"["0x0000000000000000000000000000000000000000", "latest"]"#)); + let result = parse_block_id_from_params(¶ms, 1).unwrap(); + assert_eq!(result, BlockId::Number(BlockNumberOrTag::Latest)); + } + + /// Tests that the function returns nothing if the parameter is missing or empty. + #[test] + fn defaults_to_latest_when_param_is_missing() { + let params = Params::new(Some(r#"["0x0000000000000000000000000000000000000000"]"#)); + let result = parse_block_id_from_params(¶ms, 1); + assert!(result.is_none()); + } + + /// Tests that the function doesn't parse anything if the parameter is not a valid block id. + #[test] + fn returns_error_for_invalid_input() { + let params = Params::new(Some(r#"[true]"#)); + let result = parse_block_id_from_params(¶ms, 0); + assert!(result.is_none()); + } + + /// Tests that transaction hashes can be parsed from params. 
+ #[test] + fn parses_transaction_hash_from_params() { + let hash = "0xdbdfa0f88b2cf815fdc1621bd20c2bd2b0eed4f0c56c9be2602957b5a60ec702"; + let params_str = format!(r#"["{hash}"]"#); + let params = Params::new(Some(¶ms_str)); + let result = parse_transaction_hash_from_params(¶ms); + assert!(result.is_ok()); + let parsed_hash = result.unwrap(); + assert_eq!(format!("{parsed_hash:?}"), hash); + } + + /// Tests that invalid transaction hash returns error. + #[test] + fn returns_error_for_invalid_tx_hash() { + let params = Params::new(Some(r#"["not_a_hash"]"#)); + let result = parse_transaction_hash_from_params(¶ms); + assert!(result.is_err()); + assert!(matches!(result.unwrap_err(), ParseError::InvalidFormat)); + } + + /// Tests that missing parameter returns appropriate error. + #[test] + fn returns_error_for_missing_parameter() { + let params = Params::new(Some(r#"[]"#)); + let result = parse_transaction_hash_from_params(¶ms); + assert!(result.is_err()); + assert!(matches!(result.unwrap_err(), ParseError::MissingParameter)); + } +} diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs new file mode 100644 index 00000000..08233550 --- /dev/null +++ b/crates/optimism/rpc/src/lib.rs @@ -0,0 +1,28 @@ +//! OP-Reth RPC support. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg))] + +pub mod debug; +pub mod engine; +pub mod error; +pub mod eth; +pub mod historical; +pub mod metrics; +pub mod miner; +pub mod sequencer; +pub mod state; +pub mod witness; + +#[cfg(feature = "client")] +pub use engine::OpEngineApiClient; +pub use engine::{OpEngineApi, OpEngineApiServer, OP_ENGINE_CAPABILITIES}; +pub use error::{OpEthApiError, OpInvalidTransactionError, SequencerClientError}; +pub use eth::{OpEthApi, OpEthApiBuilder, OpReceiptBuilder}; +pub use metrics::{EthApiExtMetrics, SequencerMetrics}; +pub use sequencer::SequencerClient; diff --git a/crates/optimism/rpc/src/metrics.rs b/crates/optimism/rpc/src/metrics.rs new file mode 100644 index 00000000..17ac94f6 --- /dev/null +++ b/crates/optimism/rpc/src/metrics.rs @@ -0,0 +1,135 @@ +//! RPC metrics unique for OP-stack. 
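+//!
+//! Covers sequencer forwarding latency, `eth_getProof` extension counters, and per-call
+//! debug API metrics.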
+
+use alloy_primitives::map::HashMap;
+use core::time::Duration;
+use metrics::{Counter, Histogram};
+use reth_metrics::Metrics;
+use std::time::Instant;
+use strum::{EnumCount, EnumIter, IntoEnumIterator};
+
+/// Optimism sequencer metrics
+#[derive(Metrics, Clone)]
+#[metrics(scope = "optimism_rpc.sequencer")]
+pub struct SequencerMetrics {
+    /// How long it takes to forward a transaction to the sequencer
+    pub(crate) sequencer_forward_latency: Histogram,
+}
+
+impl SequencerMetrics {
+    /// Records the duration it took to forward a transaction
+    #[inline]
+    pub fn record_forward_latency(&self, duration: Duration) {
+        self.sequencer_forward_latency.record(duration.as_secs_f64());
+    }
+}
+
+/// Optimism ETH API extension metrics
+#[derive(Metrics, Clone)]
+#[metrics(scope = "optimism_rpc.eth_api_ext")]
+pub struct EthApiExtMetrics {
+    /// How long it takes to handle an `eth_getProof` request successfully
+    pub(crate) get_proof_latency: Histogram,
+
+    /// Total number of `eth_getProof` requests
+    pub(crate) get_proof_requests: Counter,
+
+    /// Total number of successful `eth_getProof` responses
+    pub(crate) get_proof_successful_responses: Counter,
+
+    /// Total number of failures handling `eth_getProof` requests
+    pub(crate) get_proof_failures: Counter,
+}
+
+/// Types of debug APIs
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, EnumCount, EnumIter)]
+pub enum DebugApis {
+    /// `DebugExecutePayload` API
+    DebugExecutePayload,
+    /// `DebugExecutionWitness` API
+    DebugExecutionWitness,
+}
+
+impl DebugApis {
+    /// Returns the operation as a string for metrics labels.
+    pub const fn as_str(&self) -> &'static str {
+        match self {
+            Self::DebugExecutePayload => "debug_execute_payload",
+            Self::DebugExecutionWitness => "debug_execution_witness",
+        }
+    }
+}
+
+/// Metrics for Debug API extension calls.
+#[derive(Debug)]
+pub struct DebugApiExtMetrics {
+    /// Per-api metrics handles
+    apis: HashMap<DebugApis, DebugApiExtRpcMetrics>,
+}
+
+impl DebugApiExtMetrics {
+    /// Initializes a new `DebugApiExtMetrics`
+    pub fn new() -> Self {
+        let mut apis = HashMap::default();
+        for api in DebugApis::iter() {
+            apis.insert(api, DebugApiExtRpcMetrics::new_with_labels(&[("api", api.as_str())]));
+        }
+        Self { apis }
+    }
+
+    /// Records a Debug API call async (tracks latency, requests, success, failures).
+    pub async fn record_operation_async<F, T, E>(&self, api: DebugApis, f: F) -> Result<T, E>
+    where
+        F: Future<Output = Result<T, E>>,
+    {
+        if let Some(metrics) = self.apis.get(&api) {
+            metrics.record_async(f).await
+        } else {
+            f.await
+        }
+    }
+}
+
+impl Default for DebugApiExtMetrics {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+/// Optimism Debug API extension metrics
+#[derive(Metrics, Clone)]
+#[metrics(scope = "optimism_rpc.debug_api_ext")]
+pub struct DebugApiExtRpcMetrics {
+    /// End-to-end time to handle this API call
+    pub(crate) latency: Histogram,
+
+    /// Total number of requests for this API
+    pub(crate) requests: Counter,
+
+    /// Total number of successful responses for this API
+    pub(crate) successful_responses: Counter,
+
+    /// Total number of failures for this API
+    pub(crate) failures: Counter,
+}
+
+impl DebugApiExtRpcMetrics {
+    /// Records an async RPC API call, tracking latency, request count, and success/failure.
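+    ///
+    /// A minimal usage sketch (not part of this diff; the wrapped future and error type below
+    /// are hypothetical), going through the public
+    /// [`DebugApiExtMetrics::record_operation_async`] entry point:
+    ///
+    /// ```ignore
+    /// let metrics = DebugApiExtMetrics::new();
+    /// let result: Result<u64, String> = metrics
+    ///     .record_operation_async(DebugApis::DebugExecutePayload, async { Ok(42u64) })
+    ///     .await;
+    /// // Latency, request, and success counters for the "debug_execute_payload" label
+    /// // are now updated.
+    /// ```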
+    async fn record_async<F, T, E>(&self, f: F) -> Result<T, E>
+    where
+        F: Future<Output = Result<T, E>>,
+    {
+        let start = Instant::now();
+        let result = f.await;
+
+        self.latency.record(start.elapsed().as_secs_f64());
+        self.requests.increment(1);
+
+        if result.is_ok() {
+            self.successful_responses.increment(1);
+        } else {
+            self.failures.increment(1);
+        }
+
+        result
+    }
+}
diff --git a/crates/optimism/rpc/src/miner.rs b/crates/optimism/rpc/src/miner.rs
new file mode 100644
index 00000000..f8780f37
--- /dev/null
+++ b/crates/optimism/rpc/src/miner.rs
@@ -0,0 +1,78 @@
+//! Miner API extension for OP.
+
+use alloy_primitives::U64;
+use jsonrpsee_core::{async_trait, RpcResult};
+pub use op_alloy_rpc_jsonrpsee::traits::MinerApiExtServer;
+use reth_metrics::{metrics::Gauge, Metrics};
+use reth_optimism_payload_builder::config::{OpDAConfig, OpGasLimitConfig};
+use tracing::debug;
+
+/// Miner API extension for OP, exposes settings for the data availability configuration via the
+/// `miner_` API.
+#[derive(Debug, Clone)]
+pub struct OpMinerExtApi {
+    da_config: OpDAConfig,
+    gas_limit_config: OpGasLimitConfig,
+    metrics: OpMinerMetrics,
+}
+
+impl OpMinerExtApi {
+    /// Instantiates the miner API extension with the given, shareable data availability
+    /// configuration.
+    pub fn new(da_config: OpDAConfig, gas_limit_config: OpGasLimitConfig) -> Self {
+        Self { da_config, gas_limit_config, metrics: OpMinerMetrics::default() }
+    }
+}
+
+#[async_trait]
+impl MinerApiExtServer for OpMinerExtApi {
+    /// Handler for `miner_setMaxDASize` RPC method.
+    async fn set_max_da_size(&self, max_tx_size: U64, max_block_size: U64) -> RpcResult<bool> {
+        debug!(target: "rpc", "Setting max DA size: tx={}, block={}", max_tx_size, max_block_size);
+        self.da_config.set_max_da_size(max_tx_size.to(), max_block_size.to());
+
+        self.metrics.set_max_da_tx_size(max_tx_size.to());
+        self.metrics.set_max_da_block_size(max_block_size.to());
+
+        Ok(true)
+    }
+
+    async fn set_gas_limit(&self, gas_limit: U64) -> RpcResult<bool> {
+        debug!(target: "rpc", "Setting gas limit: {}", gas_limit);
+        self.gas_limit_config.set_gas_limit(gas_limit.to());
+        self.metrics.set_gas_limit(gas_limit.to());
+        Ok(true)
+    }
+}
+
+/// Optimism miner metrics
+#[derive(Metrics, Clone)]
+#[metrics(scope = "optimism_rpc.miner")]
+pub struct OpMinerMetrics {
+    /// Max DA tx size set on the miner
+    max_da_tx_size: Gauge,
+    /// Max DA block size set on the miner
+    max_da_block_size: Gauge,
+    /// Gas limit set on the miner
+    gas_limit: Gauge,
+}
+
+impl OpMinerMetrics {
+    /// Sets the max DA tx size gauge value
+    #[inline]
+    pub fn set_max_da_tx_size(&self, size: u64) {
+        self.max_da_tx_size.set(size as f64);
+    }
+
+    /// Sets the max DA block size gauge value
+    #[inline]
+    pub fn set_max_da_block_size(&self, size: u64) {
+        self.max_da_block_size.set(size as f64);
+    }
+
+    /// Sets the gas limit gauge value
+    #[inline]
+    pub fn set_gas_limit(&self, gas_limit: u64) {
+        self.gas_limit.set(gas_limit as f64);
+    }
+}
diff --git a/crates/optimism/rpc/src/sequencer.rs b/crates/optimism/rpc/src/sequencer.rs
new file mode 100644
index 00000000..86ed000e
--- /dev/null
+++ b/crates/optimism/rpc/src/sequencer.rs
@@ -0,0 +1,282 @@
+//! Helpers for Optimism-specific RPC implementations.
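+//!
+//! A minimal connection sketch (the endpoint URL and transaction bytes below are placeholders):
+//! the client forwards raw transactions received locally on to the sequencer.
+//!
+//! ```ignore
+//! let client = SequencerClient::new("http://sequencer.example.com:8545").await?;
+//! let tx_hash = client.forward_raw_transaction(&raw_tx_bytes).await?;
+//! ```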
+ +use crate::{SequencerClientError, SequencerMetrics}; +use alloy_json_rpc::{RpcRecv, RpcSend}; +use alloy_primitives::{hex, B256}; +use alloy_rpc_client::{BuiltInConnectionString, ClientBuilder, RpcClient as Client}; +use alloy_rpc_types_eth::erc4337::TransactionConditional; +use alloy_transport_http::Http; +use std::{str::FromStr, sync::Arc, time::Instant}; +use thiserror::Error; +use tracing::warn; + +/// Sequencer client error +#[derive(Error, Debug)] +pub enum Error { + /// Invalid scheme + #[error("Invalid scheme of sequencer url: {0}")] + InvalidScheme(String), + /// Invalid header or value provided. + #[error("Invalid header: {0}")] + InvalidHeader(String), + /// Invalid url + #[error("Invalid sequencer url: {0}")] + InvalidUrl(String), + /// Establishing a connection to the sequencer endpoint resulted in an error. + #[error("Failed to connect to sequencer: {0}")] + TransportError( + #[from] + #[source] + alloy_transport::TransportError, + ), + /// Reqwest failed to init client + #[error("Failed to init reqwest client for sequencer: {0}")] + ReqwestError( + #[from] + #[source] + reqwest::Error, + ), +} + +/// A client to interact with a Sequencer +#[derive(Debug, Clone)] +pub struct SequencerClient { + inner: Arc, +} + +impl SequencerClientInner { + /// Creates a new instance with the given endpoint and client. + pub(crate) fn new(sequencer_endpoint: String, client: Client) -> Self { + let metrics = SequencerMetrics::default(); + Self { sequencer_endpoint, client, metrics } + } +} + +impl SequencerClient { + /// Creates a new [`SequencerClient`] for the given URL. + /// + /// If the URL is a websocket endpoint we connect a websocket instance. + pub async fn new(sequencer_endpoint: impl Into) -> Result { + Self::new_with_headers(sequencer_endpoint, Default::default()).await + } + + /// Creates a new `SequencerClient` for the given URL with the given headers + /// + /// This expects headers in the form: `header=value` + pub async fn new_with_headers( + sequencer_endpoint: impl Into, + headers: Vec, + ) -> Result { + let sequencer_endpoint = sequencer_endpoint.into(); + let endpoint = BuiltInConnectionString::from_str(&sequencer_endpoint)?; + if let BuiltInConnectionString::Http(url) = endpoint { + let mut builder = reqwest::Client::builder() + // we force use tls to prevent native issues + .use_rustls_tls(); + + if !headers.is_empty() { + let mut header_map = reqwest::header::HeaderMap::new(); + for header in headers { + if let Some((key, value)) = header.split_once('=') { + header_map.insert( + key.trim() + .parse::() + .map_err(|err| Error::InvalidHeader(err.to_string()))?, + value + .trim() + .parse::() + .map_err(|err| Error::InvalidHeader(err.to_string()))?, + ); + } + } + builder = builder.default_headers(header_map); + } + + let client = builder.build()?; + Self::with_http_client(url, client) + } else { + let client = ClientBuilder::default().connect_with(endpoint).await?; + let inner = SequencerClientInner::new(sequencer_endpoint, client); + Ok(Self { inner: Arc::new(inner) }) + } + } + + /// Creates a new [`SequencerClient`] with http transport with the given http client. 
+ pub fn with_http_client( + sequencer_endpoint: impl Into, + client: reqwest::Client, + ) -> Result { + let sequencer_endpoint: String = sequencer_endpoint.into(); + let url = sequencer_endpoint + .parse() + .map_err(|_| Error::InvalidUrl(sequencer_endpoint.clone()))?; + + let http_client = Http::with_client(client, url); + let is_local = http_client.guess_local(); + let client = ClientBuilder::default().transport(http_client, is_local); + + let inner = SequencerClientInner::new(sequencer_endpoint, client); + Ok(Self { inner: Arc::new(inner) }) + } + + /// Returns the network of the client + pub fn endpoint(&self) -> &str { + &self.inner.sequencer_endpoint + } + + /// Returns the client + pub fn client(&self) -> &Client { + &self.inner.client + } + + /// Returns a reference to the [`SequencerMetrics`] for tracking client metrics. + fn metrics(&self) -> &SequencerMetrics { + &self.inner.metrics + } + + /// Sends a [`alloy_rpc_client::RpcCall`] request to the sequencer endpoint. + pub async fn request( + &self, + method: &str, + params: Params, + ) -> Result { + let resp = + self.client().request::(method.to_string(), params).await.inspect_err( + |err| { + warn!( + target: "rpc::sequencer", + %err, + "HTTP request to sequencer failed", + ); + }, + )?; + Ok(resp) + } + + /// Forwards a transaction to the sequencer endpoint. + pub async fn forward_raw_transaction(&self, tx: &[u8]) -> Result { + let start = Instant::now(); + let rlp_hex = hex::encode_prefixed(tx); + let tx_hash = + self.request("eth_sendRawTransaction", (rlp_hex,)).await.inspect_err(|err| { + warn!( + target: "rpc::eth", + %err, + "Failed to forward transaction to sequencer", + ); + })?; + self.metrics().record_forward_latency(start.elapsed()); + Ok(tx_hash) + } + + /// Forwards a transaction conditional to the sequencer endpoint. 
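+    ///
+    /// This maps to the sequencer's `eth_sendRawTransactionConditional` endpoint. A hedged
+    /// usage sketch (the transaction bytes and condition are placeholders):
+    ///
+    /// ```ignore
+    /// let condition = TransactionConditional::default();
+    /// let hash = client.forward_raw_transaction_conditional(&raw_tx, condition).await?;
+    /// ```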
+ pub async fn forward_raw_transaction_conditional( + &self, + tx: &[u8], + condition: TransactionConditional, + ) -> Result { + let start = Instant::now(); + let rlp_hex = hex::encode_prefixed(tx); + let tx_hash = self + .request("eth_sendRawTransactionConditional", (rlp_hex, condition)) + .await + .inspect_err(|err| { + warn!( + target: "rpc::eth", + %err, + "Failed to forward transaction conditional for sequencer", + ); + })?; + self.metrics().record_forward_latency(start.elapsed()); + Ok(tx_hash) + } +} + +#[derive(Debug)] +struct SequencerClientInner { + /// The endpoint of the sequencer + sequencer_endpoint: String, + /// The client + client: Client, + // Metrics for tracking sequencer forwarding + metrics: SequencerMetrics, +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::U64; + + #[tokio::test] + async fn test_http_body_str() { + let client = SequencerClient::new("http://localhost:8545").await.unwrap(); + + let request = client + .client() + .make_request("eth_getBlockByNumber", (U64::from(10),)) + .serialize() + .unwrap() + .take_request(); + let body = request.get(); + + assert_eq!( + body, + r#"{"method":"eth_getBlockByNumber","params":["0xa"],"id":0,"jsonrpc":"2.0"}"# + ); + + let condition = TransactionConditional::default(); + + let request = client + .client() + .make_request( + "eth_sendRawTransactionConditional", + (format!("0x{}", hex::encode("abcd")), condition), + ) + .serialize() + .unwrap() + .take_request(); + let body = request.get(); + + assert_eq!( + body, + r#"{"method":"eth_sendRawTransactionConditional","params":["0x61626364",{"knownAccounts":{}}],"id":1,"jsonrpc":"2.0"}"# + ); + } + + #[tokio::test] + #[ignore = "Start if WS is reachable at ws://localhost:8546"] + async fn test_ws_body_str() { + let client = SequencerClient::new("ws://localhost:8546").await.unwrap(); + + let request = client + .client() + .make_request("eth_getBlockByNumber", (U64::from(10),)) + .serialize() + .unwrap() + .take_request(); + let body = request.get(); + + assert_eq!( + body, + r#"{"method":"eth_getBlockByNumber","params":["0xa"],"id":0,"jsonrpc":"2.0"}"# + ); + + let condition = TransactionConditional::default(); + + let request = client + .client() + .make_request( + "eth_sendRawTransactionConditional", + (format!("0x{}", hex::encode("abcd")), condition), + ) + .serialize() + .unwrap() + .take_request(); + let body = request.get(); + + assert_eq!( + body, + r#"{"method":"eth_sendRawTransactionConditional","params":["0x61626364",{"knownAccounts":{}}],"id":1,"jsonrpc":"2.0"}"# + ); + } +} diff --git a/crates/optimism/rpc/src/state.rs b/crates/optimism/rpc/src/state.rs new file mode 100644 index 00000000..a4b683f6 --- /dev/null +++ b/crates/optimism/rpc/src/state.rs @@ -0,0 +1,64 @@ +//! State provider factory for OP Proofs ExEx. + +use alloy_eips::BlockId; +use derive_more::Constructor; +use jsonrpsee_types::error::ErrorObject; +use reth_optimism_trie::{provider::OpProofsStateProviderRef, OpProofsStorage, OpProofsStore}; +use reth_provider::{BlockIdReader, ProviderError, ProviderResult, StateProvider}; +use reth_rpc_api::eth::helpers::FullEthApi; +use reth_rpc_eth_types::EthApiError; + +/// Creates a factory for state providers using OP Proofs external proofs storage. +#[derive(Debug, Constructor)] +pub struct OpStateProviderFactory { + eth_api: Eth, + preimage_store: OpProofsStorage
<P>
, +} + +impl<'a, Eth, P> OpStateProviderFactory +where + Eth: FullEthApi + Send + Sync + 'static, + ErrorObject<'static>: From, + P: OpProofsStore + Clone + 'a, +{ + /// Creates a state provider for the given block id. + pub async fn state_provider( + &'a self, + block_id: Option, + ) -> ProviderResult> { + let block_id = block_id.unwrap_or_default(); + // Check whether the distance to the block exceeds the maximum configured window. + let block_number = self + .eth_api + .provider() + .block_number_for_id(block_id)? + .ok_or(EthApiError::HeaderNotFound(block_id)) + .map_err(ProviderError::other)?; + + let historical_provider = + self.eth_api.state_at_block_id(block_id).await.map_err(ProviderError::other)?; + + let (Some((latest_block_number, _)), Some((earliest_block_number, _))) = ( + self.preimage_store + .get_latest_block_number() + .await + .map_err(|e| ProviderError::Database(e.into()))?, + self.preimage_store + .get_earliest_block_number() + .await + .map_err(|e| ProviderError::Database(e.into()))?, + ) else { + // if no earliest block, db is empty - use historical provider + return Ok(historical_provider); + }; + + if block_number < earliest_block_number || block_number > latest_block_number { + return Ok(historical_provider); + } + + let external_overlay_provider = + OpProofsStateProviderRef::new(historical_provider, &self.preimage_store, block_number); + + Ok(Box::new(external_overlay_provider)) + } +} diff --git a/crates/optimism/rpc/src/witness.rs b/crates/optimism/rpc/src/witness.rs new file mode 100644 index 00000000..1858b4fd --- /dev/null +++ b/crates/optimism/rpc/src/witness.rs @@ -0,0 +1,121 @@ +//! Support for optimism specific witness RPCs. + +use alloy_primitives::B256; +use alloy_rpc_types_debug::ExecutionWitness; +use jsonrpsee_core::{async_trait, RpcResult}; +use reth_chainspec::ChainSpecProvider; +use reth_evm::ConfigureEvm; +use reth_node_api::{BuildNextEnv, NodePrimitives}; +use reth_optimism_forks::OpHardforks; +use reth_optimism_payload_builder::{OpAttributes, OpPayloadBuilder, OpPayloadPrimitives}; +use reth_optimism_txpool::OpPooledTx; +use reth_primitives_traits::{SealedHeader, TxTy}; +pub use reth_rpc_api::DebugExecutionWitnessApiServer; +use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; +use reth_storage_api::{ + errors::{ProviderError, ProviderResult}, + BlockReaderIdExt, NodePrimitivesProvider, StateProviderFactory, +}; +use reth_tasks::TaskSpawner; +use reth_transaction_pool::TransactionPool; +use std::{fmt::Debug, sync::Arc}; +use tokio::sync::{oneshot, Semaphore}; + +/// An extension to the `debug_` namespace of the RPC API. +pub struct OpDebugWitnessApi { + inner: Arc>, +} + +impl OpDebugWitnessApi { + /// Creates a new instance of the `OpDebugWitnessApi`. + pub fn new( + provider: Provider, + task_spawner: Box, + builder: OpPayloadBuilder, + ) -> Self { + let semaphore = Arc::new(Semaphore::new(3)); + let inner = OpDebugWitnessApiInner { provider, builder, task_spawner, semaphore }; + Self { inner: Arc::new(inner) } + } +} + +impl OpDebugWitnessApi +where + EvmConfig: ConfigureEvm, + Provider: NodePrimitivesProvider> + + BlockReaderIdExt, +{ + /// Fetches the parent header by hash. + fn parent_header( + &self, + parent_block_hash: B256, + ) -> ProviderResult> { + self.inner + .provider + .sealed_header_by_hash(parent_block_hash)? 
+ .ok_or_else(|| ProviderError::HeaderNotFound(parent_block_hash.into())) + } +} + +#[async_trait] +impl DebugExecutionWitnessApiServer + for OpDebugWitnessApi +where + Pool: TransactionPool< + Transaction: OpPooledTx::SignedTx>, + > + 'static, + Provider: BlockReaderIdExt
<Header = <Provider::Primitives as NodePrimitives>
::BlockHeader> + + NodePrimitivesProvider + + StateProviderFactory + + ChainSpecProvider + + Clone + + 'static, + EvmConfig: ConfigureEvm< + Primitives = Provider::Primitives, + NextBlockEnvCtx: BuildNextEnv, + > + 'static, + Attrs: OpAttributes>, +{ + async fn execute_payload( + &self, + parent_block_hash: B256, + attributes: Attrs::RpcPayloadAttributes, + ) -> RpcResult { + let _permit = self.inner.semaphore.acquire().await; + + let parent_header = self.parent_header(parent_block_hash).to_rpc_result()?; + + let (tx, rx) = oneshot::channel(); + let this = self.clone(); + self.inner.task_spawner.spawn_blocking(Box::pin(async move { + let res = this.inner.builder.payload_witness(parent_header, attributes); + let _ = tx.send(res); + })); + + rx.await + .map_err(|err| internal_rpc_err(err.to_string()))? + .map_err(|err| internal_rpc_err(err.to_string())) + } +} + +impl Clone + for OpDebugWitnessApi +{ + fn clone(&self) -> Self { + Self { inner: Arc::clone(&self.inner) } + } +} +impl Debug + for OpDebugWitnessApi +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("OpDebugWitnessApi").finish_non_exhaustive() + } +} + +struct OpDebugWitnessApiInner { + provider: Provider, + builder: OpPayloadBuilder, + task_spawner: Box, + semaphore: Arc, +} diff --git a/crates/optimism/trie/Cargo.toml b/crates/optimism/trie/Cargo.toml new file mode 100644 index 00000000..340fa7e0 --- /dev/null +++ b/crates/optimism/trie/Cargo.toml @@ -0,0 +1,87 @@ +[package] +name = "reth-optimism-trie" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Trie node storage for serving proofs in FP window fast" + +[lints] +workspace = true + +[dependencies] +# reth +reth-db = { workspace = true, features = ["mdbx"] } +reth-evm.workspace = true +reth-execution-errors.workspace = true +reth-primitives-traits.workspace = true +reth-provider.workspace = true +reth-revm.workspace = true +reth-trie = { workspace = true, features = ["serde"] } +reth-tasks.workspace = true + +# `metrics` feature +metrics = { workspace = true, optional = true } +reth-metrics = { workspace = true, features = ["common"], optional = true } + +# ethereum +alloy-primitives.workspace = true +alloy-eips.workspace = true + +# async +tokio = { workspace = true, features = ["sync"] } + +# codec +bytes.workspace = true +serde.workspace = true +bincode.workspace = true + +# misc +thiserror.workspace = true +auto_impl.workspace = true +eyre = { workspace = true, optional = true } +strum.workspace = true +tracing.workspace = true +derive_more.workspace = true + +[dev-dependencies] +reth-codecs = { workspace = true, features = ["test-utils"] } +tempfile.workspace = true +tokio = { workspace = true, features = ["test-util", "rt-multi-thread", "macros"] } +test-case.workspace = true +reth-db = { workspace = true, features = ["test-utils"] } +# workaround for failing doc test +reth-db-api = { workspace = true, features = ["test-utils"] } +reth-trie = { workspace = true, features = ["test-utils"] } +reth-provider = { workspace = true, features = ["test-utils"] } +reth-node-api.workspace = true +alloy-consensus.workspace = true +alloy-genesis.workspace = true +reth-chainspec.workspace = true +reth-db-common.workspace = true +reth-ethereum-primitives.workspace = true +reth-evm-ethereum.workspace = true +reth-testing-utils.workspace = true +reth-storage-errors.workspace = true +secp256k1.workspace = true 
+mockall.workspace = true + +# misc +serial_test.workspace = true + +[features] +serde-bincode-compat = [ + "reth-primitives-traits/serde-bincode-compat", + "alloy-consensus/serde-bincode-compat", + "alloy-eips/serde-bincode-compat", + "alloy-genesis/serde-bincode-compat", + "reth-ethereum-primitives/serde-bincode-compat", +] +metrics = [ + "reth-trie/metrics", + "dep:reth-metrics", + "dep:metrics", + "dep:eyre" +] diff --git a/crates/optimism/trie/src/api.rs b/crates/optimism/trie/src/api.rs new file mode 100644 index 00000000..66c27a4e --- /dev/null +++ b/crates/optimism/trie/src/api.rs @@ -0,0 +1,196 @@ +//! Storage API for external storage of intermediary trie nodes. + +use crate::OpProofsStorageResult; +use alloy_eips::eip1898::BlockWithParent; +use alloy_primitives::{map::HashMap, B256, U256}; +use auto_impl::auto_impl; +use derive_more::{AddAssign, Constructor}; +use reth_primitives_traits::Account; +use reth_trie::{ + hashed_cursor::{HashedCursor, HashedStorageCursor}, + trie_cursor::{TrieCursor, TrieStorageCursor}, + updates::TrieUpdatesSorted, + BranchNodeCompact, HashedPostStateSorted, Nibbles, +}; +use std::{fmt::Debug, time::Duration}; + +/// Diff of trie updates and post state for a block. +#[derive(Debug, Clone, Default)] +pub struct BlockStateDiff { + /// Trie updates for branch nodes + pub sorted_trie_updates: TrieUpdatesSorted, + /// Post state for leaf nodes (accounts and storage) + pub sorted_post_state: HashedPostStateSorted, +} + +impl BlockStateDiff { + /// Extend the [` BlockStateDiff`] from other latest [`BlockStateDiff`] + pub fn extend_ref(&mut self, other: &Self) { + self.sorted_trie_updates.extend_ref(&other.sorted_trie_updates); + self.sorted_post_state.extend_ref(&other.sorted_post_state); + } +} + +/// Counts of trie updates written to storage. +#[derive(Debug, Clone, Default, AddAssign, Constructor, Eq, PartialEq)] +pub struct WriteCounts { + /// Number of account trie updates written + pub account_trie_updates_written_total: u64, + /// Number of storage trie updates written + pub storage_trie_updates_written_total: u64, + /// Number of hashed accounts written + pub hashed_accounts_written_total: u64, + /// Number of hashed storages written + pub hashed_storages_written_total: u64, +} + +/// Duration metrics for block processing. +#[derive(Debug, Default, Clone)] +pub struct OperationDurations { + /// Total time to process a block (end-to-end) in seconds + pub total_duration_seconds: Duration, + /// Time spent executing the block (EVM) in seconds + pub execution_duration_seconds: Duration, + /// Time spent calculating state root in seconds + pub state_root_duration_seconds: Duration, + /// Time spent writing trie updates to storage in seconds + pub write_duration_seconds: Duration, +} + +/// Trait for reading trie nodes from the database. +/// +/// Only leaf nodes and some branch nodes are stored. The bottom layer of branch nodes +/// are not stored to reduce write amplification. This matches Reth's non-historical trie storage. +#[auto_impl(Arc)] +pub trait OpProofsStore: Send + Sync + Debug { + /// Cursor for iterating over trie branches. + type StorageTrieCursor<'tx>: TrieStorageCursor + 'tx + where + Self: 'tx; + + /// Cursor for iterating over account trie branches. + type AccountTrieCursor<'tx>: TrieCursor + 'tx + where + Self: 'tx; + + /// Cursor for iterating over storage leaves. + type StorageCursor<'tx>: HashedStorageCursor + Send + Sync + 'tx + where + Self: 'tx; + + /// Cursor for iterating over account leaves. 
+ type AccountHashedCursor<'tx>: HashedCursor + Send + Sync + 'tx + where + Self: 'tx; + + /// Store a batch of account trie branches. Used for saving existing state. For live state + /// capture, use [store_trie_updates](OpProofsStore::store_trie_updates). + fn store_account_branches( + &self, + account_nodes: Vec<(Nibbles, Option)>, + ) -> impl Future> + Send; + + /// Store a batch of storage trie branches. Used for saving existing state. + fn store_storage_branches( + &self, + hashed_address: B256, + storage_nodes: Vec<(Nibbles, Option)>, + ) -> impl Future> + Send; + + /// Store a batch of account trie leaf nodes. Used for saving existing state. + fn store_hashed_accounts( + &self, + accounts: Vec<(B256, Option)>, + ) -> impl Future> + Send; + + /// Store a batch of storage trie leaf nodes. Used for saving existing state. + fn store_hashed_storages( + &self, + hashed_address: B256, + storages: Vec<(B256, U256)>, + ) -> impl Future> + Send; + + /// Get the earliest block number and hash that has been stored + /// + /// This is used to determine the block number of trie nodes with block number 0. + /// All earliest block numbers are stored in 0 to reduce updates required to prune trie nodes. + fn get_earliest_block_number( + &self, + ) -> impl Future>> + Send; + + /// Get the latest block number and hash that has been stored + fn get_latest_block_number( + &self, + ) -> impl Future>> + Send; + + /// Get a trie cursor for the storage backend + fn storage_trie_cursor<'tx>( + &self, + hashed_address: B256, + max_block_number: u64, + ) -> OpProofsStorageResult>; + + /// Get a trie cursor for the account backend + fn account_trie_cursor<'tx>( + &self, + max_block_number: u64, + ) -> OpProofsStorageResult>; + + /// Get a storage cursor for the storage backend + fn storage_hashed_cursor<'tx>( + &self, + hashed_address: B256, + max_block_number: u64, + ) -> OpProofsStorageResult>; + + /// Get an account hashed cursor for the storage backend + fn account_hashed_cursor<'tx>( + &self, + max_block_number: u64, + ) -> OpProofsStorageResult>; + + /// Store a batch of trie updates. + /// + /// If wiped is true, the entire storage trie is wiped, but this is unsupported going forward, + /// so should only happen for legacy reasons. + fn store_trie_updates( + &self, + block_ref: BlockWithParent, + block_state_diff: BlockStateDiff, + ) -> impl Future> + Send; + + /// Fetch all updates for a given block number. + fn fetch_trie_updates( + &self, + block_number: u64, + ) -> impl Future> + Send; + + /// Applies [`BlockStateDiff`] to the earliest state (updating/deleting nodes) and updates the + /// earliest block number. + fn prune_earliest_state( + &self, + new_earliest_block_ref: BlockWithParent, + diff: BlockStateDiff, + ) -> impl Future> + Send; + + /// Remove account, storage and trie updates from historical storage for all blocks till + /// the specified block (inclusive). + fn unwind_history( + &self, + to: BlockWithParent, + ) -> impl Future> + Send; + + /// Deletes all updates > `latest_common_block_number` and replaces them with the new updates. 
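+    ///
+    /// Intended for reorg handling: everything above the last block shared with the new
+    /// canonical chain is discarded, then the supplied per-block diffs are applied in its place.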
+ fn replace_updates( + &self, + latest_common_block_number: u64, + blocks_to_add: HashMap, + ) -> impl Future> + Send; + + /// Set the earliest block number and hash that has been stored + fn set_earliest_block_number( + &self, + block_number: u64, + hash: B256, + ) -> impl Future> + Send; +} diff --git a/crates/optimism/trie/src/backfill.rs b/crates/optimism/trie/src/backfill.rs new file mode 100644 index 00000000..fab08006 --- /dev/null +++ b/crates/optimism/trie/src/backfill.rs @@ -0,0 +1,700 @@ +//! Backfill job for proofs storage. Handles storing the existing state into the proofs storage. + +use crate::{OpProofsStorageError, OpProofsStore}; +use alloy_primitives::B256; +use reth_db::{ + cursor::{DbCursorRO, DbDupCursorRO}, + tables, + transaction::DbTx, + DatabaseError, +}; +use reth_primitives_traits::{Account, StorageEntry}; +use reth_trie::{BranchNodeCompact, Nibbles, StorageTrieEntry, StoredNibbles}; +use std::{collections::HashMap, time::Instant}; +use tracing::info; + +/// Batch size threshold for storing entries during backfill +const BACKFILL_STORAGE_THRESHOLD: usize = 100000; + +/// Threshold for logging progress during backfill +const BACKFILL_LOG_THRESHOLD: usize = 100000; + +/// Backfill job for external storage. +#[derive(Debug)] +pub struct BackfillJob<'a, Tx: DbTx, S: OpProofsStore + Send> { + storage: S, + tx: &'a Tx, +} + +/// Macro to generate simple cursor iterators for tables +macro_rules! define_simple_cursor_iter { + ($iter_name:ident, $table:ty, $key_type:ty, $value_type:ty) => { + struct $iter_name(C); + + impl $iter_name { + const fn new(cursor: C) -> Self { + Self(cursor) + } + } + + impl> Iterator for $iter_name { + type Item = Result<($key_type, $value_type), DatabaseError>; + + fn next(&mut self) -> Option { + self.0.next().transpose() + } + } + }; +} + +/// Macro to generate duplicate cursor iterators for tables with custom logic +macro_rules! define_dup_cursor_iter { + ($iter_name:ident, $table:ty, $key_type:ty, $value_type:ty) => { + struct $iter_name(C); + + impl $iter_name { + const fn new(cursor: C) -> Self { + Self(cursor) + } + } + + impl + DbCursorRO<$table>> Iterator for $iter_name { + type Item = Result<($key_type, $value_type), DatabaseError>; + + fn next(&mut self) -> Option { + // First try to get the next duplicate value + if let Some(res) = self.0.next_dup().transpose() { + return Some(res); + } + + // If no more duplicates, find the next key with values + let Some(Ok((next_key, _))) = self.0.next_no_dup().transpose() else { + // If no more entries, return None + return None; + }; + + // If found, seek to the first duplicate for this key + return self.0.seek(next_key).transpose(); + } + } + }; +} + +// Generate iterators for all 4 table types +define_simple_cursor_iter!(HashedAccountsIter, tables::HashedAccounts, B256, Account); +define_dup_cursor_iter!(HashedStoragesIter, tables::HashedStorages, B256, StorageEntry); +define_simple_cursor_iter!( + AccountsTrieIter, + tables::AccountsTrie, + StoredNibbles, + BranchNodeCompact +); +define_dup_cursor_iter!(StoragesTrieIter, tables::StoragesTrie, B256, StorageTrieEntry); + +/// Trait to estimate the progress of a backfill job based on the key. 
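+///
+/// Keys are hash-ordered and roughly uniform over the keyspace, so the numeric value of a
+/// short key prefix, divided by the prefix's maximum value, approximates the fraction of the
+/// keyspace already scanned. For example, a `B256` key whose first byte is `0x80` yields an
+/// estimate of about 50%.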
+trait CompletionEstimatable { + // Returns a progress estimate as a percentage (0.0 to 1.0) + fn estimate_progress(&self) -> f64; +} + +impl CompletionEstimatable for B256 { + fn estimate_progress(&self) -> f64 { + // use the first 3 bytes as a progress estimate + let progress = self.0[..3].to_vec(); + let mut val: u64 = 0; + for nibble in &progress { + val = (val << 8) | *nibble as u64; + } + val as f64 / (256u64.pow(3)) as f64 + } +} + +impl CompletionEstimatable for StoredNibbles { + fn estimate_progress(&self) -> f64 { + // use the first 6 nibbles as a progress estimate + let progress_nibbles = + if self.0.is_empty() { Nibbles::new() } else { self.0.slice(0..(self.0.len().min(6))) }; + let mut val: u64 = 0; + for nibble in progress_nibbles.iter() { + val = (val << 4) | nibble as u64; + } + val as f64 / (16u64.pow(progress_nibbles.len() as u32)) as f64 + } +} + +/// Backfill a table from a source iterator to a storage function. Handles batching and logging. +async fn backfill< + S: Iterator>, + F: Future> + Send, + Key: CompletionEstimatable + Clone + 'static, + Value: Clone + 'static, +>( + name: &str, + source: S, + storage_threshold: usize, + log_threshold: usize, + save_fn: impl Fn(Vec<(Key, Value)>) -> F, +) -> Result { + let mut entries = Vec::new(); + + let mut total_entries: u64 = 0; + + info!("Starting {} backfill", name); + let start_time = Instant::now(); + + let mut source = source.peekable(); + let initial_progress = source + .peek() + .map(|entry| entry.clone().map(|entry| entry.0.estimate_progress())) + .transpose()?; + + for entry in source { + let Some(initial_progress) = initial_progress else { + // If there are any items, there must be an initial progress + unreachable!(); + }; + let entry = entry?; + + entries.push(entry.clone()); + total_entries += 1; + + if total_entries.is_multiple_of(log_threshold as u64) { + let progress = entry.0.estimate_progress(); + let elapsed = start_time.elapsed(); + let elapsed_secs = elapsed.as_secs_f64(); + + let progress_per_second = if elapsed_secs.is_normal() { + (progress - initial_progress) / elapsed_secs + } else { + 0.0 + }; + let estimated_total_time = if progress_per_second.is_normal() { + (1.0 - progress) / progress_per_second + } else { + 0.0 + }; + let progress_pct = progress * 100.0; + info!( + "Processed {} {}, progress: {progress_pct:.2}%, ETA: {}s", + name, total_entries, estimated_total_time, + ); + } + + if entries.len() >= storage_threshold { + info!("Storing {} entries, total entries: {}", name, total_entries); + save_fn(entries).await?; + entries = Vec::new(); + } + } + + if !entries.is_empty() { + info!("Storing final {} entries", name); + save_fn(entries).await?; + } + + info!("{} backfill complete: {} entries", name, total_entries); + Ok(total_entries) +} + +/// Save hashed accounts to storage. +async fn save_hashed_accounts( + storage: &S, + entries: Vec<(B256, Account)>, +) -> Result<(), OpProofsStorageError> { + storage + .store_hashed_accounts( + entries.into_iter().map(|(address, account)| (address, Some(account))).collect(), + ) + .await?; + Ok(()) +} + +impl<'a, Tx: DbTx, S: OpProofsStore + Send> BackfillJob<'a, Tx, S> { + /// Create a new backfill job. 
+ pub const fn new(storage: S, tx: &'a Tx) -> Self { + Self { storage, tx } + } + + /// Backfill hashed accounts data + async fn backfill_hashed_accounts(&self) -> Result<(), OpProofsStorageError> { + let start_cursor = self.tx.cursor_read::()?; + + let source = HashedAccountsIter::new(start_cursor); + let storage = &self.storage; + backfill( + "hashed accounts", + source, + BACKFILL_STORAGE_THRESHOLD, + BACKFILL_LOG_THRESHOLD, + |entries| save_hashed_accounts(storage, entries), + ) + .await?; + + Ok(()) + } + + /// Backfill hashed storage data + async fn backfill_hashed_storages(&self) -> Result<(), OpProofsStorageError> { + let start_cursor = self.tx.cursor_dup_read::()?; + + let source = HashedStoragesIter::new(start_cursor); + let storage = &self.storage; + let save_fn = + async |entries: Vec<(B256, StorageEntry)>| -> Result<(), OpProofsStorageError> { + // Group entries by hashed address + let mut by_address: HashMap> = + HashMap::default(); + for (address, entry) in entries { + by_address.entry(address).or_default().push((entry.key, entry.value)); + } + + // Store each address's storage entries + for (address, storages) in by_address { + storage.store_hashed_storages(address, storages).await?; + } + Ok(()) + }; + + backfill( + "hashed storage", + source, + BACKFILL_STORAGE_THRESHOLD, + BACKFILL_LOG_THRESHOLD, + save_fn, + ) + .await?; + + Ok(()) + } + + /// Backfill accounts trie data + async fn backfill_accounts_trie(&self) -> Result<(), OpProofsStorageError> { + let start_cursor = self.tx.cursor_read::()?; + + let source = AccountsTrieIter::new(start_cursor); + let storage = &self.storage; + let save_fn = async |entries: Vec<(StoredNibbles, BranchNodeCompact)>| -> Result<(), OpProofsStorageError> { + storage + .store_account_branches( + entries.into_iter().map(|(path, branch)| (path.0, Some(branch))).collect(), + ) + .await?; + Ok(()) + }; + + backfill( + "accounts trie", + source, + BACKFILL_STORAGE_THRESHOLD, + BACKFILL_LOG_THRESHOLD, + save_fn, + ) + .await?; + + Ok(()) + } + + /// Backfill storage trie data + async fn backfill_storages_trie(&self) -> Result<(), OpProofsStorageError> { + let start_cursor = self.tx.cursor_dup_read::()?; + + let source = StoragesTrieIter::new(start_cursor); + let storage = &self.storage; + let save_fn = + async |entries: Vec<(B256, StorageTrieEntry)>| -> Result<(), OpProofsStorageError> { + // Group entries by hashed address + let mut by_address: HashMap)>> = + HashMap::default(); + for (hashed_address, storage_entry) in entries { + by_address + .entry(hashed_address) + .or_default() + .push((storage_entry.nibbles.0, Some(storage_entry.node))); + } + + // Store each address's storage trie branches + for (address, branches) in by_address { + storage.store_storage_branches(address, branches).await?; + } + Ok(()) + }; + + backfill( + "storage trie", + source, + BACKFILL_STORAGE_THRESHOLD, + BACKFILL_LOG_THRESHOLD, + save_fn, + ) + .await?; + + Ok(()) + } + + /// Run complete backfill of all preimage data + async fn backfill_trie(&self) -> Result<(), OpProofsStorageError> { + self.backfill_hashed_accounts().await?; + self.backfill_hashed_storages().await?; + self.backfill_storages_trie().await?; + self.backfill_accounts_trie().await?; + + Ok(()) + } + + /// Run the backfill job. 
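+    ///
+    /// The backfill only runs when the store is still empty (no earliest block recorded yet).
+    /// A minimal sketch, assuming an open read transaction and the current best block:
+    ///
+    /// ```ignore
+    /// let job = BackfillJob::new(storage.clone(), &tx);
+    /// job.run(best_number, best_hash).await?;
+    /// ```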
+ pub async fn run(&self, best_number: u64, best_hash: B256) -> Result<(), OpProofsStorageError> { + if self.storage.get_earliest_block_number().await?.is_none() { + self.backfill_trie().await?; + + self.storage.set_earliest_block_number(best_number, best_hash).await?; + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::InMemoryProofsStorage; + use alloy_primitives::{keccak256, Address, U256}; + use reth_db::{ + cursor::DbCursorRW, test_utils::create_test_rw_db, transaction::DbTxMut, Database, + }; + use reth_primitives_traits::Account; + use reth_trie::{ + hashed_cursor::HashedCursor, trie_cursor::TrieCursor, BranchNodeCompact, StorageTrieEntry, + StoredNibbles, StoredNibblesSubKey, + }; + use std::sync::Arc; + + /// Helper function to create a test branch node + fn create_test_branch_node() -> BranchNodeCompact { + let mut state_mask = reth_trie::TrieMask::default(); + state_mask.set_bit(0); + state_mask.set_bit(1); + + BranchNodeCompact { + state_mask, + tree_mask: reth_trie::TrieMask::default(), + hash_mask: reth_trie::TrieMask::default(), + hashes: Arc::new(vec![]), + root_hash: None, + } + } + + #[tokio::test] + async fn test_backfill_hashed_accounts() { + let db = create_test_rw_db(); + let storage = InMemoryProofsStorage::new(); + + // Insert test accounts into database + let tx = db.tx_mut().unwrap(); + let mut cursor = tx.cursor_write::().unwrap(); + + let mut accounts = vec![ + ( + keccak256(Address::repeat_byte(0x01)), + Account { nonce: 1, balance: U256::from(100), bytecode_hash: None }, + ), + ( + keccak256(Address::repeat_byte(0x02)), + Account { nonce: 2, balance: U256::from(200), bytecode_hash: None }, + ), + ( + keccak256(Address::repeat_byte(0x03)), + Account { nonce: 3, balance: U256::from(300), bytecode_hash: None }, + ), + ]; + + // Sort accounts by address for cursor.append (which requires sorted order) + accounts.sort_by_key(|(addr, _)| *addr); + + for (addr, account) in &accounts { + cursor.append(*addr, account).unwrap(); + } + drop(cursor); + tx.commit().unwrap(); + + // Run backfill + let tx = db.tx().unwrap(); + let job = BackfillJob::new(storage.clone(), &tx); + job.backfill_hashed_accounts().await.unwrap(); + + // Verify data was stored (will be in sorted order) + let mut account_cursor = storage.account_hashed_cursor(100).unwrap(); + let mut count = 0; + while let Some((key, account)) = account_cursor.next().unwrap() { + // Find matching account in our test data + let expected = accounts.iter().find(|(addr, _)| *addr == key).unwrap(); + assert_eq!((key, account), *expected); + count += 1; + } + assert_eq!(count, 3); + } + + #[tokio::test] + async fn test_backfill_hashed_storage() { + let db = create_test_rw_db(); + let storage = InMemoryProofsStorage::new(); + + // Insert test storage into database + let tx = db.tx_mut().unwrap(); + let mut cursor = tx.cursor_dup_write::().unwrap(); + + let addr1 = keccak256(Address::repeat_byte(0x01)); + let addr2 = keccak256(Address::repeat_byte(0x02)); + + let storage_entries = vec![ + ( + addr1, + StorageEntry { key: keccak256(B256::repeat_byte(0x10)), value: U256::from(100) }, + ), + ( + addr1, + StorageEntry { key: keccak256(B256::repeat_byte(0x20)), value: U256::from(200) }, + ), + ( + addr2, + StorageEntry { key: keccak256(B256::repeat_byte(0x30)), value: U256::from(300) }, + ), + ]; + + for (addr, entry) in &storage_entries { + cursor.upsert(*addr, entry).unwrap(); + } + drop(cursor); + tx.commit().unwrap(); + + // Run backfill + let tx = db.tx().unwrap(); + let job = 
BackfillJob::new(storage.clone(), &tx); + job.backfill_hashed_storages().await.unwrap(); + + // Verify data was stored for addr1 + let mut storage_cursor = storage.storage_hashed_cursor(addr1, 100).unwrap(); + let mut found = vec![]; + while let Some((key, value)) = storage_cursor.next().unwrap() { + found.push((key, value)); + } + assert_eq!(found.len(), 2); + assert_eq!(found[0], (storage_entries[0].1.key, storage_entries[0].1.value)); + assert_eq!(found[1], (storage_entries[1].1.key, storage_entries[1].1.value)); + + // Verify data was stored for addr2 + let mut storage_cursor = storage.storage_hashed_cursor(addr2, 100).unwrap(); + let mut found = vec![]; + while let Some((key, value)) = storage_cursor.next().unwrap() { + found.push((key, value)); + } + assert_eq!(found.len(), 1); + assert_eq!(found[0], (storage_entries[2].1.key, storage_entries[2].1.value)); + } + + #[tokio::test] + async fn test_backfill_accounts_trie() { + let db = create_test_rw_db(); + let storage = InMemoryProofsStorage::new(); + + // Insert test trie nodes into database + let tx = db.tx_mut().unwrap(); + let mut cursor = tx.cursor_write::().unwrap(); + + let branch = create_test_branch_node(); + let nodes = vec![ + (StoredNibbles(Nibbles::from_nibbles_unchecked(vec![1])), branch.clone()), + (StoredNibbles(Nibbles::from_nibbles_unchecked(vec![2])), branch.clone()), + (StoredNibbles(Nibbles::from_nibbles_unchecked(vec![3])), branch.clone()), + ]; + + for (path, node) in &nodes { + cursor.append(path.clone(), node).unwrap(); + } + drop(cursor); + tx.commit().unwrap(); + + // Run backfill + let tx = db.tx().unwrap(); + let job = BackfillJob::new(storage.clone(), &tx); + job.backfill_accounts_trie().await.unwrap(); + + // Verify data was stored + let mut trie_cursor = storage.account_trie_cursor(100).unwrap(); + let mut count = 0; + while let Some((path, _node)) = trie_cursor.next().unwrap() { + assert_eq!(path, nodes[count].0 .0); + count += 1; + } + assert_eq!(count, 3); + } + + #[tokio::test] + async fn test_backfill_storages_trie() { + let db = create_test_rw_db(); + let storage = InMemoryProofsStorage::new(); + + // Insert test storage trie nodes into database + let tx = db.tx_mut().unwrap(); + let mut cursor = tx.cursor_dup_write::().unwrap(); + + let branch = create_test_branch_node(); + let addr1 = keccak256(Address::repeat_byte(0x01)); + let addr2 = keccak256(Address::repeat_byte(0x02)); + + let nodes = vec![ + ( + addr1, + StorageTrieEntry { + nibbles: StoredNibblesSubKey(Nibbles::from_nibbles_unchecked(vec![1])), + node: branch.clone(), + }, + ), + ( + addr1, + StorageTrieEntry { + nibbles: StoredNibblesSubKey(Nibbles::from_nibbles_unchecked(vec![2])), + node: branch.clone(), + }, + ), + ( + addr2, + StorageTrieEntry { + nibbles: StoredNibblesSubKey(Nibbles::from_nibbles_unchecked(vec![3])), + node: branch.clone(), + }, + ), + ]; + + for (addr, entry) in &nodes { + cursor.upsert(*addr, entry).unwrap(); + } + drop(cursor); + tx.commit().unwrap(); + + // Run backfill + let tx = db.tx().unwrap(); + let job = BackfillJob::new(storage.clone(), &tx); + job.backfill_storages_trie().await.unwrap(); + + // Verify data was stored for addr1 + let mut trie_cursor = storage.storage_trie_cursor(addr1, 100).unwrap(); + let mut found = vec![]; + while let Some((path, _node)) = trie_cursor.next().unwrap() { + found.push(path); + } + assert_eq!(found.len(), 2); + assert_eq!(found[0], nodes[0].1.nibbles.0); + assert_eq!(found[1], nodes[1].1.nibbles.0); + + // Verify data was stored for addr2 + let mut trie_cursor = 
storage.storage_trie_cursor(addr2, 100).unwrap(); + let mut found = vec![]; + while let Some((path, _node)) = trie_cursor.next().unwrap() { + found.push(path); + } + assert_eq!(found.len(), 1); + assert_eq!(found[0], nodes[2].1.nibbles.0); + } + + #[tokio::test] + async fn test_full_backfill_run() { + let db = create_test_rw_db(); + let storage = InMemoryProofsStorage::new(); + + // Insert some test data + let tx = db.tx_mut().unwrap(); + + // Add accounts + let mut cursor = tx.cursor_write::().unwrap(); + let addr = keccak256(Address::repeat_byte(0x01)); + cursor + .append(addr, &Account { nonce: 1, balance: U256::from(100), bytecode_hash: None }) + .unwrap(); + drop(cursor); + + // Add storage + let mut cursor = tx.cursor_dup_write::().unwrap(); + cursor + .upsert( + addr, + &StorageEntry { key: keccak256(B256::repeat_byte(0x10)), value: U256::from(100) }, + ) + .unwrap(); + drop(cursor); + + // Add account trie + let mut cursor = tx.cursor_write::().unwrap(); + cursor + .append( + StoredNibbles(Nibbles::from_nibbles_unchecked(vec![1])), + &create_test_branch_node(), + ) + .unwrap(); + drop(cursor); + + // Add storage trie + let mut cursor = tx.cursor_dup_write::().unwrap(); + cursor + .upsert( + addr, + &StorageTrieEntry { + nibbles: StoredNibblesSubKey(Nibbles::from_nibbles_unchecked(vec![1])), + node: create_test_branch_node(), + }, + ) + .unwrap(); + drop(cursor); + + tx.commit().unwrap(); + + // Run full backfill + let tx = db.tx().unwrap(); + let job = BackfillJob::new(storage.clone(), &tx); + let best_number = 100; + let best_hash = B256::repeat_byte(0x42); + + // Should be None initially + assert_eq!(storage.get_earliest_block_number().await.unwrap(), None); + + job.run(best_number, best_hash).await.unwrap(); + + // Should be set after backfill + assert_eq!( + storage.get_earliest_block_number().await.unwrap(), + Some((best_number, best_hash)) + ); + + // Verify data was backfilled + let mut account_cursor = storage.account_hashed_cursor(100).unwrap(); + assert!(account_cursor.next().unwrap().is_some()); + + let mut storage_cursor = storage.storage_hashed_cursor(addr, 100).unwrap(); + assert!(storage_cursor.next().unwrap().is_some()); + + let mut trie_cursor = storage.account_trie_cursor(100).unwrap(); + assert!(trie_cursor.next().unwrap().is_some()); + + let mut storage_trie_cursor = storage.storage_trie_cursor(addr, 100).unwrap(); + assert!(storage_trie_cursor.next().unwrap().is_some()); + } + + #[tokio::test] + async fn test_backfill_run_skips_if_already_done() { + let db = create_test_rw_db(); + let storage = InMemoryProofsStorage::new(); + + // Set earliest block to simulate already backfilled + storage.set_earliest_block_number(50, B256::repeat_byte(0x01)).await.unwrap(); + + let tx = db.tx().unwrap(); + let job = BackfillJob::new(storage.clone(), &tx); + + // Run backfill - should skip + job.run(100, B256::repeat_byte(0x42)).await.unwrap(); + + // Should still have old earliest block + assert_eq!( + storage.get_earliest_block_number().await.unwrap(), + Some((50, B256::repeat_byte(0x01))) + ); + } +} diff --git a/crates/optimism/trie/src/cursor.rs b/crates/optimism/trie/src/cursor.rs new file mode 100644 index 00000000..615c554f --- /dev/null +++ b/crates/optimism/trie/src/cursor.rs @@ -0,0 +1,129 @@ +//! Implementation of [`HashedCursor`] and [`TrieCursor`] for +//! [`OpProofsStorage`](crate::OpProofsStorage). 
+ +use alloy_primitives::{B256, U256}; +use derive_more::Constructor; +use reth_db::DatabaseError; +use reth_primitives_traits::Account; +use reth_trie::{ + hashed_cursor::{HashedCursor, HashedStorageCursor}, + trie_cursor::{TrieCursor, TrieStorageCursor}, + BranchNodeCompact, Nibbles, +}; + +/// Manages reading storage or account trie nodes from [`TrieCursor`]. +#[derive(Debug, Clone, Constructor)] +pub struct OpProofsTrieCursor(pub C); + +impl TrieCursor for OpProofsTrieCursor +where + C: TrieCursor, +{ + #[inline] + fn seek_exact( + &mut self, + key: Nibbles, + ) -> Result, DatabaseError> { + self.0.seek_exact(key) + } + + #[inline] + fn seek( + &mut self, + key: Nibbles, + ) -> Result, DatabaseError> { + self.0.seek(key) + } + + #[inline] + fn next(&mut self) -> Result, DatabaseError> { + self.0.next() + } + + #[inline] + fn current(&mut self) -> Result, DatabaseError> { + self.0.current() + } + + #[inline] + fn reset(&mut self) { + self.0.reset() + } +} + +impl TrieStorageCursor for OpProofsTrieCursor +where + C: TrieStorageCursor, +{ + #[inline] + fn set_hashed_address(&mut self, hashed_address: B256) { + self.0.set_hashed_address(hashed_address) + } +} + +/// Manages reading hashed account nodes from external storage. +#[derive(Debug, Clone, Constructor)] +pub struct OpProofsHashedAccountCursor(pub C); + +impl HashedCursor for OpProofsHashedAccountCursor +where + C: HashedCursor + Send + Sync, +{ + type Value = Account; + + #[inline] + fn seek(&mut self, key: B256) -> Result, DatabaseError> { + self.0.seek(key) + } + + #[inline] + fn next(&mut self) -> Result, DatabaseError> { + self.0.next() + } + + #[inline] + fn reset(&mut self) { + self.0.reset() + } +} + +/// Manages reading hashed storage nodes from external storage. +#[derive(Debug, Clone, Constructor)] +pub struct OpProofsHashedStorageCursor(pub C); + +impl HashedCursor for OpProofsHashedStorageCursor +where + C: HashedCursor + Send + Sync, +{ + type Value = U256; + + #[inline] + fn seek(&mut self, key: B256) -> Result, DatabaseError> { + self.0.seek(key) + } + + #[inline] + fn next(&mut self) -> Result, DatabaseError> { + self.0.next() + } + + #[inline] + fn reset(&mut self) { + self.0.reset() + } +} + +impl HashedStorageCursor for OpProofsHashedStorageCursor +where + C: HashedStorageCursor + Send + Sync, +{ + #[inline] + fn is_storage_empty(&mut self) -> Result { + self.0.is_storage_empty() + } + + #[inline] + fn set_hashed_address(&mut self, hashed_address: B256) { + self.0.set_hashed_address(hashed_address) + } +} diff --git a/crates/optimism/trie/src/cursor_factory.rs b/crates/optimism/trie/src/cursor_factory.rs new file mode 100644 index 00000000..ff47c913 --- /dev/null +++ b/crates/optimism/trie/src/cursor_factory.rs @@ -0,0 +1,100 @@ +//! Implements [`TrieCursorFactory`] and [`HashedCursorFactory`] for [`OpProofsStore`] types. + +use crate::{ + OpProofsHashedAccountCursor, OpProofsHashedStorageCursor, OpProofsStorage, OpProofsStore, + OpProofsTrieCursor, +}; +use alloy_primitives::B256; +use reth_db::DatabaseError; +use reth_trie::{hashed_cursor::HashedCursorFactory, trie_cursor::TrieCursorFactory}; +use std::marker::PhantomData; + +/// Factory for creating trie cursors for [`OpProofsStore`]. 
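+///
+/// A construction sketch (the block number is a placeholder); every cursor the factory hands
+/// out resolves state as of at most the given block:
+///
+/// ```ignore
+/// let factory = OpProofsTrieCursorFactory::new(&storage, block_number);
+/// let mut account_cursor = factory.account_trie_cursor()?;
+/// ```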
+#[derive(Debug, Clone)] +pub struct OpProofsTrieCursorFactory<'tx, S: OpProofsStore> { + storage: &'tx OpProofsStorage, + block_number: u64, + _marker: PhantomData<&'tx ()>, +} + +impl<'tx, S: OpProofsStore> OpProofsTrieCursorFactory<'tx, S> { + /// Initializes new `OpProofsTrieCursorFactory` + pub const fn new(storage: &'tx OpProofsStorage, block_number: u64) -> Self { + Self { storage, block_number, _marker: PhantomData } + } +} + +impl<'tx, S> TrieCursorFactory for OpProofsTrieCursorFactory<'tx, S> +where + for<'a> S: OpProofsStore + 'tx, +{ + type AccountTrieCursor<'a> + = OpProofsTrieCursor> + where + Self: 'a; + type StorageTrieCursor<'a> + = OpProofsTrieCursor> + where + Self: 'a; + + fn account_trie_cursor(&self) -> Result, DatabaseError> { + Ok(OpProofsTrieCursor::new( + self.storage + .account_trie_cursor(self.block_number) + .map_err(Into::::into)?, + )) + } + + fn storage_trie_cursor( + &self, + hashed_address: B256, + ) -> Result, DatabaseError> { + Ok(OpProofsTrieCursor::new( + self.storage + .storage_trie_cursor(hashed_address, self.block_number) + .map_err(Into::::into)?, + )) + } +} + +/// Factory for creating hashed account cursors for [`OpProofsStore`]. +#[derive(Debug, Clone)] +pub struct OpProofsHashedAccountCursorFactory<'tx, S: OpProofsStore> { + storage: &'tx OpProofsStorage, + block_number: u64, + _marker: PhantomData<&'tx ()>, +} + +impl<'tx, S: OpProofsStore> OpProofsHashedAccountCursorFactory<'tx, S> { + /// Creates a new `OpProofsHashedAccountCursorFactory` instance. + pub const fn new(storage: &'tx OpProofsStorage, block_number: u64) -> Self { + Self { storage, block_number, _marker: PhantomData } + } +} + +impl<'tx, S> HashedCursorFactory for OpProofsHashedAccountCursorFactory<'tx, S> +where + S: OpProofsStore + 'tx, +{ + type AccountCursor<'a> + = OpProofsHashedAccountCursor> + where + Self: 'a; + type StorageCursor<'a> + = OpProofsHashedStorageCursor> + where + Self: 'a; + + fn hashed_account_cursor(&self) -> Result, DatabaseError> { + Ok(OpProofsHashedAccountCursor::new(self.storage.account_hashed_cursor(self.block_number)?)) + } + + fn hashed_storage_cursor( + &self, + hashed_address: B256, + ) -> Result, DatabaseError> { + Ok(OpProofsHashedStorageCursor::new( + self.storage.storage_hashed_cursor(hashed_address, self.block_number)?, + )) + } +} diff --git a/crates/optimism/trie/src/db/cursor.rs b/crates/optimism/trie/src/db/cursor.rs new file mode 100644 index 00000000..e5c0329d --- /dev/null +++ b/crates/optimism/trie/src/db/cursor.rs @@ -0,0 +1,1351 @@ +use std::marker::PhantomData; + +use crate::{ + db::{ + AccountTrieHistory, HashedAccountHistory, HashedStorageHistory, HashedStorageKey, + MaybeDeleted, StorageTrieHistory, StorageTrieKey, VersionedValue, + }, + OpProofsStorageResult, +}; +use alloy_primitives::{B256, U256}; +use reth_db::{ + cursor::{DbCursorRO, DbDupCursorRO}, + table::{DupSort, Table}, + transaction::DbTx, + Database, DatabaseEnv, DatabaseError, +}; +use reth_primitives_traits::Account; +use reth_trie::{ + hashed_cursor::{HashedCursor, HashedStorageCursor}, + trie_cursor::{TrieCursor, TrieStorageCursor}, + BranchNodeCompact, Nibbles, StoredNibbles, +}; + +/// Generic alias for dup cursor for T +pub(crate) type Dup<'tx, T> = <::TX as DbTx>::DupCursor; + +/// Iterates versioned dup-sorted rows and returns the latest value (<= `max_block_number`), +/// skipping tombstones. 
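+///
+/// For example, if a key has versions written at blocks 5, 9, and 12 and the cursor is bound
+/// to `max_block_number = 10`, lookups resolve to the block-9 version; if that version is a
+/// tombstone ([`MaybeDeleted`] wrapping `None`), the key is treated as absent at that height.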
+#[derive(Debug, Clone)] +pub struct BlockNumberVersionedCursor { + _table: PhantomData, + cursor: Cursor, + max_block_number: u64, +} + +impl BlockNumberVersionedCursor +where + T: Table> + DupSort, + Cursor: DbCursorRO + DbDupCursorRO, +{ + /// Initializes new [`BlockNumberVersionedCursor`]. + pub const fn new(cursor: Cursor, max_block_number: u64) -> Self { + Self { _table: PhantomData, cursor, max_block_number } + } + + /// Resolve the latest version for `key` with `block_number` <= `max_block_number`. + /// Strategy: + /// - `seek_by_key_subkey(key, max)` gives first dup >= max. + /// - if exactly == max → it's our latest + /// - if > max → `prev_dup()` is latest < max (or None) + /// - if no dup >= max: + /// - if key exists → `last_dup()` is latest < max + /// - else → None + fn latest_version_for_key( + &mut self, + key: T::Key, + ) -> OpProofsStorageResult> { + // First dup with subkey >= max_block_number + let seek_res = self.cursor.seek_by_key_subkey(key.clone(), self.max_block_number)?; + + if let Some(vv) = seek_res { + if vv.block_number > self.max_block_number { + // step back to the last dup < max + return Ok(self.cursor.prev_dup()?); + } + // already at the dup = max + return Ok(Some((key, vv))) + } + + // No dup >= max ⇒ either key absent or all dups < max. Check if key exists: + if self.cursor.seek_exact(key.clone())?.is_none() { + return Ok(None); + } + + // Key exists ⇒ take last dup (< max). + if let Some(vv) = self.cursor.last_dup()? { + return Ok(Some((key, vv))); + } + Ok(None) + } + + /// Returns a non-deleted latest version for exactly `key`, if any. + fn seek_exact(&mut self, key: T::Key) -> OpProofsStorageResult> { + if let Some((latest_key, latest_value)) = self.latest_version_for_key(key)? && + let MaybeDeleted(Some(v)) = latest_value.value + { + return Ok(Some((latest_key, v))); + } + Ok(None) + } + + /// Walk forward from `first_key` (inclusive) until we find a *live* latest-≤-max value. + /// `first_key` must already be a *real key* in the table. + fn next_live_from( + &mut self, + mut first_key: T::Key, + ) -> OpProofsStorageResult> { + loop { + // Compute latest version ≤ max for this key + if let Some((k, v)) = self.seek_exact(first_key.clone())? { + return Ok(Some((k, v))); + } + + // Move to next distinct key, or EOF + let Some((next_key, _)) = self.cursor.next_no_dup()? else { + return Ok(None); + }; + + first_key = next_key; + } + } + + /// Seek to the first non-deleted latest version at or after `start_key`. + /// Logic: + /// - Try exact key first (above). If alive, return it. + /// - Otherwise hop to next distinct key and repeat until we find a live version or hit EOF. + fn seek(&mut self, start_key: T::Key) -> OpProofsStorageResult> { + // Position MDBX at first key >= start_key + if let Some((first_key, _)) = self.cursor.seek(start_key)? { + return self.next_live_from(first_key); + } + Ok(None) + } + + /// Advance to the next distinct key from the current MDBX position + /// and return its non-deleted latest version, if any. + /// Next distinct key; if not positioned, start from `T::Key::default()`. + fn next(&mut self) -> OpProofsStorageResult> + where + T::Key: Default, + { + // If not positioned, start from the beginning (default key). + if self.cursor.current()?.is_none() { + let Some((first_key, _)) = self.cursor.seek(T::Key::default())? else { + return Ok(None); + }; + return self.next_live_from(first_key); + } + + // Otherwise advance to next distinct key and resume the walk. + let Some((next_key, _)) = self.cursor.next_no_dup()? 
+        else {
+            return Ok(None);
+        };
+        self.next_live_from(next_key)
+    }
+}
+
+/// MDBX implementation of [`TrieCursor`].
+#[derive(Debug)]
+pub struct MdbxTrieCursor<T, Cursor> {
+    inner: BlockNumberVersionedCursor<T, Cursor>,
+    hashed_address: Option<B256>,
+}
+
+impl<
+        V,
+        T: Table<Value = VersionedValue<V>> + DupSort<SubKey = u64>,
+        Cursor: DbCursorRO<T> + DbDupCursorRO<T>,
+    > MdbxTrieCursor<T, Cursor>
+{
+    /// Initializes new [`MdbxTrieCursor`].
+    pub const fn new(cursor: Cursor, max_block_number: u64, hashed_address: Option<B256>) -> Self {
+        Self { inner: BlockNumberVersionedCursor::new(cursor, max_block_number), hashed_address }
+    }
+}
+
+impl<Cursor> TrieCursor for MdbxTrieCursor<AccountTrieHistory, Cursor>
+where
+    Cursor: DbCursorRO<AccountTrieHistory> + DbDupCursorRO<AccountTrieHistory> + Send + Sync,
+{
+    fn seek_exact(
+        &mut self,
+        path: Nibbles,
+    ) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> {
+        Ok(self
+            .inner
+            .seek_exact(StoredNibbles(path))
+            .map(|opt| opt.map(|(StoredNibbles(n), node)| (n, node)))?)
+    }
+
+    fn seek(
+        &mut self,
+        path: Nibbles,
+    ) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> {
+        Ok(self
+            .inner
+            .seek(StoredNibbles(path))
+            .map(|opt| opt.map(|(StoredNibbles(n), node)| (n, node)))?)
+    }
+
+    fn next(&mut self) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> {
+        Ok(self.inner.next().map(|opt| opt.map(|(StoredNibbles(n), node)| (n, node)))?)
+    }
+
+    fn current(&mut self) -> Result<Option<Nibbles>, DatabaseError> {
+        self.inner.cursor.current().map(|opt| opt.map(|(StoredNibbles(n), _)| n))
+    }
+
+    fn reset(&mut self) {
+        // Database cursors are stateless, no reset needed
+    }
+}
+
+impl<Cursor> TrieCursor for MdbxTrieCursor<StorageTrieHistory, Cursor>
+where
+    Cursor: DbCursorRO<StorageTrieHistory> + DbDupCursorRO<StorageTrieHistory> + Send + Sync,
+{
+    fn seek_exact(
+        &mut self,
+        path: Nibbles,
+    ) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> {
+        if let Some(address) = self.hashed_address {
+            let key = StorageTrieKey::new(address, StoredNibbles(path));
+            return Ok(self.inner.seek_exact(key).map(|opt| {
+                opt.and_then(|(k, node)| (k.hashed_address == address).then_some((k.path.0, node)))
+            })?)
+        }
+        Ok(None)
+    }
+
+    fn seek(
+        &mut self,
+        path: Nibbles,
+    ) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> {
+        if let Some(address) = self.hashed_address {
+            let key = StorageTrieKey::new(address, StoredNibbles(path));
+            return Ok(self.inner.seek(key).map(|opt| {
+                opt.and_then(|(k, node)| (k.hashed_address == address).then_some((k.path.0, node)))
+            })?)
+        }
+        Ok(None)
+    }
+
+    fn next(&mut self) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> {
+        if let Some(address) = self.hashed_address {
+            return Ok(self.inner.next().map(|opt| {
+                opt.and_then(|(k, node)| (k.hashed_address == address).then_some((k.path.0, node)))
+            })?)
+        }
+        Ok(None)
+    }
+
+    fn current(&mut self) -> Result<Option<Nibbles>, DatabaseError> {
+        if let Some(address) = self.hashed_address {
+            return self.inner.cursor.current().map(|opt| {
+                opt.and_then(|(k, _)| (k.hashed_address == address).then_some(k.path.0))
+            });
+        }
+        Ok(None)
+    }
+
+    fn reset(&mut self) {
+        // Database cursors are stateless, no reset needed
+    }
+}
+
+impl<Cursor> TrieStorageCursor for MdbxTrieCursor<StorageTrieHistory, Cursor>
+where
+    Cursor: DbCursorRO<StorageTrieHistory> + DbDupCursorRO<StorageTrieHistory> + Send + Sync,
+{
+    fn set_hashed_address(&mut self, hashed_address: B256) {
+        self.hashed_address = Some(hashed_address);
+    }
+}
+
+/// MDBX implementation of [`HashedCursor`] for storage state.
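+///
+/// On top of the tombstone filtering done by [`BlockNumberVersionedCursor`], this cursor also
+/// skips zero-valued slots (a zero value means the slot was deleted) and stops at the boundary
+/// of the address it is bound to.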
+#[derive(Debug)]
+pub struct MdbxStorageCursor<Cursor> {
+    inner: BlockNumberVersionedCursor<HashedStorageHistory, Cursor>,
+    hashed_address: B256,
+}
+
+impl<Cursor> MdbxStorageCursor<Cursor>
+where
+    Cursor: DbCursorRO<HashedStorageHistory> + DbDupCursorRO<HashedStorageHistory> + Send + Sync,
+{
+    /// Initializes new [`MdbxStorageCursor`]
+    pub const fn new(cursor: Cursor, block_number: u64, hashed_address: B256) -> Self {
+        Self { inner: BlockNumberVersionedCursor::new(cursor, block_number), hashed_address }
+    }
+}
+
+impl<Cursor> HashedCursor for MdbxStorageCursor<Cursor>
+where
+    Cursor: DbCursorRO<HashedStorageHistory> + DbDupCursorRO<HashedStorageHistory> + Send + Sync,
+{
+    type Value = U256;
+
+    fn seek(&mut self, key: B256) -> Result<Option<(B256, Self::Value)>, DatabaseError> {
+        let storage_key = HashedStorageKey::new(self.hashed_address, key);
+
+        // hashed storage values can be zero, which means the storage slot is deleted, so we should
+        // skip those
+        let result = self.inner.seek(storage_key).map(|opt| {
+            opt.and_then(|(k, v)| {
+                // Only return entries that belong to the bound address
+                (k.hashed_address == self.hashed_address).then_some((k.hashed_storage_key, v.0))
+            })
+        })?;
+
+        if let Some((_, v)) = result &&
+            v.is_zero()
+        {
+            return self.next();
+        }
+
+        Ok(result)
+    }
+
+    fn next(&mut self) -> Result<Option<(B256, Self::Value)>, DatabaseError> {
+        loop {
+            let result = self.inner.next().map(|opt| {
+                opt.and_then(|(k, v)| {
+                    // Only return entries that belong to the bound address
+                    (k.hashed_address == self.hashed_address).then_some((k.hashed_storage_key, v.0))
+                })
+            })?;
+
+            // hashed storage values can be zero, which means the storage slot is deleted, so we
+            // should skip those
+            if let Some((_, v)) = result &&
+                v.is_zero()
+            {
+                continue;
+            }
+
+            return Ok(result);
+        }
+    }
+
+    fn reset(&mut self) {
+        // Database cursors are stateless, no reset needed
+    }
+}
+
+impl HashedStorageCursor for MdbxStorageCursor<Dup<'_, HashedStorageHistory>> {
+    fn is_storage_empty(&mut self) -> Result<bool, DatabaseError> {
+        Ok(self.seek(B256::ZERO)?.is_none())
+    }
+
+    fn set_hashed_address(&mut self, hashed_address: B256) {
+        self.hashed_address = hashed_address
+    }
+}
+
+/// MDBX implementation of [`HashedCursor`] for account state.
+#[derive(Debug)]
+pub struct MdbxAccountCursor<Cursor> {
+    inner: BlockNumberVersionedCursor<HashedAccountHistory, Cursor>,
+}
+
+impl<Cursor> MdbxAccountCursor<Cursor>
+where
+    Cursor: DbCursorRO<HashedAccountHistory> + DbDupCursorRO<HashedAccountHistory> + Send + Sync,
+{
+    /// Initializes new `MdbxAccountCursor`
+    pub const fn new(cursor: Cursor, block_number: u64) -> Self {
+        Self { inner: BlockNumberVersionedCursor::new(cursor, block_number) }
+    }
+}
+
+impl<Cursor> HashedCursor for MdbxAccountCursor<Cursor>
+where
+    Cursor: DbCursorRO<HashedAccountHistory> + DbDupCursorRO<HashedAccountHistory> + Send + Sync,
+{
+    type Value = Account;
+
+    fn seek(&mut self, key: B256) -> Result<Option<(B256, Self::Value)>, DatabaseError> {
+        Ok(self.inner.seek(key)?)
+    }
+
+    fn next(&mut self) -> Result<Option<(B256, Self::Value)>, DatabaseError> {
+        Ok(self.inner.next()?)
+    }
+
+    fn reset(&mut self) {
+        // Database cursors are stateless, no reset needed
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::db::{models, StorageValue};
+    use reth_db::{
+        mdbx::{init_db_for, DatabaseArguments},
+        DatabaseEnv,
+    };
+    use reth_db_api::{
+        cursor::DbDupCursorRW,
+        transaction::{DbTx, DbTxMut},
+        Database,
+    };
+    use reth_trie::{BranchNodeCompact, Nibbles, StoredNibbles};
+    use tempfile::TempDir;
+
+    fn setup_db() -> DatabaseEnv {
+        let tmp = TempDir::new().expect("create tmpdir");
+        init_db_for::<_, models::Tables>(tmp, DatabaseArguments::default()).expect("init db")
+    }
+
+    fn stored(path: Nibbles) -> StoredNibbles {
+        StoredNibbles(path)
+    }
+
+    fn node() -> BranchNodeCompact {
+        BranchNodeCompact::default()
+    }
+
+    fn append_account_trie(
+        wtx: &<DatabaseEnv as Database>::TXMut,
+        key: StoredNibbles,
+        block: u64,
+        val: Option<BranchNodeCompact>,
+    ) {
+        let mut c = wtx.cursor_dup_write::<AccountTrieHistory>().expect("dup write cursor");
+        let vv = VersionedValue { block_number: block, value: MaybeDeleted(val) };
+        c.append_dup(key, vv).expect("append dup");
+    }
+
+    fn append_storage_trie(
+        wtx: &<DatabaseEnv as Database>::TXMut,
+        address: B256,
+        path: Nibbles,
+        block: u64,
+        val: Option<BranchNodeCompact>,
+    ) {
+        let mut c = wtx.cursor_dup_write::<StorageTrieHistory>().expect("dup write cursor");
+        let key = StorageTrieKey::new(address, StoredNibbles(path));
+        let vv = VersionedValue { block_number: block, value: MaybeDeleted(val) };
+        c.append_dup(key, vv).expect("append dup");
+    }
+
+    fn append_hashed_storage(
+        wtx: &<DatabaseEnv as Database>::TXMut,
+        addr: B256,
+        slot: B256,
+        block: u64,
+        val: Option<U256>,
+    ) {
+        let mut c = wtx.cursor_dup_write::<HashedStorageHistory>().expect("dup write");
+        let key = HashedStorageKey::new(addr, slot);
+        let vv = VersionedValue { block_number: block, value: MaybeDeleted(val.map(StorageValue)) };
+        c.append_dup(key, vv).expect("append dup");
+    }
+
+    fn append_hashed_account(
+        wtx: &<DatabaseEnv as Database>::TXMut,
+        key: B256,
+        block: u64,
+        val: Option<Account>,
+    ) {
+        let mut c = wtx.cursor_dup_write::<HashedAccountHistory>().expect("dup write");
+        let vv = VersionedValue { block_number: block, value: MaybeDeleted(val) };
+        c.append_dup(key, vv).expect("append dup");
+    }
+
+    // Open a dup-RO cursor and wrap it in a BlockNumberVersionedCursor with a given bound.
+    fn version_cursor(
+        tx: &<DatabaseEnv as Database>::TX,
+        max_block: u64,
+    ) -> BlockNumberVersionedCursor<AccountTrieHistory, Dup<'_, AccountTrieHistory>> {
+        let cur = tx.cursor_dup_read::<AccountTrieHistory>().expect("dup ro cursor");
+        BlockNumberVersionedCursor::new(cur, max_block)
+    }
+
+    fn account_trie_cursor(
+        tx: &'_ <DatabaseEnv as Database>::TX,
+        max_block: u64,
+    ) -> MdbxTrieCursor<AccountTrieHistory, Dup<'_, AccountTrieHistory>> {
+        let c = tx.cursor_dup_read::<AccountTrieHistory>().expect("dup ro cursor");
+        // For account trie the address is not used; pass None.
+        MdbxTrieCursor::new(c, max_block, None)
+    }
+
+    // Helper: build a Storage trie cursor bound to an address
+    fn storage_trie_cursor(
+        tx: &'_ <DatabaseEnv as Database>::TX,
+        max_block: u64,
+        address: B256,
+    ) -> MdbxTrieCursor<StorageTrieHistory, Dup<'_, StorageTrieHistory>> {
+        let c = tx.cursor_dup_read::<StorageTrieHistory>().expect("dup ro cursor");
+        MdbxTrieCursor::new(c, max_block, Some(address))
+    }
+
+    fn storage_cursor(
+        tx: &'_ <DatabaseEnv as Database>::TX,
+        max_block: u64,
+        address: B256,
+    ) -> MdbxStorageCursor<Dup<'_, HashedStorageHistory>> {
+        let c = tx.cursor_dup_read::<HashedStorageHistory>().expect("dup ro cursor");
+        MdbxStorageCursor::new(c, max_block, address)
+    }
+
+    fn account_cursor(
+        tx: &'_ <DatabaseEnv as Database>::TX,
+        max_block: u64,
+    ) -> MdbxAccountCursor<Dup<'_, HashedAccountHistory>> {
+        let c = tx.cursor_dup_read::<HashedAccountHistory>().expect("dup ro cursor");
+        MdbxAccountCursor::new(c, max_block)
+    }
+
+    // Assert helper: ensure the chosen VersionedValue has the expected block and deletion flag.
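+    //
+    // The helpers compose as in the tests below (sketch): write versions inside a RW
+    // transaction, commit, then resolve through a bounded cursor:
+    //
+    //   let wtx = db.tx_mut().expect("rw tx");
+    //   append_account_trie(&wtx, k.clone(), 10, Some(node()));
+    //   wtx.commit().expect("commit");
+    //
+    //   let tx = db.tx().expect("ro tx");
+    //   let mut cur = version_cursor(&tx, 50);
+    //   let latest = cur.latest_version_for_key(k); // newest version with block <= 50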
+    fn assert_block(
+        got: Option<(StoredNibbles, VersionedValue<BranchNodeCompact>)>,
+        expected_block: u64,
+        expect_deleted: bool,
+    ) {
+        let (_, vv) = got.expect("expected Some(..)");
+        assert_eq!(vv.block_number, expected_block, "wrong block chosen");
+        let is_deleted = matches!(vv.value, MaybeDeleted(None));
+        assert_eq!(is_deleted, expect_deleted, "tombstone mismatch");
+    }
+
+    /// No entry for key → None.
+    #[test]
+    fn latest_version_for_key_none_when_key_absent() {
+        let db = setup_db();
+        let tx = db.tx().expect("ro tx");
+        let mut cursor = version_cursor(&tx, 100);
+
+        let out = cursor
+            .latest_version_for_key(stored(Nibbles::default()))
+            .expect("should not return error");
+        assert!(out.is_none(), "absent key must return None");
+    }
+
+    /// Exact match at max (live) → pick it.
+    #[test]
+    fn latest_version_for_key_picks_value_at_max_if_present() {
+        let db = setup_db();
+        let k = stored(Nibbles::from_nibbles([0x0A]));
+        {
+            let wtx = db.tx_mut().expect("rw tx");
+            append_account_trie(&wtx, k.clone(), 10, Some(node()));
+            append_account_trie(&wtx, k.clone(), 50, Some(node())); // == max
+            wtx.commit().expect("commit");
+        }
+
+        let tx = db.tx().expect("ro tx");
+        let mut core = version_cursor(&tx, 50);
+
+        let out = core.latest_version_for_key(k).expect("ok");
+        assert_block(out, 50, false);
+    }
+
+    /// When `seek_by_key_subkey` lands on a subkey > max, fall back to the previous dup.
+    #[test]
+    fn latest_version_for_key_picks_latest_below_max_when_next_is_above() {
+        let db = setup_db();
+        let k = stored(Nibbles::from_nibbles([0x0A]));
+        {
+            let wtx = db.tx_mut().expect("rw tx");
+            append_account_trie(&wtx, k.clone(), 10, Some(node()));
+            append_account_trie(&wtx, k.clone(), 30, Some(node())); // expected
+            append_account_trie(&wtx, k.clone(), 70, Some(node())); // > max
+            wtx.commit().expect("commit");
+        }
+
+        let tx = db.tx().expect("ro tx");
+        let mut core = version_cursor(&tx, 50);
+
+        let out = core.latest_version_for_key(k).expect("ok");
+        assert_block(out, 30, false);
+    }
+
+    /// No ≥ max but key exists → use last < max.
+    #[test]
+    fn latest_version_for_key_picks_last_below_max_when_none_at_or_above() {
+        let db = setup_db();
+        let k = stored(Nibbles::from_nibbles([0x0A]));
+        {
+            let wtx = db.tx_mut().expect("rw tx");
+            append_account_trie(&wtx, k.clone(), 10, Some(node()));
+            append_account_trie(&wtx, k.clone(), 40, Some(node())); // expected (max=100)
+            wtx.commit().expect("commit");
+        }
+
+        let tx = db.tx().expect("ro tx");
+        let mut core = version_cursor(&tx, 100);
+
+        let out = core.latest_version_for_key(k).expect("ok");
+        assert_block(out, 40, false);
+    }
+
+    /// All entries are > max → None.
+    #[test]
+    fn latest_version_for_key_none_when_everything_is_above_max() {
+        let db = setup_db();
+        let k1 = stored(Nibbles::from_nibbles([0x0A]));
+        let k2 = stored(Nibbles::from_nibbles([0x0B]));
+
+        {
+            let wtx = db.tx_mut().expect("rw tx");
+            append_account_trie(&wtx, k1.clone(), 60, Some(node()));
+            append_account_trie(&wtx, k1.clone(), 70, Some(node()));
+            append_account_trie(&wtx, k2, 40, Some(node()));
+            wtx.commit().expect("commit");
+        }
+
+        let tx = db.tx().expect("ro tx");
+        let mut core = version_cursor(&tx, 50);
+
+        let out = core.latest_version_for_key(k1).expect("ok");
+        assert!(out.is_none(), "no dup ≤ max ⇒ None");
+    }
+
+    /// Single dup < max → pick it.
+ #[test] + fn latest_version_for_key_picks_single_below_max() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 25, Some(node())); // < max + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 50); + + let out = core.latest_version_for_key(k).expect("ok"); + assert_block(out, 25, false); + } + + /// Single dup == max → pick it. + #[test] + fn latest_version_for_key_picks_single_at_max() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 50, Some(node())); // == max + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 50); + + let out = core.latest_version_for_key(k).expect("ok"); + assert_block(out, 50, false); + } + + /// Latest ≤ max is a tombstone → return it (this API doesn't filter). + #[test] + fn latest_version_for_key_returns_tombstone_if_latest_is_deleted() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 10, Some(node())); + append_account_trie(&wtx, k.clone(), 90, None); // latest ≤ max, but deleted + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 100); + + let out = core.latest_version_for_key(k).expect("ok"); + assert_block(out, 90, true); + } + + /// Should skip tombstones and return None when the latest ≤ max is deleted. + #[test] + fn seek_exact_skips_tombstone_returns_none() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 10, Some(node())); + append_account_trie(&wtx, k.clone(), 90, None); // latest ≤ max is tombstoned + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut core = version_cursor(&tx, 100); + + let out = core.seek_exact(k).expect("ok"); + assert!(out.is_none(), "seek_exact must filter out deleted latest value"); + } + + /// Empty table → None. + #[test] + fn seek_empty_returns_none() { + let db = setup_db(); + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 100); + + let out = cur.seek(stored(Nibbles::from_nibbles([0x0A]))).expect("ok"); + assert!(out.is_none()); + } + + /// Start at an existing key whose latest ≤ max is live → returns that key. + #[test] + fn seek_at_live_key_returns_it() { + let db = setup_db(); + let k = stored(Nibbles::from_nibbles([0x0A])); + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k.clone(), 10, Some(node())); + append_account_trie(&wtx, k.clone(), 20, Some(node())); // latest ≤ max + wtx.commit().expect("commit"); + } + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 50); + + let out = cur.seek(k.clone()).expect("ok").expect("some"); + assert_eq!(out.0, k); + } + + /// Start at an existing key whose latest ≤ max is tombstoned → skip to next key with live + /// value. 
+ #[test] + fn seek_skips_tombstoned_key_to_next_live_key() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); + + { + let wtx = db.tx_mut().expect("rw tx"); + // Key 0x10 latest ≤ max is deleted + append_account_trie(&wtx, k1.clone(), 10, Some(node())); + append_account_trie(&wtx, k1.clone(), 20, None); // tombstone at latest ≤ max + // Next key has live + append_account_trie(&wtx, k2.clone(), 5, Some(node())); + wtx.commit().expect("commit"); + } + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 50); + + let out = cur.seek(k1).expect("ok").expect("some"); + assert_eq!(out.0, k2); + } + + /// Start between keys → returns the next key’s live latest ≤ max. + #[test] + fn seek_between_keys_returns_next_key() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0C])); + let k3 = stored(Nibbles::from_nibbles([0x0B])); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1, 10, Some(node())); + append_account_trie(&wtx, k2.clone(), 10, Some(node())); + wtx.commit().expect("commit"); + } + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 100); + + // Start at 0x15 (between 0x10 and 0x20) + + let out = cur.seek(k3).expect("ok").expect("some"); + assert_eq!(out.0, k2); + } + + /// Start after the last key → None. + #[test] + fn seek_after_last_returns_none() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); + let k3 = stored(Nibbles::from_nibbles([0x0C])); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1, 10, Some(node())); + append_account_trie(&wtx, k2, 10, Some(node())); + wtx.commit().expect("commit"); + } + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 100); + + let out = cur.seek(k3).expect("ok"); + assert!(out.is_none()); + } + + /// If the first key at-or-after has only versions > max, it is effectively not visible → skip + /// to next. + #[test] + fn seek_skips_keys_with_only_versions_above_max() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1.clone(), 60, Some(node())); + append_account_trie(&wtx, k2.clone(), 40, Some(node())); + wtx.commit().expect("commit"); + } + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 50); + + let out = cur.seek(k1).expect("ok").expect("some"); + assert_eq!(out.0, k2); + } + + /// Start at a key with mixed versions; latest ≤ max is tombstone → skip to next key with live. + #[test] + fn seek_mixed_versions_tombstone_latest_skips_to_next_key() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1.clone(), 10, Some(node())); + append_account_trie(&wtx, k1.clone(), 30, None); + append_account_trie(&wtx, k2.clone(), 5, Some(node())); + wtx.commit().expect("commit"); + } + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 30); + + let out = cur.seek(k1).expect("ok").expect("some"); + assert_eq!(out.0, k2); + } + + /// When not positioned should start from default key and return the first live key. 
+ #[test] + fn next_unpositioned_starts_from_default_returns_first_live() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1.clone(), 10, Some(node())); // first live + append_account_trie(&wtx, k2, 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + // Unpositioned cursor + let mut cur = version_cursor(&tx, 100); + + let out = cur.next().expect("ok").expect("some"); + assert_eq!(out.0, k1); + } + + /// After positioning on a live key via `seek()`, `next()` should advance to the next live key. + #[test] + fn next_advances_from_current_live_to_next_live() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1.clone(), 10, Some(node())); // live + append_account_trie(&wtx, k2.clone(), 10, Some(node())); // next live + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 100); + + // Position at k1 + let _ = cur.seek(k1).expect("ok").expect("some"); + // Next should yield k2 + let out = cur.next().expect("ok").expect("some"); + assert_eq!(out.0, k2); + } + + /// If the next key's latest ≤ max is tombstone, `next()` should skip to the next live key. + #[test] + fn next_skips_tombstoned_key_to_next_live() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); // will be tombstoned at latest ≤ max + let k3 = stored(Nibbles::from_nibbles([0x0C])); // next live + + { + let wtx = db.tx_mut().expect("rw tx"); + // k1 live + append_account_trie(&wtx, k1.clone(), 10, Some(node())); + // k2: latest ≤ max is tombstone + append_account_trie(&wtx, k2.clone(), 10, Some(node())); + append_account_trie(&wtx, k2, 20, None); + // k3 live + append_account_trie(&wtx, k3.clone(), 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 50); + + // Position at k1 + let _ = cur.seek(k1).expect("ok").expect("some"); + // next should skip k2 (tombstoned latest) and return k3 + let out = cur.next().expect("ok").expect("some"); + assert_eq!(out.0, k3); + } + + /// If positioned on the last live key, `next()` should return None (EOF). + #[test] + fn next_returns_none_at_eof() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); + let k2 = stored(Nibbles::from_nibbles([0x0B])); // last key + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, k1, 10, Some(node())); + append_account_trie(&wtx, k2.clone(), 10, Some(node())); // last live + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 100); + + // Position at the last key k2 + let _ = cur.seek(k2).expect("ok").expect("some"); + // `next()` should hit EOF + let out = cur.next().expect("ok"); + assert!(out.is_none()); + } + + /// If the first key has only versions > max, `next()` should skip it and return the next live + /// key. 
+ #[test] + fn next_skips_keys_with_only_versions_above_max() { + let db = setup_db(); + let k1 = stored(Nibbles::from_nibbles([0x0A])); // only > max + let k2 = stored(Nibbles::from_nibbles([0x0B])); // ≤ max live + + { + let wtx = db.tx_mut().expect("rw tx"); + // k1 only above max (max=50) + append_account_trie(&wtx, k1, 60, Some(node())); + // k2 within max + append_account_trie(&wtx, k2.clone(), 40, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + // Unpositioned; `next()` will start from default and walk + let mut cur = version_cursor(&tx, 50); + + let out = cur.next().expect("ok").expect("some"); + assert_eq!(out.0, k2); + } + + /// Empty table: `next()` should return None. + #[test] + fn next_on_empty_returns_none() { + let db = setup_db(); + let tx = db.tx().expect("ro tx"); + let mut cur = version_cursor(&tx, 100); + + let out = cur.next().expect("ok"); + assert!(out.is_none()); + } + + // ----------------- Account trie cursor thin-wrapper checks ----------------- + + #[test] + fn account_seek_exact_live_maps_key_and_value() { + let db = setup_db(); + let k = Nibbles::from_nibbles([0x0A]); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, StoredNibbles(k), 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + + // Build wrapper + let mut cur = account_trie_cursor(&tx, 100); + + // Wrapper should return (Nibbles, BranchNodeCompact) + let out = TrieCursor::seek_exact(&mut cur, k).expect("ok").expect("some"); + assert_eq!(out.0, k); + } + + #[test] + fn account_seek_exact_filters_tombstone() { + let db = setup_db(); + let k = Nibbles::from_nibbles([0x0B]); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, StoredNibbles(k), 5, Some(node())); + append_account_trie(&wtx, StoredNibbles(k), 9, None); // latest ≤ max tombstone + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur = account_trie_cursor(&tx, 10); + + let out = TrieCursor::seek_exact(&mut cur, k).expect("ok"); + assert!(out.is_none(), "account seek_exact must filter tombstone"); + } + + #[test] + fn account_seek_and_next_and_current_roundtrip() { + let db = setup_db(); + let k1 = Nibbles::from_nibbles([0x01]); + let k2 = Nibbles::from_nibbles([0x02]); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_account_trie(&wtx, StoredNibbles(k1), 10, Some(node())); + append_account_trie(&wtx, StoredNibbles(k2), 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur = account_trie_cursor(&tx, 100); + + // seek at k1 + let out1 = TrieCursor::seek(&mut cur, k1).expect("ok").expect("some"); + assert_eq!(out1.0, k1); + + // current should be k1 + let cur_k = TrieCursor::current(&mut cur).expect("ok").expect("some"); + assert_eq!(cur_k, k1); + + // next should move to k2 + let out2 = TrieCursor::next(&mut cur).expect("ok").expect("some"); + assert_eq!(out2.0, k2); + } + + // ----------------- Storage trie cursor thin-wrapper checks ----------------- + + #[test] + fn storage_seek_exact_respects_address_filter() { + let db = setup_db(); + + let addr_a = B256::from([0xAA; 32]); + let addr_b = B256::from([0xBB; 32]); + + let path = Nibbles::from_nibbles([0x0D]); + + { + let wtx = db.tx_mut().expect("rw tx"); + // insert only under B + append_storage_trie(&wtx, addr_b, path, 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + + // Cursor bound to A must not see B’s data + let 
mut cur_a = storage_trie_cursor(&tx, 100, addr_a); + let out_a = TrieCursor::seek_exact(&mut cur_a, path).expect("ok"); + assert!(out_a.is_none(), "no data for addr A"); + + // Cursor bound to B should see it + let mut cur_b = storage_trie_cursor(&tx, 100, addr_b); + let out_b = TrieCursor::seek_exact(&mut cur_b, path).expect("ok").expect("some"); + assert_eq!(out_b.0, path); + } + + #[test] + fn storage_seek_returns_first_key_for_bound_address() { + let db = setup_db(); + + let addr_a = B256::from([0x11; 32]); + let addr_b = B256::from([0x22; 32]); + + let p1 = Nibbles::from_nibbles([0x01]); + let p2 = Nibbles::from_nibbles([0x02]); + let p3 = Nibbles::from_nibbles([0x03]); + + { + let wtx = db.tx_mut().expect("rw tx"); + // For A: only p2 + append_storage_trie(&wtx, addr_a, p2, 10, Some(node())); + // For B: p1 + append_storage_trie(&wtx, addr_b, p1, 10, Some(node())); + wtx.commit().expect("commit"); + } + + // test seek behaviour + { + let tx = db.tx().expect("ro tx"); + let mut cur_a = storage_trie_cursor(&tx, 100, addr_a); + + // seek at p1: for A there is no p1; the next key >= p1 under A is p2 + let out = TrieCursor::seek(&mut cur_a, p1).expect("ok").expect("some"); + assert_eq!(out.0, p2); + + // seek at p2: exact match + let out = TrieCursor::seek(&mut cur_a, p2).expect("ok").expect("some"); + assert_eq!(out.0, p2); + + // seek at p3: no p3 under A; no next key ≥ p3 under A → None + let out = TrieCursor::seek(&mut cur_a, p3).expect("ok"); + assert!(out.is_none(), "no key ≥ p3 under A"); + } + + // test next behaviour + { + let tx = db.tx().expect("ro tx"); + let mut cur_a = storage_trie_cursor(&tx, 100, addr_a); + + let out = TrieCursor::next(&mut cur_a).expect("ok").expect("some"); + assert_eq!(out.0, p2); + + // next should yield None as there is no further key under A + let out = TrieCursor::next(&mut cur_a).expect("ok"); + assert!(out.is_none(), "no more keys under A"); + + // current should return None + let out = TrieCursor::current(&mut cur_a).expect("ok"); + assert!(out.is_none(), "no current key after EOF"); + } + + // test seek_exact behaviour + { + let tx = db.tx().expect("ro tx"); + let mut cur_a = storage_trie_cursor(&tx, 100, addr_a); + + // seek_exact at p1: no exact match + let out = TrieCursor::seek_exact(&mut cur_a, p1).expect("ok"); + assert!(out.is_none(), "no exact p1 under A"); + + // seek_exact at p2: exact match + let out = TrieCursor::seek_exact(&mut cur_a, p2).expect("ok").expect("some"); + assert_eq!(out.0, p2); + + // seek_exact at p3: no exact match + let out = TrieCursor::seek_exact(&mut cur_a, p3).expect("ok"); + assert!(out.is_none(), "no exact p3 under A"); + } + } + + #[test] + fn storage_next_stops_at_address_boundary() { + let db = setup_db(); + + let addr_a = B256::from([0x33; 32]); + let addr_b = B256::from([0x44; 32]); + + let p1 = Nibbles::from_nibbles([0x05]); // under A + let p2 = Nibbles::from_nibbles([0x06]); // under B (next key overall) + + { + let wtx = db.tx_mut().expect("rw tx"); + append_storage_trie(&wtx, addr_a, p1, 10, Some(node())); + append_storage_trie(&wtx, addr_b, p2, 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur_a = storage_trie_cursor(&tx, 100, addr_a); + + // position at p1 (A) + let _ = TrieCursor::seek_exact(&mut cur_a, p1).expect("ok").expect("some"); + + // next should reach boundary; impl filters different address and returns None + let out = TrieCursor::next(&mut cur_a).expect("ok"); + assert!(out.is_none(), "next() should stop when next key is a 
different address"); + } + + #[test] + fn storage_current_maps_key() { + let db = setup_db(); + + let addr = B256::from([0x55; 32]); + let p = Nibbles::from_nibbles([0x09]); + + { + let wtx = db.tx_mut().expect("rw tx"); + append_storage_trie(&wtx, addr, p, 10, Some(node())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro tx"); + let mut cur = storage_trie_cursor(&tx, 100, addr); + + let _ = TrieCursor::seek_exact(&mut cur, p).expect("ok").expect("some"); + + let now = TrieCursor::current(&mut cur).expect("ok").expect("some"); + assert_eq!(now, p); + } + + #[test] + fn hashed_storage_seek_maps_slot_and_value() { + let db = setup_db(); + let addr = B256::from([0xAA; 32]); + let slot = B256::from([0x10; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_storage(&wtx, addr, slot, 10, Some(U256::from(7))); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = storage_cursor(&tx, 100, addr); + + let (got_slot, got_val) = cur.seek(slot).expect("ok").expect("some"); + assert_eq!(got_slot, slot); + assert_eq!(got_val, U256::from(7)); + } + + #[test] + fn hashed_storage_seek_filters_tombstone() { + let db = setup_db(); + let addr = B256::from([0xAB; 32]); + let slot = B256::from([0x11; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_storage(&wtx, addr, slot, 5, Some(U256::from(1))); + append_hashed_storage(&wtx, addr, slot, 9, None); // latest ≤ max is tombstone + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = storage_cursor(&tx, 10, addr); + + let out = cur.seek(slot).expect("ok"); + assert!(out.is_none(), "wrapper must filter tombstoned latest"); + } + + #[test] + fn hashed_storage_seek_and_next_roundtrip() { + let db = setup_db(); + let addr = B256::from([0xAC; 32]); + let s1 = B256::from([0x01; 32]); + let s2 = B256::from([0x02; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_storage(&wtx, addr, s1, 10, Some(U256::from(11))); + append_hashed_storage(&wtx, addr, s2, 10, Some(U256::from(22))); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = storage_cursor(&tx, 100, addr); + + let (k1, v1) = cur.seek(s1).expect("ok").expect("some"); + assert_eq!((k1, v1), (s1, U256::from(11))); + + let (k2, v2) = cur.next().expect("ok").expect("some"); + assert_eq!((k2, v2), (s2, U256::from(22))); + } + + #[test] + fn hashed_storage_address_boundary() { + let db = setup_db(); + let addr1 = B256::from([0xAC; 32]); + let addr2 = B256::from([0xAD; 32]); + let s1 = B256::from([0x01; 32]); + let s2 = B256::from([0x02; 32]); + let s3 = B256::from([0x03; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_storage(&wtx, addr1, s1, 10, Some(U256::from(11))); + append_hashed_storage(&wtx, addr1, s2, 10, Some(U256::from(22))); + wtx.commit().expect("commit"); + } + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_storage(&wtx, addr2, s1, 10, Some(U256::from(33))); + append_hashed_storage(&wtx, addr2, s2, 10, Some(U256::from(44))); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = storage_cursor(&tx, 100, addr1); + + let (k1, v1) = cur.next().expect("ok").expect("some"); + assert_eq!((k1, v1), (s1, U256::from(11))); + + let (k2, v2) = cur.next().expect("ok").expect("some"); + assert_eq!((k2, v2), (s2, U256::from(22))); + + let out = cur.next().expect("ok"); + assert!(out.is_none(), "should stop at address boundary"); + + let (k1, v1) = cur.seek(s1).expect("ok").expect("some"); + 
assert_eq!((k1, v1), (s1, U256::from(11))); + + let (k2, v2) = cur.seek(s2).expect("ok").expect("some"); + assert_eq!((k2, v2), (s2, U256::from(22))); + + let out = cur.seek(s3).expect("ok"); + assert!(out.is_none(), "should not see keys from other address"); + } + + #[test] + fn hashed_account_seek_maps_key_and_value() { + let db = setup_db(); + let key = B256::from([0x20; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_account(&wtx, key, 10, Some(Account::default())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = account_cursor(&tx, 100); + + let (got_key, _acc) = cur.seek(key).expect("ok").expect("some"); + assert_eq!(got_key, key); + } + + #[test] + fn hashed_account_seek_filters_tombstone() { + let db = setup_db(); + let key = B256::from([0x21; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_account(&wtx, key, 5, Some(Account::default())); + append_hashed_account(&wtx, key, 9, None); // latest ≤ max is tombstone + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = account_cursor(&tx, 10); + + let out = cur.seek(key).expect("ok"); + assert!(out.is_none(), "wrapper must filter tombstoned latest"); + } + + #[test] + fn hashed_account_seek_and_next_roundtrip() { + let db = setup_db(); + let k1 = B256::from([0x01; 32]); + let k2 = B256::from([0x02; 32]); + + { + let wtx = db.tx_mut().expect("rw"); + append_hashed_account(&wtx, k1, 10, Some(Account::default())); + append_hashed_account(&wtx, k2, 10, Some(Account::default())); + wtx.commit().expect("commit"); + } + + let tx = db.tx().expect("ro"); + let mut cur = account_cursor(&tx, 100); + + let (got1, _) = cur.seek(k1).expect("ok").expect("some"); + assert_eq!(got1, k1); + + let (got2, _) = cur.next().expect("ok").expect("some"); + assert_eq!(got2, k2); + } +} diff --git a/crates/optimism/trie/src/db/mod.rs b/crates/optimism/trie/src/db/mod.rs new file mode 100644 index 00000000..042b6820 --- /dev/null +++ b/crates/optimism/trie/src/db/mod.rs @@ -0,0 +1,17 @@ +//! MDBX implementation of [`OpProofsStore`](crate::OpProofsStore). +//! +//! This module provides a complete MDBX implementation of the +//! [`OpProofsStore`](crate::OpProofsStore) trait. It uses the [`reth_db`] +//! crate for database interactions and defines the necessary tables and models for storing trie +//! branches, accounts, and storage leaves. + +mod models; +pub use models::*; + +mod store; +pub use store::MdbxProofsStorage; + +mod cursor; +pub use cursor::{ + BlockNumberVersionedCursor, MdbxAccountCursor, MdbxStorageCursor, MdbxTrieCursor, +}; diff --git a/crates/optimism/trie/src/db/models/block.rs b/crates/optimism/trie/src/db/models/block.rs new file mode 100644 index 00000000..6c5bd9c5 --- /dev/null +++ b/crates/optimism/trie/src/db/models/block.rs @@ -0,0 +1,76 @@ +use alloy_eips::BlockNumHash; +use alloy_primitives::B256; +use bytes::BufMut; +use derive_more::{From, Into}; +use reth_db::{ + table::{Compress, Decompress}, + DatabaseError, +}; +use serde::{Deserialize, Serialize}; + +/// Wrapper for block number and block hash tuple to implement [`Compress`]/[`Decompress`]. +/// +/// Used for storing block metadata (number + hash). 
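+///
+/// The on-disk encoding is a fixed 40 bytes; a sketch of the layout implemented by the
+/// [`Compress`]/[`Decompress`] impls below:
+///
+/// ```text
+/// [ number: u64 (big-endian) | hash: 32 bytes ]
+///   bytes 0..8                 bytes 8..40
+/// ```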
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, From, Into)]
+pub struct BlockNumberHash(BlockNumHash);
+
+impl Compress for BlockNumberHash {
+    type Compressed = Vec<u8>;
+
+    fn compress_to_buf<B: BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
+        // Encode block number (8 bytes, big-endian) + hash (32 bytes) = 40 bytes total
+        buf.put_u64(self.0.number);
+        buf.put_slice(self.0.hash.as_slice());
+    }
+}
+
+impl Decompress for BlockNumberHash {
+    fn decompress(value: &[u8]) -> Result<Self, DatabaseError> {
+        if value.len() != 40 {
+            return Err(DatabaseError::Decode);
+        }
+
+        let number = u64::from_be_bytes(value[..8].try_into().map_err(|_| DatabaseError::Decode)?);
+        let hash = B256::from_slice(&value[8..40]);
+
+        Ok(Self(BlockNumHash { number, hash }))
+    }
+}
+
+impl BlockNumberHash {
+    /// Create new instance.
+    pub const fn new(number: u64, hash: B256) -> Self {
+        Self(BlockNumHash { number, hash })
+    }
+
+    /// Get the block number.
+    pub const fn number(&self) -> u64 {
+        self.0.number
+    }
+
+    /// Get the block hash.
+    pub const fn hash(&self) -> &B256 {
+        &self.0.hash
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use alloy_primitives::B256;
+
+    #[test]
+    fn test_block_number_hash_roundtrip() {
+        let test_cases = vec![
+            BlockNumberHash::new(0, B256::ZERO),
+            BlockNumberHash::new(42, B256::repeat_byte(0xaa)),
+            BlockNumberHash::new(u64::MAX, B256::repeat_byte(0xff)),
+        ];
+
+        for original in test_cases {
+            let compressed = original.compress();
+            let decompressed = BlockNumberHash::decompress(&compressed).unwrap();
+            assert_eq!(original, decompressed);
+        }
+    }
+}
diff --git a/crates/optimism/trie/src/db/models/change_set.rs b/crates/optimism/trie/src/db/models/change_set.rs
new file mode 100644
index 00000000..f2328590
--- /dev/null
+++ b/crates/optimism/trie/src/db/models/change_set.rs
@@ -0,0 +1,125 @@
+use crate::db::{HashedStorageKey, StorageTrieKey};
+use alloy_primitives::B256;
+use reth_db::{
+    table::{self, Decode, Encode},
+    DatabaseError,
+};
+use reth_trie::StoredNibbles;
+use serde::{Deserialize, Serialize};
+
+/// The keys of the entries in the history tables.
+#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+pub struct ChangeSet {
+    /// Keys changed in [`AccountTrieHistory`](super::AccountTrieHistory) table.
+    pub account_trie_keys: Vec<StoredNibbles>,
+    /// Keys changed in [`StorageTrieHistory`](super::StorageTrieHistory) table.
+    pub storage_trie_keys: Vec<StorageTrieKey>,
+    /// Keys changed in [`HashedAccountHistory`](super::HashedAccountHistory) table.
+    pub hashed_account_keys: Vec<B256>,
+    /// Keys changed in [`HashedStorageHistory`](super::HashedStorageHistory) table.
+    pub hashed_storage_keys: Vec<HashedStorageKey>,
+}
+
+impl table::Encode for ChangeSet {
+    type Encoded = Vec<u8>;
+
+    fn encode(self) -> Self::Encoded {
+        bincode::serialize(&self).expect("ChangeSet serialization should not fail")
+    }
+}
+
+impl table::Decode for ChangeSet {
+    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
+        bincode::deserialize(value).map_err(|_| DatabaseError::Decode)
+    }
+}
+
+impl table::Compress for ChangeSet {
+    type Compressed = Vec<u8>;
+
+    fn compress_to_buf<B: bytes::BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
+        let encoded = self.clone().encode();
+        buf.put_slice(&encoded);
+    }
+}
+
+impl table::Decompress for ChangeSet {
+    fn decompress(value: &[u8]) -> Result<Self, DatabaseError> {
+        Self::decode(value)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use alloy_primitives::B256;
+    use reth_db::table::{Compress, Decompress};
+
+    #[test]
+    fn test_encode_decode_empty_change_set() {
+        let change_set = ChangeSet {
+            account_trie_keys: vec![],
+            storage_trie_keys: vec![],
+            hashed_account_keys: vec![],
+            hashed_storage_keys: vec![],
+        };
+
+        let encoded = change_set.clone().encode();
+        let decoded = ChangeSet::decode(&encoded).expect("Failed to decode");
+        assert_eq!(change_set, decoded);
+    }
+
+    #[test]
+    fn test_encode_decode_populated_change_set() {
+        let account_key = StoredNibbles::from(vec![1, 2, 3, 4]);
+        let storage_key = StorageTrieKey {
+            hashed_address: B256::repeat_byte(0x11),
+            path: StoredNibbles::from(vec![5, 6, 7, 8]),
+        };
+        let hashed_storage_key = HashedStorageKey {
+            hashed_address: B256::repeat_byte(0x22),
+            hashed_storage_key: B256::repeat_byte(0x33),
+        };
+
+        let change_set = ChangeSet {
+            account_trie_keys: vec![account_key],
+            storage_trie_keys: vec![storage_key],
+            hashed_account_keys: vec![B256::repeat_byte(0x44)],
+            hashed_storage_keys: vec![hashed_storage_key],
+        };
+
+        let encoded = change_set.clone().encode();
+        let decoded = ChangeSet::decode(&encoded).expect("Failed to decode");
+        assert_eq!(change_set, decoded);
+    }
+
+    #[test]
+    fn test_decode_invalid_data() {
+        let invalid_data = vec![0xFF; 32];
+        let result = ChangeSet::decode(&invalid_data);
+        assert!(result.is_err());
+        assert!(matches!(result.unwrap_err(), DatabaseError::Decode));
+    }
+
+    #[test]
+    fn test_compress_decompress() {
+        let change_set = ChangeSet {
+            account_trie_keys: vec![StoredNibbles::from(vec![1, 2, 3])],
+            storage_trie_keys: vec![StorageTrieKey {
+                hashed_address: B256::ZERO,
+                path: StoredNibbles::from(vec![4, 5, 6]),
+            }],
+            hashed_account_keys: vec![B256::ZERO],
+            hashed_storage_keys: vec![HashedStorageKey {
+                hashed_address: B256::ZERO,
+                hashed_storage_key: B256::repeat_byte(0x42),
+            }],
+        };
+
+        let mut buf = Vec::new();
+        change_set.compress_to_buf(&mut buf);
+
+        let decompressed = ChangeSet::decompress(&buf).expect("Failed to decompress");
+        assert_eq!(change_set, decompressed);
+    }
+}
diff --git a/crates/optimism/trie/src/db/models/kv.rs b/crates/optimism/trie/src/db/models/kv.rs
new file mode 100644
index 00000000..30a2b042
--- /dev/null
+++ b/crates/optimism/trie/src/db/models/kv.rs
@@ -0,0 +1,66 @@
+use crate::db::{
+    AccountTrieHistory, HashedAccountHistory, HashedStorageHistory, HashedStorageKey, MaybeDeleted,
+    StorageTrieHistory, StorageTrieKey, StorageValue, VersionedValue,
+};
+use alloy_primitives::B256;
+use reth_db::table::{DupSort, Table};
+use reth_primitives_traits::Account;
+use reth_trie::{BranchNodeCompact, Nibbles, StoredNibbles};
+
+/// Helper to convert inputs into a table key or kv pair.
+pub trait IntoKV<Tab: Table> {
+    /// Convert `self` into the table key.
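+    ///
+    /// For example (a sketch based on the account-trie impl below), a `(path, node)` tuple
+    /// keys by its path alone, while `into_kv` also wraps the optional node:
+    ///
+    /// ```ignore
+    /// let key = (path, Some(node)).into_key();        // StoredNibbles
+    /// let (key, vv) = (path, Some(node)).into_kv(42); // (StoredNibbles, VersionedValue<BranchNodeCompact>)
+    /// ```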
+    fn into_key(self) -> Tab::Key;
+
+    /// Convert `self` into kv for the given `block_number`.
+    fn into_kv(self, block_number: u64) -> (Tab::Key, Tab::Value);
+}
+
+impl IntoKV<AccountTrieHistory> for (Nibbles, Option<BranchNodeCompact>) {
+    fn into_key(self) -> StoredNibbles {
+        StoredNibbles::from(self.0)
+    }
+
+    fn into_kv(self, block_number: u64) -> (StoredNibbles, VersionedValue<BranchNodeCompact>) {
+        let (path, node) = self;
+        (StoredNibbles::from(path), VersionedValue { block_number, value: MaybeDeleted(node) })
+    }
+}
+
+impl IntoKV<StorageTrieHistory> for (B256, Nibbles, Option<BranchNodeCompact>) {
+    fn into_key(self) -> StorageTrieKey {
+        let (hashed_address, path, _) = self;
+        StorageTrieKey::new(hashed_address, StoredNibbles::from(path))
+    }
+    fn into_kv(self, block_number: u64) -> (StorageTrieKey, VersionedValue<BranchNodeCompact>) {
+        let (hashed_address, path, node) = self;
+        (
+            StorageTrieKey::new(hashed_address, StoredNibbles::from(path)),
+            VersionedValue { block_number, value: MaybeDeleted(node) },
+        )
+    }
+}
+
+impl IntoKV<HashedAccountHistory> for (B256, Option<Account>) {
+    fn into_key(self) -> B256 {
+        self.0
+    }
+    fn into_kv(self, block_number: u64) -> (B256, VersionedValue<Account>) {
+        let (hashed_address, account) = self;
+        (hashed_address, VersionedValue { block_number, value: MaybeDeleted(account) })
+    }
+}
+
+impl IntoKV<HashedStorageHistory> for (B256, B256, Option<StorageValue>) {
+    fn into_key(self) -> HashedStorageKey {
+        let (hashed_address, hashed_storage_key, _) = self;
+        HashedStorageKey::new(hashed_address, hashed_storage_key)
+    }
+    fn into_kv(self, block_number: u64) -> (HashedStorageKey, VersionedValue<StorageValue>) {
+        let (hashed_address, hashed_storage_key, value) = self;
+        (
+            HashedStorageKey::new(hashed_address, hashed_storage_key),
+            VersionedValue { block_number, value: MaybeDeleted(value) },
+        )
+    }
+}
diff --git a/crates/optimism/trie/src/db/models/mod.rs b/crates/optimism/trie/src/db/models/mod.rs
new file mode 100644
index 00000000..408bf7ed
--- /dev/null
+++ b/crates/optimism/trie/src/db/models/mod.rs
@@ -0,0 +1,84 @@
+//! MDBX implementation of [`OpProofsStore`](crate::OpProofsStore).
+//!
+//! This module provides a complete MDBX implementation of the
+//! [`OpProofsStore`](crate::OpProofsStore) trait. It uses the [`reth_db`] crate for
+//! database interactions and defines the necessary tables and models for storing trie branches,
+//! accounts, and storage leaves.
+
+mod block;
+pub use block::*;
+mod version;
+pub use version::*;
+mod storage;
+pub use storage::*;
+mod change_set;
+pub(crate) mod kv;
+pub use change_set::*;
+pub use kv::*;
+
+use alloy_primitives::B256;
+use reth_db::{
+    table::{DupSort, TableInfo},
+    tables, TableSet, TableType, TableViewer,
+};
+use reth_primitives_traits::Account;
+use reth_trie::{BranchNodeCompact, StoredNibbles};
+use std::fmt;
+
+tables! {
+    /// Stores historical branch nodes for the account state trie.
+    ///
+    /// Each entry maps a compact-encoded trie path (`StoredNibbles`) to its versioned branch node.
+    /// Multiple versions of the same node are stored using the block number as a subkey.
+    table AccountTrieHistory {
+        type Key = StoredNibbles;
+        type Value = VersionedValue<BranchNodeCompact>;
+        type SubKey = u64; // block number
+    }
+
+    /// Stores historical branch nodes for the storage trie of each account.
+    ///
+    /// Each entry is identified by a composite key combining the account’s hashed address and the
+    /// compact-encoded trie path. Versions are tracked using block numbers as subkeys.
+    table StorageTrieHistory {
+        type Key = StorageTrieKey;
+        type Value = VersionedValue<BranchNodeCompact>;
+        type SubKey = u64; // block number
+    }
+
+    /// Stores versioned account state across block history.
+    ///
+    /// Each entry maps a hashed account address to its serialized account data (balance, nonce,
+    /// code hash, storage root).
+    table HashedAccountHistory {
+        type Key = B256;
+        type Value = VersionedValue<Account>;
+        type SubKey = u64; // block number
+    }
+
+    /// Stores versioned storage state across block history.
+    ///
+    /// Each entry maps a composite key of (hashed address, storage key) to its stored value.
+    /// Used for reconstructing contract storage at any historical block height.
+    table HashedStorageHistory {
+        type Key = HashedStorageKey;
+        type Value = VersionedValue<StorageValue>;
+        type SubKey = u64; // block number
+    }
+
+    /// Tracks the active proof window in the external historical storage.
+    ///
+    /// Stores the earliest and latest block numbers (and corresponding hashes)
+    /// for which historical trie data is retained.
+    table ProofWindow {
+        type Key = ProofWindowKey;
+        type Value = BlockNumberHash;
+    }
+
+    /// A reverse mapping of block numbers to the keys of the tables.
+    /// This is used for efficiently locating data by block number.
+    table BlockChangeSet {
+        type Key = u64; // Block number
+        type Value = ChangeSet;
+    }
+}
diff --git a/crates/optimism/trie/src/db/models/storage.rs b/crates/optimism/trie/src/db/models/storage.rs
new file mode 100644
index 00000000..f8caf0e0
--- /dev/null
+++ b/crates/optimism/trie/src/db/models/storage.rs
@@ -0,0 +1,253 @@
+use alloy_primitives::{B256, U256};
+use derive_more::{Constructor, From, Into};
+use reth_db::{
+    table::{Compress, Decode, Decompress, Encode},
+    DatabaseError,
+};
+use reth_trie::StoredNibbles;
+use serde::{Deserialize, Serialize};
+
+/// Composite key: `(hashed-address, path)` for storage trie branches
+///
+/// Used to efficiently index storage branches by both account address and trie path.
+/// The encoding ensures lexicographic ordering: first by address, then by path.
+#[derive(Default, Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+pub struct StorageTrieKey {
+    /// Hashed account address
+    pub hashed_address: B256,
+    /// Trie path as nibbles
+    pub path: StoredNibbles,
+}
+
+impl StorageTrieKey {
+    /// Create a new storage branch key
+    pub const fn new(hashed_address: B256, path: StoredNibbles) -> Self {
+        Self { hashed_address, path }
+    }
+}
+
+impl Encode for StorageTrieKey {
+    type Encoded = Vec<u8>;
+
+    fn encode(self) -> Self::Encoded {
+        let mut buf = Vec::with_capacity(32 + self.path.0.len());
+        // First encode the address (32 bytes)
+        buf.extend_from_slice(self.hashed_address.as_slice());
+        // Then encode the path
+        buf.extend_from_slice(&self.path.encode());
+        buf
+    }
+}
+
+impl Decode for StorageTrieKey {
+    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
+        if value.len() < 32 {
+            return Err(DatabaseError::Decode);
+        }
+
+        // First 32 bytes are the address
+        let hashed_address = B256::from_slice(&value[..32]);
+
+        // Remaining bytes are the path
+        let path = StoredNibbles::decode(&value[32..])?;
+
+        Ok(Self { hashed_address, path })
+    }
+}
+
+/// Composite key: (`hashed_address`, `hashed_storage_key`) for hashed storage values
+///
+/// Used to efficiently index storage values by both account address and storage key.
+/// The encoding ensures lexicographic ordering: first by address, then by storage key.
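+///
+/// For illustration (layout taken from the [`Encode`] impl below), keys concatenate to 64 bytes
+/// and therefore sort by address first:
+///
+/// ```text
+/// [ hashed_address: 32 bytes | hashed_storage_key: 32 bytes ]
+///
+/// (addr A, slot 0x01) < (addr A, slot 0x02) < (addr B, slot 0x01)
+/// ```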
+#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+pub struct HashedStorageKey {
+    /// Hashed account address
+    pub hashed_address: B256,
+    /// Hashed storage key
+    pub hashed_storage_key: B256,
+}
+
+impl HashedStorageKey {
+    /// Create a new hashed storage key
+    pub const fn new(hashed_address: B256, hashed_storage_key: B256) -> Self {
+        Self { hashed_address, hashed_storage_key }
+    }
+}
+
+impl Encode for HashedStorageKey {
+    type Encoded = [u8; 64];
+
+    fn encode(self) -> Self::Encoded {
+        let mut buf = [0u8; 64];
+        // First 32 bytes: address
+        buf[..32].copy_from_slice(self.hashed_address.as_slice());
+        // Next 32 bytes: storage key
+        buf[32..].copy_from_slice(self.hashed_storage_key.as_slice());
+        buf
+    }
+}
+
+impl Decode for HashedStorageKey {
+    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
+        if value.len() != 64 {
+            return Err(DatabaseError::Decode);
+        }
+
+        let hashed_address = B256::from_slice(&value[..32]);
+        let hashed_storage_key = B256::from_slice(&value[32..64]);
+
+        Ok(Self { hashed_address, hashed_storage_key })
+    }
+}
+
+/// Storage value wrapper for U256 values
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, From, Into, Constructor)]
+pub struct StorageValue(pub U256);
+
+impl Compress for StorageValue {
+    type Compressed = Vec<u8>;
+
+    fn compress_to_buf<B: bytes::BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
+        let be: [u8; 32] = self.0.to_be_bytes::<32>();
+        buf.put_slice(&be);
+    }
+}
+
+impl Decompress for StorageValue {
+    fn decompress(value: &[u8]) -> Result<Self, DatabaseError> {
+        if value.len() != 32 {
+            return Err(DatabaseError::Decode);
+        }
+        let bytes: [u8; 32] = value.try_into().map_err(|_| DatabaseError::Decode)?;
+        Ok(Self(U256::from_be_bytes(bytes)))
+    }
+}
+
+/// Proof Window key for tracking active proof window bounds
+///
+/// Used to store earliest and latest block numbers in the external storage.
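+///
+/// Each variant encodes to a single byte (`0` for the earliest bound, `1` for the latest), so
+/// the table holds at most two rows.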
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+#[repr(u8)]
+pub enum ProofWindowKey {
+    /// Earliest block number stored in external storage
+    EarliestBlock = 0,
+    /// Latest block number stored in external storage
+    LatestBlock = 1,
+}
+
+impl Encode for ProofWindowKey {
+    type Encoded = [u8; 1];
+
+    fn encode(self) -> Self::Encoded {
+        [self as u8]
+    }
+}
+
+impl Decode for ProofWindowKey {
+    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
+        match value.first() {
+            Some(&0) => Ok(Self::EarliestBlock),
+            Some(&1) => Ok(Self::LatestBlock),
+            _ => Err(DatabaseError::Decode),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use reth_trie::Nibbles;
+
+    #[test]
+    fn test_storage_branch_subkey_encode_decode() {
+        let addr = B256::from([1u8; 32]);
+        let path = StoredNibbles(Nibbles::from_nibbles_unchecked([1, 2, 3, 4]));
+        let key = StorageTrieKey::new(addr, path.clone());
+
+        let encoded = key.clone().encode();
+        let decoded = StorageTrieKey::decode(&encoded).unwrap();
+
+        assert_eq!(key, decoded);
+        assert_eq!(decoded.hashed_address, addr);
+        assert_eq!(decoded.path, path);
+    }
+
+    #[test]
+    fn test_storage_branch_subkey_ordering() {
+        let addr1 = B256::from([1u8; 32]);
+        let addr2 = B256::from([2u8; 32]);
+        let path1 = StoredNibbles(Nibbles::from_nibbles_unchecked([1, 2]));
+        let path2 = StoredNibbles(Nibbles::from_nibbles_unchecked([1, 3]));
+
+        let key1 = StorageTrieKey::new(addr1, path1.clone());
+        let key2 = StorageTrieKey::new(addr1, path2);
+        let key3 = StorageTrieKey::new(addr2, path1);
+
+        // Encoded bytes should be sortable: first by address, then by path
+        let enc1 = key1.encode();
+        let enc2 = key2.encode();
+        let enc3 = key3.encode();
+
+        assert!(enc1 < enc2, "Same address, path1 < path2");
+        assert!(enc1 < enc3, "addr1 < addr2");
+        assert!(enc2 < enc3, "addr1 < addr2 (even with larger path)");
+    }
+
+    #[test]
+    fn test_hashed_storage_subkey_encode_decode() {
+        let addr = B256::from([1u8; 32]);
+        let storage_key = B256::from([2u8; 32]);
+        let key = HashedStorageKey::new(addr, storage_key);
+
+        let encoded = key.clone().encode();
+        let decoded = HashedStorageKey::decode(&encoded).unwrap();
+
+        assert_eq!(key, decoded);
+        assert_eq!(decoded.hashed_address, addr);
+        assert_eq!(decoded.hashed_storage_key, storage_key);
+    }
+
+    #[test]
+    fn test_hashed_storage_subkey_ordering() {
+        let addr1 = B256::from([1u8; 32]);
+        let addr2 = B256::from([2u8; 32]);
+        let storage1 = B256::from([10u8; 32]);
+        let storage2 = B256::from([20u8; 32]);
+
+        let key1 = HashedStorageKey::new(addr1, storage1);
+        let key2 = HashedStorageKey::new(addr1, storage2);
+        let key3 = HashedStorageKey::new(addr2, storage1);
+
+        // Encoded bytes should be sortable: first by address, then by storage key
+        let enc1 = key1.encode();
+        let enc2 = key2.encode();
+        let enc3 = key3.encode();
+
+        assert!(enc1 < enc2, "Same address, storage1 < storage2");
+        assert!(enc1 < enc3, "addr1 < addr2");
+        assert!(enc2 < enc3, "addr1 < addr2 (even with larger storage key)");
+    }
+
+    #[test]
+    fn test_hashed_storage_subkey_size() {
+        let addr = B256::from([1u8; 32]);
+        let storage_key = B256::from([2u8; 32]);
+        let key = HashedStorageKey::new(addr, storage_key);
+
+        let encoded = key.encode();
+        assert_eq!(encoded.len(), 64, "Encoded size should be exactly 64 bytes");
+    }
+
+    #[test]
+    fn test_metadata_key_encode_decode() {
+        let key = ProofWindowKey::EarliestBlock;
+        let encoded = key.encode();
+        let decoded = ProofWindowKey::decode(&encoded).unwrap();
+        assert_eq!(key, decoded);
+
+        let key = ProofWindowKey::LatestBlock;
+        let encoded = key.encode();
+        let decoded = ProofWindowKey::decode(&encoded).unwrap();
+        assert_eq!(key, decoded);
+    }
+}
diff --git a/crates/optimism/trie/src/db/models/version.rs b/crates/optimism/trie/src/db/models/version.rs
new file mode 100644
index 00000000..49917099
--- /dev/null
+++ b/crates/optimism/trie/src/db/models/version.rs
@@ -0,0 +1,191 @@
+use bytes::{Buf, BufMut};
+use reth_db::{
+    table::{Compress, Decompress},
+    DatabaseError,
+};
+use reth_primitives_traits::ValueWithSubKey;
+use serde::{Deserialize, Serialize};
+
+/// Wrapper type for `Option<T>` that implements [`Compress`] and [`Decompress`]
+///
+/// Encoding:
+/// - `None` => empty byte array (length 0)
+/// - `Some(value)` => compressed bytes of value (length > 0)
+///
+/// This assumes the inner type `T` always compresses to non-empty bytes when it exists.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct MaybeDeleted<T>(pub Option<T>);
+
+impl<T> From<Option<T>> for MaybeDeleted<T> {
+    fn from(opt: Option<T>) -> Self {
+        Self(opt)
+    }
+}
+
+impl<T> From<MaybeDeleted<T>> for Option<T> {
+    fn from(maybe: MaybeDeleted<T>) -> Self {
+        maybe.0
+    }
+}
+
+impl<T: Compress> Compress for MaybeDeleted<T> {
+    type Compressed = Vec<u8>;
+
+    fn compress_to_buf<B: BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
+        match &self.0 {
+            None => {
+                // Empty = deleted, write nothing
+            }
+            Some(value) => {
+                // Compress the inner value to the buffer
+                value.compress_to_buf(buf);
+            }
+        }
+    }
+}
+
+impl<T: Decompress> Decompress for MaybeDeleted<T> {
+    fn decompress(value: &[u8]) -> Result<Self, DatabaseError> {
+        if value.is_empty() {
+            // Empty = deleted
+            Ok(Self(None))
+        } else {
+            // Non-empty = present
+            let inner = T::decompress(value)?;
+            Ok(Self(Some(inner)))
+        }
+    }
+}
+
+/// Versioned value wrapper for [`DupSort`] tables
+///
+/// For [`DupSort`] tables in MDBX, the Value type must contain the [`DupSort::SubKey`] as a field.
+/// This wrapper combines a [`block_number`] (the [`DupSort::SubKey`]) with
+/// the actual value.
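+///
+/// A rough sketch of the encoding produced by the [`Compress`] impl below: the block number
+/// comes first, followed by the (possibly empty) value bytes, so a tombstone is exactly 8 bytes:
+///
+/// ```text
+/// [ block_number: u64 (big-endian) | MaybeDeleted(value) bytes (empty if deleted) ]
+/// ```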
+///
+/// [`DupSort`]: reth_db::table::DupSort
+/// [`DupSort::SubKey`]: reth_db::table::DupSort::SubKey
+/// [`block_number`]: Self::block_number
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct VersionedValue<T> {
+    /// Block number ([`DupSort::SubKey`] for [`DupSort`])
+    ///
+    /// [`DupSort`]: reth_db::table::DupSort
+    /// [`DupSort::SubKey`]: reth_db::table::DupSort::SubKey
+    pub block_number: u64,
+    /// The actual value (may be deleted)
+    pub value: MaybeDeleted<T>,
+}
+
+impl<T> VersionedValue<T> {
+    /// Create a new versioned value
+    pub const fn new(block_number: u64, value: MaybeDeleted<T>) -> Self {
+        Self { block_number, value }
+    }
+}
+
+impl<T: Compress> Compress for VersionedValue<T> {
+    type Compressed = Vec<u8>;
+
+    fn compress_to_buf<B: BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
+        // Encode block number first (8 bytes, big-endian)
+        buf.put_u64(self.block_number);
+        // Then encode the value
+        self.value.compress_to_buf(buf);
+    }
+}
+
+impl<T: Decompress> Decompress for VersionedValue<T> {
+    fn decompress(value: &[u8]) -> Result<Self, DatabaseError> {
+        if value.len() < 8 {
+            return Err(DatabaseError::Decode);
+        }
+
+        let mut buf: &[u8] = value;
+        let block_number = buf.get_u64();
+        let value = MaybeDeleted::<T>::decompress(&value[8..])?;
+
+        Ok(Self { block_number, value })
+    }
+}
+
+impl<T> ValueWithSubKey for VersionedValue<T> {
+    type SubKey = u64;
+
+    fn get_subkey(&self) -> Self::SubKey {
+        self.block_number
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use reth_primitives_traits::Account;
+    use reth_trie::BranchNodeCompact;
+
+    #[test]
+    fn test_maybe_deleted_none() {
+        let none: MaybeDeleted<Account> = MaybeDeleted(None);
+        let compressed = none.compress();
+        assert!(compressed.is_empty(), "None should compress to empty bytes");
+
+        let decompressed = MaybeDeleted::<Account>::decompress(&compressed).unwrap();
+        assert_eq!(decompressed.0, None);
+    }
+
+    #[test]
+    fn test_maybe_deleted_some_account() {
+        let account = Account {
+            nonce: 42,
+            balance: alloy_primitives::U256::from(1000u64),
+            bytecode_hash: None,
+        };
+        let some = MaybeDeleted(Some(account));
+        let compressed = some.compress();
+        assert!(!compressed.is_empty(), "Some should compress to non-empty bytes");
+
+        let decompressed = MaybeDeleted::<Account>::decompress(&compressed).unwrap();
+        assert_eq!(decompressed.0, Some(account));
+    }
+
+    #[test]
+    fn test_maybe_deleted_some_branch() {
+        // Create a simple valid BranchNodeCompact (empty is valid)
+        let branch = BranchNodeCompact::new(
+            0,      // state_mask
+            0,      // tree_mask
+            0,      // hash_mask
+            vec![], // hashes
+            None,   // root_hash
+        );
+        let some = MaybeDeleted(Some(branch.clone()));
+        let compressed = some.compress();
+        assert!(!compressed.is_empty(), "Some should compress to non-empty bytes");
+
+        let decompressed = MaybeDeleted::<BranchNodeCompact>::decompress(&compressed).unwrap();
+        assert_eq!(decompressed.0, Some(branch));
+    }
+
+    #[test]
+    fn test_maybe_deleted_roundtrip() {
+        let test_cases = vec![
+            MaybeDeleted(None),
+            MaybeDeleted(Some(Account {
+                nonce: 0,
+                balance: alloy_primitives::U256::ZERO,
+                bytecode_hash: None,
+            })),
+            MaybeDeleted(Some(Account {
+                nonce: 999,
+                balance: alloy_primitives::U256::MAX,
+                bytecode_hash: Some([0xff; 32].into()),
+            })),
+        ];
+
+        for original in test_cases {
+            let compressed = original.clone().compress();
+            let decompressed = MaybeDeleted::<Account>::decompress(&compressed).unwrap();
+            assert_eq!(original, decompressed);
+        }
+    }
+}
diff --git a/crates/optimism/trie/src/db/store.rs b/crates/optimism/trie/src/db/store.rs
new file mode 100644
index 00000000..ee555c67
--- /dev/null
+++ b/crates/optimism/trie/src/db/store.rs
@@ -0,0 +1,3386 @@
diff --git a/crates/optimism/trie/src/db/store.rs b/crates/optimism/trie/src/db/store.rs
new file mode 100644
index 00000000..ee555c67
--- /dev/null
+++ b/crates/optimism/trie/src/db/store.rs
@@ -0,0 +1,3386 @@
+use super::{BlockNumberHash, ProofWindow, ProofWindowKey, Tables};
+use crate::{
+    api::WriteCounts,
+    db::{
+        cursor::Dup,
+        models::{
+            kv::IntoKV, AccountTrieHistory, BlockChangeSet, ChangeSet, HashedAccountHistory,
+            HashedStorageHistory, HashedStorageKey, MaybeDeleted, StorageTrieHistory,
+            StorageTrieKey, StorageValue, VersionedValue,
+        },
+        MdbxAccountCursor, MdbxStorageCursor, MdbxTrieCursor,
+    },
+    BlockStateDiff, OpProofsStorageError, OpProofsStorageResult, OpProofsStore,
+};
+use alloy_eips::{eip1898::BlockWithParent, NumHash};
+use alloy_primitives::{map::HashMap, B256, U256};
+#[cfg(feature = "metrics")]
+use metrics::{gauge, Label};
+use reth_db::{
+    cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW},
+    mdbx::{init_db_for, DatabaseArguments},
+    table::{DupSort, Table},
+    transaction::{DbTx, DbTxMut},
+    Database, DatabaseEnv, DatabaseError,
+};
+use reth_primitives_traits::Account;
+use reth_trie::{
+    hashed_cursor::HashedCursor,
+    trie_cursor::TrieCursor,
+    updates::{StorageTrieUpdates, TrieUpdates},
+    BranchNodeCompact, HashedPostState, Nibbles,
+};
+use std::{cmp::max, ops::RangeBounds, path::Path};
+use tracing::info;
+
+/// MDBX implementation of [`OpProofsStore`].
+#[derive(Debug)]
+pub struct MdbxProofsStorage {
+    env: DatabaseEnv,
+}
+
+struct ProofWindowValue {
+    earliest: NumHash,
+    latest: NumHash,
+}
+
+impl MdbxProofsStorage {
+    /// Creates a new [`MdbxProofsStorage`] instance with the given path.
+    pub fn new(path: &Path) -> Result<Self, DatabaseError> {
+        let env = init_db_for::<_, Tables>(path, DatabaseArguments::default())
+            .map_err(|e| DatabaseError::Other(format!("Failed to open database: {e}")))?;
+        Ok(Self { env })
+    }
+
+    fn inner_get_latest_block_number_hash(
+        &self,
+        tx: &impl DbTx,
+    ) -> OpProofsStorageResult<Option<(u64, B256)>> {
+        let block = self.inner_get_block_number_hash(tx, ProofWindowKey::LatestBlock)?;
+        if block.is_some() {
+            return Ok(block);
+        }
+
+        self.inner_get_block_number_hash(tx, ProofWindowKey::EarliestBlock)
+    }
+
+    fn inner_get_block_number_hash(
+        &self,
+        tx: &impl DbTx,
+        key: ProofWindowKey,
+    ) -> OpProofsStorageResult<Option<(u64, B256)>> {
+        let mut cursor = tx.cursor_read::<ProofWindow>()?;
+        let value = cursor.seek_exact(key)?;
+        Ok(value.map(|(_, val)| (val.number(), *val.hash())))
+    }
+
+    fn inner_get_proof_window(
+        &self,
+        tx: &impl DbTx,
+    ) -> OpProofsStorageResult<Option<ProofWindowValue>> {
+        let mut cursor = tx.cursor_read::<ProofWindow>()?;
+
+        let earliest = match cursor.seek_exact(ProofWindowKey::EarliestBlock)? {
+            Some((_, val)) => NumHash::new(val.number(), *val.hash()),
+            None => return Ok(None),
+        };
+
+        let latest = match cursor.seek_exact(ProofWindowKey::LatestBlock)? {
+            Some((_, val)) => NumHash::new(val.number(), *val.hash()),
+            None => earliest,
+        };
+
+        Ok(Some(ProofWindowValue { earliest, latest }))
+    }
+
+    async fn set_earliest_block_number_hash(
+        &self,
+        block_number: u64,
+        hash: B256,
+    ) -> OpProofsStorageResult<()> {
+        let _ = self.env.update(|tx| {
+            Self::inner_set_earliest_block_number(tx, block_number, hash)?;
+            Ok::<(), DatabaseError>(())
+        })?;
+        Ok(())
+    }
+
+    /// Internal helper to set the earliest block number hash within an existing transaction.
+    fn inner_set_earliest_block_number(
+        tx: &(impl DbTxMut + DbTx),
+        block_number: u64,
+        hash: B256,
+    ) -> OpProofsStorageResult<()> {
+        let mut cursor = tx.cursor_write::<ProofWindow>()?;
+        cursor.upsert(ProofWindowKey::EarliestBlock, &BlockNumberHash::new(block_number, hash))?;
+        Ok(())
+    }
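The `ProofWindow` table holds just two fixed-key rows, and the latest-block read falls back to the earliest entry when only the initial state has been written. A small model of that lookup, with a plain map standing in for the table (not the crate's API):

```rust
use std::collections::BTreeMap;

// Minimal model of the two-row ProofWindow table: one row per fixed key,
// each holding a (number, hash) pair. Types are simplified stand-ins.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Key {
    EarliestBlock,
    LatestBlock,
}

type NumHash = (u64, [u8; 32]);

fn latest_with_fallback(table: &BTreeMap<Key, NumHash>) -> Option<NumHash> {
    // Mirrors inner_get_latest_block_number_hash: prefer LatestBlock,
    // fall back to EarliestBlock when only the initial state exists.
    table.get(&Key::LatestBlock).or_else(|| table.get(&Key::EarliestBlock)).copied()
}

fn main() {
    let mut table = BTreeMap::new();
    table.insert(Key::EarliestBlock, (100, [0u8; 32]));
    assert_eq!(latest_with_fallback(&table), Some((100, [0u8; 32])));
    table.insert(Key::LatestBlock, (105, [1u8; 32]));
    assert_eq!(latest_with_fallback(&table), Some((105, [1u8; 32])));
}
```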
+
+    /// Persist a batch of versioned history entries to a dup-sorted table.
+    ///
+    /// # Parameters
+    /// - `block_number`: Target block number for versioning entries
+    /// - `items`: **Must be sorted** - iterator of entries to persist
+    /// - `append_mode`: Mode selector for write strategy:
+    ///   - `true` (Append): Appends all entries including tombstones for forward progress
+    ///   - `false` (Prune): Removes tombstones, writes non-tombstones to block 0
+    ///
+    /// The cost of pruning is the cost of (append + deleting tombstones + deleting old block 0).
+    /// The tombstone deletion is expensive, as it requires a seek for each (key + subkey).
+    ///
+    /// Uses [`reth_db::mdbx::cursor::Cursor::upsert`] for the upsert operation.
+    fn persist_history_batch<T, V, I>(
+        &self,
+        tx: &(impl DbTxMut + DbTx),
+        block_number: T::SubKey,
+        items: I,
+        append_mode: bool,
+    ) -> OpProofsStorageResult<Vec<T::Key>>
+    where
+        T: Table<Value = VersionedValue<V>> + DupSort<SubKey = u64>,
+        T::Key: Clone,
+        I: IntoIterator,
+        I::Item: IntoKV<T>,
+    {
+        let mut cur = tx.cursor_dup_write::<T>()?;
+        let mut keys = Vec::<T::Key>::new();
+
+        // Materialize the iterator to enable partitioning and collect keys
+        let mut pairs: Vec<(T::Key, T::Value)> = Vec::new();
+        for it in items {
+            let (k, vv) = it.into_kv(block_number);
+            pairs.push((k.clone(), vv));
+            keys.push(k);
+        }
+
+        if append_mode {
+            // Append all entries (including tombstones) to preserve full history
+            for (k, vv) in pairs {
+                cur.append_dup(k.clone(), vv)?;
+            }
+            return Ok(keys);
+        }
+
+        // Prune mode: remove tombstones and write state to block 0 (not append)
+        let (to_delete, to_append): (Vec<_>, Vec<_>) =
+            pairs.into_iter().partition(|(_, vv)| vv.value.0.is_none());
+
+        for (k, vv) in to_append {
+            // Dupsort upsert doesn't replace - manually delete the old block 0 entry first
+            let val = cur.seek_by_key_subkey(k.clone(), 0)?;
+            if val.is_some() && val.unwrap().block_number == 0 {
+                cur.delete_current()?;
+            }
+            cur.upsert(k, &vv)?;
+        }
+
+        // Delete tombstones after updates to avoid overwrites
+        self.delete_dup_sorted::<T, _, _>(tx, block_number, to_delete.into_iter().map(|(k, _)| k))?;
+
+        Ok(keys)
+    }
+
+    /// Delete entries for `items` at exactly `block_number` in a dup-sorted table.
+    /// Seeks (key, block) and deletes the current entry if the subkey matches.
+    fn delete_dup_sorted<T, V, I>(
+        &self,
+        tx: &(impl DbTxMut + DbTx),
+        block_number: u64,
+        items: I,
+    ) -> OpProofsStorageResult<()>
+    where
+        T: Table<Value = VersionedValue<V>> + DupSort<SubKey = u64>,
+        T::Key: Clone,
+        T::SubKey: PartialEq + Clone,
+        I: IntoIterator<Item = T::Key>,
+    {
+        let mut cur = tx.cursor_dup_write::<T>()?;
+        for key in items {
+            if let Some(vv) = cur.seek_by_key_subkey(key.clone(), block_number)? {
+                // Ensure we didn't land on a later subkey
+                if vv.block_number == block_number {
+                    cur.delete_current()?;
+                }
+            }
+        }
+        Ok(())
+    }
+
+    /// Append deletion tombstones for all existing storage items of `hashed_address` at
+    /// `block_number`. Iterates via `next()` from a read-only cursor and writes
+    /// `MaybeDeleted(None)` rows.
+    fn wipe_storage<T, V, K, U, Next>(
+        &self,
+        tx: &(impl DbTxMut + DbTx),
+        block_number: u64,
+        hashed_address: B256,
+        mut next: Next,
+    ) -> OpProofsStorageResult<Vec<T::Key>>
+    where
+        T: Table<Value = VersionedValue<V>> + DupSort<SubKey = u64>,
+        Next: FnMut() -> OpProofsStorageResult<Option<(K, U)>>,
+        (B256, K, Option<V>): IntoKV<T>,
+        T::Key: Clone,
+    {
+        let mut cur = tx.cursor_dup_write::<T>()?;
+        let mut keys: Vec<T::Key> = Vec::new();
+
+        while let Some((k, _vv)) = next()? {
+            let key: T::Key = (hashed_address, k, Option::<V>::None).into_key();
+            let del: T::Value = VersionedValue { block_number, value: MaybeDeleted(None) };
+            cur.append_dup(key.clone(), del)?;
+            keys.push(key);
+        }
+
+        Ok(keys)
+    }
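The two write modes of `persist_history_batch` are easiest to see on a toy table. A std-only model, assuming only what the doc comment above states (append keeps every version, tombstones included; prune rewrites the block-0 baseline and drops tombstones, and in this file the prune path is only ever invoked with block 0):

```rust
// Illustrative model of the two write modes on a dup-sorted table.
// A row is (key, block_number, value); value None = tombstone.
type Row = (&'static str, u64, Option<u32>);

fn append_mode(rows: &mut Vec<Row>, block: u64, items: &[(&'static str, Option<u32>)]) {
    // Append mode: keep every version, tombstones included.
    rows.extend(items.iter().map(|&(k, v)| (k, block, v)));
}

fn prune_mode(rows: &mut Vec<Row>, items: &[(&'static str, Option<u32>)]) {
    for &(k, v) in items {
        // Both branches drop the key's old block-0 baseline row...
        rows.retain(|&(rk, rb, _)| !(rk == k && rb == 0));
        // ...but only non-tombstones write a fresh baseline back.
        if v.is_some() {
            rows.push((k, 0, v));
        }
    }
    rows.sort(); // restore dup-sort order
}

fn main() {
    let mut rows = vec![("a", 0, Some(1)), ("b", 0, Some(2))];
    prune_mode(&mut rows, &[("a", Some(9)), ("b", None)]);
    assert_eq!(rows, vec![("a", 0, Some(9))]); // "a" rewritten, "b" tombstoned away

    append_mode(&mut rows, 7, &[("a", None)]);
    assert_eq!(rows.len(), 2); // append preserved the tombstone as history
}
```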
+
+    /// Delete versioned history over `block_range` using the [`BlockChangeSet`] index.
+    /// For each block: delete the referenced rows at that block and drop the changeset entry.
+    fn delete_history_ranged(
+        &self,
+        tx: &(impl DbTxMut + DbTx),
+        block_range: impl RangeBounds<u64>,
+    ) -> OpProofsStorageResult<WriteCounts> {
+        let mut write_count = WriteCounts::default();
+        let mut change_set_cursor = tx.cursor_write::<BlockChangeSet>()?;
+        let mut walker = change_set_cursor.walk_range(block_range)?;
+        let mut blocks_deleted = 0;
+        while let Some(Ok((block_number, change_set))) = walker.next() {
+            write_count += WriteCounts::new(
+                change_set.account_trie_keys.len() as u64,
+                change_set.storage_trie_keys.len() as u64,
+                change_set.hashed_account_keys.len() as u64,
+                change_set.hashed_storage_keys.len() as u64,
+            );
+
+            self.delete_dup_sorted::<AccountTrieHistory, _, _>(
+                tx,
+                block_number,
+                change_set.account_trie_keys,
+            )?;
+            self.delete_dup_sorted::<StorageTrieHistory, _, _>(
+                tx,
+                block_number,
+                change_set.storage_trie_keys,
+            )?;
+            self.delete_dup_sorted::<HashedAccountHistory, _, _>(
+                tx,
+                block_number,
+                change_set.hashed_account_keys,
+            )?;
+            self.delete_dup_sorted::<HashedStorageHistory, _, _>(
+                tx,
+                block_number,
+                change_set.hashed_storage_keys,
+            )?;
+
+            walker.delete_current()?;
+
+            blocks_deleted += 1;
+            // Progress log: only every 1000 blocks, and only once at least 1000 were deleted
+            if blocks_deleted >= 1000 && blocks_deleted % 1000 == 0 {
+                info!(target: "optimism.trie", %blocks_deleted, "Deleting Proofs History");
+            }
+        }
+        Ok(write_count)
+    }
+
+    /// Write trie/state history for `block_number` from `block_state_diff`.
+    fn store_trie_updates_for_block(
+        &self,
+        tx: &<DatabaseEnv as Database>::TXMut,
+        block_number: u64,
+        block_state_diff: BlockStateDiff,
+        append_mode: bool,
+    ) -> OpProofsStorageResult<ChangeSet> {
+        let BlockStateDiff { sorted_trie_updates, sorted_post_state } = block_state_diff;
+
+        let storage_trie_len = sorted_trie_updates.storage_tries_ref().len();
+        let hashed_storage_len = sorted_post_state.storages.len();
+
+        let account_trie_keys = self.persist_history_batch(
+            tx,
+            block_number,
+            sorted_trie_updates.account_nodes_ref().into_iter().cloned(),
+            append_mode,
+        )?;
+        let hashed_account_keys = self.persist_history_batch(
+            tx,
+            block_number,
+            sorted_post_state.accounts.iter().copied(),
+            append_mode,
+        )?;
+
+        let mut storage_trie_keys = Vec::<StorageTrieKey>::with_capacity(storage_trie_len);
+        for (hashed_address, nodes) in sorted_trie_updates.storage_tries_ref().iter() {
+            // Handle wiped storage - mark the whole storage trie as deleted at the current block
+            if nodes.is_deleted && append_mode {
+                // There is no update for the current block number yet, so read up to the
+                // previous block number
+                let mut ro = self.storage_trie_cursor(*hashed_address, block_number - 1)?;
+                let keys =
+                    self.wipe_storage(tx, block_number, *hashed_address, || Ok(ro.next()?))?;
+
+                storage_trie_keys.extend(keys);
+
+                // Skip any further processing for this hashed_address
+                continue;
+            }
+
+            let keys = self.persist_history_batch(
+                tx,
+                block_number,
+                nodes
+                    .storage_nodes_ref()
+                    .into_iter()
+                    .map(|(path, node)| (*hashed_address, *path, node.clone())),
+                append_mode,
+            )?;
+            storage_trie_keys.extend(keys);
+        }
+
+        let mut hashed_storage_keys = Vec::<HashedStorageKey>::with_capacity(hashed_storage_len);
+        for (hashed_address, storage) in sorted_post_state.storages {
+            // Handle wiped storage - mark all storage slots as deleted at the current block
+            if append_mode && storage.is_wiped() {
+                // There is no update for the current block number yet, so read up to the
+                // previous block number
+                let mut ro = self.storage_hashed_cursor(hashed_address, block_number - 1)?;
+                let keys =
+                    self.wipe_storage(tx, block_number, hashed_address, || Ok(ro.next()?))?;
+                hashed_storage_keys.extend(keys);
+                // Skip any further processing for this hashed_address
+                continue;
+            }
+            let keys = self.persist_history_batch(
+                tx,
+                block_number,
+                storage
+                    .storage_slots_ref()
+                    .iter()
+                    .map(|(key, val)| (hashed_address, *key, Some(StorageValue(*val)))),
+                append_mode,
+            )?;
+            hashed_storage_keys.extend(keys);
+        }
+
+        Ok(ChangeSet {
+            account_trie_keys,
+            storage_trie_keys,
+            hashed_account_keys,
+            hashed_storage_keys,
+        })
+    }
+
+    /// Append-only writer for a block: validates the parent, persists the diff
+    /// (soft-delete = true), records a [`BlockChangeSet`], and advances
+    /// `ProofWindowKey::LatestBlock`.
+    fn store_trie_updates_append_only(
+        &self,
+        tx: &<DatabaseEnv as Database>::TXMut,
+        block_ref: BlockWithParent,
+        block_state_diff: BlockStateDiff,
+    ) -> OpProofsStorageResult<WriteCounts> {
+        let block_number = block_ref.block.number;
+
+        // Check that the latest stored block is the parent of the incoming block
+        let latest_block_hash = match self.inner_get_latest_block_number_hash(tx)? {
+            Some((_num, hash)) => hash,
+            None => B256::ZERO,
+        };
+
+        if latest_block_hash != block_ref.parent {
+            return Err(OpProofsStorageError::OutOfOrder {
+                block_number,
+                parent_block_hash: block_ref.parent,
+                latest_block_hash,
+            });
+        }
+
+        let change_set =
+            &self.store_trie_updates_for_block(tx, block_number, block_state_diff, true)?;
+
+        // Cursor for recording all changes made in this block across all history tables
+        let mut change_set_cursor = tx.new_cursor::<BlockChangeSet>()?;
+        change_set_cursor.append(block_number, change_set)?;
+
+        // Update the proof window's latest block
+        let mut proof_window_cursor = tx.new_cursor::<ProofWindow>()?;
+        proof_window_cursor.append(
+            ProofWindowKey::LatestBlock,
+            &BlockNumberHash::new(block_number, block_ref.block.hash),
+        )?;
+
+        Ok(WriteCounts {
+            account_trie_updates_written_total: change_set.account_trie_keys.len() as u64,
+            storage_trie_updates_written_total: change_set.storage_trie_keys.len() as u64,
+            hashed_accounts_written_total: change_set.hashed_account_keys.len() as u64,
+            hashed_storages_written_total: change_set.hashed_storage_keys.len() as u64,
+        })
+    }
+}
+
+impl OpProofsStore for MdbxProofsStorage {
+    type StorageTrieCursor<'tx>
+        = MdbxTrieCursor<Dup<'tx, StorageTrieHistory>>
+    where
+        Self: 'tx;
+    type AccountTrieCursor<'tx>
+        = MdbxTrieCursor<Dup<'tx, AccountTrieHistory>>
+    where
+        Self: 'tx;
+    type StorageCursor<'tx>
+        = MdbxStorageCursor<Dup<'tx, HashedStorageHistory>>
+    where
+        Self: 'tx;
+    type AccountHashedCursor<'tx>
+        = MdbxAccountCursor<Dup<'tx, HashedAccountHistory>>
+    where
+        Self: 'tx;
+
+    async fn store_account_branches(
+        &self,
+        account_nodes: Vec<(Nibbles, Option<BranchNodeCompact>)>,
+    ) -> OpProofsStorageResult<()> {
+        let mut account_nodes = account_nodes;
+        if account_nodes.is_empty() {
+            return Ok(());
+        }
+
+        account_nodes.sort_by_key(|(key, _)| *key);
+
+        self.env.update(|tx| {
+            self.persist_history_batch(tx, 0, account_nodes.into_iter(), true)?;
+            Ok(())
+        })?
+    }
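The `OutOfOrder` guard in `store_trie_updates_append_only` above is a plain hash-chain continuity check. A simplified sketch (fixed-size arrays for hashes and a string error stand in for the crate's types):

```rust
// Sketch of the continuity check: a block can only extend the window if its
// parent hash equals the latest stored hash, with the zero hash standing in
// for an empty store.
type Hash = [u8; 32];

fn check_parent(latest_stored: Option<Hash>, parent: Hash) -> Result<(), &'static str> {
    let latest = latest_stored.unwrap_or([0u8; 32]); // B256::ZERO when the store is empty
    if latest == parent {
        Ok(())
    } else {
        Err("OutOfOrder: parent hash does not match the latest stored block")
    }
}

fn main() {
    assert!(check_parent(None, [0u8; 32]).is_ok()); // first write into an empty store
    assert!(check_parent(Some([1u8; 32]), [1u8; 32]).is_ok()); // direct child
    assert!(check_parent(Some([1u8; 32]), [2u8; 32]).is_err()); // gap or reorg
}
```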
+
+    async fn store_storage_branches(
+        &self,
+        hashed_address: B256,
+        storage_nodes: Vec<(Nibbles, Option<BranchNodeCompact>)>,
+    ) -> OpProofsStorageResult<()> {
+        let mut storage_nodes = storage_nodes;
+        if storage_nodes.is_empty() {
+            return Ok(());
+        }
+
+        storage_nodes.sort_by_key(|(key, _)| *key);
+
+        self.env.update(|tx| {
+            self.persist_history_batch(
+                tx,
+                0,
+                storage_nodes.into_iter().map(|(path, node)| (hashed_address, path, node)),
+                true,
+            )?;
+            Ok(())
+        })?
+    }
+
+    async fn store_hashed_accounts(
+        &self,
+        accounts: Vec<(B256, Option<Account>)>,
+    ) -> OpProofsStorageResult<()> {
+        let mut accounts = accounts;
+        if accounts.is_empty() {
+            return Ok(());
+        }
+
+        // Sort the accounts by key to ensure insertion is efficient
+        accounts.sort_by_key(|(key, _)| *key);
+
+        self.env.update(|tx| {
+            self.persist_history_batch(tx, 0, accounts.into_iter(), true)?;
+            Ok(())
+        })?
+    }
+
+    async fn store_hashed_storages(
+        &self,
+        hashed_address: B256,
+        storages: Vec<(B256, U256)>,
+    ) -> OpProofsStorageResult<()> {
+        let mut storages = storages;
+        if storages.is_empty() {
+            return Ok(());
+        }
+
+        // Sort the storages by key to ensure insertion is efficient
+        storages.sort_by_key(|(key, _)| *key);
+
+        self.env.update(|tx| {
+            self.persist_history_batch(
+                tx,
+                0,
+                storages
+                    .into_iter()
+                    .map(|(key, val)| (hashed_address, key, Some(StorageValue(val)))),
+                true,
+            )?;
+            Ok(())
+        })?
+    }
+
+    async fn get_earliest_block_number(&self) -> OpProofsStorageResult<Option<(u64, B256)>> {
+        self.env.view(|tx| self.inner_get_block_number_hash(tx, ProofWindowKey::EarliestBlock))?
+    }
+
+    async fn get_latest_block_number(&self) -> OpProofsStorageResult<Option<(u64, B256)>> {
+        self.env.view(|tx| self.inner_get_latest_block_number_hash(tx))?
+    }
+
+    fn storage_trie_cursor<'tx>(
+        &self,
+        hashed_address: B256,
+        max_block_number: u64,
+    ) -> OpProofsStorageResult<Self::StorageTrieCursor<'tx>> {
+        let tx = self.env.tx()?;
+        let cursor = tx.cursor_dup_read::<StorageTrieHistory>()?;
+
+        Ok(MdbxTrieCursor::new(cursor, max_block_number, Some(hashed_address)))
+    }
+
+    fn account_trie_cursor<'tx>(
+        &self,
+        max_block_number: u64,
+    ) -> OpProofsStorageResult<Self::AccountTrieCursor<'tx>> {
+        let tx = self.env.tx()?;
+        let cursor = tx.cursor_dup_read::<AccountTrieHistory>()?;
+
+        Ok(MdbxTrieCursor::new(cursor, max_block_number, None))
+    }
+
+    fn storage_hashed_cursor<'tx>(
+        &self,
+        hashed_address: B256,
+        max_block_number: u64,
+    ) -> OpProofsStorageResult<Self::StorageCursor<'tx>> {
+        let tx = self.env.tx()?;
+        let cursor = tx.cursor_dup_read::<HashedStorageHistory>()?;
+
+        Ok(MdbxStorageCursor::new(cursor, max_block_number, hashed_address))
+    }
+
+    fn account_hashed_cursor<'tx>(
+        &self,
+        max_block_number: u64,
+    ) -> OpProofsStorageResult<Self::AccountHashedCursor<'tx>> {
+        let tx = self.env.tx()?;
+        let cursor = tx.cursor_dup_read::<HashedAccountHistory>()?;
+
+        Ok(MdbxAccountCursor::new(cursor, max_block_number))
+    }
+
+    async fn store_trie_updates(
+        &self,
+        block_ref: BlockWithParent,
+        block_state_diff: BlockStateDiff,
+    ) -> OpProofsStorageResult<WriteCounts> {
+        self.env
+            .update(|tx| self.store_trie_updates_append_only(tx, block_ref, block_state_diff))?
+    }
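All four cursor constructors bound reads by `max_block_number`; as used elsewhere in this file (e.g. the `block_number - 1` reads before a wipe), the wrapped cursors appear to expose, per key, the newest version at or below that block, hiding keys whose newest visible version is a tombstone. An illustrative std-only lookup under that assumption:

```rust
// Versioned "as-of" lookup over one key's dup-sorted versions:
// (block_number, value), value None = tombstone, sorted by block number.
fn visible(versions: &[(u64, Option<u32>)], max_block: u64) -> Option<u32> {
    versions
        .iter()
        .take_while(|(b, _)| *b <= max_block)
        .last() // newest version at or below max_block
        .and_then(|(_, v)| *v) // a tombstone hides the key entirely
}

fn main() {
    let versions = [(0, Some(1)), (10, Some(2)), (20, None)];
    assert_eq!(visible(&versions, 5), Some(1));
    assert_eq!(visible(&versions, 15), Some(2));
    assert_eq!(visible(&versions, 25), None); // deleted at block 20
}
```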
+
+    async fn fetch_trie_updates(&self, block_number: u64) -> OpProofsStorageResult<BlockStateDiff> {
+        self.env.view(|tx| {
+            let mut change_set_cursor = tx.cursor_read::<BlockChangeSet>()?;
+            let (_, change_set) = change_set_cursor
+                .seek_exact(block_number)?
+                .ok_or(OpProofsStorageError::NoChangeSetForBlock(block_number))?;
+
+            let mut account_trie_cursor = tx.new_cursor::<AccountTrieHistory>()?;
+            let mut storage_trie_cursor = tx.new_cursor::<StorageTrieHistory>()?;
+            let mut hashed_account_cursor = tx.new_cursor::<HashedAccountHistory>()?;
+            let mut hashed_storage_cursor = tx.new_cursor::<HashedStorageHistory>()?;
+
+            let mut trie_updates = TrieUpdates::default();
+            for key in change_set.account_trie_keys {
+                let entry =
+                    match account_trie_cursor.seek_by_key_subkey(key.clone(), block_number)? {
+                        Some(v) if v.block_number == block_number => v.value.0,
+                        _ => {
+                            return Err(OpProofsStorageError::MissingAccountTrieHistory(
+                                key.0,
+                                block_number,
+                            ))
+                        }
+                    };
+
+                if let Some(value) = entry {
+                    trie_updates.account_nodes.insert(key.0, value);
+                } else {
+                    trie_updates.removed_nodes.insert(key.0);
+                }
+            }
+
+            for key in change_set.storage_trie_keys {
+                let entry =
+                    match storage_trie_cursor.seek_by_key_subkey(key.clone(), block_number)? {
+                        Some(v) if v.block_number == block_number => v.value.0,
+                        _ => {
+                            return Err(OpProofsStorageError::MissingStorageTrieHistory(
+                                key.hashed_address,
+                                key.path.0,
+                                block_number,
+                            ))
+                        }
+                    };
+
+                let stu = trie_updates
+                    .storage_tries
+                    .entry(key.hashed_address)
+                    .or_insert_with(StorageTrieUpdates::default);
+
+                // Handle the is_deleted scenario
+                // Issue: https://github.com/op-rs/op-reth/issues/323
+                if let Some(value) = entry {
+                    stu.storage_nodes.insert(key.path.0, value);
+                } else {
+                    stu.removed_nodes.insert(key.path.0);
+                }
+            }
+
+            let mut post_state =
+                HashedPostState::with_capacity(change_set.hashed_account_keys.len());
+            for key in change_set.hashed_account_keys {
+                let entry = match hashed_account_cursor.seek_by_key_subkey(key, block_number)? {
+                    Some(v) if v.block_number == block_number => v.value.0,
+                    _ => {
+                        return Err(OpProofsStorageError::MissingHashedAccountHistory(
+                            key,
+                            block_number,
+                        ))
+                    }
+                };
+
+                post_state.accounts.insert(key, entry);
+            }
+
+            for key in change_set.hashed_storage_keys {
+                let entry =
+                    match hashed_storage_cursor.seek_by_key_subkey(key.clone(), block_number)? {
+                        Some(v) if v.block_number == block_number => v.value.0,
+                        _ => {
+                            return Err(OpProofsStorageError::MissingHashedStorageHistory {
+                                hashed_address: key.hashed_address,
+                                hashed_storage_key: key.hashed_storage_key,
+                                block_number,
+                            })
+                        }
+                    };
+
+                let hs = post_state.storages.entry(key.hashed_address).or_default();
+
+                // Handle the wiped-storage scenario
+                // Issue: https://github.com/op-rs/op-reth/issues/323
+                if let Some(value) = entry {
+                    hs.storage.insert(key.hashed_storage_key, value.0);
+                } else {
+                    hs.storage.insert(key.hashed_storage_key, U256::ZERO);
+                }
+            }
+
+            Ok(BlockStateDiff {
+                sorted_trie_updates: trie_updates.into_sorted(),
+                sorted_post_state: post_state.into_sorted(),
+            })
+        })?
+    }
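The repeated `v.block_number == block_number` guards in `fetch_trie_updates` exist because a dup-sort `seek_by_key_subkey` lands on the first entry whose subkey is greater than or equal to the requested one. A small sketch of why landing on a later block must map to a `Missing*History` error:

```rust
// Versions for one key: (block_number, value), sorted by block number.
// seek_ge models the >= seek semantics of a dup-sort subkey lookup.
fn seek_ge(versions: &[(u64, u32)], subkey: u64) -> Option<(u64, u32)> {
    versions.iter().copied().find(|(b, _)| *b >= subkey)
}

fn exact(versions: &[(u64, u32)], block: u64) -> Option<u32> {
    match seek_ge(versions, block) {
        Some((b, v)) if b == block => Some(v),
        // Landing on a later block means no entry exists at `block`;
        // this maps to the Missing*History errors above.
        _ => None,
    }
}

fn main() {
    let versions = [(5, 50), (9, 90)];
    assert_eq!(exact(&versions, 9), Some(90));
    assert_eq!(exact(&versions, 7), None); // seek lands on block 9, not 7
}
```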
+
+    /// Update the initial state with the provided diff.
+    /// Prunes all historical trie data up to `new_earliest_block_number` (inclusive) using
+    /// the [`BlockChangeSet`] index.
+    ///
+    /// Arguments:
+    /// - `new_earliest_block_ref`: The new earliest block reference (with parent hash).
+    /// - `diff`: The state diff to apply to the initial state (block 0). This diff represents all
+    ///   the changes from the old earliest block to the new earliest block (inclusive).
+    async fn prune_earliest_state(
+        &self,
+        new_earliest_block_ref: BlockWithParent,
+        diff: BlockStateDiff,
+    ) -> OpProofsStorageResult<WriteCounts> {
+        let mut write_counts = WriteCounts::default();
+
+        let new_earliest_block_number = new_earliest_block_ref.block.number;
+        let Some((old_earliest_block_number, _)) = self.get_earliest_block_number().await?
+        else {
+            return Ok(write_counts); // Nothing to prune
+        };
+
+        if old_earliest_block_number >= new_earliest_block_number {
+            return Ok(write_counts); // Nothing to prune
+        }
+
+        self.env.update(|tx| {
+            // Update the initial state (block zero)
+            let change_set = self.store_trie_updates_for_block(tx, 0, diff, false)?;
+            write_counts += WriteCounts::new(
+                change_set.account_trie_keys.len() as u64,
+                change_set.storage_trie_keys.len() as u64,
+                change_set.hashed_account_keys.len() as u64,
+                change_set.hashed_storage_keys.len() as u64,
+            );
+
+            // Delete the old entries for the block range, excluding block 0
+            let delete_counts = self.delete_history_ranged(
+                tx,
+                max(old_earliest_block_number, 1)..=new_earliest_block_number,
+            )?;
+            write_counts += delete_counts;
+
+            // Set the earliest block number to the new value
+            Self::inner_set_earliest_block_number(
+                tx,
+                new_earliest_block_number,
+                new_earliest_block_ref.block.hash,
+            )?;
+            Ok(write_counts)
+        })?
+    }
+
+    /// Unwind the historical state to `to` (inclusive), deleting all history starting from the
+    /// provided block. Also updates `ProofWindowKey::LatestBlock` to the parent of `to`.
+    async fn unwind_history(&self, to: BlockWithParent) -> OpProofsStorageResult<()> {
+        self.env.update(|tx| {
+            let proof_window = match self.inner_get_proof_window(tx)? {
+                Some(pw) => pw,
+                None => return Ok(()), // Nothing to unwind
+            };
+
+            if to.block.number > proof_window.latest.number {
+                return Ok(()); // Nothing to unwind
+            }
+
+            if to.block.number <= proof_window.earliest.number {
+                return Err(OpProofsStorageError::UnwindBeyondEarliest {
+                    unwind_block_number: to.block.number,
+                    earliest_block_number: proof_window.earliest.number,
+                });
+            }
+
+            self.delete_history_ranged(tx, (to.block.number)..)?;
+
+            let new_latest_block =
+                BlockNumberHash::new(to.block.number.saturating_sub(1), to.parent);
+            let mut proof_window_cursor = tx.new_cursor::<ProofWindow>()?;
+            proof_window_cursor.append(ProofWindowKey::LatestBlock, &new_latest_block)?;
+
+            Ok(())
+        })?
+    }
+
+    async fn replace_updates(
+        &self,
+        latest_common_block_number: u64,
+        blocks_to_add: HashMap<BlockWithParent, BlockStateDiff>,
+    ) -> OpProofsStorageResult<()> {
+        self.env.update(|tx| {
+            self.delete_history_ranged(tx, latest_common_block_number + 1..)?;
+
+            // Sort by block number: HashMap does not guarantee order
+            // todo: use a sorted vec instead
+            let mut blocks_to_add_vec: Vec<(BlockWithParent, BlockStateDiff)> =
+                blocks_to_add.into_iter().collect();
+
+            blocks_to_add_vec.sort_unstable_by_key(|(bwp, _)| bwp.block.number);
+
+            // Update the proof window
+            // todo: refactor to use the block hash from the block to add. We need to pass the
+            // BlockNumHash type for latest_common_block_number
+            let mut proof_window_cursor = tx.new_cursor::<ProofWindow>()?;
+            proof_window_cursor.append(
+                ProofWindowKey::LatestBlock,
+                &BlockNumberHash::new(
+                    latest_common_block_number,
+                    blocks_to_add_vec.first().unwrap().0.parent,
+                ),
+            )?;
+
+            for (block_with_parent, diff) in blocks_to_add_vec {
+                self.store_trie_updates_append_only(tx, block_with_parent, diff)?;
+            }
+            Ok(())
+        })?
+    }
+
+    async fn set_earliest_block_number(
+        &self,
+        block_number: u64,
+        hash: B256,
+    ) -> OpProofsStorageResult<()> {
+        self.set_earliest_block_number_hash(block_number, hash).await
+    }
+}
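`unwind_history` only accepts targets strictly inside the proof window: unwinding above the latest block is a no-op, while unwinding at or below the earliest block would require history that pruning has already discarded. A condensed model of those guard rails (simplified types, hypothetical helper):

```rust
// Returns Ok(true) when an unwind should proceed, Ok(false) for a no-op,
// and Err for targets at or below the earliest retained block.
fn check_unwind(earliest: u64, latest: u64, to: u64) -> Result<bool, String> {
    if to > latest {
        return Ok(false); // nothing to unwind
    }
    if to <= earliest {
        return Err(format!("UnwindBeyondEarliest: {to} <= {earliest}"));
    }
    // Proceed: delete history in to.. and set the latest block to to - 1.
    Ok(true)
}

fn main() {
    assert_eq!(check_unwind(100, 200, 250), Ok(false)); // above latest: no-op
    assert_eq!(check_unwind(100, 200, 150), Ok(true)); // inside the window
    assert!(check_unwind(100, 200, 100).is_err()); // at/below earliest: error
}
```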
+
+/// This implementation is copied from the
+/// [`DatabaseMetrics`](reth_db::database_metrics::DatabaseMetrics) implementation for
+/// [`DatabaseEnv`]. As that implementation hard-codes the table names, it needs to be
+/// reimplemented here.
+#[cfg(feature = "metrics")]
+impl reth_db::database_metrics::DatabaseMetrics for MdbxProofsStorage {
+    fn report_metrics(&self) {
+        for (name, value, labels) in self.gauge_metrics() {
+            gauge!(name, labels).set(value);
+        }
+    }
+
+    fn gauge_metrics(&self) -> Vec<(&'static str, f64, Vec