From a717b54bb245283d43536736662e9acf801362ab Mon Sep 17 00:00:00 2001 From: dmkozh Date: Tue, 28 Apr 2026 16:26:21 -0400 Subject: [PATCH 1/2] Simplify apply load limits mode. Now that we have a better understand of what works for the benchmark, we can simplify the config and limit it to just the ledger limits + TPL, and derive the transaction profile automatically (initially we planned a capability of using different tx distributions, but that doesn't seem to be too useful and it does add noise to already noisy benchmarks). Also remove some unused modes from apply load and loadgen. --- Builds/VisualStudio/stellar-core.vcxproj | 6 +- .../VisualStudio/stellar-core.vcxproj.filters | 18 +- docs/apply-load-for-meta.cfg | 70 +-- docs/apply-load-ledger-limits.cfg | 73 +--- docs/apply-load-limits-for-model-tx.cfg | 128 ------ docs/software/commands.md | 6 +- src/main/Config.cpp | 53 +-- src/main/Config.h | 27 +- src/simulation/ApplyLoad.cpp | 397 +++++------------- src/simulation/ApplyLoad.h | 32 +- src/simulation/LoadGenerator.cpp | 39 +- src/simulation/LoadGenerator.h | 2 - src/simulation/TxGenerator.cpp | 47 ++- src/simulation/TxGenerator.h | 11 +- src/simulation/test/LoadGeneratorTests.cpp | 100 +---- 15 files changed, 217 insertions(+), 792 deletions(-) delete mode 100644 docs/apply-load-limits-for-model-tx.cfg diff --git a/Builds/VisualStudio/stellar-core.vcxproj b/Builds/VisualStudio/stellar-core.vcxproj index 55e855a360..d2e4460c2e 100644 --- a/Builds/VisualStudio/stellar-core.vcxproj +++ b/Builds/VisualStudio/stellar-core.vcxproj @@ -596,6 +596,7 @@ exit /b 0 + @@ -603,7 +604,6 @@ exit /b 0 - @@ -670,6 +670,7 @@ exit /b 0 + @@ -1060,6 +1061,7 @@ exit /b 0 + @@ -1069,7 +1071,6 @@ exit /b 0 - @@ -1119,6 +1120,7 @@ exit /b 0 + diff --git a/Builds/VisualStudio/stellar-core.vcxproj.filters b/Builds/VisualStudio/stellar-core.vcxproj.filters index be47043482..cd153ba742 100644 --- a/Builds/VisualStudio/stellar-core.vcxproj.filters +++ b/Builds/VisualStudio/stellar-core.vcxproj.filters @@ -1314,9 +1314,6 @@ main - - ledger - main @@ -1438,6 +1435,12 @@ util + + ledger + + + test + @@ -2427,9 +2430,6 @@ main - - ledger - main @@ -2554,6 +2554,12 @@ util + + ledger + + + test + diff --git a/docs/apply-load-for-meta.cfg b/docs/apply-load-for-meta.cfg index bdcda5dd65..2a1c5a3f09 100644 --- a/docs/apply-load-for-meta.cfg +++ b/docs/apply-load-for-meta.cfg @@ -15,44 +15,39 @@ METADATA_OUTPUT_STREAM='meta.xdr' # Network configuration to use during the benchmark # The fields here correspond to the network configuration settings. 
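# (With this change the per-transaction load profile, i.e. instructions,
# transaction size, and entry counts/sizes, is no longer configured explicitly;
# it is derived automatically from the limits below together with
# APPLY_LOAD_MAX_SOROBAN_TX_COUNT. See deriveLimitBasedTxProfile() in
# src/simulation/ApplyLoad.cpp later in this patch.)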
-APPLY_LOAD_LEDGER_MAX_INSTRUCTIONS = 500000000 -APPLY_LOAD_TX_MAX_INSTRUCTIONS = 100000000 +APPLY_LOAD_LEDGER_MAX_INSTRUCTIONS = 580000000 +APPLY_LOAD_TX_MAX_INSTRUCTIONS = 400000000 -APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS = 1 +APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS = 2 -APPLY_LOAD_TX_MAX_FOOTPRINT_SIZE = 200 +APPLY_LOAD_TX_MAX_FOOTPRINT_SIZE = 400 APPLY_LOAD_LEDGER_MAX_DISK_READ_LEDGER_ENTRIES = 1000 -APPLY_LOAD_TX_MAX_DISK_READ_LEDGER_ENTRIES = 100 +APPLY_LOAD_TX_MAX_DISK_READ_LEDGER_ENTRIES = 200 -APPLY_LOAD_LEDGER_MAX_DISK_READ_BYTES = 200000 -APPLY_LOAD_TX_MAX_DISK_READ_BYTES = 130000 +APPLY_LOAD_LEDGER_MAX_DISK_READ_BYTES = 400000 +APPLY_LOAD_TX_MAX_DISK_READ_BYTES = 200000 APPLY_LOAD_LEDGER_MAX_WRITE_LEDGER_ENTRIES = 1000 -APPLY_LOAD_TX_MAX_WRITE_LEDGER_ENTRIES = 100 +APPLY_LOAD_TX_MAX_WRITE_LEDGER_ENTRIES = 200 -APPLY_LOAD_LEDGER_MAX_WRITE_BYTES = 300000 -APPLY_LOAD_TX_MAX_WRITE_BYTES = 140000 +APPLY_LOAD_LEDGER_MAX_WRITE_BYTES = 286720 +APPLY_LOAD_TX_MAX_WRITE_BYTES = 132096 -APPLY_LOAD_MAX_LEDGER_TX_SIZE_BYTES = 270000 -APPLY_LOAD_MAX_TX_SIZE_BYTES = 150000 +APPLY_LOAD_MAX_LEDGER_TX_SIZE_BYTES = 266240 +APPLY_LOAD_MAX_TX_SIZE_BYTES = 132096 -APPLY_LOAD_MAX_CONTRACT_EVENT_SIZE_BYTES = 10000 -APPLY_LOAD_MAX_SOROBAN_TX_COUNT = 1000 +APPLY_LOAD_MAX_CONTRACT_EVENT_SIZE_BYTES = 16384 +APPLY_LOAD_MAX_SOROBAN_TX_COUNT = 482 # The following section contains various parameters for the generated load. -# Number of ledgers to close for benchmark +# Number of ledgers to close for the benchmark. APPLY_LOAD_NUM_LEDGERS = 100 # Generate that many simple Classic payment transactions in every benchmark ledger APPLY_LOAD_CLASSIC_TXS_PER_LEDGER = 1000 -# Size of every synthetic data entry generated. -# This setting affects both the size of the pre-generated Bucket List entries, -# and the size of every entry that a Soroban transaction reads/writes. -APPLY_LOAD_DATA_ENTRY_SIZE = 300 - # Bucket list pre-generation # The benchmark will pre-generate ledger entries using the simplified ledger @@ -78,44 +73,11 @@ APPLY_LOAD_BL_LAST_BATCH_LEDGERS = 300 #APPLY_LOAD_BL_LAST_BATCH_LEDGERS = 300 #APPLY_LOAD_BL_LAST_BATCH_SIZE = 100 -# Settings for generated transactions -# Every setting consists of the list of the possible values and the respective -# _DISTRIBUTION list that defines the weight of every value. The values are then -# sampled from the value list according to the distribution. - -# Core will try to pack as many generated transactions as possible, -# so if it's important to maintain a constant number of transactions per ledger, -# or to maintain constant utilization of every resources dimension in every -# ledger, then sampling should be avoided. - -# It's generally a good idea to utilize as many dimensions as possible, so -# the values here should be chosen carefully such that the ratio between the -# generated value and the respective limit is the roughly same for most of the -# resources. - -# Number of *disk* reads a transaction performs. Every disk read is restoration, -# so it's also a write (accounted for in NUM_RW_ENTRIES). -APPLY_LOAD_NUM_DISK_READ_ENTRIES = [1] -APPLY_LOAD_NUM_DISK_READ_ENTRIES_DISTRIBUTION = [1] - -# Number of writes a transaction performs. -APPLY_LOAD_NUM_RW_ENTRIES = [4] -APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION = [1] - -# Number of events a transaction emits. +# Number of events a transaction emits and the respective distribution weights. APPLY_LOAD_EVENT_COUNT = [5] APPLY_LOAD_EVENT_COUNT_DISTRIBUTION = [1] -# Size of the generated transaction. 
-APPLY_LOAD_TX_SIZE_BYTES = [1080] -APPLY_LOAD_TX_SIZE_BYTES_DISTRIBUTION = [1] - -# Number of instructions a transaction will use. -APPLY_LOAD_INSTRUCTIONS = [2000000] -APPLY_LOAD_INSTRUCTIONS_DISTRIBUTION = [1] - # Common apply load boilerplate - ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING=true # Diagnostic events should generally be disabled, but can be enabled for debug ENABLE_SOROBAN_DIAGNOSTIC_EVENTS = false diff --git a/docs/apply-load-ledger-limits.cfg b/docs/apply-load-ledger-limits.cfg index 6e244adf4e..cdd321e43b 100644 --- a/docs/apply-load-ledger-limits.cfg +++ b/docs/apply-load-ledger-limits.cfg @@ -19,46 +19,39 @@ METADATA_DEBUG_LEDGERS = 0 # Network configuration to use during the benchmark # The fields here correspond to the network configuration settings. -APPLY_LOAD_LEDGER_MAX_INSTRUCTIONS = 500000000 -APPLY_LOAD_TX_MAX_INSTRUCTIONS = 100000000 +APPLY_LOAD_LEDGER_MAX_INSTRUCTIONS = 580000000 +APPLY_LOAD_TX_MAX_INSTRUCTIONS = 400000000 -APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS = 1 +APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS = 2 -APPLY_LOAD_TX_MAX_FOOTPRINT_SIZE = 200 +APPLY_LOAD_TX_MAX_FOOTPRINT_SIZE = 400 APPLY_LOAD_LEDGER_MAX_DISK_READ_LEDGER_ENTRIES = 1000 -APPLY_LOAD_TX_MAX_DISK_READ_LEDGER_ENTRIES = 100 +APPLY_LOAD_TX_MAX_DISK_READ_LEDGER_ENTRIES = 200 -APPLY_LOAD_LEDGER_MAX_DISK_READ_BYTES = 150000 -APPLY_LOAD_TX_MAX_DISK_READ_BYTES = 130000 +APPLY_LOAD_LEDGER_MAX_DISK_READ_BYTES = 400000 +APPLY_LOAD_TX_MAX_DISK_READ_BYTES = 200000 APPLY_LOAD_LEDGER_MAX_WRITE_LEDGER_ENTRIES = 1000 -APPLY_LOAD_TX_MAX_WRITE_LEDGER_ENTRIES = 100 +APPLY_LOAD_TX_MAX_WRITE_LEDGER_ENTRIES = 200 -APPLY_LOAD_LEDGER_MAX_WRITE_BYTES = 300000 -APPLY_LOAD_TX_MAX_WRITE_BYTES = 140000 +APPLY_LOAD_LEDGER_MAX_WRITE_BYTES = 286720 +APPLY_LOAD_TX_MAX_WRITE_BYTES = 132096 -APPLY_LOAD_MAX_LEDGER_TX_SIZE_BYTES = 270000 -APPLY_LOAD_MAX_TX_SIZE_BYTES = 150000 +APPLY_LOAD_MAX_LEDGER_TX_SIZE_BYTES = 266240 +APPLY_LOAD_MAX_TX_SIZE_BYTES = 132096 -APPLY_LOAD_MAX_CONTRACT_EVENT_SIZE_BYTES = 10000 -APPLY_LOAD_MAX_SOROBAN_TX_COUNT = 1000 +APPLY_LOAD_MAX_CONTRACT_EVENT_SIZE_BYTES = 16384 +APPLY_LOAD_MAX_SOROBAN_TX_COUNT = 482 # The following section contains various parameters for the generated load. -# Maximum number of ledgers to close for every iteration of search. -# Should be at least 30 and normally doesn't need to be changed as search will -# not run extra iterations if the results are already statistically significant. -APPLY_LOAD_NUM_LEDGERS = 1000 +# Number of ledgers to close for the benchmark. +APPLY_LOAD_NUM_LEDGERS = 100 # Generate that many simple Classic payment transactions in every benchmark ledger APPLY_LOAD_CLASSIC_TXS_PER_LEDGER = 0 -# Size of every synthetic data entry generated. -# This setting affects both the size of the pre-generated Bucket List entries, -# and the size of every entry that a Soroban transaction reads/writes. -APPLY_LOAD_DATA_ENTRY_SIZE = 300 - # Bucket list pre-generation # The benchmark will pre-generate ledger entries using the simplified ledger @@ -84,42 +77,10 @@ APPLY_LOAD_BL_LAST_BATCH_LEDGERS = 300 #APPLY_LOAD_BL_LAST_BATCH_LEDGERS = 300 #APPLY_LOAD_BL_LAST_BATCH_SIZE = 100 -# Settings for generated transactions -# Every setting consists of the list of the possible values and the respective -# _DISTRIBUTION list that defines the weight of every value. The values are then -# sampled from the value list according to the distribution. 
- -# Core will try to pack as many generated transactions as possible, -# so if it's important to maintain a constant number of transactions per ledger, -# or to maintain constant utilization of every resources dimension in every -# ledger, then sampling should be avoided. - -# It's generally a good idea to utilize as many dimensions as possible, so -# the values here should be chosen carefully such that the ratio between the -# generated value and the respective limit is the roughly same for most of the -# resources. - -# Number of *disk* reads a transaction performs. Every disk read is restoration, -# so it's also a write (accounted for in NUM_RW_ENTRIES). -APPLY_LOAD_NUM_DISK_READ_ENTRIES = [2] -APPLY_LOAD_NUM_DISK_READ_ENTRIES_DISTRIBUTION = [1] - -# Number of writes a transaction performs. -APPLY_LOAD_NUM_RW_ENTRIES = [4] -APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION = [1] - -# Number of events a transaction emits. +# Number of events a transaction emits and the respective distribution weights. APPLY_LOAD_EVENT_COUNT = [5] APPLY_LOAD_EVENT_COUNT_DISTRIBUTION = [1] -# Size of the generated transaction. -APPLY_LOAD_TX_SIZE_BYTES = [1080] -APPLY_LOAD_TX_SIZE_BYTES_DISTRIBUTION = [1] - -# Number of instructions a transaction will use. -APPLY_LOAD_INSTRUCTIONS = [2000000] -APPLY_LOAD_INSTRUCTIONS_DISTRIBUTION = [1] - # Common apply load boilerplate ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING=true # Diagnostic events should generally be disabled, but can be enabled for debug diff --git a/docs/apply-load-limits-for-model-tx.cfg b/docs/apply-load-limits-for-model-tx.cfg deleted file mode 100644 index c532091add..0000000000 --- a/docs/apply-load-limits-for-model-tx.cfg +++ /dev/null @@ -1,128 +0,0 @@ -# This is the Stellar Core configuration example for using the load generation -# (apply-load) tool for finding the maximum ledger limits by applying a number -# of the equivalent 'model' transactions. -# -# The mode will find the maximum value of N, such that closing a ledger -# with N 'model' transactions takes less than a certain target time. Then -# it will find the actual ledger limits by multiplying the 'model' transaction -# dimensions by N. -# -# This is not meant to be used in any production contexts. -# -# The core with this configuration should be run using `./stellar-core apply-load` - -# Select the apply-load mode. -APPLY_LOAD_MODE="limits-for-model-tx" - -# Medida metrics (histograms in particular) in apply path cause severe and -# non-deterministic performance degradation. While this has to be addressed -# eventually, it is useful to disable these when optimizing anything besides -# the metrics. -DISABLE_SOROBAN_METRICS_FOR_TESTING = false -# Disable metadata output -METADATA_OUTPUT_STREAM = "" -# Disable metadata debug -METADATA_DEBUG_LEDGERS = 0 - -# Target average ledger close time. -APPLY_LOAD_TARGET_CLOSE_TIME_MS = 300 - -# Network configuration section - -# Most of the network configuration will be inferred automatically from the 'model' -# transaction (for transaction limits) and from the search itself (for the ledger) -# limits. Only the following limits need to be set: - -# In this mode, defines the search upper bound for the number of Soroban -# transactions to apply. -APPLY_LOAD_MAX_SOROBAN_TX_COUNT = 2000 - -# Number of the transaction clusters and thus apply threads. This will stay constant -# during the search, unlike all the other ledger limits. -APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS = 8 - -# The following section contains various parameters for the generated load. 
- -# Maximum number of ledgers to close for every iteration of search. -# Should be at least 30 and normally doesn't need to be changed as search will -# not run extra iterations if the results are already statistically significant. -# The average close time will then be compared to APPLY_LOAD_TARGET_CLOSE_TIME_MS. -APPLY_LOAD_NUM_LEDGERS = 1000 - -# Generate that many simple Classic payment transactions in every benchmark ledger. -# Note, that this will affect the close time. -APPLY_LOAD_CLASSIC_TXS_PER_LEDGER = 0 - -# Size of every synthetic data entry generated. -# This setting affects both the size of the pre-generated Bucket List entries, -# and the size of every entry that a Soroban transaction reads/writes. -APPLY_LOAD_DATA_ENTRY_SIZE = 250 - -# Bucket list pre-generation - -# The benchmark will pre-generate ledger entries using the simplified ledger -# close process; the generated ledgers won't be reflected in the meta or -# history checkpoints. - -# Faster settings, more shallow BL (up to level 6) -# Number of ledgers to close -APPLY_LOAD_BL_SIMULATED_LEDGERS = 10000 -# Write a batch of entries every that many ledgers -APPLY_LOAD_BL_WRITE_FREQUENCY = 1000 -# Write that many entries in every batch -APPLY_LOAD_BL_BATCH_SIZE = 1000 -# Write entry batches in every ledger of this many last ledgers -APPLY_LOAD_BL_LAST_BATCH_SIZE = 100 -# Write that many entries in every 'last' ledger -APPLY_LOAD_BL_LAST_BATCH_LEDGERS = 300 - -# Slower settings, deeper BL (up to level 9) -#APPLY_LOAD_BL_SIMULATED_LEDGERS = 300000 -#APPLY_LOAD_BL_WRITE_FREQUENCY = 10000 -#APPLY_LOAD_BL_BATCH_SIZE = 10000 -#APPLY_LOAD_BL_LAST_BATCH_LEDGERS = 300 -#APPLY_LOAD_BL_LAST_BATCH_SIZE = 100 - -# Settings for the generated 'model' transaction. -# Unlike the 'limit-based' apply-load mode, only a single value -# with `[1]` as distribution is allowed, thus only a single kind -# of transaction will be generated. - -# Number of *disk* reads a transaction performs. Every disk read is restoration, -# so it's also a write (accounted for in NUM_RW_ENTRIES). -APPLY_LOAD_NUM_DISK_READ_ENTRIES = [1] -APPLY_LOAD_NUM_DISK_READ_ENTRIES_DISTRIBUTION = [1] - -# Number of writes a transaction performs. -APPLY_LOAD_NUM_RW_ENTRIES = [5] -APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION = [1] - -# Number of 80-byte events a transaction emits. -APPLY_LOAD_EVENT_COUNT = [15] -APPLY_LOAD_EVENT_COUNT_DISTRIBUTION = [1] - -# Size of a generated transaction. -APPLY_LOAD_TX_SIZE_BYTES = [1650] -APPLY_LOAD_TX_SIZE_BYTES_DISTRIBUTION = [1] - -# Number of instructions a transaction will use. -APPLY_LOAD_INSTRUCTIONS = [4250000] -APPLY_LOAD_INSTRUCTIONS_DISTRIBUTION = [1] - - -# Common apply load boilerplate -ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING=true -# Diagnostic events should generally be disabled, but can be enabled for debug -ENABLE_SOROBAN_DIAGNOSTIC_EVENTS = false -# Set up plenty of genesis accounts - benchmark will fail if the number is not -# sufficient. This should be at least 2x of APPLY_LOAD_MAX_SOROBAN_TX_COUNT. -GENESIS_TEST_ACCOUNT_COUNT = 40000 - -# Minimal core config boilerplate - -UNSAFE_QUORUM=true -NODE_SEED="SDQVDISRYN2JXBS7ICL7QJAEKB3HWBJFP2QECXG7GZICAHBK4UNJCWK2 self" - -[QUORUM_SET] -THRESHOLD_PERCENT=100 -VALIDATORS=["$self"] diff --git a/docs/software/commands.md b/docs/software/commands.md index c4dde5790a..ad8218d318 100644 --- a/docs/software/commands.md +++ b/docs/software/commands.md @@ -34,12 +34,8 @@ Command options can only by placed after command. ledger close time for applying transactions. 
- `APPLY_LOAD_MODE="max-sac-tps"`: determines maximum TPS for the load consisting only of fast SAC transfer. - - `APPLY_LOAD_MODE="limits-for-model-tx"`: determines maximum ledger - limits for the load consisting only of a customizable 'model' - transaction. - `APPLY_LOAD_MODE="benchmark"`: benchmarks a fixed-size ledger of model - transactions. Use `APPLY_LOAD_MODEL_TX` to select the model transaction; - currently only `"sac"` is supported. + transactions. Use `APPLY_LOAD_MODEL_TX` to select the model transaction. * Load generation is configured in the Core config file. The relevant settings all begin with `APPLY_LOAD_`. See full example configurations with per-setting documentation in the `docs` directory diff --git a/src/main/Config.cpp b/src/main/Config.cpp index ed80271b39..fbaae88b36 100644 --- a/src/main/Config.cpp +++ b/src/main/Config.cpp @@ -415,17 +415,13 @@ parseApplyLoadMode(ConfigItem const& item) { return ApplyLoadMode::MAX_SAC_TPS; } - if (mode == "limits-for-model-tx") - { - return ApplyLoadMode::FIND_LIMITS_FOR_MODEL_TX; - } if (mode == "benchmark") { return ApplyLoadMode::BENCHMARK_MODEL_TX; } throw std::invalid_argument( "invalid 'APPLY_LOAD_MODE', expected one of: ledger-limits, " - "max-sac-tps, limits-for-model-tx, benchmark"); + "max-sac-tps, benchmark"); } ApplyLoadModelTx @@ -1686,16 +1682,6 @@ Config::processConfig(std::shared_ptr t) [&]() { APPLY_LOAD_MODE = parseApplyLoadMode(item); }}, {"APPLY_LOAD_MODEL_TX", [&]() { APPLY_LOAD_MODEL_TX = parseApplyLoadModelTx(item); }}, - {"APPLY_LOAD_DATA_ENTRY_SIZE", - [&]() { - APPLY_LOAD_DATA_ENTRY_SIZE = readInt(item); - // align to 4 bytes - if (APPLY_LOAD_DATA_ENTRY_SIZE % 4 != 0) - { - APPLY_LOAD_DATA_ENTRY_SIZE += - 4 - (APPLY_LOAD_DATA_ENTRY_SIZE % 4); - } - }}, {"APPLY_LOAD_BL_SIMULATED_LEDGERS", [&]() { APPLY_LOAD_BL_SIMULATED_LEDGERS = readInt(item); @@ -1714,43 +1700,6 @@ Config::processConfig(std::shared_ptr t) [&]() { APPLY_LOAD_BL_LAST_BATCH_SIZE = readInt(item); }}, - {"APPLY_LOAD_INSTRUCTIONS", - [&]() { - APPLY_LOAD_INSTRUCTIONS = readIntArray(item); - }}, - {"APPLY_LOAD_INSTRUCTIONS_DISTRIBUTION", - [&]() { - APPLY_LOAD_INSTRUCTIONS_DISTRIBUTION = - readIntArray(item); - }}, - {"APPLY_LOAD_TX_SIZE_BYTES", - [&]() { - APPLY_LOAD_TX_SIZE_BYTES = readIntArray(item); - }}, - {"APPLY_LOAD_TX_SIZE_BYTES_DISTRIBUTION", - [&]() { - APPLY_LOAD_TX_SIZE_BYTES_DISTRIBUTION = - readIntArray(item); - }}, - {"APPLY_LOAD_NUM_DISK_READ_ENTRIES", - [&]() { - APPLY_LOAD_NUM_DISK_READ_ENTRIES = - readIntArray(item); - }}, - {"APPLY_LOAD_NUM_DISK_READ_ENTRIES_DISTRIBUTION", - [&]() { - APPLY_LOAD_NUM_DISK_READ_ENTRIES_DISTRIBUTION = - readIntArray(item); - }}, - {"APPLY_LOAD_NUM_RW_ENTRIES", - [&]() { - APPLY_LOAD_NUM_RW_ENTRIES = readIntArray(item); - }}, - {"APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION", - [&]() { - APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION = - readIntArray(item); - }}, {"APPLY_LOAD_EVENT_COUNT", [&]() { APPLY_LOAD_EVENT_COUNT = readIntArray(item); diff --git a/src/main/Config.h b/src/main/Config.h index a718a8b843..3f214b615a 100644 --- a/src/main/Config.h +++ b/src/main/Config.h @@ -73,7 +73,6 @@ struct ValidatorWeightConfig enum class ApplyLoadMode { LIMIT_BASED, - FIND_LIMITS_FOR_MODEL_TX, MAX_SAC_TPS, BENCHMARK_MODEL_TX }; @@ -349,11 +348,6 @@ class Config : public std::enable_shared_from_this ApplyLoadMode APPLY_LOAD_MODE = ApplyLoadMode::LIMIT_BASED; ApplyLoadModelTx APPLY_LOAD_MODEL_TX = ApplyLoadModelTx::SAC; - // Size of the synthetic contract data entries used in apply-load. 
- // Currently we generate entries of the equal size for more precise - // control over the modelled instructions. - uint32_t APPLY_LOAD_DATA_ENTRY_SIZE = 0; - // The parameters below control the synthetic bucket list generation in // apply-load. @@ -400,34 +394,15 @@ class Config : public std::enable_shared_from_this uint32_t APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS = 1; // Number of ledgers to apply in apply-load. - // Depending on the mode this represents either the total number of ledgers - // to close for benchmarking, or the number of ledgers to apply per - // iteration of binary search for modes that perform search. uint32_t APPLY_LOAD_NUM_LEDGERS = 100; - // Target ledger close time in milliseconds for modes that perform binary - // search of TPS or limits. + // Target ledger close time in milliseconds for max-sac-tps mode. uint32_t APPLY_LOAD_TARGET_CLOSE_TIME_MS = 1000; // Number of classic transactions to include in each ledger in ledger limit // based apply-load mode. uint32_t APPLY_LOAD_CLASSIC_TXS_PER_LEDGER = 0; - // Number of instructions to generate in the apply-load transactions. - std::vector APPLY_LOAD_INSTRUCTIONS; - std::vector APPLY_LOAD_INSTRUCTIONS_DISTRIBUTION; - - // Transaction size in bytes for the apply-load transactions. - std::vector APPLY_LOAD_TX_SIZE_BYTES; - std::vector APPLY_LOAD_TX_SIZE_BYTES_DISTRIBUTION; - - // Number of disk read-only and read-write entries in the apply-load - // transactions. Every entry will have `APPLY_LOAD_DATA_ENTRY_SIZE` size. - std::vector APPLY_LOAD_NUM_DISK_READ_ENTRIES; - std::vector APPLY_LOAD_NUM_DISK_READ_ENTRIES_DISTRIBUTION; - std::vector APPLY_LOAD_NUM_RW_ENTRIES; - std::vector APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION; - // Number of events to generate in the apply-load transactions. std::vector APPLY_LOAD_EVENT_COUNT; std::vector APPLY_LOAD_EVENT_COUNT_DISTRIBUTION; diff --git a/src/simulation/ApplyLoad.cpp b/src/simulation/ApplyLoad.cpp index f881ef2d40..c9ad33d788 100644 --- a/src/simulation/ApplyLoad.cpp +++ b/src/simulation/ApplyLoad.cpp @@ -38,6 +38,13 @@ namespace { constexpr double NOISY_BINARY_SEARCH_CONFIDENCE = 0.99; +uint32_t +roundUpToXdrSizeMultiple(uint32_t size) +{ + uint32_t remainder = size % 4; + return remainder == 0 ? 
size : size + 4 - remainder; +} + void logExecutionEnvironmentSnapshot(Config const& cfg) { @@ -69,6 +76,81 @@ interpolatePercentile(std::vector const& sortedValues, return sortedValues[lo] * (1.0 - weight) + sortedValues[hi] * weight; } +template +void +throwIfResourceIsZero(T resourceVal, char const* resourceName) +{ + if (resourceVal == 0) + { + throw std::runtime_error(fmt::format( + FMT_STRING( + "Derived apply-load tx profile has zero {} in LIMIT_BASED " + "mode; reduce APPLY_LOAD_MAX_SOROBAN_TX_COUNT or raise " + "the corresponding ledger limit"), + resourceName)); + } +} + +ApplyLoadTxProfile +deriveLimitBasedTxProfile(ApplyLoadMode mode, Config const& cfg) +{ + if (mode != ApplyLoadMode::LIMIT_BASED) + { + return ApplyLoadTxProfile{}; + } + uint32_t txsPerLedger = cfg.APPLY_LOAD_MAX_SOROBAN_TX_COUNT; + releaseAssert(txsPerLedger != 0); + + ApplyLoadTxProfile txProfile; + txProfile.instructions = std::min( + static_cast(cfg.APPLY_LOAD_LEDGER_MAX_INSTRUCTIONS * + cfg.APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS / + txsPerLedger), + cfg.APPLY_LOAD_TX_MAX_INSTRUCTIONS); + throwIfResourceIsZero(txProfile.instructions, "instructions"); + + txProfile.txSizeBytes = + std::min(cfg.APPLY_LOAD_MAX_LEDGER_TX_SIZE_BYTES / txsPerLedger, + cfg.APPLY_LOAD_MAX_TX_SIZE_BYTES); + txProfile.txSizeBytes = roundUpToXdrSizeMultiple(txProfile.txSizeBytes); + throwIfResourceIsZero(txProfile.txSizeBytes, "tx-size bytes"); + + txProfile.rwEntries = + std::min({cfg.APPLY_LOAD_LEDGER_MAX_WRITE_LEDGER_ENTRIES / txsPerLedger, + cfg.APPLY_LOAD_TX_MAX_WRITE_LEDGER_ENTRIES, + cfg.APPLY_LOAD_TX_MAX_FOOTPRINT_SIZE}); + throwIfResourceIsZero(txProfile.rwEntries, "rw entries"); + + // Base the data entry size on the write bytes limit, as normally the disk + // reads should mostly come from the restorations, and the restorations + // require write bytes. + txProfile.dataEntrySizeBytes = + std::min(cfg.APPLY_LOAD_LEDGER_MAX_WRITE_BYTES / + (txsPerLedger * txProfile.rwEntries), + cfg.APPLY_LOAD_TX_MAX_WRITE_BYTES / txProfile.rwEntries); + txProfile.dataEntrySizeBytes -= txProfile.dataEntrySizeBytes % 4; + throwIfResourceIsZero(txProfile.dataEntrySizeBytes, "data entry size"); + + txProfile.diskReadEntries = std::min( + {cfg.APPLY_LOAD_LEDGER_MAX_DISK_READ_LEDGER_ENTRIES / txsPerLedger, + cfg.APPLY_LOAD_LEDGER_MAX_DISK_READ_BYTES / + (txsPerLedger * txProfile.dataEntrySizeBytes), + cfg.APPLY_LOAD_TX_MAX_DISK_READ_LEDGER_ENTRIES, txProfile.rwEntries}); + // Allow 0 disk read entries in case if we want to omit restorations. 
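    // For illustration, with the limits from docs/apply-load-for-meta.cfg in
    // this change (580M ledger instructions, 2 clusters, 482 Soroban txs per
    // ledger), the integer arithmetic above works out roughly to:
    //   instructions       = min(580M * 2 / 482, 400M)           = 2,406,639
    //   txSizeBytes        = min(266240 / 482, 132096)           = 552
    //   rwEntries          = min(1000 / 482, 200, 400)           = 2
    //   dataEntrySizeBytes = min(286720 / (482 * 2), 132096 / 2) = 297,
    //                        rounded down to a 4-byte multiple   = 296
    //   diskReadEntries    = min(1000 / 482, 400000 / (482 * 296), 200, 2) = 2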
+ + CLOG_INFO(Perf, + "Derived tx profile for {} txs per ledger: " + "instructions {}, tx size {}, disk read entries {}, disk read " + "bytes {}, rw entries {}, write bytes {}", + cfg.APPLY_LOAD_MAX_SOROBAN_TX_COUNT, txProfile.instructions, + txProfile.txSizeBytes, txProfile.diskReadEntries, + txProfile.diskReadEntries * txProfile.dataEntrySizeBytes, + txProfile.rwEntries, + txProfile.rwEntries * txProfile.dataEntrySizeBytes); + + return txProfile; +} + SorobanUpgradeConfig getUpgradeConfig(Config const& cfg, bool validate = true) { @@ -509,51 +591,27 @@ ApplyLoad::getKeyForArchivedEntry(uint64_t index) } uint32_t -ApplyLoad::calculateRequiredHotArchiveEntries(ApplyLoadMode mode, - Config const& cfg) +ApplyLoad::getTotalHotArchiveEntries() const { - // If no RO entries are configured, return 0 - if (cfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES.empty()) - { - return 0; - } + return mTotalHotArchiveEntries; +} - releaseAssertOrThrow( - cfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES.size() == - cfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES_DISTRIBUTION.size()); - - // Calculate mean disk reads per transaction - double totalWeight = std::accumulate( - cfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES_DISTRIBUTION.begin(), - cfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES_DISTRIBUTION.end(), 0.0); - double meanDiskReadsPerTx = 0.0; - for (size_t i = 0; i < cfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES.size(); ++i) +uint32_t +ApplyLoad::calculateRequiredHotArchiveEntries(Config const& cfg) +{ + if (mMode != ApplyLoadMode::LIMIT_BASED) { - meanDiskReadsPerTx += - static_cast(cfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES[i]) * - (cfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES_DISTRIBUTION[i] / - totalWeight); + return 0; } // Calculate total expected disk reads - double totalExpectedRestores = meanDiskReadsPerTx * - cfg.APPLY_LOAD_MAX_SOROBAN_TX_COUNT * - cfg.APPLY_LOAD_NUM_LEDGERS; + uint32_t totalExpectedRestores = mLimitsBasedTxProfile.diskReadEntries * + cfg.APPLY_LOAD_MAX_SOROBAN_TX_COUNT * + cfg.APPLY_LOAD_NUM_LEDGERS; // We technically can only actually perform totalExpectedRestores, but we // still need to create valid transactions in the 'mempool', so we need // to scale the expected number of restores by the transaction queue size. totalExpectedRestores *= cfg.SOROBAN_TRANSACTION_QUEUE_SIZE_MULTIPLIER; - - // In FIND_LIMITS_FOR_MODEL_TX mode, we perform a binary search that uses - // new restores and thus we need to additionally scale the restores by - // log2 of max tx count (which approximates the maximum number of binary - // search iterations). - if (mode == ApplyLoadMode::FIND_LIMITS_FOR_MODEL_TX) - { - totalExpectedRestores *= log2(cfg.APPLY_LOAD_MAX_SOROBAN_TX_COUNT); - } - - // Add some generous buffer since actual distributions may vary. return totalExpectedRestores * 1.5; } @@ -561,8 +619,9 @@ ApplyLoad::ApplyLoad(Application& app) : mApp(app) , mMode(app.getConfig().APPLY_LOAD_MODE) , mModelTx(app.getConfig().APPLY_LOAD_MODEL_TX) + , mLimitsBasedTxProfile(deriveLimitBasedTxProfile(mMode, app.getConfig())) , mTotalHotArchiveEntries( - calculateRequiredHotArchiveEntries(mMode, app.getConfig())) + calculateRequiredHotArchiveEntries(app.getConfig())) , mTxCountUtilization( mApp.getMetrics().NewHistogram({"soroban", "apply-load", "tx-count"})) , mInstructionUtilization(mApp.getMetrics().NewHistogram( @@ -612,8 +671,7 @@ ApplyLoad::ApplyLoad(Application& app) } // Noisy binary search-based modes require at least 30 ledgers to have // enough samples for statistics to be meaningful. 
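    // (A sample of about 30 is the usual rule of thumb for the sample mean of
    // the per-ledger close times to be approximately normal, which the
    // confidence-based stopping criterion of the noisy binary search relies
    // on.)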
- if (mMode == ApplyLoadMode::MAX_SAC_TPS || - mMode == ApplyLoadMode::FIND_LIMITS_FOR_MODEL_TX) + if (mMode == ApplyLoadMode::MAX_SAC_TPS) { if (config.APPLY_LOAD_NUM_LEDGERS < 30) @@ -635,7 +693,6 @@ ApplyLoad::ApplyLoad(Application& app) switch (mMode) { case ApplyLoadMode::LIMIT_BASED: - case ApplyLoadMode::FIND_LIMITS_FOR_MODEL_TX: mNumAccounts = config.APPLY_LOAD_MAX_SOROBAN_TX_COUNT * config.SOROBAN_TRANSACTION_QUEUE_SIZE_MULTIPLIER + config.APPLY_LOAD_CLASSIC_TXS_PER_LEDGER * @@ -724,7 +781,6 @@ ApplyLoad::setup() switch (mMode) { case ApplyLoadMode::LIMIT_BASED: - case ApplyLoadMode::FIND_LIMITS_FOR_MODEL_TX: setupLoadContract(); break; case ApplyLoadMode::MAX_SAC_TPS: @@ -752,7 +808,6 @@ ApplyLoad::setup() switch (mMode) { case ApplyLoadMode::MAX_SAC_TPS: - case ApplyLoadMode::FIND_LIMITS_FOR_MODEL_TX: // Just upgrade to a placeholder number of TXs, we'll // upgrade again before each TPS run. upgradeSettingsForMaxTPS(100000); @@ -766,8 +821,7 @@ ApplyLoad::setup() } // Setup initial bucket list for modes that support it. - if (mMode == ApplyLoadMode::LIMIT_BASED || - mMode == ApplyLoadMode::FIND_LIMITS_FOR_MODEL_TX) + if (mMode == ApplyLoadMode::LIMIT_BASED) { setupBucketList(); } @@ -836,9 +890,6 @@ ApplyLoad::execute() case ApplyLoadMode::MAX_SAC_TPS: findMaxSacTps(); break; - case ApplyLoadMode::FIND_LIMITS_FOR_MODEL_TX: - findMaxLimitsForModelTransaction(); - break; case ApplyLoadMode::BENCHMARK_MODEL_TX: benchmarkModelTx(); break; @@ -938,123 +989,6 @@ ApplyLoad::applyConfigUpgrade(SorobanUpgradeConfig const& upgradeConfig) 1); } -std::pair -ApplyLoad::updateSettingsForTxCount(uint64_t txsPerLedger) -{ - // Round the configuration values down to be a multiple of the respective - // step in order to get more readable configurations, and also to speeed - // up the binary search significantly. 
- uint64_t const INSTRUCTIONS_ROUNDING_STEP = 5'000'000; - uint64_t const SIZE_ROUNDING_STEP = 500; - uint64_t const ENTRIES_ROUNDING_STEP = 10; - - auto const& config = mApp.getConfig(); - uint64_t insns = - roundDown(txsPerLedger * config.APPLY_LOAD_INSTRUCTIONS[0] / - config.APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS, - INSTRUCTIONS_ROUNDING_STEP); - uint64_t txSize = roundDown( - txsPerLedger * config.APPLY_LOAD_TX_SIZE_BYTES[0], SIZE_ROUNDING_STEP); - - uint64_t writeEntries = - roundDown(txsPerLedger * config.APPLY_LOAD_NUM_RW_ENTRIES[0], - ENTRIES_ROUNDING_STEP); - uint64_t writeBytes = roundDown( - writeEntries * config.APPLY_LOAD_DATA_ENTRY_SIZE, SIZE_ROUNDING_STEP); - - uint64_t diskReadEntries = - roundDown(txsPerLedger * config.APPLY_LOAD_NUM_DISK_READ_ENTRIES[0], - ENTRIES_ROUNDING_STEP); - uint64_t diskReadBytes = - roundDown(diskReadEntries * config.APPLY_LOAD_DATA_ENTRY_SIZE, - SIZE_ROUNDING_STEP); - - if (diskReadEntries == 0) - { - diskReadEntries = - MinimumSorobanNetworkConfig::TX_MAX_READ_LEDGER_ENTRIES; - diskReadBytes = MinimumSorobanNetworkConfig::TX_MAX_READ_BYTES; - } - - uint64_t actualMaxTxs = txsPerLedger; - actualMaxTxs = - std::min(actualMaxTxs, - insns * config.APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS / - config.APPLY_LOAD_INSTRUCTIONS[0]); - actualMaxTxs = - std::min(actualMaxTxs, txSize / config.APPLY_LOAD_TX_SIZE_BYTES[0]); - if (config.APPLY_LOAD_NUM_DISK_READ_ENTRIES[0] > 0) - { - actualMaxTxs = std::min(actualMaxTxs, - diskReadEntries / - config.APPLY_LOAD_NUM_DISK_READ_ENTRIES[0]); - actualMaxTxs = std::min( - actualMaxTxs, - diskReadBytes / (config.APPLY_LOAD_NUM_DISK_READ_ENTRIES[0] * - config.APPLY_LOAD_DATA_ENTRY_SIZE)); - } - actualMaxTxs = std::min(actualMaxTxs, - writeEntries / config.APPLY_LOAD_NUM_RW_ENTRIES[0]); - - actualMaxTxs = std::min(actualMaxTxs, - writeBytes / (config.APPLY_LOAD_NUM_RW_ENTRIES[0] * - config.APPLY_LOAD_DATA_ENTRY_SIZE)); - CLOG_INFO(Perf, - "Resources after rounding for testing {} actual max txs per " - "ledger: " - "instructions {}, tx size {}, disk read entries {}, " - "disk read bytes {}, rw entries {}, rw bytes {}", - actualMaxTxs, insns, txSize, diskReadEntries, diskReadBytes, - writeEntries, writeBytes); - - auto upgradeConfig = getUpgradeConfig(mApp.getConfig(), - /* validate */ false); - // Set tx limits to the respective resources of the 'model' - // transaction. 
- upgradeConfig.txMaxInstructions = - std::max(MinimumSorobanNetworkConfig::TX_MAX_INSTRUCTIONS, - config.APPLY_LOAD_INSTRUCTIONS[0]); - upgradeConfig.txMaxSizeBytes = - std::max(MinimumSorobanNetworkConfig::TX_MAX_SIZE_BYTES, - config.APPLY_LOAD_TX_SIZE_BYTES[0]); - upgradeConfig.txMaxDiskReadEntries = - std::max(MinimumSorobanNetworkConfig::TX_MAX_READ_LEDGER_ENTRIES, - config.APPLY_LOAD_NUM_DISK_READ_ENTRIES[0]); - upgradeConfig.txMaxWriteLedgerEntries = - std::max(MinimumSorobanNetworkConfig::TX_MAX_WRITE_LEDGER_ENTRIES, - config.APPLY_LOAD_NUM_RW_ENTRIES[0]); - upgradeConfig.txMaxDiskReadBytes = - std::max(MinimumSorobanNetworkConfig::TX_MAX_READ_BYTES, - config.APPLY_LOAD_NUM_DISK_READ_ENTRIES[0] * - config.APPLY_LOAD_DATA_ENTRY_SIZE); - upgradeConfig.txMaxWriteBytes = - std::max(MinimumSorobanNetworkConfig::TX_MAX_WRITE_BYTES, - config.APPLY_LOAD_NUM_RW_ENTRIES[0] * - config.APPLY_LOAD_DATA_ENTRY_SIZE); - upgradeConfig.txMaxContractEventsSizeBytes = - std::max(MinimumSorobanNetworkConfig::TX_MAX_CONTRACT_EVENTS_SIZE_BYTES, - config.APPLY_LOAD_EVENT_COUNT[0] * - TxGenerator::SOROBAN_LOAD_V2_EVENT_SIZE_BYTES + - 100); - upgradeConfig.txMaxFootprintEntries = - *upgradeConfig.txMaxDiskReadEntries + - *upgradeConfig.txMaxWriteLedgerEntries; - - // Set the ledger-wide limits to the compute values calculated above. - // Note, that in theory we could end up with ledger limits lower than - // the transaction limits, but in normally would be just - // mis-configuration (using a model transaction that is too large to - // be applied within the target close time). - upgradeConfig.ledgerMaxInstructions = insns; - upgradeConfig.ledgerMaxTransactionsSizeBytes = txSize; - upgradeConfig.ledgerMaxDiskReadEntries = diskReadEntries; - upgradeConfig.ledgerMaxWriteLedgerEntries = writeEntries; - upgradeConfig.ledgerMaxDiskReadBytes = diskReadBytes; - upgradeConfig.ledgerMaxWriteBytes = writeBytes; - - return std::make_pair(upgradeConfig, actualMaxTxs); -} - void ApplyLoad::upgradeSettings() { @@ -1235,22 +1169,21 @@ ApplyLoad::setupBucketList() ContractDataDurability::PERSISTENT; baseLiveEntry.data.contractData().val.type(SCV_BYTES); - mDataEntrySize = xdr::xdr_size(baseLiveEntry); + auto dataEntrySize = xdr::xdr_size(baseLiveEntry); // Add some padding to reach the configured LE size. - if (mDataEntrySize < mApp.getConfig().APPLY_LOAD_DATA_ENTRY_SIZE) + if (dataEntrySize < mLimitsBasedTxProfile.dataEntrySizeBytes) { baseLiveEntry.data.contractData().val.bytes().resize( - mApp.getConfig().APPLY_LOAD_DATA_ENTRY_SIZE - mDataEntrySize); - mDataEntrySize = mApp.getConfig().APPLY_LOAD_DATA_ENTRY_SIZE; - releaseAssertOrThrow(xdr::xdr_size(baseLiveEntry) == mDataEntrySize); + mLimitsBasedTxProfile.dataEntrySizeBytes - dataEntrySize); + dataEntrySize = mLimitsBasedTxProfile.dataEntrySizeBytes; + releaseAssertOrThrow(xdr::xdr_size(baseLiveEntry) == dataEntrySize); } else { - CLOG_WARNING(Perf, - "Apply load generated entry size is larger than " - "APPLY_LOAD_DATA_ENTRY_SIZE: {} > {}", - mApp.getConfig().APPLY_LOAD_DATA_ENTRY_SIZE, - mDataEntrySize); + throw std::runtime_error(fmt::format( + "Apply load generated entry size is larger than " + "resolved apply-load data entry size: {} > {}", + dataEntrySize, mLimitsBasedTxProfile.dataEntrySizeBytes)); } auto logBucketListStats = [](std::string const& logStr, @@ -1273,6 +1206,12 @@ ApplyLoad::setupBucketList() // ledgers, but save one batch for the last batch to populate upper levels. 
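    // For example (illustrative), with 10000 simulated ledgers and a write
    // frequency of 1000, as used in the example configs and the apply-load
    // test, this yields 10 batches; a non-divisible combination such as 10000
    // ledgers at a frequency of 1500 is rounded up to 7 by the check below.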
uint32_t totalBatchCount = cfg.APPLY_LOAD_BL_SIMULATED_LEDGERS / cfg.APPLY_LOAD_BL_WRITE_FREQUENCY; + if (cfg.APPLY_LOAD_BL_SIMULATED_LEDGERS % + cfg.APPLY_LOAD_BL_WRITE_FREQUENCY != + 0) + { + totalBatchCount++; + } releaseAssertOrThrow(totalBatchCount > 0); // Reserve one batch worth of entries for the top level buckets. @@ -1560,7 +1499,7 @@ ApplyLoad::benchmarkLimitsIteration() auto [_, tx] = mTxGenerator.invokeSorobanLoadTransactionV2( lm.getLastClosedLedgerNum() + 1, it->first, mLoadInstance, - mDataEntryCount, mDataEntrySize, 1'000'000); + mLimitsBasedTxProfile, mDataEntryCount, 1'000'000); uint32_t ledgerVersion = mApp.getLedgerManager() .getLastClosedLedgerHeader() @@ -1610,116 +1549,6 @@ ApplyLoad::benchmarkLimitsIteration() return closeTime; } -void -ApplyLoad::findMaxLimitsForModelTransaction() -{ - auto const& config = mApp.getConfig(); - - auto validateTxParam = [&config](std::string const& paramName, - auto const& values, auto const& weights, - bool allowZeroValue = false) { - if (values.size() != 1) - { - throw std::runtime_error( - fmt::format(FMT_STRING("{} must have exactly one entry for " - "'limits-for-model-tx' mode"), - paramName)); - } - if (!allowZeroValue && values[0] == 0) - { - throw std::runtime_error(fmt::format( - FMT_STRING("{} cannot be zero for 'limits-for-model-tx' mode"), - paramName)); - } - if (weights.size() != 1 || weights[0] != 1) - { - throw std::runtime_error( - fmt::format(FMT_STRING("{}_DISTRIBUTION must have exactly one " - "entry with the value of 1 for " - "'limits-for-model-tx' mode"), - paramName)); - } - }; - validateTxParam("APPLY_LOAD_INSTRUCTIONS", config.APPLY_LOAD_INSTRUCTIONS, - config.APPLY_LOAD_INSTRUCTIONS_DISTRIBUTION); - validateTxParam("APPLY_LOAD_TX_SIZE_BYTES", config.APPLY_LOAD_TX_SIZE_BYTES, - config.APPLY_LOAD_TX_SIZE_BYTES_DISTRIBUTION); - validateTxParam("APPLY_LOAD_NUM_DISK_READ_ENTRIES", - config.APPLY_LOAD_NUM_DISK_READ_ENTRIES, - config.APPLY_LOAD_NUM_DISK_READ_ENTRIES_DISTRIBUTION, true); - validateTxParam("APPLY_LOAD_NUM_RW_ENTRIES", - config.APPLY_LOAD_NUM_RW_ENTRIES, - config.APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION); - validateTxParam("APPLY_LOAD_EVENT_COUNT", config.APPLY_LOAD_EVENT_COUNT, - config.APPLY_LOAD_EVENT_COUNT_DISTRIBUTION, true); - - double targetTimeMs = mApp.getConfig().APPLY_LOAD_TARGET_CLOSE_TIME_MS; - - // Track the best config found during the search - SorobanUpgradeConfig maxLimitsConfig; - uint64_t maxLimitsTxsPerLedger = 0; - - auto prepareIteration = [this, &config](uint32_t testTxsPerLedger) { - CLOG_INFO(Perf, - "Testing ledger max model txs: {}, generated limits: " - "instructions {}, tx size {}, disk read entries {}, rw " - "entries {}", - testTxsPerLedger, - testTxsPerLedger * config.APPLY_LOAD_INSTRUCTIONS[0], - testTxsPerLedger * config.APPLY_LOAD_TX_SIZE_BYTES[0], - testTxsPerLedger * config.APPLY_LOAD_NUM_DISK_READ_ENTRIES[0], - testTxsPerLedger * config.APPLY_LOAD_NUM_RW_ENTRIES[0]); - - auto [upgradeConfig, actualMaxTxsPerLedger] = - updateSettingsForTxCount(testTxsPerLedger); - - applyConfigUpgrade(upgradeConfig); - }; - auto iterationResult = [this, &maxLimitsTxsPerLedger, &maxLimitsConfig]( - uint32_t testTxsPerLedger, bool isAbove) { - auto [upgradeConfig, actualMaxTxsPerLedger] = - updateSettingsForTxCount(testTxsPerLedger); - // Store the config if this is the best so far - if (!isAbove && actualMaxTxsPerLedger > maxLimitsTxsPerLedger) - { - maxLimitsTxsPerLedger = actualMaxTxsPerLedger; - maxLimitsConfig = upgradeConfig; - } - }; - - auto benchmarkFunc = [this](uint32_t 
testTxsPerLedger) -> double { - double closeTime = benchmarkLimitsIteration(); - releaseAssert(successRate() == 1.0); - return closeTime; - }; - - uint32_t minTxsPerLedger = 1; - uint32_t maxTxsPerLedger = mApp.getConfig().APPLY_LOAD_MAX_SOROBAN_TX_COUNT; - size_t maxSamplesPerPoint = mApp.getConfig().APPLY_LOAD_NUM_LEDGERS; - uint32_t xTolerance = 100; - - auto [lo, hi] = noisyBinarySearch( - benchmarkFunc, targetTimeMs, minTxsPerLedger, maxTxsPerLedger, - NOISY_BINARY_SEARCH_CONFIDENCE, xTolerance, maxSamplesPerPoint, - prepareIteration, iterationResult); - // Note, that the final search range may be above the TPL found, that's due - // to rounding we do when calculating TPL to benchmark (not every TPL - // value can be tested fairly). - CLOG_INFO(Perf, - "Maximum limits found for model transaction ({} TPL, [{}, {}] " - "final search range): " - "instructions {}, " - "tx size {}, disk read entries {}, disk read bytes {}, " - "write entries {}, write bytes {}", - maxLimitsTxsPerLedger, lo, hi, - *maxLimitsConfig.ledgerMaxInstructions, - *maxLimitsConfig.ledgerMaxTransactionsSizeBytes, - *maxLimitsConfig.ledgerMaxDiskReadEntries, - *maxLimitsConfig.ledgerMaxDiskReadBytes, - *maxLimitsConfig.ledgerMaxWriteLedgerEntries, - *maxLimitsConfig.ledgerMaxWriteBytes); -} - double ApplyLoad::successRate() { diff --git a/src/simulation/ApplyLoad.h b/src/simulation/ApplyLoad.h index 9dcc3788ca..d7b424ea97 100644 --- a/src/simulation/ApplyLoad.h +++ b/src/simulation/ApplyLoad.h @@ -44,10 +44,12 @@ class ApplyLoad // Returns LedgerKey for pre-populated archived state at the given index. static LedgerKey getKeyForArchivedEntry(uint64_t index); - static uint32_t calculateRequiredHotArchiveEntries(ApplyLoadMode mode, - Config const& cfg); + + uint32_t getTotalHotArchiveEntries() const; private: + uint32_t calculateRequiredHotArchiveEntries(Config const& cfg); + void setup(); void setupUpgradeContract(); void setupLoadContract(); @@ -63,18 +65,6 @@ class ApplyLoad // support metrics. void benchmarkLimits(); - // Runs for `execute() in `ApplyLoadMode::FIND_LIMITS_FOR_MODEL_TX` mode. - // Generates transactions according to the 'model' transaction parameters - // (specified via the transaction generation config), and does a binary - // search for the maximum number of such transactions that can fit into - // ledger while not exceeding APPLY_LOAD_TARGET_CLOSE_TIME_MS ledger close - // time. - // After finding the maximum number of model transactions, outputs the - // respective ledger limits. - // This also performs some rounding on the ledger limits to make the binary - // search faster, and also to produce more readable limits. - void findMaxLimitsForModelTransaction(); - // Runs for `execute() in `ApplyLoadMode::MAX_SAC_TPS` mode. // Generates SAC transactions and times just the application phase (fee and // sequence number processing, tx execution, and post process, but no disk @@ -146,22 +136,15 @@ class ApplyLoad // Helper method to apply a config upgrade void applyConfigUpgrade(SorobanUpgradeConfig const& upgradeConfig); - // Updates the configuration settings such a way to accommodate around - // `txsPerLedger` 'model' transactions per ledger for the - // `FIND_LIMITS_FOR_MODEL_TX` mode. - // Returns the network configuration to use for upgrade and the actual - // number of transactions that can fit withing the limits (it may be - // slightly lower than `txsPerLedger` due to rounding). 
- std::pair - updateSettingsForTxCount(uint64_t txsPerLedger); - Application& mApp; ApplyLoadMode mMode; ApplyLoadModelTx mModelTx; + ApplyLoadTxProfile mLimitsBasedTxProfile; - uint32_t mNumAccounts; uint32_t mTotalHotArchiveEntries; + uint32_t mNumAccounts; + medida::Histogram& mTxCountUtilization; medida::Histogram& mInstructionUtilization; medida::Histogram& mTxSizeUtilization; @@ -183,7 +166,6 @@ class ApplyLoad // Used for batch transfers, one instance for each cluster std::vector mBatchTransferInstances; size_t mDataEntryCount = 0; - size_t mDataEntrySize = 0; // Used to generate custom token transfer transactions TxGenerator::ContractInstance mTokenInstance; diff --git a/src/simulation/LoadGenerator.cpp b/src/simulation/LoadGenerator.cpp index 476fa9c5ac..eacec08204 100644 --- a/src/simulation/LoadGenerator.cpp +++ b/src/simulation/LoadGenerator.cpp @@ -162,10 +162,6 @@ LoadGenerator::getMode(std::string const& mode) { return LoadGenMode::PAY_PREGENERATED; } - else if (mode == "soroban_invoke_apply_load") - { - return LoadGenMode::SOROBAN_INVOKE_APPLY_LOAD; - } else if (mode == "mixed_pregen_sac_payment") { return LoadGenMode::MIXED_PREGEN_SAC_PAYMENT; @@ -326,9 +322,7 @@ LoadGenerator::start(GeneratedLoadConfig& cfg) mClassicAppliedAtStart = getTxCount(mApp, /* isSoroban */ false); mSorobanAppliedAtStart = getTxCount(mApp, /* isSoroban */ true); - if ((cfg.mode == LoadGenMode::SOROBAN_INVOKE_APPLY_LOAD || - cfg.modeMixesPregen()) && - !mApp.getRunInOverlayOnlyMode()) + if (cfg.modeMixesPregen() && !mApp.getRunInOverlayOnlyMode()) { reset(); throw std::runtime_error( @@ -641,9 +635,6 @@ GeneratedLoadConfig::getStatus() const case LoadGenMode::PAY_PREGENERATED: modeStr = "pay_pregenerated"; break; - case LoadGenMode::SOROBAN_INVOKE_APPLY_LOAD: - modeStr = "SOROBAN_INVOKE_APPLY_LOAD"; - break; case LoadGenMode::MIXED_PREGEN_SAC_PAYMENT: modeStr = "mixed_pregen_sac_payment"; break; @@ -849,22 +840,6 @@ LoadGenerator::generateLoad(GeneratedLoadConfig cfg) case LoadGenMode::PAY_PREGENERATED: generateTx = [&]() { return readTransactionFromFile(cfg); }; break; - case LoadGenMode::SOROBAN_INVOKE_APPLY_LOAD: - generateTx = [&]() { - auto instanceIter = mContractInstances.find(sourceAccountId); - releaseAssert(instanceIter != mContractInstances.end()); - auto const& instance = instanceIter->second; - auto const& appCfg = mApp.getConfig(); - uint64_t dataEntryCount = - appCfg.APPLY_LOAD_BL_BATCH_SIZE * - appCfg.APPLY_LOAD_BL_SIMULATED_LEDGERS; - size_t dataEntrySize = appCfg.APPLY_LOAD_DATA_ENTRY_SIZE; - - return mTxGenerator.invokeSorobanLoadTransactionV2( - ledgerNum, sourceAccountId, instance, dataEntryCount, - dataEntrySize, cfg.maxGeneratedFeeRate); - }; - break; case LoadGenMode::MIXED_PREGEN_SAC_PAYMENT: case LoadGenMode::MIXED_PREGEN_OZ_TOKEN_TRANSFER: case LoadGenMode::MIXED_PREGEN_SOROSWAP_SWAP: @@ -1581,9 +1556,6 @@ LoadGenerator::execute(TransactionFrameBasePtr txf, LoadGenMode mode, releaseAssert(false); } break; - case LoadGenMode::SOROBAN_INVOKE_APPLY_LOAD: - txm.mSorobanInvokeTxs.Mark(); - break; case LoadGenMode::MIXED_PREGEN_SAC_PAYMENT: case LoadGenMode::MIXED_PREGEN_OZ_TOKEN_TRANSFER: case LoadGenMode::MIXED_PREGEN_SOROSWAP_SWAP: @@ -1931,8 +1903,7 @@ GeneratedLoadConfig::isSoroban() const mode == LoadGenMode::SOROBAN_UPLOAD || mode == LoadGenMode::SOROBAN_UPGRADE_SETUP || mode == LoadGenMode::SOROBAN_CREATE_UPGRADE || - mode == LoadGenMode::MIXED_CLASSIC_SOROBAN || - mode == LoadGenMode::SOROBAN_INVOKE_APPLY_LOAD; + mode == LoadGenMode::MIXED_CLASSIC_SOROBAN; } bool @@ 
-1949,16 +1920,14 @@ GeneratedLoadConfig::isLoad() const mode == LoadGenMode::SOROBAN_INVOKE || mode == LoadGenMode::SOROBAN_CREATE_UPGRADE || mode == LoadGenMode::MIXED_CLASSIC_SOROBAN || - mode == LoadGenMode::PAY_PREGENERATED || - mode == LoadGenMode::SOROBAN_INVOKE_APPLY_LOAD || modeMixesPregen(); + mode == LoadGenMode::PAY_PREGENERATED || modeMixesPregen(); } bool GeneratedLoadConfig::modeInvokes() const { return mode == LoadGenMode::SOROBAN_INVOKE || - mode == LoadGenMode::MIXED_CLASSIC_SOROBAN || - mode == LoadGenMode::SOROBAN_INVOKE_APPLY_LOAD; + mode == LoadGenMode::MIXED_CLASSIC_SOROBAN; } bool diff --git a/src/simulation/LoadGenerator.h b/src/simulation/LoadGenerator.h index 8801274efb..74eae0195c 100644 --- a/src/simulation/LoadGenerator.h +++ b/src/simulation/LoadGenerator.h @@ -45,8 +45,6 @@ enum class LoadGenMode MIXED_CLASSIC_SOROBAN, // Submit pre-generated payment transactions from an XDR file PAY_PREGENERATED, - // Submit the same type of invoke transaction as ApplyLoad - SOROBAN_INVOKE_APPLY_LOAD, // Overlay-only modes: pre-generated classic payments + a soroban // transaction type of choice, each with its own TPS. No on-ledger setup // is required; soroban contract keys are synthesized in memory. Apply diff --git a/src/simulation/TxGenerator.cpp b/src/simulation/TxGenerator.cpp index 5f2dba47fe..d1e8f97eb1 100644 --- a/src/simulation/TxGenerator.cpp +++ b/src/simulation/TxGenerator.cpp @@ -372,8 +372,9 @@ increaseOpSize(Operation& op, uint32_t increaseUpToBytes) auth.rootInvocation.function.type( SOROBAN_AUTHORIZED_FUNCTION_TYPE_CONTRACT_FN); SCVal val(SCV_BYTES); + auth.rootInvocation.function.contractFn().args = {val}; - auto const overheadBytes = xdr::xdr_size(auth) + xdr::xdr_size(val); + auto const overheadBytes = xdr::xdr_size(auth); if (overheadBytes > increaseUpToBytes) { increaseUpToBytes = 0; @@ -552,7 +553,7 @@ TxGenerator::invokeSorobanLoadTransaction( std::pair TxGenerator::invokeSorobanLoadTransactionV2( uint32_t ledgerNum, uint64_t accountId, ContractInstance const& instance, - uint64_t dataEntryCount, size_t dataEntrySize, + ApplyLoadTxProfile const& txProfile, uint64_t dataEntryCount, std::optional maxGeneratedFeeRate) { auto const& appCfg = mApp.getConfig(); @@ -560,10 +561,9 @@ TxGenerator::invokeSorobanLoadTransactionV2( // The estimates below are fairly tight as they depend on linear // functions (maybe with a small constant factor as well). 
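    // Roughly: a generated invocation has a fixed base cost, plus a cost per
    // emitted event and per authorization-payload byte; the remainder of the
    // per-tx instruction target is presumably filled with guest VM cycles at
    // about 40 instructions each (see the constants below), which is how the
    // target instruction count is hit fairly precisely.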
uint32_t const baseInstructionCount = 737'119; - uint32_t const baselineTxSizeBytes = 256; + uint32_t const baselineTxSizeBytes = 328; uint32_t const eventSize = TxGenerator::SOROBAN_LOAD_V2_EVENT_SIZE_BYTES; uint32_t const instructionsPerGuestCycle = 40; - uint32_t const instructionsPerHostCycle = 4'875; uint32_t const instructionsPerAuthByte = 35; uint32_t const instructionsPerEvent = 8'500; @@ -575,14 +575,10 @@ TxGenerator::invokeSorobanLoadTransactionV2( uint32_t archiveEntriesToRestore = 0; if (mPrePopulatedArchivedEntries != 0) { - archiveEntriesToRestore = sampleDiscrete( - appCfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES, - appCfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES_DISTRIBUTION, 0u); + archiveEntriesToRestore = txProfile.diskReadEntries; } - uint32_t rwEntries = - sampleDiscrete(appCfg.APPLY_LOAD_NUM_RW_ENTRIES, - appCfg.APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION, 0u); + uint32_t rwEntries = txProfile.rwEntries; // Subtract the archive entries from rwEntries since restoration counts as a // write @@ -641,26 +637,24 @@ TxGenerator::invokeSorobanLoadTransactionV2( } } - uint32_t txOverheadBytes = baselineTxSizeBytes + xdr::xdr_size(resources); - uint32_t desiredTxBytes = - sampleDiscrete(appCfg.APPLY_LOAD_TX_SIZE_BYTES, - appCfg.APPLY_LOAD_TX_SIZE_BYTES_DISTRIBUTION, 0u); + uint32_t txOverheadBytes = baselineTxSizeBytes + xdr::xdr_size(resources) + + 4 * archivedIndexes.size(); + uint32_t desiredTxBytes = txProfile.txSizeBytes; uint32_t paddingBytes = txOverheadBytes > desiredTxBytes ? 0 : desiredTxBytes - txOverheadBytes; uint32_t entriesWriteSize = - dataEntrySize * (rwEntries + archiveEntriesToRestore); + txProfile.dataEntrySizeBytes * (rwEntries + archiveEntriesToRestore); uint32_t eventCount = sampleDiscrete(appCfg.APPLY_LOAD_EVENT_COUNT, appCfg.APPLY_LOAD_EVENT_COUNT_DISTRIBUTION, 0u); // Pick random number of cycles between bounds - uint32_t targetInstructions = - sampleDiscrete(appCfg.APPLY_LOAD_INSTRUCTIONS, - appCfg.APPLY_LOAD_INSTRUCTIONS_DISTRIBUTION, 0u); + uint32_t targetInstructions = txProfile.instructions; resources.instructions = targetInstructions; resources.writeBytes = entriesWriteSize; - resources.diskReadBytes = dataEntrySize * archiveEntriesToRestore; + resources.diskReadBytes = + txProfile.dataEntrySizeBytes * archiveEntriesToRestore; auto numEntries = (rwEntries + archiveEntriesToRestore + instance.readOnlyKeys.size()); @@ -712,7 +706,14 @@ TxGenerator::invokeSorobanLoadTransactionV2( ihf.invokeContract().args = {makeU32(guestCycles), makeU32(hostCycles), makeU32(eventCount)}; - increaseOpSize(op, paddingBytes); + SorobanAuthorizationEntry auth; + auth.credentials.type(SOROBAN_CREDENTIALS_SOURCE_ACCOUNT); + auth.rootInvocation.function.type( + SOROBAN_AUTHORIZED_FUNCTION_TYPE_CONTRACT_FN); + SCVal val(SCV_BYTES); + val.bytes().resize(paddingBytes); + auth.rootInvocation.function.contractFn().args = {val}; + op.body.invokeHostFunctionOp().auth = {auth}; auto resourceFee = sorobanResourceFee(mApp, resources, txOverheadBytes + paddingBytes, @@ -733,6 +734,12 @@ TxGenerator::invokeSorobanLoadTransactionV2( resourceFee, std::nullopt, std::nullopt, archivedIndexes.empty() ? 
std::nullopt : std::make_optional(archivedIndexes)); + auto txSize = xdr::xdr_size(tx->getEnvelope()); + if (txSize != txProfile.txSizeBytes) + { + CLOG_WARNING(Perf, "Tx size is different than desired: {} vs {}", + txSize, txProfile.txSizeBytes); + } return std::make_pair(account, tx); } diff --git a/src/simulation/TxGenerator.h b/src/simulation/TxGenerator.h index 76c0c8a88b..ebed981e03 100644 --- a/src/simulation/TxGenerator.h +++ b/src/simulation/TxGenerator.h @@ -101,6 +101,15 @@ struct SorobanUpgradeConfig std::optional freezeBypassTxsDelta{}; }; +struct ApplyLoadTxProfile +{ + uint32_t instructions = 0; + uint32_t txSizeBytes = 0; + uint32_t diskReadEntries = 0; + uint32_t rwEntries = 0; + uint32_t dataEntrySizeBytes = 0; +}; + class TxGenerator { public: @@ -215,8 +224,8 @@ class TxGenerator std::pair invokeSorobanLoadTransactionV2(uint32_t ledgerNum, uint64_t accountId, ContractInstance const& instance, + ApplyLoadTxProfile const& txProfile, uint64_t dataEntryCount, - size_t dataEntrySize, std::optional maxGeneratedFeeRate); std::pair invokeSACPayment(uint32_t ledgerNum, uint64_t fromAccountId, diff --git a/src/simulation/test/LoadGeneratorTests.cpp b/src/simulation/test/LoadGeneratorTests.cpp index dc1fee9760..aa00c16bf2 100644 --- a/src/simulation/test/LoadGeneratorTests.cpp +++ b/src/simulation/test/LoadGeneratorTests.cpp @@ -29,10 +29,6 @@ TEST_CASE("loadgen in overlay-only mode", "[loadgen]") Simulation::pointer simulation = Topologies::pair(Simulation::OVER_LOOPBACK, networkID, [&](int i) { auto cfg = getTestConfig(i); - cfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES = {10}; - cfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES_DISTRIBUTION = {100}; - cfg.APPLY_LOAD_NUM_RW_ENTRIES = {5}; - cfg.APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION = {100}; cfg.LOADGEN_INSTRUCTIONS_FOR_TESTING = {10'000'000, 50'000'000}; cfg.LOADGEN_INSTRUCTIONS_DISTRIBUTION_FOR_TESTING = {5, 1}; cfg.ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING = true; @@ -92,13 +88,6 @@ TEST_CASE("loadgen in overlay-only mode", "[loadgen]") app.getLoadGenerator().generateLoad(GeneratedLoadConfig::txLoad( LoadGenMode::PAY, nAccounts, nTxs, /* txRate */ 1)); } - SECTION("invoke realistic") - { - // Simulate realistic invoke transactions - app.getLoadGenerator().generateLoad( - GeneratedLoadConfig::txLoad(LoadGenMode::SOROBAN_INVOKE_APPLY_LOAD, - nAccounts, nTxs, /* txRate */ 1)); - } simulation->crankUntil( [&]() { return app.getMetrics() @@ -1005,8 +994,6 @@ TEST_CASE("apply load", "[loadgen][applyload][acceptance]") cfg.APPLY_LOAD_CLASSIC_TXS_PER_LEDGER = 100; - cfg.APPLY_LOAD_DATA_ENTRY_SIZE = 1000; - // BL generation parameters cfg.APPLY_LOAD_BL_SIMULATED_LEDGERS = 10000; cfg.APPLY_LOAD_BL_WRITE_FREQUENCY = 1000; @@ -1014,32 +1001,19 @@ TEST_CASE("apply load", "[loadgen][applyload][acceptance]") cfg.APPLY_LOAD_BL_LAST_BATCH_LEDGERS = 300; cfg.APPLY_LOAD_BL_LAST_BATCH_SIZE = 100; - // Load generation parameters - cfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES = {0, 1, 2}; - cfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES_DISTRIBUTION = {3, 2, 1}; - - cfg.APPLY_LOAD_NUM_RW_ENTRIES = {1, 5, 10}; - cfg.APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION = {1, 1, 1}; - cfg.APPLY_LOAD_EVENT_COUNT = {100}; cfg.APPLY_LOAD_EVENT_COUNT_DISTRIBUTION = {1}; - cfg.APPLY_LOAD_TX_SIZE_BYTES = {1'000, 2'000, 5'000}; - cfg.APPLY_LOAD_TX_SIZE_BYTES_DISTRIBUTION = {3, 2, 1}; - - cfg.APPLY_LOAD_INSTRUCTIONS = {10'000'000, 50'000'000}; - cfg.APPLY_LOAD_INSTRUCTIONS_DISTRIBUTION = {5, 1}; - // Ledger and transaction limits cfg.APPLY_LOAD_LEDGER_MAX_INSTRUCTIONS = 500'000'000; 
cfg.APPLY_LOAD_TX_MAX_INSTRUCTIONS = 100'000'000; cfg.APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS = 2; - cfg.APPLY_LOAD_LEDGER_MAX_DISK_READ_LEDGER_ENTRIES = 2000; - cfg.APPLY_LOAD_TX_MAX_DISK_READ_LEDGER_ENTRIES = 100; + cfg.APPLY_LOAD_LEDGER_MAX_DISK_READ_LEDGER_ENTRIES = 200; + cfg.APPLY_LOAD_TX_MAX_DISK_READ_LEDGER_ENTRIES = 10; cfg.APPLY_LOAD_TX_MAX_FOOTPRINT_SIZE = 100; - cfg.APPLY_LOAD_LEDGER_MAX_DISK_READ_BYTES = 50'000'000; + cfg.APPLY_LOAD_LEDGER_MAX_DISK_READ_BYTES = 1'000'000; cfg.APPLY_LOAD_TX_MAX_DISK_READ_BYTES = 200'000; cfg.APPLY_LOAD_LEDGER_MAX_WRITE_LEDGER_ENTRIES = 1250; @@ -1064,9 +1038,7 @@ TEST_CASE("apply load", "[loadgen][applyload][acceptance]") ApplyLoad al(*app); // Sample a few indices to verify hot archive is properly initialized - uint32_t expectedArchivedEntries = - ApplyLoad::calculateRequiredHotArchiveEntries( - ApplyLoadMode::LIMIT_BASED, cfg); + uint32_t expectedArchivedEntries = al.getTotalHotArchiveEntries(); std::vector sampleIndices = {0, expectedArchivedEntries / 2, expectedArchivedEntries - 1}; std::set sampleKeys; @@ -1086,70 +1058,6 @@ TEST_CASE("apply load", "[loadgen][applyload][acceptance]") REQUIRE(1.0 - al.successRate() < std::numeric_limits::epsilon()); } -TEST_CASE("apply load find max limits for model tx", - "[loadgen][applyload][acceptance]") -{ - auto cfg = getTestConfig(); - cfg.APPLY_LOAD_MODE = ApplyLoadMode::FIND_LIMITS_FOR_MODEL_TX; - cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 1000; - cfg.USE_CONFIG_FOR_GENESIS = true; - cfg.LEDGER_PROTOCOL_VERSION = Config::CURRENT_LEDGER_PROTOCOL_VERSION; - cfg.MANUAL_CLOSE = true; - cfg.ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING = true; - cfg.GENESIS_TEST_ACCOUNT_COUNT = 10000; - - // Also generate that many classic simple payments. - cfg.APPLY_LOAD_CLASSIC_TXS_PER_LEDGER = 100; - - // Close 30 ledgers per iteration. - cfg.APPLY_LOAD_NUM_LEDGERS = 30; - // The target close time is 500ms. - cfg.APPLY_LOAD_TARGET_CLOSE_TIME_MS = 500; - - // Size of each data entry to be used in the test. - cfg.APPLY_LOAD_DATA_ENTRY_SIZE = 100; - - // BL generation parameters - cfg.APPLY_LOAD_BL_SIMULATED_LEDGERS = 1000; - cfg.APPLY_LOAD_BL_WRITE_FREQUENCY = 1000; - cfg.APPLY_LOAD_BL_BATCH_SIZE = 1000; - cfg.APPLY_LOAD_BL_LAST_BATCH_LEDGERS = 300; - cfg.APPLY_LOAD_BL_LAST_BATCH_SIZE = 100; - - // Load generation parameters - cfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES = {1}; - cfg.APPLY_LOAD_NUM_DISK_READ_ENTRIES_DISTRIBUTION = {1}; - - cfg.APPLY_LOAD_NUM_RW_ENTRIES = {4}; - cfg.APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION = {1}; - - cfg.APPLY_LOAD_EVENT_COUNT = {2}; - cfg.APPLY_LOAD_EVENT_COUNT_DISTRIBUTION = {1}; - - cfg.APPLY_LOAD_TX_SIZE_BYTES = {1000}; - cfg.APPLY_LOAD_TX_SIZE_BYTES_DISTRIBUTION = {1}; - - cfg.APPLY_LOAD_INSTRUCTIONS = {2'000'000}; - cfg.APPLY_LOAD_INSTRUCTIONS_DISTRIBUTION = {1}; - - // Only a few ledger limits need to be specified, the rest will be found by - // the benchmark itself. - // Number of soroban txs per ledger is the upper bound of the binary - // search for the number of the model txs to include in each ledger. - cfg.APPLY_LOAD_MAX_SOROBAN_TX_COUNT = 1000; - // Use 2 clusters/threads. 
- cfg.APPLY_LOAD_LEDGER_MAX_DEPENDENT_TX_CLUSTERS = 2; - - VirtualClock clock(VirtualClock::REAL_TIME); - auto app = createTestApplication(clock, cfg); - - ApplyLoad al(*app); - - al.execute(); - - REQUIRE(1.0 - al.successRate() < std::numeric_limits::epsilon()); -} - TEST_CASE("apply load find max SAC TPS", "[loadgen][applyload][soroban][acceptance]") { From 0aeb427554fd944e630e00eb24f3864448ebe592 Mon Sep 17 00:00:00 2001 From: dmkozh Date: Thu, 30 Apr 2026 14:10:03 -0400 Subject: [PATCH 2/2] Fix flaky test. There was relatively high probability that a contract instance is not invoked during the load generation, so increase the number of transactions to reduce it to a small enough value to never happen. --- src/simulation/test/LoadGeneratorTests.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/simulation/test/LoadGeneratorTests.cpp b/src/simulation/test/LoadGeneratorTests.cpp index aa00c16bf2..560186b2f1 100644 --- a/src/simulation/test/LoadGeneratorTests.cpp +++ b/src/simulation/test/LoadGeneratorTests.cpp @@ -746,7 +746,7 @@ TEST_CASE("generate soroban load", "[loadgen][soroban]") }, simulation); auto const numInstances = nAccounts; - auto const numSorobanTxs = 150; + auto const numSorobanTxs = 500; numTxsBefore = getSuccessfulTxCount();
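As a rough sanity check of the reasoning in the second commit message, the
chance that some contract instance is never invoked can be bounded with a
simple union bound, assuming every generated transaction picks one of the
instances uniformly at random (an assumption; the real load generator
distributes transactions per source account). The standalone sketch below is
illustrative only, and the instance count of 20 is a hypothetical placeholder,
not a value taken from the test. Under that assumption the bound drops from
roughly 1% at 150 transactions to about 1e-10 at 500, which matches the intent
of the fix.

#include <cmath>
#include <cstdio>
#include <initializer_list>

// Union-bound estimate of P(at least one of n instances is never picked)
// when t transactions each pick an instance uniformly at random.
// n and t are hypothetical example values, not taken from the test.
static double
probSomeInstanceUnused(double n, double t)
{
    // P(a fixed instance is never picked) = (1 - 1/n)^t; union bound over n.
    return n * std::pow(1.0 - 1.0 / n, t);
}

int
main()
{
    double const n = 20.0; // hypothetical number of contract instances
    for (double t : {150.0, 500.0})
    {
        std::printf("n=%.0f, t=%.0f: P(some instance unused) <= %.3g\n", n, t,
                    probSomeInstanceUnused(n, t));
    }
    return 0;
}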