From 9381fb2a5f9c96fec2d3a5b64680f503bc201381 Mon Sep 17 00:00:00 2001 From: Manav Darji Date: Fri, 24 Apr 2026 17:33:22 +0530 Subject: [PATCH 1/3] eth: improve private tx purging and error reporting --- eth/backend.go | 3 + eth/relay/multiclient.go | 52 +++++++- eth/relay/multiclient_test.go | 195 +++++++++++++++++++++++++++++ eth/relay/private_tx_store.go | 62 ++++++++- eth/relay/private_tx_store_test.go | 84 +++++++++++++ eth/relay/rejection_tracker.go | 92 ++++++++++++++ eth/relay/relay.go | 22 +++- 7 files changed, 503 insertions(+), 7 deletions(-) create mode 100644 eth/relay/rejection_tracker.go diff --git a/eth/backend.go b/eth/backend.go index 1b51bfde4d..95e9db1051 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -404,6 +404,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { // The `config.TxPool.PriceLimit` used above doesn't reflect the sanitized/enforced changes // made in the txpool. Update the `gasTip` explicitly to reflect the enforced value. eth.txPool.SetGasTip(new(big.Int).SetUint64(params.BorDefaultTxPoolPriceLimit)) + + // Allow private tx store to check if txs are still in the pool for cleanup + relayService.SetTxPoolChecker(eth.txPool.Has) } if !config.TxPool.NoLocals { diff --git a/eth/relay/multiclient.go b/eth/relay/multiclient.go index 45738ce2ec..dd1060c4e5 100644 --- a/eth/relay/multiclient.go +++ b/eth/relay/multiclient.go @@ -27,6 +27,10 @@ var ( rpcErrorInPreconfMeter = metrics.NewRegisteredMeter("preconfs/rpcerror", nil) belowThresholdPreconfMeter = metrics.NewRegisteredMeter("preconfs/belowthreshold", nil) alreadyKnownErrMeter = metrics.NewRegisteredMeter("relay/txalreadyknown", nil) + + // Track BP level RPC rejections for all tx submissions + preconfRejectionMeter = metrics.NewRegisteredMeter("relay/bprpc/preconf/error", nil) + privateTxRejectionMeter = metrics.NewRegisteredMeter("relay/bprpc/privatetx/error", nil) ) // isAlreadyKnownError checks if the error indicates the transaction is already known to the node @@ -43,6 +47,10 @@ type multiClient struct { clients []*rpc.Client // rpc client instances dialed to each block producer closed atomic.Bool retryInterval time.Duration // 0 means use privateTxRetryInterval; configurable for testing + + rejectionTracker rejectionTracker + reporterDone chan struct{} // closed to signal the reporter goroutine to exit + closeOnce sync.Once } func newMultiClient(urls []string) *multiClient { @@ -94,9 +102,12 @@ func newMultiClient(urls []string) *multiClient { } log.Info("[tx-relay] Initialised rpc client for each block producer", "success", len(clients), "failed", failed) - return &multiClient{ - clients: clients, + mc := &multiClient{ + clients: clients, + reporterDone: make(chan struct{}), } + go mc.reportRejections() + return mc } type SendTxForPreconfResponse struct { @@ -129,6 +140,8 @@ func (mc *multiClient) submitPreconfTx(rawTx []byte) (bool, error) { preconfOfferedCount.Add(1) return } + preconfRejectionMeter.Mark(1) + mc.rejectionTracker.record(err) setError.Do(func() { firstErr = err }) @@ -194,6 +207,8 @@ func (mc *multiClient) submitPrivateTx(rawTx []byte, hash common.Hash, retry boo successfulIndices = append(successfulIndices, index) return } + privateTxRejectionMeter.Mark(1) + mc.rejectionTracker.record(err) setError.Do(func() { firstErr = err }) @@ -284,6 +299,8 @@ func (mc *multiClient) retryPrivateTxSubmission(hexTx string, hash common.Hash, alreadyKnownErrMeter.Mark(1) return } + privateTxRejectionMeter.Mark(1) + mc.rejectionTracker.record(err) mu.Lock() 
newFailedIndices = append(newFailedIndices, idx) mu.Unlock() @@ -354,8 +371,33 @@ func (mc *multiClient) checkTxStatus(hash common.Hash) (bool, error) { // Close closes all rpc client connections func (mc *multiClient) close() { - mc.closed.Store(true) - for _, client := range mc.clients { - client.Close() + mc.closeOnce.Do(func() { + mc.closed.Store(true) + if mc.reporterDone != nil { + close(mc.reporterDone) + } + for _, client := range mc.clients { + client.Close() + } + }) +} + +// reportRejections runs in the background and flushes the rejection tracker on a +// fixed interval, emitting one aggregated error log for different error types. +func (mc *multiClient) reportRejections() { + ticker := time.NewTicker(rejectionReportInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + total, counts := mc.rejectionTracker.flush() + log.Info("[tx-relay] BP rejection summary", + "total", total, + "errors", formatRejectionCounts(counts), + ) + case <-mc.reporterDone: + return + } } } diff --git a/eth/relay/multiclient_test.go b/eth/relay/multiclient_test.go index 04fb2b6da4..80fae352b1 100644 --- a/eth/relay/multiclient_test.go +++ b/eth/relay/multiclient_test.go @@ -3,6 +3,7 @@ package relay import ( "context" "encoding/json" + "fmt" "net/http" "net/http/httptest" "sync" @@ -1394,3 +1395,197 @@ func TestPrivateTxSubmissionRetry(t *testing.T) { require.Equal(t, int32(5), txGetterCallCount.Load(), "expected txGetter to be called 5 times during retries") }) } + +// TestRejectionTracker covers the in-memory aggregation used by the BP rejection +// reporter. The production reporter goroutine pulls from this same tracker, so +// verifying record/flush here covers the data path end-to-end. +func TestRejectionTracker(t *testing.T) { + t.Parallel() + + t.Run("record groups identical errors and counts total", func(t *testing.T) { + var tr rejectionTracker + errA := fmt.Errorf("nonce too low") + errB := fmt.Errorf("transaction underpriced") + + tr.record(errA) + tr.record(errA) + tr.record(errB) + tr.record(errA) + + total, counts := tr.flush() + require.Equal(t, uint64(4), total) + require.Equal(t, uint64(3), counts["nonce too low"]) + require.Equal(t, uint64(1), counts["transaction underpriced"]) + }) + + t.Run("flush resets state", func(t *testing.T) { + var tr rejectionTracker + tr.record(fmt.Errorf("boom")) + + total1, counts1 := tr.flush() + require.Equal(t, uint64(1), total1) + require.NotNil(t, counts1) + + total2, counts2 := tr.flush() + require.Equal(t, uint64(0), total2) + require.Nil(t, counts2, "flush should leave the tracker empty") + }) + + t.Run("nil error is ignored", func(t *testing.T) { + var tr rejectionTracker + tr.record(nil) + total, counts := tr.flush() + require.Equal(t, uint64(0), total) + require.Nil(t, counts) + }) + + t.Run("cardinality cap diverts overflow into the bucket", func(t *testing.T) { + var tr rejectionTracker + // Fill the map to the cap with unique errors. + for i := 0; i < maxRejectionCategories; i++ { + tr.record(fmt.Errorf("unique error %d", i)) + } + // Extra unique errors should land in the overflow bucket, not expand the map. + tr.record(fmt.Errorf("brand new error 1")) + tr.record(fmt.Errorf("brand new error 2")) + // But an error already in the map should still increment its own bucket. 
+ tr.record(fmt.Errorf("unique error 0")) + + total, counts := tr.flush() + require.Equal(t, uint64(maxRejectionCategories+3), total) + require.Equal(t, maxRejectionCategories+1, len(counts), "exactly one overflow bucket should be added") + require.Equal(t, uint64(2), counts[rejectionOtherCategoryLabel]) + require.Equal(t, uint64(2), counts["unique error 0"]) + }) + + t.Run("concurrent records do not lose count", func(t *testing.T) { + var tr rejectionTracker + var wg sync.WaitGroup + const ( + goroutines = 20 + perRoutine = 500 + expectTotal = goroutines * perRoutine + ) + for g := 0; g < goroutines; g++ { + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < perRoutine; i++ { + tr.record(fmt.Errorf("e%d", i%3)) + } + }() + } + wg.Wait() + total, _ := tr.flush() + require.Equal(t, uint64(expectTotal), total) + }) +} + +func TestFormatRejectionCounts(t *testing.T) { + t.Parallel() + + t.Run("sorted desc, zero-count entries skipped", func(t *testing.T) { + counts := map[string]uint64{ + "nonce too low": 10, + "transaction underpriced": 3, + "invalid sender": 3, + "pool full": 25, + "": 0, // must be skipped + } + got := formatRejectionCounts(counts) + require.Equal(t, + `pool full: 25, nonce too low: 10, invalid sender: 3, transaction underpriced: 3`, + got, + ) + }) + + t.Run("empty input yields empty string", func(t *testing.T) { + require.Equal(t, "", formatRejectionCounts(nil)) + require.Equal(t, "", formatRejectionCounts(map[string]uint64{"skip": 0})) + }) +} + +// TestRejectionTrackerWiredToSubmit verifies the full production path: BP +// rejections from both submitPrivateTx and submitPreconfTx flow into the same +// tracker on the multiClient, while "already known" is filtered out in both cases. +func TestRejectionTrackerWiredToSubmit(t *testing.T) { + t.Parallel() + + tx := types.NewTransaction(1, common.Address{}, nil, 0, nil, nil) + rawTx, err := tx.MarshalBinary() + require.NoError(t, err) + + t.Run("private tx rejection is tracked, already-known is not", func(t *testing.T) { + s1 := newMockRpcServer() + s2 := newMockRpcServer() + defer s1.close() + defer s2.close() + + s1.setHandleSendPrivateTx(func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32000, "tx fee (1.00 ether) exceeds the configured cap (0.50 ether)") + }) + s2.setHandleSendPrivateTx(func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32000, "already known") + }) + + mc := newMultiClient([]string{s1.server.URL, s2.server.URL}) + defer mc.close() + + _, _ = mc.submitPrivateTx(rawTx, tx.Hash(), false, nil) + + total, counts := mc.rejectionTracker.flush() + require.Equal(t, uint64(1), total, "only the non-already-known rejection should be tracked") + require.Equal(t, uint64(1), counts["tx fee (1.00 ether) exceeds the configured cap (0.50 ether)"]) + require.NotContains(t, counts, "already known", "already-known must not be classified as a rejection") + }) + + t.Run("preconf rejection is tracked, already-known is not", func(t *testing.T) { + s1 := newMockRpcServer() + s2 := newMockRpcServer() + defer s1.close() + defer s2.close() + + s1.setHandleSendPreconfTx(func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32000, "tx fee (1.00 ether) exceeds the configured cap (0.50 ether)") + }) + s2.setHandleSendPreconfTx(func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32000, "already known") + }) + + mc := newMultiClient([]string{s1.server.URL, s2.server.URL}) + defer 
mc.close() + + _, _ = mc.submitPreconfTx(rawTx) + + total, counts := mc.rejectionTracker.flush() + require.Equal(t, uint64(1), total, "only the non-already-known rejection should be tracked") + require.Equal(t, uint64(1), counts["tx fee (1.00 ether) exceeds the configured cap (0.50 ether)"]) + require.NotContains(t, counts, "already known", "already-known must not be classified as a rejection") + }) + + t.Run("preconf and private rejections aggregate into the same tracker", func(t *testing.T) { + // Same BP rejects both preconf and private with the same config-mismatch + // error. The tracker should accumulate the count across both submission + // types, while the two separate meters distinguish the source. + s := newMockRpcServer() + defer s.close() + + errMsg := "tx fee exceeds the configured cap" + s.setHandleSendPreconfTx(func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32000, errMsg) + }) + s.setHandleSendPrivateTx(func(w http.ResponseWriter, id int, params json.RawMessage) { + defaultSendError(w, id, -32000, errMsg) + }) + + mc := newMultiClient([]string{s.server.URL}) + defer mc.close() + + _, _ = mc.submitPreconfTx(rawTx) + _, _ = mc.submitPrivateTx(rawTx, tx.Hash(), false, nil) + + total, counts := mc.rejectionTracker.flush() + require.Equal(t, uint64(2), total, "both rejections should be tracked together") + require.Equal(t, uint64(2), counts[errMsg], "same error message should aggregate across submission types") + }) +} diff --git a/eth/relay/private_tx_store.go b/eth/relay/private_tx_store.go index 06cd5f4696..cd2cf855aa 100644 --- a/eth/relay/private_tx_store.go +++ b/eth/relay/private_tx_store.go @@ -13,6 +13,12 @@ import ( "github.com/ethereum/go-ethereum/metrics" ) +const ( + privateTxTTL = 10 * time.Minute // hard TTL: remove from store after this duration regardless + privateTxGracePeriod = 2 * time.Minute // min age before txpool presence check applies + sweepInterval = 1 * time.Minute // how often the sweep goroutine runs +) + var totalPrivateTxsMeter = metrics.NewRegisteredMeter("privatetxs/count", nil) type PrivateTxGetter interface { @@ -24,16 +30,21 @@ type PrivateTxSetter interface { Purge(hash common.Hash) } +// TxPoolChecker returns true if the given tx hash is currently in the txpool. +type TxPoolChecker func(hash common.Hash) bool + type PrivateTxStore struct { txs map[common.Hash]time.Time // tx hash to last updated time mu sync.RWMutex chainEventSubFn func(ch chan<- core.ChainEvent) event.Subscription + txPoolChecker TxPoolChecker // metrics txsAdded atomic.Uint64 txsPurged atomic.Uint64 // deleted by an explicit call txsDeleted atomic.Uint64 // deleted because tx got included + txsExpired atomic.Uint64 // deleted by sweep (txpool eviction or TTL) closeCh chan struct{} } @@ -44,6 +55,7 @@ func NewPrivateTxStore() *PrivateTxStore { closeCh: make(chan struct{}), } go store.report() + go store.sweep() return store } @@ -126,6 +138,53 @@ func (s *PrivateTxStore) SetchainEventSubFn(fn func(ch chan<- core.ChainEvent) e } } +func (s *PrivateTxStore) SetTxPoolChecker(checker TxPoolChecker) { + s.mu.Lock() + defer s.mu.Unlock() + + s.txPoolChecker = checker +} + +// sweep periodically removes stale entries from the store. It uses two strategies: +// 1. Txpool check: if the tx is no longer in the txpool and old enough, remove it. +// 2. TTL backstop: unconditionally remove entries older than privateTxTTL. 
+func (s *PrivateTxStore) sweep() { + ticker := time.NewTicker(sweepInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + s.sweepOnce() + case <-s.closeCh: + return + } + } +} + +// sweepOnce performs one pass of the sweep logic. Extracted from sweep so tests +// can invoke the real eviction logic deterministically without waiting on a ticker. +func (s *PrivateTxStore) sweepOnce() { + s.mu.Lock() + defer s.mu.Unlock() + + expired := uint64(0) + now := time.Now() + for hash, addedAt := range s.txs { + age := now.Sub(addedAt) + if age > privateTxTTL { + // Hard TTL: remove regardless of txpool status + delete(s.txs, hash) + expired++ + } else if age > privateTxGracePeriod && s.txPoolChecker != nil && !s.txPoolChecker(hash) { + // Tx no longer in txpool and past grace period: remove + delete(s.txs, hash) + expired++ + } + } + s.txsExpired.Add(expired) +} + func (s *PrivateTxStore) report() { ticker := time.NewTicker(time.Minute) defer ticker.Stop() @@ -137,10 +196,11 @@ func (s *PrivateTxStore) report() { storeSize := len(s.txs) s.mu.RUnlock() totalPrivateTxsMeter.Mark(int64(storeSize)) - log.Info("[private-tx-store] stats", "len", storeSize, "added", s.txsAdded.Load(), "purged", s.txsPurged.Load(), "deleted", s.txsDeleted.Load()) + log.Info("[private-tx-store] stats", "len", storeSize, "added", s.txsAdded.Load(), "purged", s.txsPurged.Load(), "deleted", s.txsDeleted.Load(), "expired", s.txsExpired.Load()) s.txsAdded.Store(0) s.txsPurged.Store(0) s.txsDeleted.Store(0) + s.txsExpired.Store(0) case <-s.closeCh: return } diff --git a/eth/relay/private_tx_store_test.go b/eth/relay/private_tx_store_test.go index d45c7243c7..cb10832484 100644 --- a/eth/relay/private_tx_store_test.go +++ b/eth/relay/private_tx_store_test.go @@ -204,3 +204,87 @@ func TestPrivateTxStoreCleanup(t *testing.T) { // Ensure metrics are correctly reported require.Equal(t, uint64(1), store.txsDeleted.Load(), "expected txsDeleted metric to be 1") } + +// TestPrivateTxStoreSweepTTL tests that the sweep goroutine removes entries +// older than the TTL. +func TestPrivateTxStoreSweepTTL(t *testing.T) { + t.Parallel() + + store := NewPrivateTxStore() + defer store.Close() + + hash1 := common.HexToHash("0x1") + hash2 := common.HexToHash("0x2") + hash3 := common.HexToHash("0x3") + + store.Add(hash1) + store.Add(hash2) + store.Add(hash3) + + // Manually backdate hash1 and hash2 to be older than TTL + store.mu.Lock() + store.txs[hash1] = time.Now().Add(-(privateTxTTL + time.Minute)) + store.txs[hash2] = time.Now().Add(-(privateTxTTL + 2*time.Minute)) + // hash3 stays fresh + store.mu.Unlock() + + // Exercise the actual sweep code path used by the background goroutine + store.sweepOnce() + + require.False(t, store.IsTxPrivate(hash1), "hash1 should be removed after TTL") + require.False(t, store.IsTxPrivate(hash2), "hash2 should be removed after TTL") + require.True(t, store.IsTxPrivate(hash3), "hash3 should still be present (not expired)") + require.Equal(t, uint64(2), store.txsExpired.Load(), "expected 2 txsExpired") +} + +// TestPrivateTxStoreSweepTxPoolCheck tests that the sweep removes entries +// that are no longer in the txpool (after the grace period). 
+func TestPrivateTxStoreSweepTxPoolCheck(t *testing.T) {
+	t.Parallel()
+
+	store := NewPrivateTxStore()
+	defer store.Close()
+
+	hash1 := common.HexToHash("0x1") // in pool
+	hash2 := common.HexToHash("0x2") // NOT in pool, past grace period
+	hash3 := common.HexToHash("0x3") // NOT in pool, but too fresh (within grace period)
+
+	store.Add(hash1)
+	store.Add(hash2)
+	store.Add(hash3)
+
+	// Backdate hash1 and hash2 past the grace period
+	store.mu.Lock()
+	store.txs[hash1] = time.Now().Add(-(privateTxGracePeriod + time.Minute))
+	store.txs[hash2] = time.Now().Add(-(privateTxGracePeriod + time.Minute))
+	// hash3 stays fresh (within grace period)
+	store.mu.Unlock()
+
+	// Set txpool checker: only hash1 is in the pool
+	poolSet := map[common.Hash]bool{hash1: true}
+	store.SetTxPoolChecker(func(hash common.Hash) bool {
+		return poolSet[hash]
+	})
+
+	// Exercise the actual sweep code path used by the background goroutine
+	store.sweepOnce()
+
+	require.True(t, store.IsTxPrivate(hash1), "hash1 should remain (still in pool)")
+	require.False(t, store.IsTxPrivate(hash2), "hash2 should be removed (not in pool, past grace)")
+	require.True(t, store.IsTxPrivate(hash3), "hash3 should remain (within grace period)")
+	require.Equal(t, uint64(1), store.txsExpired.Load(), "expected 1 txsExpired")
+}
+
+// TestPrivateTxStoreSetTxPoolChecker tests the SetTxPoolChecker method.
+func TestPrivateTxStoreSetTxPoolChecker(t *testing.T) {
+	t.Parallel()
+
+	store := NewPrivateTxStore()
+	defer store.Close()
+
+	require.Nil(t, store.txPoolChecker, "txPoolChecker should be nil initially")
+
+	checker := func(hash common.Hash) bool { return true }
+	store.SetTxPoolChecker(checker)
+	require.NotNil(t, store.txPoolChecker, "txPoolChecker should be set after calling SetTxPoolChecker")
+}
diff --git a/eth/relay/rejection_tracker.go b/eth/relay/rejection_tracker.go
new file mode 100644
index 0000000000..209642c321
--- /dev/null
+++ b/eth/relay/rejection_tracker.go
@@ -0,0 +1,92 @@
+package relay
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+)
+
+var (
+	rejectionReportInterval     = time.Minute // how often to log the aggregated rejection summary
+	maxRejectionCategories      = 50          // cap on distinct error strings tracked to avoid unbounded map growth
+	rejectionOtherCategoryLabel = "other"     // overflow bucket once the cap is hit
+)
+
+// rejectionTracker aggregates BP rejection errors for a failed tx submission and
+// flushes them periodically as a single summary log line. Grouping by error string
+// collapses N identical rejections (e.g. "tx fee exceeds the configured cap" when a
+// BP has a lower fee cap than the relay) into one line with a count.
+type rejectionTracker struct {
+	mu     sync.Mutex
+	counts map[string]uint64
+	total  uint64
+}
+
+// record adds an error to the tracker. "already known" errors are filtered out by the
+// caller — only genuine rejections should reach here.
+func (t *rejectionTracker) record(err error) {
+	if err == nil {
+		return
+	}
+	msg := err.Error()
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.counts == nil {
+		t.counts = make(map[string]uint64)
+	}
+	// Cardinality guard: if the map is already at cap and this is a brand-new error
+	// string, drop it into the overflow bucket to preserve the total count without
+	// letting the map grow unbounded.
+ if _, seen := t.counts[msg]; !seen && len(t.counts) >= maxRejectionCategories { + t.counts[rejectionOtherCategoryLabel]++ + } else { + t.counts[msg]++ + } + t.total++ +} + +// flush returns the accumulated counts and total, then resets the tracker. +func (t *rejectionTracker) flush() (uint64, map[string]uint64) { + t.mu.Lock() + defer t.mu.Unlock() + total, counts := t.total, t.counts + t.total, t.counts = 0, nil + return total, counts +} + +// formatRejectionCounts renders the map as a single compact string, sorted by count +// desc so the most frequent error appears first. Entries with zero count are skipped. +// +// Example output: "nonce too low: 10, invalid sender: 3, pool full: 2" +func formatRejectionCounts(counts map[string]uint64) string { + if len(counts) == 0 { + return "" + } + type pair struct { + msg string + n uint64 + } + pairs := make([]pair, 0, len(counts)) + for msg, n := range counts { + if n == 0 { + continue + } + pairs = append(pairs, pair{msg, n}) + } + sort.Slice(pairs, func(i, j int) bool { + if pairs[i].n != pairs[j].n { + return pairs[i].n > pairs[j].n + } + return pairs[i].msg < pairs[j].msg + }) + var sb strings.Builder + for i, p := range pairs { + if i > 0 { + sb.WriteString(", ") + } + fmt.Fprintf(&sb, "%s: %d", p.msg, p.n) + } + return sb.String() +} diff --git a/eth/relay/relay.go b/eth/relay/relay.go index 76491e020d..0445ee8632 100644 --- a/eth/relay/relay.go +++ b/eth/relay/relay.go @@ -83,6 +83,12 @@ func (s *RelayService) SetchainEventSubFn(fn func(ch chan<- core.ChainEvent) eve } } +func (s *RelayService) SetTxPoolChecker(checker TxPoolChecker) { + if s.privateTxStore != nil { + s.privateTxStore.SetTxPoolChecker(checker) + } +} + func (s *RelayService) SetTxGetter(getter TxGetter) { if s.txRelay != nil { s.txRelay.SetTxGetter(getter) @@ -117,7 +123,21 @@ func (s *RelayService) SubmitPreconfTransaction(tx *types.Transaction) error { return nil } -// SubmitPrivateTransaction submits a private transaction to block producers +// SubmitPrivateTransaction submits a private transaction to block producers. +// Note: We deliberately do NOT purge the private-tx hash from the store on BP +// rejection. Various types of tx rejections are possible. +// - Permanant rejection (i.e. tx which will always be rejected). As the tx has +// already passed the relay's local txpool validation, invalid txs are already +// filtered out. +// - Permanant rejection due to BP config (e.g. tx fee cap set by BP) which will +// lead to relay accepting the tx but BPs rejecting it. Such instances should be +// logged but as they can't be fixed immediately by the relay, they'll be pruned +// after the default private tx store timeout. +// - Transient rejection due to reorg or nonce race. The relay's local pool will +// evict the tx eventually once it catches up with the txpool-aware sweep. +// - Transient rejection due to BPs current mempool state (e.g. pool full). This +// will be hanlded by local re-submissions in BPs. If the tx still remains invalid +// it will be evicted after the default private tx store timeout. 
 func (s *RelayService) SubmitPrivateTransaction(tx *types.Transaction) error {
 	if s.txRelay == nil {
 		return fmt.Errorf("request dropped: %w", errRelayNotConfigured)

From 10705b3d548d1cff338cc867eb75936dcf491c60 Mon Sep 17 00:00:00 2001
From: Manav Darji
Date: Fri, 24 Apr 2026 18:06:45 +0530
Subject: [PATCH 2/3] eth/relay: fix lint

---
 eth/relay/relay.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/eth/relay/relay.go b/eth/relay/relay.go
index 0445ee8632..ff1f603f74 100644
--- a/eth/relay/relay.go
+++ b/eth/relay/relay.go
@@ -126,17 +126,17 @@ func (s *RelayService) SubmitPreconfTransaction(tx *types.Transaction) error {
 // SubmitPrivateTransaction submits a private transaction to block producers.
 // Note: We deliberately do NOT purge the private-tx hash from the store on BP
 // rejection. Various types of tx rejections are possible.
-// - Permanant rejection (i.e. tx which will always be rejected). As the tx has
+// - Permanent rejection (i.e. tx which will always be rejected). As the tx has
 //   already passed the relay's local txpool validation, invalid txs are already
 //   filtered out.
-// - Permanant rejection due to BP config (e.g. tx fee cap set by BP) which will
+// - Permanent rejection due to BP config (e.g. tx fee cap set by BP) which will
 //   lead to relay accepting the tx but BPs rejecting it. Such instances should be
 //   logged but as they can't be fixed immediately by the relay, they'll be pruned
 //   after the default private tx store timeout.
 // - Transient rejection due to reorg or nonce race. The relay's local pool will
 //   evict the tx eventually once it catches up with the txpool-aware sweep.
 // - Transient rejection due to BPs current mempool state (e.g. pool full). This
-//   will be hanlded by local re-submissions in BPs. If the tx still remains invalid
+//   will be handled by local re-submissions in BPs. If the tx still remains invalid
 //   it will be evicted after the default private tx store timeout.
 func (s *RelayService) SubmitPrivateTransaction(tx *types.Transaction) error {
 	if s.txRelay == nil {

From 978f92f096e7ccc952a4fb779c0c0fa5bcc3b42a Mon Sep 17 00:00:00 2001
From: Manav Darji
Date: Fri, 24 Apr 2026 18:06:55 +0530
Subject: [PATCH 3/3] eth/relay: update comment

---
 eth/relay/rejection_tracker.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/eth/relay/rejection_tracker.go b/eth/relay/rejection_tracker.go
index 209642c321..cc5ced4d39 100644
--- a/eth/relay/rejection_tracker.go
+++ b/eth/relay/rejection_tracker.go
@@ -24,8 +24,8 @@ type rejectionTracker struct {
 	total  uint64
 }
 
-// record adds an error to the tracker. "already known" errors are filtered out by the
-// caller — only genuine rejections should reach here.
+// record adds an error to the tracker. Callers typically filter out the
+// "already known" error; all other RPC-related errors reach here.
 func (t *rejectionTracker) record(err error) {
 	if err == nil {
 		return
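
For reviewers, a minimal sketch (not part of the patch series) of what the aggregated rejection summary ends up containing. It assumes it sits alongside the existing tests in eth/relay (package relay); the test name and error strings are illustrative only.

package relay

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestRejectionSummarySketch(t *testing.T) {
	var tr rejectionTracker

	// Two BPs reject with the same fee-cap error, one rejects with a full pool.
	tr.record(errors.New("tx fee exceeds the configured cap"))
	tr.record(errors.New("txpool is full"))
	tr.record(errors.New("tx fee exceeds the configured cap"))

	// flush drains the tracker; formatRejectionCounts sorts by count, descending.
	total, counts := tr.flush()
	require.Equal(t, uint64(3), total)
	require.Equal(t, "tx fee exceeds the configured cap: 2, txpool is full: 1", formatRejectionCounts(counts))
}

The reporter goroutine would emit roughly total=3 errors="tx fee exceeds the configured cap: 2, txpool is full: 1" in the "[tx-relay] BP rejection summary" line, once per rejectionReportInterval.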