85 changes: 75 additions & 10 deletions rpc/backend/backend.go
@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"math/big"
"sync"
"time"

"github.com/ethereum/go-ethereum/common"
@@ -22,6 +23,7 @@ import (
"github.com/cosmos/evm/rpc/types"
"github.com/cosmos/evm/server/config"
servertypes "github.com/cosmos/evm/server/types"
feemarkettypes "github.com/cosmos/evm/x/feemarket/types"
evmtypes "github.com/cosmos/evm/x/vm/types"

"cosmossdk.io/log"
@@ -173,6 +175,13 @@ type Backend struct {
Indexer servertypes.EVMTxIndexer
ProcessBlocker ProcessBlocker
Mempool *evmmempool.ExperimentalEVMMempool

// simple caches
cacheMu sync.RWMutex
cometBlockCache map[int64]*tmrpctypes.ResultBlock
cometBlockResultsCache map[int64]*tmrpctypes.ResultBlockResults
feeParamsCache map[int64]feemarkettypes.Params
consensusGasLimitCache map[int64]int64
}

func (b *Backend) GetConfig() config.Config {
@@ -199,17 +208,73 @@ func NewBackend(
}

b := &Backend{
Ctx: context.Background(),
ClientCtx: clientCtx,
RPCClient: rpcClient,
QueryClient: types.NewQueryClient(clientCtx),
Logger: logger.With("module", "backend"),
EvmChainID: big.NewInt(int64(appConf.EVM.EVMChainID)), //nolint:gosec // G115 // won't exceed uint64
Cfg: appConf,
AllowUnprotectedTxs: allowUnprotectedTxs,
Indexer: indexer,
Mempool: mempool,
Ctx: context.Background(),
ClientCtx: clientCtx,
RPCClient: rpcClient,
QueryClient: types.NewQueryClient(clientCtx),
Logger: logger.With("module", "backend"),
EvmChainID: big.NewInt(int64(appConf.EVM.EVMChainID)), //nolint:gosec // G115 // won't exceed uint64
Cfg: appConf,
AllowUnprotectedTxs: allowUnprotectedTxs,
Indexer: indexer,
Mempool: mempool,
cometBlockCache: make(map[int64]*tmrpctypes.ResultBlock),
cometBlockResultsCache: make(map[int64]*tmrpctypes.ResultBlockResults),
feeParamsCache: make(map[int64]feemarkettypes.Params),
consensusGasLimitCache: make(map[int64]int64),
}
b.ProcessBlocker = b.ProcessBlock
return b
}

// getFeeMarketParamsAtHeight returns FeeMarket params for a given height using a height-keyed cache.
func (b *Backend) getFeeMarketParamsAtHeight(height int64) (feemarkettypes.Params, error) {
b.cacheMu.RLock()
if p, ok := b.feeParamsCache[height]; ok {
b.cacheMu.RUnlock()
return p, nil
}
b.cacheMu.RUnlock()
res, err := b.QueryClient.FeeMarket.Params(types.ContextWithHeight(height), &feemarkettypes.QueryParamsRequest{})
if err != nil {
return feemarkettypes.Params{}, err
}
b.cacheMu.Lock()
if cap := int(b.Cfg.JSONRPC.FeeHistoryCap) * 2; cap > 0 && len(b.feeParamsCache) >= cap {
for k := range b.feeParamsCache {
delete(b.feeParamsCache, k)
break
}
}
b.feeParamsCache[height] = res.Params
b.cacheMu.Unlock()
return res.Params, nil
}

// BlockMaxGasAtHeight returns the consensus block gas limit for a given height using a height-keyed cache.
func (b *Backend) BlockMaxGasAtHeight(height int64) (int64, error) {
b.cacheMu.RLock()
if gl, ok := b.consensusGasLimitCache[height]; ok {
b.cacheMu.RUnlock()
return gl, nil
}
b.cacheMu.RUnlock()

ctx := types.ContextWithHeight(height)
gasLimit, err := types.BlockMaxGasFromConsensusParams(ctx, b.ClientCtx, height)
if err != nil {
return gasLimit, err
}

b.cacheMu.Lock()
// simple prune aligned with fee history window
if cap := int(b.Cfg.JSONRPC.FeeHistoryCap) * 2; cap > 0 && len(b.consensusGasLimitCache) >= cap {
for k := range b.consensusGasLimitCache {
delete(b.consensusGasLimitCache, k)
break
}
}
b.consensusGasLimitCache[height] = gasLimit
b.cacheMu.Unlock()
return gasLimit, nil
}
14 changes: 7 additions & 7 deletions rpc/backend/chain_info.go
@@ -247,13 +247,6 @@ func (b *Backend) FeeHistory(
return
}

// eth block
ethBlock, err := b.GetBlockByNumber(blockNum, true)
if ethBlock == nil {
chanErr <- err
return
}

// CometBFT block result
cometBlockResult, err := b.CometBlockResultByNumber(&cometBlock.Block.Height)
if cometBlockResult == nil {
@@ -262,6 +255,13 @@
return
}

// Build Ethereum-formatted block using the already fetched Comet block and results
ethBlock, err := b.RPCBlockFromCometBlock(cometBlock, cometBlockResult, true)
if err != nil {
chanErr <- err
return
}

oneFeeHistory := rpctypes.OneFeeHistory{}
err = b.ProcessBlocker(cometBlock, &ethBlock, rewardPercentiles, cometBlockResult, &oneFeeHistory)
if err != nil {
37 changes: 36 additions & 1 deletion rpc/backend/comet.go
@@ -19,6 +19,13 @@ func (b *Backend) CometBlockByNumber(blockNum rpctypes.BlockNumber) (*cmtrpctype
if err != nil {
return nil, err
}
// cache lookup
b.cacheMu.RLock()
if cached, ok := b.cometBlockCache[height]; ok {
b.cacheMu.RUnlock()
return cached, nil
}
b.cacheMu.RUnlock()
resBlock, err := b.RPCClient.Block(b.Ctx, &height)
if err != nil {
b.Logger.Debug("cometbft client failed to get block", "height", height, "error", err.Error())
@@ -30,6 +37,16 @@
return nil, nil
}

// store in cache (simple bound: FeeHistoryCap*2)
b.cacheMu.Lock()
if cap := int(b.Cfg.JSONRPC.FeeHistoryCap) * 2; cap > 0 && len(b.cometBlockCache) >= cap {
for k := range b.cometBlockCache {
delete(b.cometBlockCache, k)
break
}
}
b.cometBlockCache[height] = resBlock
b.cacheMu.Unlock()
return resBlock, nil
}

@@ -49,11 +66,29 @@ func (b *Backend) CometBlockResultByNumber(height *int64) (*cmtrpctypes.ResultBl
if height != nil && *height == 0 {
height = nil
}
if height != nil {
b.cacheMu.RLock()
if cached, ok := b.cometBlockResultsCache[*height]; ok {
b.cacheMu.RUnlock()
return cached, nil
}
b.cacheMu.RUnlock()
}
res, err := b.RPCClient.BlockResults(b.Ctx, height)
if err != nil {
return nil, fmt.Errorf("failed to fetch block result from CometBFT %d: %w", *height, err)
}

if height != nil {
b.cacheMu.Lock()
if cap := int(b.Cfg.JSONRPC.FeeHistoryCap) * 2; cap > 0 && len(b.cometBlockResultsCache) >= cap {
Contributor:

why do we multiply the cap by 2 here?

Author:

tl;dr it gives better hit rates for overlapping requests that ask for the maximum number of blocks, at a reasonable cache size.

I noticed that FeeHistoryCap limits a single eth_feeHistory call to a default of 100 blocks. So in a worst-case scenario where a user is indexing the network, consecutive requests often overlap because "latest" keeps moving forward.

FeeHistoryCap = 100
Cache size = 200 entries

Request 1: blocks 1000-1099
Cache stores: blocks 1000-1099 (100 entries)

Request 2 (few seconds later): blocks 1050-1149
Cache hits: blocks 1050-1099 (50 hits)
Cache stores: blocks 1100-1149 (50 new entries)
Total cached: 150 entries (well within the 200 limit)

Author:

If a user keeps requesting 100 entries in random ranges, we lose the previous context, and then the cache isn't that helpful.

I was just thinking that if there were requests x and y in completely different ranges, y would evict x's entries with a bound of just FeeHistoryCap. But if x were then requested again because y was a mistake, FeeHistoryCap*2 still leaves a warm cache.

This is more of a safety margin, giving older entries some leeway to stick around. I think either way is good enough to get the job done.

Author:

@technicallyty please do let me know what you think about this

Contributor:

I will try to carve out time to look at this this week.

Author:

Thanks, I appreciate it. If it makes your job any easier, the existing RPC unit tests still pass with the added features; I don't think it breaks anything.

for k := range b.cometBlockResultsCache {
delete(b.cometBlockResultsCache, k)
break
}
}
b.cometBlockResultsCache[*height] = res
b.cacheMu.Unlock()
}
return res, nil
}

5 changes: 2 additions & 3 deletions rpc/backend/comet_to_eth.go
@@ -143,9 +143,8 @@ func (b *Backend) EthBlockFromCometBlock(
return nil, fmt.Errorf("failed to get miner(block proposer) address from comet block")
}

// 3. get block gasLimit
ctx := rpctypes.ContextWithHeight(cmtBlock.Height)
gasLimit, err := rpctypes.BlockMaxGasFromConsensusParams(ctx, b.ClientCtx, cmtBlock.Height)
// 3. get block gasLimit (cached by height)
gasLimit, err := b.BlockMaxGasAtHeight(cmtBlock.Height)
if err != nil {
b.Logger.Error("failed to query consensus params", "error", err.Error())
}
6 changes: 2 additions & 4 deletions rpc/backend/utils.go
@@ -20,7 +20,6 @@ import (

"github.com/cosmos/evm/rpc/types"
"github.com/cosmos/evm/utils"
feemarkettypes "github.com/cosmos/evm/x/feemarket/types"
evmtypes "github.com/cosmos/evm/x/vm/types"

"cosmossdk.io/log"
@@ -176,12 +175,11 @@ func (b *Backend) ProcessBlock(
targetOneFeeHistory.BlobGasUsedRatio = 0

if cfg.IsLondon(big.NewInt(blockHeight + 1)) {
ctx := types.ContextWithHeight(blockHeight)
params, err := b.QueryClient.FeeMarket.Params(ctx, &feemarkettypes.QueryParamsRequest{})
feeParams, err := b.getFeeMarketParamsAtHeight(blockHeight)
if err != nil {
return err
}
nextBaseFee, err := types.CalcBaseFee(cfg, &header, params.Params)
nextBaseFee, err := types.CalcBaseFee(cfg, &header, feeParams)
if err != nil {
return err
}
25 changes: 25 additions & 0 deletions scripts/feeHistory_bench.ps1
@@ -0,0 +1,25 @@
param(
[string]$Endpoint = "http://127.0.0.1:8545",
[string]$Blocks = "0x40",
[int]$Rounds = 8,
[int[]]$Percentiles = @(25,50,75)
)

# Build the JSON-RPC request body; -Depth keeps the nested percentiles array from being truncated at ConvertTo-Json's default depth of 2
$body = @{ jsonrpc = "2.0"; id = 1; method = "eth_feeHistory"; params = @($Blocks, "latest", $Percentiles) } | ConvertTo-Json -Compress -Depth 5
$times = @()
Write-Host ("eth_feeHistory {0}, percentiles=[{1}], rounds={2}" -f $Blocks, ($Percentiles -join ","), $Rounds)
for ($i=1; $i -le $Rounds; $i++) {
$sw = [System.Diagnostics.Stopwatch]::StartNew()
try { Invoke-RestMethod -Uri $Endpoint -Method Post -ContentType "application/json" -Body $body | Out-Null } catch {}
$sw.Stop()
$ms = [int][Math]::Round($sw.Elapsed.TotalMilliseconds)
Write-Host ("Run {0}: {1} ms" -f $i, $ms)
$times += $ms
Start-Sleep -Milliseconds 150
}
$avg = [Math]::Round(($times | Measure-Object -Average).Average,0)
$min = ($times | Measure-Object -Minimum).Minimum
$max = ($times | Measure-Object -Maximum).Maximum
Write-Host ("Avg: {0} ms Min: {1} ms Max: {2} ms" -f $avg, $min, $max)


25 changes: 25 additions & 0 deletions scripts/feeHistory_bench.sh
@@ -0,0 +1,25 @@
#!/usr/bin/env bash
set -euo pipefail

ENDPOINT=${1:-http://127.0.0.1:8545}
BLOCKS=${2:-0x40}
ROUNDS=${3:-8}
PCTS=${4:-[25,50,75]}

BODY='{"jsonrpc":"2.0","id":1,"method":"eth_feeHistory","params":["'"$BLOCKS"'","latest",'"$PCTS"']}'

sum=0; min=999999; max=0
echo "eth_feeHistory $BLOCKS, percentiles=$PCTS, rounds=$ROUNDS"
for i in $(seq 1 "$ROUNDS"); do
t=$(curl -s -o /dev/null -w '%{time_total}\n' -H 'Content-Type: application/json' -d "$BODY" "$ENDPOINT")
t_ms=$(awk -v t="$t" 'BEGIN { printf("%.0f", t*1000) }')
echo "Run $i: ${t_ms} ms"
sum=$((sum + t_ms))
(( t_ms < min )) && min=$t_ms
(( t_ms > max )) && max=$t_ms
sleep 0.15
done
avg=$((sum / ROUNDS))
printf "Avg: %d ms Min: %d ms Max: %d ms\n" "$avg" "$min" "$max"
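For reference, a sample invocation of the Bash benchmark against a local node might look like the following (the positional arguments map to endpoint, block count, rounds, and percentiles, matching the defaults above):

    ./scripts/feeHistory_bench.sh http://127.0.0.1:8545 0x40 8 '[25,50,75]'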