[draft] RPC mode support #513

Open · wants to merge 23 commits into master
3 changes: 3 additions & 0 deletions .gitmodules
@@ -0,0 +1,3 @@
[submodule "fuzzing/testdata/lib/forge-std"]
path = fuzzing/testdata/lib/forge-std
url = https://github.com/foundry-rs/forge-std
11 changes: 11 additions & 0 deletions chain/config/config.go
@@ -19,6 +19,17 @@ type TestChainConfig struct {

// ContractAddressOverrides describes contracts that are going to be deployed at deterministic addresses
ContractAddressOverrides map[common.Hash]common.Address `json:"contractAddressOverrides,omitempty"`

// ForkConfig describes the RPC configuration to use when fuzzing against a network fork.
ForkConfig ForkConfig `json:"forkConfig,omitempty"`
}

// ForkConfig describes the configuration for fuzzing against a network fork.
type ForkConfig struct {
ForkModeEnabled bool `json:"forkModeEnabled"`
RpcUrl string `json:"rpcUrl"`
RpcBlock uint64 `json:"rpcBlock"`
PoolSize uint `json:"poolSize"`
}

// CheatCodeConfig describes any configuration options related to the use of vm extensions (a.k.a. cheat codes)
6 changes: 6 additions & 0 deletions chain/config/config_defaults.go
@@ -11,6 +11,12 @@ func DefaultTestChainConfig() (*TestChainConfig, error) {
EnableFFI: false,
},
SkipAccountChecks: true,
ForkConfig: ForkConfig{
ForkModeEnabled: false,
RpcUrl: "",
RpcBlock: 1,
PoolSize: 20,
},
}

// Return the generated configuration.
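For reference, a minimal sketch of how the new fields might be used to enable fork mode programmatically, assuming the chain/config package name and using placeholder RPC values (this helper is not part of this PR):

package config

// exampleForkConfig is a hypothetical helper (not part of this PR): it builds the
// default chain config shown above and enables fork mode via the new ForkConfig fields.
func exampleForkConfig() (*TestChainConfig, error) {
	cfg, err := DefaultTestChainConfig()
	if err != nil {
		return nil, err
	}
	cfg.ForkConfig = ForkConfig{
		ForkModeEnabled: true,
		RpcUrl:          "https://example-archive-node.invalid", // placeholder URL
		RpcBlock:        18_000_000,                             // placeholder block height
		PoolSize:        20,                                     // same as the default above
	}
	return cfg, nil
}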
177 changes: 177 additions & 0 deletions chain/state/cache/caches_test.go
@@ -0,0 +1,177 @@
package cache

import (
"context"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/assert"
"math/rand"
"os"
"sync"
"testing"
)

// TestNonPersistentStateObjectCacheRace tests for race conditions when state objects are read and written concurrently
func TestNonPersistentStateObjectCacheRace(t *testing.T) {
cache := newNonPersistentStateCache()
numObjects := 5
writers := 10
numWrites := 10_000
readers := 10
numReads := 10_000

var wg sync.WaitGroup
wg.Add(writers + readers)

write := func(r *rand.Rand, writesRem int) {
for writesRem > 0 {
objId := r.Uint32() % uint32(numObjects)
addr := common.BytesToAddress([]byte{byte(objId)})
stateObject := StateObject{
Nonce: r.Uint64(),
}
err := cache.WriteStateObject(addr, stateObject)
assert.NoError(t, err)
writesRem--
}
wg.Add(-1)
}

read := func(r *rand.Rand, readsRem int) {
for readsRem > 0 {
objId := r.Uint32() % uint32(numObjects)
addr := common.BytesToAddress([]byte{byte(objId)})
_, _ = cache.GetStateObject(addr)
readsRem--
}
wg.Add(-1)
}

for i := 0; i < readers; i++ {
go read(rand.New(rand.NewSource(int64(i))), numReads)
}

for i := 0; i < writers; i++ {
go write(rand.New(rand.NewSource(int64(i))), numWrites)
}
wg.Wait()
}

// TestNonPersistentSlotCacheRace tests for race conditions when slot data is read and written concurrently
func TestNonPersistentSlotCacheRace(t *testing.T) {
cache := newNonPersistentStateCache()
numContracts := 3
numObjects := 5
writers := 10
numWrites := 10_000
readers := 10
numReads := 10_000

var wg sync.WaitGroup
wg.Add(writers + readers)

write := func(r *rand.Rand, writesRem int) {
for writesRem > 0 {
addrId := r.Uint32() % uint32(numContracts)
addr := common.BytesToAddress([]byte{byte(addrId)})

objId := r.Uint32() % uint32(numObjects)
objHash := common.BytesToHash([]byte{byte(objId)})

data := r.Uint32() % 255
dataHash := common.BytesToHash([]byte{byte(data)})

err := cache.WriteSlotData(addr, objHash, dataHash)
assert.NoError(t, err)
writesRem--
}
wg.Add(-1)
}

read := func(r *rand.Rand, readsRem int) {
for readsRem > 0 {
addrId := r.Uint32() % uint32(numContracts)
addr := common.BytesToAddress([]byte{byte(addrId)})

objId := r.Uint32() % uint32(numObjects)
objHash := common.BytesToHash([]byte{byte(objId)})
_, _ = cache.GetSlotData(addr, objHash)
readsRem--
}
wg.Add(-1)
}

for i := 0; i < readers; i++ {
go read(rand.New(rand.NewSource(int64(i))), numReads)
}

for i := 0; i < writers; i++ {
go write(rand.New(rand.NewSource(int64(i))), numWrites)
}
wg.Wait()
}

// TestPersistentCache tests read/write capability of the persistent cache, along with persistence itself.
func TestPersistentCache(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())

rpcAddr := "www.rpc.net/ethereum/etc"
blockHeight := uint64(55555)
tmpDir, err := os.MkdirTemp("", "test-*")
assert.NoError(t, err)
defer os.RemoveAll(tmpDir)

pc, err := newPersistentCache(ctx, tmpDir, rpcAddr, blockHeight)
assert.NoError(t, err)

stateObjectAddr := common.Address{0x55}
stateObjectData := &StateObject{
Nonce: rand.Uint64(),
}
// try reading a state object that doesn't exist in the cache
_, err = pc.GetStateObject(stateObjectAddr)
assert.Error(t, err)
assert.Equal(t, err, ErrCacheMiss)

// write the state object, then make sure we can read it
err = pc.WriteStateObject(stateObjectAddr, *stateObjectData)
assert.NoError(t, err)

so, err := pc.GetStateObject(stateObjectAddr)
assert.NoError(t, err)
assert.Equal(t, *stateObjectData, *so)

// repeat the above for slots
stateSlotAddress := common.Hash{0x66, 0x01}
stateSlotData := common.Hash{0x81}

// try reading from a slot that doesn't exist
_, err = pc.GetSlotData(stateObjectAddr, stateSlotAddress)
assert.Error(t, err)
assert.Equal(t, err, ErrCacheMiss)

// write the slot, then make sure we can read it
err = pc.WriteSlotData(stateObjectAddr, stateSlotAddress, stateSlotData)
assert.NoError(t, err)

data, err := pc.GetSlotData(stateObjectAddr, stateSlotAddress)
assert.NoError(t, err)
assert.Equal(t, stateSlotData, data)

// now terminate our cache to test persistence
cancel()

ctx, cancel = context.WithCancel(context.Background())
defer cancel()
pc, err = newPersistentCache(ctx, tmpDir, rpcAddr, blockHeight)
assert.NoError(t, err)

// state cache matches
so, err = pc.GetStateObject(stateObjectAddr)
assert.NoError(t, err)
assert.Equal(t, *stateObjectData, *so)

// slot matches
data, err = pc.GetSlotData(stateObjectAddr, stateSlotAddress)
assert.NoError(t, err)
assert.Equal(t, stateSlotData, data)
}
26 changes: 26 additions & 0 deletions chain/state/cache/factory.go
@@ -0,0 +1,26 @@
package cache

import (
"context"
"errors"
"os"
)

var _ StateCache = (*nonPersistentStateCache)(nil)
var _ StateCache = (*persistentCache)(nil)

var ErrCacheMiss = errors.New("not found in cache")

// NewPersistentCache creates a new persistent cache that stores its contents on disk.
// Each cache is keyed by the RPC address (to separate caches per network) and the block number.
func NewPersistentCache(ctx context.Context, rpcAddr string, height uint64) (StateCache, error) {
workingDir, err := os.Getwd()
if err != nil {
return nil, err
}
return newPersistentCache(ctx, workingDir, rpcAddr, height)
}

// NewNonPersistentCache creates a new in-memory state cache that does not persist its contents to disk.
func NewNonPersistentCache() (StateCache, error) {
return newNonPersistentStateCache(), nil
}
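As a usage note, a caller would likely pick between the two constructors based on whether fork mode is enabled; a minimal sketch, assuming the import path below (the RPC URL, block height, and wiring are placeholders, not part of this PR):

package main

import (
	"context"
	"fmt"

	"github.com/crytic/medusa/chain/state/cache" // assumed module path
)

func main() {
	ctx := context.Background()

	var stateCache cache.StateCache
	var err error

	forkModeEnabled := true // placeholder; in practice this would come from ForkConfig
	if forkModeEnabled {
		// Persistent cache: contents are written to disk, keyed by RPC address and block height.
		stateCache, err = cache.NewPersistentCache(ctx, "https://example-archive-node.invalid", 18_000_000)
	} else {
		// Non-persistent cache: in-memory only.
		stateCache, err = cache.NewNonPersistentCache()
	}
	if err != nil {
		panic(err)
	}
	fmt.Printf("state cache ready: %T\n", stateCache)
}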
67 changes: 67 additions & 0 deletions chain/state/cache/non_persistent_cache.go
@@ -0,0 +1,67 @@
package cache

import (
"github.com/ethereum/go-ethereum/common"
"sync"
)

// nonPersistentStateCache provides a thread-safe cache for storing state objects and slots without persisting to disk.
type nonPersistentStateCache struct {
stateObjectLock sync.RWMutex
stateObjectCache map[common.Address]*StateObject

slotLock sync.RWMutex
slotCache map[common.Address]map[common.Hash]common.Hash
}

func newNonPersistentStateCache() *nonPersistentStateCache {
return &nonPersistentStateCache{
stateObjectLock: sync.RWMutex{},
slotLock: sync.RWMutex{},
stateObjectCache: make(map[common.Address]*StateObject),
slotCache: make(map[common.Address]map[common.Hash]common.Hash),
}
}

// GetStateObject checks if the addr is present in the cache, and if not, returns an error
func (s *nonPersistentStateCache) GetStateObject(addr common.Address) (*StateObject, error) {
s.stateObjectLock.RLock()
defer s.stateObjectLock.RUnlock()

if obj, ok := s.stateObjectCache[addr]; !ok {
return nil, ErrCacheMiss
} else {
return obj, nil
}
}

// WriteStateObject stores the provided state object in the cache under the given address.
func (s *nonPersistentStateCache) WriteStateObject(addr common.Address, data StateObject) error {
s.stateObjectLock.Lock()
defer s.stateObjectLock.Unlock()
s.stateObjectCache[addr] = &data
return nil
}

// GetSlotData checks if the specified data is stored in the cache, and if not, returns an error.
func (s *nonPersistentStateCache) GetSlotData(addr common.Address, slot common.Hash) (common.Hash, error) {
s.slotLock.RLock()
defer s.slotLock.RUnlock()
if slotLookup, ok := s.slotCache[addr]; ok {
if data, ok := slotLookup[slot]; ok {
return data, nil
}
}
return common.Hash{}, ErrCacheMiss
}

// WriteSlotData stores the provided slot data in the cache under the given address and slot key.
func (s *nonPersistentStateCache) WriteSlotData(addr common.Address, slot common.Hash, data common.Hash) error {
s.slotLock.Lock()
defer s.slotLock.Unlock()

if _, ok := s.slotCache[addr]; !ok {
s.slotCache[addr] = make(map[common.Hash]common.Hash)
}

s.slotCache[addr][slot] = data
return nil
}
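Finally, callers are expected to treat ErrCacheMiss as a signal to fall back to a slower source (such as the forked network's RPC) and then backfill the cache; a minimal read-through sketch, where getSlotWithFallback and fetchSlotFromRPC are hypothetical helpers, not part of this PR:

package cache

import (
	"errors"

	"github.com/ethereum/go-ethereum/common"
)

// fetchSlotFromRPC is a placeholder for whatever fetches slot data from the forked network.
func fetchSlotFromRPC(addr common.Address, slot common.Hash) (common.Hash, error) {
	return common.Hash{}, nil
}

// getSlotWithFallback serves slot data from the cache on a hit; on a miss it
// fetches the value remotely and writes it back to the cache.
func getSlotWithFallback(c StateCache, addr common.Address, slot common.Hash) (common.Hash, error) {
	data, err := c.GetSlotData(addr, slot)
	if err == nil {
		return data, nil
	}
	if !errors.Is(err, ErrCacheMiss) {
		return common.Hash{}, err
	}
	// Cache miss: fetch from the remote source and populate the cache.
	data, err = fetchSlotFromRPC(addr, slot)
	if err != nil {
		return common.Hash{}, err
	}
	if writeErr := c.WriteSlotData(addr, slot, data); writeErr != nil {
		return common.Hash{}, writeErr
	}
	return data, nil
}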