feat: follower node sync from DA (#1098)
* port changes from #1013

* port changes from #1068

* go.mod tidy

* fix compile error

* fix goimports

* fix log

* address review comments

* upgrade golang.org/x/net to 0.23.0

* bump version

* remove unused flag

* update da-codec commit

---------

Co-authored-by: Péter Garamvölgyi <[email protected]>
jonastheis and Thegaram authored Dec 18, 2024
1 parent bdf64cf commit ac8164f
Showing 47 changed files with 2,653 additions and 468 deletions.
4 changes: 4 additions & 0 deletions cmd/geth/main.go
@@ -171,6 +171,10 @@ var (
		utils.CircuitCapacityCheckWorkersFlag,
		utils.RollupVerifyEnabledFlag,
		utils.ShadowforkPeersFlag,
		utils.DASyncEnabledFlag,
		utils.DABlockNativeAPIEndpointFlag,
		utils.DABlobScanAPIEndpointFlag,
		utils.DABeaconNodeAPIEndpointFlag,
	}

	rpcFlags = []cli.Flag{
38 changes: 38 additions & 0 deletions cmd/utils/flags.go
@@ -875,6 +875,24 @@ var (
		Name:  "net.shadowforkpeers",
		Usage: "peer ids of shadow fork peers",
	}

	// DA syncing settings
	DASyncEnabledFlag = &cli.BoolFlag{
		Name:  "da.sync",
		Usage: "Enable node syncing from DA",
	}
	DABlobScanAPIEndpointFlag = &cli.StringFlag{
		Name:  "da.blob.blobscan",
		Usage: "BlobScan blob API endpoint",
	}
	DABlockNativeAPIEndpointFlag = &cli.StringFlag{
		Name:  "da.blob.blocknative",
		Usage: "BlockNative blob API endpoint",
	}
	DABeaconNodeAPIEndpointFlag = &cli.StringFlag{
		Name:  "da.blob.beaconnode",
		Usage: "Beacon node API endpoint",
	}
)

// MakeDataDir retrieves the currently requested data directory, terminating
@@ -1319,6 +1337,10 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
	setSmartCard(ctx, cfg)
	setL1(ctx, cfg)

	if ctx.IsSet(DASyncEnabledFlag.Name) {
		cfg.DaSyncingEnabled = ctx.Bool(DASyncEnabledFlag.Name)
	}

	if ctx.GlobalIsSet(ExternalSignerFlag.Name) {
		cfg.ExternalSigner = ctx.GlobalString(ExternalSignerFlag.Name)
	}
@@ -1604,6 +1626,21 @@ func setEnableRollupVerify(ctx *cli.Context, cfg *ethconfig.Config) {
	}
}

func setDA(ctx *cli.Context, cfg *ethconfig.Config) {
	if ctx.IsSet(DASyncEnabledFlag.Name) {
		cfg.EnableDASyncing = ctx.Bool(DASyncEnabledFlag.Name)
		if ctx.IsSet(DABlobScanAPIEndpointFlag.Name) {
			cfg.DA.BlobScanAPIEndpoint = ctx.String(DABlobScanAPIEndpointFlag.Name)
		}
		if ctx.IsSet(DABlockNativeAPIEndpointFlag.Name) {
			cfg.DA.BlockNativeAPIEndpoint = ctx.String(DABlockNativeAPIEndpointFlag.Name)
		}
		if ctx.IsSet(DABeaconNodeAPIEndpointFlag.Name) {
			cfg.DA.BeaconNodeAPIEndpoint = ctx.String(DABeaconNodeAPIEndpointFlag.Name)
		}
	}
}

func setMaxBlockRange(ctx *cli.Context, cfg *ethconfig.Config) {
	if ctx.GlobalIsSet(MaxBlockRangeFlag.Name) {
		cfg.MaxBlockRange = ctx.GlobalInt64(MaxBlockRangeFlag.Name)
@@ -1679,6 +1716,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
	setLes(ctx, cfg)
	setCircuitCapacityCheck(ctx, cfg)
	setEnableRollupVerify(ctx, cfg)
	setDA(ctx, cfg)
	setMaxBlockRange(ctx, cfg)
	if ctx.GlobalIsSet(ShadowforkPeersFlag.Name) {
		cfg.ShadowForkPeerIDs = ctx.GlobalStringSlice(ShadowforkPeersFlag.Name)
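Taken together, these flags let an operator start a follower node that syncs from DA. An illustrative invocation (not part of this change, endpoint URLs are placeholders) would look like: geth --da.sync --da.blob.beaconnode <beacon-node-url>, optionally adding --da.blob.blobscan <url> and/or --da.blob.blocknative <url> as alternative blob API sources.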
51 changes: 51 additions & 0 deletions common/backoff/exponential.go
@@ -0,0 +1,51 @@
package backoff

import (
	"math"
	"math/rand"
	"time"
)

// Exponential is a backoff strategy that increases the delay between retries exponentially.
type Exponential struct {
	attempt int

	maxJitter time.Duration

	min time.Duration
	max time.Duration
}

func NewExponential(minimum, maximum, maxJitter time.Duration) *Exponential {
	return &Exponential{
		min:       minimum,
		max:       maximum,
		maxJitter: maxJitter,
	}
}

func (e *Exponential) NextDuration() time.Duration {
	var jitter time.Duration
	if e.maxJitter > 0 {
		jitter = time.Duration(rand.Int63n(e.maxJitter.Nanoseconds()))
	}

	minFloat := float64(e.min)
	duration := math.Pow(2, float64(e.attempt)) * minFloat

	// limit at configured maximum
	if duration > float64(e.max) {
		duration = float64(e.max)
	}

	e.attempt++
	return time.Duration(duration) + jitter
}

func (e *Exponential) Reset() {
	e.attempt = 0
}

func (e *Exponential) Attempt() int {
	return e.attempt
}
39 changes: 39 additions & 0 deletions common/backoff/exponential_test.go
@@ -0,0 +1,39 @@
package backoff

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestExponentialBackoff(t *testing.T) {
	t.Run("Multiple attempts", func(t *testing.T) {
		e := NewExponential(100*time.Millisecond, 10*time.Second, 0)
		expectedDurations := []time.Duration{
			100 * time.Millisecond,
			200 * time.Millisecond,
			400 * time.Millisecond,
			800 * time.Millisecond,
			1600 * time.Millisecond,
			3200 * time.Millisecond,
			6400 * time.Millisecond,
			10 * time.Second, // capped at max
		}
		for i, expected := range expectedDurations {
			require.Equal(t, expected, e.NextDuration(), "attempt %d", i)
		}
	})

	t.Run("Jitter added", func(t *testing.T) {
		e := NewExponential(1*time.Second, 10*time.Second, 1*time.Second)
		duration := e.NextDuration()
		require.GreaterOrEqual(t, duration, 1*time.Second)
		require.Less(t, duration, 2*time.Second)
	})

	t.Run("Edge case: min > max", func(t *testing.T) {
		e := NewExponential(10*time.Second, 5*time.Second, 0)
		require.Equal(t, 5*time.Second, e.NextDuration())
	})
}
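For orientation, here is a minimal sketch of how the backoff type added above might drive a retry loop. It is not code from this commit: the import path assumes the fork's module name, and fetchBatch is a hypothetical stand-in for whatever transient call (for example a blob API request) is being retried.

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/scroll-tech/go-ethereum/common/backoff"
)

// fetchBatch is a hypothetical call that fails transiently for the first few attempts.
func fetchBatch(i int) error {
	if i < 3 {
		return errors.New("temporarily unavailable")
	}
	return nil
}

func main() {
	// Delays of 100ms, 200ms, 400ms, ... capped at 10s, plus up to 50ms of jitter per attempt.
	b := backoff.NewExponential(100*time.Millisecond, 10*time.Second, 50*time.Millisecond)

	for i := 0; ; i++ {
		if err := fetchBatch(i); err != nil {
			wait := b.NextDuration()
			fmt.Printf("attempt %d failed (%v), retrying in %v\n", b.Attempt(), err, wait)
			time.Sleep(wait)
			continue
		}
		b.Reset() // success: start again from the minimum delay next time
		break
	}
}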
109 changes: 109 additions & 0 deletions common/heap.go
@@ -0,0 +1,109 @@
package common

import (
	"container/heap"
)

// Heap is a generic min-heap (or max-heap, depending on Comparable behavior) implementation.
type Heap[T Comparable[T]] struct {
	heap innerHeap[T]
}

func NewHeap[T Comparable[T]]() *Heap[T] {
	return &Heap[T]{
		heap: make(innerHeap[T], 0),
	}
}

func (h *Heap[T]) Len() int {
	return len(h.heap)
}

func (h *Heap[T]) Push(element T) *HeapElement[T] {
	heapElement := NewHeapElement(element)
	heap.Push(&h.heap, heapElement)

	return heapElement
}

func (h *Heap[T]) Pop() *HeapElement[T] {
	return heap.Pop(&h.heap).(*HeapElement[T])
}

func (h *Heap[T]) Peek() *HeapElement[T] {
	if h.Len() == 0 {
		return nil
	}

	return h.heap[0]
}

func (h *Heap[T]) Remove(element *HeapElement[T]) {
	heap.Remove(&h.heap, element.index)
}

func (h *Heap[T]) Clear() {
	h.heap = make(innerHeap[T], 0)
}

type innerHeap[T Comparable[T]] []*HeapElement[T]

func (h innerHeap[T]) Len() int {
	return len(h)
}

func (h innerHeap[T]) Less(i, j int) bool {
	return h[i].Value().CompareTo(h[j].Value()) < 0
}

func (h innerHeap[T]) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
	h[i].index, h[j].index = i, j
}

func (h *innerHeap[T]) Push(x interface{}) {
	data := x.(*HeapElement[T])
	*h = append(*h, data)
	data.index = len(*h) - 1
}

func (h *innerHeap[T]) Pop() interface{} {
	n := len(*h)
	element := (*h)[n-1]
	(*h)[n-1] = nil // avoid memory leak
	*h = (*h)[:n-1]
	element.index = -1

	return element
}

// Comparable is an interface for types that can be compared.
type Comparable[T any] interface {
	// CompareTo compares x with other.
	// To create a min heap, return:
	//    -1 if x < other
	//     0 if x == other
	//    +1 if x > other
	// To create a max heap, return the opposite.
	CompareTo(other T) int
}

// HeapElement is a wrapper around the value stored in the heap.
type HeapElement[T Comparable[T]] struct {
	value T
	index int
}

func NewHeapElement[T Comparable[T]](value T) *HeapElement[T] {
	return &HeapElement[T]{
		value: value,
	}
}

func (h *HeapElement[T]) Value() T {
	return h.value
}

func (h *HeapElement[T]) Index() int {
	return h.index
}
40 changes: 40 additions & 0 deletions common/heap_test.go
@@ -0,0 +1,40 @@
package common

import (
	"testing"

	"github.com/stretchr/testify/require"
)

type Int int

func (i Int) CompareTo(other Int) int {
	if i < other {
		return -1
	} else if i > other {
		return 1
	} else {
		return 0
	}
}

func TestHeap(t *testing.T) {
	h := NewHeap[Int]()

	require.Equal(t, 0, h.Len(), "Heap should be empty initially")

	h.Push(Int(3))
	h.Push(Int(1))
	h.Push(Int(2))

	require.Equal(t, 3, h.Len(), "Heap should have three elements after pushing")

	require.EqualValues(t, 1, h.Pop().Value(), "Pop should return the smallest element")
	require.Equal(t, 2, h.Len(), "Heap should have two elements after popping")

	require.EqualValues(t, 2, h.Pop().Value(), "Pop should return the next smallest element")
	require.Equal(t, 1, h.Len(), "Heap should have one element after popping")

	require.EqualValues(t, 3, h.Pop().Value(), "Pop should return the last element")
	require.Equal(t, 0, h.Len(), "Heap should be empty after popping all elements")
}
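Beyond the unit test above, here is a short sketch of how the heap might be used with a custom element type, including removal via the handle returned by Push. It is illustrative only: the import path assumes the fork's module name, and the job type is a hypothetical example, not part of the commit.

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
)

// job is an illustrative element type; only CompareTo is required by the heap.
type job struct {
	name     string
	priority int
}

func (j job) CompareTo(other job) int {
	// Lower priority value first, i.e. a min-heap on priority.
	if j.priority < other.priority {
		return -1
	} else if j.priority > other.priority {
		return 1
	}
	return 0
}

func main() {
	h := common.NewHeap[job]()
	low := h.Push(job{name: "prune", priority: 10})
	h.Push(job{name: "sync", priority: 1})
	h.Push(job{name: "gossip", priority: 5})

	h.Remove(low) // cancel a queued job using the handle returned by Push

	for h.Len() > 0 {
		fmt.Println(h.Pop().Value().name) // prints "sync", then "gossip"
	}
}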
71 changes: 71 additions & 0 deletions common/shrinkingmap.go
@@ -0,0 +1,71 @@
package common

// ShrinkingMap is a map that shrinks itself (by allocating a new map) after a certain number of deletions have been performed.
// If shrinkAfterDeletionsCount is set to <=0, the map will never shrink.
// This is useful to prevent memory leaks in long-running processes that delete a lot of keys from a map.
// See here for more details: https://github.com/golang/go/issues/20135
type ShrinkingMap[K comparable, V any] struct {
	m           map[K]V
	deletedKeys int

	shrinkAfterDeletionsCount int
}

func NewShrinkingMap[K comparable, V any](shrinkAfterDeletionsCount int) *ShrinkingMap[K, V] {
	return &ShrinkingMap[K, V]{
		m:                         make(map[K]V),
		shrinkAfterDeletionsCount: shrinkAfterDeletionsCount,
	}
}

func (s *ShrinkingMap[K, V]) Set(key K, value V) {
	s.m[key] = value
}

func (s *ShrinkingMap[K, V]) Get(key K) (value V, exists bool) {
	value, exists = s.m[key]
	return value, exists
}

func (s *ShrinkingMap[K, V]) Has(key K) bool {
	_, exists := s.m[key]
	return exists
}

func (s *ShrinkingMap[K, V]) Delete(key K) (deleted bool) {
	if _, exists := s.m[key]; !exists {
		return false
	}

	delete(s.m, key)
	s.deletedKeys++

	if s.shouldShrink() {
		s.shrink()
	}

	return true
}

func (s *ShrinkingMap[K, V]) Size() (size int) {
	return len(s.m)
}

func (s *ShrinkingMap[K, V]) Clear() {
	s.m = make(map[K]V)
	s.deletedKeys = 0
}

func (s *ShrinkingMap[K, V]) shouldShrink() bool {
	return s.shrinkAfterDeletionsCount > 0 && s.deletedKeys >= s.shrinkAfterDeletionsCount
}

func (s *ShrinkingMap[K, V]) shrink() {
	newMap := make(map[K]V, len(s.m))
	for k, v := range s.m {
		newMap[k] = v
	}

	s.m = newMap
	s.deletedKeys = 0
}
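A minimal usage sketch for ShrinkingMap, again illustrative and with an assumed import path: it behaves like a normal Go map, except that once the configured number of deletions has accumulated it reallocates itself so the memory held by deleted keys can actually be released.

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
)

func main() {
	// Reallocate the underlying map after every 1000 deletions.
	m := common.NewShrinkingMap[uint64, string](1000)

	for i := uint64(0); i < 5000; i++ {
		m.Set(i, fmt.Sprintf("block-%d", i))
	}

	// Deleting most keys would normally leave the map's internal buckets allocated;
	// here the map is rebuilt each time 1000 deletions accumulate.
	for i := uint64(0); i < 4900; i++ {
		m.Delete(i)
	}

	fmt.Println(m.Size()) // 100
	if v, ok := m.Get(4950); ok {
		fmt.Println(v) // block-4950
	}
}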