timer: drop HV interface, use this struct less often
* HV is not a perfect name
* in most cases we can use a simpler interface with one or two plain values
* the only exception is LastSeenMessage, where we still need the pair, so
  it's kept for this purpose only, but under a better name

Signed-off-by: Roman Khimov <[email protected]>
roman-khimov committed Mar 9, 2024
1 parent 7e15b1f commit 262f817
Showing 11 changed files with 87 additions and 130 deletions.
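The gist of the change: the opaque two-method HV interface is replaced with plain height/view values, plus one small concrete struct where a pair is still needed. A rough sketch of the before/after shape, using only names that appear in the diff below:

```go
// Before: time-related calls passed an HV interface value around.
type HV interface {
	Height() uint32
	View() byte
}
// Timer.Reset(hv HV, d time.Duration)
// DBFT.OnTimeout(hv HV)

// After: plain values wherever a pair isn't strictly required...
// Timer.Reset(height uint32, view byte, d time.Duration)
// DBFT.OnTimeout(height uint32, view byte)

// ...and a concrete struct kept solely for LastSeenMessage.
type HeightView struct {
	Height uint32
	View   byte
}
```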
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -14,6 +14,7 @@ Behaviour changes:
* drop outdated dBFT `Service` interface (#95)
* move all default implementations to `internal` package (#97)
* remove unused APIs of dBFT and payload interfaces (#104)
+* timer interface refactoring (#105)

Improvements:
* add MIT License (#78, #79)
6 changes: 3 additions & 3 deletions README.md
@@ -24,9 +24,9 @@ provided.
for main fields. `Transaction` is an entity which can be hashed. Two entities having
equal hashes are considered equal. No default implementation is provided.
5. `dbft` contains generic interfaces for payloads. No default implementation is provided.
-6. `dbft` contains generic interfaces for time-related operations (`Timer` and `HV`). `timer` package contains
-default time and height-view providers, it contains minimal required timer functionality and may safely be used in
-production code. It should make it easier to write tests concerning dBFT's time depending behaviour.
+6. `dbft` contains generic `Timer` interface for time-related operations. `timer` package contains
+default `Timer` provider that can safely be used in production code. The interface itself
+is mostly created for tests dealing with dBFT's time-dependent behaviour.
7. `internal` contains an example of custom identity types and payloads implementation used to implement
an example of dBFT's usage with 6-node consensus. Refer to `internal` subpackages for type-specific dBFT
implementation and tests. Refer to `internal/simulation` for an example of dBFT library usage.
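For illustration, a minimal consumer of the new API could look as follows. This is a sketch, not documented usage: crypto.Uint256 is the repository's internal test hash type (mirroring the test setup later in this diff), so external code would substitute its own Hash implementation, and a real setup needs many more options than shown.

```go
package main

import (
	"github.com/nspcc-dev/dbft"
	"github.com/nspcc-dev/dbft/internal/crypto"
	"github.com/nspcc-dev/dbft/timer"
)

func main() {
	// WithTimer is now the only time-related option; WithNewHeightView
	// is gone. With the remaining mandatory options unset, New simply
	// returns nil here.
	_ = dbft.New[crypto.Uint256](
		dbft.WithTimer[crypto.Uint256](timer.New()),
	)
}
```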
11 changes: 0 additions & 11 deletions config.go
@@ -14,8 +14,6 @@ type Config[H Hash] struct {
Logger *zap.Logger
// Timer
Timer Timer
-// NewHeightView is a constructor for [dbft.HV] object.
-NewHeightView func(height uint32, view byte) HV
// SecondsPerBlock is the number of seconds that
// need to pass before another block will be accepted.
SecondsPerBlock time.Duration
@@ -112,8 +110,6 @@ func checkConfig[H Hash](cfg *Config[H]) error {
return errors.New("private key is nil")
} else if cfg.Timer == nil {
return errors.New("Timer is nil")
-} else if cfg.NewHeightView == nil {
-return errors.New("NewHeightView is nil")
} else if cfg.CurrentHeight == nil {
return errors.New("CurrentHeight is nil")
} else if cfg.CurrentBlockHash == nil {
@@ -186,13 +182,6 @@ func WithTimer[H Hash](t Timer) func(config *Config[H]) {
}
}

-// WithNewHeightView sets NewHeightView constructor.
-func WithNewHeightView[H Hash](f func(height uint32, view byte) HV) func(config *Config[H]) {
-return func(cfg *Config[H]) {
-cfg.NewHeightView = f
-}
-}

// WithSecondsPerBlock sets SecondsPerBlock.
func WithSecondsPerBlock[H Hash](d time.Duration) func(config *Config[H]) {
return func(cfg *Config[H]) {
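The net effect on validation, sketched under the same assumptions as dbft_test.go below (crypto.Uint256 being the repo's internal test hash type): a config carrying only a Timer now trips on the next unset field, CurrentHeight, rather than on NewHeightView.

```go
// hasValidConfig is a hypothetical helper, not part of the API.
func hasValidConfig() bool {
	opts := []func(*dbft.Config[crypto.Uint256]){
		dbft.WithTimer[crypto.Uint256](timer.New()),
	}
	// Still false: CurrentHeight and other mandatory callbacks are unset,
	// so dbft.New returns nil just as before the refactoring.
	return dbft.New(opts...) != nil
}
```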
19 changes: 12 additions & 7 deletions context.go
@@ -6,6 +6,12 @@ import (
"time"
)

+// HeightView is a block height/consensus view pair.
+type HeightView struct {
+Height uint32
+View byte
+}
+
// Context is a main dBFT structure which
// contains all information needed for performing transitions.
type Context[H Hash] struct {
@@ -63,9 +69,9 @@ type Context[H Hash] struct {
ChangeViewPayloads []ConsensusPayload[H]
// LastChangeViewPayloads stores consensus ChangeView payloads for the last epoch.
LastChangeViewPayloads []ConsensusPayload[H]
-// LastSeenMessage array stores the height of the last seen message, for each validator.
-// if this node never heard from validator i, LastSeenMessage[i] will be -1.
-LastSeenMessage []*HV
+// LastSeenMessage array stores the height and view of the last seen message, for each validator.
+// If this node never heard a thing from validator i, LastSeenMessage[i] will be nil.
+LastSeenMessage []*HeightView

lastBlockTimestamp uint64 // ns-precision timestamp from the last header (used for the next block timestamp calculations).
lastBlockTime time.Time // Wall clock time of when the last block was first seen (used for timer adjustments).
@@ -118,7 +124,7 @@ func (c *Context[H]) CountCommitted() (count int) {
// for this view and that hasn't sent the Commit message at the previous views.
func (c *Context[H]) CountFailed() (count int) {
for i, hv := range c.LastSeenMessage {
-if c.CommitPayloads[i] == nil && (hv == nil || (*hv).Height() < c.BlockIndex || (*hv).View() < c.ViewNumber) {
+if c.CommitPayloads[i] == nil && (hv == nil || hv.Height < c.BlockIndex || hv.View < c.ViewNumber) {
count++
}
}
@@ -198,7 +204,7 @@ func (c *Context[H]) reset(view byte, ts uint64) {
c.LastChangeViewPayloads = make([]ConsensusPayload[H], n)

if c.LastSeenMessage == nil {
-c.LastSeenMessage = make([]*HV, n)
+c.LastSeenMessage = make([]*HeightView, n)
}
c.blockProcessed = false
} else {
@@ -231,8 +237,7 @@ func (c *Context[H]) reset(view byte, ts uint64) {
c.ViewNumber = view

if c.MyIndex >= 0 {
-hv := c.Config.NewHeightView(c.BlockIndex, c.ViewNumber)
-c.LastSeenMessage[c.MyIndex] = &hv
+c.LastSeenMessage[c.MyIndex] = &HeightView{c.BlockIndex, c.ViewNumber}
}
}

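The failure-counting predicate is unchanged in meaning; interface calls simply become field reads. Restated as a self-contained helper (isFailed is hypothetical, for illustration only):

```go
// A validator counts as failed if it hasn't committed in this round and
// we either never heard from it or last heard from it at an older
// height or view.
func isFailed(committed bool, hv *dbft.HeightView, blockIndex uint32, viewNumber byte) bool {
	return !committed && (hv == nil || hv.Height < blockIndex || hv.View < viewNumber)
}
```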
19 changes: 9 additions & 10 deletions dbft.go
@@ -168,22 +168,22 @@ func (d *DBFT[H]) OnTransaction(tx Transaction[H]) {
}

// OnTimeout advances state machine as if timeout was fired.
-func (d *DBFT[H]) OnTimeout(hv HV) {
+func (d *DBFT[H]) OnTimeout(height uint32, view byte) {
if d.Context.WatchOnly() || d.BlockSent() {
return
}

-if hv.Height() != d.BlockIndex || hv.View() != d.ViewNumber {
+if height != d.BlockIndex || view != d.ViewNumber {
d.Logger.Debug("timeout: ignore old timer",
-zap.Uint32("height", hv.Height()),
-zap.Uint("view", uint(hv.View())))
+zap.Uint32("height", height),
+zap.Uint("view", uint(view)))

return
}

d.Logger.Debug("timeout",
zap.Uint32("height", hv.Height()),
zap.Uint("view", uint(hv.View())))
zap.Uint32("height", height),
zap.Uint("view", uint(view)))

if d.IsPrimary() && !d.RequestSentOrReceived() {
d.sendPrepareRequest()
@@ -236,9 +236,8 @@ func (d *DBFT[H]) OnReceive(msg ConsensusPayload[H]) {
}

hv := d.LastSeenMessage[msg.ValidatorIndex()]
-if hv == nil || (*hv).Height() < msg.Height() || (*hv).View() < msg.ViewNumber() {
-hv := d.Config.NewHeightView(msg.Height(), msg.ViewNumber())
-d.LastSeenMessage[msg.ValidatorIndex()] = &hv
+if hv == nil || hv.Height < msg.Height() || hv.View < msg.ViewNumber() {
+d.LastSeenMessage[msg.ValidatorIndex()] = &HeightView{msg.Height(), msg.ViewNumber()}
}

if d.BlockSent() && msg.Type() != RecoveryRequestType {
@@ -609,7 +608,7 @@ func (d *DBFT[H]) changeTimer(delay time.Duration) {
zap.Uint32("h", d.BlockIndex),
zap.Int("v", int(d.ViewNumber)),
zap.Duration("delay", delay))
-d.Timer.Reset(d.Config.NewHeightView(d.BlockIndex, d.ViewNumber), delay)
+d.Timer.Reset(d.BlockIndex, d.ViewNumber, delay)
}

func (d *DBFT[H]) extendTimer(count time.Duration) {
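Callers now pass the pair straight into OnTimeout, and the stale-timer guard compares it against the current round. A hedged sketch (fire is a hypothetical helper; BlockIndex and ViewNumber are the Context fields used above):

```go
// fire demonstrates the guard: only a timeout matching the current
// height and view advances the state machine.
func fire(d *dbft.DBFT[crypto.Uint256]) {
	d.OnTimeout(d.BlockIndex, d.ViewNumber)   // matches the round: processed
	d.OnTimeout(d.BlockIndex, d.ViewNumber+1) // view mismatch: logged and dropped
}
```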
38 changes: 15 additions & 23 deletions dbft_test.go
@@ -63,7 +63,7 @@ func TestDBFT_OnStartPrimarySendPrepareRequest(t *testing.T) {
require.EqualValues(t, 2, p.ValidatorIndex())

t.Run("primary send ChangeView on timeout", func(t *testing.T) {
-service.OnTimeout(timer.HV{H: s.currHeight + 1})
+service.OnTimeout(s.currHeight+1, 0)

// if there are many faulty must send RecoveryRequest
cv := s.tryRecv()
@@ -73,10 +73,9 @@

// if all nodes are up must send ChangeView
for i := range service.LastSeenMessage {
-var hv dbft.HV = timer.HV{H: s.currHeight + 1}
-service.LastSeenMessage[i] = &hv
+service.LastSeenMessage[i] = &dbft.HeightView{s.currHeight + 1, 0}
}
-service.OnTimeout(timer.HV{H: s.currHeight + 1})
+service.OnTimeout(s.currHeight+1, 0)

cv = s.tryRecv()
require.NotNil(t, cv)
@@ -167,8 +166,7 @@ func TestDBFT_OnReceiveRequestSendResponse(t *testing.T) {
service.Start(0)

for i := range service.LastSeenMessage {
-var hv dbft.HV = timer.HV{H: s.currHeight + 1}
-service.LastSeenMessage[i] = &hv
+service.LastSeenMessage[i] = &dbft.HeightView{s.currHeight + 1, 0}
}

p := s.getPrepareRequest(5, txs[0].Hash())
@@ -305,10 +303,10 @@ func TestDBFT_OnReceiveCommit(t *testing.T) {
require.NoError(t, service.Header().Verify(pub, cm.GetCommit().Signature()))

t.Run("send recovery message on timeout", func(t *testing.T) {
-service.OnTimeout(timer.HV{H: 1})
+service.OnTimeout(1, 0)
require.Nil(t, s.tryRecv())

-service.OnTimeout(timer.HV{H: s.currHeight + 1})
+service.OnTimeout(s.currHeight+1, 0)

r := s.tryRecv()
require.NotNil(t, r)
@@ -396,13 +394,13 @@ func TestDBFT_OnReceiveChangeView(t *testing.T) {
service.OnReceive(resp)
require.Nil(t, s.tryRecv())

-service.OnTimeout(timer.HV{H: s.currHeight + 1})
+service.OnTimeout(s.currHeight+1, 0)
cv := s.tryRecv()
require.NotNil(t, cv)
require.Equal(t, dbft.ChangeViewType, cv.Type())

t.Run("primary sends prepare request after timeout", func(t *testing.T) {
-service.OnTimeout(timer.HV{H: s.currHeight + 1, V: 1})
+service.OnTimeout(s.currHeight+1, 1)
pr := s.tryRecv()
require.NotNil(t, pr)
require.Equal(t, dbft.PrepareRequestType, pr.Type())
@@ -425,11 +423,6 @@ func TestDBFT_Invalid(t *testing.T) {
})

opts = append(opts, dbft.WithTimer[crypto.Uint256](timer.New()))
t.Run("without NewHeightView", func(t *testing.T) {
require.Nil(t, dbft.New(opts...))
})

opts = append(opts, dbft.WithNewHeightView[crypto.Uint256](timer.NewHV))
t.Run("without CurrentHeight", func(t *testing.T) {
require.Nil(t, dbft.New(opts...))
})
@@ -582,7 +575,7 @@ func TestDBFT_FourGoodNodesDeadlock(t *testing.T) {
// (possible on timeout) and sends the ChangeView message.
s3.OnReceive(resp0V0)
s3.OnReceive(resp2V0)
-s3.OnTimeout(timer.HV{H: r3.currHeight + 1, V: 0})
+s3.OnTimeout(r3.currHeight+1, 0)
cv3V0 := r3.tryRecv()
require.NotNil(t, cv3V0)
require.Equal(t, dbft.ChangeViewType, cv3V0.Type())
@@ -592,7 +585,7 @@
// current view) and sends the ChangeView message.
s1.OnReceive(resp0V0)
s1.OnReceive(cv3V0)
-s1.OnTimeout(timer.HV{H: r1.currHeight + 1, V: 0})
+s1.OnTimeout(r1.currHeight+1, 0)
cv1V0 := r1.tryRecv()
require.NotNil(t, cv1V0)
require.Equal(t, dbft.ChangeViewType, cv1V0.Type())
@@ -601,7 +594,7 @@
// (possible on timeout after receiving at least M non-commit messages from the
// current view) and sends the ChangeView message.
s0.OnReceive(cv3V0)
-s0.OnTimeout(timer.HV{H: r0.currHeight + 1, V: 0})
+s0.OnTimeout(r0.currHeight+1, 0)
cv0V0 := r0.tryRecv()
require.NotNil(t, cv0V0)
require.Equal(t, dbft.ChangeViewType, cv0V0.Type())
@@ -617,7 +610,7 @@
require.Equal(t, uint8(1), s0.ViewNumber)

// Step 10. The primary (at view 1) replica 0 sends the PrepareRequest message.
-s0.OnTimeout(timer.HV{H: r0.currHeight + 1, V: 1})
+s0.OnTimeout(r0.currHeight+1, 1)
reqV1 := r0.tryRecv()
require.NotNil(t, reqV1)
require.Equal(t, dbft.PrepareRequestType, reqV1.Type())
@@ -640,7 +633,7 @@
// Intermediate step A. It is added to make step 14 possible. The backup (at
// view 1) replica 3 doesn't receive anything for a long time and sends
// RecoveryRequest.
-s3.OnTimeout(timer.HV{H: r3.currHeight + 1, V: 1})
+s3.OnTimeout(r3.currHeight+1, 1)
rcvr3V1 := r3.tryRecv()
require.NotNil(t, rcvr3V1)
require.Equal(t, dbft.RecoveryRequestType, rcvr3V1.Type())
@@ -675,15 +668,15 @@ func TestDBFT_FourGoodNodesDeadlock(t *testing.T) {
// of "lost" nodes. That's why we'he added Intermediate steps A and B.
//
// After that replica 1 is allowed to send the CV message.
-s1.OnTimeout(timer.HV{H: r1.currHeight + 1, V: 1})
+s1.OnTimeout(r1.currHeight+1, 1)
cv1V1 := r1.tryRecv()
require.NotNil(t, cv1V1)
require.Equal(t, dbft.ChangeViewType, cv1V1.Type())

// Step 13. The primary (at view 1) replica 0 decides to change its view
// (possible on timeout) and sends the ChangeView message.
s0.OnReceive(resp1V1)
-s0.OnTimeout(timer.HV{H: r0.currHeight + 1, V: 1})
+s0.OnTimeout(r0.currHeight+1, 1)
cv0V1 := r0.tryRecv()
require.NotNil(t, cv0V1)
require.Equal(t, dbft.ChangeViewType, cv0V1.Type())
@@ -819,7 +812,6 @@ func (s testState) copyWithIndex(myIndex int) *testState {
func (s *testState) getOptions() []func(*dbft.Config[crypto.Uint256]) {
opts := []func(*dbft.Config[crypto.Uint256]){
dbft.WithTimer[crypto.Uint256](timer.New()),
-dbft.WithNewHeightView[crypto.Uint256](timer.NewHV),
dbft.WithCurrentHeight[crypto.Uint256](func() uint32 { return s.currHeight }),
dbft.WithCurrentBlockHash[crypto.Uint256](func() crypto.Uint256 { return s.currHash }),
dbft.WithGetValidators[crypto.Uint256](func(...dbft.Transaction[crypto.Uint256]) []dbft.PublicKey { return s.pubs }),
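A side note on the test literals above: HeightView is an ordinary struct, so keyed initialization works too and is more resilient to field reordering. A hypothetical equivalent of the unkeyed form:

```go
service.LastSeenMessage[i] = &dbft.HeightView{
	Height: s.currHeight + 1,
	View:   0,
}
```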
1 change: 0 additions & 1 deletion internal/consensus/consensus.go
@@ -20,7 +20,6 @@ func New(logger *zap.Logger, key dbft.PrivateKey, pub dbft.PublicKey,
verifyPayload func(consensusPayload dbft.ConsensusPayload[crypto.Uint256]) error) *dbft.DBFT[crypto.Uint256] {
return dbft.New[crypto.Uint256](
dbft.WithTimer[crypto.Uint256](timer.New()),
-dbft.WithNewHeightView[crypto.Uint256](timer.NewHV),
dbft.WithLogger[crypto.Uint256](logger),
dbft.WithSecondsPerBlock[crypto.Uint256](time.Second*5),
dbft.WithKeyPair[crypto.Uint256](key, pub),
2 changes: 1 addition & 1 deletion internal/simulation/main.go
@@ -94,7 +94,7 @@ func (n *simNode) Run(ctx context.Context) {
n.log.Info("context cancelled")
return
case <-n.d.Timer.C():
-n.d.OnTimeout(n.d.Timer.HV())
+n.d.OnTimeout(n.d.Timer.Height(), n.d.Timer.View())

(Codecov annotation: added line internal/simulation/main.go#L97 was not covered by tests.)
case msg := <-n.messages:
n.d.OnReceive(msg)
}
14 changes: 5 additions & 9 deletions timer.go
@@ -10,21 +10,17 @@ type Timer interface {
// Now returns current time.
Now() time.Time
// Reset resets timer to the specified block height and view.
-Reset(hv HV, d time.Duration)
+Reset(height uint32, view byte, d time.Duration)
// Sleep stops execution for duration d.
Sleep(d time.Duration)
// Extend extends current timer with duration d.
Extend(d time.Duration)
// Stop stops timer.
Stop()
-// HV returns current height and view set for the timer.
-HV() HV
-// C returns channel for timer events.
-C() <-chan time.Time
-}
-
-// HV is an abstraction for pair of a Height and a View.
-type HV interface {
// Height returns current height set for the timer.
Height() uint32
// View returns current view set for the timer.
View() byte
+// C returns channel for timer events.
+C() <-chan time.Time
}
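Since the interface exists mostly for tests, a test double is now trivial to write against the reshaped methods. A sketch only (mockTimer is hypothetical; the production implementation lives in the timer package):

```go
import "time"

type mockTimer struct {
	height uint32
	view   byte
	ch     chan time.Time
}

func newMockTimer() *mockTimer { return &mockTimer{ch: make(chan time.Time, 1)} }

func (t *mockTimer) Now() time.Time { return time.Now() }
func (t *mockTimer) Reset(height uint32, view byte, _ time.Duration) {
	t.height, t.view = height, view // remember the round, ignore wall-clock time
}
func (t *mockTimer) Sleep(time.Duration)  {}
func (t *mockTimer) Extend(time.Duration) {}
func (t *mockTimer) Stop()                {}
func (t *mockTimer) Height() uint32       { return t.height }
func (t *mockTimer) View() byte           { return t.view }
func (t *mockTimer) C() <-chan time.Time  { return t.ch }

// A test can then trigger a deterministic timeout:
//	tm := newMockTimer()
//	// ...wire it in via dbft.WithTimer and start the service loop...
//	tm.ch <- time.Now() // the loop then calls d.OnTimeout(tm.Height(), tm.View())
```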
