Advance MBT prototype
p-offtermatt committed Nov 24, 2023
1 parent c7cba8b commit 2d130df
Showing 6 changed files with 78 additions and 62 deletions.
20 changes: 11 additions & 9 deletions tests/difference/core/quint_model/driver/core.go
@@ -38,6 +38,7 @@ type ModelParams struct {
CcvTimeout map[ChainId]time.Duration
UnbondingPeriodPerChain map[ChainId]time.Duration
TrustingPeriodPerChain map[ChainId]time.Duration
MaxClockDrift time.Duration
}

type Driver struct {
@@ -147,10 +148,13 @@ func (s *Driver) consumerPower(i int64, chain ChainId) (int64, error) {

// consumerTokens returns the number of tokens that the validator with
// id (ix) i has delegated to it in total on the provider chain
func (s *Driver) providerPower(i int64) int64 {
func (s *Driver) providerPower(i int64) (int64, error) {
v, found := s.providerStakingKeeper().GetValidator(s.ctx(P), s.validator(i))
require.True(s.t, found, "GetValidator(%v) -> !found", s.validator(i))
return v.BondedTokens().Int64()
if !found {
return 0, fmt.Errorf("Validator with id %v not found on provider!", i)
} else {
return v.BondedTokens().Int64(), nil
}
}

// delegation returns the number of delegated tokens in the delegation from
@@ -179,7 +183,7 @@ func (s *Driver) providerTokens(i int64) int64 {
return v.Tokens.Int64()
}

func (s *Driver) providerValidatorSet(chain ChainId) []stakingtypes.Validator {
func (s *Driver) providerValidatorSet() []stakingtypes.Validator {
return s.providerStakingKeeper().GetAllValidators(s.ctx(P))
}

@@ -239,14 +243,12 @@ func (s *Driver) consumerSlash(val sdk.ConsAddress, h int64, isDowntime bool, ch
}
}

func (s *Driver) updateClient(chain ChainId) {
s.path(chain).UpdateClient(string(chain))
func (s *Driver) updateClient(chain ChainId) error {
return s.path(chain).UpdateClient(string(chain), false)
}

// deliver numPackets packets from the network to chain
func (s *Driver) deliver(chain ChainId, numPackets int) {
// Makes sure client is updated
s.updateClient(chain)
// Deliver any outstanding acks
s.path(chain).DeliverAcks(string(chain), 999999)
// Consume deliverable packets from the network
@@ -347,7 +349,7 @@ func (s *Driver) getChainStateString(chain ChainId) string {
for index, valName := range s.valNames {
var power int64
if s.isProviderChain(chain) {
power = s.providerPower(int64(index))
power, _ = s.providerPower(int64(index))
} else {
power, _ = s.consumerPower(int64(index), chain)
}
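The new `(int64, error)` signature of `providerPower` lets callers decide how to handle a missing validator instead of the old behaviour of failing the test inside the helper via `require.True`. A minimal sketch of a caller, assuming the surrounding test harness (the helper name and loop bound are hypothetical, not part of this commit):

```go
// logProviderPowers prints the provider-side bonded power of each validator id.
// A validator that is not found is logged and skipped rather than aborting the
// test, which is exactly what the error return of providerPower enables.
func logProviderPowers(t *testing.T, driver *Driver, numValidators int64) {
	for i := int64(0); i < numValidators; i++ {
		power, err := driver.providerPower(i)
		if err != nil {
			t.Logf("validator %v: not found on provider: %v", i, err)
			continue
		}
		t.Logf("validator %v: bonded power %v", i, power)
	}
}
```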
87 changes: 46 additions & 41 deletions tests/difference/core/quint_model/driver/mbt_test.go
@@ -11,7 +11,9 @@ import (

cmttypes "github.com/cometbft/cometbft/types"
sdktypes "github.com/cosmos/cosmos-sdk/types"
ibctesting "github.com/cosmos/ibc-go/v7/testing"
providertypes "github.com/cosmos/interchain-security/v3/x/ccv/provider/types"
"github.com/kylelemons/godebug/pretty"

"github.com/informalsystems/itf-go/itf"
"github.com/stretchr/testify/require"
@@ -27,6 +29,8 @@ func TestMBT(t *testing.T) {

t.Log("Running traces from the traces folder")

ibctesting.TimeIncrement = 1 * time.Nanosecond

for _, dirEntry := range dirEntries {
t.Log("Running trace ", dirEntry.Name())
RunItfTrace(t, "traces/"+dirEntry.Name())
@@ -129,13 +133,15 @@ func RunItfTrace(t *testing.T, path string) {
t.Log("Reading the trace...")

for index, state := range trace.States {
t.Logf("Reading state %v", index)
t.Logf("Reading state %v of trace %v", index, path)

// modelState := state.VarValues["currentState"]
trace := state.VarValues["trace"].Value.(itf.ListExprType)
// fmt.Println(modelState)
lastAction := trace[len(trace)-1].Value.(itf.MapExprType)

currentModelState := state.VarValues["currentState"].Value.(itf.MapExprType)

actionKind := lastAction["kind"].Value.(string)
switch actionKind {
case "init":
@@ -165,15 +171,18 @@ func RunItfTrace(t *testing.T, path string) {
// needs a header of height H+1 to accept the packet
// so we do one time advancement with a very small increment,
// and then increment the rest of the time

// TODO: start and stop consumers
driver.endAndBeginBlock("provider", time.Duration(timeAdvancement)*time.Second-1*time.Nanosecond)
driver.endAndBeginBlock("provider", 1*time.Nanosecond)
driver.endAndBeginBlock("provider", time.Duration(timeAdvancement)*time.Second-1*time.Nanosecond)

// save the last timestamp for each chain,
// save the last timestamp for runningConsumers,
// because setting up chains will modify timestamps
// when the coordinator is starting chains
lastTimestamps := driver.allTimes()
lastTimestamps := make(map[ChainId]time.Time, len(consumers))
for _, consumer := range driver.runningConsumers() {
lastTimestamps[ChainId(consumer.ChainId)] = driver.time(ChainId(consumer.ChainId))
}

driver.coordinator.CurrentTime = driver.time("provider")
// start consumers
for _, consumer := range consumersToStart {
driver.setupConsumer(
@@ -185,7 +194,6 @@
valNames,
driver.providerChain(),
)
lastTimestamps[ChainId(consumer.Value.(string))] = driver.time(ChainId(consumer.Value.(string)))
}

// stop consumers
@@ -194,40 +202,23 @@
driver.stopConsumer(consumerChain)

// remove the consumer from timestamps map and time offsets
delete(lastTimestamps, consumerChain)
delete(timeOffsets, consumerChain)
}

// remove the stopped consumers from the timestamps
for _, consumer := range consumersToStop {
consumerChain := ChainId(consumer.Value.(string))
delete(lastTimestamps, consumerChain)
delete(timeOffsets, consumerChain)
}

// restore the old timestamps, but increment by 1 nanosecond to avoid duplicate timestamps in headers
for chain, oldTimestamp := range lastTimestamps {
// if the chain was stopped, we don't need to restore the timestamp
driver.setTime(ChainId(chain), oldTimestamp.Add(1*time.Nanosecond))
// also set the offset for each chain correctly - offset is incremented by 1 nanosecond
timeOffsets[ChainId(chain)] = timeOffsets[ChainId(chain)].Add(1 * time.Nanosecond)
// reset the times for the consumers that were not stopped or started just now
// their times were messed up by the coordinator
for consumer, timestamp := range lastTimestamps {
driver.setTime(consumer, timestamp)
}

// set the timeOffsets for the newly started chains
for _, consumer := range consumersToStart {
consumerChain := ChainId(consumer.Value.(string))
timeOffsets[consumerChain] = driver.time(consumerChain)
}

// need to produce another block to fix messed up times because the provider committed blocks with
// times that were fixed by the coordinator
driver.endAndBeginBlock("provider", 0)
providerHeader := driver.providerChain().LastHeader

// same for the consumers that were started
for _, consumer := range consumersToStart {
consumerChainId := string(consumer.Value.(string))
driver.endAndBeginBlock(ChainId(consumerChainId), 0)
// the time offset is the time of the provider,
// because the model sets the timestamp on the consumer to the provider time
// when the consumer is started
timeOffsets[consumerChain] = timeOffsets["provider"]
}

// for all connected consumers, update the clients...
@@ -238,8 +229,14 @@
}
consumerChainId := string(consumer.ChainId)

driver.path(ChainId(consumerChainId)).AddClientHeader(Provider, providerHeader)
driver.path(ChainId(consumerChainId)).UpdateClient(consumerChainId)
driver.path(ChainId(consumerChainId)).AddClientHeader(Provider, driver.providerHeader())
var err error
if ConsumerStatus(currentModelState, consumerChainId) == "expired" {
err = driver.path(ChainId(consumerChainId)).UpdateClient(consumerChainId, true)
} else {
err = driver.path(ChainId(consumerChainId)).UpdateClient(consumerChainId, false)
}
require.True(t, err == nil, "Error updating client from %v on provider: %v", consumerChainId, err)
}

case "EndAndBeginBlockForConsumer":
@@ -252,13 +249,20 @@
headerBefore := driver.chain(ChainId(consumerChain)).LastHeader
_ = headerBefore

driver.endAndBeginBlock(ChainId(consumerChain), time.Duration(timeAdvancement)*time.Second-1*time.Nanosecond)
driver.endAndBeginBlock(ChainId(consumerChain), 1*time.Nanosecond)
driver.endAndBeginBlock(ChainId(consumerChain), time.Duration(timeAdvancement)*time.Second-1*time.Nanosecond)

// update the client on the provider
consumerHeader := driver.chain(ChainId(consumerChain)).LastHeader
driver.path(ChainId(consumerChain)).AddClientHeader(consumerChain, consumerHeader)
driver.path(ChainId(consumerChain)).UpdateClient(Provider)
var err error
if LocalClientExpired(currentModelState, consumerChain) {
err = driver.path(ChainId(consumerChain)).UpdateClient(Provider, true)
} else {
err = driver.path(ChainId(consumerChain)).UpdateClient(Provider, false)
}
require.True(t, err == nil, "Error updating client from %v on provider: %v", consumerChain, err)

case "DeliverVscPacket":
consumerChain := lastAction["consumerChain"].Value.(string)
t.Log("DeliverVscPacket", consumerChain)
@@ -282,7 +286,6 @@
t.Logf("Current actual state: %s", driver.getStateString())
}

currentModelState := state.VarValues["currentState"].Value.(itf.MapExprType)
// check that the actual state is the same as the model state

// compare the running consumers
@@ -326,7 +329,7 @@

func CompareValidatorSets(t *testing.T, driver *Driver, currentModelState map[string]itf.Expr, consumers []string, index int) {
modelValSet := ValidatorSet(currentModelState, "provider")
curValSet := driver.providerValidatorSet("provider")
curValSet := driver.providerValidatorSet()

actualValSet := make(map[string]int64, len(curValSet))

@@ -395,6 +398,8 @@ func ComparePacketQueue(t *testing.T, driver *Driver, currentModelState map[stri
// CompareTimes compares the block times in the model to the block times in the system.
// It takes into account the timeOffsets, which should be the starting times
// of the chains in the system after the system has been initialized.
// We only compare down to seconds, because the model and system will differ
// on the order of nanoseconds.
func CompareTimes(
t *testing.T,
driver *Driver,
@@ -409,7 +414,7 @@
require.Equal(t,
providerLastTimestamp,
actualProviderTime.Unix()-providerOffset.Unix(),
"Block times do not match")
"Block times do not match for provider")

for _, chain := range consumers {
modelLastTimestamp := Time(currentModelState, chain)
Expand All @@ -418,7 +423,7 @@ func CompareTimes(
require.Equal(t,
modelLastTimestamp,
actualChainTime.Unix()-timeOffsets[ChainId(chain)].Unix(),
"Block times do not match")
"Block times do not match for chain %v", chain)
}
}

@@ -445,7 +450,7 @@ func CompareValSet(modelValSet map[string]itf.Expr, systemValSet map[string]int6
}

if !reflect.DeepEqual(expectedValSet, actualValSet) {
return fmt.Errorf("Model validator set %v, system validator set %v", expectedValSet, actualValSet)
return fmt.Errorf("Validator sets do not match: (+ expected, - actual): %v", pretty.Compare(expectedValSet, actualValSet))
}
return nil
}
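The reworked mismatch message in `CompareValSet` uses `pretty.Compare` from `github.com/kylelemons/godebug/pretty`, which returns an empty string for equal values and a line-by-line diff otherwise. A self-contained sketch of the pattern (the sample validator sets are made up):

```go
package main

import (
	"fmt"

	"github.com/kylelemons/godebug/pretty"
)

func main() {
	expected := map[string]int64{"alice": 10, "bob": 5}
	actual := map[string]int64{"alice": 10, "bob": 7}

	// pretty.Compare renders a unified diff of the two values,
	// one line per differing field, which keeps large set mismatches readable.
	if diff := pretty.Compare(expected, actual); diff != "" {
		fmt.Printf("Validator sets do not match:\n%s\n", diff)
	}
}
```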
8 changes: 8 additions & 0 deletions tests/difference/core/quint_model/driver/model_viewer.go
@@ -69,3 +69,11 @@ func RunningConsumers(curStateExpr itf.MapExprType) []string {
}
return runningConsumers
}

func ConsumerStatus(curStateExpr itf.MapExprType, consumer string) string {
return ProviderState(curStateExpr)["consumerStatus"].Value.(itf.MapExprType)[consumer].Value.(string)
}

func LocalClientExpired(curStateExpr itf.MapExprType, consumer string) bool {
return ConsumerState(curStateExpr, consumer)["localClientExpired"].Value.(bool)
}
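These accessors feed the client-update branches in `mbt_test.go` above: `ConsumerStatus` drives the update of the consumer's client on the provider, and `LocalClientExpired` plays the same role for the consumer's own client of the provider. A condensed sketch of the provider-side branch (variable names are taken from the test; this is illustrative, not an additional change in the commit):

```go
// If the model says the consumer's client has expired, deliver the update
// with expectExpiration = true so a rejected update is the anticipated outcome.
expectExpired := ConsumerStatus(currentModelState, consumerChainId) == "expired"
err := driver.path(ChainId(consumerChainId)).UpdateClient(consumerChainId, expectExpired)
require.NoError(t, err, "Error updating client from %v on provider", consumerChainId)
```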
8 changes: 4 additions & 4 deletions tests/difference/core/quint_model/driver/setup.go
@@ -333,7 +333,7 @@ func (s *Driver) ConfigureNewPath(consumerChain *ibctesting.TestChain, providerC
tmCfg := providerEndPoint.ClientConfig.(*ibctesting.TendermintConfig)
tmCfg.UnbondingPeriod = params.UnbondingPeriodPerChain[ChainId(providerChain.ChainID)]
tmCfg.TrustingPeriod = params.TrustingPeriodPerChain[ChainId(providerChain.ChainID)]
tmCfg.MaxClockDrift = params.TrustingPeriodPerChain[ChainId(providerChain.ChainID)] // make the clock drift a non-issue
tmCfg.MaxClockDrift = params.TrustingPeriodPerChain[ChainId(providerChain.ChainID)] * 5 // make the clock drift a non-issue
err := providerEndPoint.CreateClient()
require.NoError(s.t, err, "Error creating client on provider for chain %v", consumerChain.ChainID)

@@ -352,7 +352,7 @@
tmCfg = consumerEndPoint.ClientConfig.(*ibctesting.TendermintConfig)
tmCfg.UnbondingPeriod = params.UnbondingPeriodPerChain[consumerChainId]
tmCfg.TrustingPeriod = params.TrustingPeriodPerChain[consumerChainId]
tmCfg.MaxClockDrift = params.TrustingPeriodPerChain[ChainId(providerChain.ChainID)] // make the clock drift a non-issue
tmCfg.MaxClockDrift = params.TrustingPeriodPerChain[ChainId(providerChain.ChainID)] * 5 // make the clock drift a non-issue

consumerClientState := ibctmtypes.NewClientState(
providerChain.ChainID, tmCfg.TrustLevel, tmCfg.TrustingPeriod, tmCfg.UnbondingPeriod, tmCfg.MaxClockDrift,
@@ -390,9 +390,9 @@ func (s *Driver) ConfigureNewPath(consumerChain *ibctesting.TestChain, providerC
simibc.BeginBlock(consumerChain, 5)

// Update clients to the latest header.
err = simibc.UpdateReceiverClient(consumerEndPoint, providerEndPoint, lastConsumerHeader)
err = simibc.UpdateReceiverClient(consumerEndPoint, providerEndPoint, lastConsumerHeader, false)
require.NoError(s.t, err, "Error updating client on consumer for chain %v", consumerChain.ChainID)
err = simibc.UpdateReceiverClient(providerEndPoint, consumerEndPoint, lastProviderHeader)
err = simibc.UpdateReceiverClient(providerEndPoint, consumerEndPoint, lastProviderHeader, false)
require.NoError(s.t, err, "Error updating client on provider for chain %v", consumerChain.ChainID)

// path is ready to go
10 changes: 5 additions & 5 deletions testutil/simibc/relay_util.go
@@ -7,7 +7,6 @@ import (
ibctmtypes "github.com/cosmos/ibc-go/v7/modules/light-clients/07-tendermint"
ibctesting "github.com/cosmos/ibc-go/v7/testing"
simapp "github.com/cosmos/ibc-go/v7/testing/simapp"
"github.com/stretchr/testify/require"

errorsmod "cosmossdk.io/errors"

@@ -23,7 +22,7 @@
// must have a client of the sender chain that it can update.
//
// NOTE: this function MAY be used independently of the rest of simibc.
func UpdateReceiverClient(sender, receiver *ibctesting.Endpoint, header *ibctmtypes.Header) (err error) {
func UpdateReceiverClient(sender, receiver *ibctesting.Endpoint, header *ibctmtypes.Header, expectExpiration bool) (err error) {
err = augmentHeader(sender.Chain, receiver.Chain, receiver.ClientID, header)

if err != nil {
@@ -34,8 +33,9 @@ func UpdateReceiverClient(sender, receiver *ibctesting.Endpoint, header *ibctmty
receiver.ClientID, header,
receiver.Chain.SenderAccount.GetAddress().String(),
)

require.NoError(receiver.Chain.T, err)
if err != nil {
return err
}

_, _, err = simapp.SignAndDeliver(
receiver.Chain.T,
@@ -46,7 +46,7 @@
receiver.Chain.ChainID,
[]uint64{receiver.Chain.SenderAccount.GetAccountNumber()},
[]uint64{receiver.Chain.SenderAccount.GetSequence()},
true, true, receiver.Chain.SenderPrivKey,
true, !expectExpiration, receiver.Chain.SenderPrivKey,
)

if err != nil {
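`UpdateReceiverClient` now takes an `expectExpiration` flag and propagates errors instead of asserting internally; the flag is negated into the `expectPass` argument of `SignAndDeliver`. A minimal sketch of the two calling modes, assuming endpoints and a header from the surrounding test setup (placeholder names, not part of the commit):

```go
// Normal case: the receiver's client is within its trusting period,
// so the update must succeed and any error is a real test failure.
err := UpdateReceiverClient(consumerEndpoint, providerEndpoint, header, false)
require.NoError(t, err, "client update should have succeeded")

// Expired case: the caller expects the light client to have expired, so the
// update is delivered with expectPass = false and the rejection comes back
// as an ordinary error for the caller to check against the model.
err = UpdateReceiverClient(consumerEndpoint, providerEndpoint, header, true)
_ = err // interpreted by the caller, e.g. against localClientExpired in the model
```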
7 changes: 4 additions & 3 deletions testutil/simibc/relayed_path.go
@@ -110,14 +110,15 @@ func (f *RelayedPath) InvolvesChain(chainID string) bool {
// UpdateClient updates the chain with the latest sequence
// of available headers committed by the counterparty chain since
// the last call to UpdateClient (or all for the first call).
func (f *RelayedPath) UpdateClient(chainID string) {
func (f *RelayedPath) UpdateClient(chainID string, expectExpiration bool) error {
for _, header := range f.clientHeaders[f.Counterparty(chainID)] {
err := UpdateReceiverClient(f.endpoint(f.Counterparty(chainID)), f.endpoint(chainID), header)
err := UpdateReceiverClient(f.endpoint(f.Counterparty(chainID)), f.endpoint(chainID), header, expectExpiration)
if err != nil {
f.t.Fatal("in relayed path could not update client of chain: ", chainID, " with header: ", header, " err: ", err)
return err
}
}
f.clientHeaders[f.Counterparty(chainID)] = []*ibctmtypes.Header{}
return nil
}

// DeliverPackets delivers UP TO <num> packets to the chain which have been
