-
Notifications
You must be signed in to change notification settings - Fork 58
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Prune EthereumSignatures and EthereumEventVoteRecords #517
Changes from 4 commits
203b02f
3b7e663
9e7219b
6e6d225
9576296
5f290f5
61c2f84
798bb28
8eac7c9
3384780
d879f93
016b9d8
b59b95f
18297d3
97a7cfc
f11ae2f
9b2fa69
7774350
93898a6
cee01a5
ad2e1ed
1a60aa4
c9e7b46
d094333
10ba8e8
62052b0
8aec8a4
e612af8
944db59
fa739d7
2b4f813
bddc200
45f3b66
71176eb
cfb424d
751b4f8
fdfee5f
aabb8de
32d2a87
1b53075
bfe6f10
71bacf4
1bbf5c0
bb2df29
2259b01
7160287
4f10dd4
6fd891b
49fe53c
92b7c88
8f974ee
386d151
43e7eb9
f525d8a
5ee3eee
5fa34d1
cb3a8f2
8bfdcdd
3b5532f
c063451
dea66cd
e1fb6b3
a3e1a77
daa37bb
a1a73c8
3255a88
2dc35c4
54f8996
fee0d33
8fbe3de
c2d78eb
5ccfc53
9e4ef50
18d89aa
1db56ab
9d0c767
f8618ee
c2a34f9
39c8888
3258760
5402aa8
dbb3f9c
5b2cb3e
b98ada6
64f09b2
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -27,6 +27,8 @@ func BeginBlocker(ctx sdk.Context, k keeper.Keeper) { | |
// EndBlocker is called at the end of every block | ||
func EndBlocker(ctx sdk.Context, k keeper.Keeper) { | ||
outgoingTxSlashing(ctx, k) | ||
// Do we need to concern ourselves with future slashing windows for this pruning? | ||
pruneEventVoteRecords(ctx, k) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Should we move this to |
||
eventVoteRecordTally(ctx, k) | ||
updateObservedEthereumHeight(ctx, k) | ||
} | ||
|
@@ -100,12 +102,30 @@ func pruneSignerSetTxs(ctx sdk.Context, k keeper.Keeper) { | |
earliestToPrune := currentBlock - params.SignedSignerSetTxsWindow | ||
for _, set := range k.GetSignerSetTxs(ctx) { | ||
if set.Nonce < lastObserved.Nonce && set.Height < earliestToPrune { | ||
k.DeleteEthereumSignatures(ctx, set.GetStoreIndex()) | ||
EricBolten marked this conversation as resolved.
Show resolved
Hide resolved
|
||
k.DeleteOutgoingTx(ctx, set.GetStoreIndex()) | ||
} | ||
} | ||
} | ||
} | ||
|
||
// pruneEventVoteRecords deletes all event vote records with nonces that are older than the last observed event nonce | ||
func pruneEventVoteRecords(ctx sdk.Context, k keeper.Keeper) { | ||
lastObservedEventNonce := k.GetLastObservedEventNonce(ctx) | ||
EricBolten marked this conversation as resolved.
Show resolved
Hide resolved
|
||
k.IterateEthereumEventVoteRecords(ctx, func(key []byte, eventVoteRecord *types.EthereumEventVoteRecord) bool { | ||
event, err := types.UnpackEvent(eventVoteRecord.Event) | ||
if err != nil { | ||
panic(err) | ||
} | ||
eventNonce := event.GetEventNonce() | ||
if eventNonce <= lastObservedEventNonce { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Shouldn't this be There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. If the nonce has been canonically observed there is no need to keep new votes for it right? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. One thing I have noticed while helping validators diagnose issues with their orchestrators is that it's valuable to have evidence they have or have not been submitting things correctly. If we prune everything every time, it's very hard to query for this sort of information. I think leaving some small bit of history before pruning might be worthwhile. |
||
k.DeleteEthereumEventVoteRecord(ctx, eventNonce, event.Hash()) | ||
} | ||
|
||
return false | ||
}) | ||
} | ||
|
||
// Iterate over all attestations currently being voted on in order of nonce and | ||
// "Observe" those who have passed the threshold. Break the loop once we see | ||
// an attestation that has not passed the threshold | ||
|
@@ -155,11 +175,11 @@ func eventVoteRecordTally(ctx sdk.Context, k keeper.Keeper) { | |
// order to keep this information current regardless of the level of bridge activity. | ||
// | ||
// We determine if we should update the latest heights based on the following criteria: | ||
// 1. A consensus of validators agrees that the proposed height is equal to or less than their | ||
// last observed height, in order to reconcile the many different heights that will be submitted. | ||
// The highest height that meets this criteria will be the proposed height. | ||
// 2. The proposed consensus heights from this process are greater than the values stored from the last time | ||
// we observed an Ethereum event from the bridge | ||
// 1. A consensus of validators agrees that the proposed height is equal to or less than their | ||
// last observed height, in order to reconcile the many different heights that will be submitted. | ||
// The highest height that meets this criteria will be the proposed height. | ||
// 2. The proposed consensus heights from this process are greater than the values stored from the last time | ||
// we observed an Ethereum event from the bridge | ||
func updateObservedEthereumHeight(ctx sdk.Context, k keeper.Keeper) { | ||
// wait some minutes before checking the height votes | ||
if ctx.BlockHeight()%50 != 0 { | ||
|
@@ -228,12 +248,15 @@ func updateObservedEthereumHeight(ctx sdk.Context, k keeper.Keeper) { | |
// cleanupTimedOutBatchTxs deletes batches that have passed their expiration on Ethereum | ||
// keep in mind several things when modifying this function | ||
// A) unlike nonces timeouts are not monotonically increasing, meaning batch 5 can have a later timeout than batch 6 | ||
// this means that we MUST only cleanup a single batch at a time | ||
// | ||
// this means that we MUST only cleanup a single batch at a time | ||
// | ||
// B) it is possible for ethereumHeight to be zero if no events have ever occurred, make sure your code accounts for this | ||
// C) When we compute the timeout we do our best to estimate the Ethereum block height at that very second. But what we work with | ||
// here is the Ethereum block height at the time of the last Deposit or Withdraw to be observed. It's very important we do not | ||
// project, if we do a slowdown on ethereum could cause a double spend. Instead timeouts will *only* occur after the timeout period | ||
// AND any deposit or withdraw has occurred to update the Ethereum block height. | ||
// | ||
// here is the Ethereum block height at the time of the last Deposit or Withdraw to be observed. It's very important we do not | ||
// project, if we do a slowdown on ethereum could cause a double spend. Instead timeouts will *only* occur after the timeout period | ||
// AND any deposit or withdraw has occurred to update the Ethereum block height. | ||
func cleanupTimedOutBatchTxs(ctx sdk.Context, k keeper.Keeper) { | ||
ethereumHeight := k.GetLastObservedEthereumBlockHeight(ctx).EthereumHeight | ||
k.IterateOutgoingTxsByType(ctx, types.BatchTxPrefixByte, func(key []byte, otx types.OutgoingTx) bool { | ||
|
@@ -250,17 +273,21 @@ func cleanupTimedOutBatchTxs(ctx sdk.Context, k keeper.Keeper) { | |
// cleanupTimedOutContractCallTxs deletes logic calls that have passed their expiration on Ethereum | ||
// keep in mind several things when modifying this function | ||
// A) unlike nonces timeouts are not monotonically increasing, meaning call 5 can have a later timeout than call 6 | ||
// this means that we MUST only cleanup a single call at a time | ||
// | ||
// this means that we MUST only cleanup a single call at a time | ||
// | ||
// B) it is possible for ethereumHeight to be zero if no events have ever occurred, make sure your code accounts for this | ||
// C) When we compute the timeout we do our best to estimate the Ethereum block height at that very second. But what we work with | ||
// here is the Ethereum block height at the time of the last Deposit or Withdraw to be observed. It's very important we do not | ||
// project, if we do a slowdown on ethereum could cause a double spend. Instead timeouts will *only* occur after the timeout period | ||
// AND any deposit or withdraw has occurred to update the Ethereum block height. | ||
// | ||
// here is the Ethereum block height at the time of the last Deposit or Withdraw to be observed. It's very important we do not | ||
// project, if we do a slowdown on ethereum could cause a double spend. Instead timeouts will *only* occur after the timeout period | ||
// AND any deposit or withdraw has occurred to update the Ethereum block height. | ||
func cleanupTimedOutContractCallTxs(ctx sdk.Context, k keeper.Keeper) { | ||
ethereumHeight := k.GetLastObservedEthereumBlockHeight(ctx).EthereumHeight | ||
k.IterateOutgoingTxsByType(ctx, types.ContractCallTxPrefixByte, func(_ []byte, otx types.OutgoingTx) bool { | ||
cctx, _ := otx.(*types.ContractCallTx) | ||
if cctx.Timeout < ethereumHeight { | ||
k.DeleteEthereumSignatures(ctx, cctx.GetStoreIndex()) | ||
k.DeleteOutgoingTx(ctx, cctx.GetStoreIndex()) | ||
} | ||
return true | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -274,18 +274,24 @@ func TestMsgSubmitEthreumEventSendToCosmosMultiValidator(t *testing.T) { | |
balance2 := input.BankKeeper.GetAllBalances(ctx, myCosmosAddr) | ||
require.Equal(t, sdk.Coins{sdk.NewInt64Coin(denom, 12)}, balance2) | ||
|
||
// and vouchers added to the account | ||
// this happens when 2/3 of the voting power have submitted claims, so the third isn't required | ||
balance3 := input.BankKeeper.GetAllBalances(ctx, myCosmosAddr) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Couldn't |
||
require.Equal(t, sdk.Coins{sdk.NewInt64Coin(denom, 12)}, balance3) | ||
|
||
// when | ||
ctx = ctx.WithBlockTime(myBlockTime) | ||
_, err = h(ctx, ethClaim3Msg) | ||
gravity.EndBlocker(ctx, input.GravityKeeper) | ||
require.NoError(t, err) | ||
|
||
// and attestation persisted | ||
// and attestations pruned | ||
a1 = input.GravityKeeper.GetEthereumEventVoteRecord(ctx, myNonce, ethClaim1.Hash()) | ||
a2 = input.GravityKeeper.GetEthereumEventVoteRecord(ctx, myNonce, ethClaim2.Hash()) | ||
a3 := input.GravityKeeper.GetEthereumEventVoteRecord(ctx, myNonce, ethClaim3.Hash()) | ||
require.NotNil(t, a3) | ||
// and no additional added to the account | ||
balance3 := input.BankKeeper.GetAllBalances(ctx, myCosmosAddr) | ||
require.Equal(t, sdk.Coins{sdk.NewInt64Coin(denom, 12)}, balance3) | ||
require.Nil(t, a1) | ||
require.Nil(t, a2) | ||
require.Nil(t, a3) | ||
} | ||
|
||
func TestMsgSetDelegateAddresses(t *testing.T) { | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -129,12 +129,12 @@ func (k Keeper) GetLastUnbondingBlockHeight(ctx sdk.Context) uint64 { | |
// ETHEREUM SIGNATURES // | ||
/////////////////////////////// | ||
|
||
// getEthereumSignature returns a valset confirmation by a nonce and validator address | ||
// getEthereumSignature returns an ethereum signature by a nonce and validator address | ||
func (k Keeper) getEthereumSignature(ctx sdk.Context, storeIndex []byte, validator sdk.ValAddress) []byte { | ||
return ctx.KVStore(k.storeKey).Get(types.MakeEthereumSignatureKey(storeIndex, validator)) | ||
} | ||
|
||
// SetEthereumSignature sets a valset confirmation | ||
// SetEthereumSignature sets an ethereum signature | ||
func (k Keeper) SetEthereumSignature(ctx sdk.Context, sig types.EthereumTxConfirmation, val sdk.ValAddress) []byte { | ||
key := types.MakeEthereumSignatureKey(sig.GetStoreIndex(), val) | ||
ctx.KVStore(k.storeKey).Set(key, sig.GetSignature()) | ||
|
@@ -151,6 +151,14 @@ func (k Keeper) GetEthereumSignatures(ctx sdk.Context, storeIndex []byte) map[st | |
return signatures | ||
} | ||
|
||
// DeleteEthereumSignatures deletes all ethereum signatures for a given outgoing tx by store index | ||
func (k Keeper) DeleteEthereumSignatures(ctx sdk.Context, storeIndex []byte) { | ||
k.iterateEthereumSignatures(ctx, storeIndex, func(val sdk.ValAddress, h []byte) bool { | ||
ctx.KVStore(k.storeKey).Delete(types.MakeEthereumSignatureKey(storeIndex, val)) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Is there a potential issue here deleting elements from something we are iterating over? Perhaps we should instead collect a slice of the keys and delete them after the iterator. |
||
return false | ||
}) | ||
} | ||
|
||
// iterateEthereumSignatures iterates through all valset confirms by nonce in ASC order | ||
func (k Keeper) iterateEthereumSignatures(ctx sdk.Context, storeIndex []byte, cb func(sdk.ValAddress, []byte) bool) { | ||
prefixStore := prefix.NewStore(ctx.KVStore(k.storeKey), append([]byte{types.EthereumSignatureKey}, storeIndex...)) | ||
|
@@ -680,7 +688,7 @@ func (k Keeper) MigrateGravityContract(ctx sdk.Context, newBridgeAddress string, | |
|
||
// Reset all ethereum event nonces to zero | ||
k.setLastObservedEventNonce(ctx, 0) | ||
k.iterateEthereumEventVoteRecords(ctx, func(_ []byte, voteRecord *types.EthereumEventVoteRecord) bool { | ||
k.IterateEthereumEventVoteRecords(ctx, func(_ []byte, voteRecord *types.EthereumEventVoteRecord) bool { | ||
for _, vote := range voteRecord.Votes { | ||
val, err := sdk.ValAddressFromBech32(vote) | ||
|
||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Can you elaborate on this question?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
there is a window of blocks for which the chain waits to prune batch votes so that validators have time to sign them before getting slashed. At the time when I commented this I didn't fully understand how vote records work for events as distinct from signatures for outgoing TXs. There's no reason to keep event vote records around after the last observed event nonce has incremented so I'll remove this comment.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Now that you have me thinking about it, I think there's certainly an argument to be made for a slashing window for the oracle function as well. It's as necessary to complete the bridge transaction lifecycle as signatures are. From this perspective, we may want to keep a sliding window's worth of history for both.