@@ -15,14 +15,6 @@ fn get_state_sync_new_chunks(
     Ok(store.get_ser(DBCol::StateSyncNewChunks, block_hash.as_ref())?)
 }
 
-fn iter_state_sync_new_chunks_keys<'a>(
-    store: &'a Store,
-) -> impl Iterator<Item = Result<CryptoHash, std::io::Error>> + 'a {
-    store
-        .iter(DBCol::StateSyncNewChunks)
-        .map(|item| item.and_then(|(k, _v)| CryptoHash::try_from_slice(&k)))
-}
-
 fn iter_state_sync_hashes_keys<'a>(
     store: &'a Store,
 ) -> impl Iterator<Item = Result<EpochId, std::io::Error>> + 'a {
@@ -31,17 +23,18 @@ fn iter_state_sync_hashes_keys<'a>(
         .map(|item| item.and_then(|(k, _v)| EpochId::try_from_slice(&k)))
 }
 
+/// Saves new chunk info and returns whether there are at least 2 chunks per shard in the epoch for header.prev_hash()
 fn save_epoch_new_chunks<T: ChainStoreAccess>(
     chain_store: &T,
     store_update: &mut StoreUpdate,
     header: &BlockHeader,
-) -> Result<(), Error> {
+) -> Result<bool, Error> {
     let Some(mut num_new_chunks) =
         get_state_sync_new_chunks(&chain_store.store(), header.prev_hash())?
     else {
         // This might happen in the case of epoch sync where we save individual headers without having all
         // headers that belong to the epoch.
-        return Ok(());
+        return Ok(false);
     };
 
     // This shouldn't happen because block headers in the same epoch should have chunks masks
@@ -53,17 +46,11 @@ fn save_epoch_new_chunks<T: ChainStoreAccess>(
             block_hash=%header.hash(), chunk_mask_len=%header.chunk_mask().len(), stored_len=%num_new_chunks.len(),
             "block header's chunk mask not of the same length as stored value in DBCol::StateSyncNewChunks",
         );
-        return Ok(());
+        return Ok(false);
     }
 
     let done = num_new_chunks.iter().all(|num_chunks| *num_chunks >= 2);
-    if done {
-        // TODO(current_epoch_state_sync): this will not be correct if this block doesn't end up finalized on the main chain.
-        // We should fix it by setting the sync hash when it's finalized, which requires making changes to how we take state snapshots.
-        store_update.set_ser(DBCol::StateSyncHashes, header.epoch_id().as_ref(), header.hash())?;
-        store_update.delete_all(DBCol::StateSyncNewChunks);
-        return Ok(());
-    }
+
     for (num_new_chunks, new_chunk) in num_new_chunks.iter_mut().zip(header.chunk_mask().iter()) {
         // Only need to reach 2, so don't bother adding more than that
         if *new_chunk && *num_new_chunks < 2 {
@@ -72,7 +59,7 @@ fn save_epoch_new_chunks<T: ChainStoreAccess>(
     }
 
     store_update.set_ser(DBCol::StateSyncNewChunks, header.hash().as_ref(), &num_new_chunks)?;
-    Ok(())
+    Ok(done)
 }
 
 fn on_new_epoch(store_update: &mut StoreUpdate, header: &BlockHeader) -> Result<(), Error> {
@@ -96,31 +83,89 @@ fn remove_old_epochs(
     Ok(())
 }
 
-fn remove_old_blocks<T: ChainStoreAccess>(
+/// Helper to turn DBNotFoundErr() into None. We might get DBNotFoundErr() in the case of epoch sync
+/// where we save individual headers without having all headers that belong to the epoch.
+fn maybe_get_block_header<T: ChainStoreAccess>(
+    chain_store: &T,
+    block_hash: &CryptoHash,
+) -> Result<Option<BlockHeader>, Error> {
+    match chain_store.get_block_header(block_hash) {
+        Ok(block_header) => Ok(Some(block_header)),
+        // This might happen in the case of epoch sync where we save individual headers without having all
+        // headers that belong to the epoch.
+        Err(Error::DBNotFoundErr(_)) => Ok(None),
+        Err(e) => Err(e),
+    }
+}
+
+fn has_enough_new_chunks(store: &Store, block_hash: &CryptoHash) -> Result<Option<bool>, Error> {
+    let Some(num_new_chunks) = get_state_sync_new_chunks(store, block_hash)? else {
+        // This might happen in the case of epoch sync where we save individual headers without having all
+        // headers that belong to the epoch.
+        return Ok(None);
+    };
+    Ok(Some(num_new_chunks.iter().all(|num_chunks| *num_chunks >= 2)))
+}
+
+/// Save num new chunks info and store the state sync hash if it has been found. We store it only
+/// once it becomes final.
+/// This should only be called if DBCol::StateSyncHashes does not yet have an entry for header.epoch_id().
+/// The logic should still be correct if it is, but it's unnecessary and will waste a lot of time if called
+/// on a header far away from the epoch start.
+fn on_new_header<T: ChainStoreAccess>(
     chain_store: &T,
     store_update: &mut StoreUpdate,
     header: &BlockHeader,
 ) -> Result<(), Error> {
-    if header.last_final_block() == &CryptoHash::default() {
+    let done = save_epoch_new_chunks(chain_store, store_update, header)?;
+    if !done {
         return Ok(());
     }
-    // We don't need to keep info for old blocks around. After a block is finalized, we don't need anything before it
-    let last_final_header = match chain_store.get_block_header(header.last_final_block()) {
-        Ok(h) => h,
-        // This might happen in the case of epoch sync where we save individual headers without having all
-        // headers that belong to the epoch.
-        Err(Error::DBNotFoundErr(_)) => return Ok(()),
-        Err(e) => return Err(e),
+
+    // Now check if the sync hash is known and finalized. The sync hash is the block after the first block with at least 2
+    // chunks per shard in the epoch. Note that we cannot just check if the current header.last_final_block() is the sync
+    // hash, because even though this function is called for each header, it is not guaranteed that we'll see every block
+    // by checking header.last_final_block(), because it is possible for the final block to jump by more than one upon a new
+    // head update. So here we iterate backwards until we find it, if it exists yet.
+
+    let epoch_id = header.epoch_id();
+    let last_final_hash = header.last_final_block();
+
+    let Some(mut sync) = maybe_get_block_header(chain_store, last_final_hash)? else {
+        return Ok(());
     };
-    for block_hash in iter_state_sync_new_chunks_keys(&chain_store.store()) {
-        let block_hash = block_hash?;
-        let old_header = chain_store.get_block_header(&block_hash)?;
-        if old_header.height() < last_final_header.height() {
-            store_update.delete(DBCol::StateSyncNewChunks, block_hash.as_ref());
+    loop {
+        let Some(sync_prev) = maybe_get_block_header(chain_store, sync.prev_hash())? else {
+            return Ok(());
+        };
+        if sync_prev.epoch_id() != epoch_id
+            || sync_prev.height() == chain_store.get_genesis_height()
+        {
+            return Ok(());
+        }
+        if has_enough_new_chunks(&chain_store.store(), sync_prev.hash())? != Some(true) {
+            return Ok(());
         }
-    }
 
-    Ok(())
+        let Some(sync_prev_prev) = maybe_get_block_header(chain_store, sync_prev.prev_hash())?
+        else {
+            return Ok(());
+        };
+        let Some(prev_prev_done) =
+            has_enough_new_chunks(&chain_store.store(), sync_prev_prev.hash())?
+        else {
+            return Ok(());
+        };
+
+        if !prev_prev_done {
+            // `sync_prev_prev` doesn't have enough new chunks, and `sync_prev` does, meaning `sync` is the first final
+            // valid sync block
+            store_update.set_ser(DBCol::StateSyncHashes, epoch_id.as_ref(), sync.hash())?;
+            store_update.delete_all(DBCol::StateSyncNewChunks);
+            return Ok(());
+        }
+        sync = sync_prev;
+    }
 }
 
 /// Updates information in the DB related to calculating the correct "sync_hash" for this header's epoch,
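A minimal standalone sketch of the rule introduced above (not part of this diff; the shard count and chunk masks are made up): it shows how the "at least 2 new chunks per shard" counting in `save_epoch_new_chunks` singles out the block whose child becomes the sync hash, which `on_new_header` then records once final.

```rust
// Toy illustration of the counting rule, assuming two shards and a made-up
// sequence of chunk masks (true == the shard produced a new chunk in that block).
fn main() {
    let chunk_masks: [[bool; 2]; 4] = [
        [true, true],  // block 1
        [true, false], // block 2: shard 1 missed its chunk
        [false, true], // block 3: shard 0 missed its chunk
        [true, true],  // block 4
    ];
    // Per-shard count of new chunks seen so far in the epoch, capped at 2 as in the PR.
    let mut num_new_chunks = [0u8; 2];
    for (height, mask) in chunk_masks.iter().enumerate() {
        for (count, new_chunk) in num_new_chunks.iter_mut().zip(mask.iter()) {
            if *new_chunk && *count < 2 {
                *count += 1;
            }
        }
        let done = num_new_chunks.iter().all(|c| *c >= 2);
        println!("block {}: counts {:?}, done = {}", height + 1, num_new_chunks, done);
        // The first block whose cumulative counts reach 2 in every shard (block 3 here)
        // plays the role of the sync_prev block; the sync hash is the block built on top
        // of it (block 4), and on_new_header only stores it once that choice is final.
    }
}
```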
@@ -154,6 +199,35 @@ pub(crate) fn update_sync_hashes<T: ChainStoreAccess>(
         return remove_old_epochs(&chain_store.store(), store_update, header, &prev_header);
     }
 
-    save_epoch_new_chunks(chain_store, store_update, header)?;
-    remove_old_blocks(chain_store, store_update, header)
+    on_new_header(chain_store, store_update, header)
+}
+
+/// Returns whether `block_hash` is the block that will appear immediately before the "sync_hash" block. That is,
+/// whether it is going to be the prev_hash of the "sync_hash" block, when it is found.
+///
+/// `block_hash` is the prev_hash of the future "sync_hash" block iff it is the first block for which the
+/// number of new chunks in the epoch in each shard is at least 2
+///
+/// This function can only return true before we save the "sync_hash" block to the `StateSyncHashes` column,
+/// because it relies on data stored in the `StateSyncNewChunks` column, which is cleaned up after that.
+///
+/// This is used when making state snapshots, because in that case we don't need to wait for the "sync_hash"
+/// block to be finalized to take a snapshot of the state as of its prev prev block
+pub(crate) fn is_sync_prev_hash(
+    store: &Store,
+    block_hash: &CryptoHash,
+    prev_hash: &CryptoHash,
+) -> Result<bool, Error> {
+    let Some(new_chunks) = get_state_sync_new_chunks(store, block_hash)? else {
+        return Ok(false);
+    };
+    let done = new_chunks.iter().all(|num_chunks| *num_chunks >= 2);
+    if !done {
+        return Ok(false);
+    }
+    let Some(prev_new_chunks) = get_state_sync_new_chunks(store, prev_hash)? else {
+        return Ok(false);
+    };
+    let prev_done = prev_new_chunks.iter().all(|num_chunks| *num_chunks >= 2);
+    Ok(!prev_done)
 }
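As a usage illustration for `is_sync_prev_hash` (again not part of the diff; the map and block names below are hypothetical stand-ins for DBCol::StateSyncNewChunks entries), the boundary condition it checks can be modeled like this:

```rust
use std::collections::HashMap;

// Toy stand-in for the StateSyncNewChunks column: block "hash" -> per-shard new-chunk counts.
type NewChunkCounts = HashMap<&'static str, Vec<u8>>;

// Mirrors the predicate: `block` qualifies iff all of its counts are >= 2
// while its parent still has at least one shard below 2.
fn is_sync_prev_hash(counts: &NewChunkCounts, block: &str, prev: &str) -> bool {
    let done = |h: &str| {
        counts
            .get(h)
            .map(|c| c.iter().all(|n| *n >= 2))
            .unwrap_or(false) // missing entry (e.g. after cleanup) means "no"
    };
    done(block) && !done(prev)
}

fn main() {
    let counts: NewChunkCounts =
        HashMap::from([("b2", vec![2, 1]), ("b3", vec![2, 2]), ("b4", vec![2, 2])]);
    assert!(!is_sync_prev_hash(&counts, "b2", "b1")); // not all shards at 2 yet
    assert!(is_sync_prev_hash(&counts, "b3", "b2"));  // first block with all counts >= 2
    assert!(!is_sync_prev_hash(&counts, "b4", "b3")); // parent was already done
    println!("b3 is the sync_prev block in this toy chain");
}
```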