From a25ea63dec0460d9418432bd67f78e17520dfef4 Mon Sep 17 00:00:00 2001
From: Jordan Hrycaj
Date: Mon, 2 Sep 2024 10:34:42 +0000
Subject: [PATCH 1/2] Revert lazy implementation (#2585)

---
 .../aristo/aristo_delete/delete_subtree.nim   | 94 +++++++------------
 nimbus/db/aristo/aristo_delta.nim             | 26 -----
 nimbus/db/aristo/aristo_delta/delta_merge.nim |  4 -
 .../db/aristo/aristo_delta/delta_reverse.nim  |  4 -
 .../db/aristo/aristo_desc/desc_structural.nim |  2 -
 nimbus/db/aristo/aristo_layers.nim            |  6 --
 6 files changed, 35 insertions(+), 101 deletions(-)

diff --git a/nimbus/db/aristo/aristo_delete/delete_subtree.nim b/nimbus/db/aristo/aristo_delete/delete_subtree.nim
index 996703e775..e0afa2bcaa 100644
--- a/nimbus/db/aristo/aristo_delete/delete_subtree.nim
+++ b/nimbus/db/aristo/aristo_delete/delete_subtree.nim
@@ -12,7 +12,6 @@

 import
   eth/common,
-  results,
   ".."/[aristo_desc, aristo_get, aristo_layers],
   ./delete_helpers

@@ -20,13 +19,34 @@ import
 # Private helpers
 # ------------------------------------------------------------------------------

-proc collectStoTreeLazily(
-    db: AristoDbRef;                   # Database, top layer
-    rvid: RootedVertexID;              # Root vertex
-    accPath: Hash256;                  # Accounts cache designator
-    stoPath: NibblesBuf;               # Current storage path
+proc delSubTreeNow(
+    db: AristoDbRef;
+    rvid: RootedVertexID;
+      ): Result[void,AristoError] =
+  ## Delete sub-tree now
+  let (vtx, _) = db.getVtxRc(rvid).valueOr:
+    if error == GetVtxNotFound:
+      return ok()
+    return err(error)
+
+  if vtx.vType == Branch:
+    for n in 0..15:
+      if vtx.bVid[n].isValid:
+        ? db.delSubTreeNow((rvid.root,vtx.bVid[n]))
+
+  db.disposeOfVtx(rvid)
+
+  ok()
+
+
+proc delStoTreeNow(
+    db: AristoDbRef;                   # Database, top layer
+    rvid: RootedVertexID;              # Root vertex
+    accPath: Hash256;                  # Accounts cache designator
+    stoPath: NibblesBuf;               # Current storage path
     ): Result[void,AristoError] =
-  ## Collect vertex/vid and delete cache entries.
+  ## Implementation of *delete* sub-trie.
+
   let (vtx, _) = db.getVtxRc(rvid).valueOr:
     if error == GetVtxNotFound:
       return ok()
     return err(error)
@@ -36,7 +56,7 @@ proc collectStoTreeLazily(
     of Branch:
       for i in 0..15:
         if vtx.bVid[i].isValid:
-          ? db.collectStoTreeLazily(
+          ? db.delStoTreeNow(
             (rvid.root, vtx.bVid[i]), accPath,
             stoPath & vtx.ePfx & NibblesBuf.nibble(byte i))

     of Leaf:
       let stoPath = Hash256(data: (stoPath & vtx.lPfx).getBytes())
       db.layersPutStoLeaf(AccountKey.mixUp(accPath, stoPath), nil)

-  # There is no useful approach avoiding to walk the whole tree for updating
-  # the storage data access cache.
-  #
-  # The alternative of stopping here and clearing the whole cache did degrade
-  # performance significantly in some tests on mainnet when importing `era1`.
-  #
-  # The cache it was seen
-  # * filled up to maximum size most of the time
-  # * at the same time having no `stoPath` hit at all (so there was nothing
-  #   to be cleared.)
-  #
-  ok()
-
-
-proc disposeOfSubTree(
-    db: AristoDbRef;                   # Database, top layer
-    rvid: RootedVertexID;              # Root vertex
-      ) =
-  ## Evaluate results from `collectSubTreeLazyImpl()` or from
-  ## `collectStoTreeLazyImpl()`.
-  ##
-  let vtx = db.getVtxRc(rvid).value[0]
-  if vtx.vType == Branch:
-    for n in 0..15:
-      if vtx.bVid[n].isValid:
-        db.top.delTree.add (rvid.root,vtx.bVid[n])
-
-  # Delete top of tree now.
   db.disposeOfVtx(rvid)

+  ok()
+
 # ------------------------------------------------------------------------------
 # Public functions
 # ------------------------------------------------------------------------------

 proc delSubTreeImpl*(
-    db: AristoDbRef;                   # Database, top layer
-    root: VertexID;                    # Root vertex
+    db: AristoDbRef;
+    root: VertexID;
       ): Result[void,AristoError] =
-  ## Delete all the `subRoots` if there are a few, only. Otherwise
-  ## mark it for deleting later.
-  discard db.getVtxRc((root,root)).valueOr:
-    if error == GetVtxNotFound:
-      return ok()
-    return err(error)
-
-  db.disposeOfSubTree((root,root))
-
-  ok()
+  db.delSubTreeNow (root,root)


 proc delStoTreeImpl*(
@@ -99,17 +84,8 @@ proc delStoTreeImpl*(
     rvid: RootedVertexID;              # Root vertex
     accPath: Hash256;
       ): Result[void,AristoError] =
-  ## Collect vertex/vid and cache entry.
-  discard db.getVtxRc(rvid).valueOr:
-    if error == GetVtxNotFound:
-      return ok()
-    return err(error)
-
-  ? db.collectStoTreeLazily(rvid, accPath, NibblesBuf())
-
-  db.disposeOfSubTree(rvid)
-
-  ok()
+  ## Implementation of *delete* sub-trie.
+  db.delStoTreeNow(rvid, accPath, NibblesBuf())

 # ------------------------------------------------------------------------------
 # End
diff --git a/nimbus/db/aristo/aristo_delta.nim b/nimbus/db/aristo/aristo_delta.nim
index cec172c985..f408a394c9 100644
--- a/nimbus/db/aristo/aristo_delta.nim
+++ b/nimbus/db/aristo/aristo_delta.nim
@@ -24,26 +24,6 @@ import
 logScope:
   topics = "aristo-delta"

-# ------------------------------------------------------------------------------
-# Private functions
-# ------------------------------------------------------------------------------
-
-proc toStr(rvid: RootedVertexID): string =
-  "$" & rvid.root.uint64.toHex & ":" & rvid.vid.uint64.toHex
-
-proc delSubTree(db: AristoDbRef; writer: PutHdlRef; rvid: RootedVertexID) =
-  ## Collect subtrees marked for deletion
-  let (vtx,_) = db.getVtxRc(rvid).valueOr:
-    notice "Descending for deletion stopped", rvid=(rvid.toStr), error
-    return
-  for vid in vtx.subVids:
-    db.delSubTree(writer, (rvid.root, vid))
-  db.backend.putVtxFn(writer, rvid, VertexRef(nil))
-  db.backend.putKeyFn(writer, rvid, VOID_HASH_KEY)
-  # Make sure the `rvid` is not mentioned here, anymore for further update.
-  db.balancer.sTab.del rvid
-  db.balancer.kMap.del rvid
-
 # ------------------------------------------------------------------------------
 # Public functions, save to backend
 # ------------------------------------------------------------------------------
@@ -109,12 +89,6 @@ proc deltaPersistent*(
   # Store structural single trie entries
   let writeBatch = ? be.putBegFn()

-  # This one must come first in order to avoid duplicate `sTree[]` or
-  # `kMap[]` instructions, in the worst case overwriting previously deleted
-  # entries.
-  for rvid in db.balancer.delTree:
-    db.delSubTree(writeBatch, rvid)
-
   # Now the standard `sTree[]` and `kMap[]` instructions.
   for rvid, vtx in db.balancer.sTab:
     be.putVtxFn(writeBatch, rvid, vtx)
   for rvid, key in db.balancer.kMap:
diff --git a/nimbus/db/aristo/aristo_delta/delta_merge.nim b/nimbus/db/aristo/aristo_delta/delta_merge.nim
index 8f9c7265d1..c2dc3119ac 100644
--- a/nimbus/db/aristo/aristo_delta/delta_merge.nim
+++ b/nimbus/db/aristo/aristo_delta/delta_merge.nim
@@ -48,7 +48,6 @@ proc deltaMerge*(
     result = LayerRef(
       sTab: lower.sTab,           # shallow copy (entries will not be modified)
       kMap: lower.kMap,
-      delTree: lower.delTree,
      accLeaves: lower.accLeaves,
      stoLeaves: lower.stoLeaves,
      vTop: upper.vTop)
@@ -68,9 +67,6 @@ proc deltaMerge*(
       if not upper.kMap.hasKey(rvid):
         upper.kMap[rvid] = key

-    for rvid in lower.delTree:
-      upper.delTree.add rvid
-
     for (accPath,leafVtx) in lower.accLeaves.pairs:
       if not upper.accLeaves.hasKey(accPath):
         upper.accLeaves[accPath] = leafVtx
diff --git a/nimbus/db/aristo/aristo_delta/delta_reverse.nim b/nimbus/db/aristo/aristo_delta/delta_reverse.nim
index 2650075584..308e54089d 100644
--- a/nimbus/db/aristo/aristo_delta/delta_reverse.nim
+++ b/nimbus/db/aristo/aristo_delta/delta_reverse.nim
@@ -97,10 +97,6 @@ proc revFilter*(
       else:
         return err((rvid.vid,rc.error))

-  # Reverse changes for `delTree[]` list.
-  for rvid in filter.delTree:
-    ? db.revSubTree(rev, rvid)
-
   ok(rev)

 # ------------------------------------------------------------------------------
diff --git a/nimbus/db/aristo/aristo_desc/desc_structural.nim b/nimbus/db/aristo/aristo_desc/desc_structural.nim
index a6b58b0647..b1e277ac50 100644
--- a/nimbus/db/aristo/aristo_desc/desc_structural.nim
+++ b/nimbus/db/aristo/aristo_desc/desc_structural.nim
@@ -124,8 +124,6 @@ type
     kMap*: Table[RootedVertexID,HashKey]    ## Merkle hash key mapping
     vTop*: VertexID                         ## Last used vertex ID

-    delTree*: seq[RootedVertexID]           ## Not yet fully deleted sub-trees
-
     accLeaves*: Table[Hash256, VertexRef]   ## Account path -> VertexRef
     stoLeaves*: Table[Hash256, VertexRef]   ## Storage path -> VertexRef

diff --git a/nimbus/db/aristo/aristo_layers.nim b/nimbus/db/aristo/aristo_layers.nim
index 03eb9884ce..b4d8a33dd4 100644
--- a/nimbus/db/aristo/aristo_layers.nim
+++ b/nimbus/db/aristo/aristo_layers.nim
@@ -171,7 +171,6 @@ func isEmpty*(ly: LayerRef): bool =
   ## tables are empty. The field `txUid` is ignored, here.
   ly.sTab.len == 0 and
     ly.kMap.len == 0 and
-    ly.delTree.len == 0 and
     ly.accLeaves.len == 0 and
     ly.stoLeaves.len == 0

@@ -187,8 +186,6 @@ func layersMergeOnto*(src: LayerRef; trg: var LayerObj) =
   for (vid,key) in src.kMap.pairs:
     trg.kMap[vid] = key
   trg.vTop = src.vTop
-  for rvid in src.delTree:
-    trg.delTree.add rvid
   for (accPath,leafVtx) in src.accLeaves.pairs:
     trg.accLeaves[accPath] = leafVtx
   for (mixPath,leafVtx) in src.stoLeaves.pairs:
@@ -207,7 +204,6 @@ func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
     sTab: layers[0].sTab.dup,          # explicit dup for ref values
     kMap: layers[0].kMap,
     vTop: layers[^1].vTop,
-    delTree: layers[0].delTree,
     accLeaves: layers[0].accLeaves,
     stoLeaves: layers[0].stoLeaves)

@@ -217,8 +213,6 @@ func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
       result.sTab[vid] = vtx
     for (vid,key) in layers[n].kMap.pairs:
       result.kMap[vid] = key
-    for rvid in layers[n].delTree:
-      result.delTree.add rvid
     for (accPath,vtx) in layers[n].accLeaves.pairs:
       result.accLeaves[accPath] = vtx
     for (mixPath,vtx) in layers[n].stoLeaves.pairs:

From ef1bab0802d764b87f7b7dd51b0d34c1721c0905 Mon Sep 17 00:00:00 2001
From: Jacek Sieka
Date: Mon, 2 Sep 2024 16:03:10 +0200
Subject: [PATCH 2/2] avoid some trivial memory allocations (#2587)

* pre-allocate `blobify` data and remove redundant error handling (cannot
  fail on correct data)
* use threadvar for temporary storage when decoding rdb, avoiding closure env
* speed up database walkers by avoiding many temporaries

~5% perf improvement on block import, 100x on database iteration (useful
for building analysis tooling)
---
 nimbus/db/aristo/aristo_blobify.nim           | 50 ++++++++---------
 nimbus/db/aristo/aristo_compute.nim           |  4 +-
 nimbus/db/aristo/aristo_desc/desc_nibbles.nim | 31 +++++++-----
 nimbus/db/aristo/aristo_init/memory_db.nim    |  9 +---
 nimbus/db/aristo/aristo_init/rocks_db.nim     |  6 +--
 .../aristo/aristo_init/rocks_db/rdb_get.nim   | 20 ++++----
 .../aristo/aristo_init/rocks_db/rdb_put.nim   |  7 +--
 .../aristo/aristo_init/rocks_db/rdb_walk.nim  | 36 ++++++++++---
 nimbus/db/aristo/aristo_serialise.nim         |  8 +--
 tests/test_aristo/test_blobify.nim            | 10 ++--
 tests/test_aristo/test_portal_proof.nim       |  2 +-
 vendor/nim-stew                               |  2 +-
 12 files changed, 100 insertions(+), 85 deletions(-)

diff --git a/nimbus/db/aristo/aristo_blobify.nim b/nimbus/db/aristo/aristo_blobify.nim
index 493a11497b..7245a6cebc 100644
--- a/nimbus/db/aristo/aristo_blobify.nim
+++ b/nimbus/db/aristo/aristo_blobify.nim
@@ -203,12 +203,12 @@ proc blobifyTo*(vtx: VertexRef; data: var Blob): Result[void,AristoError] =
         if vtx.ePfx.len > 0:
           vtx.ePfx.toHexPrefix(isleaf = false)
         else:
-          @[]
+          default(HexPrefixBuf)
       psLen = pSegm.len.byte
     if 33 < psLen:
       return err(BlobifyExtPathOverflow)

-    data &= pSegm
+    data &= pSegm.data()
     data &= lens.toBytesBE
     data &= [0x80u8 or psLen]

@@ -219,16 +219,16 @@ proc blobifyTo*(vtx: VertexRef; data: var Blob): Result[void,AristoError] =
     if psLen == 0 or 33 < psLen:
       return err(BlobifyLeafPathOverflow)
     vtx.lData.blobifyTo(data)
-    data &= pSegm
+    data &= pSegm.data()
     data &= [0xC0u8 or psLen]

   ok()

-proc blobify*(vtx: VertexRef): Result[Blob, AristoError] =
+proc blobify*(vtx: VertexRef): Blob =
   ## Variant of `blobify()`
-  var data: Blob
-  ? vtx.blobifyTo data
-  ok(move(data))
+  result = newSeqOfCap[byte](128)
+  if vtx.blobifyTo(result).isErr:
+    result.setLen(0) # blobify only fails on invalid vertices

 proc blobifyTo*(lSst: SavedState; data: var Blob): Result[void,AristoError] =
   ## Serialise a last saved state record
@@ -246,45 +246,48 @@ proc blobify*(lSst: SavedState): Result[Blob,AristoError] =

 # -------------
 proc deblobify(
     data: openArray[byte];
-    T: type LeafPayload;
-      ): Result[LeafPayload,AristoError] =
+    pyl: var LeafPayload;
+      ): Result[void,AristoError] =
   if data.len == 0:
-    return ok LeafPayload(pType: RawData)
+    pyl = LeafPayload(pType: RawData)
+    return ok()

   let mask = data[^1]
   if (mask and 0x10) > 0: # unstructured payload
-    return ok LeafPayload(pType: RawData, rawBlob: data[0 .. ^2])
+    pyl = LeafPayload(pType: RawData, rawBlob: data[0 .. ^2])
+    return ok()

   if (mask and 0x20) > 0: # Slot storage data
-    return ok LeafPayload(
+    pyl = LeafPayload(
       pType: StoData,
       stoData: ?deblobify(data.toOpenArray(0, data.len - 2), UInt256))
+    return ok()

+  pyl = LeafPayload(pType: AccountData)
   var
-    pAcc = LeafPayload(pType: AccountData)
     start = 0
     lens = uint16.fromBytesBE(data.toOpenArray(data.len - 3, data.len - 2))

   if (mask and 0x01) > 0:
     let len = lens and 0b111
-    pAcc.account.nonce = ? load64(data, start, int(len + 1))
+    pyl.account.nonce = ? load64(data, start, int(len + 1))

   if (mask and 0x02) > 0:
     let len = (lens shr 3) and 0b11111
-    pAcc.account.balance = ? load256(data, start, int(len + 1))
+    pyl.account.balance = ? load256(data, start, int(len + 1))

   if (mask and 0x04) > 0:
     let len = (lens shr 8) and 0b111
-    pAcc.stoID = (true, VertexID(? load64(data, start, int(len + 1))))
+    pyl.stoID = (true, VertexID(? load64(data, start, int(len + 1))))

   if (mask and 0x08) > 0:
     if data.len() < start + 32:
       return err(DeblobCodeLenUnsupported)
-    discard pAcc.account.codeHash.data.copyFrom(data.toOpenArray(start, start + 31))
+    discard pyl.account.codeHash.data.copyFrom(data.toOpenArray(start, start + 31))
   else:
-    pAcc.account.codeHash = EMPTY_CODE_HASH
+    pyl.account.codeHash = EMPTY_CODE_HASH

-  ok(pAcc)
+  ok()

 proc deblobify*(
     record: openArray[byte];
@@ -336,11 +339,12 @@ proc deblobify*(
         NibblesBuf.fromHexPrefix record.toOpenArray(pLen, rLen-1)
       if not isLeaf:
         return err(DeblobLeafGotExtPrefix)
-      let pyl = ? record.toOpenArray(0, pLen - 1).deblobify(LeafPayload)
-      VertexRef(
+      let vtx = VertexRef(
         vType: Leaf,
-        lPfx: pathSegment,
-        lData: pyl)
+        lPfx: pathSegment)
+
+      ? record.toOpenArray(0, pLen - 1).deblobify(vtx.lData)
+      vtx
     else:
       return err(DeblobUnknown)

diff --git a/nimbus/db/aristo/aristo_compute.nim b/nimbus/db/aristo/aristo_compute.nim
index e3c26d3f64..46a02f1c96 100644
--- a/nimbus/db/aristo/aristo_compute.nim
+++ b/nimbus/db/aristo/aristo_compute.nim
@@ -68,7 +68,7 @@ proc computeKeyImpl(
   case vtx.vType:
   of Leaf:
     writer.startList(2)
-    writer.append(vtx.lPfx.toHexPrefix(isLeaf = true))
+    writer.append(vtx.lPfx.toHexPrefix(isLeaf = true).data())

     case vtx.lData.pType
     of AccountData:
@@ -111,7 +111,7 @@ proc computeKeyImpl(
       writeBranch(bwriter)

       writer.startList(2)
-      writer.append(vtx.ePfx.toHexPrefix(isleaf = false))
+      writer.append(vtx.ePfx.toHexPrefix(isleaf = false).data())
       writer.append(bwriter.finish().digestTo(HashKey))
     else:
       writeBranch(writer)
diff --git a/nimbus/db/aristo/aristo_desc/desc_nibbles.nim b/nimbus/db/aristo/aristo_desc/desc_nibbles.nim
index 58ecc97088..48f67633dd 100644
--- a/nimbus/db/aristo/aristo_desc/desc_nibbles.nim
+++ b/nimbus/db/aristo/aristo_desc/desc_nibbles.nim
@@ -8,15 +8,20 @@
 # at your option. This file may not be copied, modified, or distributed
 # except according to those terms.

-import stew/arrayops
+import stew/[arraybuf, arrayops]

-type NibblesBuf* = object
-  ## Allocation-free type for storing up to 64 4-bit nibbles, as seen in the
-  ## Ethereum MPT
-  bytes: array[32, byte]
-  ibegin, iend: int8
-  # Where valid nibbles can be found - we use indices here to avoid copies
-  # when slicing - iend not inclusive
+export arraybuf
+
+type
+  NibblesBuf* = object
+    ## Allocation-free type for storing up to 64 4-bit nibbles, as seen in the
+    ## Ethereum MPT
+    bytes: array[32, byte]
+    ibegin, iend: int8
+    # Where valid nibbles can be found - we use indices here to avoid copies
+    # when slicing - iend not inclusive
+
+  HexPrefixBuf* = ArrayBuf[33, byte]

 func high*(T: type NibblesBuf): int =
   63
@@ -61,7 +66,7 @@ func `$`*(r: NibblesBuf): string =
     const chars = "0123456789abcdef"
     result.add chars[r[i]]

-func slice*(r: NibblesBuf, ibegin: int, iend = -1): NibblesBuf =
+func slice*(r: NibblesBuf, ibegin: int, iend = -1): NibblesBuf {.noinit.} =
   result.bytes = r.bytes
   result.ibegin = r.ibegin + ibegin.int8
   let e =
@@ -75,7 +80,7 @@ func slice*(r: NibblesBuf, ibegin: int, iend = -1): NibblesBuf =
 template writeFirstByte(nibbleCountExpr) {.dirty.} =
   let nibbleCount = nibbleCountExpr
   var oddnessFlag = (nibbleCount and 1) != 0
-  newSeq(result, (nibbleCount div 2) + 1)
+  result.setLen((nibbleCount div 2) + 1)
   result[0] = byte((int(isLeaf) * 2 + int(oddnessFlag)) shl 4)
   var writeHead = 0

@@ -89,11 +94,11 @@ template writeNibbles(r) {.dirty.} =
       result[writeHead] = nextNibble shl 4
     oddnessFlag = not oddnessFlag

-func toHexPrefix*(r: NibblesBuf, isLeaf = false): seq[byte] =
+func toHexPrefix*(r: NibblesBuf, isLeaf = false): HexPrefixBuf =
   writeFirstByte(r.len)
   writeNibbles(r)

-func toHexPrefix*(r1, r2: NibblesBuf, isLeaf = false): seq[byte] =
+func toHexPrefix*(r1, r2: NibblesBuf, isLeaf = false): HexPrefixBuf =
   writeFirstByte(r1.len + r2.len)
   writeNibbles(r1)
   writeNibbles(r2)
@@ -131,7 +136,7 @@ func fromHexPrefix*(
   else:
     result.isLeaf = false

-func `&`*(a, b: NibblesBuf): NibblesBuf =
+func `&`*(a, b: NibblesBuf): NibblesBuf {.noinit.} =
   for i in 0 ..< a.len:
     result[i] = a[i]

diff --git a/nimbus/db/aristo/aristo_init/memory_db.nim b/nimbus/db/aristo/aristo_init/memory_db.nim
index a90773ceb3..2e7c95a7a3 100644
--- a/nimbus/db/aristo/aristo_init/memory_db.nim
+++ b/nimbus/db/aristo/aristo_init/memory_db.nim
@@ -133,14 +133,7 @@ proc putVtxFn(db: MemBackendRef): PutVtxFn =
       let hdl = hdl.getSession db
       if hdl.error.isNil:
         if vtx.isValid:
-          let rc = vtx.blobify()
-          if rc.isErr:
-            hdl.error = TypedPutHdlErrRef(
-              pfx: VtxPfx,
-              vid: rvid.vid,
-              code: rc.error)
-            return
-          hdl.sTab[rvid] = rc.value
+          hdl.sTab[rvid] = vtx.blobify()
         else:
           hdl.sTab[rvid] = EmptyBlob

diff --git a/nimbus/db/aristo/aristo_init/rocks_db.nim b/nimbus/db/aristo/aristo_init/rocks_db.nim
index 702d359f1e..15c51c054f 100644
--- a/nimbus/db/aristo/aristo_init/rocks_db.nim
+++ b/nimbus/db/aristo/aristo_init/rocks_db.nim
@@ -312,10 +312,8 @@ iterator walkVtx*(
     be: RdbBackendRef;
       ): tuple[evid: RootedVertexID, vtx: VertexRef] =
   ## Variant of `walk()` iteration over the vertex sub-table.
-  for (rvid, data) in be.rdb.walkVtx:
-    let rc = data.deblobify VertexRef
-    if rc.isOk:
-      yield (rvid, rc.value)
+  for (rvid, vtx) in be.rdb.walkVtx:
+    yield (rvid, vtx)

 iterator walkKey*(
     be: RdbBackendRef;
diff --git a/nimbus/db/aristo/aristo_init/rocks_db/rdb_get.nim b/nimbus/db/aristo/aristo_init/rocks_db/rdb_get.nim
index b36a279e2e..f3fad1f547 100644
--- a/nimbus/db/aristo/aristo_init/rocks_db/rdb_get.nim
+++ b/nimbus/db/aristo/aristo_init/rocks_db/rdb_get.nim
@@ -63,10 +63,10 @@ proc getKey*(
       return ok(move(rc.value))

   # Otherwise fetch from backend database
-  var res: Result[HashKey,(AristoError,string)]
+  # A threadvar is used to avoid allocating an environment for onData
+  var res {.threadvar.}: Opt[HashKey]
   let onData = proc(data: openArray[byte]) =
-    res = HashKey.fromBytes(data).mapErr(proc(): auto =
-      (RdbHashKeyExpected,""))
+    res = HashKey.fromBytes(data)

   let gotData = rdb.keyCol.get(rvid.blobify().data(), onData).valueOr:
     const errSym = RdbBeDriverGetKeyError
@@ -76,9 +76,9 @@ proc getKey*(

   # Correct result if needed
   if not gotData:
-    res = ok(VOID_HASH_KEY)
+    res.ok(VOID_HASH_KEY)
   elif res.isErr():
-    return res # Parsing failed
+    return err((RdbHashKeyExpected,"")) # Parsing failed

   # Update cache and return
   ok rdb.rdKeyLru.lruAppend(rvid.vid, res.value(), RdKeyLruMaxSize)
@@ -93,10 +93,10 @@ proc getVtx*(
       return ok(move(rc.value))

   # Otherwise fetch from backend database
-  var res: Result[VertexRef,(AristoError,string)]
+  # A threadvar is used to avoid allocating an environment for onData
+  var res {.threadvar.}: Result[VertexRef,AristoError]
   let onData = proc(data: openArray[byte]) =
-    res = data.deblobify(VertexRef).mapErr(proc(error: AristoError): auto =
-      (error,""))
+    res = data.deblobify(VertexRef)

   let gotData = rdb.vtxCol.get(rvid.blobify().data(), onData).valueOr:
     const errSym = RdbBeDriverGetVtxError
@@ -105,9 +105,9 @@ proc getVtx*(

   if not gotData:
-    res = ok(VertexRef(nil))
+    res.ok(VertexRef(nil))
   elif res.isErr():
-    return res # Parsing failed
+    return err((res.error(), "Parsing failed"))

   # Update cache and return
   ok rdb.rdVtxLru.lruAppend(rvid.vid, res.value(), RdVtxLruMaxSize)
diff --git a/nimbus/db/aristo/aristo_init/rocks_db/rdb_put.nim b/nimbus/db/aristo/aristo_init/rocks_db/rdb_put.nim
index 640097b9eb..f303191640 100644
--- a/nimbus/db/aristo/aristo_init/rocks_db/rdb_put.nim
+++ b/nimbus/db/aristo/aristo_init/rocks_db/rdb_put.nim
@@ -122,12 +122,7 @@ proc putVtx*(
       ): Result[void,(VertexID,AristoError,string)] =
   let dsc = rdb.session
   if vtx.isValid:
-    let rc = vtx.blobify()
-    if rc.isErr:
-      # Caller must `rollback()` which will flush the `rdVtxLru` cache
-      return err((rvid.vid,rc.error,""))
-
-    dsc.put(rvid.blobify().data(), rc.value, rdb.vtxCol.handle()).isOkOr:
+    dsc.put(rvid.blobify().data(), vtx.blobify(), rdb.vtxCol.handle()).isOkOr:
+      # Caller must `rollback()` which will flush the `rdVtxLru` cache
       const errSym = RdbBeDriverPutVtxError
       when extraTraceMessages:
diff --git a/nimbus/db/aristo/aristo_init/rocks_db/rdb_walk.nim b/nimbus/db/aristo/aristo_init/rocks_db/rdb_walk.nim
index 637c6272d7..ab57dd9a25 100644
--- a/nimbus/db/aristo/aristo_init/rocks_db/rdb_walk.nim
+++ b/nimbus/db/aristo/aristo_init/rocks_db/rdb_walk.nim
@@ -72,8 +72,8 @@ iterator walkKey*(rdb: RdbInst): tuple[rvid: RootedVertexID, data: Blob] =

       yield (rvid, val)

-iterator walkVtx*(rdb: RdbInst): tuple[rvid: RootedVertexID, data: Blob] =
-  ## Walk over key-value pairs of the hash key column of the database.
+iterator walkVtx*(rdb: RdbInst): tuple[rvid: RootedVertexID, data: VertexRef] =
+  ## Walk over key-value pairs of the vertex column of the database.
   ##
   ## Non-decodable entries are ignored.
   ##
@@ -84,12 +84,32 @@ iterator walkVtx*(rdb: RdbInst): tuple[rvid: RootedVertexID, data: Blob] =
       break walkBody
     defer: rit.close()

-    for (key,val) in rit.pairs:
-      if val.len != 0:
-        let rvid = key.deblobify(RootedVertexID).valueOr:
-          continue
-
-        yield (rvid, val)
+    rit.seekToFirst()
+    var key: RootedVertexID
+    var value: VertexRef
+    while rit.isValid():
+      var valid = true
+      rit.key(
+        proc(data: openArray[byte]) =
+          key = deblobify(data, RootedVertexID).valueOr:
+            valid = false
+            default(RootedVertexID)
+      )
+      if valid:
+        rit.value(
+          proc(data: openArray[byte]) =
+            value = deblobify(data, VertexRef).valueOr:
+              valid = false
+              default(VertexRef)
+        )
+
+      # Advance unconditionally, also past non-decodable entries which would
+      # otherwise be revisited forever; `defer` above takes care of `close()`
+      rit.next()
+      if valid:
+        yield (key, value)

 # ------------------------------------------------------------------------------
 # End
diff --git a/nimbus/db/aristo/aristo_serialise.nim b/nimbus/db/aristo/aristo_serialise.nim
index 77b1b9a9c5..13f2810f1f 100644
--- a/nimbus/db/aristo/aristo_serialise.nim
+++ b/nimbus/db/aristo/aristo_serialise.nim
@@ -86,7 +86,7 @@ proc to*(node: NodeRef; T: type seq[Blob]): T =

       var wrx = initRlpWriter()
       wrx.startList(2)
-      wrx.append node.ePfx.toHexPrefix(isleaf = false)
+      wrx.append node.ePfx.toHexPrefix(isleaf = false).data()
       wrx.append brHash

       result.add wrx.finish()
@@ -104,7 +104,7 @@ proc to*(node: NodeRef; T: type seq[Blob]): T =

     var wr = initRlpWriter()
     wr.startList(2)
-    wr.append node.lPfx.toHexPrefix(isleaf = true)
+    wr.append node.lPfx.toHexPrefix(isleaf = true).data()
     wr.append node.lData.serialise(getKey0).value

     result.add (wr.finish())
@@ -127,7 +127,7 @@ proc digestTo*(node: NodeRef; T: type HashKey): T =
       let brHash = wr.finish().digestTo(HashKey)
       wr = initRlpWriter()
       wr.startList(2)
-      wr.append node.ePfx.toHexPrefix(isleaf = false)
+      wr.append node.ePfx.toHexPrefix(isleaf = false).data()
       wr.append brHash

   of Leaf:
@@ -138,7 +138,7 @@ proc digestTo*(node: NodeRef; T: type HashKey): T =
         ok(node.key[0]) # always succeeds

     wr.startList(2)
-    wr.append node.lPfx.toHexPrefix(isleaf = true)
+    wr.append node.lPfx.toHexPrefix(isleaf = true).data()
     wr.append node.lData.serialise(getKey0).value

     wr.finish().digestTo(HashKey)
diff --git a/tests/test_aristo/test_blobify.nim b/tests/test_aristo/test_blobify.nim
index b94ee8dfa1..03d39b1c94 100644
--- a/tests/test_aristo/test_blobify.nim
+++ b/tests/test_aristo/test_blobify.nim
@@ -65,8 +65,8 @@ suite "Aristo blobify":
     )

     check:
-      deblobify(blobify(leafRawData)[], VertexRef)[] == leafRawData
-      deblobify(blobify(leafAccount)[], VertexRef)[] == leafAccount
-      deblobify(blobify(leafStoData)[], VertexRef)[] == leafStoData
-      deblobify(blobify(branch)[], VertexRef)[] == branch
-      deblobify(blobify(extension)[], VertexRef)[] == extension
+      deblobify(blobify(leafRawData), VertexRef)[] == leafRawData
+      deblobify(blobify(leafAccount), VertexRef)[] == leafAccount
+      deblobify(blobify(leafStoData), VertexRef)[] == leafStoData
+      deblobify(blobify(branch), VertexRef)[] == branch
+      deblobify(blobify(extension), VertexRef)[] == extension
diff --git a/tests/test_aristo/test_portal_proof.nim b/tests/test_aristo/test_portal_proof.nim
index 4d6b5db70a..8af4e39b0a 100644
--- a/tests/test_aristo/test_portal_proof.nim
+++ b/tests/test_aristo/test_portal_proof.nim
@@ -115,7 +115,7 @@ func asExtension(b: Blob; path: Hash256): Blob =
     var wr = initRlpWriter()

     wr.startList(2)
-    wr.append NibblesBuf.fromBytes(@[nibble]).slice(1).toHexPrefix(isleaf=false)
+    wr.append NibblesBuf.fromBytes(@[nibble]).slice(1).toHexPrefix(isleaf=false).data()
     wr.append node.listElem(nibble.int).toBytes

     wr.finish()
diff --git a/vendor/nim-stew b/vendor/nim-stew
index 54cc67cbb8..fc09b2e023 160000
--- a/vendor/nim-stew
+++ b/vendor/nim-stew
@@ -1 +1 @@
-Subproject commit 54cc67cbb83f61b6e3168b09701758c5b805120a
+Subproject commit fc09b2e023ab2d73e425f7d15cf94871c7867868
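
A note on the threadvar trick in the rdb_get.nim hunks above: in Nim, a callback only needs a heap-allocated closure environment when it captures surrounding locals, so moving the result slot into a threadvar keeps `onData` capture-free and allocation-free per lookup. A minimal self-contained sketch of the pattern; `getImpl` and `getValue` are illustrative names, not part of the patch:

import std/options

# Stand-in for a callback-style getter such as rocksdb's `get`: it invokes
# `onData` with a buffer that is only valid for the duration of the call.
proc getImpl(onData: proc(data: openArray[byte]) {.gcsafe.}): bool =
  let stored = [byte 1, 2, 3]
  onData(stored)
  true

# Thread-local result slot: the callback writes here instead of capturing a
# local variable, so no closure environment needs to be allocated.
var res {.threadvar.}: Option[seq[byte]]

proc getValue(): Option[seq[byte]] =
  res = none(seq[byte])          # reset stale state from a previous call
  let onData = proc(data: openArray[byte]) =
    res = some(@data)            # copy out of the borrowed buffer

  if getImpl(onData):
    res
  else:
    none(seq[byte])

when isMainModule:
  echo getValue()                # some(@[1, 2, 3])

The trade-off is that the temporary lives for the whole thread lifetime and the helper is not re-entrant, which is acceptable here because each lookup consumes the slot immediately.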
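Similarly, `HexPrefixBuf* = ArrayBuf[33, byte]` in desc_nibbles.nim relies on the fact that a hex-prefix encoding of at most 64 nibbles never exceeds 33 bytes (one header byte plus 32 packed bytes), so a fixed array plus a length field can replace the `seq[byte]` that `toHexPrefix` used to allocate. A toy sketch of that idea; this is not stew's actual `ArrayBuf`, which is generic and has a fuller API:

type SmallBuf = object
  buf: array[33, byte]   # 1 hex-prefix header byte + up to 32 packed bytes
  n: int8                # number of bytes in use

func add(b: var SmallBuf, v: byte) =
  b.buf[b.n] = v         # capacity is fixed, so no growth or heap traffic
  inc b.n

func data(b: SmallBuf): seq[byte] =
  b.buf[0 ..< b.n.int]   # copy out only when a seq is actually required

when isMainModule:
  var b: SmallBuf
  b.add 0x20             # header byte: leaf flag set, even nibble count
  b.add 0xab             # nibbles 0xa and 0xb packed into one byte
  echo b.data()          # @[32, 171]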