Skip to content

Commit

Permalink
Remove RawData from possible leaf payload types (#2794)
Browse files Browse the repository at this point in the history
This kind of data is not used except in tests where it is used only to
create databases that don't match actual usage of aristo.

Removing simplifies future optimizations that can focus on processing
specific leaf types more efficiently.

A casualty of this removal is some test code as well as some proof
generation code that is unused - on the surface, it looks like it should
be possible to port both of these to the more specific data types -
doing so would ensure that a database written by one part of the
codebase can interact with the other - as it stands, there is confusion
on this point since using the proof generation code will result in a
database of a shape that is incompatible with the rest of eth1.
  • Loading branch information
arnetheduck authored Nov 2, 2024
1 parent a5541a5 commit 58cde36
Show file tree
Hide file tree
Showing 18 changed files with 352 additions and 494 deletions.
20 changes: 10 additions & 10 deletions TracerTests.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,16 +2,16 @@ TracerTests
===
## TracerTests
```diff
+ block46147.json OK
+ block46400.json OK
+ block46402.json OK
+ block47205.json OK
+ block48712.json OK
+ block48915.json OK
+ block49018.json OK
+ block97.json OK
block46147.json Skip
block46400.json Skip
block46402.json Skip
block47205.json Skip
block48712.json Skip
block48915.json Skip
block49018.json Skip
block97.json Skip
```
OK: 8/8 Fail: 0/8 Skip: 0/8
OK: 0/8 Fail: 0/8 Skip: 8/8

---TOTAL---
OK: 8/8 Fail: 0/8 Skip: 0/8
OK: 0/8 Fail: 0/8 Skip: 8/8
1 change: 0 additions & 1 deletion nimbus/db/aristo.nim
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,6 @@ export
leftPairs, # iterators
rightPairs,
rightPairsAccount,
rightPairsGeneric,
rightPairsStorage

import
Expand Down
55 changes: 24 additions & 31 deletions nimbus/db/aristo/aristo_blobify.nim
Original file line number Diff line number Diff line change
Expand Up @@ -124,10 +124,6 @@ proc load256(data: openArray[byte]; start: var int, len: int): Result[UInt256,Ar

proc blobifyTo*(pyl: LeafPayload, data: var seq[byte]) =
case pyl.pType
of RawData:
data &= pyl.rawBlob
data &= [0x10.byte]

of AccountData:
# `lens` holds `len-1` since `mask` filters out the zero-length case (which
# allows saving 1 bit per length)
Expand Down Expand Up @@ -248,45 +244,42 @@ proc deblobify(
pyl: var LeafPayload;
): Result[void,AristoError] =
if data.len == 0:
pyl = LeafPayload(pType: RawData)
return ok()
return err(DeblobVtxTooShort)

let mask = data[^1]
if (mask and 0x10) > 0: # unstructured payload
pyl = LeafPayload(pType: RawData, rawBlob: data[0 .. ^2])
return ok()

if (mask and 0x20) > 0: # Slot storage data
pyl = LeafPayload(
pType: StoData,
stoData: ?deblobify(data.toOpenArray(0, data.len - 2), UInt256))
return ok()
ok()
elif (mask and 0xf0) == 0: # Only account fields set
pyl = LeafPayload(pType: AccountData)
var
start = 0
lens = uint16.fromBytesBE(data.toOpenArray(data.len - 3, data.len - 2))

pyl = LeafPayload(pType: AccountData)
var
start = 0
lens = uint16.fromBytesBE(data.toOpenArray(data.len - 3, data.len - 2))
if (mask and 0x01) > 0:
let len = lens and 0b111
pyl.account.nonce = ? load64(data, start, int(len + 1))

if (mask and 0x01) > 0:
let len = lens and 0b111
pyl.account.nonce = ? load64(data, start, int(len + 1))
if (mask and 0x02) > 0:
let len = (lens shr 3) and 0b11111
pyl.account.balance = ? load256(data, start, int(len + 1))

if (mask and 0x02) > 0:
let len = (lens shr 3) and 0b11111
pyl.account.balance = ? load256(data, start, int(len + 1))
if (mask and 0x04) > 0:
let len = (lens shr 8) and 0b111
pyl.stoID = (true, VertexID(? load64(data, start, int(len + 1))))

if (mask and 0x04) > 0:
let len = (lens shr 8) and 0b111
pyl.stoID = (true, VertexID(? load64(data, start, int(len + 1))))
if (mask and 0x08) > 0:
if data.len() < start + 32:
return err(DeblobCodeLenUnsupported)
discard pyl.account.codeHash.data.copyFrom(data.toOpenArray(start, start + 31))
else:
pyl.account.codeHash = EMPTY_CODE_HASH

if (mask and 0x08) > 0:
if data.len() < start + 32:
return err(DeblobCodeLenUnsupported)
discard pyl.account.codeHash.data.copyFrom(data.toOpenArray(start, start + 31))
ok()
else:
pyl.account.codeHash = EMPTY_CODE_HASH

ok()
err(DeblobUnknown)

proc deblobifyType*(record: openArray[byte]; T: type VertexRef):
Result[VertexType, AristoError] =
Expand Down
4 changes: 0 additions & 4 deletions nimbus/db/aristo/aristo_compute.nim
Original file line number Diff line number Diff line change
Expand Up @@ -249,8 +249,6 @@ proc computeKeyImpl(
storageRoot: skey.to(Hash32),
codeHash: vtx.lData.account.codeHash,
)
of RawData:
vtx.lData.rawBlob
of StoData:
# TODO avoid memory allocation when encoding storage data
rlp.encode(vtx.lData.stoData)
Expand Down Expand Up @@ -371,8 +369,6 @@ proc computeLeafKeysImpl(
codeHash: vtx.lData.account.codeHash,
)
writer2.finish()
of RawData:
vtx.lData.rawBlob
of StoData:
writer2.clear()
writer2.append(vtx.lData.stoData)
Expand Down
2 changes: 0 additions & 2 deletions nimbus/db/aristo/aristo_debug.nim
Original file line number Diff line number Diff line change
Expand Up @@ -180,8 +180,6 @@ func ppAriAccount(a: AristoAccount): string =

func ppPayload(p: LeafPayload, db: AristoDbRef): string =
case p.pType:
of RawData:
result &= p.rawBlob.toHex.squeeze(hex=true)
of AccountData:
result = "(" & p.account.ppAriAccount() & "," & p.stoID.ppVid & ")"
of StoData:
Expand Down
11 changes: 0 additions & 11 deletions nimbus/db/aristo/aristo_desc/desc_structural.nim
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,6 @@ type

PayloadType* = enum
## Type of leaf data.
RawData ## Generic data
AccountData ## `Aristo account` with vertex IDs links
StoData ## Slot storage data

Expand All @@ -58,10 +57,7 @@ type
LeafPayload* = object
## The payload type depends on the sub-tree used. The `VertexID(1)` rooted
## sub-tree only has `AccountData` type payload, stoID-based have StoData
## while generic have RawData
case pType*: PayloadType
of RawData:
rawBlob*: seq[byte] ## Opaque data, default value
of AccountData:
account*: AristoAccount
stoID*: StorageID ## Storage vertex ID (if any)
Expand Down Expand Up @@ -157,9 +153,6 @@ proc `==`*(a, b: LeafPayload): bool =
if a.pType != b.pType:
return false
case a.pType:
of RawData:
if a.rawBlob != b.rawBlob:
return false
of AccountData:
if a.account != b.account or
a.stoID != b.stoID:
Expand Down Expand Up @@ -208,10 +201,6 @@ proc `==`*(a, b: NodeRef): bool =
func dup*(pld: LeafPayload): LeafPayload =
## Duplicate payload.
case pld.pType:
of RawData:
LeafPayload(
pType: RawData,
rawBlob: pld.rawBlob)
of AccountData:
LeafPayload(
pType: AccountData,
Expand Down
44 changes: 0 additions & 44 deletions nimbus/db/aristo/aristo_fetch.nim
Original file line number Diff line number Diff line change
Expand Up @@ -23,18 +23,6 @@ import
# Private functions
# ------------------------------------------------------------------------------

func mustBeGeneric(
root: VertexID;
): Result[void,AristoError] =
## Verify that `root` is neither from an accounts tree nor a storage tree.
if not root.isValid:
return err(FetchRootVidMissing)
elif root == VertexID(1):
return err(FetchAccRootNotAccepted)
elif LEAST_FREE_VID <= root.distinctBase:
return err(FetchStoRootNotAccepted)
ok()

proc retrieveLeaf(
db: AristoDbRef;
root: VertexID;
Expand Down Expand Up @@ -260,38 +248,6 @@ proc hasPathAccount*(
##
db.hasAccountPayload(accPath)

proc fetchGenericData*(
db: AristoDbRef;
root: VertexID;
path: openArray[byte];
): Result[seq[byte],AristoError] =
## For a generic sub-tree starting at `root`, fetch the data record
## indexed by `path`.
##
? root.mustBeGeneric()
let pyl = ? db.retrieveLeaf(root, path)
assert pyl.lData.pType == RawData # debugging only
ok pyl.lData.rawBlob

proc fetchGenericState*(
db: AristoDbRef;
root: VertexID;
updateOk: bool;
): Result[Hash32,AristoError] =
## Fetch the Merkle hash of the argument `root`.
db.retrieveMerkleHash(root, updateOk)

proc hasPathGeneric*(
db: AristoDbRef;
root: VertexID;
path: openArray[byte];
): Result[bool,AristoError] =
## For a generic sub-tree starting at `root` and indexed by `path`, query
## whether this record exists on the database.
##
? root.mustBeGeneric()
db.hasPayload(root, path)

proc fetchStorageData*(
db: AristoDbRef;
accPath: Hash32;
Expand Down
31 changes: 0 additions & 31 deletions nimbus/db/aristo/aristo_merge.nim
Original file line number Diff line number Diff line change
Expand Up @@ -202,37 +202,6 @@ proc mergeAccountRecord*(

ok true

proc mergeGenericData*(
db: AristoDbRef; # Database, top layer
root: VertexID; # MPT state root
path: openArray[byte]; # Leaf item to add to the database
data: openArray[byte]; # Raw data payload value
): Result[bool,AristoError] =
## Variant of `mergeXXX()` for generic sub-trees, i.e. for arguments
## `root` greater than `VertexID(1)` and smaller than `LEAST_FREE_VID`.
##
## On success, the function returns `true` if the `data` argument was merged
## into the database or updated, and `false` if it was on the database
## already.
##
# Verify that `root` is neither an accounts tree nor a storage tree.
if not root.isValid:
return err(MergeRootVidMissing)
elif root == VertexID(1):
return err(MergeAccRootNotAccepted)
elif LEAST_FREE_VID <= root.distinctBase:
return err(MergeStoRootNotAccepted)

let
pyl = LeafPayload(pType: RawData, rawBlob: @data)

discard db.mergePayloadImpl(root, path, Opt.none(VertexRef), pyl).valueOr:
if error == MergeNoAction:
return ok false
return err error

ok true

proc mergeStorageData*(
db: AristoDbRef; # Database, top layer
accPath: Hash32; # Needed for accounts payload
Expand Down
11 changes: 0 additions & 11 deletions nimbus/db/aristo/aristo_nearby.nim
Original file line number Diff line number Diff line change
Expand Up @@ -439,17 +439,6 @@ iterator rightPairsAccount*(
for (lty,pyl) in db.rightPairs LeafTie(root: VertexID(1), path: start):
yield (lty.path, pyl.account)

iterator rightPairsGeneric*(
db: AristoDbRef; # Database layer
root: VertexID; # Generic root (different from VertexID)
start = low(PathID); # Before or at first value
): (PathID,seq[byte]) =
## Variant of `rightPairs()` for a generic tree
# Verify that `root` is neither from an accounts tree nor a storage tree.
if VertexID(1) < root and root.distinctBase < LEAST_FREE_VID:
for (lty,pyl) in db.rightPairs LeafTie(root: VertexID(1), path: start):
yield (lty.path, pyl.rawBlob)

iterator rightPairsStorage*(
db: AristoDbRef; # Database layer
accPath: Hash32; # Account the storage data belong to
Expand Down
31 changes: 0 additions & 31 deletions nimbus/db/aristo/aristo_part.nim
Original file line number Diff line number Diff line change
Expand Up @@ -366,37 +366,6 @@ proc partReRoot*(
# Public merge functions on partial tree database
# ------------------------------------------------------------------------------

proc partMergeGenericData*(
ps: PartStateRef;
root: VertexID; # MPT state root
path: openArray[byte]; # Leaf item to add to the database
data: openArray[byte]; # Raw data payload value
): Result[bool,AristoError] =
## ..
let mergeError = block:
# Opportunistically try whether it just works
let rc = ps.db.mergeGenericData(root, path, data)
if rc.isOk or rc.error != GetVtxNotFound:
return rc
rc.error

# Otherwise clean the way removing blind link and retry
let
ctx = ps.ctxMergeBegin(root, path).valueOr:
let ctxErr = if error == PartCtxNotAvailable: mergeError else: error
return err(ctxErr)
rc = ps.db.mergeGenericData(root, path, data)

# Evaluate result => commit/rollback
if rc.isErr:
? ctx.ctxMergeRollback()
return rc
if not ? ctx.ctxMergeCommit():
return err(PartVtxSlotWasNotModified)

ok(rc.value)


proc partMergeAccountRecord*(
ps: PartStateRef;
accPath: Hash32; # Even nibbled byte path
Expand Down
21 changes: 12 additions & 9 deletions nimbus/db/aristo/aristo_part/part_helpers.nim
Original file line number Diff line number Diff line change
Expand Up @@ -64,14 +64,15 @@ proc read(rlp: var Rlp; T: type PrfNode): T {.gcsafe, raises: [RlpError].} =
let (isLeaf, pathSegment) = NibblesBuf.fromHexPrefix blobs[0]
if isLeaf:
return PrfNode(
prfType: ignore,

vtx: VertexRef(
vType: Leaf,
pfx: pathSegment,
lData: LeafPayload(
pType: RawData,
rawBlob: blobs[1])))
prfType: ignore, )

# TODO interpret the blob (?)
# vtx: VertexRef(
# vType: Leaf,
# pfx: pathSegment,
# lData: LeafPayload(
# pType: RawData,
# rawBlob: blobs[1])))
else:
var node = PrfNode(
prfType: isExtension,
Expand Down Expand Up @@ -145,7 +146,9 @@ func toNodesTab*(
# Decode payload to dedicated format for storage or accounts
var pyl: PrfPayload
try:
pyl = rlp.decode(nd.vtx.lData.rawBlob, PrfPayload)
# TODO interpret the blob
# pyl = rlp.decode(nd.vtx.lData.rawBlob, PrfPayload)
pyl = PrfPayload(prfType: isError, error: PartRlpPayloadException)
except RlpError:
pyl = PrfPayload(prfType: isError, error: PartRlpPayloadException)

Expand Down
2 changes: 0 additions & 2 deletions nimbus/db/aristo/aristo_serialise.nim
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,6 @@ proc serialise(
## of account type, otherwise pass the data as is.
##
case pyl.pType:
of RawData:
ok pyl.rawBlob
of AccountData:
let key = block:
if pyl.stoID.isValid:
Expand Down
2 changes: 0 additions & 2 deletions tests/test_aristo/test_blobify.nim
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@ import unittest2, ../../nimbus/db/aristo/aristo_blobify
suite "Aristo blobify":
test "VertexRef roundtrip":
let
leafRawData = VertexRef(vType: Leaf, lData: LeafPayload(pType: RawData))
leafAccount = VertexRef(vType: Leaf, lData: LeafPayload(pType: AccountData))
leafStoData =
VertexRef(vType: Leaf, lData: LeafPayload(pType: StoData, stoData: 42.u256))
Expand Down Expand Up @@ -65,7 +64,6 @@ suite "Aristo blobify":
)

check:
deblobify(blobify(leafRawData), VertexRef)[] == leafRawData
deblobify(blobify(leafAccount), VertexRef)[] == leafAccount
deblobify(blobify(leafStoData), VertexRef)[] == leafStoData
deblobify(blobify(branch), VertexRef)[] == branch
Expand Down
Loading

0 comments on commit 58cde36

Please sign in to comment.