avoid some trivial memory allocations (#2587)
* pre-allocate `blobify` data and remove redundant error handling
(cannot fail on correct data)
* use threadvar for temporary storage when decoding rdb, avoiding
closure env (see the sketch after this message)
* speed up database walkers by avoiding many temporaries

~5% perf improvement on block import, 100x on database iteration (useful
for building analysis tooling)
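
The closure note in the second bullet is the subtle one: a callback that assigns to a local variable it captures forces Nim to heap-allocate a closure environment on every call, whereas assigning to a `{.threadvar.}` lets the callback capture nothing. A minimal, self-contained sketch of that pattern - the names here are illustrative, not the commit's code:

import std/options

# The threadvar lives outside any proc, so the callback below captures
# nothing and no closure environment needs to be allocated for it.
var tmp {.threadvar.}: Option[int]

proc withData(cb: proc(data: openArray[byte])) =
  cb([1'u8, 2, 3]) # stand-in for a database `get` invoking its callback

proc firstByte(): Option[int] =
  tmp = none(int)
  withData(proc(data: openArray[byte]) =
    tmp = some(int(data[0]))) # write the threadvar instead of a capture
  tmp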
arnetheduck committed Sep 2, 2024
1 parent a25ea63 commit ef1bab0
Showing 12 changed files with 100 additions and 85 deletions.
50 changes: 27 additions & 23 deletions nimbus/db/aristo/aristo_blobify.nim
@@ -203,12 +203,12 @@ proc blobifyTo*(vtx: VertexRef; data: var Blob): Result[void,AristoError] =
if vtx.ePfx.len > 0:
vtx.ePfx.toHexPrefix(isleaf = false)
else:
@[]
default(HexPrefixBuf)
psLen = pSegm.len.byte
if 33 < psLen:
return err(BlobifyExtPathOverflow)

data &= pSegm
data &= pSegm.data()
data &= lens.toBytesBE
data &= [0x80u8 or psLen]

@@ -219,16 +219,16 @@ proc blobifyTo*(vtx: VertexRef; data: var Blob): Result[void,AristoError] =
if psLen == 0 or 33 < psLen:
return err(BlobifyLeafPathOverflow)
vtx.lData.blobifyTo(data)
data &= pSegm
data &= pSegm.data()
data &= [0xC0u8 or psLen]

ok()

proc blobify*(vtx: VertexRef): Result[Blob, AristoError] =
proc blobify*(vtx: VertexRef): Blob =
## Variant of `blobify()`
var data: Blob
? vtx.blobifyTo data
ok(move(data))
result = newSeqOfCap[byte](128)
if vtx.blobifyTo(result).isErr:
result.setLen(0) # blobify only fails on invalid vertices

proc blobifyTo*(lSst: SavedState; data: var Blob): Result[void,AristoError] =
## Serialise a last saved state record
@@ -246,45 +246,48 @@ proc blobify*(lSst: SavedState): Result[Blob,AristoError] =
# -------------
proc deblobify(
data: openArray[byte];
T: type LeafPayload;
): Result[LeafPayload,AristoError] =
pyl: var LeafPayload;
): Result[void,AristoError] =
if data.len == 0:
return ok LeafPayload(pType: RawData)
pyl = LeafPayload(pType: RawData)
return ok()

let mask = data[^1]
if (mask and 0x10) > 0: # unstructured payload
return ok LeafPayload(pType: RawData, rawBlob: data[0 .. ^2])
pyl = LeafPayload(pType: RawData, rawBlob: data[0 .. ^2])
return ok()

if (mask and 0x20) > 0: # Slot storage data
return ok LeafPayload(
pyl = LeafPayload(
pType: StoData,
stoData: ?deblobify(data.toOpenArray(0, data.len - 2), UInt256))
return ok()

pyl = LeafPayload(pType: AccountData)
var
pAcc = LeafPayload(pType: AccountData)
start = 0
lens = uint16.fromBytesBE(data.toOpenArray(data.len - 3, data.len - 2))

if (mask and 0x01) > 0:
let len = lens and 0b111
pAcc.account.nonce = ? load64(data, start, int(len + 1))
pyl.account.nonce = ? load64(data, start, int(len + 1))

if (mask and 0x02) > 0:
let len = (lens shr 3) and 0b11111
pAcc.account.balance = ? load256(data, start, int(len + 1))
pyl.account.balance = ? load256(data, start, int(len + 1))

if (mask and 0x04) > 0:
let len = (lens shr 8) and 0b111
pAcc.stoID = (true, VertexID(? load64(data, start, int(len + 1))))
pyl.stoID = (true, VertexID(? load64(data, start, int(len + 1))))

if (mask and 0x08) > 0:
if data.len() < start + 32:
return err(DeblobCodeLenUnsupported)
discard pAcc.account.codeHash.data.copyFrom(data.toOpenArray(start, start + 31))
discard pyl.account.codeHash.data.copyFrom(data.toOpenArray(start, start + 31))
else:
pAcc.account.codeHash = EMPTY_CODE_HASH
pyl.account.codeHash = EMPTY_CODE_HASH

ok(pAcc)
ok()

proc deblobify*(
record: openArray[byte];
@@ -336,11 +336,12 @@ proc deblobify*(
NibblesBuf.fromHexPrefix record.toOpenArray(pLen, rLen-1)
if not isLeaf:
return err(DeblobLeafGotExtPrefix)
let pyl = ? record.toOpenArray(0, pLen - 1).deblobify(LeafPayload)
VertexRef(
let vtx = VertexRef(
vType: Leaf,
lPfx: pathSegment,
lData: pyl)
lPfx: pathSegment)

? record.toOpenArray(0, pLen - 1).deblobify(vtx.lData)
vtx

else:
return err(DeblobUnknown)
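Two patterns recur in the changes above: output buffers are pre-allocated (`newSeqOfCap[byte](128)`), and `deblobify` now writes into a caller-supplied `var` parameter instead of returning a freshly built value, so the payload is decoded in place. A hedged sketch of the out-parameter style, using the same nim-results `Result` type the file uses; the `Payload` type and `decode` name are hypothetical:

import results

type Payload = object
  raw: seq[byte]

# Decoding into `dst` in place spares a temporary and a move per call.
proc decode(src: openArray[byte]; dst: var Payload): Result[void, string] =
  if src.len == 0:
    return err("empty input")
  dst.raw = @src
  ok()

var p: Payload
doAssert decode([1'u8, 2, 3], p).isOk and p.raw.len == 3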
4 changes: 2 additions & 2 deletions nimbus/db/aristo/aristo_compute.nim
@@ -68,7 +68,7 @@ proc computeKeyImpl(
case vtx.vType:
of Leaf:
writer.startList(2)
writer.append(vtx.lPfx.toHexPrefix(isLeaf = true))
writer.append(vtx.lPfx.toHexPrefix(isLeaf = true).data())

case vtx.lData.pType
of AccountData:
@@ -111,7 +111,7 @@ proc computeKeyImpl(
writeBranch(bwriter)

writer.startList(2)
writer.append(vtx.ePfx.toHexPrefix(isleaf = false))
writer.append(vtx.ePfx.toHexPrefix(isleaf = false).data())
writer.append(bwriter.finish().digestTo(HashKey))
else:
writeBranch(writer)
31 changes: 18 additions & 13 deletions nimbus/db/aristo/aristo_desc/desc_nibbles.nim
@@ -8,15 +8,20 @@
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

import stew/arrayops
import stew/[arraybuf, arrayops]

type NibblesBuf* = object
## Allocation-free type for storing up to 64 4-bit nibbles, as seen in the
## Ethereum MPT
bytes: array[32, byte]
ibegin, iend: int8
# Where valid nibbles can be found - we use indices here to avoid copies
# when slicing - iend not inclusive
export arraybuf

type
NibblesBuf* = object
## Allocation-free type for storing up to 64 4-bit nibbles, as seen in the
## Ethereum MPT
bytes: array[32, byte]
ibegin, iend: int8
# Where valid nibbles can be found - we use indices here to avoid copies
# when slicing - iend not inclusive

HexPrefixBuf* = ArrayBuf[33, byte]

func high*(T: type NibblesBuf): int =
63
@@ -61,7 +66,7 @@ func `$`*(r: NibblesBuf): string =
const chars = "0123456789abcdef"
result.add chars[r[i]]

func slice*(r: NibblesBuf, ibegin: int, iend = -1): NibblesBuf =
func slice*(r: NibblesBuf, ibegin: int, iend = -1): NibblesBuf {.noinit.} =
result.bytes = r.bytes
result.ibegin = r.ibegin + ibegin.int8
let e =
@@ -75,7 +80,7 @@ func slice*(r: NibblesBuf, ibegin: int, iend = -1): NibblesBuf =
template writeFirstByte(nibbleCountExpr) {.dirty.} =
let nibbleCount = nibbleCountExpr
var oddnessFlag = (nibbleCount and 1) != 0
newSeq(result, (nibbleCount div 2) + 1)
result.setLen((nibbleCount div 2) + 1)
result[0] = byte((int(isLeaf) * 2 + int(oddnessFlag)) shl 4)
var writeHead = 0

@@ -89,11 +94,11 @@ template writeNibbles(r) {.dirty.} =
result[writeHead] = nextNibble shl 4
oddnessFlag = not oddnessFlag

func toHexPrefix*(r: NibblesBuf, isLeaf = false): seq[byte] =
func toHexPrefix*(r: NibblesBuf, isLeaf = false): HexPrefixBuf =
writeFirstByte(r.len)
writeNibbles(r)

func toHexPrefix*(r1, r2: NibblesBuf, isLeaf = false): seq[byte] =
func toHexPrefix*(r1, r2: NibblesBuf, isLeaf = false): HexPrefixBuf =
writeFirstByte(r1.len + r2.len)
writeNibbles(r1)
writeNibbles(r2)
@@ -131,7 +136,7 @@ func fromHexPrefix*(
else:
result.isLeaf = false

func `&`*(a, b: NibblesBuf): NibblesBuf =
func `&`*(a, b: NibblesBuf): NibblesBuf {.noinit.} =
for i in 0 ..< a.len:
result[i] = a[i]

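`HexPrefixBuf` is an `ArrayBuf[33, byte]`: a fixed-capacity buffer whose storage lives inline rather than on the heap, replacing the `seq[byte]` that `toHexPrefix` used to return (33 bytes covers 64 nibbles plus the prefix byte). A minimal sketch of the idea behind such a buffer - not stew's actual implementation:

type FixedBuf[N: static int] = object
  buf: array[N, byte] # storage is inline, no heap allocation
  used: int

proc add(b: var FixedBuf; v: byte) =
  b.buf[b.used] = v # caller guarantees b.used < N
  inc b.used

func data(b: FixedBuf): seq[byte] =
  # simplified: stew exposes a view rather than copying into a seq
  result = newSeqOfCap[byte](b.used)
  for i in 0 ..< b.used:
    result.add b.buf[i]

var hp: FixedBuf[33]
hp.add 0x20
doAssert hp.data() == @[0x20'u8]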
9 changes: 1 addition & 8 deletions nimbus/db/aristo/aristo_init/memory_db.nim
@@ -133,14 +133,7 @@ proc putVtxFn(db: MemBackendRef): PutVtxFn =
let hdl = hdl.getSession db
if hdl.error.isNil:
if vtx.isValid:
let rc = vtx.blobify()
if rc.isErr:
hdl.error = TypedPutHdlErrRef(
pfx: VtxPfx,
vid: rvid.vid,
code: rc.error)
return
hdl.sTab[rvid] = rc.value
hdl.sTab[rvid] = vtx.blobify()
else:
hdl.sTab[rvid] = EmptyBlob

6 changes: 2 additions & 4 deletions nimbus/db/aristo/aristo_init/rocks_db.nim
@@ -312,10 +312,8 @@ iterator walkVtx*(
be: RdbBackendRef;
): tuple[evid: RootedVertexID, vtx: VertexRef] =
## Variant of `walk()` iteration over the vertex sub-table.
for (rvid, data) in be.rdb.walkVtx:
let rc = data.deblobify VertexRef
if rc.isOk:
yield (rvid, rc.value)
for (rvid, vtx) in be.rdb.walkVtx:
yield (rvid, vtx)

iterator walkKey*(
be: RdbBackendRef;
20 changes: 10 additions & 10 deletions nimbus/db/aristo/aristo_init/rocks_db/rdb_get.nim
@@ -63,10 +63,10 @@ proc getKey*(
return ok(move(rc.value))

# Otherwise fetch from backend database
var res: Result[HashKey,(AristoError,string)]
# A threadvar is used to avoid allocating an environment for onData
var res{.threadvar.}: Opt[HashKey]
let onData = proc(data: openArray[byte]) =
res = HashKey.fromBytes(data).mapErr(proc(): auto =
(RdbHashKeyExpected,""))
res = HashKey.fromBytes(data)

let gotData = rdb.keyCol.get(rvid.blobify().data(), onData).valueOr:
const errSym = RdbBeDriverGetKeyError
@@ -76,9 +76,9 @@

# Correct result if needed
if not gotData:
res = ok(VOID_HASH_KEY)
res.ok(VOID_HASH_KEY)
elif res.isErr():
return res # Parsing failed
return err((RdbHashKeyExpected,"")) # Parsing failed

# Update cache and return
ok rdb.rdKeyLru.lruAppend(rvid.vid, res.value(), RdKeyLruMaxSize)
@@ -93,10 +93,10 @@ proc getVtx*(
return ok(move(rc.value))

# Otherwise fetch from backend database
var res: Result[VertexRef,(AristoError,string)]
# A threadvar is used to avoid allocating an environment for onData
var res {.threadvar.}: Result[VertexRef,AristoError]
let onData = proc(data: openArray[byte]) =
res = data.deblobify(VertexRef).mapErr(proc(error: AristoError): auto =
(error,""))
res = data.deblobify(VertexRef)

let gotData = rdb.vtxCol.get(rvid.blobify().data(), onData).valueOr:
const errSym = RdbBeDriverGetVtxError
@@ -105,9 +105,9 @@
return err((errSym,error))

if not gotData:
res = ok(VertexRef(nil))
res.ok(VertexRef(nil))
elif res.isErr():
return res # Parsing failed
return err((res.error(), "Parsing failed")) # Parsing failed

# Update cache and return
ok rdb.rdVtxLru.lruAppend(rvid.vid, res.value(), RdVtxLruMaxSize)
7 changes: 1 addition & 6 deletions nimbus/db/aristo/aristo_init/rocks_db/rdb_put.nim
@@ -122,12 +122,7 @@ proc putVtx*(
): Result[void,(VertexID,AristoError,string)] =
let dsc = rdb.session
if vtx.isValid:
let rc = vtx.blobify()
if rc.isErr:
# Caller must `rollback()` which will flush the `rdVtxLru` cache
return err((rvid.vid,rc.error,""))

dsc.put(rvid.blobify().data(), rc.value, rdb.vtxCol.handle()).isOkOr:
dsc.put(rvid.blobify().data(), vtx.blobify(), rdb.vtxCol.handle()).isOkOr:
# Caller must `rollback()` which will flush the `rdVtxLru` cache
const errSym = RdbBeDriverPutVtxError
when extraTraceMessages:
36 changes: 28 additions & 8 deletions nimbus/db/aristo/aristo_init/rocks_db/rdb_walk.nim
@@ -72,8 +72,8 @@ iterator walkKey*(rdb: RdbInst): tuple[rvid: RootedVertexID, data: Blob] =
yield (rvid, val)


iterator walkVtx*(rdb: RdbInst): tuple[rvid: RootedVertexID, data: Blob] =
## Walk over key-value pairs of the hash key column of the database.
iterator walkVtx*(rdb: RdbInst): tuple[rvid: RootedVertexID, data: VertexRef] =
## Walk over key-value pairs of the vertex column of the database.
##
## Non-decodable entries are ignored.
##
@@ -84,12 +84,32 @@ iterator walkVtx*(rdb: RdbInst): tuple[rvid: RootedVertexID, data: Blob] =
break walkBody
defer: rit.close()

for (key,val) in rit.pairs:
if val.len != 0:
let rvid = key.deblobify(RootedVertexID).valueOr:
continue

yield (rvid, val)
rit.seekToFirst()
var key: RootedVertexID
var value: VertexRef
while rit.isValid():
var valid = true
rit.key(
proc(data: openArray[byte]) =
key = deblobify(data, RootedVertexID).valueOr:
valid = false
default(RootedVertexID)
)
if not valid:
continue

rit.value(
proc(data: openArray[byte]) =
value = deblobify(data, VertexRef).valueOr:
valid = false
default(VertexRef)
)
if not valid:
continue

rit.next()
yield (key, value)
rit.close()

# ------------------------------------------------------------------------------
# End
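The rewritten walker decodes each record straight from RocksDB's borrowed key/value views instead of first copying them into `Blob` temporaries via `pairs`, which is where the ~100x iteration speedup cited in the commit message comes from. A sketch of the shape of that pattern - the iterator type below is a stand-in, not nim-rocksdb's API:

type Iter = object
  records: seq[seq[byte]]
  pos: int

func isValid(it: Iter): bool = it.pos < it.records.len
proc next(it: var Iter) = inc it.pos

# Callback style: the caller sees a borrowed view of the current record
# and decodes from it directly - no per-record copy is materialised.
proc value(it: Iter; onData: proc(data: openArray[byte])) =
  onData(it.records[it.pos])

proc sumLengths(it: var Iter): int =
  var total = 0
  while it.isValid():
    it.value(proc(data: openArray[byte]) = total += data.len)
    it.next()
  total

var walker = Iter(records: @[@[1'u8, 2], @[3'u8]])
doAssert walker.sumLengths == 3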
8 changes: 4 additions & 4 deletions nimbus/db/aristo/aristo_serialise.nim
@@ -86,7 +86,7 @@ proc to*(node: NodeRef; T: type seq[Blob]): T =

var wrx = initRlpWriter()
wrx.startList(2)
wrx.append node.ePfx.toHexPrefix(isleaf = false)
wrx.append node.ePfx.toHexPrefix(isleaf = false).data()
wrx.append brHash

result.add wrx.finish()
@@ -104,7 +104,7 @@ proc to*(node: NodeRef; T: type seq[Blob]): T =

var wr = initRlpWriter()
wr.startList(2)
wr.append node.lPfx.toHexPrefix(isleaf = true)
wr.append node.lPfx.toHexPrefix(isleaf = true).data()
wr.append node.lData.serialise(getKey0).value

result.add (wr.finish())
@@ -127,7 +127,7 @@ proc digestTo*(node: NodeRef; T: type HashKey): T =
let brHash = wr.finish().digestTo(HashKey)
wr = initRlpWriter()
wr.startList(2)
wr.append node.ePfx.toHexPrefix(isleaf = false)
wr.append node.ePfx.toHexPrefix(isleaf = false).data()
wr.append brHash

of Leaf:
@@ -138,7 +138,7 @@ proc digestTo*(node: NodeRef; T: type HashKey): T =
ok(node.key[0]) # always succeeds

wr.startList(2)
wr.append node.lPfx.toHexPrefix(isleaf = true)
wr.append node.lPfx.toHexPrefix(isleaf = true).data()
wr.append node.lData.serialise(getKey0).value

wr.finish().digestTo(HashKey)
10 changes: 5 additions & 5 deletions tests/test_aristo/test_blobify.nim
@@ -65,8 +65,8 @@ suite "Aristo blobify":
)

check:
deblobify(blobify(leafRawData)[], VertexRef)[] == leafRawData
deblobify(blobify(leafAccount)[], VertexRef)[] == leafAccount
deblobify(blobify(leafStoData)[], VertexRef)[] == leafStoData
deblobify(blobify(branch)[], VertexRef)[] == branch
deblobify(blobify(extension)[], VertexRef)[] == extension
deblobify(blobify(leafRawData), VertexRef)[] == leafRawData
deblobify(blobify(leafAccount), VertexRef)[] == leafAccount
deblobify(blobify(leafStoData), VertexRef)[] == leafStoData
deblobify(blobify(branch), VertexRef)[] == branch
deblobify(blobify(extension), VertexRef)[] == extension