diff --git a/.github/workflows/fluffy.yml b/.github/workflows/fluffy.yml index d8fd16ac34..e6f64bb1cc 100644 --- a/.github/workflows/fluffy.yml +++ b/.github/workflows/fluffy.yml @@ -80,6 +80,8 @@ jobs: - name: run test app with simulator run: | + : find / -name docker-compose -printf "%h\n%f\n%m\n\n" 2>/dev/null + PATH=$PATH$(find /usr/libexec/docker -name docker-compose -printf ":%h") SCENARIO="drop-rate --delay=15ms --bandwidth=10Mbps --queue=25 --rate_to_client=10 --rate_to_server=10" docker-compose -f fluffy/tools/utp_testing/docker/docker-compose.yml up -d - name: wait 5 seconds for containers to start @@ -98,7 +100,9 @@ jobs: - name: Stop containers if: always() - run: docker-compose -f fluffy/tools/utp_testing/docker/docker-compose.yml down + run: | + PATH=$PATH$(find /usr/libexec/docker -name docker-compose -printf ":%h") + docker-compose -f fluffy/tools/utp_testing/docker/docker-compose.yml down build: strategy: diff --git a/fluffy/docs/the_fluffy_book/docs/fluffy-with-portal-hive.md b/fluffy/docs/the_fluffy_book/docs/fluffy-with-portal-hive.md index d8ceabc25c..f709e030ae 100644 --- a/fluffy/docs/the_fluffy_book/docs/fluffy-with-portal-hive.md +++ b/fluffy/docs/the_fluffy_book/docs/fluffy-with-portal-hive.md @@ -17,11 +17,11 @@ go build . Example commands for running test suites: ```sh -# Run the history tests with the 3 different clients -./hive --sim history --client fluffy,trin,ultralight +# Run the portal tests with only the fluffy client +./hive --sim portal --client fluffy -# Run the state tests with only the fluffy client -./hive --sim state --client fluffy +# Run the portal tests with the 3 different clients +./hive --sim portal --client fluffy,trin,ultralight # Access results through web-ui: ```sh diff --git a/fluffy/network/state/content/content_keys.nim b/fluffy/network/state/content/content_keys.nim index acaefbc800..9d1fe2732a 100644 --- a/fluffy/network/state/content/content_keys.nim +++ b/fluffy/network/state/content/content_keys.nim @@ -23,7 +23,7 @@ export ssz_serialization, common_types, hash, results type NodeHash* = KeccakHash CodeHash* = KeccakHash - Address* = EthAddress + AddressHash* = KeccakHash ContentType* = enum # Note: Need to add this unused value as a case object with an enum without @@ -43,12 +43,12 @@ type nodeHash*: NodeHash ContractTrieNodeKey* = object - address*: Address + addressHash*: AddressHash path*: Nibbles nodeHash*: NodeHash ContractCodeKey* = object - address*: Address + addressHash*: AddressHash codeHash*: CodeHash ContentKey* = object @@ -68,12 +68,15 @@ func init*(T: type AccountTrieNodeKey, path: Nibbles, nodeHash: NodeHash): T = AccountTrieNodeKey(path: path, nodeHash: nodeHash) func init*( - T: type ContractTrieNodeKey, address: Address, path: Nibbles, nodeHash: NodeHash + T: type ContractTrieNodeKey, + addressHash: AddressHash, + path: Nibbles, + nodeHash: NodeHash, ): T = - ContractTrieNodeKey(address: address, path: path, nodeHash: nodeHash) + ContractTrieNodeKey(addressHash: addressHash, path: path, nodeHash: nodeHash) -func init*(T: type ContractCodeKey, address: Address, codeHash: CodeHash): T = - ContractCodeKey(address: address, codeHash: codeHash) +func init*(T: type ContractCodeKey, addressHash: AddressHash, codeHash: CodeHash): T = + ContractCodeKey(addressHash: addressHash, codeHash: codeHash) func toContentKey*(key: AccountTrieNodeKey): ContentKey = ContentKey(contentType: accountTrieNode, accountTrieNodeKey: key) diff --git a/fluffy/network/state/state_endpoints.nim b/fluffy/network/state/state_endpoints.nim index 
d4efb8544b..78ff85f35c 100644 --- a/fluffy/network/state/state_endpoints.nim +++ b/fluffy/network/state/state_endpoints.nim @@ -67,7 +67,7 @@ proc getNextNodeHash( raiseAssert(e.msg) proc getAccountProof( - n: StateNetwork, stateRoot: KeccakHash, address: Address + n: StateNetwork, stateRoot: KeccakHash, address: EthAddress ): Future[Opt[TrieProof]] {.async: (raises: [CancelledError]).} = let nibbles = address.toPath().unpackNibbles() @@ -94,13 +94,14 @@ proc getAccountProof( Opt.some(proof) proc getStorageProof( - n: StateNetwork, storageRoot: KeccakHash, address: Address, storageKey: UInt256 + n: StateNetwork, storageRoot: KeccakHash, address: EthAddress, storageKey: UInt256 ): Future[Opt[TrieProof]] {.async: (raises: [CancelledError]).} = let nibbles = storageKey.toPath().unpackNibbles() var + addressHash = keccakHash(address) nibblesIdx = 0 - key = ContractTrieNodeKey.init(address, Nibbles.empty(), storageRoot) + key = ContractTrieNodeKey.init(addressHash, Nibbles.empty(), storageRoot) proof = TrieProof.empty() while nibblesIdx < nibbles.len(): @@ -116,12 +117,12 @@ proc getStorageProof( let (nextPath, nextNodeHash) = trieNode.getNextNodeHash(nibbles, nibblesIdx).valueOr: break - key = ContractTrieNodeKey.init(address, nextPath, nextNodeHash) + key = ContractTrieNodeKey.init(addressHash, nextPath, nextNodeHash) Opt.some(proof) proc getAccount( - n: StateNetwork, blockHash: BlockHash, address: Address + n: StateNetwork, blockHash: BlockHash, address: EthAddress ): Future[Opt[Account]] {.async: (raises: [CancelledError]).} = let stateRoot = (await n.getStateRootByBlockHash(blockHash)).valueOr: @@ -138,7 +139,7 @@ proc getAccount( # Used by: eth_getBalance, proc getBalance*( - n: StateNetwork, blockHash: BlockHash, address: Address + n: StateNetwork, blockHash: BlockHash, address: EthAddress ): Future[Opt[UInt256]] {.async: (raises: [CancelledError]).} = let account = (await n.getAccount(blockHash, address)).valueOr: return Opt.none(UInt256) @@ -147,7 +148,7 @@ proc getBalance*( # Used by: eth_getTransactionCount proc getTransactionCount*( - n: StateNetwork, blockHash: BlockHash, address: Address + n: StateNetwork, blockHash: BlockHash, address: EthAddress ): Future[Opt[AccountNonce]] {.async: (raises: [CancelledError]).} = let account = (await n.getAccount(blockHash, address)).valueOr: return Opt.none(AccountNonce) @@ -156,7 +157,7 @@ proc getTransactionCount*( # Used by: eth_getStorageAt proc getStorageAt*( - n: StateNetwork, blockHash: BlockHash, address: Address, slotKey: UInt256 + n: StateNetwork, blockHash: BlockHash, address: EthAddress, slotKey: UInt256 ): Future[Opt[UInt256]] {.async: (raises: [CancelledError]).} = let account = (await n.getAccount(blockHash, address)).valueOr: @@ -172,12 +173,12 @@ proc getStorageAt*( # Used by: eth_getCode proc getCode*( - n: StateNetwork, blockHash: BlockHash, address: Address + n: StateNetwork, blockHash: BlockHash, address: EthAddress ): Future[Opt[Bytecode]] {.async: (raises: [CancelledError]).} = let account = (await n.getAccount(blockHash, address)).valueOr: return Opt.none(Bytecode) - contractCodeKey = ContractCodeKey.init(address, account.codeHash) + contractCodeKey = ContractCodeKey.init(keccakHash(address), account.codeHash) let contractCodeRetrieval = (await n.getContractCode(contractCodeKey)).valueOr: warn "Failed to get contract code" diff --git a/fluffy/network/state/state_gossip.nim b/fluffy/network/state/state_gossip.nim index 4ad2ddc7df..3ff2e647c5 100644 --- a/fluffy/network/state/state_gossip.nim +++ 
b/fluffy/network/state/state_gossip.nim @@ -60,7 +60,9 @@ func getParent(p: ProofWithPath): ProofWithPath = # leaf or extension node so we need to remove one or more nibbles let (_, _, prefixNibbles) = decodePrefix(parentEndNode.listElem(0)) - parentProof.withPath(unpackedNibbles.dropN(prefixNibbles.len()).packNibbles()) + parentProof.withPath( + unpackedNibbles.dropN(prefixNibbles.unpackNibbles().len()).packNibbles() + ) except RlpError as e: raiseAssert(e.msg) @@ -79,7 +81,7 @@ func getParent*(offerWithKey: ContractTrieOfferWithKey): ContractTrieOfferWithKe (key, offer) = offerWithKey parent = offer.storageProof.withPath(key.path).getParent() parentKey = ContractTrieNodeKey.init( - key.address, parent.path, keccakHash(parent.proof[^1].asSeq()) + key.addressHash, parent.path, keccakHash(parent.proof[^1].asSeq()) ) parentOffer = ContractTrieNodeOffer.init(parent.proof, offer.accountProof, offer.blockHash) @@ -97,7 +99,7 @@ proc gossipOffer*( let req1Peers = await p.neighborhoodGossip( srcNodeId, ContentKeysList.init(@[keyBytes]), @[offerBytes] ) - info "Offered content gossipped successfully with peers", keyBytes, peers = req1Peers + debug "Offered content gossipped successfully with peers", keyBytes, peers = req1Peers proc gossipOffer*( p: PortalProtocol, @@ -110,7 +112,7 @@ proc gossipOffer*( let req1Peers = await p.neighborhoodGossip( srcNodeId, ContentKeysList.init(@[keyBytes]), @[offerBytes] ) - info "Offered content gossipped successfully with peers", keyBytes, peers = req1Peers + debug "Offered content gossipped successfully with peers", keyBytes, peers = req1Peers proc gossipOffer*( p: PortalProtocol, @@ -123,7 +125,7 @@ proc gossipOffer*( let peers = await p.neighborhoodGossip( srcNodeId, ContentKeysList.init(@[keyBytes]), @[offerBytes] ) - info "Offered content gossipped successfully with peers", keyBytes, peers + debug "Offered content gossipped successfully with peers", keyBytes, peers # Currently only used for testing to gossip an entire account trie proof # This may also be useful for the state network bridge @@ -134,12 +136,12 @@ proc recursiveGossipOffer*( offerBytes: seq[byte], key: AccountTrieNodeKey, offer: AccountTrieNodeOffer, -) {.async: (raises: [CancelledError]).} = +): Future[ContentKeyByteList] {.async: (raises: [CancelledError]).} = await gossipOffer(p, srcNodeId, keyBytes, offerBytes, key, offer) # root node, recursive gossip is finished if key.path.unpackNibbles().len() == 0: - return + return keyBytes # continue the recursive gossip by sharing the parent offer with peers let @@ -159,12 +161,12 @@ proc recursiveGossipOffer*( offerBytes: seq[byte], key: ContractTrieNodeKey, offer: ContractTrieNodeOffer, -) {.async: (raises: [CancelledError]).} = +): Future[ContentKeyByteList] {.async: (raises: [CancelledError]).} = await gossipOffer(p, srcNodeId, keyBytes, offerBytes, key, offer) # root node, recursive gossip is finished if key.path.unpackNibbles().len() == 0: - return + return keyBytes # continue the recursive gossip by sharing the parent offer with peers let diff --git a/fluffy/network/state/state_network.nim b/fluffy/network/state/state_network.nim index cdb0649aaf..acfa93cfdb 100644 --- a/fluffy/network/state/state_network.nim +++ b/fluffy/network/state/state_network.nim @@ -173,7 +173,7 @@ proc processOffer*( n.portalProtocol.storeContent( contentKeyBytes, contentId, contentValue.toRetrievalValue().encode() ) - info "Offered content validated successfully", contentKeyBytes + debug "Offered content validated successfully", contentKeyBytes await gossipOffer( 
n.portalProtocol, maybeSrcNodeId, contentKeyBytes, contentValueBytes, contentKey, diff --git a/fluffy/network/state/state_utils.nim b/fluffy/network/state/state_utils.nim index 76c7f94b4b..22872416c8 100644 --- a/fluffy/network/state/state_utils.nim +++ b/fluffy/network/state/state_utils.nim @@ -87,7 +87,7 @@ func removeLeafKeyEndNibbles*( func toPath*(hash: KeccakHash): Nibbles {.inline.} = Nibbles.init(hash.data, isEven = true) -func toPath*(address: Address): Nibbles {.inline.} = +func toPath*(address: EthAddress): Nibbles {.inline.} = keccakHash(address).toPath() func toPath*(slotKey: UInt256): Nibbles {.inline.} = diff --git a/fluffy/network/state/state_validation.nim b/fluffy/network/state/state_validation.nim index af69fb06e4..c28fa7e478 100644 --- a/fluffy/network/state/state_validation.nim +++ b/fluffy/network/state/state_validation.nim @@ -156,7 +156,7 @@ proc validateOffer*( ): Result[void, string] = ?validateTrieProof( trustedStateRoot, - key.address.toPath(), + key.addressHash.toPath(), offer.accountProof, allowKeyEndInPathForLeafs = true, ) @@ -172,7 +172,7 @@ proc validateOffer*( ): Result[void, string] = ?validateTrieProof( trustedStateRoot, - key.address.toPath(), + key.addressHash.toPath(), offer.accountProof, allowKeyEndInPathForLeafs = true, ) diff --git a/fluffy/tests/state_network_tests/state_test_helpers.nim b/fluffy/tests/state_network_tests/state_test_helpers.nim index 717f1fd2f1..db5f4808a8 100644 --- a/fluffy/tests/state_network_tests/state_test_helpers.nim +++ b/fluffy/tests/state_network_tests/state_test_helpers.nim @@ -26,17 +26,11 @@ export yaml_utils const testVectorDir* = "./vendor/portal-spec-tests/tests/mainnet/state/validation/" type - YamlTrieNodeRecursiveGossipKV* = ref object - content_key*: string - content_value_offer*: string - content_value_retrieval*: string - YamlTrieNodeKV* = object state_root*: string content_key*: string content_value_offer*: string content_value_retrieval*: string - recursive_gossip*: YamlTrieNodeRecursiveGossipKV YamlTrieNodeKVs* = seq[YamlTrieNodeKV] @@ -48,16 +42,6 @@ type YamlContractBytecodeKVs* = seq[YamlContractBytecodeKV] - YamlRecursiveGossipKV* = object - content_key*: string - content_value*: string - - YamlRecursiveGossipData* = object - state_root*: string - recursive_gossip*: seq[YamlRecursiveGossipKV] - - YamlRecursiveGossipKVs* = seq[YamlRecursiveGossipData] - func asNibbles*(key: openArray[byte], isEven = true): Nibbles = Nibbles.init(key, isEven) diff --git a/fluffy/tests/state_network_tests/test_state_content_keys_vectors.nim b/fluffy/tests/state_network_tests/test_state_content_keys_vectors.nim index fd5b656dff..76a44a536c 100644 --- a/fluffy/tests/state_network_tests/test_state_content_keys_vectors.nim +++ b/fluffy/tests/state_network_tests/test_state_content_keys_vectors.nim @@ -8,6 +8,7 @@ import unittest2, stew/byteutils, + eth/common, ../../network/state/state_content, ../../eth_data/yaml_utils @@ -58,10 +59,10 @@ suite "State Content Keys": raiseAssert "Cannot read test vector: " & error packedNibbles = packNibbles(testCase.path) - address = Address.fromHex(testCase.address) + addressHash = EthAddress.fromHex(testCase.address).keccakHash() nodeHash = NodeHash.fromHex(testCase.node_hash) contentKey = - ContractTrieNodeKey.init(address, packedNibbles, nodeHash).toContentKey() + ContractTrieNodeKey.init(addressHash, packedNibbles, nodeHash).toContentKey() encoded = contentKey.encode() check: @@ -73,7 +74,9 @@ suite "State Content Keys": decoded.isOk() decoded.value().contentType == contractTrieNode 
decoded.value().contractTrieNodeKey == - ContractTrieNodeKey(address: address, path: packedNibbles, nodeHash: nodeHash) + ContractTrieNodeKey( + addressHash: addressHash, path: packedNibbles, nodeHash: nodeHash + ) test "Encode/decode ContractCodeKey": const file = testVectorDir & "contract_bytecode_key.yaml" @@ -88,9 +91,9 @@ suite "State Content Keys": testCase = YamlContractBytecodeKey.loadFromYaml(file).valueOr: raiseAssert "Cannot read test vector: " & error - address = Address.fromHex(testCase.address) + addressHash = EthAddress.fromHex(testCase.address).keccakHash() codeHash = CodeHash.fromHex(testCase.code_hash) - contentKey = ContractCodeKey.init(address, codeHash).toContentKey() + contentKey = ContractCodeKey.init(addressHash, codeHash).toContentKey() encoded = contentKey.encode() check: @@ -101,7 +104,7 @@ suite "State Content Keys": check: decoded.isOk() decoded.value().contentType == contractCode - decoded.value().contractCodeKey.address == address + decoded.value().contractCodeKey.addressHash == addressHash decoded.value().contractCodeKey.codeHash == codeHash test "Invalid prefix - 0 value": diff --git a/fluffy/tests/state_network_tests/test_state_endpoints_genesis.nim b/fluffy/tests/state_network_tests/test_state_endpoints_genesis.nim index 9292d4d860..af5c15969a 100644 --- a/fluffy/tests/state_network_tests/test_state_endpoints_genesis.nim +++ b/fluffy/tests/state_network_tests/test_state_endpoints_genesis.nim @@ -43,8 +43,8 @@ suite "State Endpoints - Genesis JSON Files": let proof = accountState.generateAccountProof(address) leafNode = proof[^1] - addressHash = keccakHash(address).data - path = removeLeafKeyEndNibbles(Nibbles.init(addressHash, true), leafNode) + addressHash = keccakHash(address) + path = removeLeafKeyEndNibbles(Nibbles.init(addressHash.data, true), leafNode) key = AccountTrieNodeKey.init(path, keccakHash(leafNode.asSeq())) offer = AccountTrieNodeOffer(proof: proof) @@ -94,8 +94,9 @@ suite "State Endpoints - Genesis JSON Files": block: # store the code let - key = - ContractCodeKey(address: address, codeHash: keccakHash(account.code)) + key = ContractCodeKey( + addressHash: addressHash, codeHash: keccakHash(account.code) + ) value = ContractCodeRetrieval(code: Bytecode.init(account.code)) let contentKey = key.toContentKey().encode() @@ -121,7 +122,9 @@ suite "State Endpoints - Genesis JSON Files": Nibbles.init(keccakHash(toBytesBE(slotKey)).data, true), leafNode ) key = ContractTrieNodeKey( - address: address, path: path, nodeHash: keccakHash(leafNode.asSeq()) + addressHash: addressHash, + path: path, + nodeHash: keccakHash(leafNode.asSeq()), ) offer = ContractTrieNodeOffer(storageProof: storageProof, accountProof: proof) diff --git a/fluffy/tests/state_network_tests/test_state_endpoints_vectors.nim b/fluffy/tests/state_network_tests/test_state_endpoints_vectors.nim index 6e2e39b19f..5a99770bc4 100644 --- a/fluffy/tests/state_network_tests/test_state_endpoints_vectors.nim +++ b/fluffy/tests/state_network_tests/test_state_endpoints_vectors.nim @@ -27,10 +27,10 @@ procSuite "State Endpoints": let rng = newRng() asyncTest "Gossip then query getBalance and getTransactionCount": - const file = testVectorDir / "recursive_gossip.yaml" + const file = testVectorDir / "account_trie_node.yaml" let - testCase = YamlRecursiveGossipKVs.loadFromYaml(file).valueOr: + testCase = YamlTrieNodeKVs.loadFromYaml(file).valueOr: raiseAssert "Cannot read test vector: " & error stateNode1 = newStateNode(rng, STATE_NODE1_PORT) stateNode2 = newStateNode(rng, STATE_NODE2_PORT) @@ 
-45,16 +45,17 @@ procSuite "State Endpoints": (await stateNode2.portalProtocol().ping(stateNode1.localNode())).isOk() for i, testData in testCase: - if i == 1: + if i != 0 and i != 3: + # only using the leaf nodes from the test data continue let stateRoot = KeccakHash.fromBytes(testData.state_root.hexToSeqByte()) - leafData = testData.recursive_gossip[0] + leafData = testData contentKeyBytes = leafData.content_key.hexToSeqByte().ContentKeyByteList contentKey = ContentKey.decode(contentKeyBytes).get() contentId = toContentId(contentKeyBytes) - contentValueBytes = leafData.content_value.hexToSeqByte() + contentValueBytes = leafData.content_value_offer.hexToSeqByte() contentValue = AccountTrieNodeOffer.decode(contentValueBytes).get() # set valid state root @@ -62,7 +63,7 @@ procSuite "State Endpoints": stateNode2.mockBlockHashToStateRoot(contentValue.blockHash, stateRoot) # offer the leaf node - await stateNode1.portalProtocol.recursiveGossipOffer( + let rootKeyBytes = await stateNode1.portalProtocol.recursiveGossipOffer( Opt.none(NodeId), contentKeyBytes, contentValueBytes, @@ -70,16 +71,14 @@ procSuite "State Endpoints": contentValue, ) - # wait for recursive gossip to complete - for node in testData.recursive_gossip: - let keyBytes = node.content_key.hexToSeqByte().ContentKeyByteList - await stateNode2.waitUntilContentAvailable(toContentId(keyBytes)) + await stateNode1.waitUntilContentAvailable(toContentId(rootKeyBytes)) + await stateNode2.waitUntilContentAvailable(toContentId(rootKeyBytes)) let address = if i == 0: EthAddress.fromHex("0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2") - elif i == 2: + elif i == 3: EthAddress.fromHex("0x1584a2c066b7a455dbd6ae2807a7334e83c35fa5") else: raiseAssert("Invalid test case") @@ -134,11 +133,14 @@ procSuite "State Endpoints": asyncTest "Gossip then query getStorageAt and getCode": const - file = testVectorDir / "recursive_gossip.yaml" + accountTrieFile = testVectorDir / "account_trie_node.yaml" + contractTrieFile = testVectorDir / "contract_storage_trie_node.yaml" bytecodeFile = testVectorDir / "contract_bytecode.yaml" let - testCase = YamlRecursiveGossipKVs.loadFromYaml(file).valueOr: + accountTrieTestCase = YamlTrieNodeKVs.loadFromYaml(accountTrieFile).valueOr: + raiseAssert "Cannot read test vector: " & error + contractTrieTestCase = YamlTrieNodeKVs.loadFromYaml(contractTrieFile).valueOr: raiseAssert "Cannot read test vector: " & error stateNode1 = newStateNode(rng, STATE_NODE1_PORT) stateNode2 = newStateNode(rng, STATE_NODE2_PORT) @@ -155,13 +157,13 @@ procSuite "State Endpoints": block: # seed the account data let - testData = testCase[0] + testData = accountTrieTestCase[0] stateRoot = KeccakHash.fromBytes(testData.state_root.hexToSeqByte()) - leafData = testData.recursive_gossip[0] + leafData = testData contentKeyBytes = leafData.content_key.hexToSeqByte().ContentKeyByteList contentKey = ContentKey.decode(contentKeyBytes).get() contentId = toContentId(contentKeyBytes) - contentValueBytes = leafData.content_value.hexToSeqByte() + contentValueBytes = leafData.content_value_offer.hexToSeqByte() contentValue = AccountTrieNodeOffer.decode(contentValueBytes).get() # set valid state root @@ -169,7 +171,7 @@ procSuite "State Endpoints": stateNode2.mockBlockHashToStateRoot(contentValue.blockHash, stateRoot) # offer the leaf node - await stateNode1.portalProtocol.recursiveGossipOffer( + let rootKeyBytes = await stateNode1.portalProtocol.recursiveGossipOffer( Opt.none(NodeId), contentKeyBytes, contentValueBytes, @@ -177,16 +179,19 @@ procSuite "State 
Endpoints": contentValue, ) + # wait for gossip to complete + await stateNode2.waitUntilContentAvailable(toContentId(rootKeyBytes)) + block: # seed the storage data let - testData = testCase[1] + testData = contractTrieTestCase[0] stateRoot = KeccakHash.fromBytes(testData.state_root.hexToSeqByte()) - leafData = testData.recursive_gossip[0] + leafData = testData contentKeyBytes = leafData.content_key.hexToSeqByte().ContentKeyByteList contentKey = ContentKey.decode(contentKeyBytes).get() contentId = toContentId(contentKeyBytes) - contentValueBytes = leafData.content_value.hexToSeqByte() + contentValueBytes = leafData.content_value_offer.hexToSeqByte() contentValue = ContractTrieNodeOffer.decode(contentValueBytes).get() # set valid state root @@ -194,7 +199,7 @@ procSuite "State Endpoints": stateNode2.mockBlockHashToStateRoot(contentValue.blockHash, stateRoot) # offer the leaf node - await stateNode1.portalProtocol.recursiveGossipOffer( + let storageRootKeyBytes = await stateNode1.portalProtocol.recursiveGossipOffer( Opt.none(NodeId), contentKeyBytes, contentValueBytes, @@ -202,10 +207,8 @@ procSuite "State Endpoints": contentValue, ) - # wait for recursive gossip to complete - for node in testData.recursive_gossip: - let keyBytes = node.content_key.hexToSeqByte().ContentKeyByteList - await stateNode2.waitUntilContentAvailable(toContentId(keyBytes)) + # wait for gossip to complete + await stateNode2.waitUntilContentAvailable(toContentId(storageRootKeyBytes)) let address = EthAddress.fromHex("0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2") diff --git a/fluffy/tests/state_network_tests/test_state_gossip_getparent_genesis.nim b/fluffy/tests/state_network_tests/test_state_gossip_getparent_genesis.nim index 8063a3c10d..29860743d0 100644 --- a/fluffy/tests/state_network_tests/test_state_gossip_getparent_genesis.nim +++ b/fluffy/tests/state_network_tests/test_state_gossip_getparent_genesis.nim @@ -68,7 +68,9 @@ suite "State Gossip getParent - Genesis JSON Files": (accountState, storageStates) = accounts.toState() for address, account in accounts: - let accountProof = accountState.generateAccountProof(address) + let + addressHash = address.keccakHash() + accountProof = accountState.generateAccountProof(address) if account.code.len() > 0: let storageState = storageStates[address] @@ -82,7 +84,9 @@ suite "State Gossip getParent - Genesis JSON Files": Nibbles.init(keccakHash(toBytesBE(slotKey)).data, true), leafNode ) key = ContractTrieNodeKey( - address: address, path: path, nodeHash: keccakHash(leafNode.asSeq()) + addressHash: addressHash, + path: path, + nodeHash: keccakHash(leafNode.asSeq()), ) offer = ContractTrieNodeOffer( storageProof: storageProof, accountProof: accountProof diff --git a/fluffy/tests/state_network_tests/test_state_gossip_getparent_vectors.nim b/fluffy/tests/state_network_tests/test_state_gossip_getparent_vectors.nim index 1e5ba94bf2..a77a13d418 100644 --- a/fluffy/tests/state_network_tests/test_state_gossip_getparent_vectors.nim +++ b/fluffy/tests/state_network_tests/test_state_gossip_getparent_vectors.nim @@ -6,12 +6,11 @@ # at your option. This file may not be copied, modified, or distributed except according to those terms. 
import - std/[os, strutils], + std/os, results, unittest2, stew/byteutils, eth/common, - ../../common/common_utils, ../../network/state/[state_content, state_gossip], ./state_test_helpers @@ -23,119 +22,52 @@ suite "State Gossip getParent - Test Vectors": raiseAssert "Cannot read test vector: " & error for i, testData in testCase: - var stateRoot = KeccakHash.fromBytes(testData.state_root.hexToSeqByte()) - - let key = - ContentKey.decode(testData.content_key.hexToSeqByte().ContentKeyByteList).get() - let offer = - AccountTrieNodeOffer.decode(testData.content_value_offer.hexToSeqByte()).get() - - if i == 1: # second test case only has root node and no recursive gossip - doAssertRaises(AssertionDefect): - discard offer.withKey(key.accountTrieNodeKey).getParent() - continue - - let (parentKey, parentOffer) = offer.withKey(key.accountTrieNodeKey).getParent() - check: - parentKey.path.unpackNibbles().len() < - key.accountTrieNodeKey.path.unpackNibbles().len() - parentOffer.proof.len() == offer.proof.len() - 1 - parentKey.toContentKey().encode() == - testData.recursive_gossip.content_key.hexToSeqByte().ContentKeyByteList - parentOffer.encode() == - testData.recursive_gossip.content_value_offer.hexToSeqByte() - parentOffer.toRetrievalValue().encode() == - testData.recursive_gossip.content_value_retrieval.hexToSeqByte() - - test "Check contract storage trie node parent matches expected recursive gossip": - const file = testVectorDir / "contract_storage_trie_node.yaml" - - let testCase = YamlTrieNodeKVs.loadFromYaml(file).valueOr: - raiseAssert "Cannot read test vector: " & error - - for i, testData in testCase: - var stateRoot = KeccakHash.fromBytes(testData.state_root.hexToSeqByte()) - - let key = - ContentKey.decode(testData.content_key.hexToSeqByte().ContentKeyByteList).get() - let offer = - ContractTrieNodeOffer.decode(testData.content_value_offer.hexToSeqByte()).get() - - if i == 1: # second test case only has root node and no recursive gossip - doAssertRaises(AssertionDefect): - discard offer.withKey(key.contractTrieNodeKey).getParent() - continue - - let (parentKey, parentOffer) = offer.withKey(key.contractTrieNodeKey).getParent() - check: - parentKey.path.unpackNibbles().len() < - key.contractTrieNodeKey.path.unpackNibbles().len() - parentOffer.storageProof.len() == offer.storageProof.len() - 1 - parentKey.toContentKey().encode() == - testData.recursive_gossip.content_key.hexToSeqByte().ContentKeyByteList - parentOffer.encode() == - testData.recursive_gossip.content_value_offer.hexToSeqByte() - parentOffer.toRetrievalValue().encode() == - testData.recursive_gossip.content_value_retrieval.hexToSeqByte() - - test "Check each account trie node parent matches expected recursive gossip": - const file = testVectorDir / "recursive_gossip.yaml" - - let testCase = YamlRecursiveGossipKVs.loadFromYaml(file).valueOr: - raiseAssert "Cannot read test vector: " & error - - for i, testData in testCase: - if i == 1: - continue - - for j in 0 ..< testData.recursive_gossip.high: + if i == 0 or i == 3: let + parentTestData = testCase[i + 1] key = ContentKey - .decode( - testData.recursive_gossip[j].content_key.hexToSeqByte().ContentKeyByteList - ) + .decode(testData.content_key.hexToSeqByte().ContentKeyByteList) .get() offer = AccountTrieNodeOffer - .decode(testData.recursive_gossip[j].content_value.hexToSeqByte()) + .decode(testData.content_value_offer.hexToSeqByte()) .get() - (parentKey, parentOffer) = offer.withKey(key.accountTrieNodeKey).getParent() + let (parentKey, parentOffer) = 
offer.withKey(key.accountTrieNodeKey).getParent() check: parentKey.path.unpackNibbles().len() < key.accountTrieNodeKey.path.unpackNibbles().len() parentOffer.proof.len() == offer.proof.len() - 1 parentKey.toContentKey().encode() == - testData.recursive_gossip[j + 1].content_key.hexToSeqByte().ContentKeyByteList - parentOffer.encode() == - testData.recursive_gossip[j + 1].content_value.hexToSeqByte() + parentTestData.content_key.hexToSeqByte().ContentKeyByteList + parentOffer.encode() == parentTestData.content_value_offer.hexToSeqByte() + parentOffer.toRetrievalValue().encode() == + parentTestData.content_value_retrieval.hexToSeqByte() - test "Check each contract trie node parent matches expected recursive gossip": - const file = testVectorDir / "recursive_gossip.yaml" + test "Check contract storage trie node parent matches expected recursive gossip": + const file = testVectorDir / "contract_storage_trie_node.yaml" - let testCase = YamlRecursiveGossipKVs.loadFromYaml(file).valueOr: + let testCase = YamlTrieNodeKVs.loadFromYaml(file).valueOr: raiseAssert "Cannot read test vector: " & error for i, testData in testCase: - if i != 1: - continue - - for j in 0 ..< testData.recursive_gossip.high: + if i == 0: let + parentTestData = testCase[i + 1] key = ContentKey - .decode( - testData.recursive_gossip[j].content_key.hexToSeqByte().ContentKeyByteList - ) + .decode(testData.content_key.hexToSeqByte().ContentKeyByteList) .get() offer = ContractTrieNodeOffer - .decode(testData.recursive_gossip[j].content_value.hexToSeqByte()) + .decode(testData.content_value_offer.hexToSeqByte()) .get() - (parentKey, parentOffer) = offer.withKey(key.contractTrieNodeKey).getParent() + let (parentKey, parentOffer) = + offer.withKey(key.contractTrieNodeKey).getParent() check: parentKey.path.unpackNibbles().len() < key.contractTrieNodeKey.path.unpackNibbles().len() parentOffer.storageProof.len() == offer.storageProof.len() - 1 parentKey.toContentKey().encode() == - testData.recursive_gossip[j + 1].content_key.hexToSeqByte().ContentKeyByteList - parentOffer.encode() == - testData.recursive_gossip[j + 1].content_value.hexToSeqByte() + parentTestData.content_key.hexToSeqByte().ContentKeyByteList + parentOffer.encode() == parentTestData.content_value_offer.hexToSeqByte() + parentOffer.toRetrievalValue().encode() == + parentTestData.content_value_retrieval.hexToSeqByte() diff --git a/fluffy/tests/state_network_tests/test_state_gossip_gossipoffer_vectors.nim b/fluffy/tests/state_network_tests/test_state_gossip_gossipoffer_vectors.nim index 03bbdd4572..b81ea5de98 100644 --- a/fluffy/tests/state_network_tests/test_state_gossip_gossipoffer_vectors.nim +++ b/fluffy/tests/state_network_tests/test_state_gossip_gossipoffer_vectors.nim @@ -42,10 +42,11 @@ procSuite "State Gossip - Gossip Offer": (await stateNode1.portalProtocol().ping(stateNode2.localNode())).isOk() for i, testData in testCase: - if i == 1: - continue # skip scenario with no parent + if i != 0 and i != 3: + continue # skip scenarios with no parent let + parentTestData = testCase[i + 1] stateRoot = KeccakHash.fromBytes(testData.state_root.hexToSeqByte()) contentKeyBytes = testData.content_key.hexToSeqByte().ContentKeyByteList contentKey = ContentKey.decode(contentKeyBytes).get() @@ -54,11 +55,10 @@ procSuite "State Gossip - Gossip Offer": contentValue = AccountTrieNodeOffer.decode(contentValueBytes).get() parentContentKeyBytes = - testData.recursive_gossip.content_key.hexToSeqByte().ContentKeyByteList + parentTestData.content_key.hexToSeqByte().ContentKeyByteList 
parentContentKey = ContentKey.decode(parentContentKeyBytes).get() parentContentId = toContentId(parentContentKeyBytes) - parentContentValueBytes = - testData.recursive_gossip.content_value_offer.hexToSeqByte() + parentContentValueBytes = parentTestData.content_value_offer.hexToSeqByte() parentContentValue = AccountTrieNodeOffer.decode(parentContentValueBytes).get() # set valid state root @@ -115,10 +115,11 @@ procSuite "State Gossip - Gossip Offer": (await stateNode1.portalProtocol().ping(stateNode2.localNode())).isOk() for i, testData in testCase: - if i == 1: - continue # skip scenario with no parent + if i != 0: + continue # skip scenarios with no parent let + parentTestData = testCase[i + 1] stateRoot = KeccakHash.fromBytes(testData.state_root.hexToSeqByte()) contentKeyBytes = testData.content_key.hexToSeqByte().ContentKeyByteList contentKey = ContentKey.decode(contentKeyBytes).get() @@ -127,11 +128,10 @@ procSuite "State Gossip - Gossip Offer": contentValue = ContractTrieNodeOffer.decode(contentValueBytes).get() parentContentKeyBytes = - testData.recursive_gossip.content_key.hexToSeqByte().ContentKeyByteList + parentTestData.content_key.hexToSeqByte().ContentKeyByteList parentContentKey = ContentKey.decode(parentContentKeyBytes).get() parentContentId = toContentId(parentContentKeyBytes) - parentContentValueBytes = - testData.recursive_gossip.content_value_offer.hexToSeqByte() + parentContentValueBytes = parentTestData.content_value_offer.hexToSeqByte() parentContentValue = ContractTrieNodeOffer.decode(parentContentValueBytes).get() # set valid state root @@ -225,161 +225,3 @@ procSuite "State Gossip - Gossip Offer": await stateNode1.stop() await stateNode2.stop() - - asyncTest "Recursive gossip account trie nodes": - const file = testVectorDir / "recursive_gossip.yaml" - - let - testCase = YamlRecursiveGossipKVs.loadFromYaml(file).valueOr: - raiseAssert "Cannot read test vector: " & error - stateNode1 = newStateNode(rng, STATE_NODE1_PORT) - stateNode2 = newStateNode(rng, STATE_NODE2_PORT) - - stateNode1.start() - stateNode2.start() - - check: - stateNode1.portalProtocol().addNode(stateNode2.localNode()) == Added - stateNode2.portalProtocol().addNode(stateNode1.localNode()) == Added - (await stateNode1.portalProtocol().ping(stateNode2.localNode())).isOk() - (await stateNode2.portalProtocol().ping(stateNode1.localNode())).isOk() - - for i, testData in testCase: - if i == 1: - continue - - let - stateRoot = KeccakHash.fromBytes(testData.state_root.hexToSeqByte()) - leafData = testData.recursive_gossip[0] - contentKeyBytes = leafData.content_key.hexToSeqByte().ContentKeyByteList - contentKey = ContentKey.decode(contentKeyBytes).get() - contentId = toContentId(contentKeyBytes) - contentValueBytes = leafData.content_value.hexToSeqByte() - contentValue = AccountTrieNodeOffer.decode(contentValueBytes).get() - - # set valid state root - stateNode1.mockBlockHashToStateRoot(contentValue.blockHash, stateRoot) - stateNode2.mockBlockHashToStateRoot(contentValue.blockHash, stateRoot) - - check not stateNode1.containsId(contentId) - check not stateNode2.containsId(contentId) - - # offer the leaf node - await stateNode1.portalProtocol.recursiveGossipOffer( - Opt.none(NodeId), - contentKeyBytes, - contentValueBytes, - contentKey.accountTrieNodeKey, - contentValue, - ) - - # wait for recursive gossip to complete - for node in testData.recursive_gossip: - let keyBytes = node.content_key.hexToSeqByte().ContentKeyByteList - await stateNode2.waitUntilContentAvailable(toContentId(keyBytes)) - - # check that 
all nodes were received by both state instances - for kv in testData.recursive_gossip: - let - expectedKeyBytes = kv.content_key.hexToSeqByte().ContentKeyByteList - expectedKey = ContentKey.decode(expectedKeyBytes).get() - expectedId = toContentId(expectedKeyBytes) - expectedValue = - AccountTrieNodeOffer.decode(kv.content_value.hexToSeqByte()).get() - res1 = await stateNode1.stateNetwork.getAccountTrieNode( - expectedKey.accountTrieNodeKey - ) - res2 = await stateNode2.stateNetwork.getAccountTrieNode( - expectedKey.accountTrieNodeKey - ) - check: - stateNode1.containsId(expectedId) - stateNode2.containsId(expectedId) - res1.isOk() - res1.get() == expectedValue.toRetrievalValue() - res1.get().node == expectedValue.toRetrievalValue().node - res2.isOk() - res2.get() == expectedValue.toRetrievalValue() - res2.get().node == expectedValue.toRetrievalValue().node - - await stateNode1.stop() - await stateNode2.stop() - - asyncTest "Recursive gossip contract trie nodes": - const file = testVectorDir / "recursive_gossip.yaml" - - let - testCase = YamlRecursiveGossipKVs.loadFromYaml(file).valueOr: - raiseAssert "Cannot read test vector: " & error - stateNode1 = newStateNode(rng, STATE_NODE1_PORT) - stateNode2 = newStateNode(rng, STATE_NODE2_PORT) - - stateNode1.start() - stateNode2.start() - - check: - stateNode1.portalProtocol().addNode(stateNode2.localNode()) == Added - stateNode2.portalProtocol().addNode(stateNode1.localNode()) == Added - (await stateNode1.portalProtocol().ping(stateNode2.localNode())).isOk() - (await stateNode2.portalProtocol().ping(stateNode1.localNode())).isOk() - - for i, testData in testCase: - if i != 1: - continue - - let - stateRoot = KeccakHash.fromBytes(testData.state_root.hexToSeqByte()) - leafData = testData.recursive_gossip[0] - contentKeyBytes = leafData.content_key.hexToSeqByte().ContentKeyByteList - contentKey = ContentKey.decode(contentKeyBytes).get() - contentId = toContentId(contentKeyBytes) - contentValueBytes = leafData.content_value.hexToSeqByte() - contentValue = ContractTrieNodeOffer.decode(contentValueBytes).get() - - # set valid state root - stateNode1.mockBlockHashToStateRoot(contentValue.blockHash, stateRoot) - stateNode2.mockBlockHashToStateRoot(contentValue.blockHash, stateRoot) - - check not stateNode1.containsId(contentId) - check not stateNode2.containsId(contentId) - - # offer the leaf node - await stateNode1.portalProtocol.recursiveGossipOffer( - Opt.none(NodeId), - contentKeyBytes, - contentValueBytes, - contentKey.contractTrieNodeKey, - contentValue, - ) - - # wait for recursive gossip to complete - for node in testData.recursive_gossip: - let keyBytes = node.content_key.hexToSeqByte().ContentKeyByteList - await stateNode2.waitUntilContentAvailable(toContentId(keyBytes)) - - # check that all nodes were received by both state instances - for kv in testData.recursive_gossip: - let - expectedKeyBytes = kv.content_key.hexToSeqByte().ContentKeyByteList - expectedKey = ContentKey.decode(expectedKeyBytes).get() - expectedId = toContentId(expectedKeyBytes) - expectedValue = - ContractTrieNodeOffer.decode(kv.content_value.hexToSeqByte()).get() - res1 = await stateNode1.stateNetwork.getContractTrieNode( - expectedKey.contractTrieNodeKey - ) - res2 = await stateNode2.stateNetwork.getContractTrieNode( - expectedKey.contractTrieNodeKey - ) - check: - stateNode1.containsId(expectedId) - stateNode2.containsId(expectedId) - res1.isOk() - res1.get() == expectedValue.toRetrievalValue() - res1.get().node == expectedValue.toRetrievalValue().node - res2.isOk() - 
res2.get() == expectedValue.toRetrievalValue() - res2.get().node == expectedValue.toRetrievalValue().node - - await stateNode1.stop() - await stateNode2.stop() diff --git a/fluffy/tests/state_network_tests/test_state_validation_genesis.nim b/fluffy/tests/state_network_tests/test_state_validation_genesis.nim index e6150a0186..d6a321de8a 100644 --- a/fluffy/tests/state_network_tests/test_state_validation_genesis.nim +++ b/fluffy/tests/state_network_tests/test_state_validation_genesis.nim @@ -26,10 +26,10 @@ template checkValidProofsForExistingLeafs( acc.codeHash = keccakHash(account.code) let + addressHash = address.keccakHash() accountProof = accountState.generateAccountProof(address) - accountPath = removeLeafKeyEndNibbles( - Nibbles.init(keccakHash(address).data, true), accountProof[^1] - ) + accountPath = + removeLeafKeyEndNibbles(Nibbles.init(addressHash.data, true), accountProof[^1]) accountTrieNodeKey = AccountTrieNodeKey( path: accountPath, nodeHash: keccakHash(accountProof[^1].asSeq()) ) @@ -40,7 +40,8 @@ template checkValidProofsForExistingLeafs( check proofResult.isOk() let - contractCodeKey = ContractCodeKey(address: address, codeHash: acc.codeHash) + contractCodeKey = + ContractCodeKey(addressHash: addressHash, codeHash: acc.codeHash) contractCode = ContractCodeOffer(code: Bytecode.init(account.code), accountProof: accountProof) codeResult = @@ -58,7 +59,7 @@ template checkValidProofsForExistingLeafs( Nibbles.init(keccakHash(toBytesBE(slotKey)).data, true), storageProof[^1] ) contractTrieNodeKey = ContractTrieNodeKey( - address: address, + addressHash: addressHash, path: slotPath, nodeHash: keccakHash(storageProof[^1].asSeq()), ) @@ -80,10 +81,10 @@ template checkInvalidProofsWithBadValue( acc.codeHash = keccakHash(account.code) var + addressHash = address.keccakHash() accountProof = accountState.generateAccountProof(address) - accountPath = removeLeafKeyEndNibbles( - Nibbles.init(keccakHash(address).data, true), accountProof[^1] - ) + accountPath = + removeLeafKeyEndNibbles(Nibbles.init(addressHash.data, true), accountProof[^1]) accountTrieNodeKey = AccountTrieNodeKey( path: accountPath, nodeHash: keccakHash(accountProof[^1].asSeq()) ) @@ -96,7 +97,8 @@ template checkInvalidProofsWithBadValue( check proofResult.isErr() let - contractCodeKey = ContractCodeKey(address: address, codeHash: acc.codeHash) + contractCodeKey = + ContractCodeKey(addressHash: addressHash, codeHash: acc.codeHash) contractCode = ContractCodeOffer( code: Bytecode.init(@[1u8, 2, 3]), # bad code value accountProof: accountProof, @@ -116,7 +118,7 @@ template checkInvalidProofsWithBadValue( Nibbles.init(keccakHash(toBytesBE(slotKey)).data, true), storageProof[^1] ) contractTrieNodeKey = ContractTrieNodeKey( - address: address, + addressHash: addressHash, path: slotPath, nodeHash: keccakHash(storageProof[^1].asSeq()), ) diff --git a/fluffy/tests/state_network_tests/test_state_validation_vectors.nim b/fluffy/tests/state_network_tests/test_state_validation_vectors.nim index ca1ce7aa40..82b45ea8f0 100644 --- a/fluffy/tests/state_network_tests/test_state_validation_vectors.nim +++ b/fluffy/tests/state_network_tests/test_state_validation_vectors.nim @@ -150,27 +150,13 @@ suite "State Validation - Test Vectors": ) .isOk() - if i == 1: - continue # second test case only has root node and no recursive gossip - - let contentKey = ContentKey - .decode(testData.recursive_gossip.content_key.hexToSeqByte().ContentKeyByteList) - .get() - let contentValueOffer = AccountTrieNodeOffer - 
.decode(testData.recursive_gossip.content_value_offer.hexToSeqByte()) - .get() - - check: - validateOffer( - Opt.some(stateRoot), contentKey.accountTrieNodeKey, contentValueOffer - ) - .isOk() - test "Validate invalid AccountTrieNodeOffer nodes - bad state roots": const file = testVectorDir / "account_trie_node.yaml" const stateRoots = [ "0xBAD7b80af0c28bc1489513346d2706885be90abb07f23ca28e50482adb392d61", "0xBAD7b80af0c28bc1489513346d2706885be90abb07f23ca28e50482adb392d61", + "0xBAD7b80af0c28bc1489513346d2706885be90abb07f23ca28e50482adb392d61", + "0xBAD8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544", "0xBAD8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544", ] @@ -178,16 +164,16 @@ suite "State Validation - Test Vectors": raiseAssert "Cannot read test vector: " & error for i, testData in testCase: - var stateRoot = KeccakHash.fromBytes(stateRoots[i].hexToSeqByte()) - - let contentKey = - ContentKey.decode(testData.content_key.hexToSeqByte().ContentKeyByteList).get() - let contentValueOffer = - AccountTrieNodeOffer.decode(testData.content_value_offer.hexToSeqByte()).get() - - let res = validateOffer( - Opt.some(stateRoot), contentKey.accountTrieNodeKey, contentValueOffer - ) + let + stateRoot = KeccakHash.fromBytes(stateRoots[i].hexToSeqByte()) + contentKey = ContentKey + .decode(testData.content_key.hexToSeqByte().ContentKeyByteList) + .get() + contentValueOffer = + AccountTrieNodeOffer.decode(testData.content_value_offer.hexToSeqByte()).get() + res = validateOffer( + Opt.some(stateRoot), contentKey.accountTrieNodeKey, contentValueOffer + ) check: res.isErr() res.error() == "hash of proof root node doesn't match the expected root hash" @@ -216,7 +202,7 @@ suite "State Validation - Test Vectors": res.error() == "hash of proof root node doesn't match the expected root hash" for i, testData in testCase: - if i == 1: + if i == 2: continue # second test case only has root node var stateRoot = KeccakHash.fromBytes(testData.state_root.hexToSeqByte()) @@ -275,27 +261,12 @@ suite "State Validation - Test Vectors": ) .isOk() - if i == 1: - continue # second test case has no recursive gossip - - let contentKey = ContentKey - .decode(testData.recursive_gossip.content_key.hexToSeqByte().ContentKeyByteList) - .get() - let contentValueOffer = ContractTrieNodeOffer - .decode(testData.recursive_gossip.content_value_offer.hexToSeqByte()) - .get() - - check: - validateOffer( - Opt.some(stateRoot), contentKey.contractTrieNodeKey, contentValueOffer - ) - .isOk() - test "Validate invalid ContractTrieNodeOffer nodes - bad state roots": const file = testVectorDir / "contract_storage_trie_node.yaml" const stateRoots = [ "0xBAD7b80af0c28bc1489513346d2706885be90abb07f23ca28e50482adb392d61", "0xBAD7b80af0c28bc1489513346d2706885be90abb07f23ca28e50482adb392d61", + "0xBAD7b80af0c28bc1489513346d2706885be90abb07f23ca28e50482adb392d61", ] let testCase = YamlTrieNodeKVs.loadFromYaml(file).valueOr: @@ -525,58 +496,3 @@ suite "State Validation - Test Vectors": res.isErr() res.error() == "hash of bytecode doesn't match the code hash in the account proof" - - # Recursive gossip offer validation tests - - test "Validate valid AccountTrieNodeOffer recursive gossip nodes": - const file = testVectorDir / "recursive_gossip.yaml" - const stateRoots = [ - "0x1ad7b80af0c28bc1489513346d2706885be90abb07f23ca28e50482adb392d61", - "0x1ad7b80af0c28bc1489513346d2706885be90abb07f23ca28e50482adb392d61", - "0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544", - ] - - let testCase = 
YamlRecursiveGossipKVs.loadFromYaml(file).valueOr: - raiseAssert "Cannot read test vector: " & error - - for i, testData in testCase: - if i == 1: - continue - - var stateRoot = KeccakHash.fromBytes(stateRoots[i].hexToSeqByte()) - - for kv in testData.recursive_gossip: - let contentKey = - ContentKey.decode(kv.content_key.hexToSeqByte().ContentKeyByteList).get() - let contentValueOffer = - AccountTrieNodeOffer.decode(kv.content_value.hexToSeqByte()).get() - - check: - validateOffer( - Opt.some(stateRoot), contentKey.accountTrieNodeKey, contentValueOffer - ) - .isOk() - - test "Validate valid ContractTrieNodeOffer recursive gossip nodes": - const file = testVectorDir / "recursive_gossip.yaml" - - let testCase = YamlRecursiveGossipKVs.loadFromYaml(file).valueOr: - raiseAssert "Cannot read test vector: " & error - - for i, testData in testCase: - if i != 1: - continue - - var stateRoot = KeccakHash.fromBytes(testData.state_root.hexToSeqByte()) - - for kv in testData.recursive_gossip: - let contentKey = - ContentKey.decode(kv.content_key.hexToSeqByte().ContentKeyByteList).get() - let contentValueOffer = - ContractTrieNodeOffer.decode(kv.content_value.hexToSeqByte()).get() - - check: - validateOffer( - Opt.some(stateRoot), contentKey.contractTrieNodeKey, contentValueOffer - ) - .isOk() diff --git a/fluffy/tools/portal_bridge/portal_bridge_conf.nim b/fluffy/tools/portal_bridge/portal_bridge_conf.nim index 10f84307cf..f9ea85d636 100644 --- a/fluffy/tools/portal_bridge/portal_bridge_conf.nim +++ b/fluffy/tools/portal_bridge/portal_bridge_conf.nim @@ -145,18 +145,33 @@ type desc: "The block number to start from", defaultValue: 1, name: "start-block" .}: uint64 - verifyState* {. - desc: "Verify the fetched state before gossiping it into the network", - defaultValue: true, - name: "verify-state" + verifyStateProofs* {. + desc: "Verify state proofs before gossiping them into the portal network", + defaultValue: false, + name: "verify-state-proofs" .}: bool - backfillState* {. - desc: "Backfill pre-merge state data into the network", + gossipGenesis* {. + desc: + "Enable gossip of the genesis state into the portal network when starting from block 1", defaultValue: true, - name: "backfill" + name: "gossip-genesis" + .}: bool + + verifyGossip* {. + desc: + "Enable verifying that the state was successfully gossiped by fetching it from the network", + defaultValue: false, + name: "verify-gossip" + .}: bool + + gossipWorkersCount* {.
+ desc: + "The number of workers to use for gossiping the state into the portal network", + defaultValue: 2, + name: "gossip-workers" + .}: uint + func parseCmdArg*(T: type TrustedDigest, input: string): T {.raises: [ValueError].} = TrustedDigest.fromHex(input) diff --git a/fluffy/tools/portal_bridge/portal_bridge_state.nim b/fluffy/tools/portal_bridge/portal_bridge_state.nim index 66a5dcf9c9..75e02b6060 100644 --- a/fluffy/tools/portal_bridge/portal_bridge_state.nim +++ b/fluffy/tools/portal_bridge/portal_bridge_state.nim @@ -12,6 +12,7 @@ import chronicles, chronos, stint, + json_serialization, stew/byteutils, web3/[eth_api, eth_api_types], results, @@ -65,7 +66,9 @@ proc getLastPersistedBlockNumber(db: DatabaseRef): Opt[uint64] = raiseAssert(e.msg) # Should never happen proc putLastPersistedBlockNumber(db: DatabaseRef, blockNumber: uint64) {.inline.} = - db.put(rlp.encode("lastPersistedBlockNumber"), rlp.encode(blockNumber)) + # Only update the last persisted block number if it's greater than the current one + if blockNumber > db.getLastPersistedBlockNumber().valueOr(0): + db.put(rlp.encode("lastPersistedBlockNumber"), rlp.encode(blockNumber)) proc runBackfillCollectBlockDataLoop( db: DatabaseRef, @@ -132,6 +135,8 @@ proc runBackfillBuildBlockOffersLoop( db: DatabaseRef, blockDataQueue: AsyncQueue[BlockData], blockOffersQueue: AsyncQueue[BlockOffersRef], + verifyStateProofs: bool, + gossipGenesis: bool, ) {.async: (raises: [CancelledError]).} = info "Starting state backfill build block offers loop" @@ -158,20 +163,21 @@ proc runBackfillBuildBlockOffersLoop( raiseAssert(e.msg) # Should never happen ws.applyGenesisAccounts(genesisAccounts) - let genesisBlockHash = KeccakHash.fromHex( - "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" - ) - var builder = OffersBuilderRef.init(ws, genesisBlockHash) - builder.buildBlockOffers() - - await blockOffersQueue.addLast( - BlockOffersRef( - blockNumber: 0.uint64, - accountTrieOffers: builder.getAccountTrieOffers(), - contractTrieOffers: builder.getContractTrieOffers(), - contractCodeOffers: builder.getContractCodeOffers(), + if gossipGenesis: + let genesisBlockHash = KeccakHash.fromHex( + "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" + ) + var builder = OffersBuilder.init(ws, genesisBlockHash) + builder.buildBlockOffers() + + await blockOffersQueue.addLast( + BlockOffersRef( + blockNumber: 0.uint64, + accountTrieOffers: builder.getAccountTrieOffers(), + contractTrieOffers: builder.getContractTrieOffers(), + contractCodeOffers: builder.getContractCodeOffers(), + ) ) - ) # Load the world state using the parent state root let worldState = WorldStateRef.init(db, firstBlock.parentStateRoot) @@ -204,9 +210,10 @@ proc runBackfillBuildBlockOffersLoop( trace "State diffs successfully applied to block number:", blockNumber = blockData.blockNumber - # worldState.verifyProofs(blockData.parentStateRoot, blockData.stateRoot) + if verifyStateProofs: + worldState.verifyProofs(blockData.parentStateRoot, blockData.stateRoot) - var builder = OffersBuilderRef.init(worldState, blockData.blockHash) + var builder = OffersBuilder.init(worldState, blockData.blockHash) builder.buildBlockOffers() await blockOffersQueue.addLast( @@ -223,50 +230,95 @@ proc runBackfillBuildBlockOffersLoop( # to enable restarting from this block if needed db.putLastPersistedBlockNumber(blockData.blockNumber) -proc gossipOffer( - portalClient: RpcClient, +proc collectOffer( + offersMap: TableRef[seq[byte], seq[byte]], offerWithKey: AccountTrieOfferWithKey | 
ContractTrieOfferWithKey | ContractCodeOfferWithKey, -) {.async: (raises: [CancelledError]).} = - let - keyBytes = offerWithKey.key.toContentKey().encode().asSeq() - offerBytes = offerWithKey.offer.encode() - try: - let numPeers = - await portalClient.portal_stateGossip(keyBytes.to0xHex(), offerBytes.to0xHex()) - debug "Gossiping offer to peers: ", offerKey = keyBytes.to0xHex(), numPeers - except CatchableError as e: - raiseAssert(e.msg) # Should never happen +) {.inline.} = + let keyBytes = offerWithKey.key.toContentKey().encode().asSeq() + offersMap[keyBytes] = offerWithKey.offer.encode() -proc recursiveGossipOffer( - portalClient: RpcClient, +proc recursiveCollectOffer( + offersMap: TableRef[seq[byte], seq[byte]], offerWithKey: AccountTrieOfferWithKey | ContractTrieOfferWithKey, -) {.async: (raises: [CancelledError]).} = - await portalClient.gossipOffer(offerWithKey) +) = + offersMap.collectOffer(offerWithKey) - # root node, recursive gossip is finished + # root node, recursive collect is finished if offerWithKey.key.path.unpackNibbles().len() == 0: return - # continue the recursive gossip by sharing the parent offer with peers - await portalClient.recursiveGossipOffer(offerWithKey.getParent()) + # continue the recursive collect + offersMap.recursiveCollectOffer(offerWithKey.getParent()) proc runBackfillGossipBlockOffersLoop( - blockOffersQueue: AsyncQueue[BlockOffersRef], portalClient: RpcClient + blockOffersQueue: AsyncQueue[BlockOffersRef], + portalClient: RpcClient, + verifyGossip: bool, + workerId: int, ) {.async: (raises: [CancelledError]).} = - info "Starting state backfill gossip block offers loop" + info "Starting state backfill gossip block offers loop", workerId + + var blockOffers = await blockOffersQueue.popFirst() while true: - let blockOffers = await blockOffersQueue.popFirst() + # A table of offer key, value pairs is used to filter out duplicates so + # that we don't gossip the same offer multiple times. 
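The comment above captures the key change in this refactored loop: instead of gossiping each offer (and its chain of parent nodes) over JSON-RPC as it is encountered, all offers for a block are first collected into a table keyed by the encoded content key, so parent trie nodes shared by several leaf branches are sent only once. A minimal sketch of that dedup idea in isolation, using a hypothetical simplified OfferNode type in place of the real offer/key types from state_gossip.nim:

```nim
import std/tables

# Hypothetical stand-in for the real offer types: each offer carries its
# encoded content key and value, plus a link to its parent trie node offer
# (nil once the root is reached).
type OfferNode = ref object
  keyBytes: seq[byte]
  offerBytes: seq[byte]
  parent: OfferNode

proc recursiveCollectOffer(offersMap: TableRef[seq[byte], seq[byte]], offer: OfferNode) =
  # Keying by the encoded content key collapses duplicates: a parent node
  # reached from several leaf branches lands in the map exactly once.
  offersMap[offer.keyBytes] = offer.offerBytes
  if offer.parent.isNil:
    return # root node, the recursive collect is finished
  offersMap.recursiveCollectOffer(offer.parent)

when isMainModule:
  let
    root = OfferNode(keyBytes: @[0'u8], offerBytes: @[10'u8])
    leafA = OfferNode(keyBytes: @[1'u8], offerBytes: @[11'u8], parent: root)
    leafB = OfferNode(keyBytes: @[2'u8], offerBytes: @[12'u8], parent: root)
    offersMap = newTable[seq[byte], seq[byte]]()
  offersMap.recursiveCollectOffer(leafA)
  offersMap.recursiveCollectOffer(leafB)
  doAssert offersMap.len == 3 # two leaves plus the shared root, stored once
```

The real loop below then iterates this map and gossips each entry via portal_stateGossip, retrying the whole block if any offer fails to reach a peer.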
+ let offersMap = newTable[seq[byte], seq[byte]]() for offerWithKey in blockOffers.accountTrieOffers: - await portalClient.recursiveGossipOffer(offerWithKey) - + offersMap.recursiveCollectOffer(offerWithKey) for offerWithKey in blockOffers.contractTrieOffers: - await portalClient.recursiveGossipOffer(offerWithKey) - + offersMap.recursiveCollectOffer(offerWithKey) for offerWithKey in blockOffers.contractCodeOffers: - await portalClient.gossipOffer(offerWithKey) + offersMap.collectOffer(offerWithKey) + + var retryGossip = false + for k, v in offersMap: + try: + let numPeers = await portalClient.portal_stateGossip(k.to0xHex(), v.to0xHex()) + if numPeers == 0: + warn "Offer gossiped to no peers", workerId + retryGossip = true + break + except CatchableError as e: + error "Failed to gossip offer to peers", error = e.msg, workerId + retryGossip = true + break + + if retryGossip: + await sleepAsync(1.seconds) + warn "Retrying state gossip for block number: ", + blockNumber = blockOffers.blockNumber, workerId + continue + + if verifyGossip: + #await sleepAsync(100.milliseconds) # wait for the peers to be updated + for k, _ in offersMap: + try: + let contentInfo = + await portalClient.portal_stateRecursiveFindContent(k.to0xHex()) + if contentInfo.content.len() == 0: + error "Found empty contentValue", workerId + retryGossip = true + break + except CatchableError as e: + error "Failed to find content with key: ", + contentKey = k, error = e.msg, workerId + retryGossip = true + break + + if retryGossip: + await sleepAsync(1.seconds) + warn "Retrying state gossip for block number: ", + blockNumber = blockOffers.blockNumber + continue + + if blockOffers.blockNumber mod 1000 == 0: + info "Finished gossiping offers for block number: ", + workerId, blockNumber = blockOffers.blockNumber, offerCount = offersMap.len() + + blockOffers = await blockOffersQueue.popFirst() proc runBackfillMetricsLoop( blockDataQueue: AsyncQueue[BlockData], blockOffersQueue: AsyncQueue[BlockOffersRef] @@ -275,8 +327,12 @@ proc runBackfillMetricsLoop( while true: await sleepAsync(10.seconds) - info "Block data queue length: ", blockDataQueueLen = blockDataQueue.len() - info "Block offers queue length: ", blockOffersQueueLen = blockOffersQueue.len() + info "Block data queue metrics: ", + nextBlockNumber = blockDataQueue[0].blockNumber, + blockDataQueueLen = blockDataQueue.len() + info "Block offers queue metrics: ", + nextBlockNumber = blockOffersQueue[0].blockNumber, + blockOffersQueueLen = blockOffersQueue.len() proc runState*(config: PortalBridgeConf) = let @@ -289,47 +345,41 @@ proc runState*(config: PortalBridgeConf) = if web3Client of RpcHttpClient: warn "Using a WebSocket connection to the JSON-RPC API is recommended to improve performance" - # TODO: - # Here we'd want to implement initially a loop that backfills the state - # content. Secondly, a loop that follows the head and injects the latest - # state changes too. - # - # The first step would probably be the easier one to start with, as one - # can start from genesis state. - # It could be implemented by using the `exp_getProofsByBlockNumber` JSON-RPC - # method from nimbus-eth1. - # It could also be implemented by having the whole state execution happening - # inside the bridge, and getting the blocks from era1 files.
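As wired up in runState below, gossipWorkersCount copies of this gossip loop are spawned over a single shared AsyncQueue, so several blocks can be gossiped concurrently while the retry logic stays per worker. A rough, self-contained sketch of that fan-out pattern with chronos; fakeGossip is a hypothetical stand-in for the portal_stateGossip RPC call:

```nim
import chronos

# Hypothetical stand-in for the portal_stateGossip RPC: returns the number
# of peers that accepted the offers for this block (always 1 here).
proc fakeGossip(blockNumber: uint64): Future[int] {.async.} =
  return 1

proc gossipWorker(queue: AsyncQueue[uint64], workerId: int) {.async.} =
  while true:
    let blockNumber = await queue.popFirst()
    var numPeers = await fakeGossip(blockNumber)
    while numPeers == 0:
      # mirror the retry above: back off, then re-gossip the same block
      await sleepAsync(1.seconds)
      numPeers = await fakeGossip(blockNumber)
    echo "worker ", workerId, ": block ", blockNumber, " reached ", numPeers, " peers"

when isMainModule:
  let queue = newAsyncQueue[uint64](1000)
  for workerId in 1 .. 2: # cf. the new --gossip-workers option (default 2)
    asyncSpawn gossipWorker(queue, workerId)
  for blockNumber in 1'u64 .. 10'u64:
    waitFor queue.addLast(blockNumber)
  waitFor sleepAsync(100.milliseconds) # give the workers time to drain the queue
```

Because every worker pops from the same queue, block order across workers is not guaranteed; that appears acceptable here since each BlockOffersRef carries everything needed to gossip its own block.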
- - if config.backfillState: - let maybeLastPersistedBlock = db.getLastPersistedBlockNumber() - if maybeLastPersistedBlock.isSome(): - info "Last persisted block found in the database: ", - lastPersistedBlock = maybeLastPersistedBlock.get() - if config.startBlockNumber < 1 or - config.startBlockNumber > maybeLastPersistedBlock.get(): - warn "Start block must be set to a value between 1 and the last persisted block" - quit QuitFailure - else: - info "No last persisted block found in the database" - if config.startBlockNumber != 1: - warn "Start block must be set to 1" - quit QuitFailure - - info "Starting state backfill from block number: ", - startBlockNumber = config.startBlockNumber - - const bufferSize = 1000 # Should we make this configurable? - let - blockDataQueue = newAsyncQueue[BlockData](bufferSize) - blockOffersQueue = newAsyncQueue[BlockOffersRef](bufferSize) - - asyncSpawn runBackfillCollectBlockDataLoop( - db, blockDataQueue, web3Client, config.startBlockNumber + let maybeLastPersistedBlock = db.getLastPersistedBlockNumber() + if maybeLastPersistedBlock.isSome(): + info "Last persisted block found in the database: ", + lastPersistedBlock = maybeLastPersistedBlock.get() + if config.startBlockNumber < 1 or + config.startBlockNumber > maybeLastPersistedBlock.get(): + warn "Start block must be set to a value between 1 and the last persisted block" + quit QuitFailure + else: + info "No last persisted block found in the database" + if config.startBlockNumber != 1: + warn "Start block must be set to 1" + quit QuitFailure + + info "Starting state backfill from block number: ", + startBlockNumber = config.startBlockNumber + + let + bufferSize = 1000 + blockDataQueue = newAsyncQueue[BlockData](bufferSize) + blockOffersQueue = newAsyncQueue[BlockOffersRef](bufferSize) + + asyncSpawn runBackfillCollectBlockDataLoop( + db, blockDataQueue, web3Client, config.startBlockNumber + ) + asyncSpawn runBackfillBuildBlockOffersLoop( + db, blockDataQueue, blockOffersQueue, config.verifyStateProofs, config.gossipGenesis + ) + + for workerId in 1 .. 
config.gossipWorkersCount.int: + asyncSpawn runBackfillGossipBlockOffersLoop( + blockOffersQueue, portalClient, config.verifyGossip, workerId ) - asyncSpawn runBackfillBuildBlockOffersLoop(db, blockDataQueue, blockOffersQueue) - asyncSpawn runBackfillGossipBlockOffersLoop(blockOffersQueue, portalClient) - asyncSpawn runBackfillMetricsLoop(blockDataQueue, blockOffersQueue) + + asyncSpawn runBackfillMetricsLoop(blockDataQueue, blockOffersQueue) while true: poll() diff --git a/fluffy/tools/portal_bridge/state_bridge/offers_builder.nim b/fluffy/tools/portal_bridge/state_bridge/offers_builder.nim index 7a19a3eeb4..91579a6976 100644 --- a/fluffy/tools/portal_bridge/state_bridge/offers_builder.nim +++ b/fluffy/tools/portal_bridge/state_bridge/offers_builder.nim @@ -13,28 +13,26 @@ import ../../../network/state/[state_content, state_utils, state_gossip], ./world_state -type OffersBuilderRef* = ref object +type OffersBuilder* = object worldState: WorldStateRef blockHash: BlockHash accountTrieOffers: seq[AccountTrieOfferWithKey] contractTrieOffers: seq[ContractTrieOfferWithKey] contractCodeOffers: seq[ContractCodeOfferWithKey] -proc init*( - T: type OffersBuilderRef, worldState: WorldStateRef, blockHash: BlockHash -): T = +proc init*(T: type OffersBuilder, worldState: WorldStateRef, blockHash: BlockHash): T = T(worldState: worldState, blockHash: blockHash) proc toTrieProof(proof: seq[seq[byte]]): TrieProof = TrieProof.init(proof.map((node) => TrieNode.init(node))) proc buildAccountTrieNodeOffer( - builder: var OffersBuilderRef, address: EthAddress, proof: TrieProof + builder: var OffersBuilder, addressHash: content_keys.AddressHash, proof: TrieProof ) = try: let path = removeLeafKeyEndNibbles( - Nibbles.init(worldState.toAccountKey(address).data, isEven = true), proof[^1] + Nibbles.init(addressHash.data, isEven = true), proof[^1] ) offerKey = AccountTrieNodeKey.init(path, keccakHash(proof[^1].asSeq())) offerValue = AccountTrieNodeOffer.init(proof, builder.blockHash) @@ -44,53 +42,65 @@ proc buildAccountTrieNodeOffer( raiseAssert(e.msg) # Should never happen proc buildContractTrieNodeOffer( - builder: var OffersBuilderRef, - address: EthAddress, + builder: var OffersBuilder, + addressHash: content_keys.AddressHash, slotHash: SlotKeyHash, storageProof: TrieProof, accountProof: TrieProof, ) = - let - path = Nibbles.init(slotHash.data, isEven = true) - offerKey = - ContractTrieNodeKey.init(address, path, keccakHash(storageProof[^1].asSeq())) - offerValue = - ContractTrieNodeOffer.init(storageProof, accountProof, builder.blockHash) + try: + let + path = removeLeafKeyEndNibbles( + Nibbles.init(slotHash.data, isEven = true), storageProof[^1] + ) + offerKey = ContractTrieNodeKey.init( + addressHash, path, keccakHash(storageProof[^1].asSeq()) + ) + offerValue = + ContractTrieNodeOffer.init(storageProof, accountProof, builder.blockHash) - builder.contractTrieOffers.add(offerValue.withKey(offerKey)) + builder.contractTrieOffers.add(offerValue.withKey(offerKey)) + except RlpError as e: + raiseAssert(e.msg) # Should never happen proc buildContractCodeOffer( - builder: var OffersBuilderRef, - address: EthAddress, + builder: var OffersBuilder, + addressHash: content_keys.AddressHash, code: seq[byte], accountProof: TrieProof, ) = let #bytecode = Bytelist.init(code) # This fails to compile for some reason bytecode = List[byte, MAX_BYTECODE_LEN](code) - offerKey = ContractCodeKey.init(address, keccakHash(code)) + offerKey = ContractCodeKey.init(addressHash, keccakHash(code)) offerValue = 
ContractCodeOffer.init(bytecode, accountProof, builder.blockHash) builder.contractCodeOffers.add(offerValue.withKey(offerKey)) -proc buildBlockOffers*(builder: var OffersBuilderRef) = - for address, proof in builder.worldState.updatedAccountProofs(): +proc buildBlockOffers*(builder: var OffersBuilder) = + for addressHash, proof in builder.worldState.updatedAccountProofs(): let accountProof = toTrieProof(proof) - builder.buildAccountTrieNodeOffer(address, accountProof) + builder.buildAccountTrieNodeOffer(addressHash, accountProof) - for slotHash, sProof in builder.worldState.updatedStorageProofs(address): + for slotHash, sProof in builder.worldState.updatedStorageProofs(addressHash): let storageProof = toTrieProof(sProof) - builder.buildContractTrieNodeOffer(address, slotHash, storageProof, accountProof) + builder.buildContractTrieNodeOffer( + addressHash, slotHash, storageProof, accountProof + ) - let code = builder.worldState.getUpdatedBytecode(address) + let code = builder.worldState.getUpdatedBytecode(addressHash) if code.len() > 0: - builder.buildContractCodeOffer(address, code, accountProof) + builder.buildContractCodeOffer(addressHash, code, accountProof) -proc getAccountTrieOffers*(builder: OffersBuilderRef): seq[AccountTrieOfferWithKey] = +proc getAccountTrieOffers*(builder: OffersBuilder): lent seq[AccountTrieOfferWithKey] = builder.accountTrieOffers -proc getContractTrieOffers*(builder: OffersBuilderRef): seq[ContractTrieOfferWithKey] = +proc getContractTrieOffers*( + builder: OffersBuilder +): lent seq[ContractTrieOfferWithKey] = builder.contractTrieOffers -proc getContractCodeOffers*(builder: OffersBuilderRef): seq[ContractCodeOfferWithKey] = +proc getContractCodeOffers*( + builder: OffersBuilder +): lent seq[ContractCodeOfferWithKey] = builder.contractCodeOffers diff --git a/fluffy/tools/portal_bridge/state_bridge/world_state.nim b/fluffy/tools/portal_bridge/state_bridge/world_state.nim index 56c5d482d3..38aed481ea 100644 --- a/fluffy/tools/portal_bridge/state_bridge/world_state.nim +++ b/fluffy/tools/portal_bridge/state_bridge/world_state.nim @@ -109,9 +109,7 @@ proc setAccountPreimage( ) = state.preimagesDb.put(rlp.encode(accountKey), rlp.encode(address)) -proc getAccount*(state: WorldStateRef, address: EthAddress): AccountState = - let accountKey = toAccountKey(address) - +proc getAccount(state: WorldStateRef, accountKey: AddressHash): AccountState = try: if state.accountsTrie.contains(accountKey.data): let accountBytes = state.accountsTrie.get(accountKey.data) @@ -121,6 +119,9 @@ proc getAccount*(state: WorldStateRef, address: EthAddress): AccountState = except RlpError as e: raiseAssert(e.msg) # should never happen unless the database is corrupted +proc getAccount*(state: WorldStateRef, address: EthAddress): AccountState {.inline.} = + state.getAccount(toAccountKey(address)) + proc setAccount*(state: WorldStateRef, address: EthAddress, accState: AccountState) = let accountKey = toAccountKey(address) state.setAccountPreimage(accountKey, address) @@ -166,7 +167,7 @@ proc deleteAccount*(state: WorldStateRef, address: EthAddress) = raiseAssert(e.msg) # should never happen unless the database is corrupted # Returns the account proofs for all the updated accounts from the last transaction -iterator updatedAccountProofs*(state: WorldStateRef): (EthAddress, seq[seq[byte]]) = +iterator updatedAccountProofs*(state: WorldStateRef): (AddressHash, seq[seq[byte]]) = let trie = initHexaryTrie( state.db.getAccountsUpdatedCache(), state.stateRoot(), isPruning = false ) @@ -175,16 +176,15 
@@ iterator updatedAccountProofs*(state: WorldStateRef): (EthAddress, seq[seq[byte] for key in trie.keys(): if key.len() == 0: continue # skip the empty node created on initialization - let address = state.getAccountPreimage(KeccakHash.fromBytes(key)) - yield (address, trie.getBranch(key)) + yield (KeccakHash.fromBytes(key), trie.getBranch(key)) except RlpError as e: raiseAssert(e.msg) # should never happen unless the database is corrupted # Returns the storage proofs for the updated slots for the given account from the last transaction iterator updatedStorageProofs*( - state: WorldStateRef, address: EthAddress + state: WorldStateRef, accountKey: AddressHash ): (SlotKeyHash, seq[seq[byte]]) = - let accState = state.getAccount(address) + let accState = state.getAccount(accountKey) let trie = initHexaryTrie( state.db.getStorageUpdatedCache(), accState.account.storageRoot, isPruning = false @@ -199,6 +199,54 @@ iterator updatedStorageProofs*( raiseAssert(e.msg) # should never happen unless the database is corrupted proc getUpdatedBytecode*( - state: WorldStateRef, address: EthAddress + state: WorldStateRef, accountKey: AddressHash ): seq[byte] {.inline.} = - state.db.getBytecodeUpdatedCache().get(toAccountKey(address).data) + state.db.getBytecodeUpdatedCache().get(accountKey.data) + +# Slow: Used for testing only +proc verifyProofs*( + state: WorldStateRef, preStateRoot: KeccakHash, expectedStateRoot: KeccakHash +) = + try: + let trie = + initHexaryTrie(state.db.getAccountsBackend(), preStateRoot, isPruning = false) + + var memDb = newMemoryDB() + for k, v in trie.replicate(): + memDb.put(k, v) + + for accountKey, proof in state.updatedAccountProofs(): + doAssert isValidBranch( + proof, + expectedStateRoot, + @(accountKey.data), + rlpFromBytes(proof[^1]).listElem(1).toBytes(), # pull the value out of the proof + ) + for p in proof: + memDb.put(keccakHash(p).data, p) + + let memTrie = initHexaryTrie(memDb, expectedStateRoot, isPruning = false) + doAssert(memTrie.rootHash() == expectedStateRoot) + + for accountKey, proof in state.updatedAccountProofs(): + let + accountBytes = memTrie.get(accountKey.data) + account = rlp.decode(accountBytes, Account) + doAssert(accountBytes.len() > 0) + doAssert(accountBytes == rlpFromBytes(proof[^1]).listElem(1).toBytes()) + # pull the value out of the proof + + for slotHash, sProof in state.updatedStorageProofs(accountKey): + doAssert isValidBranch( + sProof, + account.storageRoot, + @(slotHash.data), + rlpFromBytes(sProof[^1]).listElem(1).toBytes(), + # pull the value out of the proof + ) + + let updatedCode = state.getUpdatedBytecode(accountKey) + if updatedCode.len() > 0: + doAssert(account.codeHash == keccakHash(updatedCode)) + except RlpError as e: + raiseAssert(e.msg) # Should never happen diff --git a/hive_integration/nodocker/consensus/consensus_sim.nim b/hive_integration/nodocker/consensus/consensus_sim.nim index 46d29b99f6..2d0b1e25e2 100644 --- a/hive_integration/nodocker/consensus/consensus_sim.nim +++ b/hive_integration/nodocker/consensus/consensus_sim.nim @@ -27,9 +27,8 @@ proc processChainData(cd: ChainData): TestStatus = cd.params ) - com.initializeEmptyDb() - let c = newForkedChain(com, com.genesisHeader) + for bytes in cd.blocksRlp: # ignore return value here # because good blocks maybe interleaved with diff --git a/hive_integration/nodocker/engine/engine_env.nim b/hive_integration/nodocker/engine/engine_env.nim index b8eed02769..fb8b58bef8 100644 --- a/hive_integration/nodocker/engine/engine_env.nim +++ 
b/hive_integration/nodocker/engine/engine_env.nim @@ -90,7 +90,6 @@ proc newEngineEnv*(conf: var NimbusConf, chainFile: string, enableAuth: bool): E com = makeCom(conf) chain = newChain(com) - com.initializeEmptyDb() let txPool = TxPoolRef.new(com) node.addEthHandlerCapability( diff --git a/hive_integration/nodocker/graphql/graphql_sim.nim b/hive_integration/nodocker/graphql/graphql_sim.nim index ba2c919d24..969221c2d9 100644 --- a/hive_integration/nodocker/graphql/graphql_sim.nim +++ b/hive_integration/nodocker/graphql/graphql_sim.nim @@ -83,7 +83,6 @@ proc main() = conf.networkParams ) - com.initializeEmptyDb() let txPool = TxPoolRef.new(com) discard importRlpBlock(blocksFile, com) let ctx = setupGraphqlContext(com, ethNode, txPool) diff --git a/hive_integration/nodocker/rpc/test_env.nim b/hive_integration/nodocker/rpc/test_env.nim index 631fcd876c..6e8ab3a933 100644 --- a/hive_integration/nodocker/rpc/test_env.nim +++ b/hive_integration/nodocker/rpc/test_env.nim @@ -81,7 +81,6 @@ proc setupEnv*(): TestEnv = ) manageAccounts(ethCtx, conf) - com.initializeEmptyDb() let chainRef = newChain(com) let txPool = TxPoolRef.new(com) diff --git a/nimbus/beacon/api_handler/api_forkchoice.nim b/nimbus/beacon/api_handler/api_forkchoice.nim index 9d023bc9e8..206069771c 100644 --- a/nimbus/beacon/api_handler/api_forkchoice.nim +++ b/nimbus/beacon/api_handler/api_forkchoice.nim @@ -224,7 +224,7 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef, raise invalidAttr(error) let id = computePayloadId(blockHash, attrs) - ben.put(id, ben.blockValue, bundle.executionPayload, bundle.blobsBundle) + ben.put(id, bundle.blockValue, bundle.executionPayload, bundle.blobsBundle) info "Created payload for sealing", id = id.toHex, diff --git a/nimbus/beacon/api_handler/api_getbodies.nim b/nimbus/beacon/api_handler/api_getbodies.nim index d38495476c..407f88a763 100644 --- a/nimbus/beacon/api_handler/api_getbodies.nim +++ b/nimbus/beacon/api_handler/api_getbodies.nim @@ -23,7 +23,7 @@ const proc getPayloadBodyByHeader(db: CoreDbRef, header: common.BlockHeader, - output: var seq[Opt[ExecutionPayloadBodyV1]]) = + output: var seq[Opt[ExecutionPayloadBodyV1]]) {.gcsafe, raises:[].} = var body: common.BlockBody if not db.getBlockBody(header, body): diff --git a/nimbus/beacon/beacon_engine.nim b/nimbus/beacon/beacon_engine.nim index a3855b0c02..1549577dcf 100644 --- a/nimbus/beacon/beacon_engine.nim +++ b/nimbus/beacon/beacon_engine.nim @@ -165,11 +165,6 @@ func posFinalized*(ben: BeaconEngineRef): bool = ## PoSFinalized reports whether the chain has entered the PoS stage. 
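The `forkchoiceUpdated` fix above (`ben.blockValue` becoming `bundle.blockValue`) goes together with the removal of the pool-level `blockValue` getter just below: reading the value from shared mutable pool state could pair a payload with the value of a different build, so the value now travels inside the bundle produced for exactly that payload. A toy sketch of the pattern (simplified types, not the nimbus API):

```nim
type PayloadBundle = object
  # Simplified stand-ins for ExecutionPayloadAndBlobsBundle fields.
  executionPayload: string
  blockValue: uint64 # fee-recipient reward for exactly this payload

proc generatePayload(payload: string; value: uint64): PayloadBundle =
  # The packer computes the value while building the payload, so the two
  # can never go out of sync.
  PayloadBundle(executionPayload: payload, blockValue: value)

let a = generatePayload("payload-A", 7)
let b = generatePayload("payload-B", 9)
# Each bundle keeps its own value; there is no shared pool field to race on.
doAssert a.blockValue == 7 and b.blockValue == 9
```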
ben.merge.posFinalized -func blockValue*(ben: BeaconEngineRef): UInt256 = - ## return sum of reward for feeRecipient for each - ## tx included in a block - ben.txPool.blockValue - proc get*(ben: BeaconEngineRef, hash: common.Hash256, header: var common.BlockHeader): bool = ben.queue.get(hash, header) @@ -208,6 +203,7 @@ proc get*(ben: BeaconEngineRef, id: PayloadID, type ExecutionPayloadAndBlobsBundle* = object executionPayload*: ExecutionPayload blobsBundle*: Opt[BlobsBundleV1] + blockValue*: UInt256 proc generatePayload*(ben: BeaconEngineRef, attrs: PayloadAttributes): @@ -252,7 +248,8 @@ proc generatePayload*(ben: BeaconEngineRef, ok ExecutionPayloadAndBlobsBundle( executionPayload: executionPayload(bundle.blk), - blobsBundle: blobsBundle) + blobsBundle: blobsBundle, + blockValue: bundle.blockValue) proc setInvalidAncestor*(ben: BeaconEngineRef, header: common.BlockHeader, blockHash: common.Hash256) = ben.invalidBlocksHits[blockHash] = 1 diff --git a/nimbus/common/common.nim b/nimbus/common/common.nim index ba7edf4428..a0452cd780 100644 --- a/nimbus/common/common.nim +++ b/nimbus/common/common.nim @@ -119,6 +119,50 @@ func daoCheck(conf: ChainConfig) = if conf.daoForkSupport and conf.daoForkBlock.isNone: conf.daoForkBlock = conf.homesteadBlock +proc initializeDb(com: CommonRef) = + let kvt = com.db.ctx.getKvt() + proc contains(kvt: CoreDbKvtRef; key: openArray[byte]): bool = + kvt.hasKey(key).expect "valid bool" + if canonicalHeadHashKey().toOpenArray notin kvt: + info "Writing genesis to DB" + doAssert(com.genesisHeader.number == 0.BlockNumber, + "can't commit genesis block with number > 0") + doAssert(com.db.persistHeader(com.genesisHeader, + com.consensusType == ConsensusType.POS, + startOfHistory=com.genesisHeader.parentHash), + "can persist genesis header") + doAssert(canonicalHeadHashKey().toOpenArray in kvt) + + # The database must at least contain the base and head pointers - the base + # is implicitly considered finalized + let + baseNum = com.db.getSavedStateBlockNumber() + base = + try: + com.db.getBlockHeader(baseNum) + except BlockNotFound as exc: + fatal "Cannot load base block header", + baseNum, err = exc.msg + quit 1 + finalized = + try: + com.db.finalizedHeader() + except BlockNotFound as exc: + debug "No finalized block stored in database, reverting to base" + base + head = + try: + com.db.getCanonicalHead() + except EVMError as exc: + fatal "Cannot load canonical block header", + err = exc.msg + quit 1 + + info "Database initialized", + base = (base.blockHash, base.number), + finalized = (finalized.blockHash, finalized.number), + head = (head.blockHash, head.number) + proc init(com : CommonRef, db : CoreDbRef, networkId : NetworkId, @@ -174,6 +218,8 @@ proc init(com : CommonRef, # By default, history begins at genesis. 
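Note the fallback order in the new `initializeDb` above: the base block is implicitly considered finalized, so a database without a stored finalized header reverts to base instead of failing. A small sketch of that rule (hypothetical `Header` type; the real code reads from `CoreDbRef` and also loads the canonical head):

```nim
import std/options

type Header = object # hypothetical stand-in for BlockHeader
  number: uint64

proc finalizedOrBase(stored: Option[Header]; base: Header): Header =
  # "No finalized block stored in database, reverting to base": the base
  # pointer is implicitly finalized, so it is a safe default.
  stored.get(base)

let base = Header(number: 100)
doAssert finalizedOrBase(none(Header), base).number == 100
doAssert finalizedOrBase(some(Header(number: 120)), base).number == 120
```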
com.startOfHistory = GENESIS_PARENT_HASH + com.initializeDb() + proc getTd(com: CommonRef, blockHash: Hash256): Opt[DifficultyInt] = var td: DifficultyInt if not com.db.getTd(blockHash, td): @@ -345,20 +391,6 @@ proc consensus*(com: CommonRef, header: BlockHeader): ConsensusType = return com.config.consensusType -proc initializeEmptyDb*(com: CommonRef) = - let kvt = com.db.ctx.getKvt() - proc contains(kvt: CoreDbKvtRef; key: openArray[byte]): bool = - kvt.hasKey(key).expect "valid bool" - if canonicalHeadHashKey().toOpenArray notin kvt: - info "Writing genesis to DB" - doAssert(com.genesisHeader.number == 0.BlockNumber, - "can't commit genesis block with number > 0") - doAssert(com.db.persistHeader(com.genesisHeader, - com.consensusType == ConsensusType.POS, - startOfHistory=com.genesisHeader.parentHash), - "can persist genesis header") - doAssert(canonicalHeadHashKey().toOpenArray in kvt) - proc syncReqNewHead*(com: CommonRef; header: BlockHeader) {.gcsafe, raises: [].} = ## Used by RPC to update the beacon head for snap sync diff --git a/nimbus/core/casper.nim b/nimbus/core/casper.nim index 2d87e6b7f7..794a89ff29 100644 --- a/nimbus/core/casper.nim +++ b/nimbus/core/casper.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2022 Status Research & Development GmbH +# Copyright (c) 2022-2024 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) # * MIT license ([LICENSE-MIT](LICENSE-MIT)) @@ -17,18 +17,6 @@ type withdrawals : seq[Withdrawal] ## EIP-4895 beaconRoot : Hash256 ## EIP-4788 -proc prepare*(ctx: CasperRef, header: var BlockHeader) = - header.coinbase = ctx.feeRecipient - header.timestamp = ctx.timestamp - header.prevRandao = ctx.prevRandao - header.difficulty = DifficultyInt.zero - -proc prepareForSeal*(ctx: CasperRef, header: var BlockHeader) {.gcsafe, raises:[].} = - header.nonce = default(BlockNonce) - header.extraData = @[] # TODO: probably this should be configurable by user? - # this repetition, assigning prevRandao is because how txpool works - header.prevRandao = ctx.prevRandao - # ------------------------------------------------------------------------------ # Getters # ------------------------------------------------------------------------------ diff --git a/nimbus/core/tx_pool.nim b/nimbus/core/tx_pool.nim index e4683e6f1c..f99185bb61 100644 --- a/nimbus/core/tx_pool.nim +++ b/nimbus/core/tx_pool.nim @@ -288,38 +288,6 @@ ## The `flags` parameter holds a set of strategy symbols for how to process ## items and buckets. ## -## *stageItems1559MinFee* -## Stage tx items with `tx.maxFee` at least `minFeePrice`. Other items are -## left or set pending. This symbol affects post-London tx items, only. -## -## *stageItems1559MinTip* -## Stage tx items with `tx.effectiveGasTip(baseFee)` at least -## `minTipPrice`. Other items are considered underpriced and left or set -## pending. This symbol affects post-London tx items, only. -## -## *stageItemsPlMinPrice* -## Stage tx items with `tx.gasPrice` at least `minPreLondonGasPrice`. -## Other items are considered underpriced and left or set pending. This -## symbol affects pre-London tx items, only. -## -## *packItemsMaxGasLimit* -## It set, the *packer* will execute and collect additional items from -## the `staged` bucket while accumulating `gasUsed` as long as -## `maxGasLimit` is not exceeded. If `packItemsTryHarder` flag is also -## set, the *packer* will not stop until at least `hwmGasLimit` is -## reached. 
-## -## Otherwise the *packer* will accumulate up until `trgGasLimit` is -## not exceeded, and not stop until at least `lwmGasLimit` is reached -## in case `packItemsTryHarder` is also set, -## -## *packItemsTryHarder* -## It set, the *packer* will *not* stop accumulaing transactions up until -## the `lwmGasLimit` or `hwmGasLimit` is reached, depending on whether -## the `packItemsMaxGasLimit` is set. Otherwise, accumulating stops -## immediately before the next transaction exceeds `trgGasLimit`, or -## `maxGasLimit` depending on `packItemsMaxGasLimit`. -## ## *autoUpdateBucketsDB* ## Automatically update the state buckets after running batch jobs if the ## `dirtyBuckets` flag is also set. @@ -328,38 +296,11 @@ ## Automatically dispose *pending* or *staged* tx items that were added to ## the state buckets database at least `lifeTime` ago. ## -## *autoZombifyPacked* -## Automatically dispose *packed* tx itemss that were added to -## the state buckets database at least `lifeTime` ago. -## -## *..there might be more strategy symbols..* -## -## hwmTrgPercent -## This parameter implies the size of `hwmGasLimit` which is calculated -## as `max(trgGasLimit, maxGasLimit * lwmTrgPercent / 100)`. -## ## lifeTime ## Txs that stay longer in one of the buckets will be moved to a waste ## basket. From there they will be eventually deleted oldest first when ## the maximum size would be exceeded. ## -## lwmMaxPercent -## This parameter implies the size of `lwmGasLimit` which is calculated -## as `max(minGasLimit, trgGasLimit * lwmTrgPercent / 100)`. -## -## minFeePrice -## Applies no EIP-1559 txs only. Txs are packed if `maxFee` is at least -## that value. -## -## minTipPrice -## For EIP-1559, txs are packed if the expected tip (see `estimatedGasTip()`) -## is at least that value. In compatibility mode for legacy txs, this -## degenerates to `gasPrice - baseFee`. -## -## minPreLondonGasPrice -## For pre-London or legacy txs, this parameter has precedence over -## `minTipPrice`. Txs are packed if the `gasPrice` is at least that value. -## ## priceBump ## There can be only one transaction in the database for the same `sender` ## account and `nonce` value. When adding a transaction with the same @@ -369,69 +310,21 @@ ## ## Read-Only Parameters ## -------------------- -## -## baseFee -## This parameter is derived from the internally cached block chain state. -## The base fee parameter modifies/determines the expected gain when packing -## a new block (is set to *zero* for *pre-London* blocks.) -## -## dirtyBuckets -## If `true`, the state buckets database is ready for re-org if the -## `autoUpdateBucketsDB` flag is also set. -## -## gasLimit -## Taken or derived from the current block chain head, incoming txs that -## exceed this gas limit are stored into the *pending* bucket (maybe -## eligible for staging at the next cycle when the internally cached block -## chain state is updated.) -## ## head ## Cached block chain insertion point, not necessarily the same header as ## retrieved by the `getCanonicalHead()`. This insertion point can be ## adjusted with the `smartHead()` function. -## -## hwmGasLimit -## This parameter is at least `trgGasLimit` and does not exceed -## `maxGasLimit` and can be adjusted by means of setting `hwmMaxPercent`. It -## is used by the packer as a minimum block size if both flags -## `packItemsTryHarder` and `packItemsMaxGasLimit` are set. 
-## -## lwmGasLimit -## This parameter is at least `minGasLimit` and does not exceed -## `trgGasLimit` and can be adjusted by means of setting `lwmTrgPercent`. It -## is used by the packer as a minimum block size if the flag -## `packItemsTryHarder` is set and `packItemsMaxGasLimit` is unset. -## -## maxGasLimit -## This parameter is at least `hwmGasLimit`. It is calculated considering -## the current state of the block chain as represented by the internally -## cached head. This parameter is used by the *packer* as a size limit if -## `packItemsMaxGasLimit` is set. -## -## minGasLimit -## This parameter is calculated considering the current state of the block -## chain as represented by the internally cached head. It can be used for -## verifying that a generated block does not underflow minimum size. -## Underflow can only be happen if there are not enough transaction available -## in the pool. -## -## trgGasLimit -## This parameter is at least `lwmGasLimit` and does not exceed -## `maxGasLimit`. It is calculated considering the current state of the block -## chain as represented by the internally cached head. This parameter is -## used by the *packer* as a size limit if `packItemsMaxGasLimit` is unset. -## + import std/[sequtils, tables], - ./tx_pool/[tx_chain, tx_desc, tx_info, tx_item], + ./tx_pool/[tx_packer, tx_desc, tx_info, tx_item], ./tx_pool/tx_tabs, ./tx_pool/tx_tasks/[ tx_add, tx_bucket, tx_head, - tx_dispose, - tx_packer], + tx_dispose], chronicles, eth/keys, stew/keyed_queue, @@ -444,21 +337,18 @@ export TxItemStatus, TxPoolFlags, TxPoolRef, - TxTabsGasTotals, TxTabsItemsCount, results, tx_desc.startDate, tx_info, - tx_item.GasPrice, - tx_item.`<=`, - tx_item.`<`, tx_item.effectiveGasTip, tx_item.info, tx_item.itemID, tx_item.sender, tx_item.status, tx_item.timeStamp, - tx_item.tx + tx_item.tx, + tx_desc.head {.push raises: [].} @@ -474,8 +364,7 @@ proc maintenanceProcessing(xp: TxPoolRef) ## Tasks to be done after add/del txs processing # Purge expired items - if autoZombifyUnpacked in xp.pFlags or - autoZombifyPacked in xp.pFlags: + if autoZombifyUnpacked in xp.pFlags: # Move transactions older than `xp.lifeTime` to the waste basket. xp.disposeExpiredItems @@ -492,9 +381,9 @@ proc setHead(xp: TxPoolRef; val: BlockHeader) {.gcsafe,raises: [CatchableError].} = ## Update cached block chain insertion point. This will also update the ## internally cached `baseFee` (depends on the block chain state.) - if xp.chain.head != val: - xp.chain.head = val # calculates the new baseFee - xp.txDB.baseFee = xp.chain.baseFee + if xp.head != val: + xp.head = val # calculates the new baseFee + xp.txDB.baseFee = xp.baseFee xp.pDirtyBuckets = true xp.bucketFlushPacked @@ -535,25 +424,18 @@ proc add*(xp: TxPoolRef; tx: PooledTransaction; info = "") ## Variant of `add()` for a single transaction. xp.add(@[tx], info) -proc smartHead*(xp: TxPoolRef; pos: BlockHeader; blindMode = false): bool +proc smartHead*(xp: TxPoolRef; pos: BlockHeader): bool {.gcsafe,raises: [CatchableError].} = ## This function moves the internal head cache (i.e. tx insertion point, ## vmState) and points it to a new block on the chain. ## - ## In standard mode when argument `blindMode` is `false`, it calculates the + ## It calculates the ## txs that need to be added or deleted after moving the insertion point ## head so that the tx-pool will not fail to re-insert queued txs that are ## on the chain, already. Neither will it lose any txs.
After updating the ## internal head cache, the previously calculated actions will be ## applied. ## - ## If the argument `blindMode` is passed `true`, the insertion head is - ## simply set ignoring all changes. This mode makes sense only in very - ## particular circumstances. - if blindMode: - xp.setHead(pos) - return true - let rcDiff = xp.headDiff(pos) if rcDiff.isOk: let changes = rcDiff.value @@ -579,34 +461,18 @@ proc smartHead*(xp: TxPoolRef; pos: BlockHeader; blindMode = false): bool xp.maintenanceProcessing return true -proc triggerReorg*(xp: TxPoolRef) - {.gcsafe,raises: [CatchableError].} = - ## This function triggers a tentative bucket re-org action by setting the - ## `dirtyBuckets` parameter. This re-org action eventually happens only if - ## the `autoUpdateBucketsDB` flag is also set. - xp.pDirtyBuckets = true - xp.maintenanceProcessing - # ------------------------------------------------------------------------------ # Public functions, getters # ------------------------------------------------------------------------------ func com*(xp: TxPoolRef): CommonRef = ## Getter - xp.chain.com - -func baseFee*(xp: TxPoolRef): GasInt = - ## Getter, this parameter modifies/determines the expected gain when packing - xp.chain.baseFee - -func dirtyBuckets*(xp: TxPoolRef): bool = - ## Getter, bucket database is ready for re-org if the `autoUpdateBucketsDB` - ## flag is also set. - xp.pDirtyBuckets + xp.vmState.com type EthBlockAndBlobsBundle* = object blk*: EthBlock blobsBundle*: Opt[BlobsBundle] + blockValue*: UInt256 proc assembleBlock*( xp: TxPoolRef, @@ -624,11 +490,11 @@ proc assembleBlock*( ## Note that this getter runs *ad hoc* all the txs through the VM in ## order to build the block. - xp.packerVmExec().isOkOr: # updates vmState + let pst = xp.packerVmExec().valueOr: # updates vmState return err(error) var blk = EthBlock( - header: xp.chain.getHeader # uses updated vmState + header: pst.assembleHeader # uses updated vmState ) var blobsBundle: BlobsBundle @@ -644,7 +510,7 @@ proc assembleBlock*( for blob in tx.networkPayload.blobs: blobsBundle.blobs.add blob - let com = xp.chain.com + let com = xp.vmState.com if com.isShanghaiOrLater(blk.header.timestamp): blk.withdrawals = Opt.some(com.pos.withdrawals) @@ -664,40 +530,8 @@ proc assembleBlock*( ok EthBlockAndBlobsBundle( blk: blk, - blobsBundle: blobsBundleOpt) - -func gasTotals*(xp: TxPoolRef): TxTabsGasTotals = - ## Getter, retrieves the current gas limit totals per bucket. - xp.txDB.gasTotals - -func flags*(xp: TxPoolRef): set[TxPoolFlags] = - ## Getter, retrieves strategy symbols for how to process items and buckets. - xp.pFlags - -func head*(xp: TxPoolRef): BlockHeader = - ## Getter, cached block chain insertion point. Typocally, this should be the - ## the same header as retrieved by the `getCanonicalHead()` (unless in the - ## middle of a mining update.) - xp.chain.head - -# core/tx_pool.go(435): func (pool *TxPool) GasPrice() *big.Int { -func minFeePrice*(xp: TxPoolRef): GasPrice = - ## Getter, retrieves minimum for the current gas fee enforced by the - ## transaction pool for txs to be packed. This is an EIP-1559 only - ## parameter (see `stage1559MinFee` strategy.) - xp.pMinFeePrice - -func minPreLondonGasPrice*(xp: TxPoolRef): GasPrice = - ## Getter. retrieves, the current gas price enforced by the transaction - ## pool. This is a pre-London parameter (see `packedPlMinPrice` strategy.)
- xp.pMinPlGasPrice - -func minTipPrice*(xp: TxPoolRef): GasPrice = - ## Getter, retrieves minimum for the current gas tip (or priority fee) - ## enforced by the transaction pool. This is an EIP-1559 parameter but it - ## comes with a fall back interpretation (see `stage1559MinTip` strategy.) - ## for legacy transactions. - xp.pMinTipPrice + blobsBundle: blobsBundleOpt, + blockValue: pst.blockValue) # core/tx_pool.go(474): func (pool SetGasPrice,*TxPool) Stats() (int, int) { # core/tx_pool.go(1728): func (t *txLookup) Count() int { @@ -708,52 +542,6 @@ func nItems*(xp: TxPoolRef): TxTabsItemsCount = ## some totals. xp.txDB.nItems -# ------------------------------------------------------------------------------ -# Public functions, setters -# ------------------------------------------------------------------------------ - -func `baseFee=`*(xp: TxPoolRef; val: GasInt) {.raises: [KeyError].} = - ## Setter, sets `baseFee` explicitely witout triggering a packer update. - ## Stil a database update might take place when updating account ranks. - ## - ## Typically, this function would *not* be called but rather the `smartHead()` - ## update would be employed to do the job figuring out the proper value - ## for the `baseFee`. - xp.txDB.baseFee = val - -func `flags=`*(xp: TxPoolRef; val: set[TxPoolFlags]) = - ## Setter, strategy symbols for how to process items and buckets. - xp.pFlags = val - -func `maxRejects=`*(xp: TxPoolRef; val: int) = - ## Setter, the size of the waste basket. This setting becomes effective with - ## the next move of an item into the waste basket. - xp.txDB.maxRejects = val - -# core/tx_pool.go(444): func (pool *TxPool) SetGasPrice(price *big.Int) { -func `minFeePrice=`*(xp: TxPoolRef; val: GasPrice) = - ## Setter for `minFeePrice`. If there was a value change, this function - ## implies `triggerReorg()`. - if xp.pMinFeePrice != val: - xp.pMinFeePrice = val - xp.pDirtyBuckets = true - -# core/tx_pool.go(444): func (pool *TxPool) SetGasPrice(price *big.Int) { -func `minPreLondonGasPrice=`*(xp: TxPoolRef; val: GasPrice) = - ## Setter for `minPlGasPrice`. If there was a value change, this function - ## implies `triggerReorg()`. - if xp.pMinPlGasPrice != val: - xp.pMinPlGasPrice = val - xp.pDirtyBuckets = true - -# core/tx_pool.go(444): func (pool *TxPool) SetGasPrice(price *big.Int) { -func `minTipPrice=`*(xp: TxPoolRef; val: GasPrice) = - ## Setter for `minTipPrice`. If there was a value change, this function - ## implies `triggerReorg()`. - if xp.pMinTipPrice != val: - xp.pMinTipPrice = val - xp.pDirtyBuckets = true - # ------------------------------------------------------------------------------ # Public functions, per-tx-item operations # ------------------------------------------------------------------------------ diff --git a/nimbus/core/tx_pool/tx_chain.nim b/nimbus/core/tx_pool/tx_chain.nim deleted file mode 100644 index 1f91b8c038..0000000000 --- a/nimbus/core/tx_pool/tx_chain.nim +++ /dev/null @@ -1,267 +0,0 @@ -# Nimbus -# Copyright (c) 2022-2024 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or -# http://www.apache.org/licenses/LICENSE-2.0) -# * MIT license ([LICENSE-MIT](LICENSE-MIT) or -# http://opensource.org/licenses/MIT) -# at your option. This file may not be copied, modified, or distributed except -# according to those terms. 
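The `tx_chain.nim` module deleted below derived the next block's base fee via `calcEip1599BaseFee`; the same helper logic reappears in `tx_desc.nim` later in this diff. For reference, a self-contained sketch of the EIP-1559 update rule that the helper implements (standard mainnet parameters assumed; the real code uses UInt256 arithmetic):

```nim
const
  ElasticityMultiplier = 2'u64
  BaseFeeChangeDenominator = 8'u64

proc nextBaseFee(gasLimit, gasUsed, baseFee: uint64): uint64 =
  let target = gasLimit div ElasticityMultiplier
  if gasUsed == target:
    baseFee
  elif gasUsed > target:
    # Increases are floored at 1 wei so the fee keeps moving.
    baseFee + max(1'u64, baseFee * (gasUsed - target) div target div
                         BaseFeeChangeDenominator)
  else:
    baseFee - baseFee * (target - gasUsed) div target div
              BaseFeeChangeDenominator

# A completely full block (2x target) raises the base fee by 12.5%.
doAssert nextBaseFee(30_000_000, 30_000_000, 1_000_000) == 1_125_000
# An empty block lowers it by 12.5%.
doAssert nextBaseFee(30_000_000, 0, 1_000_000) == 875_000
```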
- -{.push raises: [].} - -## Transaction Pool Block Chain Packer Environment -## =============================================== -## - -import - results, - ../../common/common, - ../../constants, - ../../db/ledger, - ../../utils/utils, - ../../evm/state, - ../../evm/types, - ../pow/header, - ../eip4844, - ../casper, - eth/eip1559 - -type - TxChainRef* = ref object ##\ - ## State cache of the transaction environment for creating a new\ - ## block. This state is typically synchrionised with the canonical\ - ## block chain head when updated. - com: CommonRef ## Block chain config - roAcc: ReadOnlyStateDB ## Accounts cache fixed on current sync header - prepHeader: BlockHeader ## Prepared Header from Consensus Engine - - vmState: BaseVMState ## current tx/packer environment - receiptsRoot: Hash256 - logsBloom: BloomFilter - txRoot: Hash256 ## `rootHash` after packing - stateRoot: Hash256 ## `stateRoot` after packing - -# ------------------------------------------------------------------------------ -# Private functions -# ------------------------------------------------------------------------------ -proc baseFeeGet(com: CommonRef; parent: BlockHeader): Opt[UInt256] = - ## Calculates the `baseFee` of the head assuming this is the parent of a - ## new block header to generate. - - # Note that the baseFee is calculated for the next header - if not com.isLondonOrLater(parent.number+1): - return Opt.none(UInt256) - - # If the new block is the first EIP-1559 block, return initial base fee. - if not com.isLondonOrLater(parent.number): - return Opt.some(EIP1559_INITIAL_BASE_FEE) - - Opt.some calcEip1599BaseFee( - parent.gasLimit, - parent.gasUsed, - parent.baseFeePerGas.get(0.u256)) - -proc gasLimitsGet(com: CommonRef; parent: BlockHeader): GasInt = - if com.isLondonOrLater(parent.number+1): - var parentGasLimit = parent.gasLimit - if not com.isLondonOrLater(parent.number): - # Bump by 2x - parentGasLimit = parent.gasLimit * EIP1559_ELASTICITY_MULTIPLIER - calcGasLimit1559(parentGasLimit, desiredLimit = DEFAULT_GAS_LIMIT) - else: - computeGasLimit( - parent.gasUsed, - parent.gasLimit, - gasFloor = DEFAULT_GAS_LIMIT, - gasCeil = DEFAULT_GAS_LIMIT) - -func prepareHeader(dh: TxChainRef) = - dh.com.pos.prepare(dh.prepHeader) - -func prepareForSeal(dh: TxChainRef; header: var BlockHeader) = - dh.com.pos.prepareForSeal(header) - -func getTimestamp(dh: TxChainRef): EthTime = - dh.com.pos.timestamp - -func feeRecipient*(dh: TxChainRef): EthAddress = - ## Getter - dh.com.pos.feeRecipient - -proc resetTxEnv(dh: TxChainRef; parent: BlockHeader) = - # do hardfork transition before - # BaseVMState querying any hardfork/consensus from CommonRef - - let timestamp = dh.getTimestamp() - dh.com.hardForkTransition( - parent.blockHash, parent.number+1, Opt.some(timestamp)) - dh.prepareHeader() - - # we don't consider PoS difficulty here - # because that is handled in vmState - let blockCtx = BlockContext( - timestamp : dh.prepHeader.timestamp, - gasLimit : gasLimitsGet(dh.com, parent), - baseFeePerGas: baseFeeGet(dh.com, parent), - prevRandao : dh.prepHeader.prevRandao, - difficulty : dh.prepHeader.difficulty, - coinbase : dh.feeRecipient, - excessBlobGas: calcExcessBlobGas(parent), - ) - - dh.vmState = BaseVMState.new( - parent = parent, - blockCtx = blockCtx, - com = dh.com) - - dh.txRoot = EMPTY_ROOT_HASH - dh.stateRoot = dh.vmState.parent.stateRoot - -proc update(dh: TxChainRef; parent: BlockHeader) - {.gcsafe,raises: [].} = - - let - db = dh.com.db - acc = LedgerRef.init(db, parent.stateRoot) - - # Keep a separate 
accounts descriptor positioned at the sync point - dh.roAcc = ReadOnlyStateDB(acc) - dh.resetTxEnv(parent) - -# ------------------------------------------------------------------------------ -# Public functions, constructor -# ------------------------------------------------------------------------------ - -proc new*(T: type TxChainRef; com: CommonRef): T - {.gcsafe, raises: [EVMError].} = - ## Constructor - new result - - result.com = com - result.update(com.db.getCanonicalHead) - -# ------------------------------------------------------------------------------ -# Public functions -# ------------------------------------------------------------------------------ - -proc getBalance*(dh: TxChainRef; account: EthAddress): UInt256 = - ## Wrapper around `vmState.readOnlyStateDB.getBalance()` for a `vmState` - ## descriptor positioned at the `dh.head`. This might differ from the - ## `dh.vmState.readOnlyStateDB.getBalance()` which returnes the current - ## balance relative to what has been accumulated by the current packing - ## procedure. - dh.roAcc.getBalance(account) - -proc getNonce*(dh: TxChainRef; account: EthAddress): AccountNonce = - ## Wrapper around `vmState.readOnlyStateDB.getNonce()` for a `vmState` - ## descriptor positioned at the `dh.head`. This might differ from the - ## `dh.vmState.readOnlyStateDB.getNonce()` which returnes the current balance - ## relative to what has been accumulated by the current packing procedure. - dh.roAcc.getNonce(account) - -func baseFee*(dh: TxChainRef): GasInt = - ## Getter, baseFee for the next bock header. This value is auto-generated - ## when a new insertion point is set via `head=`. - if dh.vmState.blockCtx.baseFeePerGas.isSome: - dh.vmState.blockCtx.baseFeePerGas.get.truncate(GasInt) - else: - 0.GasInt - -func excessBlobGas*(dh: TxChainRef): uint64 = - ## Getter, baseFee for the next bock header. This value is auto-generated - ## when a new insertion point is set via `head=`. - dh.vmState.blockCtx.excessBlobGas - -func blobGasUsed*(dh: TxChainRef): uint64 = - dh.vmState.blobGasUsed - -func gasLimit*(dh: TxChainRef): GasInt = - dh.vmState.blockCtx.gasLimit - -proc getHeader*(dh: TxChainRef): BlockHeader - {.gcsafe,raises: [].} = - ## Generate a new header, a child of the cached `head` - result = BlockHeader( - parentHash: dh.vmState.parent.blockHash, - ommersHash: EMPTY_UNCLE_HASH, - coinbase: dh.prepHeader.coinbase, - stateRoot: dh.stateRoot, - txRoot: dh.txRoot, - receiptsRoot: dh.receiptsRoot, - logsBloom: dh.logsBloom, - difficulty: dh.prepHeader.difficulty, - number: dh.vmState.blockNumber, - gasLimit: dh.gasLimit, - gasUsed: dh.vmState.cumulativeGasUsed, - timestamp: dh.prepHeader.timestamp, - # extraData: Blob # signing data - # mixHash: Hash256 # mining hash for given difficulty - # nonce: BlockNonce # mining free vaiable - baseFeePerGas: dh.vmState.blockCtx.baseFeePerGas, - ) - - if dh.com.isShanghaiOrLater(result.timestamp): - result.withdrawalsRoot = Opt.some(calcWithdrawalsRoot(dh.com.pos.withdrawals)) - - if dh.com.isCancunOrLater(result.timestamp): - result.parentBeaconBlockRoot = Opt.some(dh.com.pos.parentBeaconBlockRoot) - result.blobGasUsed = Opt.some dh.blobGasUsed - result.excessBlobGas = Opt.some dh.excessBlobGas - - dh.prepareForSeal(result) - -proc clearAccounts*(dh: TxChainRef) - {.gcsafe,raises: [].} = - ## Reset transaction environment, e.g. 
before packing a new block - dh.resetTxEnv(dh.vmState.parent) - -# ------------------------------------------------------------------------------ -# Public functions, getters -# ------------------------------------------------------------------------------ - -func com*(dh: TxChainRef): CommonRef = - ## Getter - dh.com - -func head*(dh: TxChainRef): BlockHeader = - ## Getter - dh.vmState.parent - -func nextFork*(dh: TxChainRef): EVMFork = - ## Getter, fork of next block - dh.vmState.fork - -func vmState*(dh: TxChainRef): BaseVMState = - ## Getter, `BaseVmState` descriptor based on the current insertion point. - dh.vmState - -# ------------------------------------------------------------------------------ -# Public functions, setters -# ------------------------------------------------------------------------------ - -proc `head=`*(dh: TxChainRef; val: BlockHeader) - {.gcsafe,raises: [].} = - ## Setter, updates descriptor. This setter re-positions the `vmState` and - ## account caches to a new insertion point on the block chain database. - dh.update(val) - -func `receiptsRoot=`*(dh: TxChainRef; val: Hash256) = - ## Setter, implies `gasUsed` - dh.receiptsRoot = val - -func `logsBloom=`*(dh: TxChainRef; val: BloomFilter) = - ## Setter, implies `gasUsed` - dh.logsBloom = val - -func `stateRoot=`*(dh: TxChainRef; val: Hash256) = - ## Setter - dh.stateRoot = val - -func `txRoot=`*(dh: TxChainRef; val: Hash256) = - ## Setter - dh.txRoot = val - -# ------------------------------------------------------------------------------ -# End -# ------------------------------------------------------------------------------ diff --git a/nimbus/core/tx_pool/tx_desc.nim b/nimbus/core/tx_pool/tx_desc.nim index d83bb651d9..62f18b2dc7 100644 --- a/nimbus/core/tx_pool/tx_desc.nim +++ b/nimbus/core/tx_pool/tx_desc.nim @@ -14,60 +14,25 @@ import std/[times], + eth/eip1559, ../../common/common, - ./tx_chain, - ./tx_info, + ../../evm/state, + ../../evm/types, + ../../db/ledger, + ../../constants, + ../pow/header, + ../eip4844, + ../casper, ./tx_item, ./tx_tabs, - ./tx_tabs/tx_sender, # for verify() - eth/keys + ./tx_tabs/tx_sender {.push raises: [].} type - TxPoolCallBackRecursion* = object of Defect - ## Attempt to recurse a call back function - TxPoolFlags* = enum ##\ ## Processing strategy selector symbols - stageItems1559MinFee ##\ - ## Stage tx items with `tx.maxFee` at least `minFeePrice`. Other items - ## are left or set pending. This symbol affects post-London tx items, - ## only. - - stageItems1559MinTip ##\ - ## Stage tx items with `tx.effectiveGasTip(baseFee)` at least - ## `minTipPrice`. Other items are considered underpriced and left - ## or set pending. This symbol affects post-London tx items, only. - - stageItemsPlMinPrice ##\ - ## Stage tx items with `tx.gasPrice` at least `minPreLondonGasPrice`. - ## Other items are considered underpriced and left or set pending. - ## This symbol affects pre-London tx items, only. - - # ----------- - - packItemsMaxGasLimit ##\ - ## It set, the *packer* will execute and collect additional items from - ## the `staged` bucket while accumulating `gasUsed` as long as - ## `maxGasLimit` is not exceeded. If `packItemsTryHarder` flag is also - ## set, the *packer* will not stop until at least `hwmGasLimit` is - ## reached. 
- ## - ## Otherwise the *packer* will accumulate up until `trgGasLimit` is - ## not exceeded, and not stop until at least `lwmGasLimit` is reached - ## in case `packItemsTryHarder` is also set, - - packItemsTryHarder ##\ - ## It set, the *packer* will *not* stop accumulaing transactions up until - ## the `lwmGasLimit` or `hwmGasLimit` is reached, depending on whether - ## the `packItemsMaxGasLimit` is set. Otherwise, accumulating stops - ## immediately before the next transaction exceeds `trgGasLimit`, or - ## `maxGasLimit` depending on `packItemsMaxGasLimit`. - - # ----------- - autoUpdateBucketsDB ##\ ## Automatically update the state buckets after running batch jobs if ## the `dirtyBuckets` flag is also set. @@ -76,14 +41,7 @@ type ## Automatically dispose *pending* or *staged* txs that were queued ## at least `lifeTime` ago. - autoZombifyPacked ##\ - ## Automatically dispose *packed* txs that were queued - ## at least `lifeTime` ago. - TxPoolParam* = tuple ## Getter/setter accessible parameters - minFeePrice: GasPrice ## Gas price enforced by the pool, `gasFeeCap` - minTipPrice: GasPrice ## Desired tip-per-tx target, `effectiveGasTip` - minPlGasPrice: GasPrice ## Desired pre-London min `gasPrice` dirtyBuckets: bool ## Buckets need to be updated doubleCheck: seq[TxItemRef] ## Check items after moving block chain head flags: set[TxPoolFlags] ## Processing strategy symbols @@ -91,15 +49,13 @@ type TxPoolRef* = ref object of RootObj ##\ ## Transaction pool descriptor startDate: Time ## Start date (read-only) + param: TxPoolParam ## Getter/Setter parameters - chain: TxChainRef ## block chain state + vmState: BaseVMState txDB: TxTabsRef ## Transaction lists & tables lifeTime*: times.Duration ## Maximum life time of a tx in the system priceBump*: uint ## Min percentage price when superseding - blockValue*: UInt256 ## Sum of reward received by feeRecipient - - param: TxPoolParam ## Getter/Setter parameters const txItemLifeTime = ##\ @@ -114,15 +70,70 @@ const ## core/tx_pool.go(177) of the geth implementation. 10u - txMinFeePrice = 1.GasPrice - txMinTipPrice = 1.GasPrice - txPoolFlags = {stageItems1559MinTip, - stageItems1559MinFee, - stageItemsPlMinPrice, - packItemsTryHarder, - autoUpdateBucketsDB, + txPoolFlags = {autoUpdateBucketsDB, autoZombifyUnpacked} +# ------------------------------------------------------------------------------ +# Private functions +# ------------------------------------------------------------------------------ + +proc baseFeeGet(com: CommonRef; parent: BlockHeader): Opt[UInt256] = + ## Calculates the `baseFee` of the head assuming this is the parent of a + ## new block header to generate. + + # Note that the baseFee is calculated for the next header + if not com.isLondonOrLater(parent.number+1): + return Opt.none(UInt256) + + # If the new block is the first EIP-1559 block, return initial base fee.
+ if not com.isLondonOrLater(parent.number): + return Opt.some(EIP1559_INITIAL_BASE_FEE) + + Opt.some calcEip1599BaseFee( + parent.gasLimit, + parent.gasUsed, + parent.baseFeePerGas.get(0.u256)) + +proc gasLimitsGet(com: CommonRef; parent: BlockHeader): GasInt = + if com.isLondonOrLater(parent.number+1): + var parentGasLimit = parent.gasLimit + if not com.isLondonOrLater(parent.number): + # Bump by 2x + parentGasLimit = parent.gasLimit * EIP1559_ELASTICITY_MULTIPLIER + calcGasLimit1559(parentGasLimit, desiredLimit = DEFAULT_GAS_LIMIT) + else: + computeGasLimit( + parent.gasUsed, + parent.gasLimit, + gasFloor = DEFAULT_GAS_LIMIT, + gasCeil = DEFAULT_GAS_LIMIT) + +proc setupVMState(com: CommonRef; parent: BlockHeader): BaseVMState = + # Do the hardfork transition before + # BaseVMState queries any hardfork/consensus from CommonRef + + let pos = com.pos + com.hardForkTransition( + parent.blockHash, parent.number+1, Opt.some(pos.timestamp)) + + let blockCtx = BlockContext( + timestamp : pos.timestamp, + gasLimit : gasLimitsGet(com, parent), + baseFeePerGas: baseFeeGet(com, parent), + prevRandao : pos.prevRandao, + difficulty : UInt256.zero(), + coinbase : pos.feeRecipient, + excessBlobGas: calcExcessBlobGas(parent), + ) + + BaseVMState.new( + parent = parent, + blockCtx = blockCtx, + com = com) + +proc update(xp: TxPoolRef; parent: BlockHeader) = + xp.vmState = setupVMState(xp.vmState.com, parent) + # ------------------------------------------------------------------------------ # Public functions, constructor # ------------------------------------------------------------------------------ @@ -132,24 +143,26 @@ proc init*(xp: TxPoolRef; com: CommonRef) ## Constructor, returns new tx-pool descriptor. xp.startDate = getTime().utc.toTime - xp.chain = TxChainRef.new(com) + xp.vmState = setupVMState(com, com.db.getCanonicalHead) xp.txDB = TxTabsRef.new xp.lifeTime = txItemLifeTime xp.priceBump = txPriceBump xp.param.reset - xp.param.minFeePrice = txMinFeePrice - xp.param.minTipPrice = txMinTipPrice xp.param.flags = txPoolFlags # ------------------------------------------------------------------------------ -# Public functions, getters +# Public functions # ------------------------------------------------------------------------------ -func chain*(xp: TxPoolRef): TxChainRef = - ## Getter, block chain DB - xp.chain +proc clearAccounts*(xp: TxPoolRef) = + ## Reset transaction environment, e.g. before packing a new block + xp.update(xp.vmState.parent) + +# ------------------------------------------------------------------------------ +# Public functions, getters +# ------------------------------------------------------------------------------ func pFlags*(xp: TxPoolRef): set[TxPoolFlags] = ## Returns the set of algorithm strategy symbols for labelling items @@ -164,18 +177,6 @@ func pDoubleCheck*(xp: TxPoolRef): seq[TxItemRef] = ## Getter, cached block chain head was moved back xp.param.doubleCheck -func pMinFeePrice*(xp: TxPoolRef): GasPrice = - ## Getter - xp.param.minFeePrice - -func pMinTipPrice*(xp: TxPoolRef): GasPrice = - ## Getter - xp.param.minTipPrice - -func pMinPlGasPrice*(xp: TxPoolRef): GasPrice = - ## Getter - xp.param.minPlGasPrice - func startDate*(xp: TxPoolRef): Time = ## Getter xp.startDate @@ -184,6 +185,47 @@ func txDB*(xp: TxPoolRef): TxTabsRef = ## Getter, pool database xp.txDB +func baseFee*(xp: TxPoolRef): GasInt = + ## Getter, baseFee for the next block header. This value is auto-generated + ## when a new insertion point is set via `head=`.
+ if xp.vmState.blockCtx.baseFeePerGas.isSome: + xp.vmState.blockCtx.baseFeePerGas.get.truncate(GasInt) + else: + 0.GasInt + +func vmState*(xp: TxPoolRef): BaseVMState = + xp.vmState + +func nextFork*(xp: TxPoolRef): EVMFork = + xp.vmState.fork + +func gasLimit*(xp: TxPoolRef): GasInt = + xp.vmState.blockCtx.gasLimit + +func excessBlobGas*(xp: TxPoolRef): uint64 = + xp.vmState.blockCtx.excessBlobGas + +proc getBalance*(xp: TxPoolRef; account: EthAddress): UInt256 = + ## Wrapper around `vmState.readOnlyStateDB.getBalance()` for a `vmState` + ## descriptor positioned at the `xp.head`. This might differ from the + ## `xp.vmState.readOnlyStateDB.getBalance()` which returns the current + ## balance relative to what has been accumulated by the current packing + ## procedure. + xp.vmState.stateDB.getBalance(account) + +proc getNonce*(xp: TxPoolRef; account: EthAddress): AccountNonce = + ## Wrapper around `vmState.readOnlyStateDB.getNonce()` for a `vmState` + ## descriptor positioned at the `xp.head`. This might differ from the + ## `xp.vmState.readOnlyStateDB.getNonce()` which returns the current nonce + ## relative to what has been accumulated by the current packing procedure. + xp.vmState.stateDB.getNonce(account) + +func head*(xp: TxPoolRef): BlockHeader = + ## Getter, cached block chain insertion point. Typically, this should be + ## the same header as retrieved by the `getCanonicalHead()` (unless in the + ## middle of a mining update.) + xp.vmState.parent + # ------------------------------------------------------------------------------ # Public functions, setters # ------------------------------------------------------------------------------ @@ -204,64 +246,11 @@ func `pFlags=`*(xp: TxPoolRef; val: set[TxPoolFlags]) = ## Install a set of algorithm strategy symbols for labelling items as `packed` xp.param.flags = val -func `pMinFeePrice=`*(xp: TxPoolRef; val: GasPrice) = - ## Setter - xp.param.minFeePrice = val - -func `pMinTipPrice=`*(xp: TxPoolRef; val: GasPrice) = - ## Setter - xp.param.minTipPrice = val - -func `pMinPlGasPrice=`*(xp: TxPoolRef; val: GasPrice) = - ## Setter - xp.param.minPlGasPrice = val - -# ------------------------------------------------------------------------------ -# Public functions, heplers (debugging only) -# ------------------------------------------------------------------------------ - -proc verify*(xp: TxPoolRef): Result[void,TxInfo] - {.gcsafe, raises: [CatchableError].} = - ## Verify descriptor and subsequent data structures.
- - block: - let rc = xp.txDB.verify - if rc.isErr: - return rc - - # verify consecutive nonces per sender - var - initOk = false - lastSender: EthAddress - lastNonce: AccountNonce - lastSublist: TxSenderSchedRef - - for (_,nonceList) in xp.txDB.incAccount: - for item in nonceList.incNonce: - if not initOk or lastSender != item.sender: - initOk = true - lastSender = item.sender - lastNonce = item.tx.nonce - lastSublist = xp.txDB.bySender.eq(item.sender).value.data - elif lastNonce + 1 == item.tx.nonce: - lastNonce = item.tx.nonce - else: - return err(txInfoVfyNonceChain) - - # verify bucket boundary conditions - case item.status: - of txItemPending: - discard - of txItemStaged: - if lastSublist.eq(txItemPending).eq(item.tx.nonce - 1).isOk: - return err(txInfoVfyNonceChain) - of txItemPacked: - if lastSublist.eq(txItemPending).eq(item.tx.nonce - 1).isOk: - return err(txInfoVfyNonceChain) - if lastSublist.eq(txItemStaged).eq(item.tx.nonce - 1).isOk: - return err(txInfoVfyNonceChain) - - ok() +proc `head=`*(xp: TxPoolRef; val: BlockHeader) + {.gcsafe,raises: [].} = + ## Setter, updates descriptor. This setter re-positions the `vmState` and + ## account caches to a new insertion point on the block chain database. + xp.update(val) # ------------------------------------------------------------------------------ # End diff --git a/nimbus/core/tx_pool/tx_item.nim b/nimbus/core/tx_pool/tx_item.nim index 0344905342..9d6fda49da 100644 --- a/nimbus/core/tx_pool/tx_item.nim +++ b/nimbus/core/tx_pool/tx_item.nim @@ -16,6 +16,7 @@ import std/[hashes, times], ../../utils/ec_recover, ../../utils/utils, + ../../transaction, ./tx_info, eth/[common, keys], results @@ -23,16 +24,6 @@ import {.push raises: [].} type - GasPrice* = ##| - ## Handy definition distinct from `GasInt` which is a commodity unit while - ## the `GasPrice` is the commodity valuation per unit of gas, similar to a - ## kind of currency. - distinct uint64 - - GasPriceEx* = ##\ - ## Similar to `GasPrice` but is allowed to be negative. - distinct int64 - TxItemStatus* = enum ##\ ## Current status of a transaction as seen by the pool. 
txItemPending = 0 @@ -57,50 +48,6 @@ type proc utcTime: Time = getTime().utc.toTime -# ------------------------------------------------------------------------------ -# Public helpers supporting distinct types -# ------------------------------------------------------------------------------ - -proc `$`*(a: GasPrice): string {.borrow.} -proc `<`*(a, b: GasPrice): bool {.borrow.} -proc `<=`*(a, b: GasPrice): bool {.borrow.} -proc `==`*(a, b: GasPrice): bool {.borrow.} -proc `+`*(a, b: GasPrice): GasPrice {.borrow.} -proc `-`*(a, b: GasPrice): GasPrice {.borrow.} - -proc `$`*(a: GasPriceEx): string {.borrow.} -proc `<`*(a, b: GasPriceEx): bool {.borrow.} -proc `<=`*(a, b: GasPriceEx): bool {.borrow.} -proc `==`*(a, b: GasPriceEx): bool {.borrow.} -proc `+`*(a, b: GasPriceEx): GasPriceEx {.borrow.} -proc `-`*(a, b: GasPriceEx): GasPriceEx {.borrow.} -proc `+=`*(a: var GasPriceEx; b: GasPriceEx) {.borrow.} -proc `-=`*(a: var GasPriceEx; b: GasPriceEx) {.borrow.} - -# Multiplication/division of *price* and *commodity unit* - -proc `*`*(a: GasPrice; b: SomeUnsignedInt): GasPrice {.borrow.} -proc `*`*(a: SomeUnsignedInt; b: GasPrice): GasPrice {.borrow.} - -proc `div`*(a: GasPrice; b: SomeUnsignedInt): GasPrice = - (a.uint64 div b).GasPrice # beware of zero denominator - -proc `*`*(a: SomeInteger; b: GasPriceEx): GasPriceEx = - (a * b.int64).GasPriceEx # beware of under/overflow - -# Mixed stuff, convenience ops - -proc `-`*(a: GasPrice; b: SomeUnsignedInt): GasPrice {.borrow.} - -proc `<`*(a: GasPriceEx; b: SomeSignedInt): bool = - a.int64 < b - -proc `<`*(a: GasPriceEx|SomeSignedInt; b: GasPrice): bool = - if a.int64 < 0: true else: a.GasPrice < b - -proc `<=`*(a: SomeSignedInt; b: GasPriceEx): bool = - a < b.int64 - # ------------------------------------------------------------------------------ # Public functions, Constructor # ------------------------------------------------------------------------------ @@ -159,20 +106,8 @@ proc cost*(tx: Transaction): UInt256 = ## Getter (go/ref compat): gas * gasPrice + value. (tx.gasPrice * tx.gasLimit).u256 + tx.value -# core/types/transaction.go(332): .. *Transaction) EffectiveGasTip(baseFee .. -# core/types/transaction.go(346): .. EffectiveGasTipValue(baseFee .. -proc effectiveGasTip*(tx: Transaction; baseFee: GasInt): GasPriceEx = - ## The effective miner gas tip for the globally argument `baseFee`. The - ## result (which is a price per gas) might well be negative. - if tx.txType < TxEip1559: - (tx.gasPrice - baseFee).GasPriceEx - else: - # London, EIP1559 - min(tx.maxPriorityFeePerGas, tx.maxFeePerGas - baseFee).GasPriceEx - -proc effectiveGasTip*(tx: Transaction; baseFee: UInt256): GasPriceEx = - ## Variant of `effectiveGasTip()` - tx.effectiveGasTip(baseFee.truncate(GasInt)) +func effectiveGasTip*(tx: Transaction; baseFee: GasInt): GasInt = + effectiveGasTip(tx, Opt.some(baseFee.u256)) # ------------------------------------------------------------------------------ # Public functions, item getters diff --git a/nimbus/core/tx_pool/tx_packer.nim b/nimbus/core/tx_pool/tx_packer.nim new file mode 100644 index 0000000000..b4f17e591c --- /dev/null +++ b/nimbus/core/tx_pool/tx_packer.nim @@ -0,0 +1,346 @@ +# Nimbus +# Copyright (c) 2018-2024 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. 
This file may not be copied, modified, or distributed except +# according to those terms. + +## Transaction Pool Tasklets: Packer, VM execute and compact txs +## ============================================================= +## + +{.push raises: [].} + +import + eth/[keys, rlp], + stew/sorted_set, + ../../db/[ledger, core_db], + ../../common/common, + ../../utils/utils, + ../../constants, + ".."/[executor, validate, casper], + ../../transaction/call_evm, + ../../transaction, + ../../evm/state, + ../../evm/types, + ../eip4844, + "."/[tx_desc, tx_item, tx_tabs, tx_tabs/tx_status, tx_info], + tx_tasks/[tx_bucket] + +type + TxPacker = object + # Packer state + vmState: BaseVMState + txDB: TxTabsRef + tr: CoreDbMptRef + cleanState: bool + numBlobPerBlock: int + + # Packer results + blockValue: UInt256 + stateRoot: Hash256 + txRoot: Hash256 + receiptsRoot: Hash256 + logsBloom: BloomFilter + + GrabResult = enum + FetchNextItem + ContinueWithNextAccount + StopCollecting + +const + receiptsExtensionSize = ##\ + ## Number of slots to extend the `receipts[]` at the same time. + 20 + +# ------------------------------------------------------------------------------ +# Private helpers +# ------------------------------------------------------------------------------ + +proc persist(pst: var TxPacker) + {.gcsafe,raises: [].} = + ## Smart wrapper + let vmState = pst.vmState + if not pst.cleanState: + let clearEmptyAccount = vmState.fork >= FkSpurious + vmState.stateDB.persist(clearEmptyAccount) + pst.cleanState = true + +proc classifyValidatePacked(vmState: BaseVMState; item: TxItemRef): bool = + ## Verify the argument `item` against the accounts database. This function + ## is a wrapper around the `validateTransaction()` call to be used in a similar + ## fashion as in `asyncProcessTransactionImpl()`. + let + roDB = vmState.readOnlyStateDB + baseFee = vmState.blockCtx.baseFeePerGas.get(0.u256) + fork = vmState.fork + gasLimit = vmState.blockCtx.gasLimit + tx = item.tx.eip1559TxNormalization(baseFee.truncate(GasInt)) + excessBlobGas = calcExcessBlobGas(vmState.parent) + + roDB.validateTransaction( + tx, item.sender, gasLimit, baseFee, excessBlobGas, fork).isOk + +proc classifyPacked(vmState: BaseVMState; moreBurned: GasInt): bool = + ## Classifier for *packing* (i.e. adding up `gasUsed` values after executing + ## in the VM.) This function checks whether adding the argument `moreBurned` + ## to the current `cumulativeGasUsed` total stays within the block gas limit. + let totalGasUsed = vmState.cumulativeGasUsed + moreBurned + totalGasUsed < vmState.blockCtx.gasLimit + +proc classifyPackedNext(vmState: BaseVMState): bool = + ## Classifier for *packing* (i.e. adding up `gasUsed` values after executing + ## in the VM.) This function returns `true` if the packing level is still + ## low enough to proceed trying to accumulate more items. + ## + ## This function is typically called as a follow up after a `false` return of + ## `classifyPacked()`. + vmState.cumulativeGasUsed < vmState.blockCtx.gasLimit + +func baseFee(pst: TxPacker): GasInt = + ## Getter, baseFee for the next block header. This value is auto-generated + ## when a new insertion point is set via `head=`. 
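The `GrabResult` enum above is the packer's control protocol: `vmExecGrabItem` answers one of three ways for every staged item, and the nested `block loop`/`block account` construct in `packerVmExec` further down maps those answers onto loop exits. A minimal self-contained sketch of that protocol, with a plain gas counter standing in for VM execution (all names and numbers here are illustrative, not the pool's actual types):

```nim
type GrabResult = enum
  FetchNextItem, ContinueWithNextAccount, StopCollecting

proc grab(gasLeft: var int64; gasNeeded: int64): GrabResult =
  ## Toy stand-in for `vmExecGrabItem`: pack while block gas remains.
  if gasLeft <= 0:
    return StopCollecting            # block is full, stop the whole scan
  if gasNeeded > gasLeft:
    return ContinueWithNextAccount   # a cheaper sender may still fit
  gasLeft -= gasNeeded
  FetchNextItem

when isMainModule:
  var gasLeft = 100_000'i64
  var packed: seq[int64]
  let perSender = @[@[70_000'i64, 50_000], @[20_000'i64]]
  block loop:
    for txs in perSender:            # packing order, like packingOrderAccounts
      block account:
        for gas in txs:              # nonce order within one sender
          case grab(gasLeft, gas)
          of StopCollecting: break loop
          of ContinueWithNextAccount: break account
          of FetchNextItem: packed.add gas
  doAssert packed == @[70_000'i64, 20_000]
```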
+ if pst.vmState.blockCtx.baseFeePerGas.isSome: + pst.vmState.blockCtx.baseFeePerGas.get.truncate(GasInt) + else: + 0.GasInt + +func feeRecipient(pst: TxPacker): EthAddress = + pst.vmState.com.pos.feeRecipient + +# ------------------------------------------------------------------------------ +# Private functions +# ------------------------------------------------------------------------------ + +proc runTx(pst: var TxPacker; item: TxItemRef): GasInt = + ## Execute item transaction and update `vmState` book keeping. Returns the + ## `gasUsed` after executing the transaction. + let + baseFee = pst.baseFee + tx = item.tx.eip1559TxNormalization(baseFee) + + let gasUsed = tx.txCallEvm(item.sender, pst.vmState) + pst.cleanState = false + doAssert 0 <= gasUsed + gasUsed + +proc runTxCommit(pst: var TxPacker; item: TxItemRef; gasBurned: GasInt) + {.gcsafe,raises: [CatchableError].} = + ## Book keeping after executing the argument `item` transaction in the VM: + ## credit the miner reward, store the receipt, and move the item to the + ## `packed` bucket. + let + vmState = pst.vmState + inx = pst.txDB.byStatus.eq(txItemPacked).nItems + gasTip = item.tx.effectiveGasTip(pst.baseFee) + + # The gas tip cannot get negative as all items in the `staged` bucket + # are vetted for profitability before entering that bucket. + assert 0 <= gasTip + let reward = gasBurned.u256 * gasTip.u256 + vmState.stateDB.addBalance(pst.feeRecipient, reward) + pst.blockValue += reward + + # Saving accounts via persist() is not needed unless the fork is smaller + # than `FkByzantium`, in which case the `rootHash()` function is called + # by `makeReceipt()`. As the `rootHash()` function asserts unconditionally + # that the account cache has been saved, the `persist()` call is + # obligatory here. + if vmState.fork < FkByzantium: + pst.persist() + + # Update receipts sequence + if vmState.receipts.len <= inx: + vmState.receipts.setLen(inx + receiptsExtensionSize) + + # Return remaining gas to the block gas counter so it is + # available for the next transaction. + vmState.gasPool += item.tx.gasLimit - gasBurned + + # gasUsed accounting + vmState.cumulativeGasUsed += gasBurned + vmState.receipts[inx] = vmState.makeReceipt(item.tx.txType) + + # Update txRoot + pst.tr.merge(rlp.encode(inx.uint64), rlp.encode(item.tx)).isOkOr: + raiseAssert "runTxCommit(): merge failed, " & $$error + + # Add the item to the `packed` bucket. This implicitly increases the + # receipts index `inx` at the next visit of this function. 
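The reward arithmetic in `runTxCommit` is worth spelling out: the per-gas tip is the pre-London `gasPrice - baseFee`, or the EIP-1559 `min(maxPriorityFeePerGas, maxFeePerGas - baseFee)` (the formula visible in the `GasPriceEx` code removed above), and the miner earns `gasBurned * tip`, which is also what accumulates in `blockValue`. A simplified sketch with `int64` fee fields instead of the real `Transaction` type (illustrative only):

```nim
type MiniTx = object
  eip1559: bool
  gasPrice: int64              # legacy txs
  maxFeePerGas: int64          # EIP-1559 txs
  maxPriorityFeePerGas: int64

func effectiveGasTip(tx: MiniTx; baseFee: int64): int64 =
  ## Per-gas amount the miner earns on top of the burned base fee.
  if tx.eip1559:
    min(tx.maxPriorityFeePerGas, tx.maxFeePerGas - baseFee)
  else:
    tx.gasPrice - baseFee

when isMainModule:
  let tx = MiniTx(eip1559: true, maxFeePerGas: 40, maxPriorityFeePerGas: 2)
  let tip = tx.effectiveGasTip(baseFee = 39)  # min(2, 40 - 39) = 1
  doAssert tip == 1
  let reward = 21_000 * tip                   # credited to the fee recipient
  doAssert reward == 21_000                   # and added to `blockValue`
```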
+ discard pst.txDB.reassign(item,txItemPacked) + +# ------------------------------------------------------------------------------ +# Private functions: packer packerVmExec() helpers +# ------------------------------------------------------------------------------ + +proc vmExecInit(xp: TxPoolRef): Result[TxPacker, string] + {.gcsafe,raises: [CatchableError].} = + + # Flush `packed` bucket + xp.bucketFlushPacked + + let packer = TxPacker( + vmState: xp.vmState, + txDB: xp.txDB, + tr: AristoDbMemory.newCoreDbRef().ctx.getGeneric(clearData=true), + numBlobPerBlock: 0, + blockValue: 0.u256, + txRoot: EMPTY_ROOT_HASH, + stateRoot: xp.vmState.parent.stateRoot, + ) + + # EIP-4788 + if xp.nextFork >= FkCancun: + let beaconRoot = xp.vmState.com.pos.parentBeaconBlockRoot + xp.vmState.processBeaconBlockRoot(beaconRoot).isOkOr: + return err(error) + + ok(packer) + +proc vmExecGrabItem(pst: var TxPacker; item: TxItemRef): GrabResult + {.gcsafe,raises: [CatchableError].} = + ## Greedily collect & compact items as long as the accumulated `gasLimit` + ## values are below the maximum block size. + let vmState = pst.vmState + + if not item.tx.validateChainId(vmState.com.chainId): + discard pst.txDB.dispose(item, txInfoChainIdMismatch) + return ContinueWithNextAccount + + # EIP-4844 + if pst.numBlobPerBlock + item.tx.versionedHashes.len > MAX_BLOBS_PER_BLOCK: + return ContinueWithNextAccount + pst.numBlobPerBlock += item.tx.versionedHashes.len + + # Verify we have enough gas in gasPool + if vmState.gasPool < item.tx.gasLimit: + # skip this transaction and + # continue with next account + # if we don't have enough gas + return ContinueWithNextAccount + vmState.gasPool -= item.tx.gasLimit + + # Validate transaction relative to the current vmState + if not vmState.classifyValidatePacked(item): + return ContinueWithNextAccount + + # EIP-1153 + vmState.stateDB.clearTransientStorage() + + let + accTx = vmState.stateDB.beginSavepoint + gasUsed = pst.runTx(item) # this is the crucial part, running the tx + + # Find out what to do next: accepting this tx or trying the next account + if not vmState.classifyPacked(gasUsed): + vmState.stateDB.rollback(accTx) + if vmState.classifyPackedNext(): + return ContinueWithNextAccount + return StopCollecting + + # Commit account state DB + vmState.stateDB.commit(accTx) + + vmState.stateDB.persist(clearEmptyAccount = vmState.fork >= FkSpurious) + + # Finish book-keeping and move item to `packed` bucket + pst.runTxCommit(item, gasUsed) + + FetchNextItem + +proc vmExecCommit(pst: var TxPacker) = + let + vmState = pst.vmState + stateDB = vmState.stateDB + + # EIP-4895 + if vmState.fork >= FkShanghai: + for withdrawal in vmState.com.pos.withdrawals: + stateDB.addBalance(withdrawal.address, withdrawal.weiAmount) + + # Finish up, then vmState.stateDB.rootHash may be accessed + stateDB.persist(clearEmptyAccount = vmState.fork >= FkSpurious) + + # Update flexi-array, set proper length + let nItems = pst.txDB.byStatus.eq(txItemPacked).nItems + vmState.receipts.setLen(nItems) + + pst.receiptsRoot = vmState.receipts.calcReceiptsRoot + pst.logsBloom = vmState.receipts.createBloom + pst.txRoot = pst.tr.state(updateOk=true).valueOr: + raiseAssert "vmExecCommit(): state() failed " & $$error + pst.stateRoot = vmState.stateDB.rootHash + + +# ------------------------------------------------------------------------------ +# Public functions +# ------------------------------------------------------------------------------ + +proc packerVmExec*(xp: TxPoolRef): Result[TxPacker, string] + 
{.gcsafe,raises: [CatchableError].} = + ## Rebuild `packed` bucket by selecting items from the `staged` bucket + ## after executing them in the VM. + let db = xp.vmState.com.db + let dbTx = db.ctx.newTransaction() + defer: dbTx.dispose() + + var pst = xp.vmExecInit.valueOr: + return err(error) + + block loop: + for (_,nonceList) in xp.txDB.packingOrderAccounts(txItemStaged): + + block account: + for item in nonceList.incNonce: + let rc = pst.vmExecGrabItem(item) + if rc == StopCollecting: + break loop # stop + if rc == ContinueWithNextAccount: + break account # continue with next account + + pst.vmExecCommit() + ok(pst) + # Block chain will roll back automatically + +proc assembleHeader*(pst: TxPacker): BlockHeader = + ## Generate a new header, a child of the cached `head`. + let + vmState = pst.vmState + com = vmState.com + pos = com.pos + + result = BlockHeader( + parentHash: vmState.parent.blockHash, + ommersHash: EMPTY_UNCLE_HASH, + coinbase: pos.feeRecipient, + stateRoot: pst.stateRoot, + txRoot: pst.txRoot, + receiptsRoot: pst.receiptsRoot, + logsBloom: pst.logsBloom, + difficulty: UInt256.zero(), + number: vmState.blockNumber, + gasLimit: vmState.blockCtx.gasLimit, + gasUsed: vmState.cumulativeGasUsed, + timestamp: pos.timestamp, + extraData: @[], + mixHash: pos.prevRandao, + nonce: default(BlockNonce), + baseFeePerGas: vmState.blockCtx.baseFeePerGas, + ) + + if com.isShanghaiOrLater(pos.timestamp): + result.withdrawalsRoot = Opt.some(calcWithdrawalsRoot(pos.withdrawals)) + + if com.isCancunOrLater(pos.timestamp): + result.parentBeaconBlockRoot = Opt.some(pos.parentBeaconBlockRoot) + result.blobGasUsed = Opt.some vmState.blobGasUsed + result.excessBlobGas = Opt.some vmState.blockCtx.excessBlobGas + +func blockValue*(pst: TxPacker): UInt256 = + pst.blockValue + +# ------------------------------------------------------------------------------ +# End +# ------------------------------------------------------------------------------ diff --git a/nimbus/core/tx_pool/tx_tabs.nim b/nimbus/core/tx_pool/tx_tabs.nim index f64b28c5a6..5de05b4f6f 100644 --- a/nimbus/core/tx_pool/tx_tabs.nim +++ b/nimbus/core/tx_pool/tx_tabs.nim @@ -20,12 +20,12 @@ import ./tx_item, ./tx_tabs/[tx_sender, tx_rank, tx_status], eth/[common, keys], - stew/[keyed_queue, keyed_queue/kq_debug, sorted_set], + stew/[keyed_queue, sorted_set], results export # bySender/byStatus index operations - sub, eq, ge, gt, le, len, lt, nItems, gasLimits + sub, eq, ge, gt, le, len, lt, nItems type TxTabsItemsCount* = tuple total: int ## excluding rejects disposed: int ## waste basket - TxTabsGasTotals* = tuple - pending, staged, packed: GasInt ## sum => total - TxTabsRef* = ref object ##\ ## Base descriptor maxRejects: int ##\ @@ -284,11 +281,6 @@ proc nItems*(xp: TxTabsRef): TxTabsItemsCount = result.total = xp.byItemID.len result.disposed = xp.byRejects.len -proc gasTotals*(xp: TxTabsRef): TxTabsGasTotals = - result.pending = xp.byStatus.eq(txItemPending).gasLimits - result.staged = xp.byStatus.eq(txItemStaged).gasLimits - result.packed = xp.byStatus.eq(txItemPacked).gasLimits - # ------------------------------------------------------------------------------ # Public iterators, `TxRank` > `(EthAddress,TxStatusNonceRef)` # ------------------------------------------------------------------------------ @@ -345,41 +337,6 @@ iterator packingOrderAccounts*(xp: TxTabsRef; bucket: TxItemStatus): for (account,nonceList) in xp.decAccount(bucket): yield (account,nonceList) -#
------------------------------------------------------------------------------ -# Public iterators, `TxRank` > `(EthAddress,TxSenderNonceRef)` -# ------------------------------------------------------------------------------ - -iterator incAccount*(xp: TxTabsRef; - fromRank = TxRank.low): (EthAddress,TxSenderNonceRef) - {.gcsafe,raises: [KeyError].} = - ## Variant of `incAccount()` without bucket restriction. - var rcRank = xp.byRank.ge(fromRank) - while rcRank.isOk: - let (rank, addrList) = (rcRank.value.key, rcRank.value.data) - - # Try all sender adresses found - for account in addrList.keys: - yield (account, xp.bySender.eq(account).sub.value.data) - - # Get next ranked address list (top down index walk) - rcRank = xp.byRank.gt(rank) # potenially modified database - - -iterator decAccount*(xp: TxTabsRef; - fromRank = TxRank.high): (EthAddress,TxSenderNonceRef) - {.gcsafe,raises: [KeyError].} = - ## Variant of `decAccount()` without bucket restriction. - var rcRank = xp.byRank.le(fromRank) - while rcRank.isOk: - let (rank, addrList) = (rcRank.value.key, rcRank.value.data) - - # Try all sender adresses found - for account in addrList.keys: - yield (account, xp.bySender.eq(account).sub.value.data) - - # Get next ranked address list (top down index walk) - rcRank = xp.byRank.lt(rank) # potenially modified database - # ----------------------------------------------------------------------------- # Public second stage iterators: nonce-ordered item lists. # ----------------------------------------------------------------------------- @@ -404,84 +361,6 @@ iterator incNonce*(nonceList: TxStatusNonceRef; yield item rc = nonceList.gt(nonce) # potenially modified database -#[ -# There is currently no use for nonce count down traversal - -iterator decNonce*(nonceList: TxSenderNonceRef; - nonceFrom = AccountNonce.high): TxItemRef - {.gcsafe, raises: [KeyError].} = - ## Similar to `incNonce()` but visiting items in reverse order. - var rc = nonceList.le(nonceFrom) - while rc.isOk: - let (nonce, item) = (rc.value.key, rc.value.data) - yield item - rc = nonceList.lt(nonce) # potenially modified database - - -iterator decNonce*(nonceList: TxStatusNonceRef; - nonceFrom = AccountNonce.high): TxItemRef = - ## Variant of `decNonce()` for the `TxStatusNonceRef` list. - var rc = nonceList.le(nonceFrom) - while rc.isOk: - let (nonce, item) = (rc.value.key, rc.value.data) - yield item - rc = nonceList.lt(nonce) # potenially modified database -]# - -# ------------------------------------------------------------------------------ -# Public functions, debugging -# ------------------------------------------------------------------------------ - -proc verify*(xp: TxTabsRef): Result[void,TxInfo] - {.gcsafe, raises: [CatchableError].} = - ## Verify descriptor and subsequent data structures. 
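The iterators in this module share one pattern, visible in the "potenially modified database" comments: rather than holding a tree cursor, they re-seek on every step (`ge` to start, then `gt` past the last visited key), so the underlying sorted set may be modified while a walk is in flight. A self-contained sketch of the pattern with a sorted `seq` standing in for `stew/sorted_set` (illustrative only):

```nim
import std/[algorithm, options]

type NonceList = object
  nonces: seq[uint64]          # kept sorted, lowest first

proc ge(nl: NonceList; key: uint64): Option[uint64] =
  ## Smallest entry >= key, mirroring `SortedSet.ge`.
  let i = nl.nonces.lowerBound(key)
  if i < nl.nonces.len: some(nl.nonces[i]) else: none(uint64)

proc gt(nl: NonceList; key: uint64): Option[uint64] =
  ## Smallest entry > key, mirroring `SortedSet.gt`
  ## (wrap-around at high(uint64) is ignored in this sketch).
  nl.ge(key + 1)

iterator incNonce(nl: NonceList): uint64 =
  var rc = nl.ge(0'u64)
  while rc.isSome:
    yield rc.get
    rc = nl.gt(rc.get)         # re-seek: tolerates concurrent mutation

when isMainModule:
  var visited: seq[uint64]
  for n in NonceList(nonces: @[3'u64, 4, 5]).incNonce:
    visited.add n
  doAssert visited == @[3'u64, 4, 5]
```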
- block: - let rc = xp.bySender.verify - if rc.isErr: - return rc - block: - let rc = xp.byItemID.verify - if rc.isErr: - return err(txInfoVfyItemIdList) - block: - let rc = xp.byRejects.verify - if rc.isErr: - return err(txInfoVfyRejectsList) - block: - let rc = xp.byStatus.verify - if rc.isErr: - return rc - block: - let rc = xp.byRank.verify - if rc.isErr: - return rc - - for status in TxItemStatus: - var - statusCount = 0 - statusAllGas = 0.GasInt - for (account,nonceList) in xp.incAccount(status): - let bySenderStatusList = xp.bySender.eq(account).eq(status) - statusAllGas += bySenderStatusList.gasLimits - statusCount += bySenderStatusList.nItems - if bySenderStatusList.nItems != nonceList.nItems: - return err(txInfoVfyStatusSenderTotal) - - if xp.byStatus.eq(status).nItems != statusCount: - return err(txInfoVfyStatusSenderTotal) - if xp.byStatus.eq(status).gasLimits != statusAllGas: - return err(txInfoVfyStatusSenderGasLimits) - - if xp.byItemID.len != xp.bySender.nItems: - return err(txInfoVfySenderTotal) - - if xp.byItemID.len != xp.byStatus.nItems: - return err(txInfoVfyStatusTotal) - - if xp.bySender.len != xp.byRank.nItems: - return err(txInfoVfyRankTotal) - ok() - # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/nimbus/core/tx_pool/tx_tabs/tx_rank.nim b/nimbus/core/tx_pool/tx_tabs/tx_rank.nim index a2d6ef3b8f..0a11fb250b 100644 --- a/nimbus/core/tx_pool/tx_tabs/tx_rank.nim +++ b/nimbus/core/tx_pool/tx_tabs/tx_rank.nim @@ -16,7 +16,6 @@ import std/[tables], - ../tx_info, eth/common, stew/[sorted_set], results @@ -111,36 +110,6 @@ proc delete*(rt: var TxRankTab; sender: EthAddress): bool rt.addrTab.del(sender) return true - -proc verify*(rt: var TxRankTab): Result[void,TxInfo] - {.gcsafe,raises: [CatchableError].} = - - var - seen: Table[EthAddress,TxRank] - rc = rt.rankList.ge(TxRank.low) - - while rc.isOk: - let (key, addrTab) = (rc.value.key, rc.value.data) - rc = rt.rankList.gt(key) - - for (sender,rank) in addrTab.pairs: - if key != rank: - return err(txInfoVfyRankAddrMismatch) - - if not rt.addrTab.hasKey(sender): - return err(txInfoVfyRankReverseLookup) - if rank != rt.addrTab[sender]: - return err(txInfoVfyRankReverseMismatch) - - if seen.hasKey(sender): - return err(txInfoVfyRankDuplicateAddr) - seen[sender] = rank - - if seen.len != rt.addrTab.len: - return err(txInfoVfyReverseZombies) - - ok() - # ------------------------------------------------------------------------------ # Public functions: `TxRank` > `EthAddress` # ------------------------------------------------------------------------------ diff --git a/nimbus/core/tx_pool/tx_tabs/tx_sender.nim b/nimbus/core/tx_pool/tx_tabs/tx_sender.nim index 96d1c38d89..e0a5f331cf 100644 --- a/nimbus/core/tx_pool/tx_tabs/tx_sender.nim +++ b/nimbus/core/tx_pool/tx_tabs/tx_sender.nim @@ -15,20 +15,18 @@ ## import - ../tx_info, ../tx_item, eth/common, - stew/[keyed_queue, keyed_queue/kq_debug, sorted_set], + stew/[keyed_queue, sorted_set], results, ../../eip4844 - type TxSenderNonceRef* = ref object ##\ ## Sub-list ordered by `AccountNonce` values containing transaction\ ## item lists. 
gasLimits: GasInt ## Accumulated gas limits - profit: float64 ## Aggregated `effectiveGasTip*gasLimit` values + profit: GasInt ## Aggregated `effectiveGasTip*gasLimit` values nonceList: SortedSet[AccountNonce,TxItemRef] TxSenderSchedRef* = ref object ##\ @@ -75,19 +73,6 @@ proc nActive(rq: TxSenderSchedRef): int = if not rq.statusList[status].isNil: result.inc -func differs(a, b: float64): bool = - ## Syntactic sugar, crude comparator for large integer values a and b coded - ## as `float64`. This function is mainly provided for the `verify()` function. - # note that later NIM compilers also provide `almostEqual()` - const - epsilon = 1.0e+15'f64 # just arbitrary, something small - let - x = max(a, b) - y = min(a, b) - z = if x == 0: 1'f64 else: x # 1f64 covers the case x == y == 0.0 - epsilon < (x - y) / z - - func toSenderSchedule(status: TxItemStatus): TxSenderSchedule = case status of txItemPending: @@ -110,23 +95,21 @@ proc getRank(schedData: TxSenderSchedRef): int64 = if gasLimits <= 0: return int64.low - let profit = maxProfit / gasLimits.float64 + let profit = maxProfit div gasLimits # Beware of under/overflow - if profit < int64.low.float64: - return int64.low - if int64.high.float64 < profit: + if int64.high.GasInt < profit: return int64.high profit.int64 -proc maxProfit(item: TxItemRef; baseFee: GasInt): float64 = +proc maxProfit(item: TxItemRef; baseFee: GasInt): GasInt = ## Profit calculator - item.tx.gasLimit.float64 * item.tx.effectiveGasTip(baseFee).float64 + item.tx.getTotalBlobGas.float64 + item.tx.gasLimit * item.tx.effectiveGasTip(baseFee) + item.tx.getTotalBlobGas proc recalcProfit(nonceData: TxSenderNonceRef; baseFee: GasInt) = ## Re-calculate profit value depending on `baseFee` - nonceData.profit = 0.0 + nonceData.profit = 0 var rc = nonceData.nonceList.ge(AccountNonce.low) while rc.isOk: let item = rc.value.data @@ -253,116 +236,6 @@ proc delete*(gt: var TxSenderTab; item: TxItemRef): bool inx.statusNonce.profit -= tip return true - -proc verify*(gt: var TxSenderTab): Result[void,TxInfo] - {.gcsafe,raises: [CatchableError].} = - ## Walk `EthAddress` > `TxSenderLocus` > `AccountNonce` > items - - block: - let rc = gt.addrList.verify - if rc.isErr: - return err(txInfoVfySenderRbTree) - - var totalCount = 0 - for p in gt.addrList.nextPairs: - let schedData = p.data - #var addrCount = 0 -- notused - # at least one of status lists must be available - if schedData.nActive == 0: - return err(txInfoVfySenderLeafEmpty) - if schedData.allList.isNil: - return err(txInfoVfySenderLeafEmpty) - - # status list - # ---------------------------------------------------------------- - var - statusCount = 0 - statusGas = 0.GasInt - statusProfit = 0.0 - for status in TxItemStatus: - let statusData = schedData.statusList[status] - - if not statusData.isNil: - block: - let rc = statusData.nonceList.verify - if rc.isErr: - return err(txInfoVfySenderRbTree) - - var - rcNonce = statusData.nonceList.ge(AccountNonce.low) - bucketProfit = 0.0 - while rcNonce.isOk: - let (nonceKey, item) = (rcNonce.value.key, rcNonce.value.data) - rcNonce = statusData.nonceList.gt(nonceKey) - - statusGas += item.tx.gasLimit - statusCount.inc - - bucketProfit += item.maxProfit(gt.baseFee) - - statusProfit += bucketProfit - - if differs(statusData.profit, bucketProfit): - echo "*** verify (1) ", statusData.profit," != ", bucketProfit - return err(txInfoVfySenderProfits) - - # verify that `recalcProfit()` works - statusData.recalcProfit(gt.baseFee) - if differs(statusData.profit, bucketProfit): - echo "*** verify (2) 
", statusData.profit," != ", bucketProfit - return err(txInfoVfySenderProfits) - - # allList - # ---------------------------------------------------------------- - var - allCount = 0 - allGas = 0.GasInt - allProfit = 0.0 - block: - var allData = schedData.allList - - block: - let rc = allData.nonceList.verify - if rc.isErr: - return err(txInfoVfySenderRbTree) - - var rcNonce = allData.nonceList.ge(AccountNonce.low) - while rcNonce.isOk: - let (nonceKey, item) = (rcNonce.value.key, rcNonce.value.data) - rcNonce = allData.nonceList.gt(nonceKey) - - allProfit += item.maxProfit(gt.baseFee) - allGas += item.tx.gasLimit - allCount.inc - - if differs(allData.profit, allProfit): - echo "*** verify (3) ", allData.profit," != ", allProfit - return err(txInfoVfySenderProfits) - - # verify that `recalcProfit()` works - allData.recalcProfit(gt.baseFee) - if differs(allData.profit, allProfit): - echo "*** verify (4) ", allData.profit," != ", allProfit - return err(txInfoVfySenderProfits) - - if differs(allProfit, statusProfit): - echo "*** verify (5) ", allProfit," != ", statusProfit - return err(txInfoVfySenderProfits) - if allGas != statusGas: - return err(txInfoVfySenderTotal) - if statusCount != schedData.size: - return err(txInfoVfySenderTotal) - if allCount != schedData.size: - return err(txInfoVfySenderTotal) - - totalCount += allCount - - # end while - if totalCount != gt.size: - return err(txInfoVfySenderTotal) - - ok() - # ------------------------------------------------------------------------------ # Public getters # ------------------------------------------------------------------------------ @@ -509,38 +382,6 @@ proc nItems*(rc: SortedSetResult[TxSenderSchedule,TxSenderNonceRef]): int = return rc.value.data.nItems 0 - -proc gasLimits*(nonceData: TxSenderNonceRef): GasInt = - ## Getter, aggregated valued of `gasLimit` for all items in the - ## argument list. - nonceData.gasLimits - -proc gasLimits*(rc: SortedSetResult[TxSenderSchedule,TxSenderNonceRef]): - GasInt = - ## Getter variant of `gasLimits()`, returns `0` if `rc.isErr` - ## evaluates `true`. - if rc.isOk: - return rc.value.data.gasLimits - 0 - - -proc maxProfit*(nonceData: TxSenderNonceRef): float64 = - ## Getter, maximum profit value for the current item list. This is the - ## aggregated value of `item.effectiveGasTip(baseFee) * item.gasLimit` - ## over all items in the argument list `nonceData`. Note that this value - ## is typically pretty large and sort of rounded due to the resolution - ## of the `float64` data type. - nonceData.profit - -proc maxProfit*(rc: SortedSetResult[TxSenderSchedule,TxSenderNonceRef]): - float64 = - ## Variant of `profit()`, returns `GasPriceEx.low` if `rc.isErr` - ## evaluates `true`. 
- if rc.isOk: - return rc.value.data.profit - float64.low - - proc eq*(nonceData: TxSenderNonceRef; nonce: AccountNonce): SortedSetResult[AccountNonce,TxItemRef] = nonceData.nonceList.eq(nonce) @@ -576,30 +417,6 @@ proc gt*(rc: SortedSetResult[TxSenderSchedule,TxSenderNonceRef]; return rc.value.data.gt(nonce) err(rc.error) - -proc le*(nonceData: TxSenderNonceRef; nonce: AccountNonce): - SortedSetResult[AccountNonce,TxItemRef] = - nonceData.nonceList.le(nonce) - -proc le*(rc: SortedSetResult[TxSenderSchedule,TxSenderNonceRef]; - nonce: AccountNonce): - SortedSetResult[AccountNonce,TxItemRef] = - if rc.isOk: - return rc.value.data.le(nonce) - err(rc.error) - - -proc lt*(nonceData: TxSenderNonceRef; nonce: AccountNonce): - SortedSetResult[AccountNonce,TxItemRef] = - nonceData.nonceList.lt(nonce) - -proc lt*(rc: SortedSetResult[TxSenderSchedule,TxSenderNonceRef]; - nonce: AccountNonce): - SortedSetResult[AccountNonce,TxItemRef] = - if rc.isOk: - return rc.value.data.lt(nonce) - err(rc.error) - # ------------------------------------------------------------------------------ # Public iterators # ------------------------------------------------------------------------------ diff --git a/nimbus/core/tx_pool/tx_tabs/tx_status.nim b/nimbus/core/tx_pool/tx_tabs/tx_status.nim index 68de2c8c8a..f5b88e348b 100644 --- a/nimbus/core/tx_pool/tx_tabs/tx_status.nim +++ b/nimbus/core/tx_pool/tx_tabs/tx_status.nim @@ -13,10 +13,9 @@ ## import - ../tx_info, ../tx_item, eth/common, - stew/[keyed_queue, keyed_queue/kq_debug, sorted_set], + stew/[keyed_queue, sorted_set], results {.push raises: [].} @@ -30,7 +29,6 @@ type ## Per address table. This table is provided as a keyed queue so deletion\ ## while traversing is supported and predictable. size: int ## Total number of items - gasLimits: GasInt ## Accumulated gas limits addrList: KeyedQueue[EthAddress,TxStatusNonceRef] TxStatusTab* = object ##\ @@ -121,7 +119,6 @@ proc insert*(sq: var TxStatusTab; item: TxItemRef): bool let inx = rc.value sq.size.inc inx.addrData.size.inc - inx.addrData.gasLimits += item.tx.gasLimit return true @@ -133,7 +130,6 @@ proc delete*(sq: var TxStatusTab; item: TxItemRef): bool sq.size.dec inx.addrData.size.dec - inx.addrData.gasLimits -= item.tx.gasLimit discard inx.nonceData.nonceList.delete(item.tx.nonce) if inx.nonceData.nonceList.len == 0: @@ -144,53 +140,6 @@ proc delete*(sq: var TxStatusTab; item: TxItemRef): bool return true - -proc verify*(sq: var TxStatusTab): Result[void,TxInfo] - {.gcsafe,raises: [CatchableError].} = - ## walk `TxItemStatus` > `EthAddress` > `AccountNonce` - - var totalCount = 0 - for status in TxItemStatus: - let addrData = sq.statusList[status] - if not addrData.isNil: - - block: - let rc = addrData.addrList.verify - if rc.isErr: - return err(txInfoVfyStatusSenderList) - var - addrCount = 0 - gasLimits = 0.GasInt - for p in addrData.addrList.nextPairs: - # let (addrKey, nonceData) = (p.key, p.data) -- notused - let nonceData = p.data - - block: - let rc = nonceData.nonceList.verify - if rc.isErr: - return err(txInfoVfyStatusNonceList) - - var rcNonce = nonceData.nonceList.ge(AccountNonce.low) - while rcNonce.isOk: - let (nonceKey, item) = (rcNonce.value.key, rcNonce.value.data) - rcNonce = nonceData.nonceList.gt(nonceKey) - - gasLimits += item.tx.gasLimit - addrCount.inc - - if addrCount != addrData.size: - return err(txInfoVfyStatusTotal) - if gasLimits != addrData.gasLimits: - return err(txInfoVfyStatusGasLimits) - - totalCount += addrCount - - # end while - if totalCount != sq.size: - return 
err(txInfoVfyStatusTotal) - - ok() - # ------------------------------------------------------------------------------ # Public array ops -- `TxItemStatus` (level 0) # ------------------------------------------------------------------------------ @@ -222,17 +171,6 @@ proc nItems*(rc: SortedSetResult[TxItemStatus,TxStatusSenderRef]): int = return rc.value.data.nItems 0 - -proc gasLimits*(addrData: TxStatusSenderRef): GasInt = - ## Getter, accumulated `gasLimit` values - addrData.gasLimits - -proc gasLimits*(rc: SortedSetResult[TxItemStatus,TxStatusSenderRef]): GasInt = - if rc.isOk: - return rc.value.data.gasLimits - 0 - - proc eq*(addrData: TxStatusSenderRef; sender: EthAddress): SortedSetResult[EthAddress,TxStatusNonceRef] {.gcsafe,raises: [KeyError].} = @@ -297,28 +235,6 @@ proc gt*(rc: SortedSetResult[EthAddress,TxStatusNonceRef]; nonce: AccountNonce): return rc.value.data.gt(nonce) err(rc.error) - -proc le*(nonceData: TxStatusNonceRef; nonce: AccountNonce): - SortedSetResult[AccountNonce,TxItemRef] = - nonceData.nonceList.le(nonce) - -proc le*(rc: SortedSetResult[EthAddress,TxStatusNonceRef]; nonce: AccountNonce): - SortedSetResult[AccountNonce,TxItemRef] = - if rc.isOk: - return rc.value.data.le(nonce) - err(rc.error) - - -proc lt*(nonceData: TxStatusNonceRef; nonce: AccountNonce): - SortedSetResult[AccountNonce,TxItemRef] = - nonceData.nonceList.lt(nonce) - -proc lt*(rc: SortedSetResult[EthAddress,TxStatusNonceRef]; nonce: AccountNonce): - SortedSetResult[AccountNonce,TxItemRef] = - if rc.isOk: - return rc.value.data.lt(nonce) - err(rc.error) - # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/nimbus/core/tx_pool/tx_tasks/tx_bucket.nim b/nimbus/core/tx_pool/tx_tasks/tx_bucket.nim index cceec5304d..0142842fba 100644 --- a/nimbus/core/tx_pool/tx_tasks/tx_bucket.nim +++ b/nimbus/core/tx_pool/tx_tasks/tx_bucket.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2018 Status Research & Development GmbH +# Copyright (c) 2018-2024 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -15,7 +15,6 @@ import std/[tables], ../../../constants, - ../tx_chain, ../tx_desc, ../tx_info, ../tx_item, @@ -166,7 +165,7 @@ proc bucketFlushPacked*(xp: TxPoolRef) discard xp.txDB.reassign(item,txItemStaged) # Reset bucket status info - xp.chain.clearAccounts + xp.clearAccounts # ------------------------------------------------------------------------------ # End diff --git a/nimbus/core/tx_pool/tx_tasks/tx_classify.nim b/nimbus/core/tx_pool/tx_tasks/tx_classify.nim index 2a7b195804..bd8304b0f8 100644 --- a/nimbus/core/tx_pool/tx_tasks/tx_classify.nim +++ b/nimbus/core/tx_pool/tx_tasks/tx_classify.nim @@ -14,18 +14,15 @@ import ../../../common/common, - ../../../evm/state, - ../../../evm/types, ../../validate, ../../eip4844, - ../tx_chain, ../tx_desc, ../tx_item, ../tx_tabs, chronicles, eth/keys -import ../../../transaction except GasPrice, GasPriceEx # already in tx_item +import ../../../transaction {.push raises: [].} @@ -39,7 +36,7 @@ logScope: proc checkTxBasic(xp: TxPoolRef; item: TxItemRef): bool = let res = validateTxBasic( item.tx, - xp.chain.nextFork, + xp.nextFork, # A new transaction of the next fork may be # coming before the fork activated validateFork = false @@ -55,7 +52,7 @@ proc checkTxNonce(xp: TxPoolRef; item: TxItemRef): bool ## sender) 
starting at the account nonce. # get the next applicable nonce as registered on the account database - let accountNonce = xp.chain.getNonce(item.sender) + let accountNonce = xp.getNonce(item.sender) if item.tx.nonce < accountNonce: debug "invalid tx: account nonce too small", @@ -95,7 +92,7 @@ proc txNonceActive(xp: TxPoolRef; item: TxItemRef): bool proc txGasCovered(xp: TxPoolRef; item: TxItemRef): bool = ## Check whether the max gas consumption is within the gas limit (aka block ## size). - let trgLimit = xp.chain.gasLimit + let trgLimit = xp.gasLimit if trgLimit < item.tx.gasLimit: debug "invalid tx: gasLimit exceeded", maxLimit = trgLimit, @@ -107,15 +104,15 @@ proc txFeesCovered(xp: TxPoolRef; item: TxItemRef): bool = ## Ensure that the user was willing to at least pay the base fee ## And to at least pay the current data gasprice if item.tx.txType >= TxEip1559: - if item.tx.maxFeePerGas < xp.chain.baseFee: + if item.tx.maxFeePerGas < xp.baseFee: debug "invalid tx: maxFee is smaller than baseFee", maxFee = item.tx.maxFeePerGas, - baseFee = xp.chain.baseFee + baseFee = xp.baseFee return false if item.tx.txType >= TxEip4844: let - excessBlobGas = xp.chain.excessBlobGas + excessBlobGas = xp.excessBlobGas blobGasPrice = getBlobBaseFee(excessBlobGas) if item.tx.maxFeePerBlobGas < blobGasPrice: debug "invalid tx: maxFeePerBlobGas smaller than blobGasPrice", @@ -127,7 +124,7 @@ proc txFeesCovered(xp: TxPoolRef; item: TxItemRef): bool = proc txCostInBudget(xp: TxPoolRef; item: TxItemRef): bool = ## Check whether the worst case expense is covered by the price budget, let - balance = xp.chain.getBalance(item.sender) + balance = xp.getBalance(item.sender) gasCost = item.tx.gasCost if balance < gasCost: debug "invalid tx: not enough cash for gas", @@ -148,28 +145,22 @@ proc txPreLondonAcceptableGasPrice(xp: TxPoolRef; item: TxItemRef): bool = ## For legacy transactions check whether minimum gas price and tip are ## high enough. These checks are optional. 
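The fee guards above (`txFeesCovered`, `txCostInBudget`) reduce to three integer comparisons. A self-contained sketch, with plain `int64` amounts standing in for `UInt256` balances and the account database (names are illustrative, not the pool's types):

```nim
type MiniTx = object
  maxFeePerGas: int64
  maxFeePerBlobGas: int64
  blobCount: int
  gasLimit: int64

func feesCovered(tx: MiniTx; baseFee, blobBaseFee: int64): bool =
  ## The sender must be willing to pay at least the current base fee,
  ## and for EIP-4844 txs at least the current blob base fee.
  if tx.maxFeePerGas < baseFee:
    return false
  if tx.blobCount > 0 and tx.maxFeePerBlobGas < blobBaseFee:
    return false
  true

func costInBudget(tx: MiniTx; balance: int64): bool =
  ## Worst-case gas expense must be covered by the sender's balance.
  balance >= tx.gasLimit * tx.maxFeePerGas

when isMainModule:
  let tx = MiniTx(maxFeePerGas: 50, blobCount: 0, gasLimit: 21_000)
  doAssert tx.feesCovered(baseFee = 40, blobBaseFee = 1)
  doAssert tx.costInBudget(balance = 2_000_000)
  doAssert not tx.costInBudget(balance = 1_000)
```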
if item.tx.txType < TxEip1559: + if item.tx.gasPrice < 0: + return false - if stageItemsPlMinPrice in xp.pFlags: - if item.tx.gasPrice.GasPriceEx < xp.pMinPlGasPrice: - return false - - elif stageItems1559MinTip in xp.pFlags: - # Fall back transaction selector scheme - if item.tx.effectiveGasTip(xp.chain.baseFee) < xp.pMinTipPrice: - return false + # Fall back transaction selector scheme + if item.tx.effectiveGasTip(xp.baseFee) < 1.GasInt: + return false true proc txPostLondonAcceptableTipAndFees(xp: TxPoolRef; item: TxItemRef): bool = ## Helper for `classifyTxPacked()` if item.tx.txType >= TxEip1559: + if item.tx.effectiveGasTip(xp.baseFee) < 1.GasInt: + return false - if stageItems1559MinTip in xp.pFlags: - if item.tx.effectiveGasTip(xp.chain.baseFee) < xp.pMinTipPrice: - return false - - if stageItems1559MinFee in xp.pFlags: - if item.tx.maxFeePerGas.GasPriceEx < xp.pMinFeePrice: - return false + if item.tx.maxFeePerGas < 1.GasInt: + return false true # ------------------------------------------------------------------------------ @@ -197,7 +188,7 @@ proc classifyActive*(xp: TxPoolRef; item: TxItemRef): bool if not xp.txNonceActive(item): return false - if item.tx.effectiveGasTip(xp.chain.baseFee) <= 0.GasPriceEx: + if item.tx.effectiveGasTip(xp.baseFee) <= 0.GasInt: return false if not xp.txGasCovered(item): @@ -217,42 +208,6 @@ proc classifyActive*(xp: TxPoolRef; item: TxItemRef): bool true - -proc classifyValidatePacked*(xp: TxPoolRef; - vmState: BaseVMState; item: TxItemRef): bool = - ## Verify the argument `item` against the accounts database. This function - ## is a wrapper around the `verifyTransaction()` call to be used in a similar - ## fashion as in `asyncProcessTransactionImpl()`. - let - roDB = vmState.readOnlyStateDB - baseFee = xp.chain.baseFee.uint64.u256 - fork = xp.chain.nextFork - gasLimit = xp.chain.gasLimit - tx = item.tx.eip1559TxNormalization(xp.chain.baseFee.GasInt) - excessBlobGas = calcExcessBlobGas(vmState.parent) - - roDB.validateTransaction( - tx, item.sender, gasLimit, baseFee, excessBlobGas, fork).isOk - -proc classifyPacked*(xp: TxPoolRef; gasBurned, moreBurned: GasInt): bool = - ## Classifier for *packing* (i.e. adding up `gasUsed` values after executing - ## in the VM.) This function checks whether the sum of the arguments - ## `gasBurned` and `moreGasBurned` is within acceptable constraints. - let totalGasUsed = gasBurned + moreBurned - totalGasUsed < xp.chain.gasLimit - -proc classifyPackedNext*(xp: TxPoolRef; gasBurned, moreBurned: GasInt): bool = - ## Classifier for *packing* (i.e. adding up `gasUsed` values after executing - ## in the VM.) This function returns `true` if the packing level is still - ## low enough to proceed trying to accumulate more items. - ## - ## This function is typically called as a follow up after a `false` return of - ## `classifyPack()`. - if packItemsTryHarder notin xp.pFlags: - xp.classifyPacked(gasBurned, moreBurned) - else: - gasBurned < xp.chain.gasLimit - # ------------------------------------------------------------------------------ # Public functionss # ------------------------------------------------------------------------------ diff --git a/nimbus/core/tx_pool/tx_tasks/tx_dispose.nim b/nimbus/core/tx_pool/tx_tasks/tx_dispose.nim index c48376a941..abc2188a18 100644 --- a/nimbus/core/tx_pool/tx_tasks/tx_dispose.nim +++ b/nimbus/core/tx_pool/tx_tasks/tx_dispose.nim @@ -61,7 +61,6 @@ proc disposeExpiredItems*(xp: TxPoolRef) {.gcsafe,raises: [KeyError].} = ## apply to items in the packed queue. 
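With the `autoZombifyPacked` flag gone in the `tx_dispose` hunk here, expiry handling becomes a single rule: items older than `lifeTime` are zombified unless they are already packed. A sketch of that rule over illustrative stand-in types (the real pool walks `byItemID`, which is ordered oldest first):

```nim
import std/times

type
  Status = enum pending, staged, packed
  Item = object
    status: Status
    reject: bool
    since: Time

proc disposeExpired(items: var seq[Item]; lifeTime: Duration; now: Time) =
  for item in items.mitems:
    if now - item.since <= lifeTime:
      break                      # ordered oldest first: the rest are younger
    if item.status != packed:    # packed items are no longer auto-zombified
      item.reject = true

when isMainModule:
  let t0 = fromUnix(0)
  var items = @[
    Item(status: staged, since: t0),
    Item(status: packed, since: t0)]
  items.disposeExpired(initDuration(hours = 1), now = fromUnix(7200))
  doAssert items[0].reject and not items[1].reject
```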
let deadLine = utcNow() - xp.lifeTime - dspPacked = autoZombifyPacked in xp.pFlags dspUnpacked = autoZombifyUnpacked in xp.pFlags var rc = xp.txDB.byItemID.first @@ -71,10 +70,7 @@ proc disposeExpiredItems*(xp: TxPoolRef) {.gcsafe,raises: [KeyError].} = break rc = xp.txDB.byItemID.next(key) - if item.status == txItemPacked: - if not dspPacked: - continue - else: + if item.status != txItemPacked: if not dspUnpacked: continue diff --git a/nimbus/core/tx_pool/tx_tasks/tx_head.nim b/nimbus/core/tx_pool/tx_tasks/tx_head.nim index dec83e5c15..82aad848f7 100644 --- a/nimbus/core/tx_pool/tx_tasks/tx_head.nim +++ b/nimbus/core/tx_pool/tx_tasks/tx_head.nim @@ -16,7 +16,6 @@ import std/[tables], ../../../common/common, - ../tx_chain, ../tx_desc, ../tx_info, ../tx_item, @@ -48,7 +47,7 @@ logScope: # use it as a stack/lifo as the ordering is reversed proc insert(xp: TxPoolRef; kq: TxHeadDiffRef; blockHash: Hash256) {.raises: [BlockNotFound].} = - let db = xp.chain.com.db + let db = xp.vmState.com.db for tx in db.getBlockBody(blockHash).transactions: if tx.versionedHashes.len > 0: # EIP-4844 blobs are not persisted and cannot be re-broadcasted. @@ -60,7 +59,7 @@ proc insert(xp: TxPoolRef; kq: TxHeadDiffRef; blockHash: Hash256) proc remove(xp: TxPoolRef; kq: TxHeadDiffRef; blockHash: Hash256) {.gcsafe,raises: [BlockNotFound].} = - let db = xp.chain.com.db + let db = xp.vmState.com.db for tx in db.getBlockBody(blockHash).transactions: kq.remTxs[tx.itemID] = true @@ -123,10 +122,10 @@ proc headDiff*(xp: TxPoolRef; ## of txs to be removed is *DEL - ADD*. ## let - curHead = xp.chain.head + curHead = xp.head curHash = curHead.blockHash newHash = newHead.blockHash - db = xp.chain.com.db + db = xp.vmState.com.db var ignHeader: BlockHeader if not db.getBlockHeader(newHash, ignHeader): diff --git a/nimbus/core/tx_pool/tx_tasks/tx_packer.nim b/nimbus/core/tx_pool/tx_tasks/tx_packer.nim deleted file mode 100644 index c8663c330c..0000000000 --- a/nimbus/core/tx_pool/tx_tasks/tx_packer.nim +++ /dev/null @@ -1,261 +0,0 @@ -# Nimbus -# Copyright (c) 2018-2024 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or -# http://www.apache.org/licenses/LICENSE-2.0) -# * MIT license ([LICENSE-MIT](LICENSE-MIT) or -# http://opensource.org/licenses/MIT) -# at your option. This file may not be copied, modified, or distributed except -# according to those terms. - -## Transaction Pool Tasklets: Packer, VM execute and compact txs -## ============================================================= -## - -{.push raises: [].} - -import - eth/[keys, rlp], - stew/sorted_set, - ../../../db/[ledger, core_db], - ../../../common/common, - ../../../utils/utils, - ../../../constants, - "../.."/[executor, validate, casper], - ../../../transaction/call_evm, - ../../../transaction, - ../../../evm/state, - ../../../evm/types, - ".."/[tx_chain, tx_desc, tx_item, tx_tabs, tx_tabs/tx_status, tx_info], - "."/[tx_bucket, tx_classify] - -type - TxPackerStateRef = ref object - xp: TxPoolRef - tr: CoreDbMptRef - cleanState: bool - numBlobPerBlock: int - -const - receiptsExtensionSize = ##\ - ## Number of slots to extend the `receipts[]` at the same time. 
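`headDiff` computes which transactions must be returned to the pool when the canonical head moves to a side branch; per its doc comment, the wanted set is *DEL - ADD*, that is, txs in the abandoned blocks minus those already re-included along the new branch. The set arithmetic in isolation, with string ids standing in for `Hash256` item ids (illustrative):

```nim
import std/sets

func headDiffTxs(delBranch, addBranch: seq[string]): HashSet[string] =
  ## The *DEL - ADD* rule from the `headDiff` doc comment.
  result = delBranch.toHashSet
  for id in addBranch:
    result.excl id               # already present on the new branch

when isMainModule:
  doAssert headDiffTxs(@["a", "b", "c"], @["b"]) == ["a", "c"].toHashSet
```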
- 20 - -# ------------------------------------------------------------------------------ -# Private helpers -# ------------------------------------------------------------------------------ - -proc persist(pst: TxPackerStateRef) - {.gcsafe,raises: [].} = - ## Smart wrapper - if not pst.cleanState: - let fork = pst.xp.chain.nextFork - pst.xp.chain.vmState.stateDB.persist(clearEmptyAccount = fork >= FkSpurious) - pst.cleanState = true - -# ------------------------------------------------------------------------------ -# Private functions -# ------------------------------------------------------------------------------ - -proc runTx(pst: TxPackerStateRef; item: TxItemRef): GasInt = - ## Execute item transaction and update `vmState` book keeping. Returns the - ## `gasUsed` after executing the transaction. - let - baseFee = pst.xp.chain.baseFee - tx = item.tx.eip1559TxNormalization(baseFee.GasInt) - - let gasUsed = tx.txCallEvm(item.sender, pst.xp.chain.vmState) - pst.cleanState = false - doAssert 0 <= gasUsed - gasUsed - -proc runTxCommit(pst: TxPackerStateRef; item: TxItemRef; gasBurned: GasInt) - {.gcsafe,raises: [CatchableError].} = - ## Book keeping after executing argument `item` transaction in the VM. The - ## function returns the next number of items `nItems+1`. - let - xp = pst.xp - vmState = xp.chain.vmState - inx = xp.txDB.byStatus.eq(txItemPacked).nItems - gasTip = item.tx.effectiveGasTip(xp.chain.baseFee) - - # The gas tip cannot get negative as all items in the `staged` bucket - # are vetted for profitability before entering that bucket. - assert 0 <= gasTip - let reward = gasBurned.u256 * gasTip.uint64.u256 - vmState.stateDB.addBalance(xp.chain.feeRecipient, reward) - xp.blockValue += reward - - if vmState.collectWitnessData: - vmState.stateDB.collectWitnessData() - - # Save accounts via persist() is not needed unless the fork is smaller - # than `FkByzantium` in which case, the `rootHash()` function is called - # by `makeReceipt()`. As the `rootHash()` function asserts unconditionally - # that the account cache has been saved, the `persist()` call is - # obligatory here. - if xp.chain.nextFork < FkByzantium: - pst.persist() - - # Update receipts sequence - if vmState.receipts.len <= inx: - vmState.receipts.setLen(inx + receiptsExtensionSize) - - # Return remaining gas to the block gas counter so it is - # available for the next transaction. - vmState.gasPool += item.tx.gasLimit - gasBurned - - # gasUsed accounting - vmState.cumulativeGasUsed += gasBurned - vmState.receipts[inx] = vmState.makeReceipt(item.tx.txType) - - # Update txRoot - pst.tr.merge(rlp.encode(inx.uint64), rlp.encode(item.tx)).isOkOr: - raiseAssert "runTxCommit(): merge failed, " & $$error - - # Add the item to the `packed` bucket. This implicitely increases the - # receipts index `inx` at the next visit of this function. 
- discard xp.txDB.reassign(item,txItemPacked) - -# ------------------------------------------------------------------------------ -# Private functions: packer packerVmExec() helpers -# ------------------------------------------------------------------------------ - -proc vmExecInit(xp: TxPoolRef): Result[TxPackerStateRef, string] - {.gcsafe,raises: [CatchableError].} = - - # Flush `packed` bucket - xp.bucketFlushPacked - - # reset blockValue before adding any tx - xp.blockValue = 0.u256 - - # EIP-4788 - if xp.chain.nextFork >= FkCancun: - let beaconRoot = xp.chain.com.pos.parentBeaconBlockRoot - xp.chain.vmState.processBeaconBlockRoot(beaconRoot).isOkOr: - return err(error) - - let packer = TxPackerStateRef( # return value - xp: xp, - tr: AristoDbMemory.newCoreDbRef().ctx.getGeneric(clearData=true), - numBlobPerBlock: 0, - ) - ok(packer) - -proc vmExecGrabItem(pst: TxPackerStateRef; item: TxItemRef): Result[bool,void] - {.gcsafe,raises: [CatchableError].} = - ## Greedily collect & compact items as long as the accumulated `gasLimit` - ## values are below the maximum block size. - let - xp = pst.xp - vmState = xp.chain.vmState - - if not item.tx.validateChainId(xp.chain.com.chainId): - discard xp.txDB.dispose(item, txInfoChainIdMismatch) - return ok(false) # continue with next account - - # EIP-4844 - if pst.numBlobPerBlock + item.tx.versionedHashes.len > MAX_BLOBS_PER_BLOCK: - return ok(false) # continue with next account - pst.numBlobPerBlock += item.tx.versionedHashes.len - - # Verify we have enough gas in gasPool - if vmState.gasPool < item.tx.gasLimit: - # skip this transaction and - # continue with next account - # if we don't have enough gas - return ok(false) - vmState.gasPool -= item.tx.gasLimit - - # Validate transaction relative to the current vmState - if not xp.classifyValidatePacked(vmState, item): - return ok(false) # continue with next account - - # EIP-1153 - vmState.stateDB.clearTransientStorage() - - let - accTx = vmState.stateDB.beginSavepoint - gasUsed = pst.runTx(item) # this is the crucial part, running the tx - - # Find out what to do next: accepting this tx or trying the next account - if not xp.classifyPacked(vmState.cumulativeGasUsed, gasUsed): - vmState.stateDB.rollback(accTx) - if xp.classifyPackedNext(vmState.cumulativeGasUsed, gasUsed): - return ok(false) # continue with next account - return err() # otherwise stop collecting - - # Commit account state DB - vmState.stateDB.commit(accTx) - - vmState.stateDB.persist(clearEmptyAccount = xp.chain.nextFork >= FkSpurious) - # let midRoot = vmState.stateDB.rootHash -- notused - - # Finish book-keeping and move item to `packed` bucket - pst.runTxCommit(item, gasUsed) - - ok(true) # fetch the very next item - - -proc vmExecCommit(pst: TxPackerStateRef) - {.gcsafe,raises: [].} = - let - xp = pst.xp - vmState = xp.chain.vmState - - # EIP-4895 - if xp.chain.nextFork >= FkShanghai: - for withdrawal in xp.chain.com.pos.withdrawals: - vmState.stateDB.addBalance(withdrawal.address, withdrawal.weiAmount) - - # Reward beneficiary - vmState.mutateStateDB: - if vmState.collectWitnessData: - db.collectWitnessData() - # Finish up, then vmState.stateDB.rootHash may be accessed - db.persist(clearEmptyAccount = xp.chain.nextFork >= FkSpurious) - - # Update flexi-array, set proper length - let nItems = xp.txDB.byStatus.eq(txItemPacked).nItems - vmState.receipts.setLen(nItems) - - xp.chain.receiptsRoot = vmState.receipts.calcReceiptsRoot - xp.chain.logsBloom = vmState.receipts.createBloom - xp.chain.txRoot = 
pst.tr.state(updateOk=true).valueOr: - raiseAssert "vmExecCommit(): state() failed " & $$error - xp.chain.stateRoot = vmState.stateDB.rootHash - - -# ------------------------------------------------------------------------------ -# Public functions -# ------------------------------------------------------------------------------ - -proc packerVmExec*(xp: TxPoolRef): Result[void, string] {.gcsafe,raises: [CatchableError].} = - ## Rebuild `packed` bucket by selection items from the `staged` bucket - ## after executing them in the VM. - let db = xp.chain.com.db - let dbTx = db.ctx.newTransaction() - defer: dbTx.dispose() - - var pst = xp.vmExecInit.valueOr: - return err(error) - - block loop: - for (_,nonceList) in pst.xp.txDB.packingOrderAccounts(txItemStaged): - - block account: - for item in nonceList.incNonce: - let rc = pst.vmExecGrabItem(item) - if rc.isErr: - break loop # stop - if not rc.value: - break account # continue with next account - - pst.vmExecCommit - ok() - # Block chain will roll back automatically - -# ------------------------------------------------------------------------------ -# End -# ------------------------------------------------------------------------------ diff --git a/nimbus/db/aristo/TODO.md b/nimbus/db/aristo/TODO.md index 8ba6161a78..a35630bf95 100644 --- a/nimbus/db/aristo/TODO.md +++ b/nimbus/db/aristo/TODO.md @@ -14,3 +14,5 @@ of proof nodes is rather small. Also, a right boundary leaf node is typically cleared. This needs to be re-checked when writing the `proof` function mentioned above. + +* `aristo_nearby` also qualifies for a re-write, now diff --git a/nimbus/db/aristo/aristo_api.nim b/nimbus/db/aristo/aristo_api.nim index 135051b0dc..b487604fa7 100644 --- a/nimbus/db/aristo/aristo_api.nim +++ b/nimbus/db/aristo/aristo_api.nim @@ -18,8 +18,8 @@ import results, ./aristo_desc/desc_backend, ./aristo_init/memory_db, - "."/[aristo_delete, aristo_desc, aristo_fetch, - aristo_init, aristo_merge, aristo_path, aristo_profile, aristo_tx] + "."/[aristo_delete, aristo_desc, aristo_fetch, aristo_init, aristo_merge, + aristo_part, aristo_path, aristo_profile, aristo_tx] export AristoDbProfListRef @@ -327,6 +327,74 @@ type ## `(accPath,stoPath)` where `accPath` is the account key (into the MPT) ## and `stoPath` is the slot path of the corresponding storage area. + AristoApiPartAccountTwig* = + proc(db: AristoDbRef; + accPath: Hash256; + ): Result[seq[Blob], AristoError] + {.noRaise.} + ## This function returns a chain of rlp-encoded nodes along the argument + ## path `(root,path)`. + + AristoApiPartGenericTwig* = + proc(db: AristoDbRef; + root: VertexID; + path: openArray[byte]; + ): Result[seq[Blob], AristoError] + {.noRaise.} + ## Variant of `partAccountTwig()`. + ## + ## Note: This function provides functionality comparable to the + ## `getBranch()` function from `hexary.nim` + + AristoApiPartStorageTwig* = + proc(db: AristoDbRef; + accPath: Hash256; + stoPath: Hash256; + ): Result[seq[Blob], AristoError] + {.noRaise.} + ## Variant of `partAccountTwig()`. + + AristoApiPartUntwigGeneric* = + proc(chain: openArray[Blob]; + root: Hash256; + path: openArray[byte]; + ): Result[Blob,AristoError] + {.noRaise.} + ## Follow and verify the argument `chain` up until the last entry + ## which must be a leaf node. Extract the payload and pass it on + ## as the return value. 
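A hedged usage sketch of the twig API introduced here, modelled on `checkTwig()` from `aristo_check/check_twig.nim` further down in this diff; the import paths are assumptions and may need adjusting to the caller's location:

```nim
import
  eth/common,
  results,
  ./aristo/[aristo_compute, aristo_desc, aristo_part]

proc proveAndVerify(
    db: AristoDbRef;
    root: VertexID;
    path: openArray[byte];
      ): Result[Blob,AristoError] =
  ## Create a proof for `(root,path)` and immediately verify it, returning
  ## the leaf payload on success.
  let
    proof = ? db.partGenericTwig(root, path)  # rlp-encoded nodes along `path`
    key = ? db.computeKey (root,root)         # may compile hashes as side effect
  # a remote verifier needs only `proof`, the root hash and `path`:
  proof.partUntwigGeneric(key.to(Hash256), path)
```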
+ + AristoApiPartUntwigGenericOk* = + proc(chain: openArray[Blob]; + root: Hash256; + path: openArray[byte]; + payload: openArray[byte]; + ): Result[void,AristoError] + {.noRaise.} + ## Variant of `partUntwigGeneric()`. The function verifies the argument + ## `chain` of rlp-encoded nodes against the `path` and `payload` + ## arguments. + ## + ## Note: This function provides functionality comparable to the + ## `isValidBranch()` function from `hexary.nim`. + + AristoApiPartUntwigPath* = + proc(chain: openArray[Blob]; + root: Hash256; + path: Hash256; + ): Result[Blob,AristoError] + {.noRaise.} + ## Variant of `partUntwigGeneric()`. + + AristoApiPartUntwigPathOk* = + proc(chain: openArray[Blob]; + root: Hash256; + path: Hash256; + payload: openArray[byte]; + ): Result[void,AristoError] + {.noRaise.} + ## Variant of `partUntwigGenericOk()`. + AristoApiPathAsBlobFn* = proc(tag: PathID; ): Blob @@ -444,6 +512,14 @@ type mergeGenericData*: AristoApiMergeGenericDataFn mergeStorageData*: AristoApiMergeStorageDataFn + partAccountTwig*: AristoApiPartAccountTwig + partGenericTwig*: AristoApiPartGenericTwig + partStorageTwig*: AristoApiPartStorageTwig + partUntwigGeneric*: AristoApiPartUntwigGeneric + partUntwigGenericOk*: AristoApiPartUntwigGenericOk + partUntwigPath*: AristoApiPartUntwigPath + partUntwigPathOk*: AristoApiPartUntwigPathOk + pathAsBlob*: AristoApiPathAsBlobFn persist*: AristoApiPersistFn reCentre*: AristoApiReCentreFn @@ -491,6 +567,14 @@ type AristoApiProfMergeGenericDataFn = "mergeGenericData" AristoApiProfMergeStorageDataFn = "mergeStorageData" + AristoApiProfPartAccountTwigFn = "partAccountTwig" + AristoApiProfPartGenericTwigFn = "partGenericTwig" + AristoApiProfPartStorageTwigFn = "partStorageTwig" + AristoApiProfPartUntwigGenericFn = "partUntwigGeneric" + AristoApiProfPartUntwigGenericOkFn = "partUntwigGenericOk" + AristoApiProfPartUntwigPathFn = "partUntwigPath" + AristoApiProfPartUntwigPathOkFn = "partUntwigPathOk" + AristoApiProfPathAsBlobFn = "pathAsBlob" AristoApiProfPersistFn = "persist" AristoApiProfReCentreFn = "reCentre" @@ -555,6 +639,14 @@ when AutoValidateApiHooks: doAssert not api.mergeGenericData.isNil doAssert not api.mergeStorageData.isNil + doAssert not api.partAccountTwig.isNil + doAssert not api.partGenericTwig.isNil + doAssert not api.partStorageTwig.isNil + doAssert not api.partUntwigGeneric.isNil + doAssert not api.partUntwigGenericOk.isNil + doAssert not api.partUntwigPath.isNil + doAssert not api.partUntwigPathOk.isNil + doAssert not api.pathAsBlob.isNil doAssert not api.persist.isNil doAssert not api.reCentre.isNil @@ -623,6 +715,14 @@ func init*(api: var AristoApiObj) = api.mergeGenericData = mergeGenericData api.mergeStorageData = mergeStorageData + api.partAccountTwig = partAccountTwig + api.partGenericTwig = partGenericTwig + api.partStorageTwig = partStorageTwig + api.partUntwigGeneric = partUntwigGeneric + api.partUntwigGenericOk = partUntwigGenericOk + api.partUntwigPath = partUntwigPath + api.partUntwigPathOk = partUntwigPathOk + api.pathAsBlob = pathAsBlob api.persist = persist api.reCentre = reCentre @@ -673,6 +773,14 @@ func dup*(api: AristoApiRef): AristoApiRef = mergeGenericData: api.mergeGenericData, mergeStorageData: api.mergeStorageData, + partAccountTwig: api.partAccountTwig, + partGenericTwig: api.partGenericTwig, + partStorageTwig: api.partStorageTwig, + partUntwigGeneric: api.partUntwigGeneric, + partUntwigGenericOk: api.partUntwigGenericOk, + partUntwigPath: api.partUntwigPath, + partUntwigPathOk: api.partUntwigPathOk, + pathAsBlob: 
api.pathAsBlob, persist: api.persist, reCentre: api.reCentre, @@ -845,6 +953,41 @@ func init*( AristoApiProfMergeStorageDataFn.profileRunner: result = api.mergeStorageData(a, b, c, d) + profApi.partAccountTwig = + proc(a: AristoDbRef; b: Hash256): auto = + AristoApiProfPartAccountTwigFn.profileRunner: + result = api.partAccountTwig(a, b) + + profApi.partGenericTwig = + proc(a: AristoDbRef; b: VertexID; c: openArray[byte]): auto = + AristoApiProfPartGenericTwigFn.profileRunner: + result = api.partGenericTwig(a, b, c) + + profApi.partStorageTwig = + proc(a: AristoDbRef; b: Hash256; c: Hash256): auto = + AristoApiProfPartStorageTwigFn.profileRunner: + result = api.partStorageTwig(a, b, c) + + profApi.partUntwigGeneric = + proc(a: openArray[Blob]; b: Hash256; c: openArray[byte]): auto = + AristoApiProfPartUntwigGenericFn.profileRunner: + result = api.partUntwigGeneric(a, b, c) + + profApi.partUntwigGenericOk = + proc(a: openArray[Blob]; b: Hash256; c, d: openArray[byte]): auto = + AristoApiProfPartUntwigGenericOkFn.profileRunner: + result = api.partUntwigGenericOk(a, b, c, d) + + profApi.partUntwigPath = + proc(a: openArray[Blob]; b, c: Hash256): auto = + AristoApiProfPartUntwigPathFn.profileRunner: + result = api.partUntwigPath(a, b, c) + + profApi.partUntwigPathOk = + proc(a: openArray[Blob]; b, c: Hash256; d: openArray[byte]): auto = + AristoApiProfPartUntwigPathOkFn.profileRunner: + result = api.partUntwigPathOk(a, b, c, d) + profApi.pathAsBlob = proc(a: PathID): auto = AristoApiProfPathAsBlobFn.profileRunner: diff --git a/nimbus/db/aristo/aristo_blobify.nim b/nimbus/db/aristo/aristo_blobify.nim index 8e0fef392b..493a11497b 100644 --- a/nimbus/db/aristo/aristo_blobify.nim +++ b/nimbus/db/aristo/aristo_blobify.nim @@ -146,9 +146,9 @@ proc blobifyTo*(pyl: LeafPayload, data: var Blob) = lens += uint16(tmp.len - 1) shl 3 # 5 bits data &= tmp.data() - if VertexID(0) < pyl.stoID: + if pyl.stoID.isValid: mask = mask or 0x04 - let tmp = pyl.stoID.blobify() + let tmp = pyl.stoID.vid.blobify() lens += uint16(tmp.len - 1) shl 8 # 3 bits data &= tmp.data() @@ -275,7 +275,7 @@ proc deblobify( if (mask and 0x04) > 0: let len = (lens shr 8) and 0b111 - pAcc.stoID = VertexID(? load64(data, start, int(len + 1))) + pAcc.stoID = (true, VertexID(? load64(data, start, int(len + 1)))) if (mask and 0x08) > 0: if data.len() < start + 32: diff --git a/nimbus/db/aristo/aristo_check.nim b/nimbus/db/aristo/aristo_check.nim index 59727d5e12..f9e15bc583 100644 --- a/nimbus/db/aristo/aristo_check.nim +++ b/nimbus/db/aristo/aristo_check.nim @@ -20,7 +20,7 @@ import results, ./aristo_walk/persistent, "."/[aristo_desc, aristo_get, aristo_init], - ./aristo_check/[check_be, check_top] + ./aristo_check/[check_be, check_top, check_twig] # ------------------------------------------------------------------------------ # Public functions @@ -78,7 +78,7 @@ proc checkBE*( proc check*( - db: AristoDbRef; # Database, top layer + db: AristoDbRef; # Database relax = false; # Check existing hashes only cache = true; # Also verify against top layer cache proofMode = false; # Has proof nodes @@ -88,6 +88,41 @@ proc check*( ? db.checkBE() ok() +proc check*( + db: AristoDbRef; # Database + root: VertexID; # Start node + path: openArray[byte]; # Data path + ): Result[void,AristoError] = + ## Check generic path `path` against portal proof generation and + ## verification. + ## + ## Note that this check might have side effects in that it might compile + ## the hash keys on the `root` sub-tree. 
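Each `profApi` hook in this hunk wraps the corresponding `api` function pointer in a closure that runs the call inside `profileRunner`, keyed by the matching `AristoApiProf...Fn` name. A miniature of that decorator idea, using `std/monotimes` in place of the aristo profiler (entirely illustrative, not the `aristo_profile` API):

```nim
import std/[monotimes, tables, times]

type Api = object
  fetch: proc(key: int): int   # one function-pointer field, like AristoApiRef

var profTab: Table[string, Duration]

proc withProfiler(api: Api): Api =
  ## Replace the function pointer with a timing wrapper around the original.
  let inner = api.fetch
  result.fetch = proc(key: int): int =
    let t0 = getMonoTime()
    result = inner(key)
    profTab.mgetOrPut("fetch", DurationZero) += getMonoTime() - t0

when isMainModule:
  var api = Api(fetch: proc(key: int): int = key * 2)
  api = api.withProfiler
  doAssert api.fetch(21) == 42
  doAssert "fetch" in profTab
```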
+  db.checkTwig(root, path) + +proc check*( +    db: AristoDbRef;                      # Database +    accPath: Hash256;                     # Account key +      ): Result[void,AristoError] = +  ## Check accounts tree path `accPath` against portal proof generation and +  ## verification. +  ## +  ## Note that this check might have side effects in that it might compile +  ## the hash keys on the accounts sub-tree. +  db.checkTwig(VertexID(1), accPath.data) + +proc check*( +    db: AristoDbRef;                      # Database +    accPath: Hash256;                     # Account key +    stoPath: Hash256;                     # Storage key +      ): Result[void,AristoError] = +  ## Check the storage tree path `(accPath,stoPath)` against portal proof +  ## generation and verification. +  ## +  ## Note that this check might have side effects in that it might compile +  ## the hash keys on the particular storage sub-tree. +  db.checkTwig(accPath, stoPath) + # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/nimbus/db/aristo/aristo_check/check_top.nim b/nimbus/db/aristo/aristo_check/check_top.nim index e9058bef1b..27b9d79bc6 100644 --- a/nimbus/db/aristo/aristo_check/check_top.nim +++ b/nimbus/db/aristo/aristo_check/check_top.nim @@ -89,8 +89,9 @@ proc checkTopCommon*( case vtx.vType: of Leaf: if vtx.lData.pType == AccountData: -        let stoVid = vtx.lData.stoID -        if stoVid.isValid: +        let stoID = vtx.lData.stoID +        if stoID.isValid: +          let stoVid = stoID.vid if stoVid in stoRoots: return err((stoVid,CheckAnyVidSharedStorageRoot)) if vTop < stoVid: diff --git a/nimbus/db/aristo/aristo_check/check_twig.nim b/nimbus/db/aristo/aristo_check/check_twig.nim new file mode 100644 index 0000000000..88e6ece42e --- /dev/null +++ b/nimbus/db/aristo/aristo_check/check_twig.nim @@ -0,0 +1,50 @@ +# nimbus-eth1 +# Copyright (c) 2023-2024 Status Research & Development GmbH +# Licensed under either of +#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +#    http://www.apache.org/licenses/LICENSE-2.0) +#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or +#    http://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed +# except according to those terms. + +{.push raises: [].} + +import +  eth/common, +  results, +  ".."/[aristo_compute, aristo_desc, aristo_fetch, aristo_part] + +# ------------------------------------------------------------------------------ +# Public functions +# ------------------------------------------------------------------------------ + +proc checkTwig*( +    db: AristoDbRef;                      # Database +    root: VertexID;                       # Start node +    path: openArray[byte];                # Data path +      ): Result[void,AristoError] = +  let +    proof = ? db.partGenericTwig(root, path) +    key = ? db.computeKey (root,root) +    pyl = ? proof.partUntwigGeneric(key.to(Hash256), path) + +  ok() + +proc checkTwig*( +    db: AristoDbRef;                      # Database +    accPath: Hash256;                     # Account key +    stoPath: Hash256;                     # Storage key +      ): Result[void,AristoError] = +  let +    proof = ? db.partStorageTwig(accPath, stoPath) +    vid = ? db.fetchStorageID accPath +    key = ? db.computeKey (VertexID(1),vid) +    pyl = ?
proof.partUntwigPath(key.to(Hash256), stoPath) + + ok() + +# ------------------------------------------------------------------------------ +# End +# ------------------------------------------------------------------------------ + diff --git a/nimbus/db/aristo/aristo_compute.nim b/nimbus/db/aristo/aristo_compute.nim index dbbf408b24..e3c26d3f64 100644 --- a/nimbus/db/aristo/aristo_compute.nim +++ b/nimbus/db/aristo/aristo_compute.nim @@ -76,7 +76,7 @@ proc computeKeyImpl( stoID = vtx.lData.stoID skey = if stoID.isValid: - let (skey, sl) = ?db.computeKeyImpl((stoID, stoID)) + let (skey, sl) = ?db.computeKeyImpl((stoID.vid, stoID.vid)) level = maxLevel(level, sl) skey else: diff --git a/nimbus/db/aristo/aristo_debug.nim b/nimbus/db/aristo/aristo_debug.nim index 5608c2d326..2f0bb4a6e0 100644 --- a/nimbus/db/aristo/aristo_debug.nim +++ b/nimbus/db/aristo/aristo_debug.nim @@ -100,6 +100,12 @@ proc ppVid(vid: VertexID; pfx = true): string = else: result &= "ø" +proc ppVid(sid: StorageID; pfx = true): string = + if sid.isValid or not sid.vid.isValid: + sid.vid.ppVid(pfx) + else: + (if pfx: "$" else: "") & "®" & sid.vid.ppVid(false) + proc ppVid(rvid: RootedVertexID; pfx = true): string = if pfx: result = "$" @@ -234,7 +240,7 @@ proc ppNode( if nd.lData.pType == AccountData: result &= "(" & nd.lData.account.ppAriAccount() & "," if nd.lData.stoID.isValid: - let tag = db.ppKeyOk(nd.key[0],(rvid.root,nd.lData.stoID)) + let tag = db.ppKeyOk(nd.key[0],(rvid.root,nd.lData.stoID.vid)) result &= nd.lData.stoID.ppVid & tag else: result &= nd.lData.stoID.ppVid @@ -488,7 +494,7 @@ proc ppLayer( # Public functions # ------------------------------------------------------------------------------ -proc pp*(w: Hash256; codeHashOk = false): string = +proc pp*(w: Hash256; codeHashOk: bool): string = if codeHashOk: w.ppCodeHash elif w == EMPTY_ROOT_HASH: @@ -559,7 +565,7 @@ proc pp*( ): string = sTab.ppXTab(db.orDefault) -proc pp*(root: VertexID, leg: Leg; db = AristoDbRef(nil)): string = +proc pp*(leg: Leg; root: VertexID; db = AristoDbRef(nil)): string = let db = db.orDefault() result = "(" & leg.wp.vid.ppVid & "," block: @@ -583,7 +589,7 @@ proc pp*(hike: Hike; db = AristoDbRef(nil); indent = 4): string = else: if hike.legs[0].wp.vid != hike.root: result &= "(" & hike.root.ppVid & ")" & pfx - result &= hike.legs.mapIt(pp(hike.root, it, db)).join(pfx) + result &= hike.legs.mapIt(it.pp(hike.root, db)).join(pfx) result &= pfx & "(" & hike.tail.ppPathPfx & ")" result &= "]" diff --git a/nimbus/db/aristo/aristo_delete.nim b/nimbus/db/aristo/aristo_delete.nim index 2459de7e9b..df04aa865c 100644 --- a/nimbus/db/aristo/aristo_delete.nim +++ b/nimbus/db/aristo/aristo_delete.nim @@ -200,7 +200,7 @@ proc deleteAccountRecord*( # Delete storage tree if present if stoID.isValid: - ? db.delStoTreeImpl((stoID, stoID), accPath, NibblesBuf()) + ? 
db.delStoTreeImpl((stoID.vid, stoID.vid), accPath, NibblesBuf()) ?db.deleteImpl(hike) @@ -279,7 +279,7 @@ proc deleteStorageData*( if not stoID.isValid: return err(DelStoRootMissing) - let stoHike = stoPath.hikeUp(stoID, db).valueOr: + let stoHike = stoPath.hikeUp(stoID.vid, db).valueOr: if error[1] in HikeAcceptableStopsNotFound: return err(DelPathNotFound) return err(error[1]) @@ -292,12 +292,12 @@ proc deleteStorageData*( db.layersPutStoLeaf(AccountKey.mixUp(accPath, stoPath), nil) # Make sure that an account leaf has no dangling sub-trie - if db.getVtx((stoID, stoID)).isValid: + if db.getVtx((stoID.vid, stoID.vid)).isValid: return ok(false) # De-register the deleted storage tree from the account record let leaf = wpAcc.vtx.dup # Dup on modify - leaf.lData.stoID = VertexID(0) + leaf.lData.stoID.isValid = false db.layersPutAccLeaf(accPath, leaf) db.layersPutVtx((accHike.root, wpAcc.vid), leaf) ok(true) @@ -323,11 +323,11 @@ proc deleteStorageTree*( # Mark account path Merkle keys for update db.updateAccountForHasher accHike - ? db.delStoTreeImpl((stoID, stoID), accPath, NibblesBuf()) + ? db.delStoTreeImpl((stoID.vid, stoID.vid), accPath, NibblesBuf()) # De-register the deleted storage tree from the accounts record let leaf = wpAcc.vtx.dup # Dup on modify - leaf.lData.stoID = VertexID(0) + leaf.lData.stoID.isValid = false db.layersPutAccLeaf(accPath, leaf) db.layersPutVtx((accHike.root, wpAcc.vid), leaf) ok() diff --git a/nimbus/db/aristo/aristo_desc/desc_error.nim b/nimbus/db/aristo/aristo_desc/desc_error.nim index e52fec7eb5..aa492a0bc5 100644 --- a/nimbus/db/aristo/aristo_desc/desc_error.nim +++ b/nimbus/db/aristo/aristo_desc/desc_error.nim @@ -153,6 +153,8 @@ type # Part/proof node errors PartArgNotGenericRoot + PartArgNotInCore + PartArgRootAlreadyOnDatabase PartArgRootAlreadyUsed PartChkChangedKeyNotInKeyTab PartChkChangedVtxMissing @@ -167,6 +169,10 @@ type PartChkVidKeyTabLengthsDiffer PartChkVidTabCoreRootMissing PartChkVidTabVidMissing + PartChnBranchPathExhausted + PartChnExtPfxMismatch + PartChnLeafPathMismatch + PartChnNodeConvError PartCtxNotAvailable PartCtxStaleDescriptor PartExtVtxExistsAlready @@ -188,7 +194,13 @@ type PartRlpPayloadException PartRootKeysDontMatch PartRootVidsDontMatch - PartRootAlreadyOnDatabase + PartTrkEmptyPath + PartTrkFollowUpKeyMismatch + PartTrkGarbledNode + PartTrkLeafPfxMismatch + PartTrkLinkExpected + PartTrkPayloadMismatch + PartTrkRlpError PartVtxSlotWasModified PartVtxSlotWasNotModified diff --git a/nimbus/db/aristo/aristo_desc/desc_identifiers.nim b/nimbus/db/aristo/aristo_desc/desc_identifiers.nim index 613c3761f3..2a7e65fc4b 100644 --- a/nimbus/db/aristo/aristo_desc/desc_identifiers.nim +++ b/nimbus/db/aristo/aristo_desc/desc_identifiers.nim @@ -329,6 +329,11 @@ func to*(n: UInt256; T: type PathID): T = ## Representation of a scalar as `PathID` (preserving full information) T(pfx: n, length: 64) +func to*(a: PathID; T: type UInt256): T = + if not a.pfx.isZero: + assert a.length < 64 # debugging only + result = a.pfx shr (4 * (64 - a.length)) + # ------------------------------------------------------------------------------ # Public helpers: Miscellaneous mappings # ------------------------------------------------------------------------------ @@ -392,23 +397,6 @@ func `$`*(vids: HashSet[VertexID]): string = "$" & it.uint64.toHex.strip(trailing=false,chars={'0'}) ).join(",") & "}" -func `$`*(key: Hash256): string = - let w = UInt256.fromBytesBE key.data - if w == high(UInt256): - "2^256-1" - elif w == 0.u256: - "0" - elif w == 2.u256.pow 
255: -    "2^255" # 800... -  elif w == 2.u256.pow 254: -    "2^254" # 400.. -  elif w == 2.u256.pow 253: -    "2^253" # 200... -  elif w == 2.u256.pow 251: -    "2^252" # 100... -  else: -    w.toHex - func `$`*(key: HashKey): string = toHex(key.data) diff --git a/nimbus/db/aristo/aristo_desc/desc_structural.nim b/nimbus/db/aristo/aristo_desc/desc_structural.nim index 49648ad890..b1e277ac50 100644 --- a/nimbus/db/aristo/aristo_desc/desc_structural.nim +++ b/nimbus/db/aristo/aristo_desc/desc_structural.nim @@ -47,6 +47,14 @@ type AccountData ## `Aristo account` with vertex IDs links StoData ## Slot storage data +  StorageID* = tuple +    ## Once a storage tree is allocated, its root vertex ID is registered in +    ## the leaf payload of an account. After subsequent storage tree deletion +    ## the root vertex ID will be kept in the leaf payload for re-use but set +    ## disabled (`.isValid` = `false`). +    isValid: bool                    ## See also `isValid()` for `VertexID` +    vid: VertexID                    ## Storage root vertex ID + LeafPayload* = object ## The payload type depends on the sub-tree used. The `VertexID(1)` rooted ## sub-tree only has `AccountData` type payload, stoID-based have StoData @@ -56,7 +64,7 @@ type rawBlob*: Blob ## Opaque data, default value of AccountData: account*: AristoAccount -      stoID*: VertexID                 ## Storage vertex ID (if any) +      stoID*: StorageID                ## Storage vertex ID (if any) of StoData: stoData*: UInt256 diff --git a/nimbus/db/aristo/aristo_fetch.nim b/nimbus/db/aristo/aristo_fetch.nim index 4fc8403ebd..240154697c 100644 --- a/nimbus/db/aristo/aristo_fetch.nim +++ b/nimbus/db/aristo/aristo_fetch.nim @@ -163,7 +163,7 @@ proc fetchStorageID*( if not stoID.isValid: return err(FetchPathNotFound) -  ok stoID +  ok stoID.vid proc retrieveStoragePayload( db: AristoDbRef; diff --git a/nimbus/db/aristo/aristo_init/memory_only.nim b/nimbus/db/aristo/aristo_init/memory_only.nim index dec011ae0b..a07b4bd8b2 100644 --- a/nimbus/db/aristo/aristo_init/memory_only.nim +++ b/nimbus/db/aristo/aristo_init/memory_only.nim @@ -47,31 +47,12 @@ proc kind*( # Public database constuctors, destructor # ------------------------------------------------------------------------------ -proc init*( -    T: type AristoDbRef;                  # Target type -    B: type MemBackendRef;                # Backend type -    ): T = -  ## Memory backend constructor. -  ## -  ## If the `qidLayout` argument is set `QidLayoutRef(nil)`, the a backend -  ## database will not provide filter history management. Providing a different -  ## scheduler layout shoud be used with care as table access with different -  ## layouts might render the filter history data unmanageable. -  ## -  when B is MemBackendRef: -    AristoDbRef(top: LayerRef.init(), backend: memoryBackend()) - proc init*( T: type AristoDbRef; # Target type B: type MemOnlyBackend; # Backend type ): T = ## Memory backend constructor. ## -  ## If the `qidLayout` argument is set `QidLayoutRef(nil)`, the a backend -  ## database will not provide filter history management. Providing a different -  ## scheduler layout shoud be used with care as table access with different -  ## layouts might render the filter history data unmanageable.
- ## when B is VoidBackendRef: AristoDbRef(top: LayerRef.init()) diff --git a/nimbus/db/aristo/aristo_merge.nim b/nimbus/db/aristo/aristo_merge.nim index 5da23e9c34..05d8d0abbe 100644 --- a/nimbus/db/aristo/aristo_merge.nim +++ b/nimbus/db/aristo/aristo_merge.nim @@ -127,11 +127,14 @@ proc mergeStorageData*( stoID = vtx.lData.stoID # Provide new storage ID when needed - useID = if stoID.isValid: stoID else: db.vidFetch() + useID = + if stoID.isValid: stoID # Use as is + elif stoID.vid.isValid: (true, stoID.vid) # Re-use previous vid + else: (true, db.vidFetch()) # Create new vid # Call merge pyl = LeafPayload(pType: StoData, stoData: stoData) - rc = db.mergePayloadImpl(useID, stoPath.data, pyl) + rc = db.mergePayloadImpl(useID.vid, stoPath.data, pyl) if rc.isOk: # Mark account path Merkle keys for update diff --git a/nimbus/db/aristo/aristo_nearby.nim b/nimbus/db/aristo/aristo_nearby.nim index 9c35674aec..47bf934f14 100644 --- a/nimbus/db/aristo/aristo_nearby.nim +++ b/nimbus/db/aristo/aristo_nearby.nim @@ -151,11 +151,11 @@ proc zeroAdjust( if 0 < hike.legs.len: return ok(hike) - let root = db.getVtx (hike.root, hike.root) - if root.isValid: + let rootVtx = db.getVtx (hike.root, hike.root) + if rootVtx.isValid: block fail: var pfx: NibblesBuf - case root.vType: + case rootVtx.vType: of Branch: # Find first non-dangling link and assign it let nibbleID = block: @@ -166,18 +166,28 @@ proc zeroAdjust( if hike.tail.len == 0: break fail hike.tail[0].int8 - let n = root.branchBorderNibble nibbleID + let n = rootVtx.branchBorderNibble nibbleID if n < 0: # Before or after the database range return err((hike.root,NearbyBeyondRange)) - pfx = root.ePfx & NibblesBuf.nibble(n.byte) + pfx = rootVtx.ePfx & NibblesBuf.nibble(n.byte) of Leaf: - pfx = root.lPfx + pfx = rootVtx.lPfx if not hike.accept pfx: # Before or after the database range return err((hike.root,NearbyBeyondRange)) + # Pathological case: matching `rootVtx` which is a leaf + if hike.legs.len == 0 and hike.tail.len == 0: + return ok(Hike( + root: hike.root, + legs: @[Leg( + nibble: -1, + wp: VidVtxPair( + vid: hike.root, + vtx: rootVtx))])) + var newHike = pfx.toHike(hike.root, db) if 0 < newHike.legs.len: return ok(newHike) @@ -268,10 +278,6 @@ proc nearbyNext( # Some easy cases let hike = ? hike.zeroAdjust(db, doLeast=moveRight) - # if hike.legs[^1].wp.vtx.vType == Extension: - # let vid = hike.legs[^1].wp.vtx.eVid - # return hike.complete(vid, db, hikeLenMax, doLeast=moveRight) - var uHike = hike start = true diff --git a/nimbus/db/aristo/aristo_part.nim b/nimbus/db/aristo/aristo_part.nim index ec85c5da5d..b10130de2c 100644 --- a/nimbus/db/aristo/aristo_part.nim +++ b/nimbus/db/aristo/aristo_part.nim @@ -17,9 +17,10 @@ import std/[sets, sequtils], eth/common, results, - "."/[aristo_desc, aristo_get, aristo_merge, aristo_layers, aristo_utils], + "."/[aristo_desc, aristo_fetch, aristo_get, aristo_merge, aristo_layers, + aristo_utils], #./aristo_part/part_debug, - ./aristo_part/[part_ctx, part_desc, part_helpers] + ./aristo_part/[part_chain_rlp, part_ctx, part_desc, part_helpers] export PartStateCtx, @@ -35,22 +36,127 @@ proc roots*(ps: PartStateRef): seq[VertexID] = ## Getter: list of root vertex IDs from `ps`. ps.core.keys.toSeq -iterator perimeter*(ps: PartStateRef; root: VertexID): VertexID = +iterator perimeter*( + ps: PartStateRef; + root: VertexID; + ): (RootedVertexID, HashKey) = ## Retrieve the list of dangling vertex IDs relative to `ps`. 
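+  ## Sketch (iterate the dangling boundary of the `root` proof envelope;
+  ## names as in this module):
+  ## ::
+  ##   for (rvid, key) in ps.perimeter root:
+  ##     discard (rvid, key)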
ps.core.withValue(root,keys): for (key,rvid) in ps.byKey.pairs: if rvid.root == root and key notin keys[] and key notin ps.changed: -        yield rvid.vid +        yield (rvid,key) -iterator vkPairs*(ps: PartStateRef): (RootedVertexID,HashKey) = +iterator updated*( +    ps: PartStateRef; +    root: VertexID; +    ): (RootedVertexID, HashKey) = +  ## Retrieve the list of changed vertex IDs relative to `ps`. These vertex +  ## IDs are no longer considered part of the perimeter. +  for key in ps.changed: +    let rvid = ps[key] +    if rvid.root == root: +      yield (rvid,key) + +iterator vkPairs*(ps: PartStateRef): (RootedVertexID, HashKey) = ## Retrieve the list of cached `(key,vertex-ID)` pairs. -  for (k,v) in ps.byKey.pairs: -    yield (v,k) +  for (key, rvid) in ps.byKey.pairs: +    yield (rvid, key) # ------------------------------------------------------------------------------ # Public functions # ------------------------------------------------------------------------------ +proc partGenericTwig*( +    db: AristoDbRef; +    root: VertexID; +    path: NibblesBuf; +    ): Result[seq[Blob], AristoError] = +  ## This function returns a chain of rlp-encoded nodes along the argument +  ## path `(root,path)`. +  ## +  var chain: seq[Blob] +  ? db.chainRlpNodes((root,root), path, chain) +  ok chain + +proc partGenericTwig*( +    db: AristoDbRef; +    root: VertexID; +    path: openArray[byte]; +    ): Result[seq[Blob], AristoError] = +  ## Variant of `partGenericTwig()`. +  ## +  ## Note: This function provides functionality comparable to the +  ## `getBranch()` function from `hexary.nim` +  ## +  db.partGenericTwig(root, NibblesBuf.fromBytes path) + +proc partAccountTwig*( +    db: AristoDbRef; +    accPath: Hash256; +    ): Result[seq[Blob], AristoError] = +  ## Variant of `partGenericTwig()`. +  db.partGenericTwig(VertexID(1), NibblesBuf.fromBytes accPath.data) + +proc partStorageTwig*( +    db: AristoDbRef; +    accPath: Hash256; +    stoPath: Hash256; +    ): Result[seq[Blob], AristoError] = +  ## Variant of `partGenericTwig()`. +  let vid = ? db.fetchStorageID accPath +  db.partGenericTwig(vid, NibblesBuf.fromBytes stoPath.data) + +# ---------- + +proc partUntwigGeneric*( +    chain: openArray[Blob]; +    root: Hash256; +    path: openArray[byte]; +    ): Result[Blob,AristoError] = +  ## Verify the chain of rlp-encoded nodes and return the payload. +  try: +    let nibbles = NibblesBuf.fromBytes path +    return chain.trackRlpNodes(root.to(HashKey), nibbles, start=true) +  except RlpError: +    return err(PartTrkRlpError) + +proc partUntwigPath*( +    chain: openArray[Blob]; +    root: Hash256; +    path: Hash256; +    ): Result[Blob,AristoError] = +  ## Variant of `partUntwigGeneric()`. +  chain.partUntwigGeneric(root, path.data) + + +proc partUntwigGenericOk*( +    chain: openArray[Blob]; +    root: Hash256; +    path: openArray[byte]; +    payload: openArray[byte]; +    ): Result[void,AristoError] = +  ## Verify the argument `chain` of rlp-encoded nodes against the `path` +  ## and `payload` arguments. +  ## +  ## Note: This function provides functionality comparable to the +  ## `isValidBranch()` function from `hexary.nim`. +  ## +  if payload == ? chain.partUntwigGeneric(root, path): +    ok() +  else: +    err(PartTrkPayloadMismatch) + +proc partUntwigPathOk*( +    chain: openArray[Blob]; +    root: Hash256; +    path: Hash256; +    payload: openArray[byte]; +    ): Result[void,AristoError] = +  ## Variant of `partUntwigGenericOk()`.
+  chain.partUntwigGenericOk(root, path.data, payload) + +# ---------------- + proc partPut*( ps: PartStateRef; # Partial database descriptor proof: openArray[Blob]; # RLP encoded proof nodes @@ -67,6 +173,11 @@ proc partPut*( # Check wether the chain has an accounts leaf node ? ps.updateAccountsTree(nodes, bl, mode) +  when false: # or true: +    echo ">>> partPut", +      "\n   chains\n    ", bl.chains.pp(ps), +      "" + # Assign vertex IDs. If possible, use IDs from `state` lookup var seen: HashSet[HashKey] for chain in bl.chains: @@ -129,7 +240,7 @@ proc partPut*( of Leaf: let lKey = node.key[0] if node.lData.pType == AccountData and lKey.isValid: -          node.lData.stoID = (? ps.getRvid(root, lKey))[0].vid +          node.lData.stoID = (true, (? ps.getRvid(root, lKey))[0].vid) of Branch: for n in 0 .. 15: let bKey = node.key[n] @@ -159,6 +270,16 @@ proc partPut*( ok() +proc partGetSubTree*(ps: PartStateRef; rootHash: Hash256): VertexID = +  ## For the argument `rootHash` retrieve the root vertex ID of a particular +  ## sub-tree from the partial state descriptor argument `ps`. The function +  ## returns `VertexID(0)` if there is no match. +  ## +  for vid in ps.core.keys: +    if ps[vid].to(Hash256) == rootHash: +      return vid + + proc partReRoot*( ps: PartStateRef; frRoot: VertexID; @@ -166,9 +287,11 @@ proc partReRoot*( ): Result[void,AristoError] = ## Realign a generic root vertex (i.e. `$2`..`$(LEAST_FREE_VID-1)`) for a ## `proof` state to a new root vertex. -  if frRoot notin ps.core or frRoot == toRoot: +  if frRoot == toRoot: return ok() # nothing to do +  if frRoot notin ps.core: +    return err(PartArgNotInCore) if frRoot < VertexID(2) or LEAST_FREE_VID <= frRoot.ord or toRoot < VertexID(2) or LEAST_FREE_VID <= toRoot.ord: return err(PartArgNotGenericRoot) @@ -176,7 +299,7 @@ proc partReRoot*( if toRoot in ps.core: return err(PartArgRootAlreadyUsed) if ps.db.getVtx((toRoot,toRoot)).isValid: -    return err(PartRootAlreadyOnDatabase) +    return err(PartArgRootAlreadyOnDatabase) # Migrate for key in ps.byKey.keys: diff --git a/nimbus/db/aristo/aristo_part/part_chain_rlp.nim b/nimbus/db/aristo/aristo_part/part_chain_rlp.nim new file mode 100644 index 0000000000..d23f714194 --- /dev/null +++ b/nimbus/db/aristo/aristo_part/part_chain_rlp.nim @@ -0,0 +1,107 @@ +# nimbus-eth1 +# Copyright (c) 2024 Status Research & Development GmbH +# Licensed under either of +#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +#    http://www.apache.org/licenses/LICENSE-2.0) +#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or +#    http://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed +# except according to those terms. + +{.push raises: [].} + +import +  eth/common, +  results, +  ".."/[aristo_desc, aristo_get, aristo_utils, aristo_compute, aristo_serialise] + +# ------------------------------------------------------------------------------ +# Public functions +# ------------------------------------------------------------------------------ + +proc chainRlpNodes*( +    db: AristoDbRef; +    rvid: RootedVertexID; +    path: NibblesBuf; +    chain: var seq[Blob]; +    ): Result[void,AristoError] = +  ## Inspired by the `getBranchAux()` function from `hexary.nim` +  let +    key = ? db.computeKey rvid +    (vtx,_) = ?
db.getVtxRc rvid +    node = vtx.toNode(rvid.root, db).valueOr: +      return err(PartChnNodeConvError) + +  # Save rlp-encoded node(s) +  chain &= node.to(seq[Blob]) + +  # Follow up child node +  case vtx.vType: +  of Leaf: +    if path != vtx.lPfx: +      err(PartChnLeafPathMismatch) +    else: +      ok() + +  of Branch: +    let nChewOff = sharedPrefixLen(vtx.ePfx, path) +    if nChewOff != vtx.ePfx.len: +      err(PartChnExtPfxMismatch) +    elif path.len == nChewOff: +      err(PartChnBranchPathExhausted) +    else: +      let +        nibble = path[nChewOff] +        rest = path.slice(nChewOff+1) +      # Recursion! +      db.chainRlpNodes((rvid.root,vtx.bVid[nibble]), rest, chain) + + +proc trackRlpNodes*( +    chain: openArray[Blob]; +    topKey: HashKey; +    path: NibblesBuf; +    start = false; +    ): Result[Blob,AristoError] +    {.gcsafe, raises: [RlpError]} = +  ## Verify rlp-encoded node chain created by `chainRlpNodes()`. +  if path.len == 0: +    return err(PartTrkEmptyPath) + +  # Verify key against rlp-node +  let digest = chain[0].digestTo(HashKey) +  if start: +    if topKey.to(Hash256) != digest.to(Hash256): +      return err(PartTrkFollowUpKeyMismatch) +  else: +    if topKey != digest: +      return err(PartTrkFollowUpKeyMismatch) + +  var +    node = rlpFromBytes chain[0] +    nChewOff = 0 +    link: Blob + +  # Decode rlp-node and prepare for recursion +  case node.listLen +  of 2: +    let (isLeaf, segm) = NibblesBuf.fromHexPrefix node.listElem(0).toBytes +    nChewOff = sharedPrefixLen(path, segm) +    link = node.listElem(1).toBytes # link or payload +    if isLeaf: +      if nChewOff == path.len: +        return ok(link) +      return err(PartTrkLeafPfxMismatch) +  of 17: +    nChewOff = 1 +    link = node.listElem(path[0].int).toBytes +  else: +    return err(PartTrkGarbledNode) + +  let nextKey = HashKey.fromBytes(link).valueOr: +    return err(PartTrkLinkExpected) +  chain.toOpenArray(1,chain.len-1).trackRlpNodes(nextKey, path.slice nChewOff) + +# ------------------------------------------------------------------------------ +# End +# ------------------------------------------------------------------------------ diff --git a/nimbus/db/aristo/aristo_part/part_debug.nim b/nimbus/db/aristo/aristo_part/part_debug.nim index a5d6bf6c48..c658e324ce 100644 --- a/nimbus/db/aristo/aristo_part/part_debug.nim +++ b/nimbus/db/aristo/aristo_part/part_debug.nim @@ -128,7 +128,7 @@ proc pp*( let pfx0 = indent.toPfx() pfx1 = indent.toPfx(1) - +    pfx2 = indent.toPfx(2) var pfx = "" if dbOk: result &= pfx & "" & pfx1 & ps.db.pp( @@ -147,9 +147,10 @@ proc pp*( if 0 < len: var qfx = "" result &= pfx1 & "{" -      for (vid,vLst) in ps.core.pairs: +      for vid in ps.core.keys.toSeq.sorted: +        let vLst = ps.core.getOrDefault vid result &= qfx & "(" & vid.pp & ":" & vLst.pp(ps) & ")" -        qfx = pfx1 +        qfx = pfx2 result &= "}" pfx = pfx0 if byKeyOk: diff --git a/nimbus/db/aristo/aristo_part/part_desc.nim b/nimbus/db/aristo/aristo_part/part_desc.nim index bf5134879d..bdd2d4395d 100644 --- a/nimbus/db/aristo/aristo_part/part_desc.nim +++ b/nimbus/db/aristo/aristo_part/part_desc.nim @@ -112,6 +112,7 @@ proc `[]`*(ps: PartStateRef; key: HashKey): RootedVertexID = proc `[]`*(ps: PartStateRef; vid: VertexID): HashKey = ps.byVid.withValue(vid,key): return key[] +  VOID_HASH_KEY proc del*(ps: PartStateRef; key: HashKey) = diff --git a/nimbus/db/aristo/aristo_part/part_helpers.nim b/nimbus/db/aristo/aristo_part/part_helpers.nim index 1149583316..9b30dca394 100644 --- a/nimbus/db/aristo/aristo_part/part_helpers.nim +++ b/nimbus/db/aristo/aristo_part/part_helpers.nim @@ -22,74 +22,74 @@ import # ------------------------------------------------------------------------------ proc
read(rlp: var Rlp; T: type PrfNode): T {.gcsafe, raises: [RlpError].} = - ## Mixin for RLP reader. The decoder with error return code in a `Leaf` - ## node if needed. - ## - func readError(error: AristoError): PrfNode = - ## Prettify return code expression - PrfNode(vType: Leaf, prfType: isError, error: error) - - if not rlp.isList: - # Otherwise `rlp.items` would raise a `Defect` - return readError(PartRlp2Or17ListEntries) + ## Mixin for RLP reader. The decoder with error return code in a `Leaf` + ## node if needed. + ## + func readError(error: AristoError): PrfNode = + ## Prettify return code expression + PrfNode(vType: Leaf, prfType: isError, error: error) - var - blobs = newSeq[Blob](2) # temporary, cache - links: array[16,HashKey] # reconstruct branch node - top = 0 # count entries and positions - - # Collect lists of either 2 or 17 blob entries. - for w in rlp.items: - case top - of 0, 1: - if not w.isBlob: - return readError(PartRlpBlobExpected) - blobs[top] = rlp.read(Blob) - of 2 .. 15: - let blob = rlp.read(Blob) - links[top] = HashKey.fromBytes(blob).valueOr: - return readError(PartRlpBranchHashKeyExpected) - of 16: - if not w.isBlob or 0 < rlp.read(Blob).len: - return readError(PartRlpEmptyBlobExpected) - else: - return readError(PartRlp2Or17ListEntries) - top.inc + if not rlp.isList: + # Otherwise `rlp.items` would raise a `Defect` + return readError(PartRlp2Or17ListEntries) - # Verify extension data + var + blobs = newSeq[Blob](2) # temporary, cache + links: array[16,HashKey] # reconstruct branch node + top = 0 # count entries and positions + + # Collect lists of either 2 or 17 blob entries. + for w in rlp.items: case top - of 2: - if blobs[0].len == 0: - return readError(PartRlpNonEmptyBlobExpected) - let (isLeaf, pathSegment) = NibblesBuf.fromHexPrefix blobs[0] - if isLeaf: - return PrfNode( - vType: Leaf, - prfType: ignore, - lPfx: pathSegment, - lData: LeafPayload( - pType: RawData, - rawBlob: blobs[1])) - else: - var node = PrfNode( - vType: Branch, - prfType: isExtension, - ePfx: pathSegment) - node.key[0] = HashKey.fromBytes(blobs[1]).valueOr: - return readError(PartRlpExtHashKeyExpected) - return node - of 17: - for n in [0,1]: - links[n] = HashKey.fromBytes(blobs[n]).valueOr: - return readError(PartRlpBranchHashKeyExpected) + of 0, 1: + if not w.isBlob: + return readError(PartRlpBlobExpected) + blobs[top] = rlp.read(Blob) + of 2 .. 
15: +      let blob = rlp.read(Blob) +      links[top] = HashKey.fromBytes(blob).valueOr: +        return readError(PartRlpBranchHashKeyExpected) +    of 16: +      if not w.isBlob or 0 < rlp.read(Blob).len: +        return readError(PartRlpEmptyBlobExpected) +    else: +      return readError(PartRlp2Or17ListEntries) +    top.inc + +  # Verify extension data +  case top +  of 2: +    if blobs[0].len == 0: +      return readError(PartRlpNonEmptyBlobExpected) +    let (isLeaf, pathSegment) = NibblesBuf.fromHexPrefix blobs[0] +    if isLeaf: return PrfNode( -        vType: Branch, -        prfType: ignore, -        key: links) +      vType: Leaf, +      prfType: ignore, +      lPfx: pathSegment, +      lData: LeafPayload( +        pType: RawData, +        rawBlob: blobs[1])) else: -      discard +      var node = PrfNode( +        vType: Branch, +        prfType: isExtension, +        ePfx: pathSegment) +      node.key[0] = HashKey.fromBytes(blobs[1]).valueOr: +        return readError(PartRlpExtHashKeyExpected) +      return node +  of 17: +    for n in [0,1]: +      links[n] = HashKey.fromBytes(blobs[n]).valueOr: +        return readError(PartRlpBranchHashKeyExpected) +    return PrfNode( +      vType: Branch, +      prfType: ignore, +      key: links) +  else: +    discard -  readError(PartRlp2Or17ListEntries) +  readError(PartRlp2Or17ListEntries) proc read(rlp: var Rlp; T: type PrfPayload): T {.gcsafe, raises: [RlpError].} = ## Mixin for RLP reader decoding `Account` or storage slot payload. diff --git a/nimbus/db/aristo/aristo_serialise.nim b/nimbus/db/aristo/aristo_serialise.nim index 4e9f63ed7f..77b1b9a9c5 100644 --- a/nimbus/db/aristo/aristo_serialise.nim +++ b/nimbus/db/aristo/aristo_serialise.nim @@ -37,15 +37,13 @@ proc serialise( of RawData: ok pyl.rawBlob of AccountData: -    let -      vid = pyl.stoID -      key = block: -        if vid.isValid: -          vid.getKey.valueOr: -            let w = (vid,error) -            return err(w) -        else: -          VOID_HASH_KEY +    let key = block: +      if pyl.stoID.isValid: +        pyl.stoID.vid.getKey.valueOr: +          let w = (pyl.stoID.vid, error) +          return err(w) +      else: +        VOID_HASH_KEY ok rlp.encode Account( nonce: pyl.account.nonce, @@ -67,44 +65,49 @@ func append*(w: var RlpWriter; key: HashKey) = # --------------------- -proc to*(w: tuple[key: HashKey, node: NodeRef]; T: type seq[(Blob,Blob)]): T = -  ## Convert the argument pait `w` to a single or a double pair of -  ## `(<key>,<rlp-encoded-node>)` tuples. Only in case of a combined extension -  ## and branch vertex argument, there are is a double pair result. -  var wr = initRlpWriter() -  case w.node.vType: +proc to*(node: NodeRef; T: type seq[Blob]): T = +  ## Convert the argument `node` to a single or a double item list of +  ## `<rlp-encoded-node>` type entries. Only in case of a combined extension +  ## and branch vertex argument, there will be a double item list result.
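+  ## As a usage sketch, callers can rely on the result having one entry
+  ## (plain leaf or branch) or two entries (embedded extension first):
+  ## ::
+  ##   let blobs = node.to(seq[Blob])
+  ##   doAssert 0 < blobs.len and blobs.len <= 2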
+ ## + case node.vType: of Branch: # Do branch node + var wr = initRlpWriter() wr.startList(17) for n in 0..15: - wr.append w.node.key[n] + wr.append node.key[n] wr.append EmptyBlob + let brData = wr.finish() - if 0 < w.node.ePfx.len: - # Do for embedded extension node - let brHash = wr.finish().digestTo(HashKey) - result.add (@(brHash.data), wr.finish()) + if 0 < node.ePfx.len: + # Prefix branch by embedded extension node + let brHash = brData.digestTo(HashKey) - wr = initRlpWriter() - wr.startList(2) - wr.append w.node.ePfx.toHexPrefix(isleaf = false) - wr.append brHash + var wrx = initRlpWriter() + wrx.startList(2) + wrx.append node.ePfx.toHexPrefix(isleaf = false) + wrx.append brHash + + result.add wrx.finish() + result.add brData else: # Do for pure branch node - result.add (@(w.key.data), wr.finish()) + result.add brData of Leaf: proc getKey0( vid: VertexID; ): Result[HashKey,AristoError] {.gcsafe, raises: [].} = - ok(w.node.key[0]) # always succeeds + ok(node.key[0]) # always succeeds + var wr = initRlpWriter() wr.startList(2) - wr.append w.node.lPfx.toHexPrefix(isleaf = true) - wr.append w.node.lData.serialise(getKey0).value + wr.append node.lPfx.toHexPrefix(isleaf = true) + wr.append node.lData.serialise(getKey0).value - result.add (@(w.key.data), wr.finish()) + result.add (wr.finish()) proc digestTo*(node: NodeRef; T: type HashKey): T = ## Convert the argument `node` to the corresponding Merkle hash key. Note @@ -122,7 +125,7 @@ proc digestTo*(node: NodeRef; T: type HashKey): T = # Do for embedded extension node if 0 < node.ePfx.len: let brHash = wr.finish().digestTo(HashKey) - wr= initRlpWriter() + wr = initRlpWriter() wr.startList(2) wr.append node.ePfx.toHexPrefix(isleaf = false) wr.append brHash diff --git a/nimbus/db/aristo/aristo_utils.nim b/nimbus/db/aristo/aristo_utils.nim index 094a78244d..6492280d92 100644 --- a/nimbus/db/aristo/aristo_utils.nim +++ b/nimbus/db/aristo/aristo_utils.nim @@ -59,11 +59,11 @@ proc toNode*( let node = NodeRef(vType: Leaf, lPfx: vtx.lPfx, lData: vtx.lData) # Need to resolve storage root for account leaf if vtx.lData.pType == AccountData: - let vid = vtx.lData.stoID - if vid.isValid: - let key = db.getKey (root, vid) + let stoID = vtx.lData.stoID + if stoID.isValid: + let key = db.getKey (stoID.vid, stoID.vid) if not key.isValid: - return err(@[vid]) + return err(@[stoID.vid]) node.key[0] = key return ok node @@ -90,9 +90,9 @@ iterator subVids*(vtx: VertexRef): VertexID = case vtx.vType: of Leaf: if vtx.lData.pType == AccountData: - let vid = vtx.lData.stoID - if vid.isValid: - yield vid + let stoID = vtx.lData.stoID + if stoID.isValid: + yield stoID.vid of Branch: for vid in vtx.bVid: if vid.isValid: @@ -103,9 +103,9 @@ iterator subVidKeys*(node: NodeRef): (VertexID,HashKey) = case node.vType: of Leaf: if node.lData.pType == AccountData: - let vid = node.lData.stoID - if node.isValid: - yield (vid,node.key[0]) + let stoID = node.lData.stoID + if stoID.isValid: + yield (stoID.vid, node.key[0]) of Branch: for n in 0 .. 
15: let vid = node.bVid[n] diff --git a/nimbus/db/core_db/backend/aristo_db.nim b/nimbus/db/core_db/backend/aristo_db.nim index 21139429f9..719e879cbf 100644 --- a/nimbus/db/core_db/backend/aristo_db.nim +++ b/nimbus/db/core_db/backend/aristo_db.nim @@ -11,14 +11,16 @@ {.push raises: [].} import + eth/common, ../../aristo as use_ari, + ../../aristo/aristo_desc/desc_identifiers, ../../aristo/[aristo_init/memory_only, aristo_walk], ../../kvt as use_kvt, ../../kvt/[kvt_init/memory_only, kvt_walk], ../base/[base_config, base_desc, base_helpers] # ------------------------------------------------------------------------------ -# Public constructor and helper +# Public constructors # ------------------------------------------------------------------------------ proc create*(dbType: CoreDbType; kvt: KvtDbRef; mpt: AristoDbRef): CoreDbRef = @@ -51,6 +53,41 @@ proc newAristoVoidCoreDbRef*(): CoreDbRef = KvtDbRef.init(use_kvt.VoidBackendRef), AristoDbRef.init(use_ari.VoidBackendRef)) +proc newCtxByKey*( + ctx: CoreDbCtxRef; + key: Hash256; + info: static[string]; + ): CoreDbRc[CoreDbCtxRef] = + const + rvid: RootedVertexID = (VertexID(1),VertexID(1)) + let + db = ctx.parent + + # Find `(vid,key)` on transaction stack + inx = block: + let rc = db.ariApi.call(findTx, ctx.mpt, rvid, key.to(HashKey)) + if rc.isErr: + return err(rc.error.toError info) + rc.value + + # Fork MPT descriptor that provides `(vid,key)` + newMpt = block: + let rc = db.ariApi.call(forkTx, ctx.mpt, inx) + if rc.isErr: + return err(rc.error.toError info) + rc.value + + # Fork KVT descriptor parallel to `newMpt` + newKvt = block: + let rc = db.kvtApi.call(forkTx, ctx.kvt, inx) + if rc.isErr: + discard db.ariApi.call(forget, newMpt) + return err(rc.error.toError info) + rc.value + + # Create new context + ok(db.bless CoreDbCtxRef(kvt: newKvt, mpt: newMpt)) + # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/nimbus/db/core_db/backend/aristo_replicate.nim b/nimbus/db/core_db/backend/aristo_replicate.nim index 9733985b01..5b589e13ac 100644 --- a/nimbus/db/core_db/backend/aristo_replicate.nim +++ b/nimbus/db/core_db/backend/aristo_replicate.nim @@ -65,7 +65,10 @@ iterator aristoReplicate[T]( let p = mpt.call(forkTx, mpt.mpt, 0).valueOrApiError "aristoReplicate()" defer: discard mpt.call(forget, p) for (rVid,key,vtx,node) in T.replicate(p): - for (k,v) in (key,node).to(seq[(Blob,Blob)]): - yield (k, v) + let w = node.to(seq[Blob]) + yield (@(key.data),w[0]) + if 1 < w.len: + # Was an extension merged into a branch + yield (@(w[1].digestTo(HashKey).data),w[1]) # End diff --git a/nimbus/db/core_db/backend/aristo_trace.nim b/nimbus/db/core_db/backend/aristo_trace.nim new file mode 100644 index 0000000000..8a9e17f4e4 --- /dev/null +++ b/nimbus/db/core_db/backend/aristo_trace.nim @@ -0,0 +1,956 @@ +# Nimbus +# Copyright (c) 2023-2024 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or distributed except +# according to those terms. 
+ +## +## Database Backend Tracer +## ======================= +## + +{.push raises: [].} + +import + std/[sequtils, tables, typetraits], + eth/common, + results, + ../../aristo as use_aristo, + ../../aristo/aristo_desc, + ../../kvt as use_kvt, + ../../kvt/kvt_desc, + ../base/[base_config, base_desc] + +const + LogJournalMax = 1_000_000 + ## Maximal size of a journal (organised as LRU) + +type + TracePfx = enum + TrpOops = 0 + TrpKvt + TrpAccounts + TrpGeneric + TrpStorage + + TraceRequest* = enum + TrqOops = 0 + TrqFind + TrqAdd + TrqModify + TrqDelete + + TraceDataType* = enum + TdtOops = 0 + TdtBlob ## Kvt and Aristo + TdtError ## Kvt and Aristo + TdtVoid ## Kvt and Aristo + TdtAccount ## Aristo only + TdtBigNum ## Aristo only + TdtHash ## Aristo only + + TraceDataItemRef* = ref object + ## Log journal entry + pfx*: TracePfx ## DB storage prefix + info*: int ## `KvtApiProfNames` or `AristoApiProfNames` + req*: TraceRequest ## Logged action request + case kind*: TraceDataType + of TdtBlob: + blob*: Blob + of TdtError: + error*: int ## `KvtError` or `AristoError` + of TdtAccount: + account*: AristoAccount + of TdtBigNum: + bigNum*: UInt256 + of TdtHash: + hash*: Hash256 + of TdtVoid, TdtOops: + discard + + TraceLogInstRef* = ref object + ## Logger instance + base: TraceRecorderRef + level: int + truncated: bool + journal: KeyedQueue[Blob,TraceDataItemRef] + + TraceRecorderRef* = ref object of RootRef + log: seq[TraceLogInstRef] ## Production stack for log database + db: CoreDbRef + kvtSave: KvtApiRef ## Restore `KVT` data + ariSave: AristoApiRef ## Restore `Aristo` data + +doAssert LEAST_FREE_VID <= 256 # needed for journal key byte prefix + +# ------------------------------------------------------------------------------ +# Private helpers +# ------------------------------------------------------------------------------ + +when CoreDbNoisyCaptJournal: + import + std/strutils, + chronicles, + stew/byteutils + + func squeezeHex(s: string; ignLen = false): string = + result = if s.len < 20: s else: s[0 .. 5] & ".." & s[s.len-8 .. 
^1] + if not ignLen: + let n = (s.len + 1) div 2 + result &= "[" & (if 0 < n: "#" & $n else: "") & "]" + + func stripZeros(a: string; toExp = false): string = + if 0 < a.len: + result = a.toLowerAscii.strip(leading=true, trailing=false, chars={'0'}) + if result.len == 0: + result = "0" + elif result[^1] == '0' and toExp: + var n = 0 + while result[^1] == '0': + let w = result.len + result.setLen(w-1) + n.inc + if n == 1: + result &= "0" + elif n == 2: + result &= "00" + elif 2 < n: + result &= "↑" & $n + + func `$$`(w: openArray[byte]): string = + w.toHex.squeezeHex + + func `$`(w: Blob): string = + w.toHex.squeezeHex + + func `$`(w: UInt256): string = + "#" & w.toHex.stripZeros.squeezeHex + + func `$`(w: Hash256): string = + "£" & w.data.toHex.squeezeHex + + func `$`(w: VertexID): string = + if 0 < w.uint64: "$" & w.uint64.toHex.stripZeros else: "$ø" + + func `$`(w: AristoAccount): string = + "(" & $w.nonce & "," & $w.balance & "," & $w.codeHash & ")" + + func `$`(ti: TraceDataItemRef): string = + result = "(" & + (if ti.pfx == TrpKvt: $KvtApiProfNames(ti.info) + elif ti.pfx == TrpOops: "" + else: $AristoApiProfNames(ti.info)) + + result &= "," & ( + case ti.req: + of TrqOops: "" + of TrqFind: "" + of TrqModify: "=" + of TrqDelete: "-" + of TrqAdd: "+") + + result &= ( + case ti.kind: + of TdtOops: "" + of TdtBlob: $ti.blob + of TdtBigNum: $ti.bigNum + of TdtHash: $ti.hash + of TdtVoid: "ø" + of TdtError: (if ti.pfx == TrpKvt: $KvtError(ti.error) + elif ti.pfx == TrpOops: "" + else: $AristoError(ti.error)) + of TdtAccount: $ti.account) + + result &= ")" + + func toStr(pfx: TracePfx, key: openArray[byte]): string = + case pfx: + of TrpOops: + "" + of TrpKvt: + $$(key.toOpenArray(0, key.len - 1)) + of TrpAccounts: + "1:" & $$(key.toOpenArray(0, key.len - 1)) + of TrpGeneric: + $key[0] & ":" & $$(key.toOpenArray(1, key.len - 1)) + of TrpStorage: + "1:" & $$(key.toOpenArray(0, min(31, key.len - 1))) & ":" & + (if 32 < key.len: $$(key.toOpenArray(32, key.len - 1)) else: "") + + func `$`(key: openArray[byte]; ti: TraceDataItemRef): string = + "(" & + TracePfx(key[0]).toStr(key.toOpenArray(1, key.len - 1)) & "," & + $ti & ")" + +# ------------------------------- + +template logTxt(info: static[string]): static[string] = + "trace " & info + +func topLevel(tr: TraceRecorderRef): int = + tr.log.len - 1 + +# -------------------- + +proc jLogger( + tr: TraceRecorderRef; + key: openArray[byte]; + ti: TraceDataItemRef; + ) = + ## Add or update journal entry. 
The `ti.pfx` argument indicates the key type: +  ## +  ## * `TrpKvt`: followed by KVT key +  ## * `TrpAccounts`: followed by <accPath> +  ## * `TrpGeneric`: followed by <root-ID> + <path> +  ## * `TrpStorage`: followed by <accPath> + <stoPath> +  ## +  doAssert ti.pfx != TrpOops +  let +    pfx = @[ti.pfx.byte] +    lRec = tr.log[^1].journal.lruFetch(pfx & @key).valueOr: +      if LogJournalMax <= tr.log[^1].journal.len: +        tr.log[^1].truncated = true +      discard tr.log[^1].journal.lruAppend(pfx & @key, ti, LogJournalMax) +      return +  if ti.req != TrqFind: +    lRec[] = ti[] + +proc jLogger( +    tr: TraceRecorderRef; +    accPath: Hash256; +    ti: TraceDataItemRef; +      ) = +  tr.jLogger(accPath.data.toSeq, ti) + +proc jLogger( +    tr: TraceRecorderRef; +    ti: TraceDataItemRef; +      ) = +  tr.jLogger(EmptyBlob, ti) + +proc jLogger( +    tr: TraceRecorderRef; +    root: VertexID; +    path: openArray[byte]; +    ti: TraceDataItemRef; +      ) = +  tr.jLogger(@[root.byte] & @path, ti) + +proc jLogger( +    tr: TraceRecorderRef; +    root: VertexID; +    ti: TraceDataItemRef; +      ) = +  tr.jLogger(@[root.byte], ti) + +proc jLogger( +    tr: TraceRecorderRef; +    accPath: Hash256; +    stoPath: Hash256; +    ti: TraceDataItemRef; +      ) = +  tr.jLogger(accPath.data.toSeq & stoPath.data.toSeq, ti) + +# -------------------- + +func to(w: AristoApiProfNames; T: type TracePfx): T = +  case w: +  of AristoApiProfFetchAccountRecordFn, +     AristoApiProfFetchAccountStateFn, +     AristoApiProfDeleteAccountRecordFn, +     AristoApiProfMergeAccountRecordFn: +    return TrpAccounts +  of AristoApiProfFetchGenericDataFn, +     AristoApiProfFetchGenericStateFn, +     AristoApiProfDeleteGenericDataFn, +     AristoApiProfDeleteGenericTreeFn, +     AristoApiProfMergeGenericDataFn: +    return TrpGeneric +  of AristoApiProfFetchStorageDataFn, +     AristoApiProfFetchStorageStateFn, +     AristoApiProfDeleteStorageDataFn, +     AristoApiProfDeleteStorageTreeFn, +     AristoApiProfMergeStorageDataFn: +    return TrpStorage +  else: +    discard +  raiseAssert "Unsupported AristoApiProfNames: " & $w + +func to(w: KvtApiProfNames; T: type TracePfx): T = +  TrpKvt + +# -------------------- + +func logRecord( +    info: KvtApiProfNames | AristoApiProfNames; +    req: TraceRequest; +    data: openArray[byte]; +      ): TraceDataItemRef = +  TraceDataItemRef( +    pfx: info.to(TracePfx), +    info: info.ord, +    req: req, +    kind: TdtBlob, +    blob: @data) + +func logRecord( +    info: KvtApiProfNames | AristoApiProfNames; +    req: TraceRequest; +    error: KvtError | AristoError; +      ): TraceDataItemRef = +  TraceDataItemRef( +    pfx: info.to(TracePfx), +    info: info.ord, +    req: req, +    kind: TdtError, +    error: error.ord) + +func logRecord( +    info: KvtApiProfNames | AristoApiProfNames; +    req: TraceRequest; +      ): TraceDataItemRef = +  TraceDataItemRef( +    pfx: info.to(TracePfx), +    info: info.ord, +    req: req, +    kind: TdtVoid) + +# -------------------- + +func logRecord( +    info: AristoApiProfNames; +    req: TraceRequest; +    accRec: AristoAccount; +      ): TraceDataItemRef = +  TraceDataItemRef( +    pfx: info.to(TracePfx), +    info: info.ord, +    req: req, +    kind: TdtAccount, +    account: accRec) + +func logRecord( +    info: AristoApiProfNames; +    req: TraceRequest; +    state: Hash256; +      ): TraceDataItemRef = +  TraceDataItemRef( +    pfx: info.to(TracePfx), +    info: info.ord, +    req: req, +    kind: TdtHash, +    hash: state) + +func logRecord( +    info: AristoApiProfNames; +    req: TraceRequest; +    sto: Uint256; +      ): TraceDataItemRef = +  TraceDataItemRef( +    pfx: info.to(TracePfx), +    info: info.ord, +    req: req, +    kind: TdtBigNum, +    bigNum: sto) + +# ------------------------------------------------------------------------------ +# Private functions +#
------------------------------------------------------------------------------ + +proc kvtTraceRecorder(tr: TraceRecorderRef) = + let + api = tr.db.kvtApi + tracerApi = api.dup + + # Set up new production api `tracerApi` and save the old one + tr.kvtSave = api + tr.db.kvtApi = tracerApi + + # Update production api + tracerApi.get = + proc(kvt: KvtDbRef; key: openArray[byte]): Result[Blob,KvtError] = + const info = KvtApiProfGetFn + + when CoreDbNoisyCaptJournal: + let level = tr.topLevel() + + let data = api.get(kvt, key).valueOr: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, key=($$key), error + tr.jLogger(key, logRecord(info, TrqFind, error)) + return err(error) # No way + + tr.jLogger(key, logRecord(info, TrqFind, data)) + + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, key=($$key), data=($$data) + ok(data) + + tracerApi.del = + proc(kvt: KvtDbRef; key: openArray[byte]): Result[void,KvtError] = + const info = KvtApiProfDelFn + + when CoreDbNoisyCaptJournal: + let level = tr.topLevel() + + # Find entry on DB (for comprehensive log record) + let tiRec = block: + let rc = api.get(kvt, key) + if rc.isOk: + logRecord(info, TrqDelete, rc.value) + elif rc.error == GetNotFound: + logRecord(info, TrqDelete) + else: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, key=($$key), error=rc.error + tr.jLogger(key, logRecord(info, TrqDelete, rc.error)) + return err(rc.error) + + # Delete from DB + api.del(kvt, key).isOkOr: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, key=($$key), error + tr.jLogger(key, logRecord(info, TrqDelete, error)) + return err(error) + + # Log on journal + tr.jLogger(key, tiRec) + + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, key=($$key) + ok() + + tracerApi.put = + proc(kvt: KvtDbRef; key, data: openArray[byte]): Result[void,KvtError] = + const info = KvtApiProfPutFn + + when CoreDbNoisyCaptJournal: + let level = tr.topLevel() + + # Find entry on DB + let + hasKey = api.hasKey(kvt, key).valueOr: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, key=($$key), error + tr.jLogger(key, logRecord(info, TrqAdd, error)) + return err(error) + mode = if hasKey: TrqModify else: TrqAdd + + # Store on DB + api.put(kvt, key, data).isOkOr: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, key=($$key), data=($$data) + tr.jLogger(key, logRecord(info, mode, error)) + return err(error) + + tr.jLogger(key, logRecord(info, mode, data)) + + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, key=($$key), data=($$data) + ok() + + assert tr.kvtSave != tr.db.kvtApi + assert tr.kvtSave.del != tr.db.kvtApi.del + assert tr.kvtSave.hasKey == tr.db.kvtApi.hasKey + + +proc ariTraceRecorder(tr: TraceRecorderRef) = + let + api = tr.db.ariApi + tracerApi = api.dup + + # Set up new production api `tracerApi` and save the old one + tr.ariSave = api + tr.db.ariApi = tracerApi + + tracerApi.fetchAccountRecord = + proc(mpt: AristoDbRef; + accPath: Hash256; + ): Result[AristoAccount,AristoError] = + const info = AristoApiProfFetchAccountRecordFn + + when CoreDbNoisyCaptJournal: + let level = tr.topLevel() + + # Find entry on DB + let accRec = api.fetchAccountRecord(mpt, accPath).valueOr: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, accPath, error + tr.jLogger(accPath, logRecord(info, TrqFind, error)) + return err(error) + + tr.jLogger(accPath, logRecord(info, TrqFind, accRec)) + + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, accPath, accRec + ok accRec + + tracerApi.fetchAccountState = + proc(mpt: 
AristoDbRef; +         updateOk: bool; +        ): Result[Hash256,AristoError] = +      const info = AristoApiProfFetchAccountStateFn + +      when CoreDbNoisyCaptJournal: +        let level = tr.topLevel() + +      # Find entry on DB +      let state = api.fetchAccountState(mpt, updateOk).valueOr: +        when CoreDbNoisyCaptJournal: +          debug logTxt $info, level, updateOk, error +        tr.jLogger logRecord(info, TrqFind, error) +        return err(error) + +      tr.jLogger logRecord(info, TrqFind, state) + +      when CoreDbNoisyCaptJournal: +        debug logTxt $info, level, updateOk, state +      ok state + +  tracerApi.fetchGenericData = +    proc(mpt: AristoDbRef; +         root: VertexID; +         path: openArray[byte]; +        ): Result[Blob,AristoError] = +      const info = AristoApiProfFetchGenericDataFn + +      when CoreDbNoisyCaptJournal: +        let level = tr.topLevel() + +      # Find entry on DB +      let data = api.fetchGenericData(mpt, root, path).valueOr: +        when CoreDbNoisyCaptJournal: +          debug logTxt $info, level, root, path=($$path), error +        tr.jLogger(root, path, logRecord(info, TrqFind, error)) +        return err(error) + +      tr.jLogger(root, path, logRecord(info, TrqFind, data)) + +      when CoreDbNoisyCaptJournal: +        debug logTxt $info, level, root, path=($$path), data +      ok data + +  tracerApi.fetchGenericState = +    proc(mpt: AristoDbRef; +         root: VertexID; +         updateOk: bool; +        ): Result[Hash256,AristoError] = +      const info = AristoApiProfFetchGenericStateFn + +      when CoreDbNoisyCaptJournal: +        let level = tr.topLevel() + +      # Find entry on DB +      let state = api.fetchGenericState(mpt, root, updateOk).valueOr: +        when CoreDbNoisyCaptJournal: +          debug logTxt $info, level, root, updateOk, error +        tr.jLogger(root, logRecord(info, TrqFind, error)) +        return err(error) + +      tr.jLogger(root, logRecord(info, TrqFind, state)) + +      when CoreDbNoisyCaptJournal: +        debug logTxt $info, level, root, updateOk, state +      ok state + +  tracerApi.fetchStorageData = +    proc(mpt: AristoDbRef; +         accPath: Hash256; +         stoPath: Hash256; +        ): Result[Uint256,AristoError] = +      const info = AristoApiProfFetchStorageDataFn + +      when CoreDbNoisyCaptJournal: +        let level = tr.topLevel() + +      # Find entry on DB +      let stoData = api.fetchStorageData(mpt, accPath, stoPath).valueOr: +        when CoreDbNoisyCaptJournal: +          debug logTxt $info, level, accPath, stoPath, error +        tr.jLogger(accPath, stoPath, logRecord(info, TrqFind, error)) +        return err(error) + +      tr.jLogger(accPath, stoPath, logRecord(info, TrqFind, stoData)) + +      when CoreDbNoisyCaptJournal: +        debug logTxt $info, level, accPath, stoPath, stoData +      ok stoData + +  tracerApi.fetchStorageState = +    proc(mpt: AristoDbRef; +         accPath: Hash256; +         updateOk: bool; +        ): Result[Hash256,AristoError] = +      const info = AristoApiProfFetchStorageStateFn + +      when CoreDbNoisyCaptJournal: +        let level = tr.topLevel() + +      # Find entry on DB +      let state = api.fetchStorageState(mpt, accPath, updateOk).valueOr: +        when CoreDbNoisyCaptJournal: +          debug logTxt $info, level, accPath, updateOk, error +        tr.jLogger(accPath, logRecord(info, TrqFind, error)) +        return err(error) + +      tr.jLogger(accPath, logRecord(info, TrqFind, state)) + +      when CoreDbNoisyCaptJournal: +        debug logTxt $info, level, accPath, updateOk, state +      ok state + +  tracerApi.deleteAccountRecord = +    proc(mpt: AristoDbRef; +         accPath: Hash256; +        ): Result[void,AristoError] = +      const info = AristoApiProfDeleteAccountRecordFn + +      when CoreDbNoisyCaptJournal: +        let level = tr.topLevel() + +      # Find entry on DB (for comprehensive log record) +      let tiRec = block: +        let rc = api.fetchAccountRecord(mpt, accPath) +        if rc.isOk: +          logRecord(info, TrqDelete, rc.value) +        elif rc.error ==
FetchPathNotFound: + logRecord(info, TrqDelete) + else: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, accPath, error=rc.error + tr.jLogger(accPath, logRecord(info, TrqDelete, rc.error)) + return err(rc.error) + + # Delete from DB + api.deleteAccountRecord(mpt, accPath).isOkOr: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, accPath, error + tr.jLogger(accPath, logRecord(info, TrqDelete, error)) + return err(error) + + # Log on journal + tr.jLogger(accPath, tiRec) + + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, accPath + ok() + + tracerApi.deleteGenericData = + proc(mpt: AristoDbRef; + root: VertexID; + path: openArray[byte]; + ): Result[bool,AristoError] = + const info = AristoApiProfDeleteGenericDataFn + + when CoreDbNoisyCaptJournal: + let level = tr.topLevel() + + # Find entry on DB (for comprehensive log record) + let tiRec = block: + let rc = api.fetchGenericData(mpt, root, path) + if rc.isOk: + logRecord(info, TrqDelete, rc.value) + elif rc.error == FetchPathNotFound: + logRecord(info, TrqDelete) + else: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, root, path=($$path), error=rc.error + tr.jLogger(root, path, logRecord(info, TrqDelete, rc.error)) + return err(rc.error) + + # Delete from DB + let emptyTrie = api.deleteGenericData(mpt, root, path).valueOr: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, root, path=($$path), error + tr.jLogger(root, path, logRecord(info, TrqDelete, error)) + return err(error) + + # Log on journal + tr.jLogger(root, path, tiRec) + + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, root, path=($$path), emptyTrie + ok emptyTrie + + tracerApi.deleteGenericTree = + proc(mpt: AristoDbRef; + root: VertexID; + ): Result[void,AristoError] = + const info = AristoApiProfDeleteGenericTreeFn + + when CoreDbNoisyCaptJournal: + let level = tr.topLevel() + + # Delete from DB + api.deleteGenericTree(mpt, root).isOkOr: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, root, error + tr.jLogger(root, logRecord(info, TrqDelete, error)) + return err(error) + + # Log on journal + tr.jLogger(root, logRecord(info, TrqDelete)) + + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, root + ok() + + tracerApi.deleteStorageData = + proc(mpt: AristoDbRef; + accPath: Hash256; + stoPath: Hash256; + ): Result[bool,AristoError] = + const info = AristoApiProfDeleteStorageDataFn + + when CoreDbNoisyCaptJournal: + let level = tr.topLevel() + + # Find entry on DB (for comprehensive log record) + let tiRec = block: + let rc = api.fetchStorageData(mpt, accPath, stoPath) + if rc.isOk: + logRecord(info, TrqDelete, rc.value) + elif rc.error == FetchPathNotFound: + logRecord(info, TrqDelete) + else: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, accPath, stoPath, error=rc.error + tr.jLogger(accPath, stoPath, logRecord(info, TrqDelete, rc.error)) + return err(rc.error) + + let emptyTrie = api.deleteStorageData(mpt, accPath, stoPath).valueOr: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, accPath, stoPath, error + tr.jLogger(accPath, stoPath, logRecord(info, TrqDelete, error)) + return err(error) + + # Log on journal + tr.jLogger(accPath, stoPath, tiRec) + + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, accPath, stoPath, emptyTrie + ok emptyTrie + + tracerApi.deleteStorageTree = + proc(mpt: AristoDbRef; + accPath: Hash256; + ): Result[void,AristoError] = + const info = AristoApiProfDeleteStorageTreeFn + + when CoreDbNoisyCaptJournal: + let level = tr.topLevel() + + # 
Delete from DB + api.deleteStorageTree(mpt, accPath).isOkOr: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, accPath, error + tr.jLogger(accPath, logRecord(info, TrqDelete, error)) + return err(error) + + # Log on journal + tr.jLogger(accPath, logRecord(info, TrqDelete)) + + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, accPath + ok() + + tracerApi.mergeAccountRecord = + proc(mpt: AristoDbRef; + accPath: Hash256; + accRec: AristoAccount; + ): Result[bool,AristoError] = + const info = AristoApiProfMergeAccountRecordFn + + when CoreDbNoisyCaptJournal: + let level = tr.topLevel() + + # Find entry on DB (for comprehensive log record) + let + hadPath = api.hasPathAccount(mpt, accPath).valueOr: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, accPath, error + tr.jLogger(accPath, logRecord(info, TrqAdd, error)) + return err(error) + mode = if hadPath: TrqModify else: TrqAdd + + # Do the merge + let updated = api.mergeAccountRecord(mpt, accPath, accRec).valueOr: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, accPath, hadPath, error + tr.jLogger(accPath, logRecord(info, mode, error)) + return err(error) + + # Log on journal + tr.jLogger(accPath, logRecord(info, mode, accRec)) + + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, accPath, accRec, hadPath, updated + ok updated + + tracerApi.mergeGenericData = + proc(mpt: AristoDbRef; + root: VertexID; + path: openArray[byte]; + data: openArray[byte]; + ): Result[bool,AristoError] = + const info = AristoApiProfMergeGenericDataFn + + when CoreDbNoisyCaptJournal: + let level = tr.topLevel() + + # Find entry on DB (for comprehensive log record) + let + hadPath = api.hasPathGeneric(mpt, root, path).valueOr: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, root, path, error + tr.jLogger(root, path, logRecord(info, TrqAdd, error)) + return err(error) + mode = if hadPath: TrqModify else: TrqAdd + + # Do the merge + let updated = api.mergeGenericData(mpt, root, path, data).valueOr: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, root, path, error + tr.jLogger(root, path, logRecord(info, mode, error)) + return err(error) + + # Log on journal + tr.jLogger(root, path, logRecord(info, mode, data)) + + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, root, path, data=($$data), hadPath, updated + ok updated + + tracerApi.mergeStorageData = + proc(mpt: AristoDbRef; + accPath: Hash256; + stoPath: Hash256; + stoData: UInt256; + ): Result[void,AristoError] = + const info = AristoApiProfMergeStorageDataFn + + when CoreDbNoisyCaptJournal: + let level = tr.topLevel() + + # Find entry on DB (for comprehensive log record) + let + hadPath = api.hasPathStorage(mpt, accPath, stoPath).valueOr: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, accPath, stoPath, error + tr.jLogger(accPath, stoPath, logRecord(info, TrqAdd, error)) + return err(error) + mode = if hadPath: TrqModify else: TrqAdd + + # Do the merge + api.mergeStorageData(mpt, accPath, stoPath,stoData).isOkOr: + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, accPath, stoPath, error + tr.jLogger(accPath, stoPath, logRecord(info, mode, error)) + return err(error) + + # Log on journal + tr.jLogger(accPath, stoPath, logRecord(info, mode, stoData)) + + when CoreDbNoisyCaptJournal: + debug logTxt $info, level, accPath, stoPath, stoData, hadPath + ok() + + assert tr.ariSave != tr.db.ariApi + assert tr.ariSave.deleteAccountRecord != tr.db.ariApi.deleteAccountRecord + assert tr.ariSave.hasPathAccount == 
tr.db.ariApi.hasPathAccount + +# ------------------------------------------------------------------------------ +# Public functions +# ------------------------------------------------------------------------------ + +proc topInst*(tr: TraceRecorderRef): TraceLogInstRef = + ## Get top level logger + tr.log[^1] + +func truncated*(log: TraceLogInstRef): bool = + ## True if journal was truncated due to collecting too many entries + log.truncated + +func level*(log: TraceLogInstRef): int = + ## Non-negative stack level of this log instance. + log.level + +func journal*(log: TraceLogInstRef): KeyedQueue[Blob,TraceDataItemRef] = + ## Get the journal + log.journal + +func db*(log: TraceLogInstRef): CoreDbRef = + ## Get database + log.base.db + +iterator kvtLog*(log: TraceLogInstRef): (Blob,TraceDataItemRef) = + ## Extract `Kvt` journal + for p in log.journal.nextPairs: + let pfx = TracePfx(p.key[0]) + if pfx == TrpKvt: + yield (p.key[1..^1], p.data) + +proc kvtLogBlobs*(log: TraceLogInstRef): seq[(Blob,Blob)] = + log.kvtLog.toSeq + .filterIt(it[1].kind==TdtBlob) + .mapIt((it[0],it[1].blob)) + +iterator ariLog*(log: TraceLogInstRef): (VertexID,Blob,TraceDataItemRef) = + ## Extract `Aristo` journal + for p in log.journal.nextPairs: + let + pfx = TracePfx(p.key[0]) + (root, key) = block: + case pfx: + of TrpAccounts,TrpStorage: + (VertexID(1), p.key[1..^1]) + of TrpGeneric: + (VertexID(p.key[1]), p.key[2..^1]) + else: + continue + yield (root, key, p.data) + +proc pop*(log: TraceLogInstRef): bool = + ## Reduce logger stack by the argument descriptor `log` which must be the + ## top entry on the stack. The function returns `true` if the descriptor + ## `log` was not the only one on stack and the stack was reduced by the + ## top entry. Otherwise nothing is done and `false` returned. + ## + let tr = log.base + doAssert log.level == tr.topLevel() + if 1 < tr.log.len: # Always leave one instance on stack + tr.log.setLen(tr.log.len - 1) + return true + +proc push*(tr: TraceRecorderRef) = + ## Push overlay logger instance + tr.log.add TraceLogInstRef(base: tr, level: tr.log.len) + +# ------------------------------------------------------------------------------ +# Public constructor/destructor +# ------------------------------------------------------------------------------ + +proc init*( + T: type TraceRecorderRef; # Recorder desc to instantiate + db: CoreDbRef; # Database + ): T = + ## Constructor, create initial/base tracer descriptor + result = T(db: db) + result.push() + result.kvtTraceRecorder() + result.ariTraceRecorder() + +proc restore*(tr: TraceRecorderRef) = + ## Restore production API. 
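The journal API above (`topInst`, `kvtLog`/`ariLog`, `push`/`pop`) together with the constructor/destructor pair `init()`/`restore()` (whose body continues right below) frames the recorder's life cycle. A minimal usage sketch, assuming a `CoreDbRef` handle `db` built elsewhere and a binary compiled with the capture journal enabled; all identifiers are the ones defined in this module:

```nim
# Minimal sketch, assuming `db: CoreDbRef` and a build with
# CoreDbEnableCaptJournal set.
let tr = TraceRecorderRef.init(db)      # hook the journal into the kvt/aristo APIs
# ... run database operations; they are journalled on the top stack level ...
let blobs = tr.topInst().kvtLogBlobs()  # extract recorded (key,value) pairs
echo "captured entries: ", blobs.len
tr.restore()                            # reinstate the production API
```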
+ tr.db.kvtApi = tr.kvtSave + tr.db.ariApi = tr.ariSave + tr[].reset + +# ------------------------------------------------------------------------------ +# End +# ------------------------------------------------------------------------------ + diff --git a/nimbus/db/core_db/base.nim b/nimbus/db/core_db/base.nim index 04a40dcd40..8074a4f72e 100644 --- a/nimbus/db/core_db/base.nim +++ b/nimbus/db/core_db/base.nim @@ -15,6 +15,7 @@ import eth/common, "../.."/[constants, errors], ".."/[kvt, aristo], + ./backend/aristo_db, ./base/[api_tracking, base_config, base_desc, base_helpers] export @@ -44,7 +45,7 @@ when CoreDbEnableProfiling: CoreDbFnInx, CoreDbProfListRef -when CoreDbEnableCaptJournal and false: +when CoreDbEnableCaptJournal: import ./backend/aristo_trace type @@ -54,7 +55,8 @@ when CoreDbEnableCaptJournal and false: else: import ../aristo/[ - aristo_delete, aristo_desc, aristo_fetch, aristo_merge, aristo_tx], + aristo_delete, aristo_desc, aristo_fetch, aristo_merge, aristo_part, + aristo_tx], ../kvt/[kvt_desc, kvt_utils, kvt_tx] # ------------------------------------------------------------------------------ @@ -69,33 +71,69 @@ proc ctx*(db: CoreDbRef): CoreDbCtxRef = ## db.defCtx -proc swapCtx*(db: CoreDbRef; ctx: CoreDbCtxRef): CoreDbCtxRef = +proc newCtxByKey*(ctx: CoreDbCtxRef; root: Hash256): CoreDbRc[CoreDbCtxRef] = + ## Create new context derived from a matching transaction of the currently + ## active context. If successful, the resulting context has the following + ## properties: + ## + ## * Transaction level is 1 + ## * The state of the accounts column is equal to the argument `root` + ## + ## If successful, the resulting descriptor **must** be manually released + ## with `forget()` when it is not used anymore. + ## + ## Note: + ## The underlying `Aristo` backend uses lazy hashing so this function + ## might fail simply because there is no computed state when nesting + ## the next transaction. If the previous transaction needs to be found, + ## then it must be called like this: + ## :: + ## let db = .. # Instantiate CoreDb handle + ## ... + ## discard db.ctx.getAccounts.state() # Compute state hash + ## db.ctx.newTransaction() # Enter new transaction + ## + ## However, remember that unused hash computations are costly relative + ## to processing time. + ## + ctx.setTrackNewApi CtxNewCtxByKeyFn + result = ctx.newCtxByKey(root, $api) + ctx.ifTrackNewApi: debug logTxt, api, elapsed, root=($$root), result + +proc swapCtx*(ctx: CoreDbCtxRef; db: CoreDbRef): CoreDbCtxRef = ## Activate argument context `ctx` as default and return the previously - ## active context. This function goes typically together with `forget()`. A - ## valid scenario might look like + ## active context. This function typically goes together with `forget()`. + ## A valid scenario might look like ## :: - ## proc doSomething(db: CoreDbRef; ctx: CoreDbCtxRef) = - ## let saved = db.swapCtx ctx - ## defer: db.swapCtx(saved).forget() - ## ... + ## let db = .. # Instantiate CoreDb handle + ## ... + ## let ctx = newCtxByKey(..).expect "ctx" # Create new context + ## let saved = db.swapCtx ctx # Swap context handles + ## defer: db.swapCtx(saved).forget() # Restore + ## ... 
## doAssert not ctx.isNil - db.setTrackNewApi BaseSwapCtxFn + assert db.defCtx != ctx # debugging only + db.setTrackNewApi CtxSwapCtxFn + + # Swap default context with argument `ctx` result = db.defCtx + db.defCtx = ctx # Set read-write access and install CoreDbAccRef(ctx).call(reCentre, db.ctx.mpt).isOkOr: raiseAssert $api & " failed: " & $error CoreDbKvtRef(ctx).call(reCentre, db.ctx.kvt).isOkOr: raiseAssert $api & " failed: " & $error - db.defCtx = ctx + doAssert db.defCtx != result db.ifTrackNewApi: debug logTxt, api, elapsed proc forget*(ctx: CoreDbCtxRef) = ## Dispose `ctx` argument context and related columns created with this - ## context. This function fails if `ctx` is the default context. + ## context. This function throws an exception if `ctx` is the default context. ## ctx.setTrackNewApi CtxForgetFn + doAssert ctx != ctx.parent.defCtx CoreDbAccRef(ctx).call(forget, ctx.mpt).isOkOr: raiseAssert $api & ": " & $error CoreDbKvtRef(ctx).call(forget, ctx.kvt).isOkOr: @@ -103,7 +141,7 @@ proc forget*(ctx: CoreDbCtxRef) = ctx.ifTrackNewApi: debug logTxt, api, elapsed # ------------------------------------------------------------------------------ -# Public main descriptor methods +# Public base descriptor methods # ------------------------------------------------------------------------------ proc finish*(db: CoreDbRef; eradicate = false) = @@ -175,6 +213,104 @@ proc stateBlockNumber*(db: CoreDbRef): BlockNumber = 0u64 db.ifTrackNewApi: debug logTxt, api, elapsed, result +proc verify*( + db: CoreDbRef | CoreDbMptRef | CoreDbAccRef; + proof: openArray[Blob]; + root: Hash256; + path: openArray[byte]; + ): CoreDbRc[Blob] = + ## This function is the counterpart of any of the `proof()` functions. Given + ## the argument chain of rlp-encoded nodes `proof`, this function verifies + ## that the chain represents a partial MPT starting with a root node state + ## `root` following the path `path` leading to a leaf node encapsulating a + ## payload which is passed back as the return value. + ## + ## Note: The `mpt` argument is used for administrative purposes (e.g. logging) + ## only. The functionality is provided by the `Aristo` database + ## function `aristo_part.partUntwigGeneric()` with the same prototype + ## arguments except the `db`. + ## + template mpt: untyped = + when db is CoreDbRef: + CoreDbAccRef(db.defCtx) + else: + db + mpt.setTrackNewApi BaseVerifyFn + result = block: + let rc = mpt.call(partUntwigGeneric, proof, root, path) + if rc.isOk: + ok(rc.value) + else: + err(rc.error.toError($api, ProofVerify)) + mpt.ifTrackNewApi: debug logTxt, api, elapsed, result + +proc verifyOk*( + db: CoreDbRef | CoreDbMptRef | CoreDbAccRef; + proof: openArray[Blob]; + root: Hash256; + path: openArray[byte]; + payload: openArray[byte]; + ): CoreDbRc[void] = + ## Variant of `verify()` which directly checks the argument `payload` + ## against what would be the return value of `verify()`. + ## + template mpt: untyped = + when db is CoreDbRef: + CoreDbAccRef(db.defCtx) + else: + db + mpt.setTrackNewApi BaseVerifyOkFn + result = block: + let rc = mpt.call(partUntwigGenericOk, proof, root, path, payload) + if rc.isOk: + ok() + else: + err(rc.error.toError($api, ProofVerify)) + mpt.ifTrackNewApi: debug logTxt, api, elapsed, result + +proc verify*( + db: CoreDbRef | CoreDbMptRef | CoreDbAccRef; + proof: openArray[Blob]; + root: Hash256; + path: Hash256; + ): CoreDbRc[Blob] = + ## Variant of `verify()`. 
+ template mpt: untyped = + when db is CoreDbRef: + CoreDbAccRef(db.defCtx) + else: + db + mpt.setTrackNewApi BaseVerifyFn + result = block: + let rc = mpt.call(partUntwigPath, proof, root, path) + if rc.isOk: + ok(rc.value) + else: + err(rc.error.toError($api, ProofVerify)) + mpt.ifTrackNewApi: debug logTxt, api, elapsed, result + +proc verifyOk*( + db: CoreDbRef | CoreDbMptRef | CoreDbAccRef; + proof: openArray[Blob]; + root: Hash256; + path: Hash256; + payload: openArray[byte]; + ): CoreDbRc[void] = + ## Variant of `verifyOk()`. + template mpt: untyped = + when db is CoreDbRef: + CoreDbAccRef(db.defCtx) + else: + db + mpt.setTrackNewApi BaseVerifyOkFn + result = block: + let rc = mpt.call(partUntwigPathOk, proof, root, path, payload) + if rc.isOk: + ok() + else: + err(rc.error.toError($api, ProofVerify)) + mpt.ifTrackNewApi: debug logTxt, api, elapsed, result + # ------------------------------------------------------------------------------ # Public key-value table methods # ------------------------------------------------------------------------------ @@ -286,6 +422,22 @@ proc getGeneric*( # ----------- generic MPT --------------- +proc proof*( + mpt: CoreDbMptRef; + key: openArray[byte]; + ): CoreDbRc[seq[Blob]] = + ## On the generic MPT, collect the nodes along the `key` interpreted as + ## path. Return these path nodes as a chain of rlp-encoded blobs. + ## + mpt.setTrackNewApi MptProofFn + result = block: + let rc = mpt.call(partGenericTwig, mpt.mpt, CoreDbVidGeneric, key) + if rc.isOk: + ok(rc.value) + else: + err(rc.error.toError($api, ProofCreate)) + mpt.ifTrackNewApi: debug logTxt, api, elapsed, result + proc fetch*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[Blob] = ## Fetch data from the argument `mpt`. The function always returns a ## non-empty `Blob` or an error code. @@ -385,6 +537,22 @@ proc getAccounts*(ctx: CoreDbCtxRef): CoreDbAccRef = # ----------- accounts --------------- +proc proof*( + acc: CoreDbAccRef; + accPath: Hash256; + ): CoreDbRc[seq[Blob]] = + ## On the accounts MPT, collect the nodes along the `accPath` interpreted as + ## path. Return these path nodes as a chain of rlp-encoded blobs. + ## + acc.setTrackNewApi AccProofFn + result = block: + let rc = acc.call(partAccountTwig, acc.mpt, accPath) + if rc.isOk: + ok(rc.value) + else: + err(rc.error.toError($api, ProofCreate)) + acc.ifTrackNewApi: debug logTxt, api, elapsed, result + proc fetch*( acc: CoreDbAccRef; accPath: Hash256; @@ -492,6 +660,24 @@ proc state*(acc: CoreDbAccRef; updateOk = false): CoreDbRc[Hash256] = # ------------ storage --------------- +proc slotProof*( + acc: CoreDbAccRef; + accPath: Hash256; + stoPath: Hash256; + ): CoreDbRc[seq[Blob]] = + ## On the storage MPT related to the argument account `accPath`, collect the + ## nodes along the `stoPath` interpreted as path. Return these path nodes as + ## a chain of rlp-encoded blobs. 
+ ## + acc.setTrackNewApi AccSlotProofFn + result = block: + let rc = acc.call(partStorageTwig, acc.mpt, accPath, stoPath) + if rc.isOk: + ok(rc.value) + else: + err(rc.error.toError($api, ProofCreate)) + acc.ifTrackNewApi: debug logTxt, api, elapsed, result + proc slotFetch*( acc: CoreDbAccRef; accPath: Hash256; @@ -713,66 +899,54 @@ proc dispose*(tx: CoreDbTxRef) = # Public tracer methods # ------------------------------------------------------------------------------ -when CoreDbEnableCaptJournal and false: # currently disabled - proc newCapture*( - db: CoreDbRef; - ): CoreDbRc[CoreDbCaptRef] = - ## Trace constructor providing an overlay on top of the argument database - ## `db`. This overlay provides a replacement database handle that can be - ## retrieved via `db.recorder()` (which can in turn be ovelayed.) While - ## running the overlay stores data in a log-table which can be retrieved - ## via `db.logDb()`. - ## - ## Caveat: - ## The original database argument `db` should not be used while the tracer - ## is active (i.e. exists as overlay). The behaviour for this situation - ## is undefined and depends on the backend implementation of the tracer. +when CoreDbEnableCaptJournal: + proc pushCapture*(db: CoreDbRef): CoreDbCaptRef = + ## .. ## - db.setTrackNewApi BaseNewCaptureFn - result = db.methods.newCaptureFn flags + db.setTrackNewApi BasePushCaptureFn + if db.tracerHook.isNil: + db.tracerHook = TraceRecorderRef.init(db) + else: + TraceRecorderRef(db.tracerHook).push() + result = TraceRecorderRef(db.tracerHook).topInst().CoreDbCaptRef db.ifTrackNewApi: debug logTxt, api, elapsed, result - proc recorder*(cpt: CoreDbCaptRef): CoreDbRef = - ## Getter, returns a tracer replacement handle to be used as new database. - ## It records every action like fetch, store, hasKey, hasPath and delete. - ## This descriptor can be superseded by a new overlay tracer (using - ## `newCapture()`, again.) - ## - ## Caveat: - ## Unless the desriptor `cpt` referes to the top level overlay tracer, the - ## result is undefined and depends on the backend implementation of the - ## tracer. - ## - cpt.setTrackNewApi CptRecorderFn - result = cpt.methods.recorderFn() - cpt.ifTrackNewApi: debug logTxt, api, elapsed - - proc logDb*(cp: CoreDbCaptRef): TableRef[Blob,Blob] = - ## Getter, returns the logger table for the overlay tracer database. + proc level*(cpt: CoreDbCaptRef): int = + ## Getter, returns the positive number of stacked instances. ## - ## Caveat: - ## Unless the desriptor `cpt` referes to the top level overlay tracer, the - ## result is undefined and depends on the backend implementation of the - ## tracer. - ## - cp.setTrackNewApi CptLogDbFn - result = cp.methods.logDbFn() - cp.ifTrackNewApi: debug logTxt, api, elapsed + let log = cpt.distinctBase + log.db.setTrackNewApi CptLevelFn + result = log.level() + log.db.ifTrackNewApi: debug logTxt, api, elapsed, result - proc flags*(cp: CoreDbCaptRef):set[CoreDbCaptFlags] = - ## Getter + proc kvtLog*(cpt: CoreDbCaptRef): seq[(Blob,Blob)] = + ## Getter, returns the `Kvt` logger list for the argument instance. ## - cp.setTrackNewApi CptFlagsFn - result = cp.methods.getFlagsFn() - cp.ifTrackNewApi: debug logTxt, api, elapsed, result + let log = cpt.distinctBase + log.db.setTrackNewApi CptKvtLogFn + result = log.kvtLogBlobs() + log.db.ifTrackNewApi: debug logTxt, api, elapsed - proc forget*(cp: CoreDbCaptRef) = + proc pop*(cpt: CoreDbCaptRef) = ## Explicitly stop recording the current tracer instance and reset to ## previous level. 
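The `proof()`/`slotProof()` constructors and the `verify()`/`verifyOk()` checkers above form a matched create/check pair (the capture journal procs continue right after this note). A hedged sketch of the round trip on the accounts column, assuming a populated `CoreDbRef` handle `db` and an account path `accPath: Hash256`; the proc names and signatures are the ones declared above, setup and error handling are elided:

```nim
# Hedged sketch, assuming `db: CoreDbRef` already holds the account at
# `accPath`.
let
  acc   = db.ctx.getAccounts()
  root  = acc.state(updateOk = true).expect "state hash"  # Aristo hashes lazily
  chain = acc.proof(accPath).expect "proof chain"         # rlp-encoded nodes
# A receiver holding only `root` can check the chain and recover the payload:
let payload = db.verify(chain, root, accPath).expect "payload"
doAssert db.verifyOk(chain, root, accPath, payload).isOk
```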
## - cp.setTrackNewApi CptForgetFn - cp.methods.forgetFn() - cp.ifTrackNewApi: debug logTxt, api, elapsed + let db = cpt.distinctBase.db + db.setTrackNewApi CptPopFn + if not cpt.distinctBase.pop(): + TraceRecorderRef(db.tracerHook).restore() + db.tracerHook = TraceRecorderRef(nil) + db.ifTrackNewApi: debug logTxt, api, elapsed, cpt + + proc stopCapture*(db: CoreDbRef) = + ## Discard capture instances. This function is equivalent to `pop()`-ing + ## all instances. + ## + db.setTrackNewApi CptStopCaptureFn + if not db.tracerHook.isNil: + TraceRecorderRef(db.tracerHook).restore() + db.tracerHook = TraceRecorderRef(nil) + db.ifTrackNewApi: debug logTxt, api, elapsed # ------------------------------------------------------------------------------ # End diff --git a/nimbus/db/core_db/base/api_tracking.nim b/nimbus/db/core_db/base/api_tracking.nim index 9688af5293..05c9e7f7a4 100644 --- a/nimbus/db/core_db/base/api_tracking.nim +++ b/nimbus/db/core_db/base/api_tracking.nim @@ -11,7 +11,7 @@ {.push raises: [].} import - std/[strutils, times, typetraits], + std/[sequtils, strutils, times, typetraits], eth/common, results, stew/byteutils, @@ -36,6 +36,7 @@ type AccForgetFn = "acc/forget" AccHasPathFn = "acc/hasPath" AccMergeFn = "acc/merge" + AccProofFn = "acc/proof" AccRecastFn = "recast" AccStateFn = "acc/state" @@ -43,6 +44,7 @@ type AccSlotDeleteFn = "slotDelete" AccSlotHasPathFn = "slotHasPath" AccSlotMergeFn = "slotMerge" + AccSlotProofFn = "slotProof" AccSlotStateFn = "slotState" AccSlotStateEmptyFn = "slotStateEmpty" AccSlotStateEmptyOrVoidFn = "slotStateEmptyOrVoid" @@ -50,20 +52,23 @@ type BaseFinishFn = "finish" BaseLevelFn = "level" - BaseNewCaptureFn = "newCapture" - BaseNewCtxFromTxFn = "ctxFromTx" + BasePushCaptureFn = "pushCapture" BaseNewTxFn = "newTransaction" BasePersistentFn = "persistent" BaseStateBlockNumberFn = "stateBlockNumber" - BaseSwapCtxFn = "swapCtx" + BaseVerifyFn = "verify" + BaseVerifyOkFn = "verifyOk" - CptLogDbFn = "cpt/logDb" - CptRecorderFn = "cpt/recorder" - CptForgetFn = "cpt/forget" + CptKvtLogFn = "kvtLog" + CptLevelFn = "level" + CptPopFn = "pop" + CptStopCaptureFn = "stopCapture" CtxForgetFn = "ctx/forget" CtxGetAccountsFn = "getAccounts" CtxGetGenericFn = "getGeneric" + CtxNewCtxByKeyFn = "newCtxByKey" + CtxSwapCtxFn = "swapCtx" KvtDelFn = "del" KvtGetFn = "get" @@ -79,6 +84,7 @@ type MptForgetFn = "mpt/forget" MptHasPathFn = "mpt/hasPath" MptMergeFn = "mpt/merge" + MptProofFn = "mpt/proof" MptPairsIt = "mpt/pairs" MptReplicateIt = "mpt/replicate" MptStateFn = "mpt/state" @@ -121,6 +127,10 @@ func toStr(rc: CoreDbRc[Blob]): string = if rc.isOk: "ok(Blob[" & $rc.value.len & "])" else: "err(" & rc.error.toStr & ")" +func toStr(rc: CoreDbRc[seq[Blob]]): string = + if rc.isOk: "ok([" & rc.value.mapIt("[#" & $it.len & "]").join(",") & "])" + else: "err(" & rc.error.toStr & ")" + func toStr(rc: CoreDbRc[Hash256]): string = if rc.isOk: "ok(" & rc.value.toStr & ")" else: "err(" & rc.error.toStr & ")" diff --git a/nimbus/db/core_db/base/base_desc.nim b/nimbus/db/core_db/base/base_desc.nim index be660d3988..8ad2c0bb5f 100644 --- a/nimbus/db/core_db/base/base_desc.nim +++ b/nimbus/db/core_db/base/base_desc.nim @@ -54,6 +54,8 @@ type HashNotAvailable KvtNotFound MptNotFound + ProofCreate + ProofVerify RlpException StoNotFound TxPending diff --git a/nimbus/db/core_db/base/base_helpers.nim b/nimbus/db/core_db/base/base_helpers.nim index 1a41b4ef7c..1b7a0aef41 100644 --- a/nimbus/db/core_db/base/base_helpers.nim +++ b/nimbus/db/core_db/base/base_helpers.nim @@ -51,10 
+51,10 @@ proc bless*(ctx: CoreDbCtxRef; dsc: CoreDbMptRef | CoreDbTxRef): auto = # ------------------------------------------------------------------------------ template kvt*(dsc: CoreDbKvtRef): KvtDbRef = - dsc.distinctBase.kvt + CoreDbCtxRef(dsc).kvt template ctx*(kvt: CoreDbKvtRef): CoreDbCtxRef = - kvt.distinctBase + CoreDbCtxRef(kvt) # --------------- @@ -65,7 +65,7 @@ template call*(api: KvtApiRef; fn: untyped; args: varArgs[untyped]): untyped = fn(args) template call*(kvt: CoreDbKvtRef; fn: untyped; args: varArgs[untyped]): untyped = - kvt.distinctBase.parent.kvtApi.call(fn, args) + CoreDbCtxRef(kvt).parent.kvtApi.call(fn, args) # --------------- @@ -81,13 +81,13 @@ func toError*(e: KvtError; s: string; error = Unspecified): CoreDbError = # ------------------------------------------------------------------------------ template mpt*(dsc: CoreDbAccRef | CoreDbMptRef): AristoDbRef = - dsc.distinctBase.mpt + CoreDbCtxRef(dsc).mpt template mpt*(tx: CoreDbTxRef): AristoDbRef = tx.ctx.mpt template ctx*(acc: CoreDbAccRef): CoreDbCtxRef = - acc.distinctBase + CoreDbCtxRef(acc) # --------------- @@ -102,7 +102,7 @@ template call*( fn: untyped; args: varArgs[untyped]; ): untyped = - acc.distinctBase.parent.ariApi.call(fn, args) + CoreDbCtxRef(acc).parent.ariApi.call(fn, args) # --------------- diff --git a/nimbus/db/kvt/kvt_api.nim b/nimbus/db/kvt/kvt_api.nim index cf05eaf5af..f1cad133e0 100644 --- a/nimbus/db/kvt/kvt_api.nim +++ b/nimbus/db/kvt/kvt_api.nim @@ -365,8 +365,9 @@ func init*( data.list[KvtApiProfBeLenKvpFn.ord].masked = true beDup.putKvpFn = - proc(a: PutHdlRef; b: openArray[(Blob,Blob)]) = - be.putKvpFn(a,b) + proc(a: PutHdlRef; b, c: openArray[byte]) = + KvtApiProfBePutKvpFn.profileRunner: + be.putKvpFn(a, b, c) data.list[KvtApiProfBePutKvpFn.ord].masked = true beDup.putEndFn = diff --git a/nimbus/evm/stack.nim b/nimbus/evm/stack.nim index 8fe0d8944e..76d405d0ee 100644 --- a/nimbus/evm/stack.nim +++ b/nimbus/evm/stack.nim @@ -41,7 +41,7 @@ template toStackElem(v: EvmStackInts, elem: EvmStackElement) = template toStackElem(v: EthAddress, elem: EvmStackElement) = elem.initFromBytesBE(v) -template toStackElem(v: MDigest, elem: EvmStackElement) = +template toStackElem(v: Hash256, elem: EvmStackElement) = elem.initFromBytesBE(v.data) template toStackElem(v: openArray[byte], elem: EvmStackElement) = @@ -254,4 +254,3 @@ template unaryAddress*(stack: EvmStack, unOp): EvmResultVoid = EvmResultVoid.ok() else: EvmResultVoid.err(stackErr(StackInsufficient)) - \ No newline at end of file diff --git a/nimbus/nimbus.nim b/nimbus/nimbus.nim index e456fa2ae6..6dbd88135d 100644 --- a/nimbus/nimbus.nim +++ b/nimbus/nimbus.nim @@ -243,8 +243,6 @@ proc run(nimbus: NimbusNode, conf: NimbusConf) = defer: com.db.finish() - com.initializeEmptyDb() - case conf.cmd of NimbusCmd.`import`: importBlocks(conf, com) diff --git a/nimbus/rpc.nim b/nimbus/rpc.nim index 1d0391bb19..de84341a56 100644 --- a/nimbus/rpc.nim +++ b/nimbus/rpc.nim @@ -149,7 +149,8 @@ proc addHandler(handlers: var seq[RpcHandlerProc], proc addHttpServices(handlers: var seq[RpcHandlerProc], nimbus: NimbusNode, conf: NimbusConf, com: CommonRef, oracle: Oracle, - protocols: set[ProtocolFlag]) = + protocols: set[ProtocolFlag], + address: TransportAddress) = # The order is important: graphql, ws, rpc # graphql depends on /graphl path @@ -160,6 +161,7 @@ proc addHttpServices(handlers: var seq[RpcHandlerProc], let ctx = setupGraphqlContext(com, nimbus.ethNode, nimbus.txPool) let server = GraphqlHttpHandlerRef.new(ctx) 
handlers.addHandler(server) + info "GraphQL API enabled", url = "http://" & $address if conf.wsEnabled: let server = newRpcWebsocketHandler() @@ -167,6 +169,7 @@ proc addHttpServices(handlers: var seq[RpcHandlerProc], if ProtocolFlag.Eth in protocols: rpcFlags.incl RpcFlag.Eth installRPC(server, nimbus, conf, com, oracle, rpcFlags) handlers.addHandler(server) + info "JSON-RPC WebSocket API enabled", url = "ws://" & $address if conf.rpcEnabled: let server = newRpcHttpHandler() @@ -174,10 +177,12 @@ proc addHttpServices(handlers: var seq[RpcHandlerProc], if ProtocolFlag.Eth in protocols: rpcFlags.incl RpcFlag.Eth installRPC(server, nimbus, conf, com, oracle, rpcFlags) handlers.addHandler(server) + info "JSON-RPC API enabled", url = "http://" & $address proc addEngineApiServices(handlers: var seq[RpcHandlerProc], nimbus: NimbusNode, conf: NimbusConf, - com: CommonRef, oracle: Oracle,) = + com: CommonRef, oracle: Oracle, + address: TransportAddress) = # The order is important: ws, rpc @@ -186,16 +191,19 @@ proc addEngineApiServices(handlers: var seq[RpcHandlerProc], setupEngineAPI(nimbus.beaconEngine, server) installRPC(server, nimbus, conf, com, oracle, {RpcFlag.Eth}) handlers.addHandler(server) + info "Engine WebSocket API enabled", url = "ws://" & $address if conf.engineApiEnabled: let server = newRpcHttpHandler() setupEngineAPI(nimbus.beaconEngine, server) installRPC(server, nimbus, conf, com, oracle, {RpcFlag.Eth}) handlers.addHandler(server) + info "Engine API enabled", url = "http://" & $address proc addServices(handlers: var seq[RpcHandlerProc], nimbus: NimbusNode, conf: NimbusConf, - com: CommonRef, oracle: Oracle, protocols: set[ProtocolFlag]) = + com: CommonRef, oracle: Oracle, protocols: set[ProtocolFlag], + address: TransportAddress) = # The order is important: graphql, ws, rpc @@ -203,18 +211,24 @@ proc addServices(handlers: var seq[RpcHandlerProc], let ctx = setupGraphqlContext(com, nimbus.ethNode, nimbus.txPool) let server = GraphqlHttpHandlerRef.new(ctx) handlers.addHandler(server) + info "GraphQL API enabled", url = "http://" & $address if conf.wsEnabled or conf.engineApiWsEnabled: let server = newRpcWebsocketHandler() if conf.engineApiWsEnabled: setupEngineAPI(nimbus.beaconEngine, server) + if not conf.wsEnabled: installRPC(server, nimbus, conf, com, oracle, {RpcFlag.Eth}) + info "Engine WebSocket API enabled", url = "ws://" & $address + if conf.wsEnabled: var rpcFlags = conf.getWsFlags() if ProtocolFlag.Eth in protocols: rpcFlags.incl RpcFlag.Eth installRPC(server, nimbus, conf, com, oracle, rpcFlags) + info "JSON-RPC WebSocket API enabled", url = "ws://" & $address + handlers.addHandler(server) if conf.rpcEnabled or conf.engineApiEnabled: @@ -224,14 +238,22 @@ proc addServices(handlers: var seq[RpcHandlerProc], if not conf.rpcEnabled: installRPC(server, nimbus, conf, com, oracle, {RpcFlag.Eth}) + info "Engine API enabled", url = "http://" & $address + if conf.rpcEnabled: var rpcFlags = conf.getRpcFlags() if ProtocolFlag.Eth in protocols: rpcFlags.incl RpcFlag.Eth installRPC(server, nimbus, conf, com, oracle, rpcFlags) + + info "JSON-RPC API enabled", url = "http://" & $address + handlers.addHandler(server) proc setupRpc*(nimbus: NimbusNode, conf: NimbusConf, com: CommonRef, protocols: set[ProtocolFlag]) = + if not conf.engineApiEnabled: + warn "Engine API disabled, the node will not respond to consensus client updates (enable with `--engine-api`)" + if not conf.serverEnabled: return @@ -252,10 +274,10 @@ proc setupRpc*(nimbus: NimbusNode, conf: NimbusConf, oracle = 
Oracle.new(com) if conf.combinedServer: - let hooks = @[jwtAuthHook, corsHook] + let hooks: seq[RpcAuthHook] = @[jwtAuthHook, corsHook] var handlers: seq[RpcHandlerProc] - handlers.addServices(nimbus, conf, com, oracle, protocols) let address = initTAddress(conf.httpAddress, conf.httpPort) + handlers.addServices(nimbus, conf, com, oracle, protocols, address) let res = newHttpServerWithParams(address, hooks, handlers) if res.isErr: fatal "Cannot create RPC server", msg=res.error @@ -267,8 +289,8 @@ proc setupRpc*(nimbus: NimbusNode, conf: NimbusConf, if conf.httpServerEnabled: let hooks = @[corsHook] var handlers: seq[RpcHandlerProc] - handlers.addHttpServices(nimbus, conf, com, oracle, protocols) let address = initTAddress(conf.httpAddress, conf.httpPort) + handlers.addHttpServices(nimbus, conf, com, oracle, protocols, address) let res = newHttpServerWithParams(address, hooks, handlers) if res.isErr: fatal "Cannot create RPC server", msg=res.error @@ -279,13 +301,11 @@ proc setupRpc*(nimbus: NimbusNode, conf: NimbusConf, if conf.engineApiServerEnabled: let hooks = @[jwtAuthHook, corsHook] var handlers: seq[RpcHandlerProc] - handlers.addEngineApiServices(nimbus, conf, com, oracle) let address = initTAddress(conf.engineApiAddress, conf.engineApiPort) + handlers.addEngineApiServices(nimbus, conf, com, oracle, address) let res = newHttpServerWithParams(address, hooks, handlers) if res.isErr: fatal "Cannot create RPC server", msg=res.error quit(QuitFailure) nimbus.engineApiServer = res.get nimbus.engineApiServer.start() - -{.pop.} diff --git a/nimbus/rpc/experimental.nim b/nimbus/rpc/experimental.nim index 280c33db84..27926ca779 100644 --- a/nimbus/rpc/experimental.nim +++ b/nimbus/rpc/experimental.nim @@ -35,7 +35,7 @@ proc getMultiKeys*( com: CommonRef, blockHeader: BlockHeader, statePostExecution: bool): MultiKeysRef - {.raises: [RlpError, BlockNotFound, ValueError].} = + {.raises: [BlockNotFound, ValueError].} = let chainDB = com.db diff --git a/nimbus/rpc/jwt_auth.nim b/nimbus/rpc/jwt_auth.nim index 089cb3aaf7..776c2bff5e 100644 --- a/nimbus/rpc/jwt_auth.nim +++ b/nimbus/rpc/jwt_auth.nim @@ -222,6 +222,7 @@ proc jwtSharedSecret*( try: let newSecret = rndSecret() jwtSecretPath.writeFile(newSecret.JwtSharedKeyRaw.to0xHex) + notice "JWT secret generated", jwtSecretPath return ok(newSecret) except IOError as e: # Allow continuing to run, though this is effectively fatal for a merge @@ -240,6 +241,7 @@ proc jwtSharedSecret*( let rc = key.fromHex(lines[0]) if rc.isErr: return err(rc.error) + info "JWT secret loaded", jwtSecretPath = config.jwtSecret.get.string return ok(key) except IOError: return err(jwtKeyFileCannotOpen) diff --git a/nimbus/sync/handlers/eth.nim b/nimbus/sync/handlers/eth.nim index b84dadf496..f0f88a0b42 100644 --- a/nimbus/sync/handlers/eth.nim +++ b/nimbus/sync/handlers/eth.nim @@ -11,15 +11,14 @@ {.push raises: [].} import - std/[tables, times, hashes, sets, sequtils], + std/[tables, times, hashes, sets], chronicles, chronos, stew/endians2, eth/p2p, eth/p2p/peer_pool, ".."/[types, protocol], ../protocol/eth/eth_types, - ../protocol/trace_config, # gossip noise control - ../../core/[chain, tx_pool, tx_pool/tx_item] + ../../core/[chain, tx_pool] logScope: topics = "eth-wire" @@ -27,47 +26,22 @@ logScope: type HashToTime = TableRef[Hash256, Time] - NewBlockHandler* = proc( - arg: pointer, - peer: Peer, - blk: EthBlock, - totalDifficulty: DifficultyInt) {. 
- gcsafe, raises: [CatchableError].} - - NewBlockHashesHandler* = proc( - arg: pointer, - peer: Peer, - hashes: openArray[NewBlockHashesAnnounce]) {. - gcsafe, raises: [CatchableError].} - - NewBlockHandlerPair = object - arg: pointer - handler: NewBlockHandler - - NewBlockHashesHandlerPair = object - arg: pointer - handler: NewBlockHashesHandler - - EthWireRunState = enum - Enabled - Suspended - NotAvailable - EthWireRef* = ref object of EthWireBase db: CoreDbRef chain: ForkedChainRef txPool: TxPoolRef peerPool: PeerPool - enableTxPool: EthWireRunState knownByPeer: Table[Peer, HashToTime] pending: HashSet[Hash256] lastCleanup: Time - newBlockHandler: NewBlockHandlerPair - newBlockHashesHandler: NewBlockHashesHandlerPair const - NUM_PEERS_REBROADCAST_QUOTIENT = 4 - POOLED_STORAGE_TIME_LIMIT = initDuration(minutes = 20) + txpool_enabled = defined(enable_txpool_in_synchronizer) + +when txpool_enabled: + const + NUM_PEERS_REBROADCAST_QUOTIENT = 4 + POOLED_STORAGE_TIME_LIMIT = initDuration(minutes = 20) # ------------------------------------------------------------------------------ # Private functions: helper functions @@ -110,169 +84,169 @@ proc blockHeader(db: CoreDbRef, proc hash(peer: Peer): hashes.Hash {.used.} = hash(peer.remote) -proc getPeers(ctx: EthWireRef, thisPeer: Peer): seq[Peer] = - # do not send back tx or txhash to thisPeer - for peer in peers(ctx.peerPool): - if peer != thisPeer: - result.add peer - -proc cleanupKnownByPeer(ctx: EthWireRef) = - let now = getTime() - var tmp = HashSet[Hash256]() - for _, map in ctx.knownByPeer: - for hash, time in map: - if time - now >= POOLED_STORAGE_TIME_LIMIT: - tmp.incl hash - for hash in tmp: - map.del(hash) - tmp.clear() - - var tmpPeer = HashSet[Peer]() - for peer, map in ctx.knownByPeer: - if map.len == 0: - tmpPeer.incl peer - - for peer in tmpPeer: - ctx.knownByPeer.del peer - - ctx.lastCleanup = now - -proc addToKnownByPeer(ctx: EthWireRef, txHashes: openArray[Hash256], peer: Peer) = - var map: HashToTime - ctx.knownByPeer.withValue(peer, val) do: - map = val[] - do: - map = newTable[Hash256, Time]() - ctx.knownByPeer[peer] = map - - for txHash in txHashes: - if txHash notin map: - map[txHash] = getTime() - -proc addToKnownByPeer(ctx: EthWireRef, - txHashes: openArray[Hash256], - peer: Peer, - newHashes: var seq[Hash256]) = - var map: HashToTime - ctx.knownByPeer.withValue(peer, val) do: - map = val[] - do: - map = newTable[Hash256, Time]() - ctx.knownByPeer[peer] = map - - newHashes = newSeqOfCap[Hash256](txHashes.len) - for txHash in txHashes: - if txHash notin map: - map[txHash] = getTime() - newHashes.add txHash +when txpool_enabled: + proc getPeers(ctx: EthWireRef, thisPeer: Peer): seq[Peer] = + # do not send back tx or txhash to thisPeer + for peer in peers(ctx.peerPool): + if peer != thisPeer: + result.add peer + + proc cleanupKnownByPeer(ctx: EthWireRef) = + let now = getTime() + var tmp = HashSet[Hash256]() + for _, map in ctx.knownByPeer: + for hash, time in map: + if time - now >= POOLED_STORAGE_TIME_LIMIT: + tmp.incl hash + for hash in tmp: + map.del(hash) + tmp.clear() + + var tmpPeer = HashSet[Peer]() + for peer, map in ctx.knownByPeer: + if map.len == 0: + tmpPeer.incl peer + + for peer in tmpPeer: + ctx.knownByPeer.del peer + + ctx.lastCleanup = now + + proc addToKnownByPeer(ctx: EthWireRef, txHashes: openArray[Hash256], peer: Peer) = + var map: HashToTime + ctx.knownByPeer.withValue(peer, val) do: + map = val[] + do: + map = newTable[Hash256, Time]() + ctx.knownByPeer[peer] = map + + for txHash in txHashes: + if 
txHash notin map: + map[txHash] = getTime() + + proc addToKnownByPeer(ctx: EthWireRef, + txHashes: openArray[Hash256], + peer: Peer, + newHashes: var seq[Hash256]) = + var map: HashToTime + ctx.knownByPeer.withValue(peer, val) do: + map = val[] + do: + map = newTable[Hash256, Time]() + ctx.knownByPeer[peer] = map + + newHashes = newSeqOfCap[Hash256](txHashes.len) + for txHash in txHashes: + if txHash notin map: + map[txHash] = getTime() + newHashes.add txHash # ------------------------------------------------------------------------------ # Private functions: async workers # ------------------------------------------------------------------------------ -proc sendNewTxHashes(ctx: EthWireRef, - txHashes: seq[Hash256], - peers: seq[Peer]): Future[void] {.async.} = - try: - for peer in peers: - # Add to known tx hashes and get hashes still to send to peer - var hashesToSend: seq[Hash256] - ctx.addToKnownByPeer(txHashes, peer, hashesToSend) - - # Broadcast to peer if at least 1 new tx hash to announce - if hashesToSend.len > 0: - # Currently only one protocol version is available as compiled - when ethVersion == 68: - await newPooledTransactionHashes( - peer, - 1u8.repeat hashesToSend.len, # type - 0.repeat hashesToSend.len, # sizes - hashesToSend) - else: - await newPooledTransactionHashes(peer, hashesToSend) - - except TransportError: - debug "Transport got closed during sendNewTxHashes" - except CatchableError as e: - debug "Exception in sendNewTxHashes", exc = e.name, err = e.msg - -proc sendTransactions(ctx: EthWireRef, +when txpool_enabled: + proc sendNewTxHashes(ctx: EthWireRef, txHashes: seq[Hash256], - txs: seq[Transaction], peers: seq[Peer]): Future[void] {.async.} = - try: - for peer in peers: - # This is used to avoid re-sending along pooledTxHashes - # announcements/re-broadcasts - ctx.addToKnownByPeer(txHashes, peer) - await peer.transactions(txs) - - except TransportError: - debug "Transport got closed during sendTransactions" - except CatchableError as e: - debug "Exception in sendTransactions", exc = e.name, err = e.msg - -proc fetchTransactions(ctx: EthWireRef, reqHashes: seq[Hash256], peer: Peer): Future[void] {.async.} = - debug "fetchTx: requesting txs", - number = reqHashes.len - - try: - - let res = await peer.getPooledTransactions(reqHashes) - if res.isNone: - error "not able to get pooled transactions" + try: + for peer in peers: + # Add to known tx hashes and get hashes still to send to peer + var hashesToSend: seq[Hash256] + ctx.addToKnownByPeer(txHashes, peer, hashesToSend) + + # Broadcast to peer if at least 1 new tx hash to announce + if hashesToSend.len > 0: + # Currently only one protocol version is available as compiled + when ethVersion == 68: + await newPooledTransactionHashes( + peer, + 1u8.repeat hashesToSend.len, # type + 0.repeat hashesToSend.len, # sizes + hashesToSend) + else: + await newPooledTransactionHashes(peer, hashesToSend) + + except TransportError: + debug "Transport got closed during sendNewTxHashes" + except CatchableError as e: + debug "Exception in sendNewTxHashes", exc = e.name, err = e.msg + + proc sendTransactions(ctx: EthWireRef, + txHashes: seq[Hash256], + txs: seq[Transaction], + peers: seq[Peer]): Future[void] {.async.} = + try: + for peer in peers: + # This is used to avoid re-sending along pooledTxHashes + # announcements/re-broadcasts + ctx.addToKnownByPeer(txHashes, peer) + await peer.transactions(txs) + + except TransportError: + debug "Transport got closed during sendTransactions" + except CatchableError as e: + debug "Exception 
in sendTransactions", exc = e.name, err = e.msg + + proc fetchTransactions(ctx: EthWireRef, reqHashes: seq[Hash256], peer: Peer): Future[void] {.async.} = + debug "fetchTx: requesting txs", + number = reqHashes.len + + try: + + let res = await peer.getPooledTransactions(reqHashes) + if res.isNone: + error "not able to get pooled transactions" + return + + let txs = res.get() + debug "fetchTx: received requested txs", + number = txs.transactions.len + + # Remove from pending list regardless if tx is in result + for tx in txs.transactions: + let txHash = rlpHash(tx) + ctx.pending.excl txHash + + ctx.txPool.add(txs.transactions) + + except TransportError: + debug "Transport got closed during fetchTransactions" + return + except CatchableError as e: + debug "Exception in fetchTransactions", exc = e.name, err = e.msg return - let txs = res.get() - debug "fetchTx: received requested txs", - number = txs.transactions.len - - # Remove from pending list regardless if tx is in result - for tx in txs.transactions: - let txHash = rlpHash(tx) - ctx.pending.excl txHash - - ctx.txPool.add(txs.transactions) - - except TransportError: - debug "Transport got closed during fetchTransactions" - return - except CatchableError as e: - debug "Exception in fetchTransactions", exc = e.name, err = e.msg - return - - var newTxHashes = newSeqOfCap[Hash256](reqHashes.len) - for txHash in reqHashes: - if ctx.txPool.inPoolAndOk(txHash): - newTxHashes.add txHash + var newTxHashes = newSeqOfCap[Hash256](reqHashes.len) + for txHash in reqHashes: + if ctx.txPool.inPoolAndOk(txHash): + newTxHashes.add txHash - let peers = ctx.getPeers(peer) - if peers.len == 0 or newTxHashes.len == 0: - return + let peers = ctx.getPeers(peer) + if peers.len == 0 or newTxHashes.len == 0: + return - await ctx.sendNewTxHashes(newTxHashes, peers) + await ctx.sendNewTxHashes(newTxHashes, peers) # ------------------------------------------------------------------------------ # Private functions: peer observer # ------------------------------------------------------------------------------ proc onPeerConnected(ctx: EthWireRef, peer: Peer) = - if ctx.enableTxPool != Enabled: - when trMissingOrDisabledGossipOk: - notEnabled("onPeerConnected") - return - - var txHashes = newSeqOfCap[Hash256](ctx.txPool.numTxs) - for txHash, item in okPairs(ctx.txPool): - txHashes.add txHash + when txpool_enabled: + var txHashes = newSeqOfCap[Hash256](ctx.txPool.numTxs) + for txHash, item in okPairs(ctx.txPool): + txHashes.add txHash - if txHashes.len == 0: - return + if txHashes.len == 0: + return - debug "announce tx hashes to newly connected peer", - number = txHashes.len + debug "announce tx hashes to newly connected peer", + number = txHashes.len - asyncSpawn ctx.sendNewTxHashes(txHashes, @[peer]) + asyncSpawn ctx.sendNewTxHashes(txHashes, @[peer]) + else: + discard proc onPeerDisconnected(ctx: EthWireRef, peer: Peer) = debug "remove peer from knownByPeer", @@ -304,43 +278,11 @@ proc new*(_: type EthWireRef, chain: chain, txPool: txPool, peerPool: peerPool, - enableTxPool: Enabled, lastCleanup: getTime()) - if txPool.isNil: - ctx.enableTxPool = NotAvailable - when trMissingOrDisabledGossipOk: - trace "New eth handler, minimal/outbound support only" ctx.setupPeerObserver() ctx -# ------------------------------------------------------------------------------ -# Public functions: callbacks setters -# ------------------------------------------------------------------------------ - -proc setNewBlockHandler*(ctx: EthWireRef, handler: NewBlockHandler, arg: pointer) = - 
ctx.newBlockHandler = NewBlockHandlerPair( - arg: arg, - handler: handler - ) - -proc setNewBlockHashesHandler*(ctx: EthWireRef, handler: NewBlockHashesHandler, arg: pointer) = - ctx.newBlockHashesHandler = NewBlockHashesHandlerPair( - arg: arg, - handler: handler - ) - -# ------------------------------------------------------------------------------ -# Public getters/setters -# ------------------------------------------------------------------------------ - -proc `txPoolEnabled=`*(ctx: EthWireRef; ena: bool) {.gcsafe, raises: [].} = - if ctx.enableTxPool != NotAvailable: - ctx.enableTxPool = if ena: Enabled else: Suspended - -proc txPoolEnabled*(ctx: EthWireRef): bool {.gcsafe, raises: [].} = - ctx.enableTxPool == Enabled - # ------------------------------------------------------------------------------ # Public functions: eth wire protocol handlers # ------------------------------------------------------------------------------ @@ -391,15 +333,20 @@ method getPooledTxs*(ctx: EthWireRef, hashes: openArray[Hash256]): Result[seq[PooledTransaction], string] {.gcsafe.} = - let txPool = ctx.txPool - var list: seq[PooledTransaction] - for txHash in hashes: - let res = txPool.getItem(txHash) - if res.isOk: - list.add res.value.pooledTx - else: - trace "handlers.getPooledTxs: tx not found", txHash - ok(list) + + when txpool_enabled: + let txPool = ctx.txPool + var list: seq[PooledTransaction] + for txHash in hashes: + let res = txPool.getItem(txHash) + if res.isOk: + list.add res.value.pooledTx + else: + trace "handlers.getPooledTxs: tx not found", txHash + ok(list) + else: + var list: seq[PooledTransaction] + ok(list) method getBlockBodies*(ctx: EthWireRef, hashes: openArray[Hash256]): @@ -446,55 +393,53 @@ method handleAnnouncedTxs*(ctx: EthWireRef, Result[void, string] {.gcsafe.} = - try: - if ctx.enableTxPool != Enabled: - when trMissingOrDisabledGossipOk: - notEnabled("handleAnnouncedTxs") - return ok() + when txpool_enabled: + try: + if txs.len == 0: + return ok() - if txs.len == 0: - return ok() + debug "received new transactions", + number = txs.len - debug "received new transactions", - number = txs.len - - if ctx.lastCleanup - getTime() > POOLED_STORAGE_TIME_LIMIT: - ctx.cleanupKnownByPeer() - - var txHashes = newSeqOfCap[Hash256](txs.len) - for tx in txs: - txHashes.add rlpHash(tx) - - ctx.addToKnownByPeer(txHashes, peer) - for tx in txs: - if tx.versionedHashes.len > 0: - # EIP-4844 blobs are not persisted and cannot be broadcasted - continue - ctx.txPool.add PooledTransaction(tx: tx) - - var newTxHashes = newSeqOfCap[Hash256](txHashes.len) - var validTxs = newSeqOfCap[Transaction](txHashes.len) - for i, txHash in txHashes: - # Nodes must not automatically broadcast blob transactions to - # their peers. 
per EIP-4844 spec - if ctx.txPool.inPoolAndOk(txHash) and txs[i].txType != TxEip4844: - newTxHashes.add txHash - validTxs.add txs[i] + if ctx.lastCleanup - getTime() > POOLED_STORAGE_TIME_LIMIT: + ctx.cleanupKnownByPeer() - let - peers = ctx.getPeers(peer) - numPeers = peers.len - sendFull = max(1, numPeers div NUM_PEERS_REBROADCAST_QUOTIENT) + var txHashes = newSeqOfCap[Hash256](txs.len) + for tx in txs: + txHashes.add rlpHash(tx) - if numPeers == 0 or validTxs.len == 0: + ctx.addToKnownByPeer(txHashes, peer) + for tx in txs: + if tx.versionedHashes.len > 0: + # EIP-4844 blobs are not persisted and cannot be broadcasted + continue + ctx.txPool.add PooledTransaction(tx: tx) + + var newTxHashes = newSeqOfCap[Hash256](txHashes.len) + var validTxs = newSeqOfCap[Transaction](txHashes.len) + for i, txHash in txHashes: + # Nodes must not automatically broadcast blob transactions to + # their peers. per EIP-4844 spec + if ctx.txPool.inPoolAndOk(txHash) and txs[i].txType != TxEip4844: + newTxHashes.add txHash + validTxs.add txs[i] + + let + peers = ctx.getPeers(peer) + numPeers = peers.len + sendFull = max(1, numPeers div NUM_PEERS_REBROADCAST_QUOTIENT) + + if numPeers == 0 or validTxs.len == 0: + return ok() + + asyncSpawn ctx.sendTransactions(txHashes, validTxs, peers[0.. " & $rc.error + + # Save keys to database + for (rvid,key) in ps.vkPairs: + ps.db.layersPutKey(rvid, key) + + # Make sure all is OK + block: + let rc = ps.check() + if rc.isErr: raiseAssert info & ": check => " & $rc.error + + +proc preLoadAristoDb(jKvp: JsonNode): PartStateRef = + const info = "preLoadAristoDb" + let ps = PartStateRef.init AristoDbRef.init() + + # Collect rlp-encoded node blobs + var proof: seq[Blob] + for (k,v) in jKvp.pairs: + let + key = hexToSeqByte(k) + val = hexToSeqByte(v.getStr()) + if key.len == 32: + doAssert key == val.keccakHash.data + if val != @[0x80u8]: # Exclude empty item + proof.add val + + ps.createPartDb(proof, info) + ps + + +proc collectAddresses(node: JsonNode, collect: var HashSet[EthAddress]) = + case node.kind: + of JObject: + for k,v in node.pairs: + if k == "address" and v.kind == JString: + collect.incl EthAddress.fromHex v.getStr + else: + v.collectAddresses collect + of JArray: + for v in node.items: + v.collectAddresses collect + else: + discard + + +proc payloadAsBlob(pyl: LeafPayload; ps: PartStateRef): Blob = + ## Modified function `aristo_serialise.serialise()`. 
+ ## + const info = "payloadAsBlob" + case pyl.pType: + of RawData: + pyl.rawBlob + of AccountData: + let key = block: + if pyl.stoID.isValid: + let rc = ps.db.getKeyRc (VertexID(1),pyl.stoID.vid) + if rc.isErr: + raiseAssert info & ": getKey => " & $rc.error + rc.value[0] + else: + VOID_HASH_KEY + + rlp.encode Account( + nonce: pyl.account.nonce, + balance: pyl.account.balance, + storageRoot: key.to(Hash256), + codeHash: pyl.account.codeHash) + of StoData: + rlp.encode pyl.stoData + + +func asExtension(b: Blob; path: Hash256): Blob = + var node = rlpFromBytes b + if node.listLen == 17: + let nibble = NibblesBuf.fromBytes(path.data)[0] + var wr = initRlpWriter() + + wr.startList(2) + wr.append NibblesBuf.fromBytes(@[nibble]).slice(1).toHexPrefix(isleaf=false) + wr.append node.listElem(nibble.int).toBytes + wr.finish() + + else: + b + +# ------------------------------------------------------------------------------ +# Private test functions +# ------------------------------------------------------------------------------ + +proc testCreatePortalProof(node: JsonNode, testStatusIMPL: var TestStatus) = + const info = "testCreateProofTwig" + + # Create partial database + let ps = node["state"].preLoadAristoDb() + + # Collect addresses from json structure + var addresses: HashSet[EthAddress] + node.collectAddresses addresses + + # Convert addresses to valid paths (not all addresses might work) + var sample: Table[Hash256,ProofData] + for a in addresses: + let + path = a.keccakHash + rc = path.hikeUp(VertexID(1), ps.db) + sample[path] = ProofData( + error: (if rc.isErr: rc.error[1] else: AristoError(0)), + hike: rc.to(Hike)) # keep `hike` for potential debugging + + # Verify that there is something to do at all + check 0 < sample.values.toSeq.filterIt(it.error == AristoError 0).len + + # Create proof chains + for (path,proof) in sample.pairs: + let rc = ps.db.partAccountTwig path + check rc.isOk == (proof.error == AristoError 0) + if rc.isOk: + proof.chain = rc.value + + # Verify proof chains + for (path,proof) in sample.pairs: + if proof.error == AristoError 0: + let + rVid = proof.hike.root + pyl = proof.hike.legs[^1].wp.vtx.lData.payloadAsBlob(ps) + + block: + # Use these root and chain + let chain = proof.chain + + # Create another partial database from tree + let pq = PartStateRef.init AristoDbRef.init() + pq.createPartDb(chain, info) + + # Create the same proof again which must give the same result as before + block: + let rc = pq.db.partAccountTwig path + check rc.isOk + if rc.isOk: + check rc.value == proof.chain + + # Verify proof + let root = pq.db.getKey((rVid,rVid)).to(Hash256) + block: + let rc = proof.chain.partUntwigPath(root, path) + check rc.isOk + if rc.isOk: + check rc.value == pyl + + # Just for completeness (same as above, combined into a single function) + check proof.chain.partUntwigPathOk(root, path, pyl).isOk + + # Extension nodes are rare, so one is created and inserted here, and the + # previous test is repeated. 
+ block: + let + ext = proof.chain[0].asExtension(path) + tail = @(proof.chain.toOpenArray(1,proof.chain.len-1)) + chain = @[ext] & tail + + # Create a third partial database from modified proof + let pq = PartStateRef.init AristoDbRef.init() + pq.createPartDb(chain, info) + + # Re-create proof again + block: + let rc = pq.db.partAccountTwig path + check rc.isOk + if rc.isOk: + check rc.value == chain + + let root = pq.db.getKey((rVid,rVid)).to(Hash256) + block: + let rc = chain.partUntwigPath(root, path) + check rc.isOk + if rc.isOk: + check rc.value == pyl + + check chain.partUntwigPathOk(root, path, pyl).isOk + +# ------------------------------------------------------------------------------ +# Test +# ------------------------------------------------------------------------------ + +suite "Encoding & verification of portal proof twigs for Aristo DB": + # Piggyback on tracer test suite environment + jsonTest("TracerTests", testCreatePortalProof) + +# ------------------------------------------------------------------------------ +# End +# ------------------------------------------------------------------------------ diff --git a/tests/test_aristo/test_tx.nim b/tests/test_aristo/test_tx.nim index 8e6822e9dc..0f9e373c1e 100644 --- a/tests/test_aristo/test_tx.nim +++ b/tests/test_aristo/test_tx.nim @@ -26,7 +26,6 @@ import aristo_get, aristo_hike, aristo_init/persistent, - aristo_layers, aristo_nearby, aristo_part, aristo_part/part_debug, @@ -42,9 +41,6 @@ type ## ( & "#" , (,)) const - MaxFilterBulk = 150_000 - ## Policy settig for `pack()` - testRootVid = VertexID(2) ## Need to reconfigure for the test, root ID 1 cannot be deleted as a trie @@ -121,26 +117,7 @@ proc innerCleanUp(db: var AristoDbRef): bool {.discardable.} = db = AristoDbRef(nil) true -proc innerCleanUp(ps: var PartStateRef): bool {.discardable.} = - if not ps.isNil: - if not ps.db.innerCleanUp(): - return false - ps = PartStateRef(nil) - true - -proc schedStow( - db: AristoDbRef; # Database - ): Result[void,AristoError] = - ## Scheduled storage - let - layersMeter = db.nLayersVtx() + db.nLayersKey() - filterMeter = if db.balancer.isNil: 0 - else: db.balancer.sTab.len + db.balancer.kMap.len - persistent = MaxFilterBulk < max(layersMeter, filterMeter) - if persistent: - db.persist() - else: - db.stow() +# -------------------------------- proc saveToBackend( tx: var AristoTxRef; @@ -197,52 +174,6 @@ proc saveToBackend( true -proc saveToBackendWithOops( - tx: var AristoTxRef; - noisy: bool; - debugID: int; - oops: (int,AristoError); - ): bool = - var db = tx.to(AristoDbRef) - - # Verify context: nesting level must be 2 (i.e. two transactions) - xCheck tx.level == 2 - - # Commit and hashify the current layer - block: - let rc = tx.commit() - xCheckRc rc.error == 0 - - block: - let rc = db.txTop() - xCheckRc rc.error == 0 - tx = rc.value - - # Verify context: nesting level must be 1 (i.e. 
one transaction) - xCheck tx.level == 1 - - # Commit and save to backend - block: - let rc = tx.commit() - xCheckRc rc.error == 0 - - block: - let rc = db.txTop() - xCheckErr rc.value.level < 0 # force error - - block: - let rc = db.schedStow() - xCheckRc rc.error == 0: - noisy.say "***", "saveToBackendWithOops(8)", - " debugID=", debugID, - "\n db\n ", db.pp(backendOk=true), - "" - - # Update layers to original level - tx = db.txBegin().value.to(AristoDbRef).txBegin().value - - true - proc fwdWalkVerify( db: AristoDbRef; @@ -500,114 +431,6 @@ proc testTxMergeAndDeleteSubTree*( true - -proc testTxMergeProofAndKvpList*( - noisy: bool; - list: openArray[ProofTrieData]; - rdbPath: string; # Rocks DB storage directory - resetDb = false; - idPfx = ""; - oops: KnownHasherFailure = @[]; - ): bool = - let - oopsTab = oops.toTable - var - ps = PartStateRef(nil) - tx = AristoTxRef(nil) - rootKey: Hash256 - count = 0 - defer: - if not ps.isNil: - ps.db.finish(eradicate=true) - - for n,w in list: - - # Start new database upon request - if resetDb or w.root != rootKey or w.proof.len == 0: - ps.innerCleanUp() - let db = block: - # New DB with disabled filter slots management - if 0 < rdbPath.len: - let (dbOpts, cfOpts) = DbOptions.init().toRocksDb() - let rc = AristoDbRef.init(RdbBackendRef, rdbPath, dbOpts, cfOpts, []) - xCheckRc rc.error == 0 - rc.value()[0] - else: - AristoDbRef.init(MemBackendRef) - ps = PartStateRef.init(db) - - # Start transaction (double frame for testing) - tx = ps.db.txBegin().value.to(AristoDbRef).txBegin().value - xCheck tx.isTop() - - # Update root - rootKey = w.root - count = 0 - count.inc - - let - db = ps.db - testId = idPfx & "#" & $w.id & "." & $n - runID = n - sTabLen = db.nLayersVtx() - leafs = w.kvpLst.mapRootVid testRootVid # merge into main trie - - if 0 < w.proof.len: - let rc = ps.partPut(w.proof, ForceGenericPayload) - xCheckRc rc.error == 0: - noisy.say "***", "testTxMergeProofAndKvpList (5)", - " <", n, "/", list.len-1, ">", - " runID=", runID, - " nGroup=", count, - " error=", rc.error, - " nProof=", w.proof.len, - "\n ps\n \n", ps.pp(), - "" - block: - let rc = ps.check() - xCheckRc rc.error == (0,0) - - when true and false: - noisy.say "***", "testTxMergeProofAndKvpList (6)", - " <", n, "/", list.len-1, ">", - " runID=", runID, - " nGroup=", count, - " nProof=", w.proof.len, - #"\n ps\n \n", ps.pp(), - "" - - for ltp in leafs: - block: - let rc = ps.partMergeGenericData( - ltp.leafTie.root, @(ltp.leafTie.path), ltp.payload.rawBlob) - xCheckRc rc.error == 0 - block: - let rc = ps.check() - xCheckRc rc.error == (0,0) - - when true and false: - noisy.say "***", "testTxMergeProofAndKvpList (7)", - " <", n, "/", list.len-1, ">", - " runID=", runID, - " nGroup=", count, - " nProof=", w.proof.len, - #"\n ps\n \n", ps.pp(), - "" - - block: - let - oops = oopsTab.getOrDefault(testId,(0,AristoError(0))) - saveBeOk = tx.saveToBackendWithOops(noisy=noisy, debugID=runID, oops) - xCheck saveBeOk - - when true and false: - noisy.say "***", "testTxMergeProofAndKvpList (9)", - " <", n, "/", list.len-1, ">", - " runID=", runID, - " nGroup=", count, " merged=", merged - - true - # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/tests/test_beacon/setup_env.nim b/tests/test_beacon/setup_env.nim index 6859109e83..e517d6204b 100644 --- a/tests/test_beacon/setup_env.nim +++ b/tests/test_beacon/setup_env.nim @@ -69,7 +69,6 @@ proc 
setupEnv*(extraValidation: bool = false, ccm: CCModify = nil): TestEnv = ) chain = newForkedChain(com, com.genesisHeader, extraValidation = extraValidation) - com.initializeEmptyDb() TestEnv( conf : conf, chain: chain, diff --git a/tests/test_coredb.nim b/tests/test_coredb.nim index acd5f10852..613f2d091d 100644 --- a/tests/test_coredb.nim +++ b/tests/test_coredb.nim @@ -185,8 +185,6 @@ proc initRunnerDB( params = params, pruneHistory = pruneHistory) - result.initializeEmptyDb - setErrorLevel() when CoreDbEnableApiTracking: coreDB.trackCoreDbApi = false diff --git a/tests/test_forked_chain.nim b/tests/test_forked_chain.nim index d0e48961bc..a1a25d395e 100644 --- a/tests/test_forked_chain.nim +++ b/tests/test_forked_chain.nim @@ -34,16 +34,12 @@ proc setupEnv(): TestEnv = TestEnv(conf: conf) proc newCom(env: TestEnv): CommonRef = - let - com = CommonRef.new( + CommonRef.new( newCoreDbRef DefaultDbMemory, env.conf.networkId, env.conf.networkParams ) - com.initializeEmptyDb() - com - proc makeBlk(com: CommonRef, number: BlockNumber, parentBlk: EthBlock): EthBlock = template parent(): BlockHeader = parentBlk.header diff --git a/tests/test_graphql.nim b/tests/test_graphql.nim index f1a847ce57..ef1a026ad1 100644 --- a/tests/test_graphql.nim +++ b/tests/test_graphql.nim @@ -73,7 +73,6 @@ proc setupChain(): CommonRef = CustomNet, customNetwork ) - com.initializeEmptyDb() let blocks = jn["blocks"] var headers: seq[BlockHeader] diff --git a/tests/test_ledger.nim b/tests/test_ledger.nim index 8a5f61c333..5308f88049 100644 --- a/tests/test_ledger.nim +++ b/tests/test_ledger.nim @@ -102,7 +102,6 @@ proc initEnv(): TestEnv = conf.networkId, conf.networkParams ) - com.initializeEmptyDb() TestEnv( com : com, diff --git a/tests/test_merge.nim b/tests/test_merge.nim index 28df17f863..a2a425d3f7 100644 --- a/tests/test_merge.nim +++ b/tests/test_merge.nim @@ -71,8 +71,6 @@ proc runTest(steps: Steps) = ) chainRef = newChain(com) - com.initializeEmptyDb() - var rpcServer = newRpcSocketServer(["127.0.0.1:0"]) client = newRpcSocketClient() diff --git a/tests/test_rpc.nim b/tests/test_rpc.nim index 0c4f0b31de..b721fced03 100644 --- a/tests/test_rpc.nim +++ b/tests/test_rpc.nim @@ -238,7 +238,6 @@ proc rpcMain*() = debugEcho unlock.error doAssert(unlock.isOk) - com.initializeEmptyDb() let env = setupEnv(com, signer, ks2, ctx) # Create Ethereum RPCs diff --git a/tests/test_rpc_getproofs_track_state_changes.nim b/tests/test_rpc_getproofs_track_state_changes.nim index ee32952a23..e6dda6730b 100644 --- a/tests/test_rpc_getproofs_track_state_changes.nim +++ b/tests/test_rpc_getproofs_track_state_changes.nim @@ -121,7 +121,6 @@ proc rpcGetProofsTrackStateChangesMain*() = let com = CommonRef.new(newCoreDbRef( DefaultDbPersistent, DATABASE_PATH, DbOptions.init())) - com.initializeEmptyDb() let blockHeader = waitFor client.eth_getBlockByNumber(blockId(START_BLOCK), false) diff --git a/tests/test_tracer_json.nim b/tests/test_tracer_json.nim index afe95cb1d6..975d0059e5 100644 --- a/tests/test_tracer_json.nim +++ b/tests/test_tracer_json.nim @@ -9,16 +9,16 @@ # or distributed except according to those terms. 
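All of the test fixtures above drop the former `com.initializeEmptyDb()` call, so constructing `CommonRef` is now sufficient on its own. A minimal sketch mirroring the updated `test_forked_chain.nim` fixture, where `conf: NimbusConf` is assumed to be set up as in those tests:

```nim
# Minimal sketch, assuming `conf: NimbusConf` as in the fixtures above;
# no initializeEmptyDb() call is needed any longer.
let com = CommonRef.new(
  newCoreDbRef DefaultDbMemory,   # fresh in-memory CoreDb backend
  conf.networkId,
  conf.networkParams)
```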
 
 import
-  std/[json, os, sets, tables, strutils],
+  std/[json, os, tables, strutils],
   stew/byteutils,
   chronicles,
   unittest2,
   results,
   ./test_helpers,
-  ../nimbus/sync/protocol/snap/snap_types,
-  ../nimbus/db/aristo/aristo_merge,
-  ../nimbus/db/kvt/kvt_utils,
   ../nimbus/db/aristo,
+  ../nimbus/db/aristo/[aristo_desc, aristo_layers, aristo_nearby, aristo_part],
+  ../nimbus/db/aristo/aristo_part/part_debug,
+  ../nimbus/db/kvt/kvt_utils,
   ../nimbus/[tracer, evm/types],
   ../nimbus/common/common
 
@@ -28,14 +28,17 @@ proc setErrorLevel {.used.} =
 
 proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) =
   ## Hack for `Aristo` pre-lading using the `snap` protocol proof-loader
+  const
+    info = "preLoadAristoDb"
   var
-    proof: seq[SnapProof]       # for pre-loading MPT
-    predRoot: Hash256           # from predecessor header
-    txRoot: Hash256             # header with block number `num`
-    rcptRoot: Hash256           # ditto
+    proof: seq[Blob]            # for pre-loading MPT
+    predRoot: Hash256           # from predecessor header
+    txRoot: Hash256             # header with block number `num`
+    rcptRoot: Hash256           # ditto
   let
-    adb = cdb.mpt
-    kdb = cdb.kvt
+    adb = cdb.ctx.mpt           # `Aristo` db
+    kdb = cdb.ctx.kvt           # `Kvt` db
+    ps = PartStateRef.init adb  # Partial DB descriptor
 
   # Fill KVT and collect `proof` data
   for (k,v) in jKvp.pairs:
@@ -45,7 +48,7 @@ proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) =
     if key.len == 32:
       doAssert key == val.keccakHash.data
       if val != @[0x80u8]:      # Exclude empty item
-        proof.add SnapProof(val)
+        proof.add val
     else:
       if key[0] == 0:
         try:
@@ -60,19 +63,62 @@ proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) =
           discard
     check kdb.put(key, val).isOk
 
-  # TODO: `getColumn(CtXyy)` does not exists anymore. There is only the generic
-  #       `MPT` left that can be retrieved with `getGeneric()`, optionally with
-  #       argument `clearData=true`
+  # Set up production MPT
+  ps.partPut(proof, AutomaticPayload).isOkOr:
+    raiseAssert info & ": partPut => " & $error
 
-  # Install sub-trie roots onto production db
+  # Handle transaction sub-tree
   if txRoot.isValid:
-    doAssert adb.mergeProof(txRoot, VertexID(CtTxs)).isOk
+    var txs: seq[Transaction]
+    for (key,pyl) in adb.rightPairs LeafTie(root: ps.partGetSubTree txRoot):
+      let
+        inx = key.path.to(UInt256).truncate(uint)
+        tx = rlp.decode(pyl.rawBlob, Transaction)
+      #
+      # FIXME: Might this be a bug in the test data?
+      #
+      #        The single-item test key is always `128`. For non-single test
+      #        lists, the keys are `1`, `2`, .., `N`, `128` (for some single
+      #        digit number `N`.)
+      #
+      #        Unless the `128` item value is put at the start of the argument
+      #        list `txs[]` for `persistTransactions()`, the `tracer` module
+      #        will throw an exception at
+      #        `doAssert(transactions.calcTxRoot == header.txRoot)` in the
+      #        function `traceTransactionImpl()`.
+      #
+      if (inx and 0x80) != 0:
+        txs = @[tx] & txs
+      else:
+        txs.add tx
+    cdb.persistTransactions(num, txRoot, txs)
+
+  # Handle receipts sub-tree
   if rcptRoot.isValid:
-    doAssert adb.mergeProof(rcptRoot, VertexID(CtReceipts)).isOk
-  doAssert adb.mergeProof(predRoot, VertexID(CtAccounts)).isOk
-
-  # Set up production MPT
-  doAssert adb.mergeProof(proof).isOk
+    var rcpts: seq[Receipt]
+    for (key,pyl) in adb.rightPairs LeafTie(root: ps.partGetSubTree rcptRoot):
+      let
+        inx = key.path.to(UInt256).truncate(uint)
+        rcpt = rlp.decode(pyl.rawBlob, Receipt)
+      # FIXME: See comment at `txRoot` section.
+      if (inx and 0x80) != 0:
+        rcpts = @[rcpt] & rcpts
+      else:
+        rcpts.add rcpt
+    cdb.persistReceipts(rcptRoot, rcpts)
+
+  # Save keys to database
+  for (rvid,key) in ps.vkPairs:
+    adb.layersPutKey(rvid, key)
+
+  ps.check().isOkOr:
+    raiseAssert info & ": check => " & $error
+
+  #echo ">>> preLoadAristoDb (9)",
+  #  "\n    ps\n    ", ps.pp(byKeyOk=false,byVidOk=false),
+  #  ""
+  # -----------
+  #if true: quit()
 
 # use tracerTestGen.nim to generate additional test data
 proc testFixtureImpl(node: JsonNode, testStatusIMPL: var TestStatus, memoryDB: CoreDbRef) =
@@ -98,15 +144,25 @@ proc testFixtureImpl(node: JsonNode, testStatusIMPL: var TestStatus, memoryDB: C
 
   let stateDump = dumpBlockState(com, blk)
   let blockTrace = traceBlock(com, blk, {DisableState})
 
+  # Fix hex representation
+  for inx in 0 ..< node["txTraces"].len:
+    for key in ["beforeRoot", "afterRoot"]:
+      # Here, `node["txTraces"]` stores a string while `txTraces` uses a
+      # `Hash256` which might expand to a different upper/lower case.
+      var strHash = txTraces[inx]["stateDiff"][key].getStr.toUpperAscii
+      if strHash.len < 64:
+        strHash = '0'.repeat(64 - strHash.len) & strHash
+      txTraces[inx]["stateDiff"][key] = %(strHash)
+
   check node["txTraces"] == txTraces
   check node["stateDump"] == stateDump
   check node["blockTrace"] == blockTrace
+
   for i in 0 ..< receipts.len:
     let receipt = receipts[i]
     let stateDiff = txTraces[i]["stateDiff"]
     check receipt["root"].getStr().toLowerAscii() == stateDiff["afterRoot"].getStr().toLowerAscii()
 
-
 proc testFixtureAristo(node: JsonNode, testStatusIMPL: var TestStatus) =
   node.testFixtureImpl(testStatusIMPL, newCoreDbRef AristoDbMemory)
diff --git a/tests/test_txpool.nim b/tests/test_txpool.nim
index b2f4058c0e..0c1e98a04b 100644
--- a/tests/test_txpool.nim
+++ b/tests/test_txpool.nim
@@ -52,7 +52,7 @@ var
   statCount: array[TxItemStatus,int] # per status bucket
 
   txList: seq[TxItemRef]
-  effGasTips: seq[GasPriceEx]
+  effGasTips: seq[GasPrice]
 
   # Running block chain
   bcCom: CommonRef
@@ -484,16 +484,16 @@ proc runTxPackerTests(noisy = true) =
 
     test "Calculate some non-trivial base fee":
       var
-        feesList = SortedSet[GasPriceEx,bool].init()
+        feesList = SortedSet[GasPrice,bool].init()
 
       # provide a sorted list of gas fees
       for item in txList:
        discard feesList.insert(item.tx.effectiveGasTip(0.GasPrice))
 
       let
-        minKey = max(0, feesList.ge(GasPriceEx.low).value.key.int64)
-        lowKey = feesList.gt(minKey.GasPriceEx).value.key.uint64
-        highKey = feesList.le(GasPriceEx.high).value.key.uint64
+        minKey = max(0, feesList.ge(GasPrice.low).value.key.int64)
+        lowKey = feesList.gt(minKey.GasPrice).value.key.uint64
+        highKey = feesList.le(GasPrice.high).value.key.uint64
         keyRange = highKey - lowKey
         keyStep = max(1u64, keyRange div 500_000)
 
@@ -505,7 +505,7 @@ proc runTxPackerTests(noisy = true) =
 
       # the following might throw an exception if the table is de-generated
       var nextKey = ntBaseFee
      for _ in [1, 2, 3]:
-        let rcNextKey = feesList.gt(nextKey.GasPriceEx)
+        let rcNextKey = feesList.gt(nextKey.GasPrice)
         check rcNextKey.isOk
         nextKey = rcNextKey.value.key.uint64.GasPrice
diff --git a/tests/test_txpool/helpers.nim b/tests/test_txpool/helpers.nim
index 2058617e6e..24348d048a 100644
--- a/tests/test_txpool/helpers.nim
+++ b/tests/test_txpool/helpers.nim
@@ -10,8 +10,8 @@
 
 import
   std/[os, strformat, sequtils, strutils, times],
-  ../../nimbus/core/tx_pool/[tx_chain, tx_desc, tx_item, tx_tabs],
-  ../../nimbus/core/tx_pool/tx_tasks/[tx_packer, tx_recover],
+  ../../nimbus/core/tx_pool/[tx_desc, tx_item, tx_tabs],
+  ../../nimbus/core/tx_pool/tx_tasks/[tx_recover],
   ../replay/[pp, undump_blocks_gz],
   chronicles,
   eth/[common, keys],
@@ -22,21 +22,13 @@ import
 # to import `tx_pool/*` sup-modules
 export
   pp,
-  tx_chain.clearAccounts,
-  tx_chain.com,
-  tx_chain.nextFork,
-  tx_chain.vmState,
-  tx_desc.chain,
   tx_desc.txDB,
-  tx_desc.verify,
-  tx_packer.packerVmExec,
   tx_recover.recoverItem,
   tx_tabs.TxTabsRef,
   tx_tabs.decAccount,
   tx_tabs.dispose,
   tx_tabs.eq,
   tx_tabs.flushRejects,
-  tx_tabs.gasLimits,
   tx_tabs.ge,
   tx_tabs.gt,
   tx_tabs.incAccount,
@@ -47,7 +39,6 @@ export
   tx_tabs.nItems,
   tx_tabs.reassign,
   tx_tabs.reject,
-  tx_tabs.verify,
   undumpBlocksGz
 
 const
@@ -165,9 +156,6 @@ proc pp*(txs: openArray[Transaction]; pfxLen: int): string =
 proc pp*(w: TxTabsItemsCount): string =
   &"{w.pending}/{w.staged}/{w.packed}:{w.total}/{w.disposed}"
 
-proc pp*(w: TxTabsGasTotals): string =
-  &"{w.pending}/{w.staged}/{w.packed}"
-
 # ------------------------------------------------------------------------------
 # Public functions, other
 # ------------------------------------------------------------------------------
diff --git a/tests/test_txpool/setup.nim b/tests/test_txpool/setup.nim
index 8068a9ad1f..f3ea808202 100644
--- a/tests/test_txpool/setup.nim
+++ b/tests/test_txpool/setup.nim
@@ -96,7 +96,6 @@ proc setupTxPool*(getStatus: proc(): TxItemStatus): (CommonRef, TxPoolRef, int)
     conf.networkParams
   )
 
-  com.initializeEmptyDb()
   let txPool = TxPoolRef.new(com)
 
   for n, tx in txEnv.txs:
diff --git a/tests/test_txpool2.nim b/tests/test_txpool2.nim
index e001620c7e..d24f38972c 100644
--- a/tests/test_txpool2.nim
+++ b/tests/test_txpool2.nim
@@ -16,9 +16,11 @@ import
   ../nimbus/core/chain,
   ../nimbus/[config, transaction, constants],
   ../nimbus/core/tx_pool,
+  ../nimbus/core/tx_pool/tx_desc,
   ../nimbus/core/casper,
   ../nimbus/common/common,
   ../nimbus/utils/utils,
+  ../nimbus/evm/types,
   ./test_txpool/helpers,
   ./macro_assembler
 
@@ -38,7 +40,7 @@ type
     xp  : TxPoolRef
 
 const
-  signerKeyHex = "9c647b8b7c4e7c3490668fb6c11473619db80c93704c70893d3813af4090c39c"
+  # signerKeyHex = "9c647b8b7c4e7c3490668fb6c11473619db80c93704c70893d3813af4090c39c"
   vaultKeyHex = "63b508a03c3b5937ceb903af8b1b0c191012ef6eb7e9c3fb7afa94e5d214d376"
   recipient = hexToByteArray[20]("0000000000000000000000000000000000000318")
   feeRecipient = hexToByteArray[20]("0000000000000000000000000000000000000212")
@@ -110,8 +112,6 @@ proc initEnv(envFork: HardFork): TestEnv =
   )
   chain = newChain(com)
 
-  com.initializeEmptyDb()
-
   result = TestEnv(
     conf: conf,
     com: com,
@@ -212,7 +212,8 @@ proc runTxPoolBlobhashTest() =
       check false
       return
 
-    blk = r.get.blk
+    let bundle = r.get
+    blk = bundle.blk
     check com.isBlockAfterTtd(blk.header)
 
     body = BlockBody(
@@ -222,6 +223,14 @@ proc runTxPoolBlobhashTest() =
     )
     check blk.txs.len == 2
 
+    let
+      gasUsed1 = xp.vmState.receipts[0].cumulativeGasUsed
+      gasUsed2 = xp.vmState.receipts[1].cumulativeGasUsed - gasUsed1
+      blockValue = gasUsed1.u256 * tx1.effectiveGasTip(blk.header.baseFeePerGas).u256 +
+        gasUsed2.u256 * tx2.effectiveGasTip(blk.header.baseFeePerGas).u256
+
+    check blockValue == bundle.blockValue
+
   test "Blobhash persistBlocks":
     let rr = chain.persistBlocks([EthBlock.init(blk.header, body)])
     check rr.isOk()
diff --git a/vendor/nim-bncurve b/vendor/nim-bncurve
index 9c10dec560..b88ed93a44 160000
--- a/vendor/nim-bncurve
+++ b/vendor/nim-bncurve
@@ -1 +1 @@
-Subproject commit 9c10dec5607bc52d176e05060ff288581e8144c9
+Subproject commit b88ed93a443c218d31206d61c3e632cb183034cc
diff --git a/vendor/nim-eth b/vendor/nim-eth
index ebfe63b9b6..56f72c7a66 160000
--- a/vendor/nim-eth
+++ b/vendor/nim-eth
@@ -1 +1 @@
-Subproject commit ebfe63b9b6523a1823e4505f0972d81047a77cf5
+Subproject commit 56f72c7a664f8b3ca5938c7de0a95a8fed8c7efa
diff --git a/vendor/portal-spec-tests b/vendor/portal-spec-tests
index 4254dac8ce..92b5a99e74 160000
--- a/vendor/portal-spec-tests
+++ b/vendor/portal-spec-tests
@@ -1 +1 @@
-Subproject commit 4254dac8ce1cbe28fc4704d92aa6809c73451c20
+Subproject commit 92b5a99e748a964fcc5c59e0c6fef248ccdd88f4
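
Note on the `0x80` re-ordering in `preLoadAristoDb` (tests/test_tracer_json.nim above): the rule is easier to see in isolation. The sketch below is illustrative only and not part of the patch; `demoOrder` is a hypothetical helper name. It shows how the leaf item keyed `128` is moved to the front while all other items keep their traversal order, which is what keeps `calcTxRoot` consistent with the block header.

```nim
# Hypothetical stand-alone illustration (not part of the patch): leaf keys
# arrive as `1`, `2`, .., `N`, `128`; the item keyed `128` must end up first.
proc demoOrder[T](pairs: openArray[(uint, T)]): seq[T] =
  for (inx, item) in pairs:
    if (inx and 0x80) != 0:
      result = @[item] & result # `128` key: move to the front
    else:
      result.add item           # ordinary keys keep their order

when isMainModule:
  # keys 1,2 plus the `128` item that belongs at index 0
  doAssert demoOrder([(1u, "a"), (2u, "b"), (128u, "x")]) == @["x", "a", "b"]
  doAssert demoOrder([(128u, "only")]) == @["only"]
```

The same normalisation is applied to the receipts loop in the patch.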