diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py
index d68e297554a2ef..b9db91f68d3c0e 100755
--- a/test/functional/feature_assumeutxo.py
+++ b/test/functional/feature_assumeutxo.py
@@ -1,748 +1,748 @@
-#!/usr/bin/env python3
-# Copyright (c) 2021-present The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test for assumeutxo, a means of quickly bootstrapping a node using
-a serialized version of the UTXO set at a certain height, which corresponds
-to a hash that has been compiled into bitcoind.
-
-The assumeutxo value generated and used here is committed to in
-`CRegTestParams::m_assumeutxo_data` in `src/kernel/chainparams.cpp`.
-"""
-import time
-from shutil import rmtree
-
-from dataclasses import dataclass
-from test_framework.blocktools import (
-    create_block,
-    create_coinbase
-)
-from test_framework.messages import (
-    CBlockHeader,
-    from_hex,
-    msg_headers,
-    tx_from_hex
-)
-from test_framework.p2p import (
-    P2PInterface,
-)
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (
-    assert_approx,
-    assert_equal,
-    assert_not_equal,
-    assert_raises_rpc_error,
-    sha256sum_file,
-    try_rpc,
-)
-from test_framework.wallet import (
-    getnewdestination,
-    MiniWallet,
-)
-
-START_HEIGHT = 199
-SNAPSHOT_BASE_HEIGHT = 299
-FINAL_HEIGHT = 399
-COMPLETE_IDX = {'synced': True, 'best_block_height': FINAL_HEIGHT}
-
-
-class AssumeutxoTest(BitcoinTestFramework):
-
-    def set_test_params(self):
-        """Use the pregenerated, deterministic chain up to height 199."""
-        self.num_nodes = 4
-        self.rpc_timeout = 120
-        self.extra_args = [
-            [],
-            ["-fastprune", "-prune=1", "-blockfilterindex=1", "-coinstatsindex=1"],
-            ["-persistmempool=0","-txindex=1", "-blockfilterindex=1", "-coinstatsindex=1"],
-            []
-        ]
-
-    def setup_network(self):
-        """Start with the nodes disconnected so that one can generate a snapshot
-        including blocks the other hasn't yet seen."""
-        self.add_nodes(4)
-        self.start_nodes(extra_args=self.extra_args)
-
-    def test_invalid_snapshot_scenarios(self, valid_snapshot_path):
-        self.log.info("Test different scenarios of loading invalid snapshot files")
-        with open(valid_snapshot_path, 'rb') as f:
-            valid_snapshot_contents = f.read()
-        bad_snapshot_path = valid_snapshot_path + '.mod'
-        node = self.nodes[1]
-
-        def expected_error(msg):
-            assert_raises_rpc_error(-32603, f"Unable to load UTXO snapshot: Population failed: {msg}", node.loadtxoutset, bad_snapshot_path)
-
-        self.log.info("  - snapshot file with invalid file magic")
-        parsing_error_code = -22
-        bad_magic = 0xf00f00f000
-        with open(bad_snapshot_path, 'wb') as f:
-            f.write(bad_magic.to_bytes(5, "big") + valid_snapshot_contents[5:])
-        assert_raises_rpc_error(parsing_error_code, "Unable to parse metadata: Invalid UTXO set snapshot magic bytes. Please check if this is indeed a snapshot file or if you are using an outdated snapshot format.", node.loadtxoutset, bad_snapshot_path)
-
-        self.log.info("  - snapshot file with unsupported version")
-        for version in [0, 1, 3]:
-            with open(bad_snapshot_path, 'wb') as f:
-                f.write(valid_snapshot_contents[:5] + version.to_bytes(2, "little") + valid_snapshot_contents[7:])
-            assert_raises_rpc_error(parsing_error_code, f"Unable to parse metadata: Version of snapshot {version} does not match any of the supported versions.", node.loadtxoutset, bad_snapshot_path)
-
-        self.log.info("  - snapshot file with mismatching network magic")
-        invalid_magics = [
-            # magic, name, real
-            [0xf9beb4d9, "main", True],
-            [0x0b110907, "test", True],
-            [0x0a03cf40, "signet", True],
-            [0x00000000, "", False],
-            [0xffffffff, "", False],
-        ]
-        for [magic, name, real] in invalid_magics:
-            with open(bad_snapshot_path, 'wb') as f:
-                f.write(valid_snapshot_contents[:7] + magic.to_bytes(4, 'big') + valid_snapshot_contents[11:])
-            if real:
-                assert_raises_rpc_error(parsing_error_code, f"Unable to parse metadata: The network of the snapshot ({name}) does not match the network of this node (regtest).", node.loadtxoutset, bad_snapshot_path)
-            else:
-                assert_raises_rpc_error(parsing_error_code, "Unable to parse metadata: This snapshot has been created for an unrecognized network. This could be a custom signet, a new testnet or possibly caused by data corruption.", node.loadtxoutset, bad_snapshot_path)
-
-        self.log.info("  - snapshot file referring to a block that is not in the assumeutxo parameters")
-        prev_block_hash = self.nodes[0].getblockhash(SNAPSHOT_BASE_HEIGHT - 1)
-        bogus_block_hash = "0" * 64  # Represents any unknown block hash
-        for bad_block_hash in [bogus_block_hash, prev_block_hash]:
-            with open(bad_snapshot_path, 'wb') as f:
-                f.write(valid_snapshot_contents[:11] + bytes.fromhex(bad_block_hash)[::-1] + valid_snapshot_contents[43:])
-
-            msg = f"Unable to load UTXO snapshot: assumeutxo block hash in snapshot metadata not recognized (hash: {bad_block_hash}). The following snapshot heights are available: 110, 200, 299."
-            assert_raises_rpc_error(-32603, msg, node.loadtxoutset, bad_snapshot_path)
-
-        self.log.info("  - snapshot file with wrong number of coins")
-        valid_num_coins = int.from_bytes(valid_snapshot_contents[43:43 + 8], "little")
-        for off in [-1, +1]:
-            with open(bad_snapshot_path, 'wb') as f:
-                f.write(valid_snapshot_contents[:43])
-                f.write((valid_num_coins + off).to_bytes(8, "little"))
-                f.write(valid_snapshot_contents[43 + 8:])
-            expected_error(msg="Bad snapshot - coins left over after deserializing 298 coins." if off == -1 else "Bad snapshot format or truncated snapshot after deserializing 299 coins.")
-
-        self.log.info("  - snapshot file with alternated but parsable UTXO data results in different hash")
-        cases = [
-            # (content, offset, wrong_hash, custom_message)
-            [b"\xff" * 32, 0, "7d52155c9a9fdc4525b637ef6170568e5dad6fabd0b1fdbb9432010b8453095b", None],  # wrong outpoint hash
-            [(2).to_bytes(1, "little"), 32, None, "Bad snapshot data after deserializing 1 coins."],  # wrong txid coins count
-            [b"\xfd\xff\xff", 32, None, "Mismatch in coins count in snapshot metadata and actual snapshot data"],  # txid coins count exceeds coins left
-            [b"\x01", 33, "9f4d897031ab8547665b4153317ae2fdbf0130c7840b66427ebc48b881cb80ad", None],  # wrong outpoint index
-            [b"\x81", 34, "3da966ba9826fb6d2604260e01607b55ba44e1a5de298606b08704bc62570ea8", None],  # wrong coin code VARINT
-            [b"\x80", 34, "091e893b3ccb4334378709578025356c8bcb0a623f37c7c4e493133c988648e5", None],  # another wrong coin code
-            [b"\x84\x58", 34, None, "Bad snapshot data after deserializing 0 coins"],  # wrong coin case with height 364 and coinbase 0
-            [b"\xCA\xD2\x8F\x5A", 39, None, "Bad snapshot data after deserializing 0 coins - bad tx out value"],  # Amount exceeds MAX_MONEY
-        ]
-
-        for content, offset, wrong_hash, custom_message in cases:
-            with open(bad_snapshot_path, "wb") as f:
-                # Prior to offset: Snapshot magic, snapshot version, network magic, hash, coins count
-                f.write(valid_snapshot_contents[:(5 + 2 + 4 + 32 + 8 + offset)])
-                f.write(content)
-                f.write(valid_snapshot_contents[(5 + 2 + 4 + 32 + 8 + offset + len(content)):])
-
-            msg = custom_message if custom_message is not None else f"Bad snapshot content hash: expected a4bf3407ccb2cc0145c49ebba8fa91199f8a3903daf0883875941497d2493c27, got {wrong_hash}."
-            expected_error(msg)
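The byte offsets used throughout the scenarios above imply a fixed metadata prefix on the snapshot file: a 5-byte file magic, a 2-byte little-endian version, a 4-byte network magic, a 32-byte base block hash stored byte-reversed, and an 8-byte little-endian coin count. A minimal sketch of a parser for that 51-byte prefix, for orientation only (the helper name is ours, not part of the test framework):

    def parse_snapshot_metadata(path):
        # Layout inferred from the offsets manipulated in the test above.
        with open(path, 'rb') as f:
            raw = f.read(5 + 2 + 4 + 32 + 8)
        magic = raw[:5]                                   # file magic
        version = int.from_bytes(raw[5:7], "little")      # snapshot version
        network_magic = raw[7:11]                         # chain identifier
        base_block_hash = raw[11:43][::-1].hex()          # reverse to display order
        num_coins = int.from_bytes(raw[43:51], "little")  # UTXO count
        return magic, version, network_magic, base_block_hash, num_coins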
-
-    def test_headers_not_synced(self, valid_snapshot_path):
-        for node in self.nodes[1:]:
-            msg = "Unable to load UTXO snapshot: The base block header (3bb7ce5eba0be48939b7a521ac1ba9316afee2c7bada3a0cca24188e6d7d96c0) must appear in the headers chain. Make sure all headers are syncing, and call loadtxoutset again."
-            assert_raises_rpc_error(-32603, msg, node.loadtxoutset, valid_snapshot_path)
-
-    def test_invalid_chainstate_scenarios(self):
-        self.log.info("Test different scenarios of invalid snapshot chainstate in datadir")
-
-        self.log.info("  - snapshot chainstate referring to a block that is not in the assumeutxo parameters")
-        self.stop_node(0)
-        chainstate_snapshot_path = self.nodes[0].chain_path / "chainstate_snapshot"
-        chainstate_snapshot_path.mkdir()
-        with open(chainstate_snapshot_path / "base_blockhash", 'wb') as f:
-            f.write(b'z' * 32)
-
-        def expected_error(log_msg="", error_msg=""):
-            with self.nodes[0].assert_debug_log([log_msg]):
-                self.nodes[0].assert_start_raises_init_error(expected_msg=error_msg)
-
-        expected_error_msg = f"Error: A fatal internal error occurred, see debug.log for details: Assumeutxo data not found for the given blockhash '7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a'."
-        error_details = f"Assumeutxo data not found for the given blockhash"
-        expected_error(log_msg=error_details, error_msg=expected_error_msg)
-
-        # resurrect node again
-        rmtree(chainstate_snapshot_path)
-        self.start_node(0)
-
-    def test_invalid_mempool_state(self, dump_output_path):
-        self.log.info("Test bitcoind should fail when mempool not empty.")
-        node=self.nodes[2]
-        tx = MiniWallet(node).send_self_transfer(from_node=node)
-
-        assert tx['txid'] in node.getrawmempool()
-
-        # Attempt to load the snapshot on Node 2 and expect it to fail
-        msg = "Unable to load UTXO snapshot: Can't activate a snapshot when mempool not empty"
-        assert_raises_rpc_error(-32603, msg, node.loadtxoutset, dump_output_path)
-
-        self.restart_node(2, extra_args=self.extra_args[2])
-
-    def test_invalid_file_path(self):
-        self.log.info("Test bitcoind should fail when file path is invalid.")
-        node = self.nodes[0]
-        path = node.datadir_path / node.chain / "invalid" / "path"
-        assert_raises_rpc_error(-8, "Couldn't open file {} for reading.".format(path), node.loadtxoutset, path)
-
-    def test_snapshot_with_less_work(self, dump_output_path):
-        self.log.info("Test bitcoind should fail when snapshot has less accumulated work than this node.")
-        node = self.nodes[0]
-        msg = "Unable to load UTXO snapshot: Population failed: Work does not exceed active chainstate."
-        assert_raises_rpc_error(-32603, msg, node.loadtxoutset, dump_output_path)
-
-    def test_snapshot_block_invalidated(self, dump_output_path):
-        self.log.info("Test snapshot is not loaded when base block is invalid.")
-        node = self.nodes[0]
-        # We are testing the case where the base block is invalidated itself
-        # and also the case where one of its parents is invalidated.
-        for height in [SNAPSHOT_BASE_HEIGHT, SNAPSHOT_BASE_HEIGHT - 1]:
-            block_hash = node.getblockhash(height)
-            node.invalidateblock(block_hash)
-            assert_equal(node.getblockcount(), height - 1)
-            msg = "Unable to load UTXO snapshot: The base block header (3bb7ce5eba0be48939b7a521ac1ba9316afee2c7bada3a0cca24188e6d7d96c0) is part of an invalid chain."
-            assert_raises_rpc_error(-32603, msg, node.loadtxoutset, dump_output_path)
-            node.reconsiderblock(block_hash)
-
-    def test_snapshot_in_a_divergent_chain(self, dump_output_path):
-        n0 = self.nodes[0]
-        n3 = self.nodes[3]
-        assert_equal(n0.getblockcount(), FINAL_HEIGHT)
-        assert_equal(n3.getblockcount(), START_HEIGHT)
-
-        self.log.info("Check importing a snapshot where current chain-tip is not an ancestor of the snapshot block but has less work")
-        # Generate a divergent chain in n3 up to 298
-        self.generate(n3, nblocks=99, sync_fun=self.no_op)
-        assert_equal(n3.getblockcount(), SNAPSHOT_BASE_HEIGHT - 1)
-
-        # Try importing the snapshot and assert its success
-        loaded = n3.loadtxoutset(dump_output_path)
-        assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
-        normal, snapshot = n3.getchainstates()["chainstates"]
-        assert_equal(normal['blocks'], START_HEIGHT + 99)
-        assert_equal(snapshot['blocks'], SNAPSHOT_BASE_HEIGHT)
-
-        # Now let's sync the nodes and wait for the background validation to finish
-        self.connect_nodes(0, 3)
-        self.sync_blocks(nodes=(n0, n3))
-        self.wait_until(lambda: len(n3.getchainstates()['chainstates']) == 1)
-
-    def test_snapshot_not_on_most_work_chain(self, dump_output_path):
-        self.log.info("Test snapshot is not loaded when the node knows the headers of another chain with more work.")
-        node0 = self.nodes[0]
-        node1 = self.nodes[1]
-        # Create an alternative chain of 2 new blocks, forking off the main chain at the block before the snapshot block.
-        # This simulates a longer chain than the main chain when submitting these two block headers to node 1 because it is only aware of
-        # the main chain headers up to the snapshot height.
-        parent_block_hash = node0.getblockhash(SNAPSHOT_BASE_HEIGHT - 1)
-        block_time = node0.getblock(node0.getbestblockhash())['time'] + 1
-        fork_block1 = create_block(int(parent_block_hash, 16), create_coinbase(SNAPSHOT_BASE_HEIGHT), block_time)
-        fork_block1.solve()
-        fork_block2 = create_block(fork_block1.sha256, create_coinbase(SNAPSHOT_BASE_HEIGHT + 1), block_time + 1)
-        fork_block2.solve()
-        node1.submitheader(fork_block1.serialize().hex())
-        node1.submitheader(fork_block2.serialize().hex())
-        msg = "A forked headers-chain with more work than the chain with the snapshot base block header exists. Please proceed to sync without AssumeUtxo."
-        assert_raises_rpc_error(-32603, msg, node1.loadtxoutset, dump_output_path)
-        # Cleanup: submit two more headers of the snapshot chain to node 1, so that it is the most-work chain again and loading
-        # the snapshot in future subtests succeeds
-        main_block1 = node0.getblock(node0.getblockhash(SNAPSHOT_BASE_HEIGHT + 1), 0)
-        main_block2 = node0.getblock(node0.getblockhash(SNAPSHOT_BASE_HEIGHT + 2), 0)
-        node1.submitheader(main_block1)
-        node1.submitheader(main_block2)
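Why two fork headers are enough in the test above: on regtest every header satisfies the same minimum difficulty, so headers-chain work grows by a fixed amount per header and the higher tip carries more total work. A sketch of the comparison, with heights taken from the test (the variable names are ours):

    # Fork point is the block before the snapshot base (height 298).
    main_tip_height = SNAPSHOT_BASE_HEIGHT            # 299: headers node1 already knows
    fork_tip_height = (SNAPSHOT_BASE_HEIGHT - 1) + 2  # 300: fork point plus 2 fork headers
    assert fork_tip_height > main_tip_height          # equal work per header => more work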
-
-    def test_sync_from_assumeutxo_node(self, snapshot):
-        """
-        This test verifies that:
-        1. An IBD node can sync headers from an AssumeUTXO node at any time.
-        2. IBD nodes do not request historical blocks from AssumeUTXO nodes while they are syncing the background-chain.
-        3. The assumeUTXO node dynamically adjusts the network services it offers according to its state.
-        4. IBD nodes can fully sync from AssumeUTXO nodes after they finish the background-chain sync.
-        """
-        self.log.info("Testing IBD-sync from assumeUTXO node")
-        # Node2 starts clean and loads the snapshot.
-        # Node3 starts clean and seeks to sync-up from snapshot_node.
-        miner = self.nodes[0]
-        snapshot_node = self.nodes[2]
-        ibd_node = self.nodes[3]
-
-        # Start test fresh by cleaning up node directories
-        for node in (snapshot_node, ibd_node):
-            self.stop_node(node.index)
-            rmtree(node.chain_path)
-            self.start_node(node.index, extra_args=self.extra_args[node.index])
-
-        # Sync-up headers chain on snapshot_node to load snapshot
-        headers_provider_conn = snapshot_node.add_p2p_connection(P2PInterface())
-        headers_provider_conn.wait_for_getheaders()
-        msg = msg_headers()
-        for block_num in range(1, miner.getblockcount()+1):
-            msg.headers.append(from_hex(CBlockHeader(), miner.getblockheader(miner.getblockhash(block_num), verbose=False)))
-        headers_provider_conn.send_message(msg)
-
-        # Ensure headers arrived
-        default_value = {'status': ''}  # No status
-        headers_tip_hash = miner.getbestblockhash()
-        self.wait_until(lambda: next(filter(lambda x: x['hash'] == headers_tip_hash, snapshot_node.getchaintips()), default_value)['status'] == "headers-only")
-        snapshot_node.disconnect_p2ps()
-
-        # Load snapshot
-        snapshot_node.loadtxoutset(snapshot['path'])
-
-        # Connect nodes and verify the ibd_node can sync-up the headers-chain from the snapshot_node
-        self.connect_nodes(ibd_node.index, snapshot_node.index)
-        snapshot_block_hash = snapshot['base_hash']
-        self.wait_until(lambda: next(filter(lambda x: x['hash'] == snapshot_block_hash, ibd_node.getchaintips()), default_value)['status'] == "headers-only")
-
-        # Once the headers-chain is synced, the ibd_node must avoid requesting historical blocks from the snapshot_node.
-        # If it does request such blocks, the snapshot_node will ignore requests it cannot fulfill, causing the ibd_node
-        # to stall. This stall could last for up to 10 min, ultimately resulting in an abrupt disconnection due to the
-        # ibd_node's perceived unresponsiveness.
-        time.sleep(3)  # Sleep here because we can't detect when a node avoids requesting blocks from another peer.
-        assert_equal(len(ibd_node.getpeerinfo()[0]['inflight']), 0)
-
-        # Now disconnect nodes and finish background chain sync
-        self.disconnect_nodes(ibd_node.index, snapshot_node.index)
-        self.connect_nodes(snapshot_node.index, miner.index)
-        self.sync_blocks(nodes=(miner, snapshot_node))
-        # Check the base snapshot block was stored and ensure node signals full-node service support
-        self.wait_until(lambda: not try_rpc(-1, "Block not available (not fully downloaded)", snapshot_node.getblock, snapshot_block_hash))
-        self.wait_until(lambda: 'NETWORK' in snapshot_node.getnetworkinfo()['localservicesnames'])
-
-        # Now that the snapshot_node is synced, verify the ibd_node can sync from it
-        self.connect_nodes(snapshot_node.index, ibd_node.index)
-        assert 'NETWORK' in ibd_node.getpeerinfo()[0]['servicesnames']
-        self.sync_blocks(nodes=(ibd_node, snapshot_node))
-
-    def assert_only_network_limited_service(self, node):
-        node_services = node.getnetworkinfo()['localservicesnames']
-        assert 'NETWORK' not in node_services
-        assert 'NETWORK_LIMITED' in node_services
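Several places in this file wait for the node's advertised services to change as the background sync progresses. A hypothetical standalone helper showing the same polling pattern (the name and timeout are ours; the test itself uses self.wait_until over getnetworkinfo):

    import time

    def wait_for_local_services(node, wanted, timeout=60):
        # Poll getnetworkinfo until all wanted service names are advertised,
        # e.g. wanted={'NETWORK', 'NETWORK_LIMITED'} after background sync completes.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if wanted.issubset(node.getnetworkinfo()['localservicesnames']):
                return
            time.sleep(0.5)
        raise AssertionError(f"timed out waiting for services {wanted}")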
- """ - n0 = self.nodes[0] - n1 = self.nodes[1] - n2 = self.nodes[2] - n3 = self.nodes[3] - - self.mini_wallet = MiniWallet(n0) - - # Mock time for a deterministic chain - for n in self.nodes: - n.setmocktime(n.getblockheader(n.getbestblockhash())['time']) - - # Generate a series of blocks that `n0` will have in the snapshot, - # but that n1 and n2 don't yet see. - assert n0.getblockcount() == START_HEIGHT - blocks = {START_HEIGHT: Block(n0.getbestblockhash(), 1, START_HEIGHT + 1)} - for i in range(100): - block_tx = 1 - if i % 3 == 0: - self.mini_wallet.send_self_transfer(from_node=n0) - block_tx += 1 - self.generate(n0, nblocks=1, sync_fun=self.no_op) - height = n0.getblockcount() - hash = n0.getbestblockhash() - blocks[height] = Block(hash, block_tx, blocks[height-1].chain_tx + block_tx) - if i == 4: - # Create a stale block that forks off the main chain before the snapshot. - temp_invalid = n0.getbestblockhash() - n0.invalidateblock(temp_invalid) - stale_hash = self.generateblock(n0, output="raw(aaaa)", transactions=[], sync_fun=self.no_op)["hash"] - n0.invalidateblock(stale_hash) - n0.reconsiderblock(temp_invalid) - stale_block = n0.getblock(stale_hash, 0) - - - self.log.info("-- Testing assumeutxo + some indexes + pruning") - - assert_equal(n0.getblockcount(), SNAPSHOT_BASE_HEIGHT) - assert_equal(n1.getblockcount(), START_HEIGHT) - - self.log.info(f"Creating a UTXO snapshot at height {SNAPSHOT_BASE_HEIGHT}") - dump_output = n0.dumptxoutset('utxos.dat', "latest") - - self.log.info("Test loading snapshot when the node tip is on the same block as the snapshot") - assert_equal(n0.getblockcount(), SNAPSHOT_BASE_HEIGHT) - assert_equal(n0.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT) - self.test_snapshot_with_less_work(dump_output['path']) - - self.log.info("Test loading snapshot when headers are not synced") - self.test_headers_not_synced(dump_output['path']) - - # In order for the snapshot to activate, we have to ferry over the new - # headers to n1 and n2 so that they see the header of the snapshot's - # base block while disconnected from n0. - for i in range(1, 300): - block = n0.getblock(n0.getblockhash(i), 0) - # make n1 and n2 aware of the new header, but don't give them the - # block. - n1.submitheader(block) - n2.submitheader(block) - n3.submitheader(block) - - # Ensure everyone is seeing the same headers. - for n in self.nodes: - assert_equal(n.getblockchaininfo()["headers"], SNAPSHOT_BASE_HEIGHT) - - assert_equal(n0.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT) - - def check_dump_output(output): - assert_equal( - output['txoutset_hash'], - "a4bf3407ccb2cc0145c49ebba8fa91199f8a3903daf0883875941497d2493c27") - assert_equal(output["nchaintx"], blocks[SNAPSHOT_BASE_HEIGHT].chain_tx) - - check_dump_output(dump_output) - - # Mine more blocks on top of the snapshot that n1 hasn't yet seen. This - # will allow us to test n1's sync-to-tip on top of a snapshot. 
-
-        # Mine more blocks on top of the snapshot that n1 hasn't yet seen. This
-        # will allow us to test n1's sync-to-tip on top of a snapshot.
-        self.generate(n0, nblocks=100, sync_fun=self.no_op)
-
-        assert_equal(n0.getblockcount(), FINAL_HEIGHT)
-        assert_equal(n1.getblockcount(), START_HEIGHT)
-
-        assert_equal(n0.getblockchaininfo()["blocks"], FINAL_HEIGHT)
-
-        self.log.info(f"Check that dumptxoutset works for past block heights")
-        # rollback defaults to the snapshot base height
-        dump_output2 = n0.dumptxoutset('utxos2.dat', "rollback")
-        check_dump_output(dump_output2)
-        assert_equal(sha256sum_file(dump_output['path']), sha256sum_file(dump_output2['path']))
-
-        # Rollback with specific height
-        dump_output3 = n0.dumptxoutset('utxos3.dat', rollback=SNAPSHOT_BASE_HEIGHT)
-        check_dump_output(dump_output3)
-        assert_equal(sha256sum_file(dump_output['path']), sha256sum_file(dump_output3['path']))
-
-        # Specified height that is not a snapshot height
-        prev_snap_height = SNAPSHOT_BASE_HEIGHT - 1
-        dump_output4 = n0.dumptxoutset(path='utxos4.dat', rollback=prev_snap_height)
-        assert_equal(
-            dump_output4['txoutset_hash'],
-            "8a1db0d6e958ce0d7c963bc6fc91ead596c027129bacec68acc40351037b09d7")
-        assert sha256sum_file(dump_output['path']) != sha256sum_file(dump_output4['path'])
-
-        # Use a hash instead of a height
-        prev_snap_hash = n0.getblockhash(prev_snap_height)
-        dump_output5 = n0.dumptxoutset('utxos5.dat', rollback=prev_snap_hash)
-        assert_equal(sha256sum_file(dump_output4['path']), sha256sum_file(dump_output5['path']))
-
-        # TODO: This is a hack to set m_best_header to the correct value after
-        # dumptxoutset/reconsiderblock. Otherwise the wrong error messages are
-        # returned in following tests. It can be removed once this bug is
-        # fixed. See also https://github.com/bitcoin/bitcoin/issues/26245
-        self.restart_node(0, ["-reindex"])
-
-        # Ensure n0 is back at the tip
-        assert_equal(n0.getblockchaininfo()["blocks"], FINAL_HEIGHT)
-
-        self.test_snapshot_with_less_work(dump_output['path'])
-        self.test_invalid_mempool_state(dump_output['path'])
-        self.test_invalid_snapshot_scenarios(dump_output['path'])
-        self.test_invalid_chainstate_scenarios()
-        self.test_invalid_file_path()
-        self.test_snapshot_block_invalidated(dump_output['path'])
-        self.test_snapshot_not_on_most_work_chain(dump_output['path'])
-
-        # Prune-node sanity check
-        assert 'NETWORK' not in n1.getnetworkinfo()['localservicesnames']
-
-        self.log.info(f"Loading snapshot into second node from {dump_output['path']}")
-        # This node's tip is on an ancestor block of the snapshot, which should
-        # be the normal case
-        loaded = n1.loadtxoutset(dump_output['path'])
-        assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT)
-        assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
-
-        self.log.info("Confirm that local services remain unchanged")
-        # Since n1 is a pruned node, the 'NETWORK' service flag must always be unset.
-        self.assert_only_network_limited_service(n1)
-
-        self.log.info("Check that UTXO-querying RPCs operate on snapshot chainstate")
-        snapshot_hash = loaded['tip_hash']
-        snapshot_num_coins = loaded['coins_loaded']
-        # coinstatsindex might not be caught up yet and is not relevant for this test, so don't use it
-        utxo_info = n1.gettxoutsetinfo(use_index=False)
-        assert_equal(utxo_info['txouts'], snapshot_num_coins)
-        assert_equal(utxo_info['height'], SNAPSHOT_BASE_HEIGHT)
-        assert_equal(utxo_info['bestblock'], snapshot_hash)
-
-        # find coinbase output at snapshot height on node0 and scan for it on node1,
-        # where the block is not available, but the snapshot was loaded successfully
-        coinbase_tx = n0.getblock(snapshot_hash, verbosity=2)['tx'][0]
-        assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", n1.getblock, snapshot_hash)
-        coinbase_output_descriptor = coinbase_tx['vout'][0]['scriptPubKey']['desc']
-        scan_result = n1.scantxoutset('start', [coinbase_output_descriptor])
-        assert_equal(scan_result['success'], True)
-        assert_equal(scan_result['txouts'], snapshot_num_coins)
-        assert_equal(scan_result['height'], SNAPSHOT_BASE_HEIGHT)
-        assert_equal(scan_result['bestblock'], snapshot_hash)
-        scan_utxos = [(coin['txid'], coin['vout']) for coin in scan_result['unspents']]
-        assert (coinbase_tx['txid'], 0) in scan_utxos
-
-        txout_result = n1.gettxout(coinbase_tx['txid'], 0)
-        assert_equal(txout_result['scriptPubKey']['desc'], coinbase_output_descriptor)
-
-        def check_tx_counts(final: bool) -> None:
-            """Check nTx and nChainTx intermediate values right after loading
-            the snapshot, and final values after the snapshot is validated."""
-            for height, block in blocks.items():
-                tx = n1.getblockheader(block.hash)["nTx"]
-                stats = n1.getchaintxstats(nblocks=1, blockhash=block.hash)
-                chain_tx = stats.get("txcount", None)
-                window_tx_count = stats.get("window_tx_count", None)
-                tx_rate = stats.get("txrate", None)
-                window_interval = stats.get("window_interval")
-
-                # Intermediate nTx of the starting block should be set, but nTx of
-                # later blocks should be 0 before they are downloaded.
-                # The window_tx_count of one block is equal to the block's tx count.
-                # If the window tx count is unknown, the value is missing.
-                # The tx_rate is calculated from window_tx_count and window_interval
-                # when possible.
-                if final or height == START_HEIGHT:
-                    assert_equal(tx, block.tx)
-                    assert_equal(window_tx_count, tx)
-                    if window_interval > 0:
-                        assert_approx(tx_rate, window_tx_count / window_interval, vspan=0.1)
-                    else:
-                        assert_equal(tx_rate, None)
-                else:
-                    assert_equal(tx, 0)
-                    assert_equal(window_tx_count, None)
-
-                # Intermediate nChainTx of the starting block and snapshot block
-                # should be set, but others should be None until they are downloaded.
-                if final or height in (START_HEIGHT, SNAPSHOT_BASE_HEIGHT):
-                    assert_equal(chain_tx, block.chain_tx)
-                else:
-                    assert_equal(chain_tx, None)
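The relationship the comments in check_tx_counts describe, restated as a standalone sketch (a hypothetical helper, not framework code): txrate is only defined when the window has both a known tx count and a positive time span, otherwise getchaintxstats omits it.

    def expected_txrate(stats):
        # Mirror of the check above.
        count = stats.get("window_tx_count")
        interval = stats.get("window_interval")
        if count is None or not interval:
            return None
        return count / interval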
-
-        check_tx_counts(final=False)
-
-        normal, snapshot = n1.getchainstates()["chainstates"]
-        assert_equal(normal['blocks'], START_HEIGHT)
-        assert_equal(normal.get('snapshot_blockhash'), None)
-        assert_equal(normal['validated'], True)
-        assert_equal(snapshot['blocks'], SNAPSHOT_BASE_HEIGHT)
-        assert_equal(snapshot['snapshot_blockhash'], dump_output['base_hash'])
-        assert_equal(snapshot['validated'], False)
-
-        assert_equal(n1.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT)
-
-        self.log.info("Submit a stale block that forked off the chain before the snapshot")
-        # Normally a block like this would not be downloaded, but if it is
-        # submitted early before the background chain catches up to the fork
-        # point, it winds up in m_blocks_unlinked and triggers a corner case
-        # that previously crashed CheckBlockIndex.
-        n1.submitblock(stale_block)
-        n1.getchaintips()
-        n1.getblock(stale_hash)
-
-        self.log.info("Submit a spending transaction for a snapshot chainstate coin to the mempool")
-        # spend the coinbase output of the first block that is not available on node1
-        spend_coin_blockhash = n1.getblockhash(START_HEIGHT + 1)
-        assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", n1.getblock, spend_coin_blockhash)
-        prev_tx = n0.getblock(spend_coin_blockhash, 3)['tx'][0]
-        prevout = {"txid": prev_tx['txid'], "vout": 0, "scriptPubKey": prev_tx['vout'][0]['scriptPubKey']['hex']}
-        privkey = n0.get_deterministic_priv_key().key
-        raw_tx = n1.createrawtransaction([prevout], {getnewdestination()[2]: 24.99})
-        signed_tx = n1.signrawtransactionwithkey(raw_tx, [privkey], [prevout])['hex']
-        signed_txid = tx_from_hex(signed_tx).rehash()
-
-        assert n1.gettxout(prev_tx['txid'], 0) is not None
-        n1.sendrawtransaction(signed_tx)
-        assert signed_txid in n1.getrawmempool()
-        assert not n1.gettxout(prev_tx['txid'], 0)
-
-        PAUSE_HEIGHT = FINAL_HEIGHT - 40
-
-        self.log.info("Restarting node to stop at height %d", PAUSE_HEIGHT)
-        self.restart_node(1, extra_args=[
-            f"-stopatheight={PAUSE_HEIGHT}", *self.extra_args[1]])
-
-        # Upon restart during snapshot tip sync, the node must remain in 'limited' mode.
-        self.assert_only_network_limited_service(n1)
-
-        # Finally connect the nodes and let them sync.
-        #
-        # Set `wait_for_connect=False` to avoid a race between performing connection
-        # assertions and the -stopatheight tripping.
-        self.connect_nodes(0, 1, wait_for_connect=False)
-
-        n1.wait_until_stopped(timeout=5)
-
-        self.log.info("Checking that blocks are segmented on disk")
-        assert self.has_blockfile(n1, "00000"), "normal blockfile missing"
-        assert self.has_blockfile(n1, "00001"), "assumed blockfile missing"
-        assert not self.has_blockfile(n1, "00002"), "too many blockfiles"
-
-        self.log.info("Restarted node before snapshot validation completed, reloading...")
-        self.restart_node(1, extra_args=self.extra_args[1])
-
-        # Upon restart, the node must remain in 'limited' mode
-        self.assert_only_network_limited_service(n1)
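The blockfile check above relies on assumeutxo writing background-chain and snapshot-chain blocks into separate block files. A sketch of what such a check amounts to on disk (the path attribute is assumed; the framework provides the real has_blockfile helper):

    from pathlib import Path

    def has_blockfile(node, number):
        # Block files are named blkNNNNN.dat under the node's blocks directory.
        return (Path(node.blocks_path) / f"blk{number}.dat").is_file()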
-
-        # Send snapshot block to n1 out of order. This makes the test less
-        # realistic because normally the snapshot block is one of the last
-        # blocks downloaded, but it's useful to test because it triggers more
-        # corner cases in ReceivedBlockTransactions() and CheckBlockIndex()
-        # setting and testing nChainTx values, and it exposed previous bugs.
-        snapshot_hash = n0.getblockhash(SNAPSHOT_BASE_HEIGHT)
-        snapshot_block = n0.getblock(snapshot_hash, 0)
-        n1.submitblock(snapshot_block)
-
-        self.connect_nodes(0, 1)
-
-        self.log.info(f"Ensuring snapshot chain syncs to tip. ({FINAL_HEIGHT})")
-        self.wait_until(lambda: n1.getchainstates()['chainstates'][-1]['blocks'] == FINAL_HEIGHT)
-        self.sync_blocks(nodes=(n0, n1))
-
-        self.log.info("Ensuring background validation completes")
-        self.wait_until(lambda: len(n1.getchainstates()['chainstates']) == 1)
-
-        # Since n1 is a pruned node, it will not signal NODE_NETWORK after
-        # completing the background sync.
-        self.assert_only_network_limited_service(n1)
-
-        # Ensure indexes have synced.
-        completed_idx_state = {
-            'basic block filter index': COMPLETE_IDX,
-            'coinstatsindex': COMPLETE_IDX,
-        }
-        self.wait_until(lambda: n1.getindexinfo() == completed_idx_state)
-
-        self.log.info("Re-check nTx and nChainTx values")
-        check_tx_counts(final=True)
-
-        for i in (0, 1):
-            n = self.nodes[i]
-            self.log.info(f"Restarting node {i} to ensure (Check|Load)BlockIndex passes")
-            self.restart_node(i, extra_args=self.extra_args[i])
-
-            assert_equal(n.getblockchaininfo()["blocks"], FINAL_HEIGHT)
-
-            chainstate, = n.getchainstates()['chainstates']
-            assert_equal(chainstate['blocks'], FINAL_HEIGHT)
-
-            if i != 0:
-                # Ensure indexes have synced for the assumeutxo node
-                self.wait_until(lambda: n.getindexinfo() == completed_idx_state)
-
-
-        # Node 2: all indexes + reindex
-        # -----------------------------
-
-        self.log.info("-- Testing all indexes + reindex")
-        assert_equal(n2.getblockcount(), START_HEIGHT)
-        assert 'NETWORK' in n2.getnetworkinfo()['localservicesnames']  # sanity check
-
-        self.log.info(f"Loading snapshot into third node from {dump_output['path']}")
-        loaded = n2.loadtxoutset(dump_output['path'])
-        assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT)
-        assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
-
-        # Even though n2 is a full node, it will unset the 'NETWORK' service flag during snapshot loading.
-        # This indicates to other peers that the node will temporarily not provide historical blocks.
-        self.log.info("Check node2 updated the local services during snapshot load")
-        self.assert_only_network_limited_service(n2)
-
-        for reindex_arg in ['-reindex=1', '-reindex-chainstate=1']:
-            self.log.info(f"Check that restarting with {reindex_arg} will delete the snapshot chainstate")
-            self.restart_node(2, extra_args=[reindex_arg, *self.extra_args[2]])
-            assert_equal(1, len(n2.getchainstates()["chainstates"]))
-            for i in range(1, 300):
-                block = n0.getblock(n0.getblockhash(i), 0)
-                n2.submitheader(block)
-            loaded = n2.loadtxoutset(dump_output['path'])
-            assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT)
-            assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
-
-        normal, snapshot = n2.getchainstates()['chainstates']
-        assert_equal(normal['blocks'], START_HEIGHT)
-        assert_equal(normal.get('snapshot_blockhash'), None)
-        assert_equal(normal['validated'], True)
-        assert_equal(snapshot['blocks'], SNAPSHOT_BASE_HEIGHT)
-        assert_equal(snapshot['snapshot_blockhash'], dump_output['base_hash'])
-        assert_equal(snapshot['validated'], False)
-
-        self.log.info("Check that loading the snapshot again will fail because there is already an active snapshot.")
-        msg = "Unable to load UTXO snapshot: Can't activate a snapshot-based chainstate more than once"
-        assert_raises_rpc_error(-32603, msg, n2.loadtxoutset, dump_output['path'])
-
-        # Upon restart, the node must stay in 'limited' mode until the background
-        # chain sync completes.
-        self.restart_node(2, extra_args=self.extra_args[2])
-        self.assert_only_network_limited_service(n2)
-
-        self.connect_nodes(0, 2)
-        self.wait_until(lambda: n2.getchainstates()['chainstates'][-1]['blocks'] == FINAL_HEIGHT)
-        self.sync_blocks(nodes=(n0, n2))
-
-        self.log.info("Ensuring background validation completes")
-        self.wait_until(lambda: len(n2.getchainstates()['chainstates']) == 1)
-
-        # Once background chain sync completes, the full node must start offering historical blocks again.
-        self.wait_until(lambda: {'NETWORK', 'NETWORK_LIMITED'}.issubset(n2.getnetworkinfo()['localservicesnames']))
-
-        completed_idx_state = {
-            'basic block filter index': COMPLETE_IDX,
-            'coinstatsindex': COMPLETE_IDX,
-            'txindex': COMPLETE_IDX,
-        }
-        self.wait_until(lambda: n2.getindexinfo() == completed_idx_state)
-
-        for i in (0, 2):
-            n = self.nodes[i]
-            self.log.info(f"Restarting node {i} to ensure (Check|Load)BlockIndex passes")
-            self.restart_node(i, extra_args=self.extra_args[i])
-
-            assert_equal(n.getblockchaininfo()["blocks"], FINAL_HEIGHT)
-
-            chainstate, = n.getchainstates()['chainstates']
-            assert_equal(chainstate['blocks'], FINAL_HEIGHT)
-
-            if i != 0:
-                # Ensure indexes have synced for the assumeutxo node
-                self.wait_until(lambda: n.getindexinfo() == completed_idx_state)
-
-        self.log.info("Test -reindex-chainstate of an assumeutxo-synced node")
-        self.restart_node(2, extra_args=[
-            '-reindex-chainstate=1', *self.extra_args[2]])
-        assert_equal(n2.getblockchaininfo()["blocks"], FINAL_HEIGHT)
-        self.wait_until(lambda: n2.getblockcount() == FINAL_HEIGHT)
-
-        self.log.info("Test -reindex of an assumeutxo-synced node")
-        self.restart_node(2, extra_args=['-reindex=1', *self.extra_args[2]])
-        self.connect_nodes(0, 2)
-        self.wait_until(lambda: n2.getblockcount() == FINAL_HEIGHT)
-
-        self.test_snapshot_in_a_divergent_chain(dump_output['path'])
-
-        # The following test cleans node2 and node3 chain directories.
- self.test_sync_from_assumeutxo_node(snapshot=dump_output) - -@dataclass -class Block: - hash: str - tx: int - chain_tx: int - -if __name__ == '__main__': - AssumeutxoTest(__file__).main() +#!/usr/bin/env python3) +# Copyright (c) 2021-present The Bitcoin Core developers) +# Distributed under the MIT software license, see the accompanying) +# file COPYING or http://www.opensource.org/licenses/mit-license.php.) +"""Test for assumeutxo, a means of quickly bootstrapping a node using) +a serialized version of the UTXO set at a certain height, which corresponds) +to a hash that has been compiled into bitcoind.) +) +The assumeutxo value generated and used here is committed to in) +`CRegTestParams::m_assumeutxo_data` in `src/kernel/chainparams.cpp`.) +""") +import time) +from shutil import rmtree) +) +from dataclasses import dataclass) +from test_framework.blocktools import () + create_block,) + create_coinbase) +)) +from test_framework.messages import () + CBlockHeader,) + from_hex,) + msg_headers,) + tx_from_hex) +)) +from test_framework.p2p import () + P2PInterface,) +)) +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.util import () + assert_approx,) + assert_equal,) + assert_not_equal,) + assert_raises_rpc_error,) + sha256sum_file,) + try_rpc,) +)) +from test_framework.wallet import () + getnewdestination,) + MiniWallet,) +)) +) +START_HEIGHT = 199) +SNAPSHOT_BASE_HEIGHT = 299) +FINAL_HEIGHT = 399) +COMPLETE_IDX = {'synced': True, 'best_block_height': FINAL_HEIGHT}) +) +) +class AssumeutxoTest(BitcoinTestFramework):) +) + def set_test_params(self):) + """Use the pregenerated, deterministic chain up to height 199.""") + self.num_nodes = 4) + self.rpc_timeout = 120) + self.extra_args = [) + [],) + ["-fastprune", "-prune=1", "-blockfilterindex=1", "-coinstatsindex=1"],) + ["-persistmempool=0","-txindex=1", "-blockfilterindex=1", "-coinstatsindex=1"],) + []) + ]) +) + def setup_network(self):) + """Start with the nodes disconnected so that one can generate a snapshot) + including blocks the other hasn't yet seen.""") + self.add_nodes(4)) + self.start_nodes(extra_args=self.extra_args)) +) + def test_invalid_snapshot_scenarios(self, valid_snapshot_path):) + self.log.info("Test different scenarios of loading invalid snapshot files")) + with open(valid_snapshot_path, 'rb') as f:) + valid_snapshot_contents = f.read()) + bad_snapshot_path = valid_snapshot_path + '.mod') + node = self.nodes[1]) +) + def expected_error(msg):) + assert_raises_rpc_error(-32603, f"Unable to load UTXO snapshot: Population failed: {msg}", node.loadtxoutset, bad_snapshot_path)) +) + self.log.info(" - snapshot file with invalid file magic")) + parsing_error_code = -22) + bad_magic = 0xf00f00f000) + with open(bad_snapshot_path, 'wb') as f:) + f.write(bad_magic.to_bytes(5, "big") + valid_snapshot_contents[5:])) + assert_raises_rpc_error(parsing_error_code, "Unable to parse metadata: Invalid UTXO set snapshot magic bytes. 
Please check if this is indeed a snapshot file or if you are using an outdated snapshot format.", node.loadtxoutset, bad_snapshot_path)) +) + self.log.info(" - snapshot file with unsupported version")) + for version in [0, 1, 3]:) + with open(bad_snapshot_path, 'wb') as f:) + f.write(valid_snapshot_contents[:5] + version.to_bytes(2, "little") + valid_snapshot_contents[7:])) + assert_raises_rpc_error(parsing_error_code, f"Unable to parse metadata: Version of snapshot {version} does not match any of the supported versions.", node.loadtxoutset, bad_snapshot_path)) +) + self.log.info(" - snapshot file with mismatching network magic")) + invalid_magics = [) + # magic, name, real) + [0xf9beb4d9, "main", True],) + [0x0b110907, "test", True],) + [0x0a03cf40, "signet", True],) + [0x00000000, "", False],) + [0xffffffff, "", False],) + ]) + for [magic, name, real] in invalid_magics:) + with open(bad_snapshot_path, 'wb') as f:) + f.write(valid_snapshot_contents[:7] + magic.to_bytes(4, 'big') + valid_snapshot_contents[11:])) + if real:) + assert_raises_rpc_error(parsing_error_code, f"Unable to parse metadata: The network of the snapshot ({name}) does not match the network of this node (regtest).", node.loadtxoutset, bad_snapshot_path)) + else:) + assert_raises_rpc_error(parsing_error_code, "Unable to parse metadata: This snapshot has been created for an unrecognized network. This could be a custom signet, a new testnet or possibly caused by data corruption.", node.loadtxoutset, bad_snapshot_path)) +) + self.log.info(" - snapshot file referring to a block that is not in the assumeutxo parameters")) + prev_block_hash = self.nodes[0].getblockhash(SNAPSHOT_BASE_HEIGHT - 1)) + bogus_block_hash = "0" * 64 # Represents any unknown block hash) + for bad_block_hash in [bogus_block_hash, prev_block_hash]:) + with open(bad_snapshot_path, 'wb') as f:) + f.write(valid_snapshot_contents[:11] + bytes.fromhex(bad_block_hash)[::-1] + valid_snapshot_contents[43:])) +) + msg = f"Unable to load UTXO snapshot: assumeutxo block hash in snapshot metadata not recognized (hash: {bad_block_hash}). The following snapshot heights are available: 110, 200, 299.") + assert_raises_rpc_error(-32603, msg, node.loadtxoutset, bad_snapshot_path)) +) + self.log.info(" - snapshot file with wrong number of coins")) + valid_num_coins = int.from_bytes(valid_snapshot_contents[43:43 + 8], "little")) + for off in [-1, +1]:) + with open(bad_snapshot_path, 'wb') as f:) + f.write(valid_snapshot_contents[:43])) + f.write((valid_num_coins + off).to_bytes(8, "little"))) + f.write(valid_snapshot_contents[43 + 8:])) + expected_error(msg="Bad snapshot - coins left over after deserializing 298 coins." 
if off == -1 else "Bad snapshot format or truncated snapshot after deserializing 299 coins.")) +) + self.log.info(" - snapshot file with alternated but parsable UTXO data results in different hash")) + cases = [) + # (content, offset, wrong_hash, custom_message)) + [b"\xff" * 32, 0, "7d52155c9a9fdc4525b637ef6170568e5dad6fabd0b1fdbb9432010b8453095b", None], # wrong outpoint hash) + [(2).to_bytes(1, "little"), 32, None, "Bad snapshot data after deserializing 1 coins."], # wrong txid coins count) + [b"\xfd\xff\xff", 32, None, "Mismatch in coins count in snapshot metadata and actual snapshot data"], # txid coins count exceeds coins left) + [b"\x01", 33, "9f4d897031ab8547665b4153317ae2fdbf0130c7840b66427ebc48b881cb80ad", None], # wrong outpoint index) + [b"\x81", 34, "3da966ba9826fb6d2604260e01607b55ba44e1a5de298606b08704bc62570ea8", None], # wrong coin code VARINT) + [b"\x80", 34, "091e893b3ccb4334378709578025356c8bcb0a623f37c7c4e493133c988648e5", None], # another wrong coin code) + [b"\x84\x58", 34, None, "Bad snapshot data after deserializing 0 coins"], # wrong coin case with height 364 and coinbase 0) + [b"\xCA\xD2\x8F\x5A", 39, None, "Bad snapshot data after deserializing 0 coins - bad tx out value"], # Amount exceeds MAX_MONEY) + ]) +) + for content, offset, wrong_hash, custom_message in cases:) + with open(bad_snapshot_path, "wb") as f:) + # Prior to offset: Snapshot magic, snapshot version, network magic, hash, coins count) + f.write(valid_snapshot_contents[:(5 + 2 + 4 + 32 + 8 + offset)])) + f.write(content)) + f.write(valid_snapshot_contents[(5 + 2 + 4 + 32 + 8 + offset + len(content)):])) +) + msg = custom_message if custom_message is not None else f"Bad snapshot content hash: expected a4bf3407ccb2cc0145c49ebba8fa91199f8a3903daf0883875941497d2493c27, got {wrong_hash}.") + expected_error(msg)) +) + def test_headers_not_synced(self, valid_snapshot_path):) + for node in self.nodes[1:]:) + msg = "Unable to load UTXO snapshot: The base block header (3bb7ce5eba0be48939b7a521ac1ba9316afee2c7bada3a0cca24188e6d7d96c0) must appear in the headers chain. 
Make sure all headers are syncing, and call loadtxoutset again.") + assert_raises_rpc_error(-32603, msg, node.loadtxoutset, valid_snapshot_path)) +) + def test_invalid_chainstate_scenarios(self):) + self.log.info("Test different scenarios of invalid snapshot chainstate in datadir")) +) + self.log.info(" - snapshot chainstate referring to a block that is not in the assumeutxo parameters")) + self.stop_node(0)) + chainstate_snapshot_path = self.nodes[0].chain_path / "chainstate_snapshot") + chainstate_snapshot_path.mkdir()) + with open(chainstate_snapshot_path / "base_blockhash", 'wb') as f:) + f.write(b'z' * 32)) +) + def expected_error(log_msg="", error_msg=""):) + with self.nodes[0].assert_debug_log([log_msg]):) + self.nodes[0].assert_start_raises_init_error(expected_msg=error_msg)) +) + expected_error_msg = f"Error: A fatal internal error occurred, see debug.log for details: Assumeutxo data not found for the given blockhash '7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a'.") + error_details = f"Assumeutxo data not found for the given blockhash") + expected_error(log_msg=error_details, error_msg=expected_error_msg)) +) + # resurrect node again) + rmtree(chainstate_snapshot_path)) + self.start_node(0)) +) + def test_invalid_mempool_state(self, dump_output_path):) + self.log.info("Test bitcoind should fail when mempool not empty.")) + node=self.nodes[2]) + tx = MiniWallet(node).send_self_transfer(from_node=node)) +) + assert tx['txid'] in node.getrawmempool()) +) + # Attempt to load the snapshot on Node 2 and expect it to fail) + msg = "Unable to load UTXO snapshot: Can't activate a snapshot when mempool not empty") + assert_raises_rpc_error(-32603, msg, node.loadtxoutset, dump_output_path)) +) + self.restart_node(2, extra_args=self.extra_args[2])) +) + def test_invalid_file_path(self):) + self.log.info("Test bitcoind should fail when file path is invalid.")) + node = self.nodes[0]) + path = node.datadir_path / node.chain / "invalid" / "path") + assert_raises_rpc_error(-8, "Couldn't open file {} for reading.".format(path), node.loadtxoutset, path)) +) + def test_snapshot_with_less_work(self, dump_output_path):) + self.log.info("Test bitcoind should fail when snapshot has less accumulated work than this node.")) + node = self.nodes[0]) + msg = "Unable to load UTXO snapshot: Population failed: Work does not exceed active chainstate.") + assert_raises_rpc_error(-32603, msg, node.loadtxoutset, dump_output_path)) +) + def test_snapshot_block_invalidated(self, dump_output_path):) + self.log.info("Test snapshot is not loaded when base block is invalid.")) + node = self.nodes[0]) + # We are testing the case where the base block is invalidated itself) + # and also the case where one of its parents is invalidated.) 
+ for height in [SNAPSHOT_BASE_HEIGHT, SNAPSHOT_BASE_HEIGHT - 1]:) + block_hash = node.getblockhash(height)) + node.invalidateblock(block_hash)) + assert_equal(node.getblockcount(), height - 1)) + msg = "Unable to load UTXO snapshot: The base block header (3bb7ce5eba0be48939b7a521ac1ba9316afee2c7bada3a0cca24188e6d7d96c0) is part of an invalid chain.") + assert_raises_rpc_error(-32603, msg, node.loadtxoutset, dump_output_path)) + node.reconsiderblock(block_hash)) +) + def test_snapshot_in_a_divergent_chain(self, dump_output_path):) + n0 = self.nodes[0]) + n3 = self.nodes[3]) + assert_equal(n0.getblockcount(), FINAL_HEIGHT)) + assert_equal(n3.getblockcount(), START_HEIGHT)) +) + self.log.info("Check importing a snapshot where current chain-tip is not an ancestor of the snapshot block but has less work")) + # Generate a divergent chain in n3 up to 298) + self.generate(n3, nblocks=99, sync_fun=self.no_op)) + assert_equal(n3.getblockcount(), SNAPSHOT_BASE_HEIGHT - 1)) +) + # Try importing the snapshot and assert its success) + loaded = n3.loadtxoutset(dump_output_path)) + assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)) + normal, snapshot = n3.getchainstates()["chainstates"]) + assert_equal(normal['blocks'], START_HEIGHT + 99)) + assert_equal(snapshot['blocks'], SNAPSHOT_BASE_HEIGHT)) +) + # Now lets sync the nodes and wait for the background validation to finish) + self.connect_nodes(0, 3)) + self.sync_blocks(nodes=(n0, n3))) + self.wait_until(lambda: len(n3.getchainstates()['chainstates']) == 1)) +) + def test_snapshot_not_on_most_work_chain(self, dump_output_path):) + self.log.info("Test snapshot is not loaded when the node knows the headers of another chain with more work.")) + node0 = self.nodes[0]) + node1 = self.nodes[1]) + # Create an alternative chain of 2 new blocks, forking off the main chain at the block before the snapshot block.) + # This simulates a longer chain than the main chain when submitting these two block headers to node 1 because it is only aware of) + # the main chain headers up to the snapshot height.) + parent_block_hash = node0.getblockhash(SNAPSHOT_BASE_HEIGHT - 1)) + block_time = node0.getblock(node0.getbestblockhash())['time'] + 1) + fork_block1 = create_block(int(parent_block_hash, 16), create_coinbase(SNAPSHOT_BASE_HEIGHT), block_time)) + fork_block1.solve()) + fork_block2 = create_block(fork_block1.sha256, create_coinbase(SNAPSHOT_BASE_HEIGHT + 1), block_time + 1)) + fork_block2.solve()) + node1.submitheader(fork_block1.serialize().hex())) + node1.submitheader(fork_block2.serialize().hex())) + msg = "A forked headers-chain with more work than the chain with the snapshot base block header exists. Please proceed to sync without AssumeUtxo.") + assert_raises_rpc_error(-32603, msg, node1.loadtxoutset, dump_output_path)) + # Cleanup: submit two more headers of the snapshot chain to node 1, so that it is the most-work chain again and loading) + # the snapshot in future subtests succeeds) + main_block1 = node0.getblock(node0.getblockhash(SNAPSHOT_BASE_HEIGHT + 1), 0)) + main_block2 = node0.getblock(node0.getblockhash(SNAPSHOT_BASE_HEIGHT + 2), 0)) + node1.submitheader(main_block1)) + node1.submitheader(main_block2)) +) + def test_sync_from_assumeutxo_node(self, snapshot):) + """) + This test verifies that:) + 1. An IBD node can sync headers from an AssumeUTXO node at any time.) + 2. IBD nodes do not request historical blocks from AssumeUTXO nodes while they are syncing the background-chain.) + 3. 
The assumeUTXO node dynamically adjusts the network services it offers according to its state.) + 4. IBD nodes can fully sync from AssumeUTXO nodes after they finish the background-chain sync.) + """) + self.log.info("Testing IBD-sync from assumeUTXO node")) + # Node2 starts clean and loads the snapshot.) + # Node3 starts clean and seeks to sync-up from snapshot_node.) + miner = self.nodes[0]) + snapshot_node = self.nodes[2]) + ibd_node = self.nodes[3]) +) + # Start test fresh by cleaning up node directories) + for node in (snapshot_node, ibd_node):) + self.stop_node(node.index)) + rmtree(node.chain_path)) + self.start_node(node.index, extra_args=self.extra_args[node.index])) +) + # Sync-up headers chain on snapshot_node to load snapshot) + headers_provider_conn = snapshot_node.add_p2p_connection(P2PInterface())) + headers_provider_conn.wait_for_getheaders()) + msg = msg_headers()) + for block_num in range(1, miner.getblockcount()+1):) + msg.headers.append(from_hex(CBlockHeader(), miner.getblockheader(miner.getblockhash(block_num), verbose=False)))) + headers_provider_conn.send_message(msg)) +) + # Ensure headers arrived) + default_value = {'status': ''} # No status) + headers_tip_hash = miner.getbestblockhash()) + self.wait_until(lambda: next(filter(lambda x: x['hash'] == headers_tip_hash, snapshot_node.getchaintips()), default_value)['status'] == "headers-only")) + snapshot_node.disconnect_p2ps()) +) + # Load snapshot) + snapshot_node.loadtxoutset(snapshot['path'])) +) + # Connect nodes and verify the ibd_node can sync-up the headers-chain from the snapshot_node) + self.connect_nodes(ibd_node.index, snapshot_node.index)) + snapshot_block_hash = snapshot['base_hash']) + self.wait_until(lambda: next(filter(lambda x: x['hash'] == snapshot_block_hash, ibd_node.getchaintips()), default_value)['status'] == "headers-only")) +) + # Once the headers-chain is synced, the ibd_node must avoid requesting historical blocks from the snapshot_node.) + # If it does request such blocks, the snapshot_node will ignore requests it cannot fulfill, causing the ibd_node) + # to stall. This stall could last for up to 10 min, ultimately resulting in an abrupt disconnection due to the) + # ibd_node's perceived unresponsiveness.) + time.sleep(3) # Sleep here because we can't detect when a node avoids requesting blocks from other peer.) 
+ assert_equal(len(ibd_node.getpeerinfo()[0]['inflight']), 0)) +) + # Now disconnect nodes and finish background chain sync) + self.disconnect_nodes(ibd_node.index, snapshot_node.index)) + self.connect_nodes(snapshot_node.index, miner.index)) + self.sync_blocks(nodes=(miner, snapshot_node))) + # Check the base snapshot block was stored and ensure node signals full-node service support) + self.wait_until(lambda: not try_rpc(-1, "Block not available (not fully downloaded)", snapshot_node.getblock, snapshot_block_hash))) + self.wait_until(lambda: 'NETWORK' in snapshot_node.getnetworkinfo()['localservicesnames'])) +) + # Now that the snapshot_node is synced, verify the ibd_node can sync from it) + self.connect_nodes(snapshot_node.index, ibd_node.index)) + assert 'NETWORK' in ibd_node.getpeerinfo()[0]['servicesnames']) + self.sync_blocks(nodes=(ibd_node, snapshot_node))) +) + def assert_only_network_limited_service(self, node):) + node_services = node.getnetworkinfo()['localservicesnames']) + assert 'NETWORK' not in node_services) + assert 'NETWORK_LIMITED' in node_services) +) + def run_test(self):) + """) + Bring up two (disconnected) nodes, mine some new blocks on the first,) + and generate a UTXO snapshot.) +) + Load the snapshot into the second, ensure it syncs to tip and completes) + background validation when connected to the first.) + """) + n0 = self.nodes[0]) + n1 = self.nodes[1]) + n2 = self.nodes[2]) + n3 = self.nodes[3]) +) + self.mini_wallet = MiniWallet(n0)) +) + # Mock time for a deterministic chain) + for n in self.nodes:) + n.setmocktime(n.getblockheader(n.getbestblockhash())['time'])) +) + # Generate a series of blocks that `n0` will have in the snapshot,) + # but that n1 and n2 don't yet see.) + assert n0.getblockcount() == START_HEIGHT) + blocks = {START_HEIGHT: Block(n0.getbestblockhash(), 1, START_HEIGHT + 1)}) + for i in range(100):) + block_tx = 1) + if i % 3 == 0:) + self.mini_wallet.send_self_transfer(from_node=n0)) + block_tx += 1) + self.generate(n0, nblocks=1, sync_fun=self.no_op)) + height = n0.getblockcount()) + hash = n0.getbestblockhash()) + blocks[height] = Block(hash, block_tx, blocks[height-1].chain_tx + block_tx)) + if i == 4:) + # Create a stale block that forks off the main chain before the snapshot.) + temp_invalid = n0.getbestblockhash()) + n0.invalidateblock(temp_invalid)) + stale_hash = self.generateblock(n0, output="raw(aaaa)", transactions=[], sync_fun=self.no_op)["hash"]) + n0.invalidateblock(stale_hash)) + n0.reconsiderblock(temp_invalid)) + stale_block = n0.getblock(stale_hash, 0)) +) +) + self.log.info("-- Testing assumeutxo + some indexes + pruning")) +) + assert_equal(n0.getblockcount(), SNAPSHOT_BASE_HEIGHT)) + assert_equal(n1.getblockcount(), START_HEIGHT)) +) + self.log.info(f"Creating a UTXO snapshot at height {SNAPSHOT_BASE_HEIGHT}")) + dump_output = n0.dumptxoutset('utxos.dat', "latest")) +) + self.log.info("Test loading snapshot when the node tip is on the same block as the snapshot")) + assert_equal(n0.getblockcount(), SNAPSHOT_BASE_HEIGHT)) + assert_equal(n0.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT)) + self.test_snapshot_with_less_work(dump_output['path'])) +) + self.log.info("Test loading snapshot when headers are not synced")) + self.test_headers_not_synced(dump_output['path'])) +) + # In order for the snapshot to activate, we have to ferry over the new) + # headers to n1 and n2 so that they see the header of the snapshot's) + # base block while disconnected from n0.) 
+ for i in range(1, 300):) + block = n0.getblock(n0.getblockhash(i), 0)) + # make n1 and n2 aware of the new header, but don't give them the) + # block.) + n1.submitheader(block)) + n2.submitheader(block)) + n3.submitheader(block)) +) + # Ensure everyone is seeing the same headers.) + for n in self.nodes:) + assert_equal(n.getblockchaininfo()["headers"], SNAPSHOT_BASE_HEIGHT)) +) + assert_equal(n0.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT)) +) + def check_dump_output(output):) + assert_equal() + output['txoutset_hash'],) + "a4bf3407ccb2cc0145c49ebba8fa91199f8a3903daf0883875941497d2493c27")) + assert_equal(output["nchaintx"], blocks[SNAPSHOT_BASE_HEIGHT].chain_tx)) +) + check_dump_output(dump_output)) +) + # Mine more blocks on top of the snapshot that n1 hasn't yet seen. This) + # will allow us to test n1's sync-to-tip on top of a snapshot.) + self.generate(n0, nblocks=100, sync_fun=self.no_op)) +) + assert_equal(n0.getblockcount(), FINAL_HEIGHT)) + assert_equal(n1.getblockcount(), START_HEIGHT)) +) + assert_equal(n0.getblockchaininfo()["blocks"], FINAL_HEIGHT)) +) + self.log.info(f"Check that dumptxoutset works for past block heights")) + # rollback defaults to the snapshot base height) + dump_output2 = n0.dumptxoutset('utxos2.dat', "rollback")) + check_dump_output(dump_output2)) + assert_equal(sha256sum_file(dump_output['path']), sha256sum_file(dump_output2['path']))) +) + # Rollback with specific height) + dump_output3 = n0.dumptxoutset('utxos3.dat', rollback=SNAPSHOT_BASE_HEIGHT)) + check_dump_output(dump_output3)) + assert_equal(sha256sum_file(dump_output['path']), sha256sum_file(dump_output3['path']))) +) + # Specified height that is not a snapshot height) + prev_snap_height = SNAPSHOT_BASE_HEIGHT - 1) + dump_output4 = n0.dumptxoutset(path='utxos4.dat', rollback=prev_snap_height)) + assert_equal() + dump_output4['txoutset_hash'],) + "8a1db0d6e958ce0d7c963bc6fc91ead596c027129bacec68acc40351037b09d7")) + assert_not_equal(sha256sum_file(dump_output['path']), sha256sum_file(dump_output4['path'])) +) + # Use a hash instead of a height) + prev_snap_hash = n0.getblockhash(prev_snap_height)) + dump_output5 = n0.dumptxoutset('utxos5.dat', rollback=prev_snap_hash)) + assert_equal(sha256sum_file(dump_output4['path']), sha256sum_file(dump_output5['path']))) +) + # TODO: This is a hack to set m_best_header to the correct value after) + # dumptxoutset/reconsiderblock. Otherwise the wrong error messages are) + # returned in following tests. It can be removed once this bug is) + # fixed. 
+        self.restart_node(0, ["-reindex"])
+
+        # Ensure n0 is back at the tip
+        assert_equal(n0.getblockchaininfo()["blocks"], FINAL_HEIGHT)
+
+        self.test_snapshot_with_less_work(dump_output['path'])
+        self.test_invalid_mempool_state(dump_output['path'])
+        self.test_invalid_snapshot_scenarios(dump_output['path'])
+        self.test_invalid_chainstate_scenarios()
+        self.test_invalid_file_path()
+        self.test_snapshot_block_invalidated(dump_output['path'])
+        self.test_snapshot_not_on_most_work_chain(dump_output['path'])
+
+        # Prune-node sanity check
+        assert 'NETWORK' not in n1.getnetworkinfo()['localservicesnames']
+
+        self.log.info(f"Loading snapshot into second node from {dump_output['path']}")
+        # This node's tip is on an ancestor block of the snapshot, which should
+        # be the normal case
+        loaded = n1.loadtxoutset(dump_output['path'])
+        assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT)
+        assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
+
+        self.log.info("Confirm that local services remain unchanged")
+        # Since n1 is a pruned node, the 'NETWORK' service flag must always be unset.
+        self.assert_only_network_limited_service(n1)
+
+        self.log.info("Check that UTXO-querying RPCs operate on snapshot chainstate")
+        snapshot_hash = loaded['tip_hash']
+        snapshot_num_coins = loaded['coins_loaded']
+        # coinstatsindex might not be caught up yet and is not relevant for this test, so don't use it
+        utxo_info = n1.gettxoutsetinfo(use_index=False)
+        assert_equal(utxo_info['txouts'], snapshot_num_coins)
+        assert_equal(utxo_info['height'], SNAPSHOT_BASE_HEIGHT)
+        assert_equal(utxo_info['bestblock'], snapshot_hash)
+
+        # find coinbase output at snapshot height on node0 and scan for it on node1,
+        # where the block is not available, but the snapshot was loaded successfully
+        coinbase_tx = n0.getblock(snapshot_hash, verbosity=2)['tx'][0]
+        assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", n1.getblock, snapshot_hash)
+        coinbase_output_descriptor = coinbase_tx['vout'][0]['scriptPubKey']['desc']
+        scan_result = n1.scantxoutset('start', [coinbase_output_descriptor])
+        assert_equal(scan_result['success'], True)
+        assert_equal(scan_result['txouts'], snapshot_num_coins)
+        assert_equal(scan_result['height'], SNAPSHOT_BASE_HEIGHT)
+        assert_equal(scan_result['bestblock'], snapshot_hash)
+        scan_utxos = [(coin['txid'], coin['vout']) for coin in scan_result['unspents']]
+        assert (coinbase_tx['txid'], 0) in scan_utxos
+
+        txout_result = n1.gettxout(coinbase_tx['txid'], 0)
+        assert_equal(txout_result['scriptPubKey']['desc'], coinbase_output_descriptor)
+
+        def check_tx_counts(final: bool) -> None:
+            """Check nTx and nChainTx intermediate values right after loading
+            the snapshot, and final values after the snapshot is validated."""
+            for height, block in blocks.items():
+                tx = n1.getblockheader(block.hash)["nTx"]
+                stats = n1.getchaintxstats(nblocks=1, blockhash=block.hash)
+                chain_tx = stats.get("txcount", None)
+                window_tx_count = stats.get("window_tx_count", None)
+                tx_rate = stats.get("txrate", None)
+                window_interval = stats.get("window_interval")
+
+                # Intermediate nTx of the starting block should be set, but nTx of
+                # later blocks should be 0 before they are downloaded.
+                # The window_tx_count of one block is equal to the block's tx count.
+                # If the window tx count is unknown, the value is missing.
+                # The tx_rate is calculated from window_tx_count and window_interval
+                # when possible.
+                if final or height == START_HEIGHT:
+                    assert_equal(tx, block.tx)
+                    assert_equal(window_tx_count, tx)
+                    if window_interval > 0:
+                        assert_approx(tx_rate, window_tx_count / window_interval, vspan=0.1)
+                    else:
+                        assert_equal(tx_rate, None)
+                else:
+                    assert_equal(tx, 0)
+                    assert_equal(window_tx_count, None)
+
+                # Intermediate nChainTx of the starting block and snapshot block
+                # should be set, but others should be None until they are downloaded.
+                if final or height in (START_HEIGHT, SNAPSHOT_BASE_HEIGHT):
+                    assert_equal(chain_tx, block.chain_tx)
+                else:
+                    assert_equal(chain_tx, None)
+
+        check_tx_counts(final=False)
+
+        normal, snapshot = n1.getchainstates()["chainstates"]
+        assert_equal(normal['blocks'], START_HEIGHT)
+        assert_equal(normal.get('snapshot_blockhash'), None)
+        assert_equal(normal['validated'], True)
+        assert_equal(snapshot['blocks'], SNAPSHOT_BASE_HEIGHT)
+        assert_equal(snapshot['snapshot_blockhash'], dump_output['base_hash'])
+        assert_equal(snapshot['validated'], False)
+
+        assert_equal(n1.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT)
+
+        self.log.info("Submit a stale block that forked off the chain before the snapshot")
+        # Normally a block like this would not be downloaded, but if it is
+        # submitted early before the background chain catches up to the fork
+        # point, it winds up in m_blocks_unlinked and triggers a corner case
+        # that previously crashed CheckBlockIndex.
+        n1.submitblock(stale_block)
+        n1.getchaintips()
+        n1.getblock(stale_hash)
+
+        self.log.info("Submit a spending transaction for a snapshot chainstate coin to the mempool")
+        # spend the coinbase output of the first block that is not available on node1
+        spend_coin_blockhash = n1.getblockhash(START_HEIGHT + 1)
+        assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", n1.getblock, spend_coin_blockhash)
+        prev_tx = n0.getblock(spend_coin_blockhash, 3)['tx'][0]
+        prevout = {"txid": prev_tx['txid'], "vout": 0, "scriptPubKey": prev_tx['vout'][0]['scriptPubKey']['hex']}
+        privkey = n0.get_deterministic_priv_key().key
+        raw_tx = n1.createrawtransaction([prevout], {getnewdestination()[2]: 24.99})
+        signed_tx = n1.signrawtransactionwithkey(raw_tx, [privkey], [prevout])['hex']
+        signed_txid = tx_from_hex(signed_tx).rehash()
+
+        assert n1.gettxout(prev_tx['txid'], 0) is not None
+        n1.sendrawtransaction(signed_tx)
+        assert signed_txid in n1.getrawmempool()
+        assert not n1.gettxout(prev_tx['txid'], 0)
+
+        PAUSE_HEIGHT = FINAL_HEIGHT - 40
+
+        self.log.info("Restarting node to stop at height %d", PAUSE_HEIGHT)
+        self.restart_node(1, extra_args=[
+            f"-stopatheight={PAUSE_HEIGHT}", *self.extra_args[1]])
+
+        # Upon restart during snapshot tip sync, the node must remain in 'limited' mode.
+        self.assert_only_network_limited_service(n1)
+
+        # Finally connect the nodes and let them sync.
+        #
+        # Set `wait_for_connect=False` to avoid a race between performing connection
+        # assertions and the -stopatheight tripping.
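+        # (connect_nodes would otherwise poll getpeerinfo until the connection is
+        # verified, which can fail if n1 reaches PAUSE_HEIGHT and shuts down first.)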
+        self.connect_nodes(0, 1, wait_for_connect=False)
+
+        n1.wait_until_stopped(timeout=5)
+
+        self.log.info("Checking that blocks are segmented on disk")
+        assert self.has_blockfile(n1, "00000"), "normal blockfile missing"
+        assert self.has_blockfile(n1, "00001"), "assumed blockfile missing"
+        assert not self.has_blockfile(n1, "00002"), "too many blockfiles"
+
+        self.log.info("Restarted node before snapshot validation completed, reloading...")
+        self.restart_node(1, extra_args=self.extra_args[1])
+
+        # Upon restart, the node must remain in 'limited' mode
+        self.assert_only_network_limited_service(n1)
+
+        # Send snapshot block to n1 out of order. This makes the test less
+        # realistic because normally the snapshot block is one of the last
+        # blocks downloaded, but it's useful to test because it triggers more
+        # corner cases in ReceivedBlockTransactions() and CheckBlockIndex()
+        # setting and testing nChainTx values, and it exposed previous bugs.
+        snapshot_hash = n0.getblockhash(SNAPSHOT_BASE_HEIGHT)
+        snapshot_block = n0.getblock(snapshot_hash, 0)
+        n1.submitblock(snapshot_block)
+
+        self.connect_nodes(0, 1)
+
+        self.log.info(f"Ensuring snapshot chain syncs to tip. ({FINAL_HEIGHT})")
+        self.wait_until(lambda: n1.getchainstates()['chainstates'][-1]['blocks'] == FINAL_HEIGHT)
+        self.sync_blocks(nodes=(n0, n1))
+
+        self.log.info("Ensuring background validation completes")
+        self.wait_until(lambda: len(n1.getchainstates()['chainstates']) == 1)
+
+        # Since n1 is a pruned node, it will not signal NODE_NETWORK after
+        # completing the background sync.
+        self.assert_only_network_limited_service(n1)
+
+        # Ensure indexes have synced.
+        completed_idx_state = {
+            'basic block filter index': COMPLETE_IDX,
+            'coinstatsindex': COMPLETE_IDX,
+        }
+        self.wait_until(lambda: n1.getindexinfo() == completed_idx_state)
+
+        self.log.info("Re-check nTx and nChainTx values")
+        check_tx_counts(final=True)
+
+        for i in (0, 1):
+            n = self.nodes[i]
+            self.log.info(f"Restarting node {i} to ensure (Check|Load)BlockIndex passes")
+            self.restart_node(i, extra_args=self.extra_args[i])
+
+            assert_equal(n.getblockchaininfo()["blocks"], FINAL_HEIGHT)
+
+            chainstate, = n.getchainstates()['chainstates']
+            assert_equal(chainstate['blocks'], FINAL_HEIGHT)
+
+            if i != 0:
+                # Ensure indexes have synced for the assumeutxo node
+                self.wait_until(lambda: n.getindexinfo() == completed_idx_state)
+
+
+        # Node 2: all indexes + reindex
+        # -----------------------------
+
+        self.log.info("-- Testing all indexes + reindex")
+        assert_equal(n2.getblockcount(), START_HEIGHT)
+        assert 'NETWORK' in n2.getnetworkinfo()['localservicesnames']  # sanity check
+
+        self.log.info(f"Loading snapshot into third node from {dump_output['path']}")
+        loaded = n2.loadtxoutset(dump_output['path'])
+        assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT)
+        assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
+
+        # Even though n2 is a full node, it will unset the 'NETWORK' service flag during snapshot loading.
+        # This indicates to other peers that the node will temporarily not provide historical blocks.
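+        # (The 'NETWORK' flag is advertised again once the background validation
+        # completes; this is checked further below.)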
+ self.log.info("Check node2 updated the local services during snapshot load")) + self.assert_only_network_limited_service(n2)) +) + for reindex_arg in ['-reindex=1', '-reindex-chainstate=1']:) + self.log.info(f"Check that restarting with {reindex_arg} will delete the snapshot chainstate")) + self.restart_node(2, extra_args=[reindex_arg, *self.extra_args[2]])) + assert_equal(1, len(n2.getchainstates()["chainstates"]))) + for i in range(1, 300):) + block = n0.getblock(n0.getblockhash(i), 0)) + n2.submitheader(block)) + loaded = n2.loadtxoutset(dump_output['path'])) + assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT)) + assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)) +) + normal, snapshot = n2.getchainstates()['chainstates']) + assert_equal(normal['blocks'], START_HEIGHT)) + assert_equal(normal.get('snapshot_blockhash'), None)) + assert_equal(normal['validated'], True)) + assert_equal(snapshot['blocks'], SNAPSHOT_BASE_HEIGHT)) + assert_equal(snapshot['snapshot_blockhash'], dump_output['base_hash'])) + assert_equal(snapshot['validated'], False)) +) + self.log.info("Check that loading the snapshot again will fail because there is already an active snapshot.")) + msg = "Unable to load UTXO snapshot: Can't activate a snapshot-based chainstate more than once") + assert_raises_rpc_error(-32603, msg, n2.loadtxoutset, dump_output['path'])) +) + # Upon restart, the node must stay in 'limited' mode until the background) + # chain sync completes.) + self.restart_node(2, extra_args=self.extra_args[2])) + self.assert_only_network_limited_service(n2)) +) + self.connect_nodes(0, 2)) + self.wait_until(lambda: n2.getchainstates()['chainstates'][-1]['blocks'] == FINAL_HEIGHT)) + self.sync_blocks(nodes=(n0, n2))) +) + self.log.info("Ensuring background validation completes")) + self.wait_until(lambda: len(n2.getchainstates()['chainstates']) == 1)) +) + # Once background chain sync completes, the full node must start offering historical blocks again.) + self.wait_until(lambda: {'NETWORK', 'NETWORK_LIMITED'}.issubset(n2.getnetworkinfo()['localservicesnames']))) +) + completed_idx_state = {) + 'basic block filter index': COMPLETE_IDX,) + 'coinstatsindex': COMPLETE_IDX,) + 'txindex': COMPLETE_IDX,) + }) + self.wait_until(lambda: n2.getindexinfo() == completed_idx_state)) +) + for i in (0, 2):) + n = self.nodes[i]) + self.log.info(f"Restarting node {i} to ensure (Check|Load)BlockIndex passes")) + self.restart_node(i, extra_args=self.extra_args[i])) +) + assert_equal(n.getblockchaininfo()["blocks"], FINAL_HEIGHT)) +) + chainstate, = n.getchainstates()['chainstates']) + assert_equal(chainstate['blocks'], FINAL_HEIGHT)) +) + if i,0:) + # Ensure indexes have synced for the assumeutxo node) + self.wait_until(lambda: n.getindexinfo() == completed_idx_state)) +) + self.log.info("Test -reindex-chainstate of an assumeutxo-synced node")) + self.restart_node(2, extra_args=[) + '-reindex-chainstate=1', *self.extra_args[2]])) + assert_equal(n2.getblockchaininfo()["blocks"], FINAL_HEIGHT)) + self.wait_until(lambda: n2.getblockcount() == FINAL_HEIGHT)) +) + self.log.info("Test -reindex of an assumeutxo-synced node")) + self.restart_node(2, extra_args=['-reindex=1', *self.extra_args[2]])) + self.connect_nodes(0, 2)) + self.wait_until(lambda: n2.getblockcount() == FINAL_HEIGHT)) +) + self.test_snapshot_in_a_divergent_chain(dump_output['path'])) +) + # The following test cleans node2 and node3 chain directories.) 
+        self.test_sync_from_assumeutxo_node(snapshot=dump_output)
+
+@dataclass
+class Block:
+    hash: str
+    tx: int
+    chain_tx: int
+
+if __name__ == '__main__':
+    AssumeutxoTest(__file__).main()
diff --git a/test/functional/feature_bind_port_discover.py b/test/functional/feature_bind_port_discover.py
index 6046ba5e24ba8e..8ff31bdf35efcd 100755
--- a/test/functional/feature_bind_port_discover.py
+++ b/test/functional/feature_bind_port_discover.py
@@ -1,81 +1,81 @@
-#!/usr/bin/env python3
-# Copyright (c) 2020-2021 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""
-Test that -discover does not add all interfaces' addresses if we listen on only some of them
-"""
-
-from test_framework.test_framework import BitcoinTestFramework, SkipTest
-from test_framework.util import (
-    assert_equal,
-    assert_not_equal,
+#!/usr/bin/env python3
+# Copyright (c) 2020-2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""
+Test that -discover does not add all interfaces' addresses if we listen on only some of them
+"""
)
-
-# We need to bind to a routable address for this test to exercise the relevant code
-# and also must have another routable address on another interface which must not
-# be named "lo" or "lo0".
-# To set these routable addresses on the machine, use:
-# Linux:
-# ifconfig lo:0 1.1.1.1/32 up && ifconfig lo:1 2.2.2.2/32 up # to set up
-# ifconfig lo:0 down && ifconfig lo:1 down # to remove it, after the test
-# FreeBSD:
-# ifconfig em0 1.1.1.1/32 alias && ifconfig wlan0 2.2.2.2/32 alias # to set up
-# ifconfig em0 1.1.1.1 -alias && ifconfig wlan0 2.2.2.2 -alias # to remove it, after the test
-ADDR1 = '1.1.1.1'
-ADDR2 = '2.2.2.2'
-
-BIND_PORT = 31001
-
-class BindPortDiscoverTest(BitcoinTestFramework):
-    def set_test_params(self):
-        # Avoid any -bind= on the command line. Force the framework to avoid adding -bind=127.0.0.1.
-        self.setup_clean_chain = True
-        self.bind_to_localhost_only = False
-        self.extra_args = [
-            ['-discover', f'-port={BIND_PORT}'], # bind on any
-            ['-discover', f'-bind={ADDR1}:{BIND_PORT}'],
-        ]
-        self.num_nodes = len(self.extra_args)
-
-    def add_options(self, parser):
-        parser.add_argument(
-            "--ihave1111and2222", action='store_true', dest="ihave1111and2222",
-            help=f"Run the test, assuming {ADDR1} and {ADDR2} are configured on the machine",
-            default=False)
-
-    def skip_test_if_missing_module(self):
-        if not self.options.ihave1111and2222:
-            raise SkipTest(
-                f"To run this test make sure that {ADDR1} and {ADDR2} (routable addresses) are "
-                "assigned to the interfaces on this machine and rerun with --ihave1111and2222")
-
-    def run_test(self):
-        self.log.info(
-            "Test that if -bind= is not passed then all addresses are "
-            "added to localaddresses")
-        found_addr1 = False
-        found_addr2 = False
-        for local in self.nodes[0].getnetworkinfo()['localaddresses']:
-            if local['address'] == ADDR1:
-                found_addr1 = True
-                assert_equal(local['port'], BIND_PORT)
-            if local['address'] == ADDR2:
-                found_addr2 = True
-                assert_equal(local['port'], BIND_PORT)
-        assert found_addr1
-        assert found_addr2
-
-        self.log.info(
-            "Test that if -bind= is passed then only that address is "
-            "added to localaddresses")
-        found_addr1 = False
-        for local in self.nodes[1].getnetworkinfo()['localaddresses']:
-            if local['address'] == ADDR1:
-                found_addr1 = True
-                assert_equal(local['port'], BIND_PORT)
-            assert local['address'] != ADDR2
-        assert found_addr1
-
-if __name__ == '__main__':
-    BindPortDiscoverTest(__file__).main()
+from test_framework.test_framework import BitcoinTestFramework, SkipTest
+from test_framework.util import (
+    assert_equal,
+    assert_not_equal,
+)
+
+# We need to bind to a routable address for this test to exercise the relevant code
+# and also must have another routable address on another interface which must not
+# be named "lo" or "lo0".
+# To set these routable addresses on the machine, use:
+# Linux:
+# ifconfig lo:0 1.1.1.1/32 up && ifconfig lo:1 2.2.2.2/32 up # to set up
+# ifconfig lo:0 down && ifconfig lo:1 down # to remove it, after the test
+# FreeBSD:
+# ifconfig em0 1.1.1.1/32 alias && ifconfig wlan0 2.2.2.2/32 alias # to set up
+# ifconfig em0 1.1.1.1 -alias && ifconfig wlan0 2.2.2.2 -alias # to remove it, after the test
+ADDR1 = '1.1.1.1'
+ADDR2 = '2.2.2.2'
+
+BIND_PORT = 31001
+
+class BindPortDiscoverTest(BitcoinTestFramework):
+    def set_test_params(self):
+        # Avoid any -bind= on the command line. Force the framework to avoid adding -bind=127.0.0.1.
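+        # (Setting bind_to_localhost_only = False below is what achieves this.)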
+        self.setup_clean_chain = True
+        self.bind_to_localhost_only = False
+        self.extra_args = [
+            ['-discover', f'-port={BIND_PORT}'], # bind on any
+            ['-discover', f'-bind={ADDR1}:{BIND_PORT}'],
+        ]
+        self.num_nodes = len(self.extra_args)
+
+    def add_options(self, parser):
+        parser.add_argument(
+            "--ihave1111and2222", action='store_true', dest="ihave1111and2222",
+            help=f"Run the test, assuming {ADDR1} and {ADDR2} are configured on the machine",
+            default=False)
+
+    def skip_test_if_missing_module(self):
+        if not self.options.ihave1111and2222:
+            raise SkipTest(
+                f"To run this test make sure that {ADDR1} and {ADDR2} (routable addresses) are "
+                "assigned to the interfaces on this machine and rerun with --ihave1111and2222")
+
+    def run_test(self):
+        self.log.info(
+            "Test that if -bind= is not passed then all addresses are "
+            "added to localaddresses")
+        found_addr1 = False
+        found_addr2 = False
+        for local in self.nodes[0].getnetworkinfo()['localaddresses']:
+            if local['address'] == ADDR1:
+                found_addr1 = True
+                assert_equal(local['port'], BIND_PORT)
+            if local['address'] == ADDR2:
+                found_addr2 = True
+                assert_equal(local['port'], BIND_PORT)
+        assert found_addr1
+        assert found_addr2
+
+        self.log.info(
+            "Test that if -bind= is passed then only that address is "
+            "added to localaddresses")
+        found_addr1 = False
+        for local in self.nodes[1].getnetworkinfo()['localaddresses']:
+            if local['address'] == ADDR1:
+                found_addr1 = True
+                assert_equal(local['port'], BIND_PORT)
+            assert_not_equal(local['address'], ADDR2)
+        assert found_addr1
+
+if __name__ == '__main__':
+    BindPortDiscoverTest(__file__).main()
diff --git a/test/functional/feature_coinstatsindex.py b/test/functional/feature_coinstatsindex.py
index 45db3d1824df5f..12fa7166f939ad 100755
--- a/test/functional/feature_coinstatsindex.py
+++ b/test/functional/feature_coinstatsindex.py
@@ -1,328 +1,328 @@
-#!/usr/bin/env python3
-# Copyright (c) 2020-2022 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test coinstatsindex across nodes.
-
-Test that the values returned by gettxoutsetinfo are consistent
-between a node running the coinstatsindex and a node without
-the index.
-""" - -from decimal import Decimal - -from test_framework.blocktools import ( - COINBASE_MATURITY, - create_block, - create_coinbase, -) -from test_framework.messages import ( - COIN, - CTxOut, -) -from test_framework.script import ( - CScript, - OP_FALSE, - OP_RETURN, -) -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_not_equal, - assert_equal, - assert_raises_rpc_error, -) -from test_framework.wallet import ( - MiniWallet, - getnewdestination, -) - - -class CoinStatsIndexTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 2 - self.supports_cli = False - self.extra_args = [ - [], - ["-coinstatsindex"] - ] - - def run_test(self): - self.wallet = MiniWallet(self.nodes[0]) - self._test_coin_stats_index() - self._test_use_index_option() - self._test_reorg_index() - self._test_index_rejects_hash_serialized() - self._test_init_index_after_reorg() - - def block_sanity_check(self, block_info): - block_subsidy = 50 - assert_equal( - block_info['prevout_spent'] + block_subsidy, - block_info['new_outputs_ex_coinbase'] + block_info['coinbase'] + block_info['unspendable'] - ) - - def sync_index_node(self): - self.wait_until(lambda: self.nodes[1].getindexinfo()['coinstatsindex']['synced'] is True) - - def _test_coin_stats_index(self): - node = self.nodes[0] - index_node = self.nodes[1] - # Both none and muhash options allow the usage of the index - index_hash_options = ['none', 'muhash'] - - # Generate a normal transaction and mine it - self.generate(self.wallet, COINBASE_MATURITY + 1) - self.wallet.send_self_transfer(from_node=node) - self.generate(node, 1) - - self.log.info("Test that gettxoutsetinfo() output is consistent with or without coinstatsindex option") - res0 = node.gettxoutsetinfo('none') - - # The fields 'disk_size' and 'transactions' do not exist on the index - del res0['disk_size'], res0['transactions'] - - for hash_option in index_hash_options: - res1 = index_node.gettxoutsetinfo(hash_option) - # The fields 'block_info' and 'total_unspendable_amount' only exist on the index - del res1['block_info'], res1['total_unspendable_amount'] - res1.pop('muhash', None) - - # Everything left should be the same - assert_equal(res1, res0) - - self.log.info("Test that gettxoutsetinfo() can get fetch data on specific heights with index") - - # Generate a new tip - self.generate(node, 5) - - for hash_option in index_hash_options: - # Fetch old stats by height - res2 = index_node.gettxoutsetinfo(hash_option, 102) - del res2['block_info'], res2['total_unspendable_amount'] - res2.pop('muhash', None) - assert_equal(res0, res2) - - # Fetch old stats by hash - res3 = index_node.gettxoutsetinfo(hash_option, res0['bestblock']) - del res3['block_info'], res3['total_unspendable_amount'] - res3.pop('muhash', None) - assert_equal(res0, res3) - - # It does not work without coinstatsindex - assert_raises_rpc_error(-8, "Querying specific block heights requires coinstatsindex", node.gettxoutsetinfo, hash_option, 102) - - self.log.info("Test gettxoutsetinfo() with index and verbose flag") - - for hash_option in index_hash_options: - # Genesis block is unspendable - res4 = index_node.gettxoutsetinfo(hash_option, 0) - assert_equal(res4['total_unspendable_amount'], 50) - assert_equal(res4['block_info'], { - 'unspendable': 50, - 'prevout_spent': 0, - 'new_outputs_ex_coinbase': 0, - 'coinbase': 0, - 'unspendables': { - 'genesis_block': 50, - 'bip30': 0, - 'scripts': 0, - 'unclaimed_rewards': 0 - } - }) - 
-            self.block_sanity_check(res4['block_info'])
-
-            # Test an older block height that included a normal tx
-            res5 = index_node.gettxoutsetinfo(hash_option, 102)
-            assert_equal(res5['total_unspendable_amount'], 50)
-            assert_equal(res5['block_info'], {
-                'unspendable': 0,
-                'prevout_spent': 50,
-                'new_outputs_ex_coinbase': Decimal('49.99968800'),
-                'coinbase': Decimal('50.00031200'),
-                'unspendables': {
-                    'genesis_block': 0,
-                    'bip30': 0,
-                    'scripts': 0,
-                    'unclaimed_rewards': 0,
-                }
-            })
-            self.block_sanity_check(res5['block_info'])
-
-        # Generate and send a normal tx with two outputs
-        tx1 = self.wallet.send_to(
-            from_node=node,
-            scriptPubKey=self.wallet.get_scriptPubKey(),
-            amount=21 * COIN,
-        )
-
-        # Find the right position of the 21 BTC output
-        tx1_out_21 = self.wallet.get_utxo(txid=tx1["txid"], vout=tx1["sent_vout"])
-
-        # Generate and send another tx with an OP_RETURN output (which is unspendable)
-        tx2 = self.wallet.create_self_transfer(utxo_to_spend=tx1_out_21)['tx']
-        tx2_val = '20.99'
-        tx2.vout = [CTxOut(int(Decimal(tx2_val) * COIN), CScript([OP_RETURN] + [OP_FALSE] * 30))]
-        tx2_hex = tx2.serialize().hex()
-        self.nodes[0].sendrawtransaction(tx2_hex, 0, tx2_val)
-
-        # Include both txs in a block
-        self.generate(self.nodes[0], 1)
-
-        for hash_option in index_hash_options:
-            # Check all amounts were registered correctly
-            res6 = index_node.gettxoutsetinfo(hash_option, 108)
-            assert_equal(res6['total_unspendable_amount'], Decimal('70.99000000'))
-            assert_equal(res6['block_info'], {
-                'unspendable': Decimal('20.99000000'),
-                'prevout_spent': 71,
-                'new_outputs_ex_coinbase': Decimal('49.99999000'),
-                'coinbase': Decimal('50.01001000'),
-                'unspendables': {
-                    'genesis_block': 0,
-                    'bip30': 0,
-                    'scripts': Decimal('20.99000000'),
-                    'unclaimed_rewards': 0,
-                }
-            })
-            self.block_sanity_check(res6['block_info'])
-
-        # Create a coinbase that does not claim full subsidy and also
-        # has two outputs
-        cb = create_coinbase(109, nValue=35)
-        cb.vout.append(CTxOut(5 * COIN, CScript([OP_FALSE])))
-        cb.rehash()
-
-        # Generate a block that includes previous coinbase
-        tip = self.nodes[0].getbestblockhash()
-        block_time = self.nodes[0].getblock(tip)['time'] + 1
-        block = create_block(int(tip, 16), cb, block_time)
-        block.solve()
-        self.nodes[0].submitblock(block.serialize().hex())
-        self.sync_all()
-
-        for hash_option in index_hash_options:
-            res7 = index_node.gettxoutsetinfo(hash_option, 109)
-            assert_equal(res7['total_unspendable_amount'], Decimal('80.99000000'))
-            assert_equal(res7['block_info'], {
-                'unspendable': 10,
-                'prevout_spent': 0,
-                'new_outputs_ex_coinbase': 0,
-                'coinbase': 40,
-                'unspendables': {
-                    'genesis_block': 0,
-                    'bip30': 0,
-                    'scripts': 0,
-                    'unclaimed_rewards': 10
-                }
-            })
-            self.block_sanity_check(res7['block_info'])
-
-        self.log.info("Test that the index is robust across restarts")
-
-        res8 = index_node.gettxoutsetinfo('muhash')
-        self.restart_node(1, extra_args=self.extra_args[1])
-        res9 = index_node.gettxoutsetinfo('muhash')
-        assert_equal(res8, res9)
-
-        self.generate(index_node, 1, sync_fun=self.no_op)
-        res10 = index_node.gettxoutsetinfo('muhash')
-        assert res8['txouts'] < res10['txouts']
-
-        self.log.info("Test that the index works with -reindex")
-
-        self.restart_node(1, extra_args=["-coinstatsindex", "-reindex"])
-        self.sync_index_node()
-        res11 = index_node.gettxoutsetinfo('muhash')
-        assert_equal(res11, res10)
-
-        self.log.info("Test that the index works with -reindex-chainstate")
-
-        self.restart_node(1, extra_args=["-coinstatsindex", "-reindex-chainstate"])
-        self.sync_index_node()
-        res12 = index_node.gettxoutsetinfo('muhash')
-        assert_equal(res12, res10)
-
-        self.log.info("Test obtaining info for a non-existent block hash")
-        assert_raises_rpc_error(-5, "Block not found", index_node.gettxoutsetinfo, hash_type="none", hash_or_height="ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", use_index=True)
-
-    def _test_use_index_option(self):
-        self.log.info("Test use_index option for nodes running the index")
-
-        self.connect_nodes(0, 1)
-        self.nodes[0].waitforblockheight(110)
-        res = self.nodes[0].gettxoutsetinfo('muhash')
-        option_res = self.nodes[1].gettxoutsetinfo(hash_type='muhash', hash_or_height=None, use_index=False)
-        del res['disk_size'], option_res['disk_size']
-        assert_equal(res, option_res)
-
-    def _test_reorg_index(self):
-        self.log.info("Test that index can handle reorgs")
-
-        # Generate two block, let the index catch up, then invalidate the blocks
-        index_node = self.nodes[1]
-        reorg_blocks = self.generatetoaddress(index_node, 2, getnewdestination()[2])
-        reorg_block = reorg_blocks[1]
-        self.sync_index_node()
-        res_invalid = index_node.gettxoutsetinfo('muhash')
-        index_node.invalidateblock(reorg_blocks[0])
-        assert_equal(index_node.gettxoutsetinfo('muhash')['height'], 110)
-
-        # Add two new blocks
-        block = self.generate(index_node, 2, sync_fun=self.no_op)[1]
-        res = index_node.gettxoutsetinfo(hash_type='muhash', hash_or_height=None, use_index=False)
-
-        # Test that the result of the reorged block is not returned for its old block height
-        res2 = index_node.gettxoutsetinfo(hash_type='muhash', hash_or_height=112)
-        assert_equal(res["bestblock"], block)
-        assert_equal(res["muhash"], res2["muhash"])
-        assert res["muhash"] != res_invalid["muhash"]
-
-        # Test that requesting reorged out block by hash is still returning correct results
-        res_invalid2 = index_node.gettxoutsetinfo(hash_type='muhash', hash_or_height=reorg_block)
-        assert_equal(res_invalid2["muhash"], res_invalid["muhash"])
-        assert res["muhash"] != res_invalid2["muhash"]
-
-        # Add another block, so we don't depend on reconsiderblock remembering which
-        # blocks were touched by invalidateblock
-        self.generate(index_node, 1)
-
-        # Ensure that removing and re-adding blocks yields consistent results
-        block = index_node.getblockhash(99)
-        index_node.invalidateblock(block)
-        index_node.reconsiderblock(block)
-        res3 = index_node.gettxoutsetinfo(hash_type='muhash', hash_or_height=112)
-        assert_equal(res2, res3)
-
-    def _test_index_rejects_hash_serialized(self):
-        self.log.info("Test that the rpc raises if the legacy hash is passed with the index")
-
-        msg = "hash_serialized_3 hash type cannot be queried for a specific block"
-        assert_raises_rpc_error(-8, msg, self.nodes[1].gettxoutsetinfo, hash_type='hash_serialized_3', hash_or_height=111)
-
-        for use_index in {True, False, None}:
-            assert_raises_rpc_error(-8, msg, self.nodes[1].gettxoutsetinfo, hash_type='hash_serialized_3', hash_or_height=111, use_index=use_index)
-
-    def _test_init_index_after_reorg(self):
-        self.log.info("Test a reorg while the index is deactivated")
-        index_node = self.nodes[1]
-        block = self.nodes[0].getbestblockhash()
-        self.generate(index_node, 2, sync_fun=self.no_op)
-        self.sync_index_node()
-
-        # Restart without index
-        self.restart_node(1, extra_args=[])
-        self.connect_nodes(0, 1)
-        index_node.invalidateblock(block)
-        self.generatetoaddress(index_node, 5, getnewdestination()[2])
-        res = index_node.gettxoutsetinfo(hash_type='muhash', hash_or_height=None, use_index=False)
-
-        # Restart with index that still has its best block on the old chain
-        self.restart_node(1, extra_args=self.extra_args[1])
-        self.sync_index_node()
-        res1 = index_node.gettxoutsetinfo(hash_type='muhash', hash_or_height=None, use_index=True)
-        assert_equal(res["muhash"], res1["muhash"])
-
-
-if __name__ == '__main__':
-    CoinStatsIndexTest(__file__).main()
+#!/usr/bin/env python3
+# Copyright (c) 2020-2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test coinstatsindex across nodes.
+
+Test that the values returned by gettxoutsetinfo are consistent
+between a node running the coinstatsindex and a node without
+the index.
+"""
+
+from decimal import Decimal
+
+from test_framework.blocktools import (
+    COINBASE_MATURITY,
+    create_block,
+    create_coinbase,
+)
+from test_framework.messages import (
+    COIN,
+    CTxOut,
+)
+from test_framework.script import (
+    CScript,
+    OP_FALSE,
+    OP_RETURN,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+    assert_not_equal,
+    assert_equal,
+    assert_raises_rpc_error,
+)
+from test_framework.wallet import (
+    MiniWallet,
+    getnewdestination,
+)
+
+
+class CoinStatsIndexTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.setup_clean_chain = True
+        self.num_nodes = 2
+        self.supports_cli = False
+        self.extra_args = [
+            [],
+            ["-coinstatsindex"]
+        ]
+
+    def run_test(self):
+        self.wallet = MiniWallet(self.nodes[0])
+        self._test_coin_stats_index()
+        self._test_use_index_option()
+        self._test_reorg_index()
+        self._test_index_rejects_hash_serialized()
+        self._test_init_index_after_reorg()
+
+    def block_sanity_check(self, block_info):
+        block_subsidy = 50
+        assert_equal(
+            block_info['prevout_spent'] + block_subsidy,
+            block_info['new_outputs_ex_coinbase'] + block_info['coinbase'] + block_info['unspendable']
+        )
+
+    def sync_index_node(self):
+        self.wait_until(lambda: self.nodes[1].getindexinfo()['coinstatsindex']['synced'] is True)
+
+    def _test_coin_stats_index(self):
+        node = self.nodes[0]
+        index_node = self.nodes[1]
+        # Both none and muhash options allow the usage of the index
+        index_hash_options = ['none', 'muhash']
+
+        # Generate a normal transaction and mine it
+        self.generate(self.wallet, COINBASE_MATURITY + 1)
+        self.wallet.send_self_transfer(from_node=node)
+        self.generate(node, 1)
+
+        self.log.info("Test that gettxoutsetinfo() output is consistent with or without coinstatsindex option")
+        res0 = node.gettxoutsetinfo('none')
+
+        # The fields 'disk_size' and 'transactions' do not exist on the index
+        del res0['disk_size'], res0['transactions']
+
+        for hash_option in index_hash_options:
+            res1 = index_node.gettxoutsetinfo(hash_option)
+            # The fields 'block_info' and 'total_unspendable_amount' only exist on the index
+            del res1['block_info'], res1['total_unspendable_amount']
+            res1.pop('muhash', None)
+
+            # Everything left should be the same
+            assert_equal(res1, res0)
+
+        self.log.info("Test that gettxoutsetinfo() can fetch data on specific heights with index")
+
+        # Generate a new tip
+        self.generate(node, 5)
+
+        for hash_option in index_hash_options:
+            # Fetch old stats by height
+            res2 = index_node.gettxoutsetinfo(hash_option, 102)
+            del res2['block_info'], res2['total_unspendable_amount']
+            res2.pop('muhash', None)
+            assert_equal(res0, res2)
+
+            # Fetch old stats by hash
+            res3 = index_node.gettxoutsetinfo(hash_option, res0['bestblock'])
+            del res3['block_info'], res3['total_unspendable_amount']
+            res3.pop('muhash', None)
+            assert_equal(res0, res3)
+
+            # It does not work without coinstatsindex
+            assert_raises_rpc_error(-8, "Querying specific block heights requires coinstatsindex", node.gettxoutsetinfo, hash_option, 102)
+
+        self.log.info("Test gettxoutsetinfo() with index and verbose flag")
+
+        for hash_option in index_hash_options:
+            # Genesis block is unspendable
+            res4 = index_node.gettxoutsetinfo(hash_option, 0)
+            assert_equal(res4['total_unspendable_amount'], 50)
+            assert_equal(res4['block_info'], {
+                'unspendable': 50,
+                'prevout_spent': 0,
+                'new_outputs_ex_coinbase': 0,
+                'coinbase': 0,
+                'unspendables': {
+                    'genesis_block': 50,
+                    'bip30': 0,
+                    'scripts': 0,
+                    'unclaimed_rewards': 0
+                }
+            })
+            self.block_sanity_check(res4['block_info'])
+
+            # Test an older block height that included a normal tx
+            res5 = index_node.gettxoutsetinfo(hash_option, 102)
+            assert_equal(res5['total_unspendable_amount'], 50)
+            assert_equal(res5['block_info'], {
+                'unspendable': 0,
+                'prevout_spent': 50,
+                'new_outputs_ex_coinbase': Decimal('49.99968800'),
+                'coinbase': Decimal('50.00031200'),
+                'unspendables': {
+                    'genesis_block': 0,
+                    'bip30': 0,
+                    'scripts': 0,
+                    'unclaimed_rewards': 0,
+                }
+            })
+            self.block_sanity_check(res5['block_info'])
+
+        # Generate and send a normal tx with two outputs
+        tx1 = self.wallet.send_to(
+            from_node=node,
+            scriptPubKey=self.wallet.get_scriptPubKey(),
+            amount=21 * COIN,
+        )
+
+        # Find the right position of the 21 BTC output
+        tx1_out_21 = self.wallet.get_utxo(txid=tx1["txid"], vout=tx1["sent_vout"])
+
+        # Generate and send another tx with an OP_RETURN output (which is unspendable)
+        tx2 = self.wallet.create_self_transfer(utxo_to_spend=tx1_out_21)['tx']
+        tx2_val = '20.99'
+        tx2.vout = [CTxOut(int(Decimal(tx2_val) * COIN), CScript([OP_RETURN] + [OP_FALSE] * 30))]
+        tx2_hex = tx2.serialize().hex()
+        self.nodes[0].sendrawtransaction(tx2_hex, 0, tx2_val)
+
+        # Include both txs in a block
+        self.generate(self.nodes[0], 1)
+
+        for hash_option in index_hash_options:
+            # Check all amounts were registered correctly
+            res6 = index_node.gettxoutsetinfo(hash_option, 108)
+            assert_equal(res6['total_unspendable_amount'], Decimal('70.99000000'))
+            assert_equal(res6['block_info'], {
+                'unspendable': Decimal('20.99000000'),
+                'prevout_spent': 71,
+                'new_outputs_ex_coinbase': Decimal('49.99999000'),
+                'coinbase': Decimal('50.01001000'),
+                'unspendables': {
+                    'genesis_block': 0,
+                    'bip30': 0,
+                    'scripts': Decimal('20.99000000'),
+                    'unclaimed_rewards': 0,
+                }
+            })
+            self.block_sanity_check(res6['block_info'])
+
+        # Create a coinbase that does not claim full subsidy and also
+        # has two outputs
+        cb = create_coinbase(109, nValue=35)
+        cb.vout.append(CTxOut(5 * COIN, CScript([OP_FALSE])))
+        cb.rehash()
+
+        # Generate a block that includes previous coinbase
+        tip = self.nodes[0].getbestblockhash()
+        block_time = self.nodes[0].getblock(tip)['time'] + 1
+        block = create_block(int(tip, 16), cb, block_time)
+        block.solve()
+        self.nodes[0].submitblock(block.serialize().hex())
+        self.sync_all()
+
+        for hash_option in index_hash_options:
+            res7 = index_node.gettxoutsetinfo(hash_option, 109)
+            assert_equal(res7['total_unspendable_amount'], Decimal('80.99000000'))
+            assert_equal(res7['block_info'], {
+                'unspendable': 10,
+                'prevout_spent': 0,
+                'new_outputs_ex_coinbase': 0,
+                'coinbase': 40,
+                'unspendables': {
+                    'genesis_block': 0,
+                    'bip30': 0,
+                    'scripts': 0,
+                    'unclaimed_rewards': 10
+                }
+            })
+            self.block_sanity_check(res7['block_info'])
+
+        self.log.info("Test that the index is robust across restarts")
+
+        res8 = index_node.gettxoutsetinfo('muhash')
+        self.restart_node(1, extra_args=self.extra_args[1])
+        res9 = index_node.gettxoutsetinfo('muhash')
+        assert_equal(res8, res9)
+
+        self.generate(index_node, 1, sync_fun=self.no_op)
+        res10 = index_node.gettxoutsetinfo('muhash')
+        assert res8['txouts'] < res10['txouts']
+
+        self.log.info("Test that the index works with -reindex")
+
+        self.restart_node(1, extra_args=["-coinstatsindex", "-reindex"])
+        self.sync_index_node()
+        res11 = index_node.gettxoutsetinfo('muhash')
+        assert_equal(res11, res10)
+
+        self.log.info("Test that the index works with -reindex-chainstate")
+
+        self.restart_node(1, extra_args=["-coinstatsindex", "-reindex-chainstate"])
+        self.sync_index_node()
+        res12 = index_node.gettxoutsetinfo('muhash')
+        assert_equal(res12, res10)
+
+        self.log.info("Test obtaining info for a non-existent block hash")
+        assert_raises_rpc_error(-5, "Block not found", index_node.gettxoutsetinfo, hash_type="none", hash_or_height="ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", use_index=True)
+
+    def _test_use_index_option(self):
+        self.log.info("Test use_index option for nodes running the index")
+
+        self.connect_nodes(0, 1)
+        self.nodes[0].waitforblockheight(110)
+        res = self.nodes[0].gettxoutsetinfo('muhash')
+        option_res = self.nodes[1].gettxoutsetinfo(hash_type='muhash', hash_or_height=None, use_index=False)
+        del res['disk_size'], option_res['disk_size']
+        assert_equal(res, option_res)
+
+    def _test_reorg_index(self):
+        self.log.info("Test that index can handle reorgs")
+
+        # Generate two blocks, let the index catch up, then invalidate the blocks
+        index_node = self.nodes[1]
+        reorg_blocks = self.generatetoaddress(index_node, 2, getnewdestination()[2])
+        reorg_block = reorg_blocks[1]
+        self.sync_index_node()
+        res_invalid = index_node.gettxoutsetinfo('muhash')
+        index_node.invalidateblock(reorg_blocks[0])
+        assert_equal(index_node.gettxoutsetinfo('muhash')['height'], 110)
+
+        # Add two new blocks
+        block = self.generate(index_node, 2, sync_fun=self.no_op)[1]
+        res = index_node.gettxoutsetinfo(hash_type='muhash', hash_or_height=None, use_index=False)
+
+        # Test that the result of the reorged block is not returned for its old block height
+        res2 = index_node.gettxoutsetinfo(hash_type='muhash', hash_or_height=112)
+        assert_equal(res["bestblock"], block)
+        assert_equal(res["muhash"], res2["muhash"])
+        assert_not_equal(res["muhash"], res_invalid["muhash"])
+
+        # Test that requesting reorged out block by hash is still returning correct results
+        res_invalid2 = index_node.gettxoutsetinfo(hash_type='muhash', hash_or_height=reorg_block)
+        assert_equal(res_invalid2["muhash"], res_invalid["muhash"])
+        assert_not_equal(res["muhash"], res_invalid2["muhash"])
+
+        # Add another block, so we don't depend on reconsiderblock remembering which
+        # blocks were touched by invalidateblock
+        self.generate(index_node, 1)
+
+        # Ensure that removing and re-adding blocks yields consistent results
+        block = index_node.getblockhash(99)
+        index_node.invalidateblock(block)
+        index_node.reconsiderblock(block)
+        res3 = index_node.gettxoutsetinfo(hash_type='muhash', hash_or_height=112)
+        assert_equal(res2, res3)
+
+    def _test_index_rejects_hash_serialized(self):
+        self.log.info("Test that the rpc raises if the legacy hash is passed with the index")
+
+        msg = "hash_serialized_3 hash type cannot be queried for a specific block"
+        assert_raises_rpc_error(-8, msg, self.nodes[1].gettxoutsetinfo, hash_type='hash_serialized_3', hash_or_height=111)
+
+        for use_index in {True, False, None}:
+            assert_raises_rpc_error(-8, msg, self.nodes[1].gettxoutsetinfo, hash_type='hash_serialized_3', hash_or_height=111, use_index=use_index)
+
+    def _test_init_index_after_reorg(self):
+        self.log.info("Test a reorg while the index is deactivated")
+        index_node = self.nodes[1]
+        block = self.nodes[0].getbestblockhash()
+        self.generate(index_node, 2, sync_fun=self.no_op)
+        self.sync_index_node()
+
+        # Restart without index
+        self.restart_node(1, extra_args=[])
+        self.connect_nodes(0, 1)
+        index_node.invalidateblock(block)
+        self.generatetoaddress(index_node, 5, getnewdestination()[2])
+        res = index_node.gettxoutsetinfo(hash_type='muhash', hash_or_height=None, use_index=False)
+
+        # Restart with index that still has its best block on the old chain
+        self.restart_node(1, extra_args=self.extra_args[1])
+        self.sync_index_node()
+        res1 = index_node.gettxoutsetinfo(hash_type='muhash', hash_or_height=None, use_index=True)
+        assert_equal(res["muhash"], res1["muhash"])
+
+
+if __name__ == '__main__':
+    CoinStatsIndexTest(__file__).main()
diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py
index f4bae533357d03..4810385834af78 100755
--- a/test/functional/feature_dbcrash.py
+++ b/test/functional/feature_dbcrash.py
@@ -1,290 +1,290 @@
-#!/usr/bin/env python3
-# Copyright (c) 2017-2022 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test recovery from a crash during chainstate writing.
-
-- 4 nodes
-  * node0, node1, and node2 will have different dbcrash ratios, and different
-    dbcache sizes
-  * node3 will be a regular node, with no crashing.
-  * The nodes will not connect to each other.
-
-- use default test framework starting chain. initialize starting_tip_height to
-  tip height.
-
-- Main loop:
-  * generate lots of transactions on node3, enough to fill up a block.
-  * uniformly randomly pick a tip height from starting_tip_height to
-    tip_height; with probability 1/(height_difference+4), invalidate this block.
-  * mine enough blocks to overtake tip_height at start of loop.
-  * for each node in [node0,node1,node2]:
-    - for each mined block:
-      * submit block to node
-      * if node crashed on/after submitting:
-        - restart until recovery succeeds
-        - check that utxo matches node3 using gettxoutsetinfo"""
-
-import errno
-import http.client
-import random
-import time
-
-from test_framework.blocktools import COINBASE_MATURITY
-from test_framework.messages import (
-    COIN,
-)
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (
-    assert_not_equal,
-    assert_equal,
-)
-from test_framework.wallet import (
-    MiniWallet,
-    getnewdestination,
-)
-
-
-class ChainstateWriteCrashTest(BitcoinTestFramework):
-    def set_test_params(self):
-        self.num_nodes = 4
-        self.rpc_timeout = 480
-        self.supports_cli = False
-
-        # Set -maxmempool=0 to turn off mempool memory sharing with dbcache
-        self.base_args = [
-            "-limitdescendantsize=0",
-            "-maxmempool=0",
-            "-dbbatchsize=200000",
-        ]
-
-        # Set different crash ratios and cache sizes. Note that not all of
-        # -dbcache goes to the in-memory coins cache.
-        self.node0_args = ["-dbcrashratio=8", "-dbcache=4"] + self.base_args
-        self.node1_args = ["-dbcrashratio=16", "-dbcache=8"] + self.base_args
-        self.node2_args = ["-dbcrashratio=24", "-dbcache=16"] + self.base_args
-
-        # Node3 is a normal node with default args, except will mine full blocks
-        # and txs with "dust" outputs
-        self.node3_args = ["-blockmaxweight=4000000", "-dustrelayfee=0"]
-        self.extra_args = [self.node0_args, self.node1_args, self.node2_args, self.node3_args]
-
-    def setup_network(self):
-        self.add_nodes(self.num_nodes, extra_args=self.extra_args)
-        self.start_nodes()
-        # Leave them unconnected, we'll use submitblock directly in this test
-
-    def restart_node(self, node_index, expected_tip):
-        """Start up a given node id, wait for the tip to reach the given block hash, and calculate the utxo hash.
-
-        Exceptions on startup should indicate node crash (due to -dbcrashratio), in which case we try again. Give up
-        after 60 seconds. Returns the utxo hash of the given node."""
-
-        time_start = time.time()
-        while time.time() - time_start < 120:
-            try:
-                # Any of these RPC calls could throw due to node crash
-                self.start_node(node_index)
-                self.nodes[node_index].waitforblock(expected_tip)
-                utxo_hash = self.nodes[node_index].gettxoutsetinfo()['hash_serialized_3']
-                return utxo_hash
-            except Exception:
-                # An exception here should mean the node is about to crash.
-                # If bitcoind exits, then try again. wait_for_node_exit()
-                # should raise an exception if bitcoind doesn't exit.
-                self.wait_for_node_exit(node_index, timeout=10)
-                self.crashed_on_restart += 1
-                time.sleep(1)
-
-        # If we got here, bitcoind isn't coming back up on restart. Could be a
-        # bug in bitcoind, or we've gotten unlucky with our dbcrash ratio --
-        # perhaps we generated a test case that blew up our cache?
-        # TODO: If this happens a lot, we should try to restart without -dbcrashratio
-        # and make sure that recovery happens.
-        raise AssertionError(f"Unable to successfully restart node {node_index} in allotted time")
-
-    def submit_block_catch_error(self, node_index, block):
-        """Try submitting a block to the given node.
-
-        Catch any exceptions that indicate the node has crashed.
-        Returns true if the block was submitted successfully; false otherwise."""
-
-        try:
-            self.nodes[node_index].submitblock(block)
-            return True
-        except (http.client.CannotSendRequest, http.client.RemoteDisconnected) as e:
-            self.log.debug(f"node {node_index} submitblock raised exception: {e}")
-            return False
-        except OSError as e:
-            self.log.debug(f"node {node_index} submitblock raised OSError exception: errno={e.errno}")
-            if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]:
-                # The node has likely crashed
-                return False
-            else:
-                # Unexpected exception, raise
-                raise
-
-    def sync_node3blocks(self, block_hashes):
-        """Use submitblock to sync node3's chain with the other nodes
-
-        If submitblock fails, restart the node and get the new utxo hash.
-        If any nodes crash while updating, we'll compare utxo hashes to
-        ensure recovery was successful."""
-
-        node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_3']
-
-        # Retrieve all the blocks from node3
-        blocks = []
-        for block_hash in block_hashes:
-            blocks.append([block_hash, self.nodes[3].getblock(block_hash, 0)])
-
-        # Deliver each block to each other node
-        for i in range(3):
-            nodei_utxo_hash = None
-            self.log.debug(f"Syncing blocks to node {i}")
-            for (block_hash, block) in blocks:
-                # Get the block from node3, and submit to node_i
-                self.log.debug(f"submitting block {block_hash}")
-                if not self.submit_block_catch_error(i, block):
-                    # TODO: more carefully check that the crash is due to -dbcrashratio
-                    # (change the exit code perhaps, and check that here?)
-                    self.wait_for_node_exit(i, timeout=30)
-                    self.log.debug(f"Restarting node {i} after block hash {block_hash}")
-                    nodei_utxo_hash = self.restart_node(i, block_hash)
-                    assert nodei_utxo_hash is not None
-                    self.restart_counts[i] += 1
-                else:
-                    # Clear it out after successful submitblock calls -- the cached
-                    # utxo hash will no longer be correct
-                    nodei_utxo_hash = None
-
-            # Check that the utxo hash matches node3's utxo set
-            # NOTE: we only check the utxo set if we had to restart the node
-            # after the last block submitted:
-            # - checking the utxo hash causes a cache flush, which we don't
-            #   want to do every time; so
-            # - we only update the utxo cache after a node restart, since flushing
-            #   the cache is a no-op at that point
-            if nodei_utxo_hash is not None:
-                self.log.debug(f"Checking txoutsetinfo matches for node {i}")
-                assert_equal(nodei_utxo_hash, node3_utxo_hash)
-
-    def verify_utxo_hash(self):
-        """Verify that the utxo hash of each node matches node3.
-
-        Restart any nodes that crash while querying."""
-        node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_3']
-        self.log.info("Verifying utxo hash matches for all nodes")
-
-        for i in range(3):
-            try:
-                nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()['hash_serialized_3']
-            except OSError:
-                # probably a crash on db flushing
-                nodei_utxo_hash = self.restart_node(i, self.nodes[3].getbestblockhash())
-            assert_equal(nodei_utxo_hash, node3_utxo_hash)
-
-    def generate_small_transactions(self, node, count, utxo_list):
-        FEE = 1000  # TODO: replace this with node relay fee based calculation
-        num_transactions = 0
-        random.shuffle(utxo_list)
-        while len(utxo_list) >= 2 and num_transactions < count:
-            utxos_to_spend = [utxo_list.pop() for _ in range(2)]
-            input_amount = int(sum([utxo['value'] for utxo in utxos_to_spend]) * COIN)
-            if input_amount < FEE:
-                # Sanity check -- if we chose inputs that are too small, skip
-                continue
-
-            self.wallet.send_self_transfer_multi(
-                from_node=node,
-                utxos_to_spend=utxos_to_spend,
-                num_outputs=3,
-                fee_per_output=FEE // 3,
-            )
-            num_transactions += 1
-
-    def run_test(self):
-        self.wallet = MiniWallet(self.nodes[3])
-        initial_height = self.nodes[3].getblockcount()
-        self.generate(self.nodes[3], COINBASE_MATURITY, sync_fun=self.no_op)
-
-        # Track test coverage statistics
-        self.restart_counts = [0, 0, 0]  # Track the restarts for nodes 0-2
-        self.crashed_on_restart = 0  # Track count of crashes during recovery
-
-        # Start by creating a lot of utxos on node3
-        utxo_list = []
-        for _ in range(5):
-            utxo_list.extend(self.wallet.send_self_transfer_multi(from_node=self.nodes[3], num_outputs=1000)['new_utxos'])
-            self.generate(self.nodes[3], 1, sync_fun=self.no_op)
-        assert_equal(len(self.nodes[3].getrawmempool()), 0)
self.log.info(f"Prepped {len(utxo_list)} utxo entries") - - # Sync these blocks with the other nodes - block_hashes_to_sync = [] - for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1): - block_hashes_to_sync.append(self.nodes[3].getblockhash(height)) - - self.log.debug(f"Syncing {len(block_hashes_to_sync)} blocks with other nodes") - # Syncing the blocks could cause nodes to crash, so the test begins here. - self.sync_node3blocks(block_hashes_to_sync) - - starting_tip_height = self.nodes[3].getblockcount() - - # Main test loop: - # each time through the loop, generate a bunch of transactions, - # and then either mine a single new block on the tip, or some-sized reorg. - for i in range(40): - self.log.info(f"Iteration {i}, generating 2500 transactions {self.restart_counts}") - # Generate a bunch of small-ish transactions - self.generate_small_transactions(self.nodes[3], 2500, utxo_list) - # Pick a random block between current tip, and starting tip - current_height = self.nodes[3].getblockcount() - random_height = random.randint(starting_tip_height, current_height) - self.log.debug(f"At height {current_height}, considering height {random_height}") - if random_height > starting_tip_height: - # Randomly reorg from this point with some probability (1/4 for - # tip, 1/5 for tip-1, ...) - if random.random() < 1.0 / (current_height + 4 - random_height): - self.log.debug(f"Invalidating block at height {random_height}") - self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height)) - - # Now generate new blocks until we pass the old tip height - self.log.debug("Mining longer tip") - block_hashes = [] - while current_height + 1 > self.nodes[3].getblockcount(): - block_hashes.extend(self.generatetoaddress( - self.nodes[3], - nblocks=min(10, current_height + 1 - self.nodes[3].getblockcount()), - # new address to avoid mining a block that has just been invalidated - address=getnewdestination()[2], - sync_fun=self.no_op, - )) - self.log.debug(f"Syncing {len(block_hashes)} new blocks...") - self.sync_node3blocks(block_hashes) - self.wallet.rescan_utxos() - utxo_list = self.wallet.get_utxos() - self.log.debug(f"MiniWallet utxo count: {len(utxo_list)}") - - # Check that the utxo hashes agree with node3 - # Useful side effect: each utxo cache gets flushed here, so that we - # won't get crashes on shutdown at the end of the test. - self.verify_utxo_hash() - - # Check the test coverage - self.log.info(f"Restarted nodes: {self.restart_counts}; crashes on restart: {self.crashed_on_restart}") - - # If no nodes were restarted, we didn't test anything. - assert self.restart_counts != [0, 0, 0] - - # Make sure we tested the case of crash-during-recovery. - assert self.crashed_on_restart > 0 - - # Warn if any of the nodes escaped restart. - for i in range(3): - if self.restart_counts[i] == 0: - self.log.warning(f"Node {i} never crashed during utxo flush!") - - -if __name__ == "__main__": - ChainstateWriteCrashTest(__file__).main() +#!/usr/bin/env python3) +# Copyright (c) 2017-2022 The Bitcoin Core developers) +# Distributed under the MIT software license, see the accompanying) +# file COPYING or http://www.opensource.org/licenses/mit-license.php.) +"""Test recovery from a crash during chainstate writing.) +) +- 4 nodes) + * node0, node1, and node2 will have different dbcrash ratios, and different) + dbcache sizes) + * node3 will be a regular node, with no crashing.) + * The nodes will not connect to each other.) +) +- use default test framework starting chain. 
+  tip height.
+
+- Main loop:
+  * generate lots of transactions on node3, enough to fill up a block.
+  * uniformly randomly pick a tip height from starting_tip_height to
+    tip_height; with probability 1/(height_difference+4), invalidate this block.
+  * mine enough blocks to overtake tip_height at start of loop.
+  * for each node in [node0,node1,node2]:
+    - for each mined block:
+      * submit block to node
+      * if node crashed on/after submitting:
+        - restart until recovery succeeds
+        - check that utxo matches node3 using gettxoutsetinfo"""
+
+import errno
+import http.client
+import random
+import time
+
+from test_framework.blocktools import COINBASE_MATURITY
+from test_framework.messages import (
+    COIN,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+    assert_not_equal,
+    assert_equal,
+)
+from test_framework.wallet import (
+    MiniWallet,
+    getnewdestination,
+)
+
+
+class ChainstateWriteCrashTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.num_nodes = 4
+        self.rpc_timeout = 480
+        self.supports_cli = False
+
+        # Set -maxmempool=0 to turn off mempool memory sharing with dbcache
+        self.base_args = [
+            "-limitdescendantsize=0",
+            "-maxmempool=0",
+            "-dbbatchsize=200000",
+        ]
+
+        # Set different crash ratios and cache sizes. Note that not all of
+        # -dbcache goes to the in-memory coins cache.
+        self.node0_args = ["-dbcrashratio=8", "-dbcache=4"] + self.base_args
+        self.node1_args = ["-dbcrashratio=16", "-dbcache=8"] + self.base_args
+        self.node2_args = ["-dbcrashratio=24", "-dbcache=16"] + self.base_args
+
+        # Node3 is a normal node with default args, except will mine full blocks
+        # and txs with "dust" outputs
+        self.node3_args = ["-blockmaxweight=4000000", "-dustrelayfee=0"]
+        self.extra_args = [self.node0_args, self.node1_args, self.node2_args, self.node3_args]
+
+    def setup_network(self):
+        self.add_nodes(self.num_nodes, extra_args=self.extra_args)
+        self.start_nodes()
+        # Leave them unconnected, we'll use submitblock directly in this test
+
+    def restart_node(self, node_index, expected_tip):
+        """Start up a given node id, wait for the tip to reach the given block hash, and calculate the utxo hash.
+
+        Exceptions on startup should indicate node crash (due to -dbcrashratio), in which case we try again. Give up
+        after 120 seconds. Returns the utxo hash of the given node."""
+
+        time_start = time.time()
+        while time.time() - time_start < 120:
+            try:
+                # Any of these RPC calls could throw due to node crash
+                self.start_node(node_index)
+                self.nodes[node_index].waitforblock(expected_tip)
+                utxo_hash = self.nodes[node_index].gettxoutsetinfo()['hash_serialized_3']
+                return utxo_hash
+            except Exception:
+                # An exception here should mean the node is about to crash.
+                # If bitcoind exits, then try again. wait_for_node_exit()
+                # should raise an exception if bitcoind doesn't exit.
+                self.wait_for_node_exit(node_index, timeout=10)
+                self.crashed_on_restart += 1
+                time.sleep(1)
+
+        # If we got here, bitcoind isn't coming back up on restart. Could be a
+        # bug in bitcoind, or we've gotten unlucky with our dbcrash ratio --
+        # perhaps we generated a test case that blew up our cache?
+        # TODO: If this happens a lot, we should try to restart without -dbcrashratio
+        # and make sure that recovery happens.
+        raise AssertionError(f"Unable to successfully restart node {node_index} in allotted time")
+
+    def submit_block_catch_error(self, node_index, block):
+        """Try submitting a block to the given node.
+
+        Catch any exceptions that indicate the node has crashed.
+        Returns true if the block was submitted successfully; false otherwise."""
+
+        try:
+            self.nodes[node_index].submitblock(block)
+            return True
+        except (http.client.CannotSendRequest, http.client.RemoteDisconnected) as e:
+            self.log.debug(f"node {node_index} submitblock raised exception: {e}")
+            return False
+        except OSError as e:
+            self.log.debug(f"node {node_index} submitblock raised OSError exception: errno={e.errno}")
+            if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]:
+                # The node has likely crashed
+                return False
+            else:
+                # Unexpected exception, raise
+                raise
+
+    def sync_node3blocks(self, block_hashes):
+        """Use submitblock to sync node3's chain with the other nodes
+
+        If submitblock fails, restart the node and get the new utxo hash.
+        If any nodes crash while updating, we'll compare utxo hashes to
+        ensure recovery was successful."""
+
+        node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_3']
+
+        # Retrieve all the blocks from node3
+        blocks = []
+        for block_hash in block_hashes:
+            blocks.append([block_hash, self.nodes[3].getblock(block_hash, 0)])
+
+        # Deliver each block to each other node
+        for i in range(3):
+            nodei_utxo_hash = None
+            self.log.debug(f"Syncing blocks to node {i}")
+            for (block_hash, block) in blocks:
+                # Get the block from node3, and submit to node_i
+                self.log.debug(f"submitting block {block_hash}")
+                if not self.submit_block_catch_error(i, block):
+                    # TODO: more carefully check that the crash is due to -dbcrashratio
+                    # (change the exit code perhaps, and check that here?)
+                    self.wait_for_node_exit(i, timeout=30)
+                    self.log.debug(f"Restarting node {i} after block hash {block_hash}")
+                    nodei_utxo_hash = self.restart_node(i, block_hash)
+                    assert nodei_utxo_hash is not None
+                    self.restart_counts[i] += 1
+                else:
+                    # Clear it out after successful submitblock calls -- the cached
+                    # utxo hash will no longer be correct
+                    nodei_utxo_hash = None
+
+            # Check that the utxo hash matches node3's utxo set
+            # NOTE: we only check the utxo set if we had to restart the node
+            # after the last block submitted:
+            # - checking the utxo hash causes a cache flush, which we don't
+            #   want to do every time; so
+            # - we only update the utxo cache after a node restart, since flushing
+            #   the cache is a no-op at that point
+            if nodei_utxo_hash is not None:
+                self.log.debug(f"Checking txoutsetinfo matches for node {i}")
+                assert_equal(nodei_utxo_hash, node3_utxo_hash)
+
+    def verify_utxo_hash(self):
+        """Verify that the utxo hash of each node matches node3.
+
+        Restart any nodes that crash while querying."""
+        node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_3']
+        self.log.info("Verifying utxo hash matches for all nodes")
+
+        for i in range(3):
+            try:
+                nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()['hash_serialized_3']
+            except OSError:
+                # probably a crash on db flushing
+                nodei_utxo_hash = self.restart_node(i, self.nodes[3].getbestblockhash())
+            assert_equal(nodei_utxo_hash, node3_utxo_hash)
+
+    def generate_small_transactions(self, node, count, utxo_list):
+        FEE = 1000  # TODO: replace this with node relay fee based calculation
+        num_transactions = 0
+        random.shuffle(utxo_list)
+        while len(utxo_list) >= 2 and num_transactions < count:
+            utxos_to_spend = [utxo_list.pop() for _ in range(2)]
+            input_amount = int(sum([utxo['value'] for utxo in utxos_to_spend]) * COIN)
+            if input_amount < FEE:
+                # Sanity check -- if we chose inputs that are too small, skip
+                continue
+
+            self.wallet.send_self_transfer_multi(
+                from_node=node,
+                utxos_to_spend=utxos_to_spend,
+                num_outputs=3,
+                fee_per_output=FEE // 3,
+            )
+            num_transactions += 1
+
+    def run_test(self):
+        self.wallet = MiniWallet(self.nodes[3])
+        initial_height = self.nodes[3].getblockcount()
+        self.generate(self.nodes[3], COINBASE_MATURITY, sync_fun=self.no_op)
+
+        # Track test coverage statistics
+        self.restart_counts = [0, 0, 0]  # Track the restarts for nodes 0-2
+        self.crashed_on_restart = 0  # Track count of crashes during recovery
+
+        # Start by creating a lot of utxos on node3
+        utxo_list = []
+        for _ in range(5):
+            utxo_list.extend(self.wallet.send_self_transfer_multi(from_node=self.nodes[3], num_outputs=1000)['new_utxos'])
+            self.generate(self.nodes[3], 1, sync_fun=self.no_op)
+        assert_equal(len(self.nodes[3].getrawmempool()), 0)
+        self.log.info(f"Prepped {len(utxo_list)} utxo entries")
+
+        # Sync these blocks with the other nodes
+        block_hashes_to_sync = []
+        for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1):
+            block_hashes_to_sync.append(self.nodes[3].getblockhash(height))
+
+        self.log.debug(f"Syncing {len(block_hashes_to_sync)} blocks with other nodes")
+        # Syncing the blocks could cause nodes to crash, so the test begins here.
+        self.sync_node3blocks(block_hashes_to_sync)
+
+        starting_tip_height = self.nodes[3].getblockcount()
+
+        # Main test loop:
+        # each time through the loop, generate a bunch of transactions,
+        # and then either mine a single new block on the tip, or some-sized reorg.
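+        # A quick illustration of the invalidation probability used in the
+        # loop below: a block at distance d from the tip is invalidated with
+        # probability 1/(d + 4), e.g. 1/4 for the tip (d=0), 1/5 for tip-1,
+        # 1/10 for d=6 -- so reorgs are frequent but usually shallow.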
+        for i in range(40):
+            self.log.info(f"Iteration {i}, generating 2500 transactions {self.restart_counts}")
+            # Generate a bunch of small-ish transactions
+            self.generate_small_transactions(self.nodes[3], 2500, utxo_list)
+            # Pick a random block between current tip, and starting tip
+            current_height = self.nodes[3].getblockcount()
+            random_height = random.randint(starting_tip_height, current_height)
+            self.log.debug(f"At height {current_height}, considering height {random_height}")
+            if random_height > starting_tip_height:
+                # Randomly reorg from this point with some probability (1/4 for
+                # tip, 1/5 for tip-1, ...)
+                if random.random() < 1.0 / (current_height + 4 - random_height):
+                    self.log.debug(f"Invalidating block at height {random_height}")
+                    self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height))
+
+            # Now generate new blocks until we pass the old tip height
+            self.log.debug("Mining longer tip")
+            block_hashes = []
+            while current_height + 1 > self.nodes[3].getblockcount():
+                block_hashes.extend(self.generatetoaddress(
+                    self.nodes[3],
+                    nblocks=min(10, current_height + 1 - self.nodes[3].getblockcount()),
+                    # new address to avoid mining a block that has just been invalidated
+                    address=getnewdestination()[2],
+                    sync_fun=self.no_op,
+                ))
+            self.log.debug(f"Syncing {len(block_hashes)} new blocks...")
+            self.sync_node3blocks(block_hashes)
+            self.wallet.rescan_utxos()
+            utxo_list = self.wallet.get_utxos()
+            self.log.debug(f"MiniWallet utxo count: {len(utxo_list)}")
+
+        # Check that the utxo hashes agree with node3
+        # Useful side effect: each utxo cache gets flushed here, so that we
+        # won't get crashes on shutdown at the end of the test.
+        self.verify_utxo_hash()
+
+        # Check the test coverage
+        self.log.info(f"Restarted nodes: {self.restart_counts}; crashes on restart: {self.crashed_on_restart}")
+
+        # If no nodes were restarted, we didn't test anything.
+        assert_not_equal(self.restart_counts, [0, 0, 0])
+
+        # Make sure we tested the case of crash-during-recovery.
+        assert self.crashed_on_restart > 0
+
+        # Warn if any of the nodes escaped restart.
+        for i in range(3):
+            if self.restart_counts[i] == 0:
+                self.log.warning(f"Node {i} never crashed during utxo flush!")
+
+
+if __name__ == "__main__":
+    ChainstateWriteCrashTest(__file__).main()
diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py
index 21ba4924612aeb..840129f2b104a2 100755
--- a/test/functional/feature_fee_estimation.py
+++ b/test/functional/feature_fee_estimation.py
@@ -1,484 +1,484 @@
-#!/usr/bin/env python3
-# Copyright (c) 2014-2022 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test fee estimation code."""
-from copy import deepcopy
-from decimal import Decimal, ROUND_DOWN
-import os
-import random
-import time
-
-from test_framework.messages import (
-    COIN,
-)
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (
-    assert_not_equal,
-    assert_equal,
-    assert_greater_than,
-    assert_greater_than_or_equal,
-    assert_raises_rpc_error,
-    satoshi_round,
-)
-from test_framework.wallet import MiniWallet
-
-MAX_FILE_AGE = 60
-SECONDS_PER_HOUR = 60 * 60
-
-def small_txpuzzle_randfee(
-    wallet, from_node, conflist, unconflist, amount, min_fee, fee_increment, batch_reqs
-):
-    """Create and send a transaction with a random fee using MiniWallet.
- - The function takes a list of confirmed outputs and unconfirmed outputs - and attempts to use the confirmed list first for its inputs. - It adds the newly created outputs to the unconfirmed list. - Returns (raw transaction, fee).""" - - # It's best to exponentially distribute our random fees - # because the buckets are exponentially spaced. - # Exponentially distributed from 1-128 * fee_increment - rand_fee = float(fee_increment) * (1.1892 ** random.randint(0, 28)) - # Total fee ranges from min_fee to min_fee + 127*fee_increment - fee = min_fee - fee_increment + satoshi_round(rand_fee, rounding=ROUND_DOWN) - utxos_to_spend = [] - total_in = Decimal("0.00000000") - while total_in <= (amount + fee) and len(conflist) > 0: - t = conflist.pop(0) - total_in += t["value"] - utxos_to_spend.append(t) - while total_in <= (amount + fee) and len(unconflist) > 0: - t = unconflist.pop(0) - total_in += t["value"] - utxos_to_spend.append(t) - if total_in <= amount + fee: - raise RuntimeError(f"Insufficient funds: need {amount + fee}, have {total_in}") - tx = wallet.create_self_transfer_multi( - utxos_to_spend=utxos_to_spend, - fee_per_output=0, - )["tx"] - tx.vout[0].nValue = int((total_in - amount - fee) * COIN) - tx.vout.append(deepcopy(tx.vout[0])) - tx.vout[1].nValue = int(amount * COIN) - tx.rehash() - txid = tx.hash - tx_hex = tx.serialize().hex() - - batch_reqs.append(from_node.sendrawtransaction.get_request(hexstring=tx_hex, maxfeerate=0)) - unconflist.append({"txid": txid, "vout": 0, "value": total_in - amount - fee}) - unconflist.append({"txid": txid, "vout": 1, "value": amount}) - - return (tx.get_vsize(), fee) - - -def check_raw_estimates(node, fees_seen): - """Call estimaterawfee and verify that the estimates meet certain invariants.""" - - delta = 1.0e-6 # account for rounding error - for i in range(1, 26): - for _, e in node.estimaterawfee(i).items(): - feerate = float(e["feerate"]) - assert_greater_than(feerate, 0) - - if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen): - raise AssertionError( - f"Estimated fee ({feerate}) out of range ({min(fees_seen)},{max(fees_seen)})" - ) - - -def check_smart_estimates(node, fees_seen): - """Call estimatesmartfee and verify that the estimates meet certain invariants.""" - - delta = 1.0e-6 # account for rounding error - last_feerate = float(max(fees_seen)) - all_smart_estimates = [node.estimatesmartfee(i) for i in range(1, 26)] - mempoolMinFee = node.getmempoolinfo()["mempoolminfee"] - minRelaytxFee = node.getmempoolinfo()["minrelaytxfee"] - for i, e in enumerate(all_smart_estimates): # estimate is for i+1 - feerate = float(e["feerate"]) - assert_greater_than(feerate, 0) - assert_greater_than_or_equal(feerate, float(mempoolMinFee)) - assert_greater_than_or_equal(feerate, float(minRelaytxFee)) - - if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen): - raise AssertionError( - f"Estimated fee ({feerate}) out of range ({min(fees_seen)},{max(fees_seen)})" - ) - if feerate - delta > last_feerate: - raise AssertionError( - f"Estimated fee ({feerate}) larger than last fee ({last_feerate}) for lower number of confirms" - ) - last_feerate = feerate - - if i == 0: - assert_equal(e["blocks"], 2) - else: - assert_greater_than_or_equal(i + 1, e["blocks"]) - - -def check_estimates(node, fees_seen): - check_raw_estimates(node, fees_seen) - check_smart_estimates(node, fees_seen) - - -def make_tx(wallet, utxo, feerate): - """Create a 1in-1out transaction with a specific input and feerate (sat/vb).""" - return 
wallet.create_self_transfer( - utxo_to_spend=utxo, - fee_rate=Decimal(feerate * 1000) / COIN, - ) - -def check_fee_estimates_btw_modes(node, expected_conservative, expected_economical): - fee_est_conservative = node.estimatesmartfee(1, estimate_mode="conservative")['feerate'] - fee_est_economical = node.estimatesmartfee(1, estimate_mode="economical")['feerate'] - fee_est_default = node.estimatesmartfee(1)['feerate'] - assert_equal(fee_est_conservative, expected_conservative) - assert_equal(fee_est_economical, expected_economical) - assert_equal(fee_est_default, expected_economical) - - -class EstimateFeeTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 3 - # whitelist peers to speed up tx relay / mempool sync - self.noban_tx_relay = True - self.extra_args = [ - [], - ["-blockmaxweight=68000"], - ["-blockmaxweight=32000"], - ] - - def setup_network(self): - """ - We'll setup the network to have 3 nodes that all mine with different parameters. - But first we need to use one node to create a lot of outputs - which we will use to generate our transactions. - """ - self.add_nodes(3, extra_args=self.extra_args) - # Use node0 to mine blocks for input splitting - # Node1 mines small blocks but that are bigger than the expected transaction rate. - # NOTE: the CreateNewBlock code starts counting block weight at 4,000 weight, - # (68k weight is room enough for 120 or so transactions) - # Node2 is a stingy miner, that - # produces too small blocks (room for only 55 or so transactions) - - def transact_and_mine(self, numblocks, mining_node): - min_fee = Decimal("0.00001") - # We will now mine numblocks blocks generating on average 100 transactions between each block - # We shuffle our confirmed txout set before each set of transactions - # small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible - # resorting to tx's that depend on the mempool when those run out - for _ in range(numblocks): - random.shuffle(self.confutxo) - batch_sendtx_reqs = [] - for _ in range(random.randrange(100 - 50, 100 + 50)): - from_index = random.randint(1, 2) - (tx_bytes, fee) = small_txpuzzle_randfee( - self.wallet, - self.nodes[from_index], - self.confutxo, - self.memutxo, - Decimal("0.005"), - min_fee, - min_fee, - batch_sendtx_reqs, - ) - tx_kbytes = tx_bytes / 1000.0 - self.fees_per_kb.append(float(fee) / tx_kbytes) - for node in self.nodes: - node.batch(batch_sendtx_reqs) - self.sync_mempools(wait=0.1) - mined = mining_node.getblock(self.generate(mining_node, 1)[0], True)["tx"] - # update which txouts are confirmed - newmem = [] - for utx in self.memutxo: - if utx["txid"] in mined: - self.confutxo.append(utx) - else: - newmem.append(utx) - self.memutxo = newmem - - def initial_split(self, node): - """Split two coinbase UTxOs into many small coins""" - self.confutxo = self.wallet.send_self_transfer_multi( - from_node=node, - utxos_to_spend=[self.wallet.get_utxo() for _ in range(2)], - num_outputs=2048)['new_utxos'] - while len(node.getrawmempool()) > 0: - self.generate(node, 1, sync_fun=self.no_op) - - def sanity_check_estimates_range(self): - """Populate estimation buckets, assert estimates are in a sane range and - are strictly increasing as the target decreases.""" - self.fees_per_kb = [] - self.memutxo = [] - self.log.info("Will output estimates for 1/2/3/6/15/25 blocks") - - for _ in range(2): - self.log.info( - "Creating transactions and mining them with a block size that can't keep up" - ) - # Create transactions and mine 10 small blocks with 
node 2, but create txs faster than we can mine - self.transact_and_mine(10, self.nodes[2]) - check_estimates(self.nodes[1], self.fees_per_kb) - - self.log.info( - "Creating transactions and mining them at a block size that is just big enough" - ) - # Generate transactions while mining 10 more blocks, this time with node1 - # which mines blocks with capacity just above the rate that transactions are being created - self.transact_and_mine(10, self.nodes[1]) - check_estimates(self.nodes[1], self.fees_per_kb) - - # Finish by mining a normal-sized block: - while len(self.nodes[1].getrawmempool()) > 0: - self.generate(self.nodes[1], 1) - - self.log.info("Final estimates after emptying mempools") - check_estimates(self.nodes[1], self.fees_per_kb) - - def test_feerate_mempoolminfee(self): - high_val = 3 * self.nodes[1].estimatesmartfee(1)["feerate"] - self.restart_node(1, extra_args=[f"-minrelaytxfee={high_val}"]) - check_estimates(self.nodes[1], self.fees_per_kb) - self.restart_node(1) - - def sanity_check_rbf_estimates(self, utxos): - """During 5 blocks, broadcast low fee transactions. Only 10% of them get - confirmed and the remaining ones get RBF'd with a high fee transaction at - the next block. - The block policy estimator should return the high feerate. - """ - # The broadcaster and block producer - node = self.nodes[0] - miner = self.nodes[1] - # In sat/vb - low_feerate = 1 - high_feerate = 10 - # Cache the utxos of which to replace the spender after it failed to get - # confirmed - utxos_to_respend = [] - txids_to_replace = [] - - assert_greater_than_or_equal(len(utxos), 250) - for _ in range(5): - # Broadcast 45 low fee transactions that will need to be RBF'd - txs = [] - for _ in range(45): - u = utxos.pop(0) - tx = make_tx(self.wallet, u, low_feerate) - utxos_to_respend.append(u) - txids_to_replace.append(tx["txid"]) - txs.append(tx) - # Broadcast 5 low fee transaction which don't need to - for _ in range(5): - tx = make_tx(self.wallet, utxos.pop(0), low_feerate) - txs.append(tx) - batch_send_tx = [node.sendrawtransaction.get_request(tx["hex"]) for tx in txs] - for n in self.nodes: - n.batch(batch_send_tx) - # Mine the transactions on another node - self.sync_mempools(wait=0.1, nodes=[node, miner]) - for txid in txids_to_replace: - miner.prioritisetransaction(txid=txid, fee_delta=-COIN) - self.generate(miner, 1) - # RBF the low-fee transactions - while len(utxos_to_respend) > 0: - u = utxos_to_respend.pop(0) - tx = make_tx(self.wallet, u, high_feerate) - node.sendrawtransaction(tx["hex"]) - txs.append(tx) - dec_txs = [res["result"] for res in node.batch([node.decoderawtransaction.get_request(tx["hex"]) for tx in txs])] - self.wallet.scan_txs(dec_txs) - - - # Mine the last replacement txs - self.sync_mempools(wait=0.1, nodes=[node, miner]) - self.generate(miner, 1) - - # Only 10% of the transactions were really confirmed with a low feerate, - # the rest needed to be RBF'd. We must return the 90% conf rate feerate. 
- high_feerate_kvb = Decimal(high_feerate) / COIN * 10 ** 3 - est_feerate = node.estimatesmartfee(2)["feerate"] - assert_equal(est_feerate, high_feerate_kvb) - - def test_old_fee_estimate_file(self): - # Get the initial fee rate while node is running - fee_rate = self.nodes[0].estimatesmartfee(1)["feerate"] - - # Restart node to ensure fee_estimate.dat file is read - self.restart_node(0) - assert_equal(self.nodes[0].estimatesmartfee(1)["feerate"], fee_rate) - - fee_dat = self.nodes[0].chain_path / "fee_estimates.dat" - - # Stop the node and backdate the fee_estimates.dat file more than MAX_FILE_AGE - self.stop_node(0) - last_modified_time = time.time() - (MAX_FILE_AGE + 1) * SECONDS_PER_HOUR - os.utime(fee_dat, (last_modified_time, last_modified_time)) - - # Start node and ensure the fee_estimates.dat file was not read - self.start_node(0) - assert_equal(self.nodes[0].estimatesmartfee(1)["errors"], ["Insufficient data or no feerate found"]) - - - def test_estimate_dat_is_flushed_periodically(self): - fee_dat = self.nodes[0].chain_path / "fee_estimates.dat" - os.remove(fee_dat) if os.path.exists(fee_dat) else None - - # Verify that fee_estimates.dat does not exist - assert_equal(os.path.isfile(fee_dat), False) - - # Verify if the string "Flushed fee estimates to fee_estimates.dat." is present in the debug log file. - # If present, it indicates that fee estimates have been successfully flushed to disk. - with self.nodes[0].assert_debug_log(expected_msgs=["Flushed fee estimates to fee_estimates.dat."], timeout=1): - # Mock the scheduler for an hour to flush fee estimates to fee_estimates.dat - self.nodes[0].mockscheduler(SECONDS_PER_HOUR) - - # Verify that fee estimates were flushed and fee_estimates.dat file is created - assert_equal(os.path.isfile(fee_dat), True) - - # Verify that the estimates remain the same if there are no blocks in the flush interval - block_hash_before = self.nodes[0].getbestblockhash() - fee_dat_initial_content = open(fee_dat, "rb").read() - with self.nodes[0].assert_debug_log(expected_msgs=["Flushed fee estimates to fee_estimates.dat."], timeout=1): - # Mock the scheduler for an hour to flush fee estimates to fee_estimates.dat - self.nodes[0].mockscheduler(SECONDS_PER_HOUR) - - # Verify that there were no blocks in between the flush interval - assert_equal(block_hash_before, self.nodes[0].getbestblockhash()) - - fee_dat_current_content = open(fee_dat, "rb").read() - assert_equal(fee_dat_current_content, fee_dat_initial_content) - - # Verify that the estimates remain the same after shutdown with no blocks before shutdown - self.restart_node(0) - fee_dat_current_content = open(fee_dat, "rb").read() - assert_equal(fee_dat_current_content, fee_dat_initial_content) - - # Verify that the estimates are not the same if new blocks were produced in the flush interval - with self.nodes[0].assert_debug_log(expected_msgs=["Flushed fee estimates to fee_estimates.dat."], timeout=1): - # Mock the scheduler for an hour to flush fee estimates to fee_estimates.dat - self.generate(self.nodes[0], 5, sync_fun=self.no_op) - self.nodes[0].mockscheduler(SECONDS_PER_HOUR) - - fee_dat_current_content = open(fee_dat, "rb").read() - assert fee_dat_current_content != fee_dat_initial_content - - fee_dat_initial_content = fee_dat_current_content - - # Generate blocks before shutdown and verify that the fee estimates are not the same - self.generate(self.nodes[0], 5, sync_fun=self.no_op) - self.restart_node(0) - fee_dat_current_content = open(fee_dat, "rb").read() - assert fee_dat_current_content != 
fee_dat_initial_content - - - def test_acceptstalefeeestimates_option(self): - # Get the initial fee rate while node is running - fee_rate = self.nodes[0].estimatesmartfee(1)["feerate"] - - self.stop_node(0) - - fee_dat = self.nodes[0].chain_path / "fee_estimates.dat" - - # Stop the node and backdate the fee_estimates.dat file more than MAX_FILE_AGE - last_modified_time = time.time() - (MAX_FILE_AGE + 1) * SECONDS_PER_HOUR - os.utime(fee_dat, (last_modified_time, last_modified_time)) - - # Restart node with -acceptstalefeeestimates option to ensure fee_estimate.dat file is read - self.start_node(0,extra_args=["-acceptstalefeeestimates"]) - assert_equal(self.nodes[0].estimatesmartfee(1)["feerate"], fee_rate) - - def clear_estimates(self): - self.log.info("Restarting node with fresh estimation") - self.stop_node(0) - fee_dat = self.nodes[0].chain_path / "fee_estimates.dat" - os.remove(fee_dat) - self.start_node(0) - self.connect_nodes(0, 1) - self.connect_nodes(0, 2) - self.sync_blocks() - assert_equal(self.nodes[0].estimatesmartfee(1)["errors"], ["Insufficient data or no feerate found"]) - - def broadcast_and_mine(self, broadcaster, miner, feerate, count): - """Broadcast and mine some number of transactions with a specified fee rate.""" - for _ in range(count): - self.wallet.send_self_transfer(from_node=broadcaster, fee_rate=feerate) - self.sync_mempools() - self.generate(miner, 1) - - def test_estimation_modes(self): - low_feerate = Decimal("0.001") - high_feerate = Decimal("0.005") - tx_count = 24 - # Broadcast and mine high fee transactions for the first 12 blocks. - for _ in range(12): - self.broadcast_and_mine(self.nodes[1], self.nodes[2], high_feerate, tx_count) - check_fee_estimates_btw_modes(self.nodes[0], high_feerate, high_feerate) - - # We now track 12 blocks; short horizon stats will start decaying. - # Broadcast and mine low fee transactions for the next 4 blocks. - for _ in range(4): - self.broadcast_and_mine(self.nodes[1], self.nodes[2], low_feerate, tx_count) - # conservative mode will consider longer time horizons while economical mode does not - # Check the fee estimates for both modes after mining low fee transactions. 
-        check_fee_estimates_btw_modes(self.nodes[0], high_feerate, low_feerate)
-
-
-    def run_test(self):
-        self.log.info("This test is time consuming, please be patient")
-        self.log.info("Splitting inputs so we can generate tx's")
-
-        # Split two coinbases into many small utxos
-        self.start_node(0)
-        self.wallet = MiniWallet(self.nodes[0])
-        self.initial_split(self.nodes[0])
-        self.log.info("Finished splitting")
-
-        # Now we can connect the other nodes, didn't want to connect them earlier
-        # so the estimates would not be affected by the splitting transactions
-        self.start_node(1)
-        self.start_node(2)
-        self.connect_nodes(1, 0)
-        self.connect_nodes(0, 2)
-        self.connect_nodes(2, 1)
-        self.sync_all()
-
-        self.log.info("Testing estimates with single transactions.")
-        self.sanity_check_estimates_range()
-
-        self.log.info("Test fee_estimates.dat is flushed periodically")
-        self.test_estimate_dat_is_flushed_periodically()
-
-        # check that the effective feerate is greater than or equal to the mempoolminfee even for high mempoolminfee
-        self.log.info(
-            "Test fee rate estimation after restarting node with high MempoolMinFee"
-        )
-        self.test_feerate_mempoolminfee()
-
-        self.log.info("Test acceptstalefeeestimates option")
-        self.test_acceptstalefeeestimates_option()
-
-        self.log.info("Test reading old fee_estimates.dat")
-        self.test_old_fee_estimate_file()
-
-        self.clear_estimates()
-
-        self.log.info("Testing estimates with RBF.")
-        self.sanity_check_rbf_estimates(self.confutxo + self.memutxo)
-
-        self.clear_estimates()
-        self.log.info("Test estimatesmartfee modes")
-        self.test_estimation_modes()
-
-        self.log.info("Testing that fee estimation is disabled in blocksonly.")
-        self.restart_node(0, ["-blocksonly"])
-        assert_raises_rpc_error(
-            -32603, "Fee estimation disabled", self.nodes[0].estimatesmartfee, 2
-        )
-
-
-if __name__ == "__main__":
-    EstimateFeeTest(__file__).main()
+#!/usr/bin/env python3
+# Copyright (c) 2014-2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test fee estimation code."""
+from copy import deepcopy
+from decimal import Decimal, ROUND_DOWN
+import os
+import random
+import time
+
+from test_framework.messages import (
+    COIN,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+    assert_not_equal,
+    assert_equal,
+    assert_greater_than,
+    assert_greater_than_or_equal,
+    assert_raises_rpc_error,
+    satoshi_round,
+)
+from test_framework.wallet import MiniWallet
+
+MAX_FILE_AGE = 60
+SECONDS_PER_HOUR = 60 * 60
+
+def small_txpuzzle_randfee(
+    wallet, from_node, conflist, unconflist, amount, min_fee, fee_increment, batch_reqs
+):
+    """Create and send a transaction with a random fee using MiniWallet.
+
+    The function takes a list of confirmed outputs and unconfirmed outputs
+    and attempts to use the confirmed list first for its inputs.
+    It adds the newly created outputs to the unconfirmed list.
+    Returns (transaction vsize, fee)."""
+
+    # It's best to exponentially distribute our random fees
+    # because the buckets are exponentially spaced.
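+    # (The base 1.1892 below is roughly 2**(1/4), so the 28 random quarter-steps
+    # span 1.1892**28 ~= 2**7 = 128, i.e. fees from 1 to 128 times fee_increment.)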
+    # Exponentially distributed from 1-128 * fee_increment
+    rand_fee = float(fee_increment) * (1.1892 ** random.randint(0, 28))
+    # Total fee ranges from min_fee to min_fee + 127*fee_increment
+    fee = min_fee - fee_increment + satoshi_round(rand_fee, rounding=ROUND_DOWN)
+    utxos_to_spend = []
+    total_in = Decimal("0.00000000")
+    while total_in <= (amount + fee) and len(conflist) > 0:
+        t = conflist.pop(0)
+        total_in += t["value"]
+        utxos_to_spend.append(t)
+    while total_in <= (amount + fee) and len(unconflist) > 0:
+        t = unconflist.pop(0)
+        total_in += t["value"]
+        utxos_to_spend.append(t)
+    if total_in <= amount + fee:
+        raise RuntimeError(f"Insufficient funds: need {amount + fee}, have {total_in}")
+    tx = wallet.create_self_transfer_multi(
+        utxos_to_spend=utxos_to_spend,
+        fee_per_output=0,
+    )["tx"]
+    tx.vout[0].nValue = int((total_in - amount - fee) * COIN)
+    tx.vout.append(deepcopy(tx.vout[0]))
+    tx.vout[1].nValue = int(amount * COIN)
+    tx.rehash()
+    txid = tx.hash
+    tx_hex = tx.serialize().hex()
+
+    batch_reqs.append(from_node.sendrawtransaction.get_request(hexstring=tx_hex, maxfeerate=0))
+    unconflist.append({"txid": txid, "vout": 0, "value": total_in - amount - fee})
+    unconflist.append({"txid": txid, "vout": 1, "value": amount})
+
+    return (tx.get_vsize(), fee)
+
+
+def check_raw_estimates(node, fees_seen):
+    """Call estimaterawfee and verify that the estimates meet certain invariants."""
+
+    delta = 1.0e-6  # account for rounding error
+    for i in range(1, 26):
+        for _, e in node.estimaterawfee(i).items():
+            feerate = float(e["feerate"])
+            assert_greater_than(feerate, 0)
+
+            if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen):
+                raise AssertionError(
+                    f"Estimated fee ({feerate}) out of range ({min(fees_seen)},{max(fees_seen)})"
+                )
+
+
+def check_smart_estimates(node, fees_seen):
+    """Call estimatesmartfee and verify that the estimates meet certain invariants."""
+
+    delta = 1.0e-6  # account for rounding error
+    last_feerate = float(max(fees_seen))
+    all_smart_estimates = [node.estimatesmartfee(i) for i in range(1, 26)]
+    mempoolMinFee = node.getmempoolinfo()["mempoolminfee"]
+    minRelaytxFee = node.getmempoolinfo()["minrelaytxfee"]
+    for i, e in enumerate(all_smart_estimates):  # estimate is for i+1
+        feerate = float(e["feerate"])
+        assert_greater_than(feerate, 0)
+        assert_greater_than_or_equal(feerate, float(mempoolMinFee))
+        assert_greater_than_or_equal(feerate, float(minRelaytxFee))
+
+        if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen):
+            raise AssertionError(
+                f"Estimated fee ({feerate}) out of range ({min(fees_seen)},{max(fees_seen)})"
+            )
+        if feerate - delta > last_feerate:
+            raise AssertionError(
+                f"Estimated fee ({feerate}) larger than last fee ({last_feerate}) for lower number of confirms"
+            )
+        last_feerate = feerate
+
+        if i == 0:
+            assert_equal(e["blocks"], 2)
+        else:
+            assert_greater_than_or_equal(i + 1, e["blocks"])
+
+
+def check_estimates(node, fees_seen):
+    check_raw_estimates(node, fees_seen)
+    check_smart_estimates(node, fees_seen)
+
+
+def make_tx(wallet, utxo, feerate):
+    """Create a 1in-1out transaction with a specific input and feerate (sat/vb)."""
+    return wallet.create_self_transfer(
+        utxo_to_spend=utxo,
+        fee_rate=Decimal(feerate * 1000) / COIN,
+    )
+
+def check_fee_estimates_btw_modes(node, expected_conservative, expected_economical):
+    fee_est_conservative = node.estimatesmartfee(1, estimate_mode="conservative")['feerate']
+    fee_est_economical = node.estimatesmartfee(1, estimate_mode="economical")['feerate']
+    fee_est_default = node.estimatesmartfee(1)['feerate']
+    assert_equal(fee_est_conservative, expected_conservative)
+    assert_equal(fee_est_economical, expected_economical)
+    assert_equal(fee_est_default, expected_economical)
+
+
+class EstimateFeeTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.num_nodes = 3
+        # whitelist peers to speed up tx relay / mempool sync
+        self.noban_tx_relay = True
+        self.extra_args = [
+            [],
+            ["-blockmaxweight=68000"],
+            ["-blockmaxweight=32000"],
+        ]
+
+    def setup_network(self):
+        """
+        We'll setup the network to have 3 nodes that all mine with different parameters.
+        But first we need to use one node to create a lot of outputs
+        which we will use to generate our transactions.
+        """
+        self.add_nodes(3, extra_args=self.extra_args)
+        # Use node0 to mine blocks for input splitting
+        # Node1 mines small blocks but that are bigger than the expected transaction rate.
+        # NOTE: the CreateNewBlock code starts counting block weight at 4,000 weight,
+        # (68k weight is room enough for 120 or so transactions)
+        # Node2 is a stingy miner, that
+        # produces too small blocks (room for only 55 or so transactions)
+
+    def transact_and_mine(self, numblocks, mining_node):
+        min_fee = Decimal("0.00001")
+        # We will now mine numblocks blocks generating on average 100 transactions between each block
+        # We shuffle our confirmed txout set before each set of transactions
+        # small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
+        # resorting to tx's that depend on the mempool when those run out
+        for _ in range(numblocks):
+            random.shuffle(self.confutxo)
+            batch_sendtx_reqs = []
+            for _ in range(random.randrange(100 - 50, 100 + 50)):
+                from_index = random.randint(1, 2)
+                (tx_bytes, fee) = small_txpuzzle_randfee(
+                    self.wallet,
+                    self.nodes[from_index],
+                    self.confutxo,
+                    self.memutxo,
+                    Decimal("0.005"),
+                    min_fee,
+                    min_fee,
+                    batch_sendtx_reqs,
+                )
+                tx_kbytes = tx_bytes / 1000.0
+                self.fees_per_kb.append(float(fee) / tx_kbytes)
+            for node in self.nodes:
+                node.batch(batch_sendtx_reqs)
+            self.sync_mempools(wait=0.1)
+            mined = mining_node.getblock(self.generate(mining_node, 1)[0], True)["tx"]
+            # update which txouts are confirmed
+            newmem = []
+            for utx in self.memutxo:
+                if utx["txid"] in mined:
+                    self.confutxo.append(utx)
+                else:
+                    newmem.append(utx)
+            self.memutxo = newmem
+
+    def initial_split(self, node):
+        """Split two coinbase UTxOs into many small coins"""
+        self.confutxo = self.wallet.send_self_transfer_multi(
+            from_node=node,
+            utxos_to_spend=[self.wallet.get_utxo() for _ in range(2)],
+            num_outputs=2048)['new_utxos']
+        while len(node.getrawmempool()) > 0:
+            self.generate(node, 1, sync_fun=self.no_op)
+
+    def sanity_check_estimates_range(self):
+        """Populate estimation buckets, assert estimates are in a sane range and
+        are strictly increasing as the target decreases."""
+        self.fees_per_kb = []
+        self.memutxo = []
+        self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")
+
+        for _ in range(2):
+            self.log.info(
+                "Creating transactions and mining them with a block size that can't keep up"
+            )
+            # Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
+            self.transact_and_mine(10, self.nodes[2])
+            check_estimates(self.nodes[1], self.fees_per_kb)
+
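+            # (With node2's small blocks the mempool backlog grows, so mostly
+            # the higher-fee transactions confirm quickly; the next step mines
+            # with node1, whose capacity just exceeds the transaction rate.)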
+            self.log.info(
+                "Creating transactions and mining them at a block size that is just big enough"
+            )
+            # Generate transactions while mining 10 more blocks, this time with node1
+            # which mines blocks with capacity just above the rate that transactions are being created
+            self.transact_and_mine(10, self.nodes[1])
+            check_estimates(self.nodes[1], self.fees_per_kb)
+
+        # Finish by mining a normal-sized block:
+        while len(self.nodes[1].getrawmempool()) > 0:
+            self.generate(self.nodes[1], 1)
+
+        self.log.info("Final estimates after emptying mempools")
+        check_estimates(self.nodes[1], self.fees_per_kb)
+
+    def test_feerate_mempoolminfee(self):
+        high_val = 3 * self.nodes[1].estimatesmartfee(1)["feerate"]
+        self.restart_node(1, extra_args=[f"-minrelaytxfee={high_val}"])
+        check_estimates(self.nodes[1], self.fees_per_kb)
+        self.restart_node(1)
+
+    def sanity_check_rbf_estimates(self, utxos):
+        """During 5 blocks, broadcast low fee transactions. Only 10% of them get
+        confirmed and the remaining ones get RBF'd with a high fee transaction at
+        the next block.
+        The block policy estimator should return the high feerate.
+        """
+        # The broadcaster and block producer
+        node = self.nodes[0]
+        miner = self.nodes[1]
+        # In sat/vb
+        low_feerate = 1
+        high_feerate = 10
+        # Cache the utxos of which to replace the spender after it failed to get
+        # confirmed
+        utxos_to_respend = []
+        txids_to_replace = []
+
+        assert_greater_than_or_equal(len(utxos), 250)
+        for _ in range(5):
+            # Broadcast 45 low fee transactions that will need to be RBF'd
+            txs = []
+            for _ in range(45):
+                u = utxos.pop(0)
+                tx = make_tx(self.wallet, u, low_feerate)
+                utxos_to_respend.append(u)
+                txids_to_replace.append(tx["txid"])
+                txs.append(tx)
+            # Broadcast 5 low fee transactions which don't need to
+            for _ in range(5):
+                tx = make_tx(self.wallet, utxos.pop(0), low_feerate)
+                txs.append(tx)
+            batch_send_tx = [node.sendrawtransaction.get_request(tx["hex"]) for tx in txs]
+            for n in self.nodes:
+                n.batch(batch_send_tx)
+            # Mine the transactions on another node
+            self.sync_mempools(wait=0.1, nodes=[node, miner])
+            for txid in txids_to_replace:
+                miner.prioritisetransaction(txid=txid, fee_delta=-COIN)
+            self.generate(miner, 1)
+            # RBF the low-fee transactions
+            while len(utxos_to_respend) > 0:
+                u = utxos_to_respend.pop(0)
+                tx = make_tx(self.wallet, u, high_feerate)
+                node.sendrawtransaction(tx["hex"])
+                txs.append(tx)
+            dec_txs = [res["result"] for res in node.batch([node.decoderawtransaction.get_request(tx["hex"]) for tx in txs])]
+            self.wallet.scan_txs(dec_txs)
+
+
+        # Mine the last replacement txs
+        self.sync_mempools(wait=0.1, nodes=[node, miner])
+        self.generate(miner, 1)
+
+        # Only 10% of the transactions were really confirmed with a low feerate,
+        # the rest needed to be RBF'd. We must return the 90% conf rate feerate.
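+        # (Unit check for the conversion below: high_feerate is 10 sat/vb, and
+        # 10 * 10**3 / COIN = 10000 / 100000000 = 0.0001 BTC/kvB.)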
+        high_feerate_kvb = Decimal(high_feerate) / COIN * 10 ** 3
+        est_feerate = node.estimatesmartfee(2)["feerate"]
+        assert_equal(est_feerate, high_feerate_kvb)
+
+    def test_old_fee_estimate_file(self):
+        # Get the initial fee rate while node is running
+        fee_rate = self.nodes[0].estimatesmartfee(1)["feerate"]
+
+        # Restart node to ensure fee_estimate.dat file is read
+        self.restart_node(0)
+        assert_equal(self.nodes[0].estimatesmartfee(1)["feerate"], fee_rate)
+
+        fee_dat = self.nodes[0].chain_path / "fee_estimates.dat"
+
+        # Stop the node and backdate the fee_estimates.dat file more than MAX_FILE_AGE
+        self.stop_node(0)
+        last_modified_time = time.time() - (MAX_FILE_AGE + 1) * SECONDS_PER_HOUR
+        os.utime(fee_dat, (last_modified_time, last_modified_time))
+
+        # Start node and ensure the fee_estimates.dat file was not read
+        self.start_node(0)
+        assert_equal(self.nodes[0].estimatesmartfee(1)["errors"], ["Insufficient data or no feerate found"])
+
+
+    def test_estimate_dat_is_flushed_periodically(self):
+        fee_dat = self.nodes[0].chain_path / "fee_estimates.dat"
+        if os.path.exists(fee_dat):
+            os.remove(fee_dat)
+
+        # Verify that fee_estimates.dat does not exist
+        assert_equal(os.path.isfile(fee_dat), False)
+
+        # Verify if the string "Flushed fee estimates to fee_estimates.dat." is present in the debug log file.
+        # If present, it indicates that fee estimates have been successfully flushed to disk.
+        with self.nodes[0].assert_debug_log(expected_msgs=["Flushed fee estimates to fee_estimates.dat."], timeout=1):
+            # Mock the scheduler for an hour to flush fee estimates to fee_estimates.dat
+            self.nodes[0].mockscheduler(SECONDS_PER_HOUR)
+
+        # Verify that fee estimates were flushed and fee_estimates.dat file is created
+        assert_equal(os.path.isfile(fee_dat), True)
+
+        # Verify that the estimates remain the same if there are no blocks in the flush interval
+        block_hash_before = self.nodes[0].getbestblockhash()
+        fee_dat_initial_content = open(fee_dat, "rb").read()
+        with self.nodes[0].assert_debug_log(expected_msgs=["Flushed fee estimates to fee_estimates.dat."], timeout=1):
+            # Mock the scheduler for an hour to flush fee estimates to fee_estimates.dat
+            self.nodes[0].mockscheduler(SECONDS_PER_HOUR)
+
+        # Verify that there were no blocks in between the flush interval
+        assert_equal(block_hash_before, self.nodes[0].getbestblockhash())
+
+        fee_dat_current_content = open(fee_dat, "rb").read()
+        assert_equal(fee_dat_current_content, fee_dat_initial_content)
+
+        # Verify that the estimates remain the same after shutdown with no blocks before shutdown
+        self.restart_node(0)
+        fee_dat_current_content = open(fee_dat, "rb").read()
+        assert_equal(fee_dat_current_content, fee_dat_initial_content)
+
+        # Verify that the estimates are not the same if new blocks were produced in the flush interval
+        with self.nodes[0].assert_debug_log(expected_msgs=["Flushed fee estimates to fee_estimates.dat."], timeout=1):
+            # Mock the scheduler for an hour to flush fee estimates to fee_estimates.dat
+            self.generate(self.nodes[0], 5, sync_fun=self.no_op)
+            self.nodes[0].mockscheduler(SECONDS_PER_HOUR)
+
+        fee_dat_current_content = open(fee_dat, "rb").read()
+        assert_not_equal(fee_dat_current_content, fee_dat_initial_content)
+
+        fee_dat_initial_content = fee_dat_current_content
+
+        # Generate blocks before shutdown and verify that the fee estimates are not the same
+        self.generate(self.nodes[0], 5, sync_fun=self.no_op)
+        self.restart_node(0)
+        fee_dat_current_content = open(fee_dat, "rb").read()
+        assert_not_equal(fee_dat_current_content, fee_dat_initial_content)
+
+
+    def test_acceptstalefeeestimates_option(self):
+        # Get the initial fee rate while node is running
+        fee_rate = self.nodes[0].estimatesmartfee(1)["feerate"]
+
+        self.stop_node(0)
+
+        fee_dat = self.nodes[0].chain_path / "fee_estimates.dat"
+
+        # Stop the node and backdate the fee_estimates.dat file more than MAX_FILE_AGE
+        last_modified_time = time.time() - (MAX_FILE_AGE + 1) * SECONDS_PER_HOUR
+        os.utime(fee_dat, (last_modified_time, last_modified_time))
+
+        # Restart node with -acceptstalefeeestimates option to ensure fee_estimate.dat file is read
+        self.start_node(0, extra_args=["-acceptstalefeeestimates"])
+        assert_equal(self.nodes[0].estimatesmartfee(1)["feerate"], fee_rate)
+
+    def clear_estimates(self):
+        self.log.info("Restarting node with fresh estimation")
+        self.stop_node(0)
+        fee_dat = self.nodes[0].chain_path / "fee_estimates.dat"
+        os.remove(fee_dat)
+        self.start_node(0)
+        self.connect_nodes(0, 1)
+        self.connect_nodes(0, 2)
+        self.sync_blocks()
+        assert_equal(self.nodes[0].estimatesmartfee(1)["errors"], ["Insufficient data or no feerate found"])
+
+    def broadcast_and_mine(self, broadcaster, miner, feerate, count):
+        """Broadcast and mine some number of transactions with a specified fee rate."""
+        for _ in range(count):
+            self.wallet.send_self_transfer(from_node=broadcaster, fee_rate=feerate)
+        self.sync_mempools()
+        self.generate(miner, 1)
+
+    def test_estimation_modes(self):
+        low_feerate = Decimal("0.001")
+        high_feerate = Decimal("0.005")
+        tx_count = 24
+        # Broadcast and mine high fee transactions for the first 12 blocks.
+        for _ in range(12):
+            self.broadcast_and_mine(self.nodes[1], self.nodes[2], high_feerate, tx_count)
+        check_fee_estimates_btw_modes(self.nodes[0], high_feerate, high_feerate)
+
+        # We now track 12 blocks; short horizon stats will start decaying.
+        # Broadcast and mine low fee transactions for the next 4 blocks.
+        for _ in range(4):
+            self.broadcast_and_mine(self.nodes[1], self.nodes[2], low_feerate, tx_count)
+        # conservative mode will consider longer time horizons while economical mode does not
+        # Check the fee estimates for both modes after mining low fee transactions.
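+        # (When no estimate_mode is passed, estimatesmartfee is expected to
+        # behave like "economical" here, which is why the helper also checks
+        # the default result against the economical one.)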
+        check_fee_estimates_btw_modes(self.nodes[0], high_feerate, low_feerate)
+
+
+    def run_test(self):
+        self.log.info("This test is time consuming, please be patient")
+        self.log.info("Splitting inputs so we can generate tx's")
+
+        # Split two coinbases into many small utxos
+        self.start_node(0)
+        self.wallet = MiniWallet(self.nodes[0])
+        self.initial_split(self.nodes[0])
+        self.log.info("Finished splitting")
+
+        # Now we can connect the other nodes, didn't want to connect them earlier
+        # so the estimates would not be affected by the splitting transactions
+        self.start_node(1)
+        self.start_node(2)
+        self.connect_nodes(1, 0)
+        self.connect_nodes(0, 2)
+        self.connect_nodes(2, 1)
+        self.sync_all()
+
+        self.log.info("Testing estimates with single transactions.")
+        self.sanity_check_estimates_range()
+
+        self.log.info("Test fee_estimates.dat is flushed periodically")
+        self.test_estimate_dat_is_flushed_periodically()
+
+        # check that the effective feerate is greater than or equal to the mempoolminfee even for high mempoolminfee
+        self.log.info(
+            "Test fee rate estimation after restarting node with high MempoolMinFee"
+        )
+        self.test_feerate_mempoolminfee()
+
+        self.log.info("Test acceptstalefeeestimates option")
+        self.test_acceptstalefeeestimates_option()
+
+        self.log.info("Test reading old fee_estimates.dat")
+        self.test_old_fee_estimate_file()
+
+        self.clear_estimates()
+
+        self.log.info("Testing estimates with RBF.")
+        self.sanity_check_rbf_estimates(self.confutxo + self.memutxo)
+
+        self.clear_estimates()
+        self.log.info("Test estimatesmartfee modes")
+        self.test_estimation_modes()
+
+        self.log.info("Testing that fee estimation is disabled in blocksonly.")
+        self.restart_node(0, ["-blocksonly"])
+        assert_raises_rpc_error(
+            -32603, "Fee estimation disabled", self.nodes[0].estimatesmartfee, 2
+        )
+
+
+if __name__ == "__main__":
+    EstimateFeeTest(__file__).main()
diff --git a/test/functional/feature_minchainwork.py b/test/functional/feature_minchainwork.py
index 62bb488f7cc068..33d2f009a05da6 100755
--- a/test/functional/feature_minchainwork.py
+++ b/test/functional/feature_minchainwork.py
@@ -1,121 +1,121 @@
-#!/usr/bin/env python3
-# Copyright (c) 2017-2022 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test logic for setting nMinimumChainWork on command line.
-
-Nodes don't consider themselves out of "initial block download" until
-their active chain has more work than nMinimumChainWork.
-
-Nodes don't download blocks from a peer unless the peer's best known block
-has more work than nMinimumChainWork.
-
-While in initial block download, nodes won't relay blocks to their peers, so
-test that this parameter functions as intended by verifying that block relay
-only succeeds past a given node once its nMinimumChainWork has been exceeded.
-""" - -import time - -from test_framework.p2p import P2PInterface, msg_getheaders -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, - assert_not_equal, -) - -# 2 hashes required per regtest block (with no difficulty adjustment) -REGTEST_WORK_PER_BLOCK = 2 - -class MinimumChainWorkTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 3 - - self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]] - self.node_min_work = [0, 101, 101] - - def setup_network(self): - # This test relies on the chain setup being: - # node0 <- node1 <- node2 - # Before leaving IBD, nodes prefer to download blocks from outbound - # peers, so ensure that we're mining on an outbound peer and testing - # block relay to inbound peers. - self.setup_nodes() - for i in range(self.num_nodes-1): - self.connect_nodes(i+1, i) - - # Set clock of node2 2 days ahead, to keep it in IBD during this test. - self.nodes[2].setmocktime(int(time.time()) + 48*60*60) - - def run_test(self): - # Start building a chain on node0. node2 shouldn't be able to sync until node1's - # minchainwork is exceeded - starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work - self.log.info(f"Testing relay across node 1 (minChainWork = {self.node_min_work[1]})") - - starting_blockcount = self.nodes[2].getblockcount() - - num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK) - self.log.info(f"Generating {num_blocks_to_generate} blocks on node0") - hashes = self.generate(self.nodes[0], num_blocks_to_generate, sync_fun=self.no_op) - - self.log.info(f"Node0 current chain work: {self.nodes[0].getblockheader(hashes[-1])['chainwork']}") - - # Sleep a few seconds and verify that node2 didn't get any new blocks - # or headers. We sleep, rather than sync_blocks(node0, node1) because - # it's reasonable either way for node1 to get the blocks, or not get - # them (since they're below node1's minchainwork). - time.sleep(3) - - self.log.info("Verifying node 2 has no more blocks than before") - self.log.info(f"Blockcounts: {[n.getblockcount() for n in self.nodes]}") - # Node2 shouldn't have any new headers yet, because node1 should not - # have relayed anything. - assert_equal(len(self.nodes[2].getchaintips()), 1) - assert_equal(self.nodes[2].getchaintips()[0]['height'], 0) - - assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash() - assert_equal(self.nodes[2].getblockcount(), starting_blockcount) - - self.log.info("Check that getheaders requests to node2 are ignored") - peer = self.nodes[2].add_p2p_connection(P2PInterface()) - msg = msg_getheaders() - msg.locator.vHave = [int(self.nodes[2].getbestblockhash(), 16)] - msg.hashstop = 0 - peer.send_and_ping(msg) - time.sleep(5) - assert "headers" not in peer.last_message or len(peer.last_message["headers"].headers) == 0 - - self.log.info("Generating one more block") - self.generate(self.nodes[0], 1) - - self.log.info("Verifying nodes are all synced") - - # Because nodes in regtest are all manual connections (eg using - # addnode), node1 should not have disconnected node0. If not for that, - # we'd expect node1 to have disconnected node0 for serving an - # insufficient work chain, in which case we'd need to reconnect them to - # continue the test. 
-
-        self.sync_all()
-        self.log.info(f"Blockcounts: {[n.getblockcount() for n in self.nodes]}")
-
-        self.log.info("Test that getheaders requests to node2 are not ignored")
-        peer.send_and_ping(msg)
-        assert "headers" in peer.last_message
-
-        # Verify that node2 is in fact still in IBD (otherwise this test may
-        # not be exercising the logic we want!)
-        assert_equal(self.nodes[2].getblockchaininfo()['initialblockdownload'], True)
-
-        self.log.info("Test -minimumchainwork with a non-hex value")
-        self.stop_node(0)
-        self.nodes[0].assert_start_raises_init_error(
-            ["-minimumchainwork=test"],
-            expected_msg='Error: Invalid minimum work specified (test), must be up to 64 hex digits',
-        )
-
-
-if __name__ == '__main__':
-    MinimumChainWorkTest(__file__).main()
+#!/usr/bin/env python3
+# Copyright (c) 2017-2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test logic for setting nMinimumChainWork on command line.
+
+Nodes don't consider themselves out of "initial block download" until
+their active chain has more work than nMinimumChainWork.
+
+Nodes don't download blocks from a peer unless the peer's best known block
+has more work than nMinimumChainWork.
+
+While in initial block download, nodes won't relay blocks to their peers, so
+test that this parameter functions as intended by verifying that block relay
+only succeeds past a given node once its nMinimumChainWork has been exceeded.
+"""
+
+import time
+
+from test_framework.p2p import P2PInterface, msg_getheaders
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+    assert_equal,
+    assert_not_equal,
+)
+
+# 2 hashes required per regtest block (with no difficulty adjustment)
+REGTEST_WORK_PER_BLOCK = 2
+
+class MinimumChainWorkTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.setup_clean_chain = True
+        self.num_nodes = 3
+
+        self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
+        self.node_min_work = [0, 101, 101]
+
+    def setup_network(self):
+        # This test relies on the chain setup being:
+        # node0 <- node1 <- node2
+        # Before leaving IBD, nodes prefer to download blocks from outbound
+        # peers, so ensure that we're mining on an outbound peer and testing
+        # block relay to inbound peers.
+        self.setup_nodes()
+        for i in range(self.num_nodes-1):
+            self.connect_nodes(i+1, i)
+
+        # Set clock of node2 2 days ahead, to keep it in IBD during this test.
+        self.nodes[2].setmocktime(int(time.time()) + 48*60*60)
+
+    def run_test(self):
+        # Start building a chain on node0. node2 shouldn't be able to sync until node1's
+        # minchainwork is exceeded
+        starting_chain_work = REGTEST_WORK_PER_BLOCK  # Genesis block's work
+        self.log.info(f"Testing relay across node 1 (minChainWork = {self.node_min_work[1]})")
+
+        starting_blockcount = self.nodes[2].getblockcount()
+
+        num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
+        self.log.info(f"Generating {num_blocks_to_generate} blocks on node0")
+        hashes = self.generate(self.nodes[0], num_blocks_to_generate, sync_fun=self.no_op)
+
+        self.log.info(f"Node0 current chain work: {self.nodes[0].getblockheader(hashes[-1])['chainwork']}")
+
+        # Sleep a few seconds and verify that node2 didn't get any new blocks
+        # or headers.
+        # We sleep, rather than sync_blocks(node0, node1) because
+        # it's reasonable either way for node1 to get the blocks, or not get
+        # them (since they're below node1's minchainwork).
+        time.sleep(3)
+
+        self.log.info("Verifying node 2 has no more blocks than before")
+        self.log.info(f"Blockcounts: {[n.getblockcount() for n in self.nodes]}")
+        # Node2 shouldn't have any new headers yet, because node1 should not
+        # have relayed anything.
+        assert_equal(len(self.nodes[2].getchaintips()), 1)
+        assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
+
+        assert_not_equal(self.nodes[1].getbestblockhash(), self.nodes[0].getbestblockhash())
+        assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
+
+        self.log.info("Check that getheaders requests to node2 are ignored")
+        peer = self.nodes[2].add_p2p_connection(P2PInterface())
+        msg = msg_getheaders()
+        msg.locator.vHave = [int(self.nodes[2].getbestblockhash(), 16)]
+        msg.hashstop = 0
+        peer.send_and_ping(msg)
+        time.sleep(5)
+        assert "headers" not in peer.last_message or len(peer.last_message["headers"].headers) == 0
+
+        self.log.info("Generating one more block")
+        self.generate(self.nodes[0], 1)
+
+        self.log.info("Verifying nodes are all synced")
+
+        # Because nodes in regtest are all manual connections (eg using
+        # addnode), node1 should not have disconnected node0. If not for that,
+        # we'd expect node1 to have disconnected node0 for serving an
+        # insufficient work chain, in which case we'd need to reconnect them to
+        # continue the test.
+
+        self.sync_all()
+        self.log.info(f"Blockcounts: {[n.getblockcount() for n in self.nodes]}")
+
+        self.log.info("Test that getheaders requests to node2 are not ignored")
+        peer.send_and_ping(msg)
+        assert "headers" in peer.last_message
+
+        # Verify that node2 is in fact still in IBD (otherwise this test may
+        # not be exercising the logic we want!)
+        assert_equal(self.nodes[2].getblockchaininfo()['initialblockdownload'], True)
+
+        self.log.info("Test -minimumchainwork with a non-hex value")
+        self.stop_node(0)
+        self.nodes[0].assert_start_raises_init_error(
+            ["-minimumchainwork=test"],
+            expected_msg='Error: Invalid minimum work specified (test), must be up to 64 hex digits',
+        )
+
+
+if __name__ == '__main__':
+    MinimumChainWorkTest(__file__).main()
diff --git a/test/functional/feature_taproot.py b/test/functional/feature_taproot.py
index 7f64588eb1239f..f37d8f18376825 100755
--- a/test/functional/feature_taproot.py
+++ b/test/functional/feature_taproot.py
@@ -1,1770 +1,1770 @@
-#!/usr/bin/env python3
-# Copyright (c) 2019-2022 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-# Test Taproot softfork (BIPs 340-342) - -from test_framework.blocktools import ( - COINBASE_MATURITY, - create_coinbase, - create_block, - add_witness_commitment, - MAX_BLOCK_SIGOPS_WEIGHT, -) -from test_framework.messages import ( - COutPoint, - CTransaction, - CTxIn, - CTxInWitness, - CTxOut, - SEQUENCE_FINAL, - tx_from_hex, - WITNESS_SCALE_FACTOR, -) -from test_framework.script import ( - ANNEX_TAG, - BIP341_sha_amounts, - BIP341_sha_outputs, - BIP341_sha_prevouts, - BIP341_sha_scriptpubkeys, - BIP341_sha_sequences, - CScript, - CScriptNum, - CScriptOp, - hash256, - LEAF_VERSION_TAPSCRIPT, - LegacySignatureMsg, - LOCKTIME_THRESHOLD, - MAX_SCRIPT_ELEMENT_SIZE, - OP_0, - OP_1, - OP_2, - OP_3, - OP_4, - OP_5, - OP_6, - OP_7, - OP_8, - OP_9, - OP_10, - OP_11, - OP_12, - OP_16, - OP_2DROP, - OP_2DUP, - OP_CHECKMULTISIG, - OP_CHECKMULTISIGVERIFY, - OP_CHECKSIG, - OP_CHECKSIGADD, - OP_CHECKSIGVERIFY, - OP_CODESEPARATOR, - OP_DROP, - OP_DUP, - OP_ELSE, - OP_ENDIF, - OP_EQUAL, - OP_EQUALVERIFY, - OP_IF, - OP_NOP, - OP_NOT, - OP_NOTIF, - OP_PUSHDATA1, - OP_RETURN, - OP_SWAP, - OP_VERIFY, - SIGHASH_DEFAULT, - SIGHASH_ALL, - SIGHASH_NONE, - SIGHASH_SINGLE, - SIGHASH_ANYONECANPAY, - SegwitV0SignatureMsg, - TaggedHash, - TaprootSignatureMsg, - is_op_success, - taproot_construct, -) -from test_framework.script_util import ( - key_to_p2pk_script, - key_to_p2pkh_script, - key_to_p2wpkh_script, - keyhash_to_p2pkh_script, - script_to_p2sh_script, - script_to_p2wsh_script, -) -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_not_equal, - assert_raises_rpc_error, - assert_equal, -) -from test_framework.wallet_util import generate_keypair -from test_framework.key import ( - generate_privkey, - compute_xonly_pubkey, - sign_schnorr, - tweak_add_privkey, - ECKey, -) -from test_framework.crypto import secp256k1 -from test_framework.address import ( - hash160, - program_to_witness, -) -from collections import OrderedDict, namedtuple -import json -import hashlib -import os -import random - -# Whether or not to output generated test vectors, in JSON format. -GEN_TEST_VECTORS = False - -# === Framework for building spending transactions. === -# -# The computation is represented as a "context" dict, whose entries store potentially-unevaluated expressions that -# refer to lower-level ones. By overwriting these expression, many aspects - both high and low level - of the signing -# process can be overridden. -# -# Specifically, a context object is a dict that maps names to compositions of: -# - values -# - lists of values -# - callables which, when fed the context object as argument, produce any of these -# -# The DEFAULT_CONTEXT object specifies a standard signing process, with many overridable knobs. -# -# The get(ctx, name) function can evaluate a name, and cache its result in the context. -# getter(name) can be used to construct a callable that evaluates name. For example: -# -# ctx1 = {**DEFAULT_CONTEXT, inputs=[getter("sign"), b'\x01']} -# -# creates a context where the script inputs are a signature plus the bytes 0x01. -# -# override(expr, name1=expr1, name2=expr2, ...) can be used to cause an expression to be evaluated in a selectively -# modified context. For example: -# -# ctx2 = {**DEFAULT_CONTEXT, sighash=override(default_sighash, hashtype=SIGHASH_DEFAULT)} -# -# creates a context ctx2 where the sighash is modified to use hashtype=SIGHASH_DEFAULT. 
This differs from -# -# ctx3 = {**DEFAULT_CONTEXT, hashtype=SIGHASH_DEFAULT} -# -# in that ctx3 will globally use hashtype=SIGHASH_DEFAULT (including in the hashtype byte appended to the signature) -# while ctx2 only uses the modified hashtype inside the sighash calculation. - -def deep_eval(ctx, expr): - """Recursively replace any callables c in expr (including inside lists) with c(ctx).""" - while callable(expr): - expr = expr(ctx) - if isinstance(expr, list): - expr = [deep_eval(ctx, x) for x in expr] - return expr - -# Data type to represent fully-evaluated expressions in a context dict (so we can avoid reevaluating them). -Final = namedtuple("Final", "value") - -def get(ctx, name): - """Evaluate name in context ctx.""" - assert name in ctx, "Missing '%s' in context" % name - expr = ctx[name] - if not isinstance(expr, Final): - # Evaluate and cache the result. - expr = Final(deep_eval(ctx, expr)) - ctx[name] = expr - return expr.value - -def getter(name): - """Return a callable that evaluates name in its passed context.""" - return lambda ctx: get(ctx, name) - -def override(expr, **kwargs): - """Return a callable that evaluates expr in a modified context.""" - return lambda ctx: deep_eval({**ctx, **kwargs}, expr) - -# === Implementations for the various default expressions in DEFAULT_CONTEXT === - -def default_hashtype(ctx): - """Default expression for "hashtype": SIGHASH_DEFAULT for taproot, SIGHASH_ALL otherwise.""" - mode = get(ctx, "mode") - if mode == "taproot": - return SIGHASH_DEFAULT - else: - return SIGHASH_ALL - -def default_tapleaf(ctx): - """Default expression for "tapleaf": looking up leaf in tap[2].""" - return get(ctx, "tap").leaves[get(ctx, "leaf")] - -def default_script_taproot(ctx): - """Default expression for "script_taproot": tapleaf.script.""" - return get(ctx, "tapleaf").script - -def default_leafversion(ctx): - """Default expression for "leafversion": tapleaf.version""" - return get(ctx, "tapleaf").version - -def default_negflag(ctx): - """Default expression for "negflag": tap.negflag.""" - return get(ctx, "tap").negflag - -def default_pubkey_internal(ctx): - """Default expression for "pubkey_internal": tap.internal_pubkey.""" - return get(ctx, "tap").internal_pubkey - -def default_merklebranch(ctx): - """Default expression for "merklebranch": tapleaf.merklebranch.""" - return get(ctx, "tapleaf").merklebranch - -def default_controlblock(ctx): - """Default expression for "controlblock": combine leafversion, negflag, pubkey_internal, merklebranch.""" - return bytes([get(ctx, "leafversion") + get(ctx, "negflag")]) + get(ctx, "pubkey_internal") + get(ctx, "merklebranch") - -def default_sigmsg(ctx): - """Default expression for "sigmsg": depending on mode, compute BIP341, BIP143, or legacy sigmsg.""" - tx = get(ctx, "tx") - idx = get(ctx, "idx") - hashtype = get(ctx, "hashtype_actual") - mode = get(ctx, "mode") - if mode == "taproot": - # BIP341 signature hash - utxos = get(ctx, "utxos") - annex = get(ctx, "annex") - if get(ctx, "leaf") is not None: - codeseppos = get(ctx, "codeseppos") - leaf_ver = get(ctx, "leafversion") - script = get(ctx, "script_taproot") - return TaprootSignatureMsg(tx, utxos, hashtype, idx, scriptpath=True, leaf_script=script, leaf_ver=leaf_ver, codeseparator_pos=codeseppos, annex=annex) - else: - return TaprootSignatureMsg(tx, utxos, hashtype, idx, scriptpath=False, annex=annex) - elif mode == "witv0": - # BIP143 signature hash - scriptcode = get(ctx, "scriptcode") - utxos = get(ctx, "utxos") - return SegwitV0SignatureMsg(scriptcode, tx, idx, 
hashtype, utxos[idx].nValue) - else: - # Pre-segwit signature hash - scriptcode = get(ctx, "scriptcode") - return LegacySignatureMsg(scriptcode, tx, idx, hashtype)[0] - -def default_sighash(ctx): - """Default expression for "sighash": depending on mode, compute tagged hash or dsha256 of sigmsg.""" - msg = get(ctx, "sigmsg") - mode = get(ctx, "mode") - if mode == "taproot": - return TaggedHash("TapSighash", msg) - else: - if msg is None: - return (1).to_bytes(32, 'little') - else: - return hash256(msg) - -def default_tweak(ctx): - """Default expression for "tweak": None if a leaf is specified, tap[0] otherwise.""" - if get(ctx, "leaf") is None: - return get(ctx, "tap").tweak - return None - -def default_key_tweaked(ctx): - """Default expression for "key_tweaked": key if tweak is None, tweaked with it otherwise.""" - key = get(ctx, "key") - tweak = get(ctx, "tweak") - if tweak is None: - return key - else: - return tweak_add_privkey(key, tweak) - -def default_signature(ctx): - """Default expression for "signature": BIP340 signature or ECDSA signature depending on mode.""" - sighash = get(ctx, "sighash") - deterministic = get(ctx, "deterministic") - if get(ctx, "mode") == "taproot": - key = get(ctx, "key_tweaked") - flip_r = get(ctx, "flag_flip_r") - flip_p = get(ctx, "flag_flip_p") - aux = bytes([0] * 32) - if not deterministic: - aux = random.getrandbits(256).to_bytes(32, 'big') - return sign_schnorr(key, sighash, flip_r=flip_r, flip_p=flip_p, aux=aux) - else: - key = get(ctx, "key") - return key.sign_ecdsa(sighash, rfc6979=deterministic) - -def default_hashtype_actual(ctx): - """Default expression for "hashtype_actual": hashtype, unless mismatching SIGHASH_SINGLE in taproot.""" - hashtype = get(ctx, "hashtype") - mode = get(ctx, "mode") - if mode != "taproot": - return hashtype - idx = get(ctx, "idx") - tx = get(ctx, "tx") - if hashtype & 3 == SIGHASH_SINGLE and idx >= len(tx.vout): - return (hashtype & ~3) | SIGHASH_NONE - return hashtype - -def default_bytes_hashtype(ctx): - """Default expression for "bytes_hashtype": bytes([hashtype_actual]) if not 0, b"" otherwise.""" - return bytes([x for x in [get(ctx, "hashtype_actual")] if x != 0]) - -def default_sign(ctx): - """Default expression for "sign": concatenation of signature and bytes_hashtype.""" - return get(ctx, "signature") + get(ctx, "bytes_hashtype") - -def default_inputs_keypath(ctx): - """Default expression for "inputs_keypath": a signature.""" - return [get(ctx, "sign")] - -def default_witness_taproot(ctx): - """Default expression for "witness_taproot", consisting of inputs, script, control block, and annex as needed.""" - annex = get(ctx, "annex") - suffix_annex = [] - if annex is not None: - suffix_annex = [annex] - if get(ctx, "leaf") is None: - return get(ctx, "inputs_keypath") + suffix_annex - else: - return get(ctx, "inputs") + [bytes(get(ctx, "script_taproot")), get(ctx, "controlblock")] + suffix_annex - -def default_witness_witv0(ctx): - """Default expression for "witness_witv0", consisting of inputs and witness script, as needed.""" - script = get(ctx, "script_witv0") - inputs = get(ctx, "inputs") - if script is None: - return inputs - else: - return inputs + [script] - -def default_witness(ctx): - """Default expression for "witness", delegating to "witness_taproot" or "witness_witv0" as needed.""" - mode = get(ctx, "mode") - if mode == "taproot": - return get(ctx, "witness_taproot") - elif mode == "witv0": - return get(ctx, "witness_witv0") - else: - return [] - -def default_scriptsig(ctx): - """Default expression for 
"scriptsig", consisting of inputs and redeemscript, as needed.""" - scriptsig = [] - mode = get(ctx, "mode") - if mode == "legacy": - scriptsig = get(ctx, "inputs") - redeemscript = get(ctx, "script_p2sh") - if redeemscript is not None: - scriptsig += [bytes(redeemscript)] - return scriptsig - -# The default context object. -DEFAULT_CONTEXT = { - # == The main expressions to evaluate. Only override these for unusual or invalid spends. == - # The overall witness stack, as a list of bytes objects. - "witness": default_witness, - # The overall scriptsig, as a list of CScript objects (to be concatenated) and bytes objects (to be pushed) - "scriptsig": default_scriptsig, - - # == Expressions you'll generally only override for intentionally invalid spends. == - # The witness stack for spending a taproot output. - "witness_taproot": default_witness_taproot, - # The witness stack for spending a P2WPKH/P2WSH output. - "witness_witv0": default_witness_witv0, - # The script inputs for a taproot key path spend. - "inputs_keypath": default_inputs_keypath, - # The actual hashtype to use (usually equal to hashtype, but in taproot SIGHASH_SINGLE is not always allowed). - "hashtype_actual": default_hashtype_actual, - # The bytes object for a full signature (including hashtype byte, if needed). - "bytes_hashtype": default_bytes_hashtype, - # A full script signature (bytes including hashtype, if needed) - "sign": default_sign, - # An ECDSA or Schnorr signature (excluding hashtype byte). - "signature": default_signature, - # The 32-byte tweaked key (equal to key for script path spends, or key+tweak for key path spends). - "key_tweaked": default_key_tweaked, - # The tweak to use (None for script path spends, the actual tweak for key path spends). - "tweak": default_tweak, - # The sigmsg value (preimage of sighash) - "sigmsg": default_sigmsg, - # The sighash value (32 bytes) - "sighash": default_sighash, - # The information about the chosen script path spend (TaprootLeafInfo object). - "tapleaf": default_tapleaf, - # The script to push, and include in the sighash, for a taproot script path spend. - "script_taproot": default_script_taproot, - # The internal pubkey for a taproot script path spend (32 bytes). - "pubkey_internal": default_pubkey_internal, - # The negation flag of the internal pubkey for a taproot script path spend. - "negflag": default_negflag, - # The leaf version to include in the sighash (this does not affect the one in the control block). - "leafversion": default_leafversion, - # The Merkle path to include in the control block for a script path spend. - "merklebranch": default_merklebranch, - # The control block to push for a taproot script path spend. - "controlblock": default_controlblock, - # Whether to produce signatures with invalid P sign (Schnorr signatures only). - "flag_flip_p": False, - # Whether to produce signatures with invalid R sign (Schnorr signatures only). - "flag_flip_r": False, - - # == Parameters that can be changed without invalidating, but do have a default: == - # The hashtype (as an integer). - "hashtype": default_hashtype, - # The annex (only when mode=="taproot"). - "annex": None, - # The codeseparator position (only when mode=="taproot"). - "codeseppos": -1, - # The redeemscript to add to the scriptSig (if P2SH; None implies not P2SH). - "script_p2sh": None, - # The script to add to the witness in (if P2WSH; None implies P2WPKH) - "script_witv0": None, - # The leaf to use in taproot spends (if script path spend; None implies key path spend). 
- "leaf": None, - # The input arguments to provide to the executed script - "inputs": [], - # Use deterministic signing nonces - "deterministic": False, - - # == Parameters to be set before evaluation: == - # - mode: what spending style to use ("taproot", "witv0", or "legacy"). - # - key: the (untweaked) private key to sign with (ECKey object for ECDSA, 32 bytes for Schnorr). - # - tap: the TaprootInfo object (see taproot_construct; needed in mode=="taproot"). - # - tx: the transaction to sign. - # - utxos: the UTXOs being spent (needed in mode=="witv0" and mode=="taproot"). - # - idx: the input position being signed. - # - scriptcode: the scriptcode to include in legacy and witv0 sighashes. -} - -def flatten(lst): - ret = [] - for elem in lst: - if isinstance(elem, list): - ret += flatten(elem) - else: - ret.append(elem) - return ret - - -def spend(tx, idx, utxos, **kwargs): - """Sign transaction input idx of tx, provided utxos is the list of outputs being spent. - - Additional arguments may be provided that override any aspect of the signing process. - See DEFAULT_CONTEXT above for what can be overridden, and what must be provided. - """ - - ctx = {**DEFAULT_CONTEXT, "tx":tx, "idx":idx, "utxos":utxos, **kwargs} - - def to_script(elem): - """If fed a CScript, return it; if fed bytes, return a CScript that pushes it.""" - if isinstance(elem, CScript): - return elem - else: - return CScript([elem]) - - scriptsig_list = flatten(get(ctx, "scriptsig")) - scriptsig = CScript(b"".join(bytes(to_script(elem)) for elem in scriptsig_list)) - witness_stack = flatten(get(ctx, "witness")) - return (scriptsig, witness_stack) - - -# === Spender objects === -# -# Each spender is a tuple of: -# - A scriptPubKey which is to be spent from (CScript) -# - A comment describing the test (string) -# - Whether the spending (on itself) is expected to be standard (bool) -# - A tx-signing lambda returning (scriptsig, witness_stack), taking as inputs: -# - A transaction to sign (CTransaction) -# - An input position (int) -# - The spent UTXOs by this transaction (list of CTxOut) -# - Whether to produce a valid spend (bool) -# - A string with an expected error message for failure case if known -# - The (pre-taproot) sigops weight consumed by a successful spend -# - Whether this spend cannot fail -# - Whether this test demands being placed in a txin with no corresponding txout (for testing SIGHASH_SINGLE behavior) - -Spender = namedtuple("Spender", "script,comment,is_standard,sat_function,err_msg,sigops_weight,no_fail,need_vin_vout_mismatch") - - -def make_spender(comment, *, tap=None, witv0=False, script=None, pkh=None, p2sh=False, spk_mutate_pre_p2sh=None, failure=None, standard=True, err_msg=None, sigops_weight=0, need_vin_vout_mismatch=False, **kwargs): - """Helper for constructing Spender objects using the context signing framework. 
- - * tap: a TaprootInfo object (see taproot_construct), for Taproot spends (cannot be combined with pkh, witv0, or script) - * witv0: boolean indicating the use of witness v0 spending (needs one of script or pkh) - * script: the actual script executed (for bare/P2WSH/P2SH spending) - * pkh: the public key for P2PKH or P2WPKH spending - * p2sh: whether the output is P2SH wrapper (this is supported even for Taproot, where it makes the output unencumbered) - * spk_mutate_pre_p2sh: a callable to be applied to the script (before potentially P2SH-wrapping it) - * failure: a dict of entries to override in the context when intentionally failing to spend (if None, no_fail will be set) - * standard: whether the (valid version of) spending is expected to be standard - * err_msg: a string with an expected error message for failure (or None, if not cared about) - * sigops_weight: the pre-taproot sigops weight consumed by a successful spend - * need_vin_vout_mismatch: whether this test requires being tested in a transaction input that has no corresponding - transaction output. - """ - - conf = dict() - - # Compute scriptPubKey and set useful defaults based on the inputs. - if witv0: - assert tap is None - conf["mode"] = "witv0" - if pkh is not None: - # P2WPKH - assert script is None - pubkeyhash = hash160(pkh) - spk = key_to_p2wpkh_script(pkh) - conf["scriptcode"] = keyhash_to_p2pkh_script(pubkeyhash) - conf["script_witv0"] = None - conf["inputs"] = [getter("sign"), pkh] - elif script is not None: - # P2WSH - spk = script_to_p2wsh_script(script) - conf["scriptcode"] = script - conf["script_witv0"] = script - else: - assert False - elif tap is None: - conf["mode"] = "legacy" - if pkh is not None: - # P2PKH - assert script is None - pubkeyhash = hash160(pkh) - spk = keyhash_to_p2pkh_script(pubkeyhash) - conf["scriptcode"] = spk - conf["inputs"] = [getter("sign"), pkh] - elif script is not None: - # bare - spk = script - conf["scriptcode"] = script - else: - assert False - else: - assert script is None - conf["mode"] = "taproot" - conf["tap"] = tap - spk = tap.scriptPubKey - - if spk_mutate_pre_p2sh is not None: - spk = spk_mutate_pre_p2sh(spk) - - if p2sh: - # P2SH wrapper can be combined with anything else - conf["script_p2sh"] = spk - spk = script_to_p2sh_script(spk) - - conf = {**conf, **kwargs} - - def sat_fn(tx, idx, utxos, valid): - if valid: - return spend(tx, idx, utxos, **conf) - else: - assert failure is not None - return spend(tx, idx, utxos, **{**conf, **failure}) - - return Spender(script=spk, comment=comment, is_standard=standard, sat_function=sat_fn, err_msg=err_msg, sigops_weight=sigops_weight, no_fail=failure is None, need_vin_vout_mismatch=need_vin_vout_mismatch) - -def add_spender(spenders, *args, **kwargs): - """Make a spender using make_spender, and add it to spenders.""" - spenders.append(make_spender(*args, **kwargs)) - -# === Helpers for the test === - -def random_checksig_style(pubkey): - """Creates a random CHECKSIG* tapscript that would succeed with only the valid signature on witness stack.""" - opcode = random.choice([OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKSIGADD]) - if opcode == OP_CHECKSIGVERIFY: - ret = CScript([pubkey, opcode, OP_1]) - elif opcode == OP_CHECKSIGADD: - num = random.choice([0, 0x7fffffff, -0x7fffffff]) - ret = CScript([num, pubkey, opcode, num + 1, OP_EQUAL]) - else: - ret = CScript([pubkey, opcode]) - return bytes(ret) - -def bitflipper(expr): - """Return a callable that evaluates expr and returns it with a random bitflip.""" - def fn(ctx): - sub =
deep_eval(ctx, expr) - assert isinstance(sub, bytes) - return (int.from_bytes(sub, 'little') ^ (1 << random.randrange(len(sub) * 8))).to_bytes(len(sub), 'little') - return fn - -def zero_appender(expr): - """Return a callable that evaluates expr and returns it with a zero added.""" - return lambda ctx: deep_eval(ctx, expr) + b"\x00" - -def byte_popper(expr): - """Return a callable that evaluates expr and returns it with its last byte removed.""" - return lambda ctx: deep_eval(ctx, expr)[:-1] - -# Expected error strings - -ERR_SIG_SIZE = {"err_msg": "Invalid Schnorr signature size"} -ERR_SIG_HASHTYPE = {"err_msg": "Invalid Schnorr signature hash type"} -ERR_SIG_SCHNORR = {"err_msg": "Invalid Schnorr signature"} -ERR_OP_RETURN = {"err_msg": "OP_RETURN was encountered"} -ERR_CONTROLBLOCK_SIZE = {"err_msg": "Invalid Taproot control block size"} -ERR_WITNESS_PROGRAM_MISMATCH = {"err_msg": "Witness program hash mismatch"} -ERR_PUSH_LIMIT = {"err_msg": "Push value size limit exceeded"} -ERR_DISABLED_OPCODE = {"err_msg": "Attempted to use a disabled opcode"} -ERR_TAPSCRIPT_CHECKMULTISIG = {"err_msg": "OP_CHECKMULTISIG(VERIFY) is not available in tapscript"} -ERR_MINIMALIF = {"err_msg": "OP_IF/NOTIF argument must be minimal in tapscript"} -ERR_UNKNOWN_PUBKEY = {"err_msg": "Public key is neither compressed or uncompressed"} -ERR_STACK_SIZE = {"err_msg": "Stack size limit exceeded"} -ERR_CLEANSTACK = {"err_msg": "Stack size must be exactly one after execution"} -ERR_STACK_EMPTY = {"err_msg": "Operation not valid with the current stack size"} -ERR_SIGOPS_RATIO = {"err_msg": "Too much signature validation relative to witness weight"} -ERR_UNDECODABLE = {"err_msg": "Opcode missing or not understood"} -ERR_NO_SUCCESS = {"err_msg": "Script evaluated without error but finished with a false/empty top stack element"} -ERR_EMPTY_WITNESS = {"err_msg": "Witness program was passed an empty witness"} -ERR_CHECKSIGVERIFY = {"err_msg": "Script failed an OP_CHECKSIGVERIFY operation"} - -VALID_SIGHASHES_ECDSA = [ - SIGHASH_ALL, - SIGHASH_NONE, - SIGHASH_SINGLE, - SIGHASH_ANYONECANPAY + SIGHASH_ALL, - SIGHASH_ANYONECANPAY + SIGHASH_NONE, - SIGHASH_ANYONECANPAY + SIGHASH_SINGLE -] - -VALID_SIGHASHES_TAPROOT = [SIGHASH_DEFAULT] + VALID_SIGHASHES_ECDSA - -VALID_SIGHASHES_TAPROOT_SINGLE = [ - SIGHASH_SINGLE, - SIGHASH_ANYONECANPAY + SIGHASH_SINGLE -] - -VALID_SIGHASHES_TAPROOT_NO_SINGLE = [h for h in VALID_SIGHASHES_TAPROOT if h not in VALID_SIGHASHES_TAPROOT_SINGLE] - -SIGHASH_BITFLIP = {"failure": {"sighash": bitflipper(default_sighash)}} -SIG_POP_BYTE = {"failure": {"sign": byte_popper(default_sign)}} -SINGLE_SIG = {"inputs": [getter("sign")]} -SIG_ADD_ZERO = {"failure": {"sign": zero_appender(default_sign)}} - -DUST_LIMIT = 600 -MIN_FEE = 50000 - -# === Actual test cases === - - -def spenders_taproot_active(): - """Return a list of Spenders for testing post-Taproot activation behavior.""" - - secs = [generate_privkey() for _ in range(8)] - pubs = [compute_xonly_pubkey(sec)[0] for sec in secs] - - spenders = [] - - # == Tests for BIP340 signature validation. == - # These are primarily tested through the test vectors implemented in libsecp256k1, and in src/tests/key_tests.cpp. - # Some things are tested programmatically as well here. - - tap = taproot_construct(pubs[0]) - # Test with key with bit flipped. - add_spender(spenders, "sig/key", tap=tap, key=secs[0], failure={"key_tweaked": bitflipper(default_key_tweaked)}, **ERR_SIG_SCHNORR) - # Test with sighash with bit flipped. 
- add_spender(spenders, "sig/sighash", tap=tap, key=secs[0], failure={"sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR) - # Test with invalid R sign. - add_spender(spenders, "sig/flip_r", tap=tap, key=secs[0], failure={"flag_flip_r": True}, **ERR_SIG_SCHNORR) - # Test with invalid P sign. - add_spender(spenders, "sig/flip_p", tap=tap, key=secs[0], failure={"flag_flip_p": True}, **ERR_SIG_SCHNORR) - # Test with signature with bit flipped. - add_spender(spenders, "sig/bitflip", tap=tap, key=secs[0], failure={"signature": bitflipper(default_signature)}, **ERR_SIG_SCHNORR) - - # == Test involving an internal public key not on the curve == - - # X-only public keys are 32 bytes, but not every 32-byte array is a valid public key; only - # around 50% of them are. This does not affect users using correct software; these "keys" have - # no corresponding private key, and thus will never appear as output of key - # generation/derivation/tweaking. - # - # Using an invalid public key as P2TR output key makes the UTXO unspendable. Revealing an - # invalid public key as internal key in a P2TR script path spend also makes the spend invalid. - # These conditions are explicitly spelled out in BIP341. - # - # It is however hard to create test vectors for this, because it involves "guessing" how a - # hypothetical incorrect implementation deals with an obviously-invalid condition, and making - # sure that guessed behavior (accepting it in certain condition) doesn't occur. - # - # The test case added here tries to detect a very specific bug a verifier could have: if they - # don't verify whether or not a revealed internal public key in a script path spend is valid, - # and (correctly) implement output_key == tweak(internal_key, tweakval) but (incorrectly) treat - # tweak(invalid_key, tweakval) as equal the public key corresponding to private key tweakval. - # This may seem like a far-fetched edge condition to test for, but in fact, the BIP341 wallet - # pseudocode did exactly that (but obviously only triggerable by someone invoking the tweaking - # function with an invalid public key, which shouldn't happen). - - # Generate an invalid public key - while True: - invalid_pub = random.randbytes(32) - if not secp256k1.GE.is_valid_x(int.from_bytes(invalid_pub, 'big')): - break - - # Implement a test case that detects validation logic which maps invalid public keys to the - # point at infinity in the tweaking logic. - tap = taproot_construct(invalid_pub, [("true", CScript([OP_1]))], treat_internal_as_infinity=True) - add_spender(spenders, "output/invalid_x", tap=tap, key_tweaked=tap.tweak, failure={"leaf": "true", "inputs": []}, **ERR_WITNESS_PROGRAM_MISMATCH) - - # Do the same thing without invalid point, to make sure there is no mistake in the test logic. - tap = taproot_construct(pubs[0], [("true", CScript([OP_1]))]) - add_spender(spenders, "output/invalid_x_mock", tap=tap, key=secs[0], leaf="true", inputs=[]) - - # == Tests for signature hashing == - - # Run all tests once with no annex, and once with a valid random annex. 
- for annex in [None, lambda _: bytes([ANNEX_TAG]) + random.randbytes(random.randrange(0, 250))]: - # Non-empty annex is non-standard - no_annex = annex is None - - # Sighash mutation tests (test all sighash combinations) - for hashtype in VALID_SIGHASHES_TAPROOT: - common = {"annex": annex, "hashtype": hashtype, "standard": no_annex} - - # Pure pubkey - tap = taproot_construct(pubs[0]) - add_spender(spenders, "sighash/purepk", tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) - - # Pubkey/P2PK script combination - scripts = [("s0", CScript(random_checksig_style(pubs[1])))] - tap = taproot_construct(pubs[0], scripts) - add_spender(spenders, "sighash/keypath_hashtype_%x" % hashtype, tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) - add_spender(spenders, "sighash/scriptpath_hashtype_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) - - # Test SIGHASH_SINGLE behavior in combination with mismatching outputs - if hashtype in VALID_SIGHASHES_TAPROOT_SINGLE: - add_spender(spenders, "sighash/keypath_hashtype_mis_%x" % hashtype, tap=tap, key=secs[0], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True) - add_spender(spenders, "sighash/scriptpath_hashtype_mis_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), **SINGLE_SIG, failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True) - - # Test OP_CODESEPARATOR impact on sighashing. - hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT) - common = {"annex": annex, "hashtype": hashtype, "standard": no_annex} - scripts = [ - ("pk_codesep", CScript(random_checksig_style(pubs[1]) + bytes([OP_CODESEPARATOR]))), # codesep after checksig - ("codesep_pk", CScript(bytes([OP_CODESEPARATOR]) + random_checksig_style(pubs[1]))), # codesep before checksig - ("branched_codesep", CScript([random.randbytes(random.randrange(2, 511)), OP_DROP, OP_IF, OP_CODESEPARATOR, pubs[0], OP_ELSE, OP_CODESEPARATOR, pubs[1], OP_ENDIF, OP_CHECKSIG])), # branch dependent codesep - # Note that the first data push in the "branched_codesep" script has the purpose of - # randomizing the sighash, both by varying script size and content. In order to - # avoid MINIMALDATA script verification errors caused by not-minimal-encoded data - # pushes (e.g. `OP_PUSH1 1` instead of `OP_1`), we set a minimum data size of 2 bytes. - ] - random.shuffle(scripts) - tap = taproot_construct(pubs[0], scripts) - add_spender(spenders, "sighash/pk_codesep", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) - add_spender(spenders, "sighash/codesep_pk", tap=tap, leaf="codesep_pk", key=secs[1], codeseppos=0, **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) - add_spender(spenders, "sighash/branched_codesep/left", tap=tap, leaf="branched_codesep", key=secs[0], codeseppos=3, **common, inputs=[getter("sign"), b'\x01'], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) - add_spender(spenders, "sighash/branched_codesep/right", tap=tap, leaf="branched_codesep", key=secs[1], codeseppos=6, **common, inputs=[getter("sign"), b''], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) - - # Reusing the scripts above, test that various features affect the sighash. 
- add_spender(spenders, "sighash/annex", tap=tap, leaf="pk_codesep", key=secs[1], hashtype=hashtype, standard=False, **SINGLE_SIG, annex=bytes([ANNEX_TAG]), failure={"sighash": override(default_sighash, annex=None)}, **ERR_SIG_SCHNORR) - add_spender(spenders, "sighash/script", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, script_taproot=tap.leaves["codesep_pk"].script)}, **ERR_SIG_SCHNORR) - add_spender(spenders, "sighash/leafver", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leafversion=random.choice([x & 0xFE for x in range(0x100) if x & 0xFE != LEAF_VERSION_TAPSCRIPT]))}, **ERR_SIG_SCHNORR) - add_spender(spenders, "sighash/scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leaf=None)}, **ERR_SIG_SCHNORR) - add_spender(spenders, "sighash/keypath", tap=tap, key=secs[0], **common, failure={"sighash": override(default_sighash, leaf="pk_codesep")}, **ERR_SIG_SCHNORR) - - # Test that invalid hashtypes don't work, both in key path and script path spends - hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT) - for invalid_hashtype in [x for x in range(0x100) if x not in VALID_SIGHASHES_TAPROOT]: - add_spender(spenders, "sighash/keypath_unk_hashtype_%x" % invalid_hashtype, tap=tap, key=secs[0], hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE) - add_spender(spenders, "sighash/scriptpath_unk_hashtype_%x" % invalid_hashtype, tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE) - - # Test that hashtype 0 cannot have a hashtype byte, and 1 must have one. - add_spender(spenders, "sighash/hashtype0_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE) - add_spender(spenders, "sighash/hashtype0_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE) - add_spender(spenders, "sighash/hashtype1_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR) - add_spender(spenders, "sighash/hashtype1_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR) - # Test that hashtype 0 and hashtype 1 cannot be transmuted into each other. 
- add_spender(spenders, "sighash/hashtype0to1_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR) - add_spender(spenders, "sighash/hashtype0to1_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR) - add_spender(spenders, "sighash/hashtype1to0_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR) - add_spender(spenders, "sighash/hashtype1to0_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR) - - # Test aspects of signatures with unusual lengths - for hashtype in [SIGHASH_DEFAULT, random.choice(VALID_SIGHASHES_TAPROOT)]: - scripts = [ - ("csv", CScript([pubs[2], OP_CHECKSIGVERIFY, OP_1])), - ("cs_pos", CScript([pubs[2], OP_CHECKSIG])), - ("csa_pos", CScript([OP_0, pubs[2], OP_CHECKSIGADD, OP_1, OP_EQUAL])), - ("cs_neg", CScript([pubs[2], OP_CHECKSIG, OP_NOT])), - ("csa_neg", CScript([OP_2, pubs[2], OP_CHECKSIGADD, OP_2, OP_EQUAL])) - ] - random.shuffle(scripts) - tap = taproot_construct(pubs[3], scripts) - # Empty signatures - add_spender(spenders, "siglen/empty_keypath", tap=tap, key=secs[3], hashtype=hashtype, failure={"sign": b""}, **ERR_SIG_SIZE) - add_spender(spenders, "siglen/empty_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_CHECKSIGVERIFY) - add_spender(spenders, "siglen/empty_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS) - add_spender(spenders, "siglen/empty_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS) - add_spender(spenders, "siglen/empty_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random.randbytes(random.randrange(1, 63))}, **ERR_SIG_SIZE) - add_spender(spenders, "siglen/empty_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random.randbytes(random.randrange(66, 100))}, **ERR_SIG_SIZE) - # Appending a zero byte to signatures invalidates them - add_spender(spenders, "siglen/padzero_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE)) - add_spender(spenders, "siglen/padzero_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE)) - add_spender(spenders, "siglen/padzero_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE)) - add_spender(spenders, "siglen/padzero_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE)) - add_spender(spenders, "siglen/padzero_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE)) - add_spender(spenders, "siglen/padzero_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else 
ERR_SIG_SIZE)) - # Removing the last byte from signatures invalidates them - add_spender(spenders, "siglen/popbyte_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR)) - add_spender(spenders, "siglen/popbyte_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR)) - add_spender(spenders, "siglen/popbyte_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR)) - add_spender(spenders, "siglen/popbyte_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR)) - add_spender(spenders, "siglen/popbyte_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR)) - add_spender(spenders, "siglen/popbyte_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR)) - # Verify that an invalid signature is not allowed, not even when the CHECKSIG* is expected to fail. - add_spender(spenders, "siglen/invalid_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR) - add_spender(spenders, "siglen/invalid_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR) - - # == Test that BIP341 spending only applies to witness version 1, program length 32, no P2SH == - - for p2sh in [False, True]: - for witver in range(1, 17): - for witlen in [20, 31, 32, 33]: - def mutate(spk): - prog = spk[2:] - assert len(prog) == 32 - if witlen < 32: - prog = prog[0:witlen] - elif witlen > 32: - prog += bytes([0 for _ in range(witlen - 32)]) - return CScript([CScriptOp.encode_op_n(witver), prog]) - scripts = [("s0", CScript([pubs[0], OP_CHECKSIG])), ("dummy", CScript([OP_RETURN]))] - tap = taproot_construct(pubs[1], scripts) - if not p2sh and witver == 1 and witlen == 32: - add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) - add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, failure={"leaf": "dummy"}, **ERR_OP_RETURN) - else: - add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], standard=False) - add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, standard=False) - - # == Test various aspects of BIP341 spending paths == - - # A set of functions that compute the hashing partner in a Merkle tree, designed to exercise - # edge cases. This relies on the taproot_construct feature that a lambda can be passed in - # instead of a subtree, to compute the partner to be hashed with. 
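Concretely, the calling convention looks like the sketch below, under the assumption stated in the comment above that taproot_construct accepts a callable in place of a subtree and feeds it the sibling's hash (the leaf name is hypothetical):

# One real leaf whose Merkle sibling is derived from the leaf's own hash;
# here the partner is the all-zero hash, as in the second entry of
# PARTNER_MERKLE_FN below.
scripts = [("leaf", CScript([pubs[0], OP_CHECKSIG])), lambda h: bytes(32)]
tap = taproot_construct(pubs[0], scripts)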
- PARTNER_MERKLE_FN = [ - # Combine with itself - lambda h: h, - # Combine with hash 0 - lambda h: bytes([0 for _ in range(32)]), - # Combine with hash 2^256-1 - lambda h: bytes([0xff for _ in range(32)]), - # Combine with itself-1 (BE) - lambda h: (int.from_bytes(h, 'big') - 1).to_bytes(32, 'big'), - # Combine with itself+1 (BE) - lambda h: (int.from_bytes(h, 'big') + 1).to_bytes(32, 'big'), - # Combine with itself-1 (LE) - lambda h: (int.from_bytes(h, 'little') - 1).to_bytes(32, 'big'), - # Combine with itself+1 (LE) - lambda h: (int.from_bytes(h, 'little') + 1).to_bytes(32, 'little'), - # Combine with random bitflipped version of self. - lambda h: (int.from_bytes(h, 'little') ^ (1 << random.randrange(256))).to_bytes(32, 'little') - ] - # Start with a tree that has depth 1 for "128deep" and depth 2 for "129deep". - scripts = [("128deep", CScript([pubs[0], OP_CHECKSIG])), [("129deep", CScript([pubs[0], OP_CHECKSIG])), random.choice(PARTNER_MERKLE_FN)]] - # Add 127 nodes on top of that tree, so that "128deep" and "129deep" end up at their designated depths. - for _ in range(127): - scripts = [scripts, random.choice(PARTNER_MERKLE_FN)] - tap = taproot_construct(pubs[0], scripts) - # Test that spends with a depth of 128 work, but 129 doesn't (even with a tree with weird Merkle branches in it). - add_spender(spenders, "spendpath/merklelimit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"leaf": "129deep"}, **ERR_CONTROLBLOCK_SIZE) - # Test that flipping the negation bit invalidates spends. - add_spender(spenders, "spendpath/negflag", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"negflag": lambda ctx: 1 - default_negflag(ctx)}, **ERR_WITNESS_PROGRAM_MISMATCH) - # Test that bitflips in the Merkle branch invalidate it. - add_spender(spenders, "spendpath/bitflipmerkle", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"merklebranch": bitflipper(default_merklebranch)}, **ERR_WITNESS_PROGRAM_MISMATCH) - # Test that bitflips in the internal pubkey invalidate it. - add_spender(spenders, "spendpath/bitflippubkey", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"pubkey_internal": bitflipper(default_pubkey_internal)}, **ERR_WITNESS_PROGRAM_MISMATCH) - # Test that empty witnesses are invalid. - add_spender(spenders, "spendpath/emptywit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"witness": []}, **ERR_EMPTY_WITNESS) - # Test that adding garbage to the control block invalidates it. - add_spender(spenders, "spendpath/padlongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random.randbytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE) - # Test that truncating the control block invalidates it. - add_spender(spenders, "spendpath/trunclongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE) - - scripts = [("s", CScript([pubs[0], OP_CHECKSIG]))] - tap = taproot_construct(pubs[1], scripts) - # Test that adding garbage to the control block invalidates it. - add_spender(spenders, "spendpath/padshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random.randbytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE) - # Test that truncating the control block invalidates it.
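For reference before the truncation case below: a control block must be exactly 33 + 32*m bytes for a Merkle path of m hashes, with m capped at 128, so both padding and truncation land on an invalid length. A sketch of the size rule (constant names chosen to match the ones in src/script/interpreter.h):

TAPROOT_CONTROL_BASE_SIZE = 33       # leaf version/parity byte plus 32-byte internal key
TAPROOT_CONTROL_NODE_SIZE = 32       # one hash per Merkle level
TAPROOT_CONTROL_MAX_NODE_COUNT = 128

def valid_control_block_size(size: int) -> bool:
    """Valid sizes are exactly 33 + 32*m for 0 <= m <= 128."""
    m, rem = divmod(size - TAPROOT_CONTROL_BASE_SIZE, TAPROOT_CONTROL_NODE_SIZE)
    return size >= TAPROOT_CONTROL_BASE_SIZE and rem == 0 and m <= TAPROOT_CONTROL_MAX_NODE_COUNT

assert valid_control_block_size(33 + 32 * 128)      # deepest valid spend, 4129 bytes
assert not valid_control_block_size(33 + 32 * 129)  # the "129deep" leaf trips this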
- add_spender(spenders, "spendpath/truncshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE) - # Test that truncating the control block to 1 byte ("-1 Merkle length") invalidates it - add_spender(spenders, "spendpath/trunc1shortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:1]}, **ERR_CONTROLBLOCK_SIZE) - - # == Test BIP342 edge cases == - - csa_low_val = random.randrange(0, 17) # Within range for OP_n - csa_low_result = csa_low_val + 1 - - csa_high_val = random.randrange(17, 100) if random.getrandbits(1) else random.randrange(-100, -1) # Outside OP_n range - csa_high_result = csa_high_val + 1 - - OVERSIZE_NUMBER = 2**31 - assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER))), 6) - assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER-1))), 5) - - big_choices = [] - big_scriptops = [] - for i in range(1000): - r = random.randrange(len(pubs)) - big_choices.append(r) - big_scriptops += [pubs[r], OP_CHECKSIGVERIFY] - - - def big_spend_inputs(ctx): - """Helper function to construct the script input for t33/t34 below.""" - # Instead of signing 999 times, precompute signatures for every (key, hashtype) combination - sigs = {} - for ht in VALID_SIGHASHES_TAPROOT: - for k in range(len(pubs)): - sigs[(k, ht)] = override(default_sign, hashtype=ht, key=secs[k])(ctx) - num = get(ctx, "num") - return [sigs[(big_choices[i], random.choice(VALID_SIGHASHES_TAPROOT))] for i in range(num - 1, -1, -1)] - - # Various BIP342 features - scripts = [ - # 0) drop stack element and OP_CHECKSIG - ("t0", CScript([OP_DROP, pubs[1], OP_CHECKSIG])), - # 1) normal OP_CHECKSIG - ("t1", CScript([pubs[1], OP_CHECKSIG])), - # 2) normal OP_CHECKSIGVERIFY - ("t2", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1])), - # 3) Hypothetical OP_CHECKMULTISIG script that takes a single sig as input - ("t3", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIG])), - # 4) Hypothetical OP_CHECKMULTISIGVERIFY script that takes a single sig as input - ("t4", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIGVERIFY, OP_1])), - # 5) OP_IF script that needs a true input - ("t5", CScript([OP_IF, pubs[1], OP_CHECKSIG, OP_ELSE, OP_RETURN, OP_ENDIF])), - # 6) OP_NOTIF script that needs a true input - ("t6", CScript([OP_NOTIF, OP_RETURN, OP_ELSE, pubs[1], OP_CHECKSIG, OP_ENDIF])), - # 7) OP_CHECKSIG with an empty key - ("t7", CScript([OP_0, OP_CHECKSIG])), - # 8) OP_CHECKSIGVERIFY with an empty key - ("t8", CScript([OP_0, OP_CHECKSIGVERIFY, OP_1])), - # 9) normal OP_CHECKSIGADD that also ensures return value is correct - ("t9", CScript([csa_low_val, pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])), - # 10) OP_CHECKSIGADD with empty key - ("t10", CScript([csa_low_val, OP_0, OP_CHECKSIGADD, csa_low_result, OP_EQUAL])), - # 11) OP_CHECKSIGADD with missing counter stack element - ("t11", CScript([pubs[1], OP_CHECKSIGADD, OP_1, OP_EQUAL])), - # 12) OP_CHECKSIG that needs invalid signature - ("t12", CScript([pubs[1], OP_CHECKSIGVERIFY, pubs[0], OP_CHECKSIG, OP_NOT])), - # 13) OP_CHECKSIG with empty key that needs invalid signature - ("t13", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_CHECKSIG, OP_NOT])), - # 14) OP_CHECKSIGADD that needs invalid signature - ("t14", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, pubs[0], OP_CHECKSIGADD, OP_NOT])), - # 15) OP_CHECKSIGADD with empty key that needs invalid signature - ("t15", 
CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGADD, OP_NOT])), - # 16) OP_CHECKSIG with unknown pubkey type - ("t16", CScript([OP_1, OP_CHECKSIG])), - # 17) OP_CHECKSIGADD with unknown pubkey type - ("t17", CScript([OP_0, OP_1, OP_CHECKSIGADD])), - # 18) OP_CHECKSIGVERIFY with unknown pubkey type - ("t18", CScript([OP_1, OP_CHECKSIGVERIFY, OP_1])), - # 19) script longer than 10000 bytes and over 201 non-push opcodes - ("t19", CScript([OP_0, OP_0, OP_2DROP] * 10001 + [pubs[1], OP_CHECKSIG])), - # 20) OP_CHECKSIGVERIFY with empty key - ("t20", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGVERIFY, OP_1])), - # 21) Script that grows the stack to 1000 elements - ("t21", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 999 + [OP_DROP] * 999)), - # 22) Script that grows the stack to 1001 elements - ("t22", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 1000 + [OP_DROP] * 1000)), - # 23) Script that expects an input stack of 1000 elements - ("t23", CScript([OP_DROP] * 999 + [pubs[1], OP_CHECKSIG])), - # 24) Script that expects an input stack of 1001 elements - ("t24", CScript([OP_DROP] * 1000 + [pubs[1], OP_CHECKSIG])), - # 25) Script that pushes a MAX_SCRIPT_ELEMENT_SIZE-bytes element - ("t25", CScript([random.randbytes(MAX_SCRIPT_ELEMENT_SIZE), OP_DROP, pubs[1], OP_CHECKSIG])), - # 26) Script that pushes a (MAX_SCRIPT_ELEMENT_SIZE+1)-bytes element - ("t26", CScript([random.randbytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, pubs[1], OP_CHECKSIG])), - # 27) CHECKSIGADD that must fail because numeric argument number is >4 bytes - ("t27", CScript([CScriptNum(OVERSIZE_NUMBER), pubs[1], OP_CHECKSIGADD])), - # 28) Pushes random CScriptNum value, checks OP_CHECKSIGADD result - ("t28", CScript([csa_high_val, pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])), - # 29) CHECKSIGADD that succeeds with proper sig because numeric argument number is <=4 bytes - ("t29", CScript([CScriptNum(OVERSIZE_NUMBER-1), pubs[1], OP_CHECKSIGADD])), - # 30) Variant of t1 with "normal" 33-byte pubkey - ("t30", CScript([b'\x03' + pubs[1], OP_CHECKSIG])), - # 31) Variant of t2 with "normal" 33-byte pubkey - ("t31", CScript([b'\x02' + pubs[1], OP_CHECKSIGVERIFY, OP_1])), - # 32) Variant of t28 with "normal" 33-byte pubkey - ("t32", CScript([csa_high_val, b'\x03' + pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])), - # 33) 999-of-999 multisig - ("t33", CScript(big_scriptops[:1998] + [OP_1])), - # 34) 1000-of-1000 multisig - ("t34", CScript(big_scriptops[:2000] + [OP_1])), - # 35) Variant of t9 that uses a non-minimally encoded input arg - ("t35", CScript([bytes([csa_low_val]), pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])), - # 36) Empty script - ("t36", CScript([])), - ] - # Add many dummies to test huge trees - for j in range(100000): - scripts.append((None, CScript([OP_RETURN, random.randrange(100000)]))) - random.shuffle(scripts) - tap = taproot_construct(pubs[0], scripts) - common = { - "hashtype": hashtype, - "key": secs[1], - "tap": tap, - } - # Test that MAX_SCRIPT_ELEMENT_SIZE byte stack element inputs are valid, but not one more (and 80 bytes is standard but 81 is not). 
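Before the limit tests below, a simplified model of the OP_CHECKSIGADD semantics that the t9/t28/t35 scripts above rely on (an illustrative sketch, not the interpreter's implementation): the opcode pops a public key, a CScriptNum counter, and a signature, then pushes the counter plus one when the signature validates:

def op_checksigadd(stack, checksig):
    """Pop <pubkey> <n> <sig>, push n + (1 if sig validates, else 0).
    An empty sig adds 0 without any validation; n is a CScriptNum and
    must be encoded in at most 4 bytes (which is why t27 fails)."""
    pubkey = stack.pop()
    n = stack.pop()
    sig = stack.pop()
    stack.append(n + (1 if sig and checksig(sig, pubkey) else 0))

Under this model, t9 leaves csa_low_val + 1 on the stack for OP_EQUAL to compare against csa_low_result, and t27 fails earlier because its counter does not decode as a CScriptNum of at most 4 bytes.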
- add_spender(spenders, "tapscript/inputmaxlimit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random.randbytes(MAX_SCRIPT_ELEMENT_SIZE)], failure={"inputs": [getter("sign"), random.randbytes(MAX_SCRIPT_ELEMENT_SIZE+1)]}, **ERR_PUSH_LIMIT) - add_spender(spenders, "tapscript/input80limit", leaf="t0", **common, inputs=[getter("sign"), random.randbytes(80)]) - add_spender(spenders, "tapscript/input81limit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random.randbytes(81)]) - # Test that OP_CHECKMULTISIG and OP_CHECKMULTISIGVERIFY cause failure, but OP_CHECKSIG and OP_CHECKSIGVERIFY work. - add_spender(spenders, "tapscript/disabled_checkmultisig", leaf="t1", **common, **SINGLE_SIG, failure={"leaf": "t3"}, **ERR_TAPSCRIPT_CHECKMULTISIG) - add_spender(spenders, "tapscript/disabled_checkmultisigverify", leaf="t2", **common, **SINGLE_SIG, failure={"leaf": "t4"}, **ERR_TAPSCRIPT_CHECKMULTISIG) - # Test that OP_IF and OP_NOTIF do not accept non-0x01 as truth value (the MINIMALIF rule is consensus in Tapscript) - add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x02']}, **ERR_MINIMALIF) - add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x03']}, **ERR_MINIMALIF) - add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0001']}, **ERR_MINIMALIF) - add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0100']}, **ERR_MINIMALIF) - # Test that 1-byte public keys (which are unknown) are acceptable but nonstandard with unrelated signatures, but 0-byte public keys are not valid. - add_spender(spenders, "tapscript/unkpk/checksig", leaf="t16", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY) - add_spender(spenders, "tapscript/unkpk/checksigadd", leaf="t17", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY) - add_spender(spenders, "tapscript/unkpk/checksigverify", leaf="t18", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY) - # Test that 33-byte public keys (which are unknown) are acceptable but nonstandard with valid signatures, but normal pubkeys are not valid in that case. - add_spender(spenders, "tapscript/oldpk/checksig", leaf="t30", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t1"}, **ERR_SIG_SCHNORR) - add_spender(spenders, "tapscript/oldpk/checksigadd", leaf="t31", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t2"}, **ERR_SIG_SCHNORR) - add_spender(spenders, "tapscript/oldpk/checksigverify", leaf="t32", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t28"}, **ERR_SIG_SCHNORR) - # Test that 0-byte public keys are not acceptable. 
- add_spender(spenders, "tapscript/emptypk/checksig", leaf="t1", **SINGLE_SIG, **common, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY) - add_spender(spenders, "tapscript/emptypk/checksigverify", leaf="t2", **SINGLE_SIG, **common, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY) - add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY) - add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t35", standard=False, **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY) - # Test that OP_CHECKSIGADD results are as expected - add_spender(spenders, "tapscript/checksigaddresults", leaf="t28", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error") - add_spender(spenders, "tapscript/checksigaddoversize", leaf="t29", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error") - # Test that OP_CHECKSIGADD requires 3 stack elements. - add_spender(spenders, "tapscript/checksigadd3args", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t11"}, **ERR_STACK_EMPTY) - # Test that empty signatures do not cause script failure in OP_CHECKSIG and OP_CHECKSIGADD (but do fail with empty pubkey, and do fail OP_CHECKSIGVERIFY) - add_spender(spenders, "tapscript/emptysigs/checksig", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t13"}, **ERR_UNKNOWN_PUBKEY) - add_spender(spenders, "tapscript/emptysigs/nochecksigverify", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t20"}, **ERR_UNKNOWN_PUBKEY) - add_spender(spenders, "tapscript/emptysigs/checksigadd", leaf="t14", **common, inputs=[b'', getter("sign")], failure={"leaf": "t15"}, **ERR_UNKNOWN_PUBKEY) - # Test that scripts over 10000 bytes (and over 201 non-push ops) are acceptable. - add_spender(spenders, "tapscript/no10000limit", leaf="t19", **SINGLE_SIG, **common) - # Test that a stack size of 1000 elements is permitted, but 1001 isn't. - add_spender(spenders, "tapscript/1000stack", leaf="t21", **SINGLE_SIG, **common, failure={"leaf": "t22"}, **ERR_STACK_SIZE) - # Test that an input stack size of 1000 elements is permitted, but 1001 isn't. - add_spender(spenders, "tapscript/1000inputs", leaf="t23", **common, inputs=[getter("sign")] + [b'' for _ in range(999)], failure={"leaf": "t24", "inputs": [getter("sign")] + [b'' for _ in range(1000)]}, **ERR_STACK_SIZE) - # Test that pushing a MAX_SCRIPT_ELEMENT_SIZE byte stack element is valid, but one longer is not. - add_spender(spenders, "tapscript/pushmaxlimit", leaf="t25", **common, **SINGLE_SIG, failure={"leaf": "t26"}, **ERR_PUSH_LIMIT) - # Test that 999-of-999 multisig works (but 1000-of-1000 triggers stack size limits) - add_spender(spenders, "tapscript/bigmulti", leaf="t33", **common, inputs=big_spend_inputs, num=999, failure={"leaf": "t34", "num": 1000}, **ERR_STACK_SIZE) - # Test that the CLEANSTACK rule is consensus critical in tapscript - add_spender(spenders, "tapscript/cleanstack", leaf="t36", tap=tap, inputs=[b'\x01'], failure={"inputs": [b'\x01', b'\x01']}, **ERR_CLEANSTACK) - - # == Test for sigops ratio limit == - - # Given a number n, and a public key pk, functions that produce a (CScript, sigops). Each script takes as - # input a valid signature with the passed pk followed by a dummy push of bytes that are to be dropped, and - # will execute sigops signature checks. - SIGOPS_RATIO_SCRIPTS = [ - # n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIG. 
- lambda n, pk: (CScript([OP_DROP, pk] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_CHECKSIG]), n + 1), - # n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGVERIFY. - lambda n, pk: (CScript([OP_DROP, pk, OP_0, OP_IF, OP_2DUP, OP_CHECKSIGVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_2, OP_SWAP, OP_CHECKSIGADD, OP_3, OP_EQUAL]), n + 1), - # n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIG. - lambda n, pk: (CScript([random.randbytes(220), OP_2DROP, pk, OP_1, OP_NOTIF, OP_2DUP, OP_CHECKSIG, OP_VERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_4, OP_SWAP, OP_CHECKSIGADD, OP_5, OP_EQUAL]), n + 1), - # n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGADD. - lambda n, pk: (CScript([OP_DROP, pk, OP_1, OP_IF, OP_ELSE, OP_2DUP, OP_6, OP_SWAP, OP_CHECKSIGADD, OP_7, OP_EQUALVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_8, OP_SWAP, OP_CHECKSIGADD, OP_9, OP_EQUAL]), n + 1), - # n+1 OP_CHECKSIGs, but also one OP_CHECKSIG with an empty signature. - lambda n, pk: (CScript([OP_DROP, OP_0, pk, OP_CHECKSIG, OP_NOT, OP_VERIFY, pk] + [OP_2DUP, OP_CHECKSIG, OP_VERIFY] * n + [OP_CHECKSIG]), n + 1), - # n OP_CHECKSIGADDs and 1 OP_CHECKSIG, but also an OP_CHECKSIGADD with an empty signature. - lambda n, pk: (CScript([OP_DROP, OP_0, OP_10, pk, OP_CHECKSIGADD, OP_10, OP_EQUALVERIFY, pk] + [OP_2DUP, OP_16, OP_SWAP, OP_CHECKSIGADD, b'\x11', OP_EQUALVERIFY] * n + [OP_CHECKSIG]), n + 1), - ] - for annex in [None, bytes([ANNEX_TAG]) + random.randbytes(random.randrange(1000))]: - for hashtype in [SIGHASH_DEFAULT, SIGHASH_ALL]: - for pubkey in [pubs[1], random.randbytes(random.choice([x for x in range(2, 81) if x != 32]))]: - for fn_num, fn in enumerate(SIGOPS_RATIO_SCRIPTS): - merkledepth = random.randrange(129) - - - def predict_sigops_ratio(n, dummy_size): - """Predict whether spending fn(n, pubkey) with dummy_size will pass the ratio test.""" - script, sigops = fn(n, pubkey) - # Predict the size of the witness for a given choice of n - stacklen_size = 1 - sig_size = 64 + (hashtype != SIGHASH_DEFAULT) - siglen_size = 1 - dummylen_size = 1 + 2 * (dummy_size >= 253) - script_size = len(script) - scriptlen_size = 1 + 2 * (script_size >= 253) - control_size = 33 + 32 * merkledepth - controllen_size = 1 + 2 * (control_size >= 253) - annex_size = 0 if annex is None else len(annex) - annexlen_size = 0 if annex is None else 1 + 2 * (annex_size >= 253) - witsize = stacklen_size + sig_size + siglen_size + dummy_size + dummylen_size + script_size + scriptlen_size + control_size + controllen_size + annex_size + annexlen_size - # sigops ratio test - return witsize + 50 >= 50 * sigops - # Make sure n is high enough that with empty dummy, the script is not valid - n = 0 - while predict_sigops_ratio(n, 0): - n += 1 - # But allow picking a bit higher still - n += random.randrange(5) - # Now pick dummy size *just* large enough that the overall construction passes - dummylen = 0 - while not predict_sigops_ratio(n, dummylen): - dummylen += 1 - scripts = [("s", fn(n, pubkey)[0])] - for _ in range(merkledepth): - scripts = [scripts, random.choice(PARTNER_MERKLE_FN)] - tap = taproot_construct(pubs[0], scripts) - standard = annex is None and dummylen <= 80 and len(pubkey) == 32 - add_spender(spenders, "tapscript/sigopsratio_%i" % fn_num, tap=tap, leaf="s", annex=annex, hashtype=hashtype, key=secs[1], inputs=[getter("sign"), random.randbytes(dummylen)], standard=standard, failure={"inputs": [getter("sign"), 
random.randbytes(dummylen - 1)]}, **ERR_SIGOPS_RATIO) - - # Future leaf versions - for leafver in range(0, 0x100, 2): - if leafver == LEAF_VERSION_TAPSCRIPT or leafver == ANNEX_TAG: - # Skip the defined LEAF_VERSION_TAPSCRIPT, and the ANNEX_TAG which is not usable as leaf version - continue - scripts = [ - ("bare_c0", CScript([OP_NOP])), - ("bare_unkver", CScript([OP_NOP]), leafver), - ("return_c0", CScript([OP_RETURN])), - ("return_unkver", CScript([OP_RETURN]), leafver), - ("undecodable_c0", CScript([OP_PUSHDATA1])), - ("undecodable_unkver", CScript([OP_PUSHDATA1]), leafver), - ("bigpush_c0", CScript([random.randbytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP])), - ("bigpush_unkver", CScript([random.randbytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP]), leafver), - ("1001push_c0", CScript([OP_0] * 1001)), - ("1001push_unkver", CScript([OP_0] * 1001), leafver), - ] - random.shuffle(scripts) - tap = taproot_construct(pubs[0], scripts) - add_spender(spenders, "unkver/bare", standard=False, tap=tap, leaf="bare_unkver", failure={"leaf": "bare_c0"}, **ERR_CLEANSTACK) - add_spender(spenders, "unkver/return", standard=False, tap=tap, leaf="return_unkver", failure={"leaf": "return_c0"}, **ERR_OP_RETURN) - add_spender(spenders, "unkver/undecodable", standard=False, tap=tap, leaf="undecodable_unkver", failure={"leaf": "undecodable_c0"}, **ERR_UNDECODABLE) - add_spender(spenders, "unkver/bigpush", standard=False, tap=tap, leaf="bigpush_unkver", failure={"leaf": "bigpush_c0"}, **ERR_PUSH_LIMIT) - add_spender(spenders, "unkver/1001push", standard=False, tap=tap, leaf="1001push_unkver", failure={"leaf": "1001push_c0"}, **ERR_STACK_SIZE) - add_spender(spenders, "unkver/1001inputs", standard=False, tap=tap, leaf="bare_unkver", inputs=[b'']*1001, failure={"leaf": "bare_c0"}, **ERR_STACK_SIZE) - - # OP_SUCCESSx tests. 
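BIP342 reserves a fixed set of opcodes as OP_SUCCESSx (80, 98, 126-129, 131-134, 137-138, 141-142, 149-153 and 187-254); the loop below selects them out of the full opcode range with is_op_success. A quick sanity sketch using this file's own imports:

# Enumerate the opcodes the following loop will treat as OP_SUCCESSx.
success_ops = [opval for opval in range(0x100) if is_op_success(CScriptOp(opval))]
assert 0x50 in success_ops          # OP_SUCCESS80 (the byte otherwise known as OP_RESERVED)
assert OP_NOP not in success_ops    # ordinary opcodes keep their usual semantics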
- hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT) - for opval in range(76, 0x100): - opcode = CScriptOp(opval) - if not is_op_success(opcode): - continue - scripts = [ - ("bare_success", CScript([opcode])), - ("bare_nop", CScript([OP_NOP])), - ("unexecif_success", CScript([OP_0, OP_IF, opcode, OP_ENDIF])), - ("unexecif_nop", CScript([OP_0, OP_IF, OP_NOP, OP_ENDIF])), - ("return_success", CScript([OP_RETURN, opcode])), - ("return_nop", CScript([OP_RETURN, OP_NOP])), - ("undecodable_success", CScript([opcode, OP_PUSHDATA1])), - ("undecodable_nop", CScript([OP_NOP, OP_PUSHDATA1])), - ("undecodable_bypassed_success", CScript([OP_PUSHDATA1, OP_2, opcode])), - ("bigpush_success", CScript([random.randbytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, opcode])), - ("bigpush_nop", CScript([random.randbytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, OP_NOP])), - ("1001push_success", CScript([OP_0] * 1001 + [opcode])), - ("1001push_nop", CScript([OP_0] * 1001 + [OP_NOP])), - ] - random.shuffle(scripts) - tap = taproot_construct(pubs[0], scripts) - add_spender(spenders, "opsuccess/bare", standard=False, tap=tap, leaf="bare_success", failure={"leaf": "bare_nop"}, **ERR_CLEANSTACK) - add_spender(spenders, "opsuccess/unexecif", standard=False, tap=tap, leaf="unexecif_success", failure={"leaf": "unexecif_nop"}, **ERR_CLEANSTACK) - add_spender(spenders, "opsuccess/return", standard=False, tap=tap, leaf="return_success", failure={"leaf": "return_nop"}, **ERR_OP_RETURN) - add_spender(spenders, "opsuccess/undecodable", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_nop"}, **ERR_UNDECODABLE) - add_spender(spenders, "opsuccess/undecodable_bypass", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_bypassed_success"}, **ERR_UNDECODABLE) - add_spender(spenders, "opsuccess/bigpush", standard=False, tap=tap, leaf="bigpush_success", failure={"leaf": "bigpush_nop"}, **ERR_PUSH_LIMIT) - add_spender(spenders, "opsuccess/1001push", standard=False, tap=tap, leaf="1001push_success", failure={"leaf": "1001push_nop"}, **ERR_STACK_SIZE) - add_spender(spenders, "opsuccess/1001inputs", standard=False, tap=tap, leaf="bare_success", inputs=[b'']*1001, failure={"leaf": "bare_nop"}, **ERR_STACK_SIZE) - - # Non-OP_SUCCESSx (verify that those aren't accidentally treated as OP_SUCCESSx) - for opval in range(0, 0x100): - opcode = CScriptOp(opval) - if is_op_success(opcode): - continue - scripts = [ - ("normal", CScript([OP_RETURN, opcode] + [OP_NOP] * 75)), - ("op_success", CScript([OP_RETURN, CScriptOp(0x50)])) - ] - tap = taproot_construct(pubs[0], scripts) - add_spender(spenders, "alwaysvalid/notsuccessx", tap=tap, leaf="op_success", inputs=[], standard=False, failure={"leaf": "normal"}) # err_msg differs based on opcode - - # == Test case for https://github.com/bitcoin/bitcoin/issues/24765 == - - zero_fn = lambda h: bytes([0 for _ in range(32)]) - tap = taproot_construct(pubs[0], [("leaf", CScript([pubs[1], OP_CHECKSIG, pubs[1], OP_CHECKSIGADD, OP_2, OP_EQUAL])), zero_fn]) - add_spender(spenders, "case24765", tap=tap, leaf="leaf", inputs=[getter("sign"), getter("sign")], key=secs[1], no_fail=True) - - # == Legacy tests == - - # Also add a few legacy spends into the mix, so that transactions which combine taproot and pre-taproot spends get tested too. 
- for compressed in [False, True]: - eckey1, pubkey1 = generate_keypair(compressed=compressed) - eckey2, _ = generate_keypair(compressed=compressed) - for p2sh in [False, True]: - for witv0 in [False, True]: - for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]: - standard = (hashtype in VALID_SIGHASHES_ECDSA) and (compressed or not witv0) - add_spender(spenders, "legacy/pk-wrongkey", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=key_to_p2pk_script(pubkey1), **SINGLE_SIG, key=eckey1, failure={"key": eckey2}, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS) - add_spender(spenders, "legacy/pkh-sighashflip", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, pkh=pubkey1, key=eckey1, **SIGHASH_BITFLIP, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS) - - # Verify that OP_CHECKSIGADD wasn't accidentally added to pre-taproot validation logic. - for p2sh in [False, True]: - for witv0 in [False, True]: - for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]: - standard = hashtype in VALID_SIGHASHES_ECDSA and (p2sh or witv0) - add_spender(spenders, "compat/nocsa", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([OP_IF, OP_11, pubkey1, OP_CHECKSIGADD, OP_12, OP_EQUAL, OP_ELSE, pubkey1, OP_CHECKSIG, OP_ENDIF]), key=eckey1, sigops_weight=4-3*witv0, inputs=[getter("sign"), b''], failure={"inputs": [getter("sign"), b'\x01']}, **ERR_UNDECODABLE) - - return spenders - - -def spenders_taproot_nonstandard(): - """Spenders for testing that post-activation Taproot rules may be nonstandard.""" - - spenders = [] - - sec = generate_privkey() - pub, _ = compute_xonly_pubkey(sec) - scripts = [ - ("future_leaf", CScript([pub, OP_CHECKSIG]), 0xc2), - ("op_success", CScript([pub, OP_CHECKSIG, OP_0, OP_IF, CScriptOp(0x50), OP_ENDIF])), - ] - tap = taproot_construct(pub, scripts) - - # Test that features like annex, leaf versions, or OP_SUCCESS are valid but non-standard - add_spender(spenders, "inactive/scriptpath_valid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")]) - add_spender(spenders, "inactive/scriptpath_invalid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash)) - add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")]) - add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash)) - - return spenders - -# Consensus validation flags to use in dumps for tests with "legacy/" or "inactive/" prefix. -LEGACY_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY" -# Consensus validation flags to use in dumps for all other tests. 
-TAPROOT_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY,TAPROOT" - -def dump_json_test(tx, input_utxos, idx, success, failure): - spender = input_utxos[idx].spender - # Determine flags to dump - flags = LEGACY_FLAGS if spender.comment.startswith("legacy/") or spender.comment.startswith("inactive/") else TAPROOT_FLAGS - - fields = [ - ("tx", tx.serialize().hex()), - ("prevouts", [x.output.serialize().hex() for x in input_utxos]), - ("index", idx), - ("flags", flags), - ("comment", spender.comment) - ] - - # The "final" field indicates that a spend should be always valid, even with more validation flags enabled - # than the listed ones. Use standardness as a proxy for this (which gives a conservative underestimate). - if spender.is_standard: - fields.append(("final", True)) - - def dump_witness(wit): - return OrderedDict([("scriptSig", wit[0].hex()), ("witness", [x.hex() for x in wit[1]])]) - if success is not None: - fields.append(("success", dump_witness(success))) - if failure is not None: - fields.append(("failure", dump_witness(failure))) - - # Write the dump to $TEST_DUMP_DIR/x/xyz... where x,y,z,... are the SHA1 sum of the dump (which makes the - # file naming scheme compatible with fuzzing infrastructure). - dump = json.dumps(OrderedDict(fields)) + ",\n" - sha1 = hashlib.sha1(dump.encode("utf-8")).hexdigest() - dirname = os.environ.get("TEST_DUMP_DIR", ".") + ("/%s" % sha1[0]) - os.makedirs(dirname, exist_ok=True) - with open(dirname + ("/%s" % sha1), 'w', encoding="utf8") as f: - f.write(dump) - -# Data type to keep track of UTXOs, where they were created, and how to spend them. -UTXOData = namedtuple('UTXOData', 'outpoint,output,spender') - - -class TaprootTest(BitcoinTestFramework): - def add_options(self, parser): - self.add_wallet_options(parser) - parser.add_argument("--dumptests", dest="dump_tests", default=False, action="store_true", - help="Dump generated test cases to directory set by TEST_DUMP_DIR environment variable") - - def skip_test_if_missing_module(self): - self.skip_if_no_wallet() - - def set_test_params(self): - self.num_nodes = 1 - self.setup_clean_chain = True - self.extra_args = [["-par=1"]] - - def block_submit(self, node, txs, msg, err_msg, cb_pubkey=None, fees=0, sigops_weight=0, witness=False, accept=False): - - # Deplete block of any non-tapscript sigops using a single additional 0-value coinbase output. - # It is not impossible to fit enough tapscript sigops to hit the old 80k limit without - # busting txin-level limits. We simply have to account for the p2pk outputs in all - # transactions. 
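-        # (A worked instance of that padding, assuming the framework constants
-        # MAX_BLOCK_SIGOPS_WEIGHT = 80000 and WITNESS_SCALE_FACTOR = 4: a block whose
-        # transactions already consume sigops_weight = 10000 gets a filler output script of
-        # (80000 - 10000) // 4 = 17500 OP_CHECKSIGs, each costing 4 weight pre-taproot.)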
- extra_output_script = CScript(bytes([OP_CHECKSIG]*((MAX_BLOCK_SIGOPS_WEIGHT - sigops_weight) // WITNESS_SCALE_FACTOR))) - - coinbase_tx = create_coinbase(self.lastblockheight + 1, pubkey=cb_pubkey, extra_output_script=extra_output_script, fees=fees) - block = create_block(self.tip, coinbase_tx, self.lastblocktime + 1, txlist=txs) - witness and add_witness_commitment(block) - block.solve() - block_response = node.submitblock(block.serialize().hex()) - if err_msg is not None: - assert block_response is not None and err_msg in block_response, "Missing error message '%s' from block response '%s': %s" % (err_msg, "(None)" if block_response is None else block_response, msg) - if accept: - assert node.getbestblockhash() == block.hash, "Failed to accept: %s (response: %s)" % (msg, block_response) - self.tip = block.sha256 - self.lastblockhash = block.hash - self.lastblocktime += 1 - self.lastblockheight += 1 - else: - assert node.getbestblockhash() == self.lastblockhash, "Failed to reject: " + msg - - def init_blockinfo(self, node): - # Initialize variables used by block_submit(). - self.lastblockhash = node.getbestblockhash() - self.tip = int(self.lastblockhash, 16) - block = node.getblock(self.lastblockhash) - self.lastblockheight = block['height'] - self.lastblocktime = block['time'] - - def test_spenders(self, node, spenders, input_counts): - """Run randomized tests with a number of "spenders". - - Steps: - 1) Generate an appropriate UTXO for each spender to test spend conditions - 2) Generate 100 random addresses of all wallet types: pkh/sh_wpkh/wpkh - 3) Select random number of inputs from (1) - 4) Select random number of addresses from (2) as outputs - - Each spender embodies a test; in a large randomized test, it is verified - that toggling the valid argument to each lambda toggles the validity of - the transaction. This is accomplished by constructing transactions consisting - of all valid inputs, except one invalid one. - """ - - # Construct a bunch of sPKs that send coins back to the host wallet - self.log.info("- Constructing addresses for returning coins") - host_spks = [] - host_pubkeys = [] - for i in range(16): - addr = node.getnewaddress(address_type=random.choice(["legacy", "p2sh-segwit", "bech32"])) - info = node.getaddressinfo(addr) - spk = bytes.fromhex(info['scriptPubKey']) - host_spks.append(spk) - host_pubkeys.append(bytes.fromhex(info['pubkey'])) - - self.init_blockinfo(node) - - # Create transactions spending up to 50 of the wallet's inputs, with one output for each spender, and - # one change output at the end. The transaction is constructed on the Python side to enable - # having multiple outputs to the same address and outputs with no assigned address. The wallet - # is then asked to sign it through signrawtransactionwithwallet, and then added to a block on the - # Python side (to bypass standardness rules). 
- self.log.info("- Creating test UTXOs...") - random.shuffle(spenders) - normal_utxos = [] - mismatching_utxos = [] # UTXOs with input that requires mismatching output position - done = 0 - while done < len(spenders): - # Compute how many UTXOs to create with this transaction - count_this_tx = min(len(spenders) - done, (len(spenders) + 4) // 5, 10000) - - fund_tx = CTransaction() - # Add the 50 highest-value inputs - unspents = node.listunspent() - random.shuffle(unspents) - unspents.sort(key=lambda x: int(x["amount"] * 100000000), reverse=True) - if len(unspents) > 50: - unspents = unspents[:50] - random.shuffle(unspents) - balance = 0 - for unspent in unspents: - balance += int(unspent["amount"] * 100000000) - txid = int(unspent["txid"], 16) - fund_tx.vin.append(CTxIn(COutPoint(txid, int(unspent["vout"])), CScript())) - # Add outputs - cur_progress = done / len(spenders) - next_progress = (done + count_this_tx) / len(spenders) - change_goal = (1.0 - 0.6 * next_progress) / (1.0 - 0.6 * cur_progress) * balance - self.log.debug("Create %i UTXOs in a transaction spending %i inputs worth %.8f (sending ~%.8f to change)" % (count_this_tx, len(unspents), balance * 0.00000001, change_goal * 0.00000001)) - for i in range(count_this_tx): - avg = (balance - change_goal) / (count_this_tx - i) - amount = int(random.randrange(int(avg*0.85 + 0.5), int(avg*1.15 + 0.5)) + 0.5) - balance -= amount - fund_tx.vout.append(CTxOut(amount, spenders[done + i].script)) - # Add change - fund_tx.vout.append(CTxOut(balance - 10000, random.choice(host_spks))) - # Ask the wallet to sign - fund_tx = tx_from_hex(node.signrawtransactionwithwallet(fund_tx.serialize().hex())["hex"]) - # Construct UTXOData entries - fund_tx.rehash() - for i in range(count_this_tx): - utxodata = UTXOData(outpoint=COutPoint(fund_tx.sha256, i), output=fund_tx.vout[i], spender=spenders[done]) - if utxodata.spender.need_vin_vout_mismatch: - mismatching_utxos.append(utxodata) - else: - normal_utxos.append(utxodata) - done += 1 - # Mine into a block - self.block_submit(node, [fund_tx], "Funding tx", None, random.choice(host_pubkeys), 10000, MAX_BLOCK_SIGOPS_WEIGHT, True, True) - - # Consume groups of choice(input_coins) from utxos in a tx, testing the spenders. - self.log.info("- Running %i spending tests" % done) - random.shuffle(normal_utxos) - random.shuffle(mismatching_utxos) - assert done == len(normal_utxos) + len(mismatching_utxos) - - left = done - while left: - # Construct CTransaction with random version, nLocktime - tx = CTransaction() - tx.version = random.choice([1, 2, random.getrandbits(32)]) - min_sequence = (tx.version != 1 and tx.version != 0) * 0x80000000 # The minimum sequence number to disable relative locktime - if random.choice([True, False]): - tx.nLockTime = random.randrange(LOCKTIME_THRESHOLD, self.lastblocktime - 7200) # all absolute locktimes in the past - else: - tx.nLockTime = random.randrange(self.lastblockheight + 1) # all block heights in the past - - # Decide how many UTXOs to test with. - acceptable = [n for n in input_counts if n <= left and (left - n > max(input_counts) or (left - n) in [0] + input_counts)] - num_inputs = random.choice(acceptable) - - # If we have UTXOs that require mismatching inputs/outputs left, include exactly one of those - # unless there is only one normal UTXO left (as tests with mismatching UTXOs require at least one - # normal UTXO to go in the first position), and we don't want to run out of normal UTXOs. 
- input_utxos = [] - while len(mismatching_utxos) and (len(input_utxos) == 0 or len(normal_utxos) == 1): - input_utxos.append(mismatching_utxos.pop()) - left -= 1 - - # Top up until we hit num_inputs (but include at least one normal UTXO always). - for _ in range(max(1, num_inputs - len(input_utxos))): - input_utxos.append(normal_utxos.pop()) - left -= 1 - - # The first input cannot require a mismatching output (as there is at least one output). - while True: - random.shuffle(input_utxos) - if not input_utxos[0].spender.need_vin_vout_mismatch: - break - first_mismatch_input = None - for i in range(len(input_utxos)): - if input_utxos[i].spender.need_vin_vout_mismatch: - first_mismatch_input = i - assert first_mismatch_input is None or first_mismatch_input > 0 - - # Decide fee, and add CTxIns to tx. - amount = sum(utxo.output.nValue for utxo in input_utxos) - fee = min(random.randrange(MIN_FEE * 2, MIN_FEE * 4), amount - DUST_LIMIT) # 10000-20000 sat fee - in_value = amount - fee - tx.vin = [CTxIn(outpoint=utxo.outpoint, nSequence=random.randint(min_sequence, 0xffffffff)) for utxo in input_utxos] - tx.wit.vtxinwit = [CTxInWitness() for _ in range(len(input_utxos))] - sigops_weight = sum(utxo.spender.sigops_weight for utxo in input_utxos) - self.log.debug("Test: %s" % (", ".join(utxo.spender.comment for utxo in input_utxos))) - - # Add 1 to 4 random outputs (but constrained by inputs that require mismatching outputs) - num_outputs = random.choice(range(1, 1 + min(4, 4 if first_mismatch_input is None else first_mismatch_input))) - assert in_value >= 0 and fee - num_outputs * DUST_LIMIT >= MIN_FEE - for i in range(num_outputs): - tx.vout.append(CTxOut()) - if in_value <= DUST_LIMIT: - tx.vout[-1].nValue = DUST_LIMIT - elif i < num_outputs - 1: - tx.vout[-1].nValue = in_value - else: - tx.vout[-1].nValue = random.randint(DUST_LIMIT, in_value) - in_value -= tx.vout[-1].nValue - tx.vout[-1].scriptPubKey = random.choice(host_spks) - sigops_weight += CScript(tx.vout[-1].scriptPubKey).GetSigOpCount(False) * WITNESS_SCALE_FACTOR - fee += in_value - assert fee >= 0 - - # Select coinbase pubkey - cb_pubkey = random.choice(host_pubkeys) - sigops_weight += 1 * WITNESS_SCALE_FACTOR - - # Precompute one satisfying and one failing scriptSig/witness for each input. - input_data = [] - for i in range(len(input_utxos)): - fn = input_utxos[i].spender.sat_function - fail = None - success = fn(tx, i, [utxo.output for utxo in input_utxos], True) - if not input_utxos[i].spender.no_fail: - fail = fn(tx, i, [utxo.output for utxo in input_utxos], False) - input_data.append((fail, success)) - if self.options.dump_tests: - dump_json_test(tx, input_utxos, i, success, fail) - - # Sign each input incorrectly once on each complete signing pass, except the very last. - for fail_input in list(range(len(input_utxos))) + [None]: - # Skip trying to fail at spending something that can't be made to fail. 
- if fail_input is not None and input_utxos[fail_input].spender.no_fail: - continue - # Expected message with each input failure, may be None(which is ignored) - expected_fail_msg = None if fail_input is None else input_utxos[fail_input].spender.err_msg - # Fill inputs/witnesses - for i in range(len(input_utxos)): - tx.vin[i].scriptSig = input_data[i][i != fail_input][0] - tx.wit.vtxinwit[i].scriptWitness.stack = input_data[i][i != fail_input][1] - # Submit to mempool to check standardness - is_standard_tx = ( - fail_input is None # Must be valid to be standard - and (all(utxo.spender.is_standard for utxo in input_utxos)) # All inputs must be standard - and tx.version >= 1 # The tx version must be standard - and tx.version <= 2) - tx.rehash() - msg = ','.join(utxo.spender.comment + ("*" if n == fail_input else "") for n, utxo in enumerate(input_utxos)) - if is_standard_tx: - node.sendrawtransaction(tx.serialize().hex(), 0) - assert node.getmempoolentry(tx.hash) is not None, "Failed to accept into mempool: " + msg - else: - assert_raises_rpc_error(-26, None, node.sendrawtransaction, tx.serialize().hex(), 0) - # Submit in a block - self.block_submit(node, [tx], msg, witness=True, accept=fail_input is None, cb_pubkey=cb_pubkey, fees=fee, sigops_weight=sigops_weight, err_msg=expected_fail_msg) - - if (len(spenders) - left) // 200 > (len(spenders) - left - len(input_utxos)) // 200: - self.log.info(" - %i tests done" % (len(spenders) - left)) - - assert left == 0 - assert len(normal_utxos) == 0 - assert len(mismatching_utxos) == 0 - self.log.info(" - Done") - - def gen_test_vectors(self): - """Run a scenario that corresponds (and optionally produces) to BIP341 test vectors.""" - - self.log.info("Unit test scenario...") - - # Deterministically mine coins to OP_TRUE in block 1 - assert_equal(self.nodes[0].getblockcount(), 0) - coinbase = CTransaction() - coinbase.version = 1 - coinbase.vin = [CTxIn(COutPoint(0, 0xffffffff), CScript([OP_1, OP_1]), SEQUENCE_FINAL)] - coinbase.vout = [CTxOut(5000000000, CScript([OP_1]))] - coinbase.nLockTime = 0 - coinbase.rehash() - assert coinbase.hash == "f60c73405d499a956d3162e3483c395526ef78286458a4cb17b125aa92e49b20" - # Mine it - block = create_block(hashprev=int(self.nodes[0].getbestblockhash(), 16), coinbase=coinbase) - block.rehash() - block.solve() - self.nodes[0].submitblock(block.serialize().hex()) - assert_equal(self.nodes[0].getblockcount(), 1) - self.generate(self.nodes[0], COINBASE_MATURITY) - - SEED = 317 - VALID_LEAF_VERS = list(range(0xc0, 0x100, 2)) + [0x66, 0x7e, 0x80, 0x84, 0x96, 0x98, 0xba, 0xbc, 0xbe] - # Generate private keys - prvs = [hashlib.sha256(SEED.to_bytes(2, 'big') + bytes([i])).digest() for i in range(100)] - # Generate corresponding public x-only pubkeys - pubs = [compute_xonly_pubkey(prv)[0] for prv in prvs] - # Generate taproot objects - inner_keys = [pubs[i] for i in range(7)] - - script_lists = [ - None, - [("0", CScript([pubs[50], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT)], - [("0", CScript([pubs[51], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT)], - [("0", CScript([pubs[52], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT), ("1", CScript([b"BIP341"]), VALID_LEAF_VERS[pubs[99][0] % 41])], - [("0", CScript([pubs[53], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT), ("1", CScript([b"Taproot"]), VALID_LEAF_VERS[pubs[99][1] % 41])], - [("0", CScript([pubs[54], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT), - [("1", CScript([pubs[55], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT), ("2", CScript([pubs[56], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT)] - ], - [("0", 
CScript([pubs[57], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT), - [("1", CScript([pubs[58], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT), ("2", CScript([pubs[59], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT)] - ], - ] - taps = [taproot_construct(inner_keys[i], script_lists[i]) for i in range(len(inner_keys))] - - # Require negated taps[0] - assert taps[0].negflag - # Require one negated and one non-negated in taps 1 and 2. - assert taps[1].negflag != taps[2].negflag - # Require one negated and one non-negated in taps 3 and 4. - assert taps[3].negflag != taps[4].negflag - # Require one negated and one non-negated in taps 5 and 6. - assert taps[5].negflag != taps[6].negflag - - cblks = [{leaf: get({**DEFAULT_CONTEXT, 'tap': taps[i], 'leaf': leaf}, 'controlblock') for leaf in taps[i].leaves} for i in range(7)] - # Require one swapped and one unswapped in taps 3 and 4. - assert (cblks[3]['0'][33:65] < cblks[3]['1'][33:65]) != (cblks[4]['0'][33:65] < cblks[4]['1'][33:65]) - # Require one swapped and one unswapped in taps 5 and 6, both at the top and child level. - assert (cblks[5]['0'][33:65] < cblks[5]['1'][65:]) != (cblks[6]['0'][33:65] < cblks[6]['1'][65:]) - assert (cblks[5]['1'][33:65] < cblks[5]['2'][33:65]) != (cblks[6]['1'][33:65] < cblks[6]['2'][33:65]) - # Require within taps 5 (and thus also 6) that one level is swapped and the other is not. - assert (cblks[5]['0'][33:65] < cblks[5]['1'][65:]) != (cblks[5]['1'][33:65] < cblks[5]['2'][33:65]) - - # Compute a deterministic set of scriptPubKeys - tap_spks = [] - old_spks = [] - spend_info = {} - # First, taproot scriptPubKeys, for the tap objects constructed above - for i, tap in enumerate(taps): - tap_spks.append(tap.scriptPubKey) - d = {'key': prvs[i], 'tap': tap, 'mode': 'taproot'} - spend_info[tap.scriptPubKey] = d - # Then, a number of deterministically generated (keys 0x1,0x2,0x3) with 2x P2PKH, 1x P2WPKH spks. - for i in range(1, 4): - prv = ECKey() - prv.set(i.to_bytes(32, 'big'), True) - pub = prv.get_pubkey().get_bytes() - d = {"key": prv} - d["scriptcode"] = key_to_p2pkh_script(pub) - d["inputs"] = [getter("sign"), pub] - if i < 3: - # P2PKH - d['spk'] = key_to_p2pkh_script(pub) - d['mode'] = 'legacy' - else: - # P2WPKH - d['spk'] = key_to_p2wpkh_script(pub) - d['mode'] = 'witv0' - old_spks.append(d['spk']) - spend_info[d['spk']] = d - - # Construct a deterministic chain of transactions creating UTXOs to the test's spk's (so that they - # come from distinct txids). 
- txn = [] - lasttxid = coinbase.sha256 - amount = 5000000000 - for i, spk in enumerate(old_spks + tap_spks): - val = 42000000 * (i + 7) - tx = CTransaction() - tx.version = 1 - tx.vin = [CTxIn(COutPoint(lasttxid, i & 1), CScript([]), SEQUENCE_FINAL)] - tx.vout = [CTxOut(val, spk), CTxOut(amount - val, CScript([OP_1]))] - if i & 1: - tx.vout = list(reversed(tx.vout)) - tx.nLockTime = 0 - tx.rehash() - amount -= val - lasttxid = tx.sha256 - txn.append(tx) - spend_info[spk]['prevout'] = COutPoint(tx.sha256, i & 1) - spend_info[spk]['utxo'] = CTxOut(val, spk) - # Mine those transactions - self.init_blockinfo(self.nodes[0]) - self.block_submit(self.nodes[0], txn, "Crediting txn", None, sigops_weight=10, accept=True) - - # scriptPubKey computation - tests = {"version": 1} - spk_tests = tests.setdefault("scriptPubKey", []) - for i, tap in enumerate(taps): - test_case = {} - given = test_case.setdefault("given", {}) - given['internalPubkey'] = tap.internal_pubkey.hex() - - def pr(node): - if node is None: - return None - elif isinstance(node, tuple): - return {"id": int(node[0]), "script": node[1].hex(), "leafVersion": node[2]} - elif len(node) == 1: - return pr(node[0]) - elif len(node) == 2: - return [pr(node[0]), pr(node[1])] - else: - assert False - - given['scriptTree'] = pr(script_lists[i]) - intermediary = test_case.setdefault("intermediary", {}) - if len(tap.leaves): - leafhashes = intermediary.setdefault('leafHashes', [None] * len(tap.leaves)) - for leaf in tap.leaves: - leafhashes[int(leaf)] = tap.leaves[leaf].leaf_hash.hex() - intermediary['merkleRoot'] = tap.merkle_root.hex() if tap.merkle_root else None - intermediary['tweak'] = tap.tweak.hex() - intermediary['tweakedPubkey'] = tap.output_pubkey.hex() - expected = test_case.setdefault("expected", {}) - expected['scriptPubKey'] = tap.scriptPubKey.hex() - expected['bip350Address'] = program_to_witness(1, bytes(tap.output_pubkey), True) - if len(tap.leaves): - control_blocks = expected.setdefault("scriptPathControlBlocks", [None] * len(tap.leaves)) - for leaf in tap.leaves: - ctx = {**DEFAULT_CONTEXT, 'tap': tap, 'leaf': leaf} - control_blocks[int(leaf)] = get(ctx, "controlblock").hex() - spk_tests.append(test_case) - - # Construct a deterministic transaction spending all outputs created above. 
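-        # (For orientation: of the nine inputs selected below, the seven taproot key-path
-        # spends cover all seven valid BIP341 sighash flags between them, and the remaining
-        # two exercise a legacy P2PKH spend and a P2WPKH spend alongside them.)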
- tx = CTransaction() - tx.version = 2 - tx.vin = [] - inputs = [] - input_spks = [tap_spks[0], tap_spks[1], old_spks[0], tap_spks[2], tap_spks[5], old_spks[2], tap_spks[6], tap_spks[3], tap_spks[4]] - sequences = [0, SEQUENCE_FINAL, SEQUENCE_FINAL, 0xfffffffe, 0xfffffffe, 0, 0, SEQUENCE_FINAL, SEQUENCE_FINAL] - hashtypes = [SIGHASH_SINGLE, SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, SIGHASH_ALL, SIGHASH_ALL, SIGHASH_DEFAULT, SIGHASH_ALL, SIGHASH_NONE, SIGHASH_NONE|SIGHASH_ANYONECANPAY, SIGHASH_ALL|SIGHASH_ANYONECANPAY] - for i, spk in enumerate(input_spks): - tx.vin.append(CTxIn(spend_info[spk]['prevout'], CScript(), sequences[i])) - inputs.append(spend_info[spk]['utxo']) - tx.vout.append(CTxOut(1000000000, old_spks[1])) - tx.vout.append(CTxOut(3410000000, pubs[98])) - tx.nLockTime = 500000000 - precomputed = { - "hashAmounts": BIP341_sha_amounts(inputs), - "hashPrevouts": BIP341_sha_prevouts(tx), - "hashScriptPubkeys": BIP341_sha_scriptpubkeys(inputs), - "hashSequences": BIP341_sha_sequences(tx), - "hashOutputs": BIP341_sha_outputs(tx) - } - keypath_tests = tests.setdefault("keyPathSpending", []) - tx_test = {} - global_given = tx_test.setdefault("given", {}) - global_given['rawUnsignedTx'] = tx.serialize().hex() - utxos_spent = global_given.setdefault("utxosSpent", []) - for i in range(len(input_spks)): - utxos_spent.append({"scriptPubKey": inputs[i].scriptPubKey.hex(), "amountSats": inputs[i].nValue}) - global_intermediary = tx_test.setdefault("intermediary", {}) - for key in sorted(precomputed.keys()): - global_intermediary[key] = precomputed[key].hex() - test_list = tx_test.setdefault('inputSpending', []) - for i in range(len(input_spks)): - ctx = { - **DEFAULT_CONTEXT, - **spend_info[input_spks[i]], - 'tx': tx, - 'utxos': inputs, - 'idx': i, - 'hashtype': hashtypes[i], - 'deterministic': True - } - if ctx['mode'] == 'taproot': - test_case = {} - given = test_case.setdefault("given", {}) - given['txinIndex'] = i - given['internalPrivkey'] = get(ctx, 'key').hex() - if get(ctx, "tap").merkle_root != bytes(): - given['merkleRoot'] = get(ctx, "tap").merkle_root.hex() - else: - given['merkleRoot'] = None - given['hashType'] = get(ctx, "hashtype") - intermediary = test_case.setdefault("intermediary", {}) - intermediary['internalPubkey'] = get(ctx, "tap").internal_pubkey.hex() - intermediary['tweak'] = get(ctx, "tap").tweak.hex() - intermediary['tweakedPrivkey'] = get(ctx, "key_tweaked").hex() - sigmsg = get(ctx, "sigmsg") - intermediary['sigMsg'] = sigmsg.hex() - intermediary['precomputedUsed'] = [key for key in sorted(precomputed.keys()) if sigmsg.count(precomputed[key])] - intermediary['sigHash'] = get(ctx, "sighash").hex() - expected = test_case.setdefault("expected", {}) - expected['witness'] = [get(ctx, "sign").hex()] - test_list.append(test_case) - tx.wit.vtxinwit.append(CTxInWitness()) - tx.vin[i].scriptSig = CScript(flatten(get(ctx, "scriptsig"))) - tx.wit.vtxinwit[i].scriptWitness.stack = flatten(get(ctx, "witness")) - aux = tx_test.setdefault("auxiliary", {}) - aux['fullySignedTx'] = tx.serialize().hex() - keypath_tests.append(tx_test) - assert_equal(hashlib.sha256(tx.serialize()).hexdigest(), "24bab662cb55a7f3bae29b559f651674c62bcc1cd442d44715c0133939107b38") - # Mine the spending transaction - self.block_submit(self.nodes[0], [tx], "Spending txn", None, sigops_weight=10000, accept=True, witness=True) - - if GEN_TEST_VECTORS: - print(json.dumps(tests, indent=4, sort_keys=False)) - - def run_test(self): - self.gen_test_vectors() - - self.log.info("Post-activation tests...") - 
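-        # (Sketch of how the input_counts argument below is consumed; the relevant lines from
-        # test_spenders() are repeated here for reference. Repeated entries act as weights:
-        # with input_counts=[1, 2, 2, 2, 2, 3], two-input groups are drawn four times as often
-        # as one- or three-input groups whenever every count is feasible.
-        #
-        #   acceptable = [n for n in input_counts if n <= left and (left - n > max(input_counts) or (left - n) in [0] + input_counts)]
-        #   num_inputs = random.choice(acceptable)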
self.test_spenders(self.nodes[0], spenders_taproot_active(), input_counts=[1, 2, 2, 2, 2, 3]) - # Run each test twice; once in isolation, and once combined with others. Testing in isolation - # means that the standardness is verified in every test (as combined transactions are only standard - # when all their inputs are standard). - self.test_spenders(self.nodes[0], spenders_taproot_nonstandard(), input_counts=[1]) - self.test_spenders(self.nodes[0], spenders_taproot_nonstandard(), input_counts=[2, 3]) - - -if __name__ == '__main__': - TaprootTest(__file__).main() +#!/usr/bin/env python3) +# Copyright (c) 2019-2022 The Bitcoin Core developers) +# Distributed under the MIT software license, see the accompanying) +# file COPYING or http://www.opensource.org/licenses/mit-license.php.) +# Test Taproot softfork (BIPs 340-342)) +) +from test_framework.blocktools import () + COINBASE_MATURITY,) + create_coinbase,) + create_block,) + add_witness_commitment,) + MAX_BLOCK_SIGOPS_WEIGHT,) +)) +from test_framework.messages import () + COutPoint,) + CTransaction,) + CTxIn,) + CTxInWitness,) + CTxOut,) + SEQUENCE_FINAL,) + tx_from_hex,) + WITNESS_SCALE_FACTOR,) +)) +from test_framework.script import () + ANNEX_TAG,) + BIP341_sha_amounts,) + BIP341_sha_outputs,) + BIP341_sha_prevouts,) + BIP341_sha_scriptpubkeys,) + BIP341_sha_sequences,) + CScript,) + CScriptNum,) + CScriptOp,) + hash256,) + LEAF_VERSION_TAPSCRIPT,) + LegacySignatureMsg,) + LOCKTIME_THRESHOLD,) + MAX_SCRIPT_ELEMENT_SIZE,) + OP_0,) + OP_1,) + OP_2,) + OP_3,) + OP_4,) + OP_5,) + OP_6,) + OP_7,) + OP_8,) + OP_9,) + OP_10,) + OP_11,) + OP_12,) + OP_16,) + OP_2DROP,) + OP_2DUP,) + OP_CHECKMULTISIG,) + OP_CHECKMULTISIGVERIFY,) + OP_CHECKSIG,) + OP_CHECKSIGADD,) + OP_CHECKSIGVERIFY,) + OP_CODESEPARATOR,) + OP_DROP,) + OP_DUP,) + OP_ELSE,) + OP_ENDIF,) + OP_EQUAL,) + OP_EQUALVERIFY,) + OP_IF,) + OP_NOP,) + OP_NOT,) + OP_NOTIF,) + OP_PUSHDATA1,) + OP_RETURN,) + OP_SWAP,) + OP_VERIFY,) + SIGHASH_DEFAULT,) + SIGHASH_ALL,) + SIGHASH_NONE,) + SIGHASH_SINGLE,) + SIGHASH_ANYONECANPAY,) + SegwitV0SignatureMsg,) + TaggedHash,) + TaprootSignatureMsg,) + is_op_success,) + taproot_construct,) +)) +from test_framework.script_util import () + key_to_p2pk_script,) + key_to_p2pkh_script,) + key_to_p2wpkh_script,) + keyhash_to_p2pkh_script,) + script_to_p2sh_script,) + script_to_p2wsh_script,) +)) +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.util import () + assert_not_equal,) + assert_raises_rpc_error,) + assert_equal,) +)) +from test_framework.wallet_util import generate_keypair) +from test_framework.key import () + generate_privkey,) + compute_xonly_pubkey,) + sign_schnorr,) + tweak_add_privkey,) + ECKey,) +)) +from test_framework.crypto import secp256k1) +from test_framework.address import () + hash160,) + program_to_witness,) +)) +from collections import OrderedDict, namedtuple) +import json) +import hashlib) +import os) +import random) +) +# Whether or not to output generated test vectors, in JSON format.) +GEN_TEST_VECTORS = False) +) +# === Framework for building spending transactions. ===) +#) +# The computation is represented as a "context" dict, whose entries store potentially-unevaluated expressions that) +# refer to lower-level ones. By overwriting these expression, many aspects - both high and low level - of the signing) +# process can be overridden.) 
+#) +# Specifically, a context object is a dict that maps names to compositions of:) +# - values) +# - lists of values) +# - callables which, when fed the context object as argument, produce any of these) +#) +# The DEFAULT_CONTEXT object specifies a standard signing process, with many overridable knobs.) +#) +# The get(ctx, name) function can evaluate a name, and cache its result in the context.) +# getter(name) can be used to construct a callable that evaluates name. For example:) +#) +# ctx1 = {**DEFAULT_CONTEXT, inputs=[getter("sign"), b'\x01']}) +#) +# creates a context where the script inputs are a signature plus the bytes 0x01.) +#) +# override(expr, name1=expr1, name2=expr2, ...) can be used to cause an expression to be evaluated in a selectively) +# modified context. For example:) +#) +# ctx2 = {**DEFAULT_CONTEXT, sighash=override(default_sighash, hashtype=SIGHASH_DEFAULT)}) +#) +# creates a context ctx2 where the sighash is modified to use hashtype=SIGHASH_DEFAULT. This differs from) +#) +# ctx3 = {**DEFAULT_CONTEXT, hashtype=SIGHASH_DEFAULT}) +#) +# in that ctx3 will globally use hashtype=SIGHASH_DEFAULT (including in the hashtype byte appended to the signature)) +# while ctx2 only uses the modified hashtype inside the sighash calculation.) +) +def deep_eval(ctx, expr):) + """Recursively replace any callables c in expr (including inside lists) with c(ctx).""") + while callable(expr):) + expr = expr(ctx)) + if isinstance(expr, list):) + expr = [deep_eval(ctx, x) for x in expr]) + return expr) +) +# Data type to represent fully-evaluated expressions in a context dict (so we can avoid reevaluating them).) +Final = namedtuple("Final", "value")) +) +def get(ctx, name):) + """Evaluate name in context ctx.""") + assert name in ctx, "Missing '%s' in context" % name) + expr = ctx[name]) + if not isinstance(expr, Final):) + # Evaluate and cache the result.) 
+ expr = Final(deep_eval(ctx, expr))) + ctx[name] = expr) + return expr.value) +) +def getter(name):) + """Return a callable that evaluates name in its passed context.""") + return lambda ctx: get(ctx, name)) +) +def override(expr, **kwargs):) + """Return a callable that evaluates expr in a modified context.""") + return lambda ctx: deep_eval({**ctx, **kwargs}, expr)) +) +# === Implementations for the various default expressions in DEFAULT_CONTEXT ===) +) +def default_hashtype(ctx):) + """Default expression for "hashtype": SIGHASH_DEFAULT for taproot, SIGHASH_ALL otherwise.""") + mode = get(ctx, "mode")) + if mode == "taproot":) + return SIGHASH_DEFAULT) + else:) + return SIGHASH_ALL) +) +def default_tapleaf(ctx):) + """Default expression for "tapleaf": looking up leaf in tap[2].""") + return get(ctx, "tap").leaves[get(ctx, "leaf")]) +) +def default_script_taproot(ctx):) + """Default expression for "script_taproot": tapleaf.script.""") + return get(ctx, "tapleaf").script) +) +def default_leafversion(ctx):) + """Default expression for "leafversion": tapleaf.version""") + return get(ctx, "tapleaf").version) +) +def default_negflag(ctx):) + """Default expression for "negflag": tap.negflag.""") + return get(ctx, "tap").negflag) +) +def default_pubkey_internal(ctx):) + """Default expression for "pubkey_internal": tap.internal_pubkey.""") + return get(ctx, "tap").internal_pubkey) +) +def default_merklebranch(ctx):) + """Default expression for "merklebranch": tapleaf.merklebranch.""") + return get(ctx, "tapleaf").merklebranch) +) +def default_controlblock(ctx):) + """Default expression for "controlblock": combine leafversion, negflag, pubkey_internal, merklebranch.""") + return bytes([get(ctx, "leafversion") + get(ctx, "negflag")]) + get(ctx, "pubkey_internal") + get(ctx, "merklebranch")) +) +def default_sigmsg(ctx):) + """Default expression for "sigmsg": depending on mode, compute BIP341, BIP143, or legacy sigmsg.""") + tx = get(ctx, "tx")) + idx = get(ctx, "idx")) + hashtype = get(ctx, "hashtype_actual")) + mode = get(ctx, "mode")) + if mode == "taproot":) + # BIP341 signature hash) + utxos = get(ctx, "utxos")) + annex = get(ctx, "annex")) + if get(ctx, "leaf") is not None:) + codeseppos = get(ctx, "codeseppos")) + leaf_ver = get(ctx, "leafversion")) + script = get(ctx, "script_taproot")) + return TaprootSignatureMsg(tx, utxos, hashtype, idx, scriptpath=True, leaf_script=script, leaf_ver=leaf_ver, codeseparator_pos=codeseppos, annex=annex)) + else:) + return TaprootSignatureMsg(tx, utxos, hashtype, idx, scriptpath=False, annex=annex)) + elif mode == "witv0":) + # BIP143 signature hash) + scriptcode = get(ctx, "scriptcode")) + utxos = get(ctx, "utxos")) + return SegwitV0SignatureMsg(scriptcode, tx, idx, hashtype, utxos[idx].nValue)) + else:) + # Pre-segwit signature hash) + scriptcode = get(ctx, "scriptcode")) + return LegacySignatureMsg(scriptcode, tx, idx, hashtype)[0]) +) +def default_sighash(ctx):) + """Default expression for "sighash": depending on mode, compute tagged hash or dsha256 of sigmsg.""") + msg = get(ctx, "sigmsg")) + mode = get(ctx, "mode")) + if mode == "taproot":) + return TaggedHash("TapSighash", msg)) + else:) + if msg is None:) + return (1).to_bytes(32, 'little')) + else:) + return hash256(msg)) +) +def default_tweak(ctx):) + """Default expression for "tweak": None if a leaf is specified, tap[0] otherwise.""") + if get(ctx, "leaf") is None:) + return get(ctx, "tap").tweak) + return None) +) +def default_key_tweaked(ctx):) + """Default expression for "key_tweaked": key if 
tweak is None, tweaked with it otherwise."""
+    key = get(ctx, "key")
+    tweak = get(ctx, "tweak")
+    if tweak is None:
+        return key
+    else:
+        return tweak_add_privkey(key, tweak)
+
+def default_signature(ctx):
+    """Default expression for "signature": BIP340 signature or ECDSA signature depending on mode."""
+    sighash = get(ctx, "sighash")
+    deterministic = get(ctx, "deterministic")
+    if get(ctx, "mode") == "taproot":
+        key = get(ctx, "key_tweaked")
+        flip_r = get(ctx, "flag_flip_r")
+        flip_p = get(ctx, "flag_flip_p")
+        aux = bytes([0] * 32)
+        if not deterministic:
+            aux = random.getrandbits(256).to_bytes(32, 'big')
+        return sign_schnorr(key, sighash, flip_r=flip_r, flip_p=flip_p, aux=aux)
+    else:
+        key = get(ctx, "key")
+        return key.sign_ecdsa(sighash, rfc6979=deterministic)
+
+def default_hashtype_actual(ctx):
+    """Default expression for "hashtype_actual": hashtype, unless mismatching SIGHASH_SINGLE in taproot."""
+    hashtype = get(ctx, "hashtype")
+    mode = get(ctx, "mode")
+    if mode != "taproot":
+        return hashtype
+    idx = get(ctx, "idx")
+    tx = get(ctx, "tx")
+    if hashtype & 3 == SIGHASH_SINGLE and idx >= len(tx.vout):
+        return (hashtype & ~3) | SIGHASH_NONE
+    return hashtype
+
+def default_bytes_hashtype(ctx):
+    """Default expression for "bytes_hashtype": bytes([hashtype_actual]) if not 0, b"" otherwise."""
+    return bytes([x for x in [get(ctx, "hashtype_actual")] if x != 0])
+
+def default_sign(ctx):
+    """Default expression for "sign": concatenation of signature and bytes_hashtype."""
+    return get(ctx, "signature") + get(ctx, "bytes_hashtype")
+
+def default_inputs_keypath(ctx):
+    """Default expression for "inputs_keypath": a signature."""
+    return [get(ctx, "sign")]
+
+def default_witness_taproot(ctx):
+    """Default expression for "witness_taproot", consisting of inputs, script, control block, and annex as needed."""
+    annex = get(ctx, "annex")
+    suffix_annex = []
+    if annex is not None:
+        suffix_annex = [annex]
+    if get(ctx, "leaf") is None:
+        return get(ctx, "inputs_keypath") + suffix_annex
+    else:
+        return get(ctx, "inputs") + [bytes(get(ctx, "script_taproot")), get(ctx, "controlblock")] + suffix_annex
+
+def default_witness_witv0(ctx):
+    """Default expression for "witness_witv0", consisting of inputs and witness script, as needed."""
+    script = get(ctx, "script_witv0")
+    inputs = get(ctx, "inputs")
+    if script is None:
+        return inputs
+    else:
+        return inputs + [script]
+
+def default_witness(ctx):
+    """Default expression for "witness", delegating to "witness_taproot" or "witness_witv0" as needed."""
+    mode = get(ctx, "mode")
+    if mode == "taproot":
+        return get(ctx, "witness_taproot")
+    elif mode == "witv0":
+        return get(ctx, "witness_witv0")
+    else:
+        return []
+
+def default_scriptsig(ctx):
+    """Default expression for "scriptsig", consisting of inputs and redeemscript, as needed."""
+    scriptsig = []
+    mode = get(ctx, "mode")
+    if mode == "legacy":
+        scriptsig = get(ctx, "inputs")
+    redeemscript = get(ctx, "script_p2sh")
+    if redeemscript is not None:
+        scriptsig += [bytes(redeemscript)]
+    return scriptsig
+
+# The default context object.
+DEFAULT_CONTEXT = {
+    # == The main expressions to evaluate. Only override these for unusual or invalid spends. ==
+    # The overall witness stack, as a list of bytes objects.
+ "witness": default_witness,) + # The overall scriptsig, as a list of CScript objects (to be concatenated) and bytes objects (to be pushed)) + "scriptsig": default_scriptsig,) +) + # == Expressions you'll generally only override for intentionally invalid spends. ==) + # The witness stack for spending a taproot output.) + "witness_taproot": default_witness_taproot,) + # The witness stack for spending a P2WPKH/P2WSH output.) + "witness_witv0": default_witness_witv0,) + # The script inputs for a taproot key path spend.) + "inputs_keypath": default_inputs_keypath,) + # The actual hashtype to use (usually equal to hashtype, but in taproot SIGHASH_SINGLE is not always allowed).) + "hashtype_actual": default_hashtype_actual,) + # The bytes object for a full signature (including hashtype byte, if needed).) + "bytes_hashtype": default_bytes_hashtype,) + # A full script signature (bytes including hashtype, if needed)) + "sign": default_sign,) + # An ECDSA or Schnorr signature (excluding hashtype byte).) + "signature": default_signature,) + # The 32-byte tweaked key (equal to key for script path spends, or key+tweak for key path spends).) + "key_tweaked": default_key_tweaked,) + # The tweak to use (None for script path spends, the actual tweak for key path spends).) + "tweak": default_tweak,) + # The sigmsg value (preimage of sighash)) + "sigmsg": default_sigmsg,) + # The sighash value (32 bytes)) + "sighash": default_sighash,) + # The information about the chosen script path spend (TaprootLeafInfo object).) + "tapleaf": default_tapleaf,) + # The script to push, and include in the sighash, for a taproot script path spend.) + "script_taproot": default_script_taproot,) + # The internal pubkey for a taproot script path spend (32 bytes).) + "pubkey_internal": default_pubkey_internal,) + # The negation flag of the internal pubkey for a taproot script path spend.) + "negflag": default_negflag,) + # The leaf version to include in the sighash (this does not affect the one in the control block).) + "leafversion": default_leafversion,) + # The Merkle path to include in the control block for a script path spend.) + "merklebranch": default_merklebranch,) + # The control block to push for a taproot script path spend.) + "controlblock": default_controlblock,) + # Whether to produce signatures with invalid P sign (Schnorr signatures only).) + "flag_flip_p": False,) + # Whether to produce signatures with invalid R sign (Schnorr signatures only).) + "flag_flip_r": False,) +) + # == Parameters that can be changed without invalidating, but do have a default: ==) + # The hashtype (as an integer).) + "hashtype": default_hashtype,) + # The annex (only when mode=="taproot").) + "annex": None,) + # The codeseparator position (only when mode=="taproot").) + "codeseppos": -1,) + # The redeemscript to add to the scriptSig (if P2SH; None implies not P2SH).) + "script_p2sh": None,) + # The script to add to the witness in (if P2WSH; None implies P2WPKH)) + "script_witv0": None,) + # The leaf to use in taproot spends (if script path spend; None implies key path spend).) + "leaf": None,) + # The input arguments to provide to the executed script) + "inputs": [],) + # Use deterministic signing nonces) + "deterministic": False,) +) + # == Parameters to be set before evaluation: ==) + # - mode: what spending style to use ("taproot", "witv0", or "legacy").) + # - key: the (untweaked) private key to sign with (ECKey object for ECDSA, 32 bytes for Schnorr).) 
+ # - tap: the TaprootInfo object (see taproot_construct; needed in mode=="taproot").) + # - tx: the transaction to sign.) + # - utxos: the UTXOs being spent (needed in mode=="witv0" and mode=="taproot").) + # - idx: the input position being signed.) + # - scriptcode: the scriptcode to include in legacy and witv0 sighashes.) +}) +) +def flatten(lst):) + ret = []) + for elem in lst:) + if isinstance(elem, list):) + ret += flatten(elem)) + else:) + ret.append(elem)) + return ret) +) +) +def spend(tx, idx, utxos, **kwargs):) + """Sign transaction input idx of tx, provided utxos is the list of outputs being spent.) +) + Additional arguments may be provided that override any aspect of the signing process.) + See DEFAULT_CONTEXT above for what can be overridden, and what must be provided.) + """) +) + ctx = {**DEFAULT_CONTEXT, "tx":tx, "idx":idx, "utxos":utxos, **kwargs}) +) + def to_script(elem):) + """If fed a CScript, return it; if fed bytes, return a CScript that pushes it.""") + if isinstance(elem, CScript):) + return elem) + else:) + return CScript([elem])) +) + scriptsig_list = flatten(get(ctx, "scriptsig"))) + scriptsig = CScript(b"".join(bytes(to_script(elem)) for elem in scriptsig_list))) + witness_stack = flatten(get(ctx, "witness"))) + return (scriptsig, witness_stack)) +) +) +# === Spender objects ===) +#) +# Each spender is a tuple of:) +# - A scriptPubKey which is to be spent from (CScript)) +# - A comment describing the test (string)) +# - Whether the spending (on itself) is expected to be standard (bool)) +# - A tx-signing lambda returning (scriptsig, witness_stack), taking as inputs:) +# - A transaction to sign (CTransaction)) +# - An input position (int)) +# - The spent UTXOs by this transaction (list of CTxOut)) +# - Whether to produce a valid spend (bool)) +# - A string with an expected error message for failure case if known) +# - The (pre-taproot) sigops weight consumed by a successful spend) +# - Whether this spend cannot fail) +# - Whether this test demands being placed in a txin with no corresponding txout (for testing SIGHASH_SINGLE behavior)) +) +Spender = namedtuple("Spender", "script,comment,is_standard,sat_function,err_msg,sigops_weight,no_fail,need_vin_vout_mismatch")) +) +) +def make_spender(comment, *, tap=None, witv0=False, script=None, pkh=None, p2sh=False, spk_mutate_pre_p2sh=None, failure=None, standard=True, err_msg=None, sigops_weight=0, need_vin_vout_mismatch=False, **kwargs):) + """Helper for constructing Spender objects using the context signing framework.) 
+
+    * tap: a TaprootInfo object (see taproot_construct), for Taproot spends (cannot be combined with pkh, witv0, or script)
+    * witv0: boolean indicating the use of witness v0 spending (needs one of script or pkh)
+    * script: the actual script executed (for bare/P2WSH/P2SH spending)
+    * pkh: the public key for P2PKH or P2WPKH spending
+    * p2sh: whether the output is P2SH wrapper (this is supported even for Taproot, where it makes the output unencumbered)
+    * spk_mutate_pre_p2sh: a callable to be applied to the script (before potentially P2SH-wrapping it)
+    * failure: a dict of entries to override in the context when intentionally failing to spend (if None, no_fail will be set)
+    * standard: whether the (valid version of) spending is expected to be standard
+    * err_msg: a string with an expected error message for failure (or None, if not cared about)
+    * sigops_weight: the pre-taproot sigops weight consumed by a successful spend
+    * need_vin_vout_mismatch: whether this test requires being tested in a transaction input that has no corresponding
+      transaction output.
+    """
+
+    conf = dict()
+
+    # Compute scriptPubKey and set useful defaults based on the inputs.
+    if witv0:
+        assert tap is None
+        conf["mode"] = "witv0"
+        if pkh is not None:
+            # P2WPKH
+            assert script is None
+            pubkeyhash = hash160(pkh)
+            spk = key_to_p2wpkh_script(pkh)
+            conf["scriptcode"] = keyhash_to_p2pkh_script(pubkeyhash)
+            conf["script_witv0"] = None
+            conf["inputs"] = [getter("sign"), pkh]
+        elif script is not None:
+            # P2WSH
+            spk = script_to_p2wsh_script(script)
+            conf["scriptcode"] = script
+            conf["script_witv0"] = script
+        else:
+            assert False
+    elif tap is None:
+        conf["mode"] = "legacy"
+        if pkh is not None:
+            # P2PKH
+            assert script is None
+            pubkeyhash = hash160(pkh)
+            spk = keyhash_to_p2pkh_script(pubkeyhash)
+            conf["scriptcode"] = spk
+            conf["inputs"] = [getter("sign"), pkh]
+        elif script is not None:
+            # bare
+            spk = script
+            conf["scriptcode"] = script
+        else:
+            assert False
+    else:
+        assert script is None
+        conf["mode"] = "taproot"
+        conf["tap"] = tap
+        spk = tap.scriptPubKey
+
+    if spk_mutate_pre_p2sh is not None:
+        spk = spk_mutate_pre_p2sh(spk)
+
+    if p2sh:
+        # P2SH wrapper can be combined with anything else
+        conf["script_p2sh"] = spk
+        spk = script_to_p2sh_script(spk)
+
+    conf = {**conf, **kwargs}
+
+    def sat_fn(tx, idx, utxos, valid):
+        if valid:
+            return spend(tx, idx, utxos, **conf)
+        else:
+            assert failure is not None
+            return spend(tx, idx, utxos, **{**conf, **failure})
+
+    return Spender(script=spk, comment=comment, is_standard=standard, sat_function=sat_fn, err_msg=err_msg, sigops_weight=sigops_weight, no_fail=failure is None, need_vin_vout_mismatch=need_vin_vout_mismatch)
+
+def add_spender(spenders, *args, **kwargs):
+    """Make a spender using make_spender, and add it to spenders."""
+    spenders.append(make_spender(*args, **kwargs))
+
+# === Helpers for the test ===
+
+def random_checksig_style(pubkey):
+    """Creates a random CHECKSIG* tapscript that would succeed with only the valid signature on witness stack."""
+    opcode = random.choice([OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKSIGADD])
+    if opcode == OP_CHECKSIGVERIFY:
+        ret = CScript([pubkey, opcode, OP_1])
+    elif opcode == OP_CHECKSIGADD:
+        num = random.choice([0, 0x7fffffff, -0x7fffffff])
+        ret = CScript([num, pubkey, opcode, num + 1, OP_EQUAL])
+    else:
+        ret = CScript([pubkey, opcode])
+    return bytes(ret)
+
+def bitflipper(expr):
+    """Return a
callable that evaluates expr and returns it with a random bitflip.""") + def fn(ctx):) + sub = deep_eval(ctx, expr)) + assert isinstance(sub, bytes)) + return (int.from_bytes(sub, 'little') ^ (1 << random.randrange(len(sub) * 8))).to_bytes(len(sub), 'little')) + return fn) +) +def zero_appender(expr):) + """Return a callable that evaluates expr and returns it with a zero added.""") + return lambda ctx: deep_eval(ctx, expr) + b"\x00") +) +def byte_popper(expr):) + """Return a callable that evaluates expr and returns it with its last byte removed.""") + return lambda ctx: deep_eval(ctx, expr)[:-1]) +) +# Expected error strings) +) +ERR_SIG_SIZE = {"err_msg": "Invalid Schnorr signature size"}) +ERR_SIG_HASHTYPE = {"err_msg": "Invalid Schnorr signature hash type"}) +ERR_SIG_SCHNORR = {"err_msg": "Invalid Schnorr signature"}) +ERR_OP_RETURN = {"err_msg": "OP_RETURN was encountered"}) +ERR_CONTROLBLOCK_SIZE = {"err_msg": "Invalid Taproot control block size"}) +ERR_WITNESS_PROGRAM_MISMATCH = {"err_msg": "Witness program hash mismatch"}) +ERR_PUSH_LIMIT = {"err_msg": "Push value size limit exceeded"}) +ERR_DISABLED_OPCODE = {"err_msg": "Attempted to use a disabled opcode"}) +ERR_TAPSCRIPT_CHECKMULTISIG = {"err_msg": "OP_CHECKMULTISIG(VERIFY) is not available in tapscript"}) +ERR_MINIMALIF = {"err_msg": "OP_IF/NOTIF argument must be minimal in tapscript"}) +ERR_UNKNOWN_PUBKEY = {"err_msg": "Public key is neither compressed or uncompressed"}) +ERR_STACK_SIZE = {"err_msg": "Stack size limit exceeded"}) +ERR_CLEANSTACK = {"err_msg": "Stack size must be exactly one after execution"}) +ERR_STACK_EMPTY = {"err_msg": "Operation not valid with the current stack size"}) +ERR_SIGOPS_RATIO = {"err_msg": "Too much signature validation relative to witness weight"}) +ERR_UNDECODABLE = {"err_msg": "Opcode missing or not understood"}) +ERR_NO_SUCCESS = {"err_msg": "Script evaluated without error but finished with a false/empty top stack element"}) +ERR_EMPTY_WITNESS = {"err_msg": "Witness program was passed an empty witness"}) +ERR_CHECKSIGVERIFY = {"err_msg": "Script failed an OP_CHECKSIGVERIFY operation"}) +) +VALID_SIGHASHES_ECDSA = [) + SIGHASH_ALL,) + SIGHASH_NONE,) + SIGHASH_SINGLE,) + SIGHASH_ANYONECANPAY + SIGHASH_ALL,) + SIGHASH_ANYONECANPAY + SIGHASH_NONE,) + SIGHASH_ANYONECANPAY + SIGHASH_SINGLE) +]) +) +VALID_SIGHASHES_TAPROOT = [SIGHASH_DEFAULT] + VALID_SIGHASHES_ECDSA) +) +VALID_SIGHASHES_TAPROOT_SINGLE = [) + SIGHASH_SINGLE,) + SIGHASH_ANYONECANPAY + SIGHASH_SINGLE) +]) +) +VALID_SIGHASHES_TAPROOT_NO_SINGLE = [h for h in VALID_SIGHASHES_TAPROOT if h not in VALID_SIGHASHES_TAPROOT_SINGLE]) +) +SIGHASH_BITFLIP = {"failure": {"sighash": bitflipper(default_sighash)}}) +SIG_POP_BYTE = {"failure": {"sign": byte_popper(default_sign)}}) +SINGLE_SIG = {"inputs": [getter("sign")]}) +SIG_ADD_ZERO = {"failure": {"sign": zero_appender(default_sign)}}) +) +DUST_LIMIT = 600) +MIN_FEE = 50000) +) +# === Actual test cases ===) +) +) +def spenders_taproot_active():) + """Return a list of Spenders for testing post-Taproot activation behavior.""") +) + secs = [generate_privkey() for _ in range(8)]) + pubs = [compute_xonly_pubkey(sec)[0] for sec in secs]) +) + spenders = []) +) + # == Tests for BIP340 signature validation. ==) + # These are primarily tested through the test vectors implemented in libsecp256k1, and in src/tests/key_tests.cpp.) + # Some things are tested programmatically as well here.) +) + tap = taproot_construct(pubs[0])) + # Test with key with bit flipped.) 
+ add_spender(spenders, "sig/key", tap=tap, key=secs[0], failure={"key_tweaked": bitflipper(default_key_tweaked)}, **ERR_SIG_SCHNORR)) + # Test with sighash with bit flipped.) + add_spender(spenders, "sig/sighash", tap=tap, key=secs[0], failure={"sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)) + # Test with invalid R sign.) + add_spender(spenders, "sig/flip_r", tap=tap, key=secs[0], failure={"flag_flip_r": True}, **ERR_SIG_SCHNORR)) + # Test with invalid P sign.) + add_spender(spenders, "sig/flip_p", tap=tap, key=secs[0], failure={"flag_flip_p": True}, **ERR_SIG_SCHNORR)) + # Test with signature with bit flipped.) + add_spender(spenders, "sig/bitflip", tap=tap, key=secs[0], failure={"signature": bitflipper(default_signature)}, **ERR_SIG_SCHNORR)) +) + # == Test involving an internal public key not on the curve ==) +) + # X-only public keys are 32 bytes, but not every 32-byte array is a valid public key; only) + # around 50% of them are. This does not affect users using correct software; these "keys" have) + # no corresponding private key, and thus will never appear as output of key) + # generation/derivation/tweaking.) + #) + # Using an invalid public key as P2TR output key makes the UTXO unspendable. Revealing an) + # invalid public key as internal key in a P2TR script path spend also makes the spend invalid.) + # These conditions are explicitly spelled out in BIP341.) + #) + # It is however hard to create test vectors for this, because it involves "guessing" how a) + # hypothetical incorrect implementation deals with an obviously-invalid condition, and making) + # sure that guessed behavior (accepting it in certain condition) doesn't occur.) + #) + # The test case added here tries to detect a very specific bug a verifier could have: if they) + # don't verify whether or not a revealed internal public key in a script path spend is valid,) + # and (correctly) implement output_key == tweak(internal_key, tweakval) but (incorrectly) treat) + # tweak(invalid_key, tweakval) as equal the public key corresponding to private key tweakval.) + # This may seem like a far-fetched edge condition to test for, but in fact, the BIP341 wallet) + # pseudocode did exactly that (but obviously only triggerable by someone invoking the tweaking) + # function with an invalid public key, which shouldn't happen).) +) + # Generate an invalid public key) + while True:) + invalid_pub = random.randbytes(32)) + if not secp256k1.GE.is_valid_x(int.from_bytes(invalid_pub, 'big')):) + break) +) + # Implement a test case that detects validation logic which maps invalid public keys to the) + # point at infinity in the tweaking logic.) + tap = taproot_construct(invalid_pub, [("true", CScript([OP_1]))], treat_internal_as_infinity=True)) + add_spender(spenders, "output/invalid_x", tap=tap, key_tweaked=tap.tweak, failure={"leaf": "true", "inputs": []}, **ERR_WITNESS_PROGRAM_MISMATCH)) +) + # Do the same thing without invalid point, to make sure there is no mistake in the test logic.) + tap = taproot_construct(pubs[0], [("true", CScript([OP_1]))])) + add_spender(spenders, "output/invalid_x_mock", tap=tap, key=secs[0], leaf="true", inputs=[])) +) + # == Tests for signature hashing ==) +) + # Run all tests once with no annex, and once with a valid random annex.) 
+ for annex in [None, lambda _: bytes([ANNEX_TAG]) + random.randbytes(random.randrange(0, 250))]:) + # Non-empty annex is non-standard) + no_annex = annex is None) +) + # Sighash mutation tests (test all sighash combinations)) + for hashtype in VALID_SIGHASHES_TAPROOT:) + common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}) +) + # Pure pubkey) + tap = taproot_construct(pubs[0])) + add_spender(spenders, "sighash/purepk", tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)) +) + # Pubkey/P2PK script combination) + scripts = [("s0", CScript(random_checksig_style(pubs[1])))]) + tap = taproot_construct(pubs[0], scripts)) + add_spender(spenders, "sighash/keypath_hashtype_%x" % hashtype, tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)) + add_spender(spenders, "sighash/scriptpath_hashtype_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)) +) + # Test SIGHASH_SINGLE behavior in combination with mismatching outputs) + if hashtype in VALID_SIGHASHES_TAPROOT_SINGLE:) + add_spender(spenders, "sighash/keypath_hashtype_mis_%x" % hashtype, tap=tap, key=secs[0], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)) + add_spender(spenders, "sighash/scriptpath_hashtype_mis_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), **SINGLE_SIG, failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)) +) + # Test OP_CODESEPARATOR impact on sighashing.) + hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)) + common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}) + scripts = [) + ("pk_codesep", CScript(random_checksig_style(pubs[1]) + bytes([OP_CODESEPARATOR]))), # codesep after checksig) + ("codesep_pk", CScript(bytes([OP_CODESEPARATOR]) + random_checksig_style(pubs[1]))), # codesep before checksig) + ("branched_codesep", CScript([random.randbytes(random.randrange(2, 511)), OP_DROP, OP_IF, OP_CODESEPARATOR, pubs[0], OP_ELSE, OP_CODESEPARATOR, pubs[1], OP_ENDIF, OP_CHECKSIG])), # branch dependent codesep) + # Note that the first data push in the "branched_codesep" script has the purpose of) + # randomizing the sighash, both by varying script size and content. In order to) + # avoid MINIMALDATA script verification errors caused by not-minimal-encoded data) + # pushes (e.g. `OP_PUSH1 1` instead of `OP_1`), we set a minimum data size of 2 bytes.) + ]) + random.shuffle(scripts)) + tap = taproot_construct(pubs[0], scripts)) + add_spender(spenders, "sighash/pk_codesep", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)) + add_spender(spenders, "sighash/codesep_pk", tap=tap, leaf="codesep_pk", key=secs[1], codeseppos=0, **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)) + add_spender(spenders, "sighash/branched_codesep/left", tap=tap, leaf="branched_codesep", key=secs[0], codeseppos=3, **common, inputs=[getter("sign"), b'\x01'], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)) + add_spender(spenders, "sighash/branched_codesep/right", tap=tap, leaf="branched_codesep", key=secs[1], codeseppos=6, **common, inputs=[getter("sign"), b''], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)) +) + # Reusing the scripts above, test that various features affect the sighash.) 
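+        # Per BIP342, script-path sighashes also commit to the position of the last
+        # executed OP_CODESEPARATOR (0xffffffff when none executed), which is what the
+        # codeseppos arguments above pin down. The sentinel fits an unsigned 32-bit int:
+        assert 0xffffffff == (1 << 32) - 1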
+ add_spender(spenders, "sighash/annex", tap=tap, leaf="pk_codesep", key=secs[1], hashtype=hashtype, standard=False, **SINGLE_SIG, annex=bytes([ANNEX_TAG]), failure={"sighash": override(default_sighash, annex=None)}, **ERR_SIG_SCHNORR)) + add_spender(spenders, "sighash/script", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, script_taproot=tap.leaves["codesep_pk"].script)}, **ERR_SIG_SCHNORR)) + add_spender(spenders, "sighash/leafver", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leafversion=random.choice([x & 0xFE for x in range(0x100) if x & 0xFE,LEAF_VERSION_TAPSCRIPT]))}, **ERR_SIG_SCHNORR)) + add_spender(spenders, "sighash/scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leaf=None)}, **ERR_SIG_SCHNORR)) + add_spender(spenders, "sighash/keypath", tap=tap, key=secs[0], **common, failure={"sighash": override(default_sighash, leaf="pk_codesep")}, **ERR_SIG_SCHNORR)) +) + # Test that invalid hashtypes don't work, both in key path and script path spends) + hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)) + for invalid_hashtype in [x for x in range(0x100) if x not in VALID_SIGHASHES_TAPROOT]:) + add_spender(spenders, "sighash/keypath_unk_hashtype_%x" % invalid_hashtype, tap=tap, key=secs[0], hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)) + add_spender(spenders, "sighash/scriptpath_unk_hashtype_%x" % invalid_hashtype, tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)) +) + # Test that hashtype 0 cannot have a hashtype byte, and 1 must have one.) + add_spender(spenders, "sighash/hashtype0_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)) + add_spender(spenders, "sighash/hashtype0_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)) + add_spender(spenders, "sighash/hashtype1_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)) + add_spender(spenders, "sighash/hashtype1_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)) + # Test that hashtype 0 and hashtype 1 cannot be transmuted into each other.) 
+ add_spender(spenders, "sighash/hashtype0to1_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)) + add_spender(spenders, "sighash/hashtype0to1_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)) + add_spender(spenders, "sighash/hashtype1to0_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)) + add_spender(spenders, "sighash/hashtype1to0_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)) +) + # Test aspects of signatures with unusual lengths) + for hashtype in [SIGHASH_DEFAULT, random.choice(VALID_SIGHASHES_TAPROOT)]:) + scripts = [) + ("csv", CScript([pubs[2], OP_CHECKSIGVERIFY, OP_1])),) + ("cs_pos", CScript([pubs[2], OP_CHECKSIG])),) + ("csa_pos", CScript([OP_0, pubs[2], OP_CHECKSIGADD, OP_1, OP_EQUAL])),) + ("cs_neg", CScript([pubs[2], OP_CHECKSIG, OP_NOT])),) + ("csa_neg", CScript([OP_2, pubs[2], OP_CHECKSIGADD, OP_2, OP_EQUAL]))) + ]) + random.shuffle(scripts)) + tap = taproot_construct(pubs[3], scripts)) + # Empty signatures) + add_spender(spenders, "siglen/empty_keypath", tap=tap, key=secs[3], hashtype=hashtype, failure={"sign": b""}, **ERR_SIG_SIZE)) + add_spender(spenders, "siglen/empty_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_CHECKSIGVERIFY)) + add_spender(spenders, "siglen/empty_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)) + add_spender(spenders, "siglen/empty_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)) + add_spender(spenders, "siglen/empty_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random.randbytes(random.randrange(1, 63))}, **ERR_SIG_SIZE)) + add_spender(spenders, "siglen/empty_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random.randbytes(random.randrange(66, 100))}, **ERR_SIG_SIZE)) + # Appending a zero byte to signatures invalidates them) + add_spender(spenders, "siglen/padzero_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))) + add_spender(spenders, "siglen/padzero_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))) + add_spender(spenders, "siglen/padzero_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))) + add_spender(spenders, "siglen/padzero_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))) + add_spender(spenders, "siglen/padzero_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))) + add_spender(spenders, "siglen/padzero_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if 
hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))) + # Removing the last byte from signatures invalidates them) + add_spender(spenders, "siglen/popbyte_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))) + add_spender(spenders, "siglen/popbyte_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))) + add_spender(spenders, "siglen/popbyte_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))) + add_spender(spenders, "siglen/popbyte_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))) + add_spender(spenders, "siglen/popbyte_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))) + add_spender(spenders, "siglen/popbyte_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))) + # Verify that an invalid signature is not allowed, not even when the CHECKSIG* is expected to fail.) + add_spender(spenders, "siglen/invalid_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)) + add_spender(spenders, "siglen/invalid_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)) +) + # == Test that BIP341 spending only applies to witness version 1, program length 32, no P2SH ==) +) + for p2sh in [False, True]:) + for witver in range(1, 17):) + for witlen in [20, 31, 32, 33]:) + def mutate(spk):) + prog = spk[2:]) + assert len(prog) == 32) + if witlen < 32:) + prog = prog[0:witlen]) + elif witlen > 32:) + prog += bytes([0 for _ in range(witlen - 32)])) + return CScript([CScriptOp.encode_op_n(witver), prog])) + scripts = [("s0", CScript([pubs[0], OP_CHECKSIG])), ("dummy", CScript([OP_RETURN]))]) + tap = taproot_construct(pubs[1], scripts)) + if not p2sh and witver == 1 and witlen == 32:) + add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)) + add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, failure={"leaf": "dummy"}, **ERR_OP_RETURN)) + else:) + add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], standard=False)) + add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, standard=False)) +) + # == Test various aspects of BIP341 spending paths ==) +) + # A set of functions that compute the hashing partner in a Merkle tree, designed to exercise) + # edge cases. This relies on the taproot_construct feature that a lambda can be passed in) + # instead of a subtree, to compute the partner to be hashed with.) 
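+    # Rough sketch (illustrative only, independent of taproot_construct's real logic)
+    # of the node combination these partner functions feed: a BIP341 branch is the
+    # TapBranch tagged hash of the lexicographically sorted pair, so it is commutative:
+    def toy_tapbranch(l, r):
+        tag = hashlib.sha256(b"TapBranch").digest()
+        return hashlib.sha256(tag + tag + b"".join(sorted([l, r]))).digest()
+    assert toy_tapbranch(bytes(32), bytes([0xff] * 32)) == toy_tapbranch(bytes([0xff] * 32), bytes(32))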
+ PARTNER_MERKLE_FN = [) + # Combine with itself) + lambda h: h,) + # Combine with hash 0) + lambda h: bytes([0 for _ in range(32)]),) + # Combine with hash 2^256-1) + lambda h: bytes([0xff for _ in range(32)]),) + # Combine with itself-1 (BE)) + lambda h: (int.from_bytes(h, 'big') - 1).to_bytes(32, 'big'),) + # Combine with itself+1 (BE)) + lambda h: (int.from_bytes(h, 'big') + 1).to_bytes(32, 'big'),) + # Combine with itself-1 (LE)) + lambda h: (int.from_bytes(h, 'little') - 1).to_bytes(32, 'big'),) + # Combine with itself+1 (LE)) + lambda h: (int.from_bytes(h, 'little') + 1).to_bytes(32, 'little'),) + # Combine with random bitflipped version of self.) + lambda h: (int.from_bytes(h, 'little') ^ (1 << random.randrange(256))).to_bytes(32, 'little')) + ]) + # Start with a tree of that has depth 1 for "128deep" and depth 2 for "129deep".) + scripts = [("128deep", CScript([pubs[0], OP_CHECKSIG])), [("129deep", CScript([pubs[0], OP_CHECKSIG])), random.choice(PARTNER_MERKLE_FN)]]) + # Add 127 nodes on top of that tree, so that "128deep" and "129deep" end up at their designated depths.) + for _ in range(127):) + scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]) + tap = taproot_construct(pubs[0], scripts)) + # Test that spends with a depth of 128 work, but 129 doesn't (even with a tree with weird Merkle branches in it).) + add_spender(spenders, "spendpath/merklelimit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"leaf": "129deep"}, **ERR_CONTROLBLOCK_SIZE)) + # Test that flipping the negation bit invalidates spends.) + add_spender(spenders, "spendpath/negflag", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"negflag": lambda ctx: 1 - default_negflag(ctx)}, **ERR_WITNESS_PROGRAM_MISMATCH)) + # Test that bitflips in the Merkle branch invalidate it.) + add_spender(spenders, "spendpath/bitflipmerkle", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"merklebranch": bitflipper(default_merklebranch)}, **ERR_WITNESS_PROGRAM_MISMATCH)) + # Test that bitflips in the internal pubkey invalidate it.) + add_spender(spenders, "spendpath/bitflippubkey", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"pubkey_internal": bitflipper(default_pubkey_internal)}, **ERR_WITNESS_PROGRAM_MISMATCH)) + # Test that empty witnesses are invalid.) + add_spender(spenders, "spendpath/emptywit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"witness": []}, **ERR_EMPTY_WITNESS)) + # Test that adding garbage to the control block invalidates it.) + add_spender(spenders, "spendpath/padlongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random.randbytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)) + # Test that truncating the control block invalidates it.) + add_spender(spenders, "spendpath/trunclongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)) +) + scripts = [("s", CScript([pubs[0], OP_CHECKSIG]))]) + tap = taproot_construct(pubs[1], scripts)) + # Test that adding garbage to the control block invalidates it.) + add_spender(spenders, "spendpath/padshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random.randbytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)) + # Test that truncating the control block invalidates it.) 
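+    # Size rule being exercised (BIP341): a control block is 33 + 32*m bytes for a
+    # Merkle path of length m, with m <= 128; any other length is invalid.
+    assert 33 + 32 * 128 == 4129  # largest acceptable control block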
+ add_spender(spenders, "spendpath/truncshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)) + # Test that truncating the control block to 1 byte ("-1 Merkle length") invalidates it) + add_spender(spenders, "spendpath/trunc1shortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:1]}, **ERR_CONTROLBLOCK_SIZE)) +) + # == Test BIP342 edge cases ==) +) + csa_low_val = random.randrange(0, 17) # Within range for OP_n) + csa_low_result = csa_low_val + 1) +) + csa_high_val = random.randrange(17, 100) if random.getrandbits(1) else random.randrange(-100, -1) # Outside OP_n range) + csa_high_result = csa_high_val + 1) +) + OVERSIZE_NUMBER = 2**31) + assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER))), 6)) + assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER-1))), 5)) +) + big_choices = []) + big_scriptops = []) + for i in range(1000):) + r = random.randrange(len(pubs))) + big_choices.append(r)) + big_scriptops += [pubs[r], OP_CHECKSIGVERIFY]) +) +) + def big_spend_inputs(ctx):) + """Helper function to construct the script input for t33/t34 below.""") + # Instead of signing 999 times, precompute signatures for every (key, hashtype) combination) + sigs = {}) + for ht in VALID_SIGHASHES_TAPROOT:) + for k in range(len(pubs)):) + sigs[(k, ht)] = override(default_sign, hashtype=ht, key=secs[k])(ctx)) + num = get(ctx, "num")) + return [sigs[(big_choices[i], random.choice(VALID_SIGHASHES_TAPROOT))] for i in range(num - 1, -1, -1)]) +) + # Various BIP342 features) + scripts = [) + # 0) drop stack element and OP_CHECKSIG) + ("t0", CScript([OP_DROP, pubs[1], OP_CHECKSIG])),) + # 1) normal OP_CHECKSIG) + ("t1", CScript([pubs[1], OP_CHECKSIG])),) + # 2) normal OP_CHECKSIGVERIFY) + ("t2", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1])),) + # 3) Hypothetical OP_CHECKMULTISIG script that takes a single sig as input) + ("t3", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIG])),) + # 4) Hypothetical OP_CHECKMULTISIGVERIFY script that takes a single sig as input) + ("t4", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIGVERIFY, OP_1])),) + # 5) OP_IF script that needs a true input) + ("t5", CScript([OP_IF, pubs[1], OP_CHECKSIG, OP_ELSE, OP_RETURN, OP_ENDIF])),) + # 6) OP_NOTIF script that needs a true input) + ("t6", CScript([OP_NOTIF, OP_RETURN, OP_ELSE, pubs[1], OP_CHECKSIG, OP_ENDIF])),) + # 7) OP_CHECKSIG with an empty key) + ("t7", CScript([OP_0, OP_CHECKSIG])),) + # 8) OP_CHECKSIGVERIFY with an empty key) + ("t8", CScript([OP_0, OP_CHECKSIGVERIFY, OP_1])),) + # 9) normal OP_CHECKSIGADD that also ensures return value is correct) + ("t9", CScript([csa_low_val, pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),) + # 10) OP_CHECKSIGADD with empty key) + ("t10", CScript([csa_low_val, OP_0, OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),) + # 11) OP_CHECKSIGADD with missing counter stack element) + ("t11", CScript([pubs[1], OP_CHECKSIGADD, OP_1, OP_EQUAL])),) + # 12) OP_CHECKSIG that needs invalid signature) + ("t12", CScript([pubs[1], OP_CHECKSIGVERIFY, pubs[0], OP_CHECKSIG, OP_NOT])),) + # 13) OP_CHECKSIG with empty key that needs invalid signature) + ("t13", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_CHECKSIG, OP_NOT])),) + # 14) OP_CHECKSIGADD that needs invalid signature) + ("t14", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, pubs[0], OP_CHECKSIGADD, OP_NOT])),) + # 15) 
OP_CHECKSIGADD with empty key that needs invalid signature) + ("t15", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGADD, OP_NOT])),) + # 16) OP_CHECKSIG with unknown pubkey type) + ("t16", CScript([OP_1, OP_CHECKSIG])),) + # 17) OP_CHECKSIGADD with unknown pubkey type) + ("t17", CScript([OP_0, OP_1, OP_CHECKSIGADD])),) + # 18) OP_CHECKSIGVERIFY with unknown pubkey type) + ("t18", CScript([OP_1, OP_CHECKSIGVERIFY, OP_1])),) + # 19) script longer than 10000 bytes and over 201 non-push opcodes) + ("t19", CScript([OP_0, OP_0, OP_2DROP] * 10001 + [pubs[1], OP_CHECKSIG])),) + # 20) OP_CHECKSIGVERIFY with empty key) + ("t20", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGVERIFY, OP_1])),) + # 21) Script that grows the stack to 1000 elements) + ("t21", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 999 + [OP_DROP] * 999)),) + # 22) Script that grows the stack to 1001 elements) + ("t22", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 1000 + [OP_DROP] * 1000)),) + # 23) Script that expects an input stack of 1000 elements) + ("t23", CScript([OP_DROP] * 999 + [pubs[1], OP_CHECKSIG])),) + # 24) Script that expects an input stack of 1001 elements) + ("t24", CScript([OP_DROP] * 1000 + [pubs[1], OP_CHECKSIG])),) + # 25) Script that pushes a MAX_SCRIPT_ELEMENT_SIZE-bytes element) + ("t25", CScript([random.randbytes(MAX_SCRIPT_ELEMENT_SIZE), OP_DROP, pubs[1], OP_CHECKSIG])),) + # 26) Script that pushes a (MAX_SCRIPT_ELEMENT_SIZE+1)-bytes element) + ("t26", CScript([random.randbytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, pubs[1], OP_CHECKSIG])),) + # 27) CHECKSIGADD that must fail because numeric argument number is >4 bytes) + ("t27", CScript([CScriptNum(OVERSIZE_NUMBER), pubs[1], OP_CHECKSIGADD])),) + # 28) Pushes random CScriptNum value, checks OP_CHECKSIGADD result) + ("t28", CScript([csa_high_val, pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),) + # 29) CHECKSIGADD that succeeds with proper sig because numeric argument number is <=4 bytes) + ("t29", CScript([CScriptNum(OVERSIZE_NUMBER-1), pubs[1], OP_CHECKSIGADD])),) + # 30) Variant of t1 with "normal" 33-byte pubkey) + ("t30", CScript([b'\x03' + pubs[1], OP_CHECKSIG])),) + # 31) Variant of t2 with "normal" 33-byte pubkey) + ("t31", CScript([b'\x02' + pubs[1], OP_CHECKSIGVERIFY, OP_1])),) + # 32) Variant of t28 with "normal" 33-byte pubkey) + ("t32", CScript([csa_high_val, b'\x03' + pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),) + # 33) 999-of-999 multisig) + ("t33", CScript(big_scriptops[:1998] + [OP_1])),) + # 34) 1000-of-1000 multisig) + ("t34", CScript(big_scriptops[:2000] + [OP_1])),) + # 35) Variant of t9 that uses a non-minimally encoded input arg) + ("t35", CScript([bytes([csa_low_val]), pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),) + # 36) Empty script) + ("t36", CScript([])),) + ]) + # Add many dummies to test huge trees) + for j in range(100000):) + scripts.append((None, CScript([OP_RETURN, random.randrange(100000)])))) + random.shuffle(scripts)) + tap = taproot_construct(pubs[0], scripts)) + common = {) + "hashtype": hashtype,) + "key": secs[1],) + "tap": tap,) + }) + # Test that MAX_SCRIPT_ELEMENT_SIZE byte stack element inputs are valid, but not one more (and 80 bytes is standard but 81 is not).) 
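+    # The consensus bound referenced here is MAX_SCRIPT_ELEMENT_SIZE (520 bytes); the
+    # 80-byte figure is the stricter standardness cap on witness stack items.
+    assert MAX_SCRIPT_ELEMENT_SIZE == 520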
+ add_spender(spenders, "tapscript/inputmaxlimit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random.randbytes(MAX_SCRIPT_ELEMENT_SIZE)], failure={"inputs": [getter("sign"), random.randbytes(MAX_SCRIPT_ELEMENT_SIZE+1)]}, **ERR_PUSH_LIMIT)) + add_spender(spenders, "tapscript/input80limit", leaf="t0", **common, inputs=[getter("sign"), random.randbytes(80)])) + add_spender(spenders, "tapscript/input81limit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random.randbytes(81)])) + # Test that OP_CHECKMULTISIG and OP_CHECKMULTISIGVERIFY cause failure, but OP_CHECKSIG and OP_CHECKSIGVERIFY work.) + add_spender(spenders, "tapscript/disabled_checkmultisig", leaf="t1", **common, **SINGLE_SIG, failure={"leaf": "t3"}, **ERR_TAPSCRIPT_CHECKMULTISIG)) + add_spender(spenders, "tapscript/disabled_checkmultisigverify", leaf="t2", **common, **SINGLE_SIG, failure={"leaf": "t4"}, **ERR_TAPSCRIPT_CHECKMULTISIG)) + # Test that OP_IF and OP_NOTIF do not accept non-0x01 as truth value (the MINIMALIF rule is consensus in Tapscript)) + add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x02']}, **ERR_MINIMALIF)) + add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x03']}, **ERR_MINIMALIF)) + add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0001']}, **ERR_MINIMALIF)) + add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0100']}, **ERR_MINIMALIF)) + # Test that 1-byte public keys (which are unknown) are acceptable but nonstandard with unrelated signatures, but 0-byte public keys are not valid.) + add_spender(spenders, "tapscript/unkpk/checksig", leaf="t16", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)) + add_spender(spenders, "tapscript/unkpk/checksigadd", leaf="t17", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)) + add_spender(spenders, "tapscript/unkpk/checksigverify", leaf="t18", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)) + # Test that 33-byte public keys (which are unknown) are acceptable but nonstandard with valid signatures, but normal pubkeys are not valid in that case.) + add_spender(spenders, "tapscript/oldpk/checksig", leaf="t30", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t1"}, **ERR_SIG_SCHNORR)) + add_spender(spenders, "tapscript/oldpk/checksigadd", leaf="t31", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t2"}, **ERR_SIG_SCHNORR)) + add_spender(spenders, "tapscript/oldpk/checksigverify", leaf="t32", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t28"}, **ERR_SIG_SCHNORR)) + # Test that 0-byte public keys are not acceptable.) 
+ add_spender(spenders, "tapscript/emptypk/checksig", leaf="t1", **SINGLE_SIG, **common, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)) + add_spender(spenders, "tapscript/emptypk/checksigverify", leaf="t2", **SINGLE_SIG, **common, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)) + add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)) + add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t35", standard=False, **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)) + # Test that OP_CHECKSIGADD results are as expected) + add_spender(spenders, "tapscript/checksigaddresults", leaf="t28", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")) + add_spender(spenders, "tapscript/checksigaddoversize", leaf="t29", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")) + # Test that OP_CHECKSIGADD requires 3 stack elements.) + add_spender(spenders, "tapscript/checksigadd3args", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t11"}, **ERR_STACK_EMPTY)) + # Test that empty signatures do not cause script failure in OP_CHECKSIG and OP_CHECKSIGADD (but do fail with empty pubkey, and do fail OP_CHECKSIGVERIFY)) + add_spender(spenders, "tapscript/emptysigs/checksig", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t13"}, **ERR_UNKNOWN_PUBKEY)) + add_spender(spenders, "tapscript/emptysigs/nochecksigverify", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t20"}, **ERR_UNKNOWN_PUBKEY)) + add_spender(spenders, "tapscript/emptysigs/checksigadd", leaf="t14", **common, inputs=[b'', getter("sign")], failure={"leaf": "t15"}, **ERR_UNKNOWN_PUBKEY)) + # Test that scripts over 10000 bytes (and over 201 non-push ops) are acceptable.) + add_spender(spenders, "tapscript/no10000limit", leaf="t19", **SINGLE_SIG, **common)) + # Test that a stack size of 1000 elements is permitted, but 1001 isn't.) + add_spender(spenders, "tapscript/1000stack", leaf="t21", **SINGLE_SIG, **common, failure={"leaf": "t22"}, **ERR_STACK_SIZE)) + # Test that an input stack size of 1000 elements is permitted, but 1001 isn't.) + add_spender(spenders, "tapscript/1000inputs", leaf="t23", **common, inputs=[getter("sign")] + [b'' for _ in range(999)], failure={"leaf": "t24", "inputs": [getter("sign")] + [b'' for _ in range(1000)]}, **ERR_STACK_SIZE)) + # Test that pushing a MAX_SCRIPT_ELEMENT_SIZE byte stack element is valid, but one longer is not.) + add_spender(spenders, "tapscript/pushmaxlimit", leaf="t25", **common, **SINGLE_SIG, failure={"leaf": "t26"}, **ERR_PUSH_LIMIT)) + # Test that 999-of-999 multisig works (but 1000-of-1000 triggers stack size limits)) + add_spender(spenders, "tapscript/bigmulti", leaf="t33", **common, inputs=big_spend_inputs, num=999, failure={"leaf": "t34", "num": 1000}, **ERR_STACK_SIZE)) + # Test that the CLEANSTACK rule is consensus critical in tapscript) + add_spender(spenders, "tapscript/cleanstack", leaf="t36", tap=tap, inputs=[b'\x01'], failure={"inputs": [b'\x01', b'\x01']}, **ERR_CLEANSTACK)) +) + # == Test for sigops ratio limit ==) +) + # Given a number n, and a public key pk, functions that produce a (CScript, sigops). Each script takes as) + # input a valid signature with the passed pk followed by a dummy push of bytes that are to be dropped, and) + # will execute sigops signature checks.) + SIGOPS_RATIO_SCRIPTS = [) + # n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIG.) 
+        lambda n, pk: (CScript([OP_DROP, pk] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_CHECKSIG]), n + 1),
+        # n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGVERIFY.
+        lambda n, pk: (CScript([OP_DROP, pk, OP_0, OP_IF, OP_2DUP, OP_CHECKSIGVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_2, OP_SWAP, OP_CHECKSIGADD, OP_3, OP_EQUAL]), n + 1),
+        # n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIG.
+        lambda n, pk: (CScript([random.randbytes(220), OP_2DROP, pk, OP_1, OP_NOTIF, OP_2DUP, OP_CHECKSIG, OP_VERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_4, OP_SWAP, OP_CHECKSIGADD, OP_5, OP_EQUAL]), n + 1),
+        # n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGADD.
+        lambda n, pk: (CScript([OP_DROP, pk, OP_1, OP_IF, OP_ELSE, OP_2DUP, OP_6, OP_SWAP, OP_CHECKSIGADD, OP_7, OP_EQUALVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_8, OP_SWAP, OP_CHECKSIGADD, OP_9, OP_EQUAL]), n + 1),
+        # n+1 OP_CHECKSIGs, but also one OP_CHECKSIG with an empty signature.
+        lambda n, pk: (CScript([OP_DROP, OP_0, pk, OP_CHECKSIG, OP_NOT, OP_VERIFY, pk] + [OP_2DUP, OP_CHECKSIG, OP_VERIFY] * n + [OP_CHECKSIG]), n + 1),
+        # n OP_CHECKSIGADDs and 1 OP_CHECKSIG, but also an OP_CHECKSIGADD with an empty signature.
+        lambda n, pk: (CScript([OP_DROP, OP_0, OP_10, pk, OP_CHECKSIGADD, OP_10, OP_EQUALVERIFY, pk] + [OP_2DUP, OP_16, OP_SWAP, OP_CHECKSIGADD, b'\x11', OP_EQUALVERIFY] * n + [OP_CHECKSIG]), n + 1),
+    ]
+    for annex in [None, bytes([ANNEX_TAG]) + random.randbytes(random.randrange(1000))]:
+        for hashtype in [SIGHASH_DEFAULT, SIGHASH_ALL]:
+            for pubkey in [pubs[1], random.randbytes(random.choice([x for x in range(2, 81) if x != 32]))]:
+                for fn_num, fn in enumerate(SIGOPS_RATIO_SCRIPTS):
+                    merkledepth = random.randrange(129)
+
+
+                    def predict_sigops_ratio(n, dummy_size):
+                        """Predict whether spending fn(n, pubkey) with dummy_size will pass the ratio test."""
+                        script, sigops = fn(n, pubkey)
+                        # Predict the size of the witness for a given choice of n
+                        stacklen_size = 1
+                        sig_size = 64 + (hashtype != SIGHASH_DEFAULT)
+                        siglen_size = 1
+                        dummylen_size = 1 + 2 * (dummy_size >= 253)
+                        script_size = len(script)
+                        scriptlen_size = 1 + 2 * (script_size >= 253)
+                        control_size = 33 + 32 * merkledepth
+                        controllen_size = 1 + 2 * (control_size >= 253)
+                        annex_size = 0 if annex is None else len(annex)
+                        annexlen_size = 0 if annex is None else 1 + 2 * (annex_size >= 253)
+                        witsize = stacklen_size + sig_size + siglen_size + dummy_size + dummylen_size + script_size + scriptlen_size + control_size + controllen_size + annex_size + annexlen_size
+                        # sigops ratio test
+                        return witsize + 50 >= 50 * sigops
+                    # Make sure n is high enough that with empty dummy, the script is not valid
+                    n = 0
+                    while predict_sigops_ratio(n, 0):
+                        n += 1
+                    # But allow picking a bit higher still
+                    n += random.randrange(5)
+                    # Now pick dummy size *just* large enough that the overall construction passes
+                    dummylen = 0
+                    while not predict_sigops_ratio(n, dummylen):
+                        dummylen += 1
+                    scripts = [("s", fn(n, pubkey)[0])]
+                    for _ in range(merkledepth):
+                        scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
+                    tap = taproot_construct(pubs[0], scripts)
+                    standard = annex is None and dummylen <= 80 and len(pubkey) == 32
+                    add_spender(spenders, "tapscript/sigopsratio_%i" % fn_num, tap=tap, leaf="s", annex=annex, hashtype=hashtype, key=secs[1], inputs=[getter("sign"), random.randbytes(dummylen)], standard=standard,
failure={"inputs": [getter("sign"), random.randbytes(dummylen - 1)]}, **ERR_SIGOPS_RATIO)) +) + # Future leaf versions) + for leafver in range(0, 0x100, 2):) + if leafver == LEAF_VERSION_TAPSCRIPT or leafver == ANNEX_TAG:) + # Skip the defined LEAF_VERSION_TAPSCRIPT, and the ANNEX_TAG which is not usable as leaf version) + continue) + scripts = [) + ("bare_c0", CScript([OP_NOP])),) + ("bare_unkver", CScript([OP_NOP]), leafver),) + ("return_c0", CScript([OP_RETURN])),) + ("return_unkver", CScript([OP_RETURN]), leafver),) + ("undecodable_c0", CScript([OP_PUSHDATA1])),) + ("undecodable_unkver", CScript([OP_PUSHDATA1]), leafver),) + ("bigpush_c0", CScript([random.randbytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP])),) + ("bigpush_unkver", CScript([random.randbytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP]), leafver),) + ("1001push_c0", CScript([OP_0] * 1001)),) + ("1001push_unkver", CScript([OP_0] * 1001), leafver),) + ]) + random.shuffle(scripts)) + tap = taproot_construct(pubs[0], scripts)) + add_spender(spenders, "unkver/bare", standard=False, tap=tap, leaf="bare_unkver", failure={"leaf": "bare_c0"}, **ERR_CLEANSTACK)) + add_spender(spenders, "unkver/return", standard=False, tap=tap, leaf="return_unkver", failure={"leaf": "return_c0"}, **ERR_OP_RETURN)) + add_spender(spenders, "unkver/undecodable", standard=False, tap=tap, leaf="undecodable_unkver", failure={"leaf": "undecodable_c0"}, **ERR_UNDECODABLE)) + add_spender(spenders, "unkver/bigpush", standard=False, tap=tap, leaf="bigpush_unkver", failure={"leaf": "bigpush_c0"}, **ERR_PUSH_LIMIT)) + add_spender(spenders, "unkver/1001push", standard=False, tap=tap, leaf="1001push_unkver", failure={"leaf": "1001push_c0"}, **ERR_STACK_SIZE)) + add_spender(spenders, "unkver/1001inputs", standard=False, tap=tap, leaf="bare_unkver", inputs=[b'']*1001, failure={"leaf": "bare_c0"}, **ERR_STACK_SIZE)) +) + # OP_SUCCESSx tests.) 
+ hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)) + for opval in range(76, 0x100):) + opcode = CScriptOp(opval)) + if not is_op_success(opcode):) + continue) + scripts = [) + ("bare_success", CScript([opcode])),) + ("bare_nop", CScript([OP_NOP])),) + ("unexecif_success", CScript([OP_0, OP_IF, opcode, OP_ENDIF])),) + ("unexecif_nop", CScript([OP_0, OP_IF, OP_NOP, OP_ENDIF])),) + ("return_success", CScript([OP_RETURN, opcode])),) + ("return_nop", CScript([OP_RETURN, OP_NOP])),) + ("undecodable_success", CScript([opcode, OP_PUSHDATA1])),) + ("undecodable_nop", CScript([OP_NOP, OP_PUSHDATA1])),) + ("undecodable_bypassed_success", CScript([OP_PUSHDATA1, OP_2, opcode])),) + ("bigpush_success", CScript([random.randbytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, opcode])),) + ("bigpush_nop", CScript([random.randbytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, OP_NOP])),) + ("1001push_success", CScript([OP_0] * 1001 + [opcode])),) + ("1001push_nop", CScript([OP_0] * 1001 + [OP_NOP])),) + ]) + random.shuffle(scripts)) + tap = taproot_construct(pubs[0], scripts)) + add_spender(spenders, "opsuccess/bare", standard=False, tap=tap, leaf="bare_success", failure={"leaf": "bare_nop"}, **ERR_CLEANSTACK)) + add_spender(spenders, "opsuccess/unexecif", standard=False, tap=tap, leaf="unexecif_success", failure={"leaf": "unexecif_nop"}, **ERR_CLEANSTACK)) + add_spender(spenders, "opsuccess/return", standard=False, tap=tap, leaf="return_success", failure={"leaf": "return_nop"}, **ERR_OP_RETURN)) + add_spender(spenders, "opsuccess/undecodable", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_nop"}, **ERR_UNDECODABLE)) + add_spender(spenders, "opsuccess/undecodable_bypass", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_bypassed_success"}, **ERR_UNDECODABLE)) + add_spender(spenders, "opsuccess/bigpush", standard=False, tap=tap, leaf="bigpush_success", failure={"leaf": "bigpush_nop"}, **ERR_PUSH_LIMIT)) + add_spender(spenders, "opsuccess/1001push", standard=False, tap=tap, leaf="1001push_success", failure={"leaf": "1001push_nop"}, **ERR_STACK_SIZE)) + add_spender(spenders, "opsuccess/1001inputs", standard=False, tap=tap, leaf="bare_success", inputs=[b'']*1001, failure={"leaf": "bare_nop"}, **ERR_STACK_SIZE)) +) + # Non-OP_SUCCESSx (verify that those aren't accidentally treated as OP_SUCCESSx)) + for opval in range(0, 0x100):) + opcode = CScriptOp(opval)) + if is_op_success(opcode):) + continue) + scripts = [) + ("normal", CScript([OP_RETURN, opcode] + [OP_NOP] * 75)),) + ("op_success", CScript([OP_RETURN, CScriptOp(0x50)]))) + ]) + tap = taproot_construct(pubs[0], scripts)) + add_spender(spenders, "alwaysvalid/notsuccessx", tap=tap, leaf="op_success", inputs=[], standard=False, failure={"leaf": "normal"}) # err_msg differs based on opcode) +) + # == Test case for https://github.com/bitcoin/bitcoin/issues/24765 ==) +) + zero_fn = lambda h: bytes([0 for _ in range(32)])) + tap = taproot_construct(pubs[0], [("leaf", CScript([pubs[1], OP_CHECKSIG, pubs[1], OP_CHECKSIGADD, OP_2, OP_EQUAL])), zero_fn])) + add_spender(spenders, "case24765", tap=tap, leaf="leaf", inputs=[getter("sign"), getter("sign")], key=secs[1], no_fail=True)) +) + # == Legacy tests ==) +) + # Also add a few legacy spends into the mix, so that transactions which combine taproot and pre-taproot spends get tested too.) 
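+    # ECDSA hashtype bytes used by these legacy spends: SIGHASH_ALL=1, NONE=2, SINGLE=3,
+    # optionally OR'd with SIGHASH_ANYONECANPAY (0x80); anything else is nonstandard.
+    assert (SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE, SIGHASH_ANYONECANPAY) == (1, 2, 3, 0x80)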
+    for compressed in [False, True]:
+        eckey1, pubkey1 = generate_keypair(compressed=compressed)
+        eckey2, _ = generate_keypair(compressed=compressed)
+        for p2sh in [False, True]:
+            for witv0 in [False, True]:
+                for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
+                    standard = (hashtype in VALID_SIGHASHES_ECDSA) and (compressed or not witv0)
+                    add_spender(spenders, "legacy/pk-wrongkey", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=key_to_p2pk_script(pubkey1), **SINGLE_SIG, key=eckey1, failure={"key": eckey2}, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
+                    add_spender(spenders, "legacy/pkh-sighashflip", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, pkh=pubkey1, key=eckey1, **SIGHASH_BITFLIP, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
+
+    # Verify that OP_CHECKSIGADD wasn't accidentally added to pre-taproot validation logic.
+    for p2sh in [False, True]:
+        for witv0 in [False, True]:
+            for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
+                standard = hashtype in VALID_SIGHASHES_ECDSA and (p2sh or witv0)
+                add_spender(spenders, "compat/nocsa", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([OP_IF, OP_11, pubkey1, OP_CHECKSIGADD, OP_12, OP_EQUAL, OP_ELSE, pubkey1, OP_CHECKSIG, OP_ENDIF]), key=eckey1, sigops_weight=4-3*witv0, inputs=[getter("sign"), b''], failure={"inputs": [getter("sign"), b'\x01']}, **ERR_UNDECODABLE)
+
+    return spenders
+
+
+def spenders_taproot_nonstandard():
+    """Spenders for testing that post-activation Taproot rules may be nonstandard."""
+
+    spenders = []
+
+    sec = generate_privkey()
+    pub, _ = compute_xonly_pubkey(sec)
+    scripts = [
+        ("future_leaf", CScript([pub, OP_CHECKSIG]), 0xc2),
+        ("op_success", CScript([pub, OP_CHECKSIG, OP_0, OP_IF, CScriptOp(0x50), OP_ENDIF])),
+    ]
+    tap = taproot_construct(pub, scripts)
+
+    # Test that features like annex, leaf versions, or OP_SUCCESS are valid but non-standard
+    add_spender(spenders, "inactive/scriptpath_valid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")])
+    add_spender(spenders, "inactive/scriptpath_invalid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
+    add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")])
+    add_spender(spenders, "inactive/scriptpath_invalid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
+
+    return spenders
+
+# Consensus validation flags to use in dumps for tests with "legacy/" or "inactive/" prefix.
+LEGACY_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY"
+# Consensus validation flags to use in dumps for all other tests.
+TAPROOT_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY,TAPROOT") +) +def dump_json_test(tx, input_utxos, idx, success, failure):) + spender = input_utxos[idx].spender) + # Determine flags to dump) + flags = LEGACY_FLAGS if spender.comment.startswith("legacy/") or spender.comment.startswith("inactive/") else TAPROOT_FLAGS) +) + fields = [) + ("tx", tx.serialize().hex()),) + ("prevouts", [x.output.serialize().hex() for x in input_utxos]),) + ("index", idx),) + ("flags", flags),) + ("comment", spender.comment)) + ]) +) + # The "final" field indicates that a spend should be always valid, even with more validation flags enabled) + # than the listed ones. Use standardness as a proxy for this (which gives a conservative underestimate).) + if spender.is_standard:) + fields.append(("final", True))) +) + def dump_witness(wit):) + return OrderedDict([("scriptSig", wit[0].hex()), ("witness", [x.hex() for x in wit[1]])])) + if success is not None:) + fields.append(("success", dump_witness(success)))) + if failure is not None:) + fields.append(("failure", dump_witness(failure)))) +) + # Write the dump to $TEST_DUMP_DIR/x/xyz... where x,y,z,... are the SHA1 sum of the dump (which makes the) + # file naming scheme compatible with fuzzing infrastructure).) + dump = json.dumps(OrderedDict(fields)) + ",\n") + sha1 = hashlib.sha1(dump.encode("utf-8")).hexdigest()) + dirname = os.environ.get("TEST_DUMP_DIR", ".") + ("/%s" % sha1[0])) + os.makedirs(dirname, exist_ok=True)) + with open(dirname + ("/%s" % sha1), 'w', encoding="utf8") as f:) + f.write(dump)) +) +# Data type to keep track of UTXOs, where they were created, and how to spend them.) +UTXOData = namedtuple('UTXOData', 'outpoint,output,spender')) +) +) +class TaprootTest(BitcoinTestFramework):) + def add_options(self, parser):) + self.add_wallet_options(parser)) + parser.add_argument("--dumptests", dest="dump_tests", default=False, action="store_true",) + help="Dump generated test cases to directory set by TEST_DUMP_DIR environment variable")) +) + def skip_test_if_missing_module(self):) + self.skip_if_no_wallet()) +) + def set_test_params(self):) + self.num_nodes = 1) + self.setup_clean_chain = True) + self.extra_args = [["-par=1"]]) +) + def block_submit(self, node, txs, msg, err_msg, cb_pubkey=None, fees=0, sigops_weight=0, witness=False, accept=False):) +) + # Deplete block of any non-tapscript sigops using a single additional 0-value coinbase output.) + # It is not impossible to fit enough tapscript sigops to hit the old 80k limit without) + # busting txin-level limits. We simply have to account for the p2pk outputs in all) + # transactions.) 
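+        # Worked example (toy number, assuming the framework's MAX_BLOCK_SIGOPS_WEIGHT
+        # of 80000): if the block's transactions already use sigops_weight=10000, the
+        # filler output gets (80000 - 10000) // 4 = 17500 OP_CHECKSIGs, each weighing
+        # WITNESS_SCALE_FACTOR (4) toward the cap.
+        assert (MAX_BLOCK_SIGOPS_WEIGHT - 10000) // WITNESS_SCALE_FACTOR == 17500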
+ extra_output_script = CScript(bytes([OP_CHECKSIG]*((MAX_BLOCK_SIGOPS_WEIGHT - sigops_weight) // WITNESS_SCALE_FACTOR)))) +) + coinbase_tx = create_coinbase(self.lastblockheight + 1, pubkey=cb_pubkey, extra_output_script=extra_output_script, fees=fees)) + block = create_block(self.tip, coinbase_tx, self.lastblocktime + 1, txlist=txs)) + witness and add_witness_commitment(block)) + block.solve()) + block_response = node.submitblock(block.serialize().hex())) + if err_msg is not None:) + assert block_response is not None and err_msg in block_response, "Missing error message '%s' from block response '%s': %s" % (err_msg, "(None)" if block_response is None else block_response, msg)) + if accept:) + assert node.getbestblockhash() == block.hash, "Failed to accept: %s (response: %s)" % (msg, block_response)) + self.tip = block.sha256) + self.lastblockhash = block.hash) + self.lastblocktime += 1) + self.lastblockheight += 1) + else:) + assert node.getbestblockhash() == self.lastblockhash, "Failed to reject: " + msg) +) + def init_blockinfo(self, node):) + # Initialize variables used by block_submit().) + self.lastblockhash = node.getbestblockhash()) + self.tip = int(self.lastblockhash, 16)) + block = node.getblock(self.lastblockhash)) + self.lastblockheight = block['height']) + self.lastblocktime = block['time']) +) + def test_spenders(self, node, spenders, input_counts):) + """Run randomized tests with a number of "spenders".) +) + Steps:) + 1) Generate an appropriate UTXO for each spender to test spend conditions) + 2) Generate 100 random addresses of all wallet types: pkh/sh_wpkh/wpkh) + 3) Select random number of inputs from (1)) + 4) Select random number of addresses from (2) as outputs) +) + Each spender embodies a test; in a large randomized test, it is verified) + that toggling the valid argument to each lambda toggles the validity of) + the transaction. This is accomplished by constructing transactions consisting) + of all valid inputs, except one invalid one.) + """) +) + # Construct a bunch of sPKs that send coins back to the host wallet) + self.log.info("- Constructing addresses for returning coins")) + host_spks = []) + host_pubkeys = []) + for i in range(16):) + addr = node.getnewaddress(address_type=random.choice(["legacy", "p2sh-segwit", "bech32"]))) + info = node.getaddressinfo(addr)) + spk = bytes.fromhex(info['scriptPubKey'])) + host_spks.append(spk)) + host_pubkeys.append(bytes.fromhex(info['pubkey']))) +) + self.init_blockinfo(node)) +) + # Create transactions spending up to 50 of the wallet's inputs, with one output for each spender, and) + # one change output at the end. The transaction is constructed on the Python side to enable) + # having multiple outputs to the same address and outputs with no assigned address. The wallet) + # is then asked to sign it through signrawtransactionwithwallet, and then added to a block on the) + # Python side (to bypass standardness rules).) 
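+        # Worked example of the change schedule below: moving from 0% to 50% of spenders
+        # funded keeps (1 - 0.6*0.5) / (1 - 0.6*0.0) = 70% of the balance as change, so
+        # later funding transactions spend progressively smaller amounts.
+        assert abs((1.0 - 0.6 * 0.5) / (1.0 - 0.6 * 0.0) - 0.7) < 1e-9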
+ self.log.info("- Creating test UTXOs...")) + random.shuffle(spenders)) + normal_utxos = []) + mismatching_utxos = [] # UTXOs with input that requires mismatching output position) + done = 0) + while done < len(spenders):) + # Compute how many UTXOs to create with this transaction) + count_this_tx = min(len(spenders) - done, (len(spenders) + 4) // 5, 10000)) +) + fund_tx = CTransaction()) + # Add the 50 highest-value inputs) + unspents = node.listunspent()) + random.shuffle(unspents)) + unspents.sort(key=lambda x: int(x["amount"] * 100000000), reverse=True)) + if len(unspents) > 50:) + unspents = unspents[:50]) + random.shuffle(unspents)) + balance = 0) + for unspent in unspents:) + balance += int(unspent["amount"] * 100000000)) + txid = int(unspent["txid"], 16)) + fund_tx.vin.append(CTxIn(COutPoint(txid, int(unspent["vout"])), CScript()))) + # Add outputs) + cur_progress = done / len(spenders)) + next_progress = (done + count_this_tx) / len(spenders)) + change_goal = (1.0 - 0.6 * next_progress) / (1.0 - 0.6 * cur_progress) * balance) + self.log.debug("Create %i UTXOs in a transaction spending %i inputs worth %.8f (sending ~%.8f to change)" % (count_this_tx, len(unspents), balance * 0.00000001, change_goal * 0.00000001))) + for i in range(count_this_tx):) + avg = (balance - change_goal) / (count_this_tx - i)) + amount = int(random.randrange(int(avg*0.85 + 0.5), int(avg*1.15 + 0.5)) + 0.5)) + balance -= amount) + fund_tx.vout.append(CTxOut(amount, spenders[done + i].script))) + # Add change) + fund_tx.vout.append(CTxOut(balance - 10000, random.choice(host_spks)))) + # Ask the wallet to sign) + fund_tx = tx_from_hex(node.signrawtransactionwithwallet(fund_tx.serialize().hex())["hex"])) + # Construct UTXOData entries) + fund_tx.rehash()) + for i in range(count_this_tx):) + utxodata = UTXOData(outpoint=COutPoint(fund_tx.sha256, i), output=fund_tx.vout[i], spender=spenders[done])) + if utxodata.spender.need_vin_vout_mismatch:) + mismatching_utxos.append(utxodata)) + else:) + normal_utxos.append(utxodata)) + done += 1) + # Mine into a block) + self.block_submit(node, [fund_tx], "Funding tx", None, random.choice(host_pubkeys), 10000, MAX_BLOCK_SIGOPS_WEIGHT, True, True)) +) + # Consume groups of choice(input_coins) from utxos in a tx, testing the spenders.) + self.log.info("- Running %i spending tests" % done)) + random.shuffle(normal_utxos)) + random.shuffle(mismatching_utxos)) + assert done == len(normal_utxos) + len(mismatching_utxos)) +) + left = done) + while left:) + # Construct CTransaction with random version, nLocktime) + tx = CTransaction()) + tx.version = random.choice([1, 2, random.getrandbits(32)])) + min_sequence = (tx.version,1 and tx.version != 0) * 0x80000000 # The minimum sequence number to disable relative locktime) + if random.choice([True, False]):) + tx.nLockTime = random.randrange(LOCKTIME_THRESHOLD, self.lastblocktime - 7200) # all absolute locktimes in the past) + else:) + tx.nLockTime = random.randrange(self.lastblockheight + 1) # all block heights in the past) +) + # Decide how many UTXOs to test with.) + acceptable = [n for n in input_counts if n <= left and (left - n > max(input_counts) or (left - n) in [0] + input_counts)]) + num_inputs = random.choice(acceptable)) +) + # If we have UTXOs that require mismatching inputs/outputs left, include exactly one of those) + # unless there is only one normal UTXO left (as tests with mismatching UTXOs require at least one) + # normal UTXO to go in the first position), and we don't want to run out of normal UTXOs.) 
+ input_utxos = []) + while len(mismatching_utxos) and (len(input_utxos) == 0 or len(normal_utxos) == 1):) + input_utxos.append(mismatching_utxos.pop())) + left -= 1) +) + # Top up until we hit num_inputs (but include at least one normal UTXO always).) + for _ in range(max(1, num_inputs - len(input_utxos))):) + input_utxos.append(normal_utxos.pop())) + left -= 1) +) + # The first input cannot require a mismatching output (as there is at least one output).) + while True:) + random.shuffle(input_utxos)) + if not input_utxos[0].spender.need_vin_vout_mismatch:) + break) + first_mismatch_input = None) + for i in range(len(input_utxos)):) + if input_utxos[i].spender.need_vin_vout_mismatch:) + first_mismatch_input = i) + assert first_mismatch_input is None or first_mismatch_input > 0) +) + # Decide fee, and add CTxIns to tx.) + amount = sum(utxo.output.nValue for utxo in input_utxos)) + fee = min(random.randrange(MIN_FEE * 2, MIN_FEE * 4), amount - DUST_LIMIT) # 10000-20000 sat fee) + in_value = amount - fee) + tx.vin = [CTxIn(outpoint=utxo.outpoint, nSequence=random.randint(min_sequence, 0xffffffff)) for utxo in input_utxos]) + tx.wit.vtxinwit = [CTxInWitness() for _ in range(len(input_utxos))]) + sigops_weight = sum(utxo.spender.sigops_weight for utxo in input_utxos)) + self.log.debug("Test: %s" % (", ".join(utxo.spender.comment for utxo in input_utxos)))) +) + # Add 1 to 4 random outputs (but constrained by inputs that require mismatching outputs)) + num_outputs = random.choice(range(1, 1 + min(4, 4 if first_mismatch_input is None else first_mismatch_input)))) + assert in_value >= 0 and fee - num_outputs * DUST_LIMIT >= MIN_FEE) + for i in range(num_outputs):) + tx.vout.append(CTxOut())) + if in_value <= DUST_LIMIT:) + tx.vout[-1].nValue = DUST_LIMIT) + elif i < num_outputs - 1:) + tx.vout[-1].nValue = in_value) + else:) + tx.vout[-1].nValue = random.randint(DUST_LIMIT, in_value)) + in_value -= tx.vout[-1].nValue) + tx.vout[-1].scriptPubKey = random.choice(host_spks)) + sigops_weight += CScript(tx.vout[-1].scriptPubKey).GetSigOpCount(False) * WITNESS_SCALE_FACTOR) + fee += in_value) + assert fee >= 0) +) + # Select coinbase pubkey) + cb_pubkey = random.choice(host_pubkeys)) + sigops_weight += 1 * WITNESS_SCALE_FACTOR) +) + # Precompute one satisfying and one failing scriptSig/witness for each input.) + input_data = []) + for i in range(len(input_utxos)):) + fn = input_utxos[i].spender.sat_function) + fail = None) + success = fn(tx, i, [utxo.output for utxo in input_utxos], True)) + if not input_utxos[i].spender.no_fail:) + fail = fn(tx, i, [utxo.output for utxo in input_utxos], False)) + input_data.append((fail, success))) + if self.options.dump_tests:) + dump_json_test(tx, input_utxos, i, success, fail)) +) + # Sign each input incorrectly once on each complete signing pass, except the very last.) + for fail_input in list(range(len(input_utxos))) + [None]:) + # Skip trying to fail at spending something that can't be made to fail.) 
+            if fail_input is not None and input_utxos[fail_input].spender.no_fail:
+                continue
+            # Expected message with each input failure, may be None (which is ignored)
+            expected_fail_msg = None if fail_input is None else input_utxos[fail_input].spender.err_msg
+            # Fill inputs/witnesses
+            for i in range(len(input_utxos)):
+                tx.vin[i].scriptSig = input_data[i][i != fail_input][0]
+                tx.wit.vtxinwit[i].scriptWitness.stack = input_data[i][i != fail_input][1]
+            # Submit to mempool to check standardness
+            is_standard_tx = (
+                fail_input is None  # Must be valid to be standard
+                and (all(utxo.spender.is_standard for utxo in input_utxos))  # All inputs must be standard
+                and tx.version >= 1  # The tx version must be standard
+                and tx.version <= 2)
+            tx.rehash()
+            msg = ','.join(utxo.spender.comment + ("*" if n == fail_input else "") for n, utxo in enumerate(input_utxos))
+            if is_standard_tx:
+                node.sendrawtransaction(tx.serialize().hex(), 0)
+                assert node.getmempoolentry(tx.hash) is not None, "Failed to accept into mempool: " + msg
+            else:
+                assert_raises_rpc_error(-26, None, node.sendrawtransaction, tx.serialize().hex(), 0)
+            # Submit in a block
+            self.block_submit(node, [tx], msg, witness=True, accept=fail_input is None, cb_pubkey=cb_pubkey, fees=fee, sigops_weight=sigops_weight, err_msg=expected_fail_msg)
+
+            if (len(spenders) - left) // 200 > (len(spenders) - left - len(input_utxos)) // 200:
+                self.log.info("  - %i tests done" % (len(spenders) - left))
+
+        assert left == 0
+        assert len(normal_utxos) == 0
+        assert len(mismatching_utxos) == 0
+        self.log.info("  - Done")
+
+    def gen_test_vectors(self):
+        """Run a scenario that corresponds to (and optionally produces) the BIP341 test vectors."""
+
+        self.log.info("Unit test scenario...")
+
+        # Deterministically mine coins to OP_TRUE in block 1
+        assert_equal(self.nodes[0].getblockcount(), 0)
+        coinbase = CTransaction()
+        coinbase.version = 1
+        coinbase.vin = [CTxIn(COutPoint(0, 0xffffffff), CScript([OP_1, OP_1]), SEQUENCE_FINAL)]
+        coinbase.vout = [CTxOut(5000000000, CScript([OP_1]))]
+        coinbase.nLockTime = 0
+        coinbase.rehash()
+        assert coinbase.hash == "f60c73405d499a956d3162e3483c395526ef78286458a4cb17b125aa92e49b20"
+        # Mine it
+        block = create_block(hashprev=int(self.nodes[0].getbestblockhash(), 16), coinbase=coinbase)
+        block.rehash()
+        block.solve()
+        self.nodes[0].submitblock(block.serialize().hex())
+        assert_equal(self.nodes[0].getblockcount(), 1)
+        self.generate(self.nodes[0], COINBASE_MATURITY)
+
+        SEED = 317
+        VALID_LEAF_VERS = list(range(0xc0, 0x100, 2)) + [0x66, 0x7e, 0x80, 0x84, 0x96, 0x98, 0xba, 0xbc, 0xbe]
+        # Generate private keys
+        prvs = [hashlib.sha256(SEED.to_bytes(2, 'big') + bytes([i])).digest() for i in range(100)]
+        # Generate corresponding public x-only pubkeys
+        pubs = [compute_xonly_pubkey(prv)[0] for prv in prvs]
+        # Generate taproot objects
+        inner_keys = [pubs[i] for i in range(7)]
+
+        script_lists = [
+            None,
+            [("0", CScript([pubs[50], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT)],
+            [("0", CScript([pubs[51], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT)],
+            [("0", CScript([pubs[52], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT), ("1", CScript([b"BIP341"]), VALID_LEAF_VERS[pubs[99][0] % 41])],
+            [("0", CScript([pubs[53], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT), ("1", CScript([b"Taproot"]), VALID_LEAF_VERS[pubs[99][1] % 41])],
+            [("0", CScript([pubs[54], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT),
+                [("1", CScript([pubs[55], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT), ("2", CScript([pubs[56],
+            ],
+            [("0", CScript([pubs[57], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT),
+                [("1", CScript([pubs[58], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT), ("2", CScript([pubs[59], OP_CHECKSIG]), LEAF_VERSION_TAPSCRIPT)]
+            ],
+        ]
+        taps = [taproot_construct(inner_keys[i], script_lists[i]) for i in range(len(inner_keys))]
+
+        # Require negated taps[0]
+        assert taps[0].negflag
+        # Require one negated and one non-negated in taps 1 and 2.
+        assert_not_equal(taps[1].negflag, taps[2].negflag)
+        # Require one negated and one non-negated in taps 3 and 4.
+        assert_not_equal(taps[3].negflag, taps[4].negflag)
+        # Require one negated and one non-negated in taps 5 and 6.
+        assert_not_equal(taps[5].negflag, taps[6].negflag)
+
+        cblks = [{leaf: get({**DEFAULT_CONTEXT, 'tap': taps[i], 'leaf': leaf}, 'controlblock') for leaf in taps[i].leaves} for i in range(7)]
+        # Require one swapped and one unswapped in taps 3 and 4.
+        assert_not_equal((cblks[3]['0'][33:65] < cblks[3]['1'][33:65]), (cblks[4]['0'][33:65] < cblks[4]['1'][33:65]))
+        # Require one swapped and one unswapped in taps 5 and 6, both at the top and child level.
+        assert_not_equal((cblks[5]['0'][33:65] < cblks[5]['1'][65:]), (cblks[6]['0'][33:65] < cblks[6]['1'][65:]))
+        assert_not_equal((cblks[5]['1'][33:65] < cblks[5]['2'][33:65]), (cblks[6]['1'][33:65] < cblks[6]['2'][33:65]))
+        # Require within taps 5 (and thus also 6) that one level is swapped and the other is not.
+        assert_not_equal((cblks[5]['0'][33:65] < cblks[5]['1'][65:]), (cblks[5]['1'][33:65] < cblks[5]['2'][33:65]))
+
+        # Compute a deterministic set of scriptPubKeys
+        tap_spks = []
+        old_spks = []
+        spend_info = {}
+        # First, taproot scriptPubKeys, for the tap objects constructed above
+        for i, tap in enumerate(taps):
+            tap_spks.append(tap.scriptPubKey)
+            d = {'key': prvs[i], 'tap': tap, 'mode': 'taproot'}
+            spend_info[tap.scriptPubKey] = d
+        # Then, a number of deterministically generated keys (0x1, 0x2, 0x3), with 2x P2PKH and 1x P2WPKH spks.
+        for i in range(1, 4):
+            prv = ECKey()
+            prv.set(i.to_bytes(32, 'big'), True)
+            pub = prv.get_pubkey().get_bytes()
+            d = {"key": prv}
+            d["scriptcode"] = key_to_p2pkh_script(pub)
+            d["inputs"] = [getter("sign"), pub]
+            if i < 3:
+                # P2PKH
+                d['spk'] = key_to_p2pkh_script(pub)
+                d['mode'] = 'legacy'
+            else:
+                # P2WPKH
+                d['spk'] = key_to_p2wpkh_script(pub)
+                d['mode'] = 'witv0'
+            old_spks.append(d['spk'])
+            spend_info[d['spk']] = d
+
+        # Construct a deterministic chain of transactions creating UTXOs to the test's spk's (so that they
+        # come from distinct txids).
+        txn = []
+        lasttxid = coinbase.sha256
+        amount = 5000000000
+        for i, spk in enumerate(old_spks + tap_spks):
+            val = 42000000 * (i + 7)
+            tx = CTransaction()
+            tx.version = 1
+            tx.vin = [CTxIn(COutPoint(lasttxid, i & 1), CScript([]), SEQUENCE_FINAL)]
+            tx.vout = [CTxOut(val, spk), CTxOut(amount - val, CScript([OP_1]))]
+            if i & 1:
+                tx.vout = list(reversed(tx.vout))
+            tx.nLockTime = 0
+            tx.rehash()
+            amount -= val
+            lasttxid = tx.sha256
+            txn.append(tx)
+            spend_info[spk]['prevout'] = COutPoint(tx.sha256, i & 1)
+            spend_info[spk]['utxo'] = CTxOut(val, spk)
+        # Mine those transactions
+        self.init_blockinfo(self.nodes[0])
+        self.block_submit(self.nodes[0], txn, "Crediting txn", None, sigops_weight=10, accept=True)
+
+        # scriptPubKey computation
+        tests = {"version": 1}
+        spk_tests = tests.setdefault("scriptPubKey", [])
+        for i, tap in enumerate(taps):
+            test_case = {}
+            given = test_case.setdefault("given", {})
+            given['internalPubkey'] = tap.internal_pubkey.hex()
+
+            def pr(node):
+                if node is None:
+                    return None
+                elif isinstance(node, tuple):
+                    return {"id": int(node[0]), "script": node[1].hex(), "leafVersion": node[2]}
+                elif len(node) == 1:
+                    return pr(node[0])
+                elif len(node) == 2:
+                    return [pr(node[0]), pr(node[1])]
+                else:
+                    assert False
+
+            given['scriptTree'] = pr(script_lists[i])
+            intermediary = test_case.setdefault("intermediary", {})
+            if len(tap.leaves):
+                leafhashes = intermediary.setdefault('leafHashes', [None] * len(tap.leaves))
+                for leaf in tap.leaves:
+                    leafhashes[int(leaf)] = tap.leaves[leaf].leaf_hash.hex()
+            intermediary['merkleRoot'] = tap.merkle_root.hex() if tap.merkle_root else None
+            intermediary['tweak'] = tap.tweak.hex()
+            intermediary['tweakedPubkey'] = tap.output_pubkey.hex()
+            expected = test_case.setdefault("expected", {})
+            expected['scriptPubKey'] = tap.scriptPubKey.hex()
+            expected['bip350Address'] = program_to_witness(1, bytes(tap.output_pubkey), True)
+            if len(tap.leaves):
+                control_blocks = expected.setdefault("scriptPathControlBlocks", [None] * len(tap.leaves))
+                for leaf in tap.leaves:
+                    ctx = {**DEFAULT_CONTEXT, 'tap': tap, 'leaf': leaf}
+                    control_blocks[int(leaf)] = get(ctx, "controlblock").hex()
+            spk_tests.append(test_case)
+
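+        # Each spk_tests entry mirrors the BIP341 test vector layout: "given"
+        # (inputs to the construction), "intermediary" (leaf hashes, merkle root,
+        # tweak), and "expected" (scriptPubKey, bech32m address, control blocks).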
+        # Construct a deterministic transaction spending all outputs created above.
+        tx = CTransaction()
+        tx.version = 2
+        tx.vin = []
+        inputs = []
+        input_spks = [tap_spks[0], tap_spks[1], old_spks[0], tap_spks[2], tap_spks[5], old_spks[2], tap_spks[6], tap_spks[3], tap_spks[4]]
+        sequences = [0, SEQUENCE_FINAL, SEQUENCE_FINAL, 0xfffffffe, 0xfffffffe, 0, 0, SEQUENCE_FINAL, SEQUENCE_FINAL]
+        hashtypes = [SIGHASH_SINGLE, SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, SIGHASH_ALL, SIGHASH_ALL, SIGHASH_DEFAULT, SIGHASH_ALL, SIGHASH_NONE, SIGHASH_NONE|SIGHASH_ANYONECANPAY, SIGHASH_ALL|SIGHASH_ANYONECANPAY]
+        for i, spk in enumerate(input_spks):
+            tx.vin.append(CTxIn(spend_info[spk]['prevout'], CScript(), sequences[i]))
+            inputs.append(spend_info[spk]['utxo'])
+        tx.vout.append(CTxOut(1000000000, old_spks[1]))
+        tx.vout.append(CTxOut(3410000000, CScript([OP_1, pubs[98]])))
+        tx.nLockTime = 500000000
+        precomputed = {
+            "hashAmounts": BIP341_sha_amounts(inputs),
+            "hashPrevouts": BIP341_sha_prevouts(tx),
+            "hashScriptPubkeys": BIP341_sha_scriptpubkeys(inputs),
+            "hashSequences": BIP341_sha_sequences(tx),
+            "hashOutputs": BIP341_sha_outputs(tx)
+        }
+        keypath_tests = tests.setdefault("keyPathSpending", [])
+        tx_test = {}
+        global_given = tx_test.setdefault("given", {})
+        global_given['rawUnsignedTx'] = tx.serialize().hex()
+        utxos_spent = global_given.setdefault("utxosSpent", [])
+        for i in range(len(input_spks)):
+            utxos_spent.append({"scriptPubKey": inputs[i].scriptPubKey.hex(), "amountSats": inputs[i].nValue})
+        global_intermediary = tx_test.setdefault("intermediary", {})
+        for key in sorted(precomputed.keys()):
+            global_intermediary[key] = precomputed[key].hex()
+        test_list = tx_test.setdefault('inputSpending', [])
+        for i in range(len(input_spks)):
+            ctx = {
+                **DEFAULT_CONTEXT,
+                **spend_info[input_spks[i]],
+                'tx': tx,
+                'utxos': inputs,
+                'idx': i,
+                'hashtype': hashtypes[i],
+                'deterministic': True
+            }
+            if ctx['mode'] == 'taproot':
+                test_case = {}
+                given = test_case.setdefault("given", {})
+                given['txinIndex'] = i
+                given['internalPrivkey'] = get(ctx, 'key').hex()
+                # An empty merkle_root (bytes()) means there is no script tree.
+                if get(ctx, "tap").merkle_root != bytes():
+                    given['merkleRoot'] = get(ctx, "tap").merkle_root.hex()
+                else:
+                    given['merkleRoot'] = None
+                given['hashType'] = get(ctx, "hashtype")
+                intermediary = test_case.setdefault("intermediary", {})
+                intermediary['internalPubkey'] = get(ctx, "tap").internal_pubkey.hex()
+                intermediary['tweak'] = get(ctx, "tap").tweak.hex()
+                intermediary['tweakedPrivkey'] = get(ctx, "key_tweaked").hex()
+                sigmsg = get(ctx, "sigmsg")
+                intermediary['sigMsg'] = sigmsg.hex()
+                intermediary['precomputedUsed'] = [key for key in sorted(precomputed.keys()) if sigmsg.count(precomputed[key])]
+                intermediary['sigHash'] = get(ctx, "sighash").hex()
+                expected = test_case.setdefault("expected", {})
+                expected['witness'] = [get(ctx, "sign").hex()]
+                test_list.append(test_case)
+            tx.wit.vtxinwit.append(CTxInWitness())
+            tx.vin[i].scriptSig = CScript(flatten(get(ctx, "scriptsig")))
+            tx.wit.vtxinwit[i].scriptWitness.stack = flatten(get(ctx, "witness"))
+        aux = tx_test.setdefault("auxiliary", {})
+        aux['fullySignedTx'] = tx.serialize().hex()
+        keypath_tests.append(tx_test)
+        assert_equal(hashlib.sha256(tx.serialize()).hexdigest(), "24bab662cb55a7f3bae29b559f651674c62bcc1cd442d44715c0133939107b38")
+        # Mine the spending transaction
+        self.block_submit(self.nodes[0], [tx], "Spending txn", None, sigops_weight=10000, accept=True, witness=True)
+
+        if GEN_TEST_VECTORS:
+            print(json.dumps(tests, indent=4, sort_keys=False))
+
+    def run_test(self):
+
self.gen_test_vectors()) +) + self.log.info("Post-activation tests...")) + self.test_spenders(self.nodes[0], spenders_taproot_active(), input_counts=[1, 2, 2, 2, 2, 3])) + # Run each test twice; once in isolation, and once combined with others. Testing in isolation) + # means that the standardness is verified in every test (as combined transactions are only standard) + # when all their inputs are standard).) + self.test_spenders(self.nodes[0], spenders_taproot_nonstandard(), input_counts=[1])) + self.test_spenders(self.nodes[0], spenders_taproot_nonstandard(), input_counts=[2, 3])) +) +) +if __name__ == '__main__':) + TaprootTest(__file__).main()) diff --git a/test/functional/mempool_accept_wtxid.py b/test/functional/mempool_accept_wtxid.py index d3b5f215d3ae53..1c65f034f1ee53 100755 --- a/test/functional/mempool_accept_wtxid.py +++ b/test/functional/mempool_accept_wtxid.py @@ -1,129 +1,129 @@ -#!/usr/bin/env python3 -# Copyright (c) 2021 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -""" -Test mempool acceptance in case of an already known transaction -with identical non-witness data but different witness. -""" - -from copy import deepcopy -from test_framework.messages import ( - COIN, - COutPoint, - CTransaction, - CTxIn, - CTxInWitness, - CTxOut, - sha256, -) -from test_framework.p2p import P2PTxInvStore -from test_framework.script import ( - CScript, - OP_0, - OP_ELSE, - OP_ENDIF, - OP_EQUAL, - OP_HASH160, - OP_IF, - OP_TRUE, - hash160, -) -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_not_equal, - assert_equal, -) - -class MempoolWtxidTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.setup_clean_chain = True - - def run_test(self): - node = self.nodes[0] - - self.log.info('Start with empty mempool and 101 blocks') - # The last 100 coinbase transactions are premature - blockhash = self.generate(node, 101)[0] - txid = node.getblock(blockhash=blockhash, verbosity=2)["tx"][0]["txid"] - assert_equal(node.getmempoolinfo()['size'], 0) - - self.log.info("Submit parent with multiple script branches to mempool") - hashlock = hash160(b'Preimage') - witness_script = CScript([OP_IF, OP_HASH160, hashlock, OP_EQUAL, OP_ELSE, OP_TRUE, OP_ENDIF]) - witness_program = sha256(witness_script) - script_pubkey = CScript([OP_0, witness_program]) - - parent = CTransaction() - parent.vin.append(CTxIn(COutPoint(int(txid, 16), 0), b"")) - parent.vout.append(CTxOut(int(9.99998 * COIN), script_pubkey)) - parent.rehash() - - privkeys = [node.get_deterministic_priv_key().key] - raw_parent = node.signrawtransactionwithkey(hexstring=parent.serialize().hex(), privkeys=privkeys)['hex'] - parent_txid = node.sendrawtransaction(hexstring=raw_parent, maxfeerate=0) - self.generate(node, 1) - - peer_wtxid_relay = node.add_p2p_connection(P2PTxInvStore()) - - # Create a new transaction with witness solving first branch - child_witness_script = CScript([OP_TRUE]) - child_witness_program = sha256(child_witness_script) - child_script_pubkey = CScript([OP_0, child_witness_program]) - - child_one = CTransaction() - child_one.vin.append(CTxIn(COutPoint(int(parent_txid, 16), 0), b"")) - child_one.vout.append(CTxOut(int(9.99996 * COIN), child_script_pubkey)) - child_one.wit.vtxinwit.append(CTxInWitness()) - child_one.wit.vtxinwit[0].scriptWitness.stack = [b'Preimage', b'\x01', witness_script] - child_one_wtxid = 
child_one.getwtxid() - child_one_txid = child_one.rehash() - - # Create another identical transaction with witness solving second branch - child_two = deepcopy(child_one) - child_two.wit.vtxinwit[0].scriptWitness.stack = [b'', witness_script] - child_two_wtxid = child_two.getwtxid() - child_two_txid = child_two.rehash() - - assert_equal(child_one_txid, child_two_txid) - assert child_one_wtxid != child_two_wtxid - - self.log.info("Submit child_one to the mempool") - txid_submitted = node.sendrawtransaction(child_one.serialize().hex()) - assert_equal(node.getmempoolentry(txid_submitted)['wtxid'], child_one_wtxid) - - peer_wtxid_relay.wait_for_broadcast([child_one_wtxid]) - assert_equal(node.getmempoolinfo()["unbroadcastcount"], 0) - - # testmempoolaccept reports the "already in mempool" error - assert_equal(node.testmempoolaccept([child_one.serialize().hex()]), [{ - "txid": child_one_txid, - "wtxid": child_one_wtxid, - "allowed": False, - "reject-reason": "txn-already-in-mempool" - }]) - assert_equal(node.testmempoolaccept([child_two.serialize().hex()])[0], { - "txid": child_two_txid, - "wtxid": child_two_wtxid, - "allowed": False, - "reject-reason": "txn-same-nonwitness-data-in-mempool" - }) - - # sendrawtransaction will not throw but quits early when the exact same transaction is already in mempool - node.sendrawtransaction(child_one.serialize().hex()) - - self.log.info("Connect another peer that hasn't seen child_one before") - peer_wtxid_relay_2 = node.add_p2p_connection(P2PTxInvStore()) - - self.log.info("Submit child_two to the mempool") - # sendrawtransaction will not throw but quits early when a transaction with the same non-witness data is already in mempool - node.sendrawtransaction(child_two.serialize().hex()) - - # The node should rebroadcast the transaction using the wtxid of the correct transaction - # (child_one, which is in its mempool). - peer_wtxid_relay_2.wait_for_broadcast([child_one_wtxid]) - assert_equal(node.getmempoolinfo()["unbroadcastcount"], 0) - -if __name__ == '__main__': - MempoolWtxidTest(__file__).main() +#!/usr/bin/env python3) +# Copyright (c) 2021 The Bitcoin Core developers) +# Distributed under the MIT software license, see the accompanying) +# file COPYING or http://www.opensource.org/licenses/mit-license.php.) +""") +Test mempool acceptance in case of an already known transaction) +with identical non-witness data but different witness.) 
+""") +) +from copy import deepcopy) +from test_framework.messages import () + COIN,) + COutPoint,) + CTransaction,) + CTxIn,) + CTxInWitness,) + CTxOut,) + sha256,) +)) +from test_framework.p2p import P2PTxInvStore) +from test_framework.script import () + CScript,) + OP_0,) + OP_ELSE,) + OP_ENDIF,) + OP_EQUAL,) + OP_HASH160,) + OP_IF,) + OP_TRUE,) + hash160,) +)) +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.util import () + assert_not_equal,) + assert_equal,) +)) +) +class MempoolWtxidTest(BitcoinTestFramework):) + def set_test_params(self):) + self.num_nodes = 1) + self.setup_clean_chain = True) +) + def run_test(self):) + node = self.nodes[0]) +) + self.log.info('Start with empty mempool and 101 blocks')) + # The last 100 coinbase transactions are premature) + blockhash = self.generate(node, 101)[0]) + txid = node.getblock(blockhash=blockhash, verbosity=2)["tx"][0]["txid"]) + assert_equal(node.getmempoolinfo()['size'], 0)) +) + self.log.info("Submit parent with multiple script branches to mempool")) + hashlock = hash160(b'Preimage')) + witness_script = CScript([OP_IF, OP_HASH160, hashlock, OP_EQUAL, OP_ELSE, OP_TRUE, OP_ENDIF])) + witness_program = sha256(witness_script)) + script_pubkey = CScript([OP_0, witness_program])) +) + parent = CTransaction()) + parent.vin.append(CTxIn(COutPoint(int(txid, 16), 0), b""))) + parent.vout.append(CTxOut(int(9.99998 * COIN), script_pubkey))) + parent.rehash()) +) + privkeys = [node.get_deterministic_priv_key().key]) + raw_parent = node.signrawtransactionwithkey(hexstring=parent.serialize().hex(), privkeys=privkeys)['hex']) + parent_txid = node.sendrawtransaction(hexstring=raw_parent, maxfeerate=0)) + self.generate(node, 1)) +) + peer_wtxid_relay = node.add_p2p_connection(P2PTxInvStore())) +) + # Create a new transaction with witness solving first branch) + child_witness_script = CScript([OP_TRUE])) + child_witness_program = sha256(child_witness_script)) + child_script_pubkey = CScript([OP_0, child_witness_program])) +) + child_one = CTransaction()) + child_one.vin.append(CTxIn(COutPoint(int(parent_txid, 16), 0), b""))) + child_one.vout.append(CTxOut(int(9.99996 * COIN), child_script_pubkey))) + child_one.wit.vtxinwit.append(CTxInWitness())) + child_one.wit.vtxinwit[0].scriptWitness.stack = [b'Preimage', b'\x01', witness_script]) + child_one_wtxid = child_one.getwtxid()) + child_one_txid = child_one.rehash()) +) + # Create another identical transaction with witness solving second branch) + child_two = deepcopy(child_one)) + child_two.wit.vtxinwit[0].scriptWitness.stack = [b'', witness_script]) + child_two_wtxid = child_two.getwtxid()) + child_two_txid = child_two.rehash()) +) + assert_equal(child_one_txid, child_two_txid)) + assert_not_equal(child_one_wtxid, child_two_wtxid)) +) + self.log.info("Submit child_one to the mempool")) + txid_submitted = node.sendrawtransaction(child_one.serialize().hex())) + assert_equal(node.getmempoolentry(txid_submitted)['wtxid'], child_one_wtxid)) +) + peer_wtxid_relay.wait_for_broadcast([child_one_wtxid])) + assert_equal(node.getmempoolinfo()["unbroadcastcount"], 0)) +) + # testmempoolaccept reports the "already in mempool" error) + assert_equal(node.testmempoolaccept([child_one.serialize().hex()]), [{) + "txid": child_one_txid,) + "wtxid": child_one_wtxid,) + "allowed": False,) + "reject-reason": "txn-already-in-mempool") + }])) + assert_equal(node.testmempoolaccept([child_two.serialize().hex()])[0], {) + "txid": child_two_txid,) + "wtxid": child_two_wtxid,) + "allowed": 
False,) + "reject-reason": "txn-same-nonwitness-data-in-mempool") + })) +) + # sendrawtransaction will not throw but quits early when the exact same transaction is already in mempool) + node.sendrawtransaction(child_one.serialize().hex())) +) + self.log.info("Connect another peer that hasn't seen child_one before")) + peer_wtxid_relay_2 = node.add_p2p_connection(P2PTxInvStore())) +) + self.log.info("Submit child_two to the mempool")) + # sendrawtransaction will not throw but quits early when a transaction with the same non-witness data is already in mempool) + node.sendrawtransaction(child_two.serialize().hex())) +) + # The node should rebroadcast the transaction using the wtxid of the correct transaction) + # (child_one, which is in its mempool).) + peer_wtxid_relay_2.wait_for_broadcast([child_one_wtxid])) + assert_equal(node.getmempoolinfo()["unbroadcastcount"], 0)) +) +if __name__ == '__main__':) + MempoolWtxidTest(__file__).main()) diff --git a/test/functional/mempool_truc.py b/test/functional/mempool_truc.py index e7acccb0fb654f..b0531402197360 100755 --- a/test/functional/mempool_truc.py +++ b/test/functional/mempool_truc.py @@ -1,620 +1,620 @@ -#!/usr/bin/env python3 -# Copyright (c) 2024 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -from decimal import Decimal - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_not_equal, - assert_equal, - assert_greater_than, - assert_greater_than_or_equal, - assert_raises_rpc_error, -) -from test_framework.wallet import ( - COIN, - DEFAULT_FEE, - MiniWallet, -) - -MAX_REPLACEMENT_CANDIDATES = 100 -TRUC_MAX_VSIZE = 10000 -TRUC_CHILD_MAX_VSIZE = 1000 - -def cleanup(extra_args=None): - def decorator(func): - def wrapper(self): - try: - if extra_args is not None: - self.restart_node(0, extra_args=extra_args) - func(self) - finally: - # Clear mempool again after test - self.generate(self.nodes[0], 1) - if extra_args is not None: - self.restart_node(0) - return wrapper - return decorator - -class MempoolTRUC(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.extra_args = [[]] - self.setup_clean_chain = True - - def check_mempool(self, txids): - """Assert exact contents of the node's mempool (by txid).""" - mempool_contents = self.nodes[0].getrawmempool() - assert_equal(len(txids), len(mempool_contents)) - assert all([txid in txids for txid in mempool_contents]) - - @cleanup(extra_args=["-datacarriersize=20000"]) - def test_truc_max_vsize(self): - node = self.nodes[0] - self.log.info("Test TRUC-specific maximum transaction vsize") - tx_v3_heavy = self.wallet.create_self_transfer(target_vsize=TRUC_MAX_VSIZE + 1, version=3) - assert_greater_than_or_equal(tx_v3_heavy["tx"].get_vsize(), TRUC_MAX_VSIZE) - expected_error_heavy = f"TRUC-violation, version=3 tx {tx_v3_heavy['txid']} (wtxid={tx_v3_heavy['wtxid']}) is too big" - assert_raises_rpc_error(-26, expected_error_heavy, node.sendrawtransaction, tx_v3_heavy["hex"]) - self.check_mempool([]) - - # Ensure we are hitting the TRUC-specific limit and not something else - tx_v2_heavy = self.wallet.send_self_transfer(from_node=node, target_vsize=TRUC_MAX_VSIZE + 1, version=2) - self.check_mempool([tx_v2_heavy["txid"]]) - - @cleanup(extra_args=["-datacarriersize=1000"]) - def test_truc_acceptance(self): - node = self.nodes[0] - self.log.info("Test a child of a TRUC transaction cannot be more than 1000vB") - 
tx_v3_parent_normal = self.wallet.send_self_transfer(from_node=node, version=3) - self.check_mempool([tx_v3_parent_normal["txid"]]) - tx_v3_child_heavy = self.wallet.create_self_transfer( - utxo_to_spend=tx_v3_parent_normal["new_utxo"], - target_vsize=TRUC_CHILD_MAX_VSIZE + 1, - version=3 - ) - assert_greater_than_or_equal(tx_v3_child_heavy["tx"].get_vsize(), TRUC_CHILD_MAX_VSIZE) - expected_error_child_heavy = f"TRUC-violation, version=3 child tx {tx_v3_child_heavy['txid']} (wtxid={tx_v3_child_heavy['wtxid']}) is too big" - assert_raises_rpc_error(-26, expected_error_child_heavy, node.sendrawtransaction, tx_v3_child_heavy["hex"]) - self.check_mempool([tx_v3_parent_normal["txid"]]) - # tx has no descendants - assert_equal(node.getmempoolentry(tx_v3_parent_normal["txid"])["descendantcount"], 1) - - self.log.info("Test that, during replacements, only the new transaction counts for TRUC descendant limit") - tx_v3_child_almost_heavy = self.wallet.send_self_transfer( - from_node=node, - fee_rate=DEFAULT_FEE, - utxo_to_spend=tx_v3_parent_normal["new_utxo"], - target_vsize=TRUC_CHILD_MAX_VSIZE - 3, - version=3 - ) - assert_greater_than_or_equal(TRUC_CHILD_MAX_VSIZE, tx_v3_child_almost_heavy["tx"].get_vsize()) - self.check_mempool([tx_v3_parent_normal["txid"], tx_v3_child_almost_heavy["txid"]]) - assert_equal(node.getmempoolentry(tx_v3_parent_normal["txid"])["descendantcount"], 2) - tx_v3_child_almost_heavy_rbf = self.wallet.send_self_transfer( - from_node=node, - fee_rate=DEFAULT_FEE * 2, - utxo_to_spend=tx_v3_parent_normal["new_utxo"], - target_vsize=875, - version=3 - ) - assert_greater_than_or_equal(tx_v3_child_almost_heavy["tx"].get_vsize() + tx_v3_child_almost_heavy_rbf["tx"].get_vsize(), - TRUC_CHILD_MAX_VSIZE) - self.check_mempool([tx_v3_parent_normal["txid"], tx_v3_child_almost_heavy_rbf["txid"]]) - assert_equal(node.getmempoolentry(tx_v3_parent_normal["txid"])["descendantcount"], 2) - - @cleanup(extra_args=None) - def test_truc_replacement(self): - node = self.nodes[0] - self.log.info("Test TRUC transactions may be replaced by TRUC transactions") - utxo_v3_bip125 = self.wallet.get_utxo() - tx_v3_bip125 = self.wallet.send_self_transfer( - from_node=node, - fee_rate=DEFAULT_FEE, - utxo_to_spend=utxo_v3_bip125, - version=3 - ) - self.check_mempool([tx_v3_bip125["txid"]]) - - tx_v3_bip125_rbf = self.wallet.send_self_transfer( - from_node=node, - fee_rate=DEFAULT_FEE * 2, - utxo_to_spend=utxo_v3_bip125, - version=3 - ) - self.check_mempool([tx_v3_bip125_rbf["txid"]]) - - self.log.info("Test TRUC transactions may be replaced by non-TRUC (BIP125) transactions") - tx_v3_bip125_rbf_v2 = self.wallet.send_self_transfer( - from_node=node, - fee_rate=DEFAULT_FEE * 3, - utxo_to_spend=utxo_v3_bip125, - version=2 - ) - self.check_mempool([tx_v3_bip125_rbf_v2["txid"]]) - - self.log.info("Test that replacements cannot cause violation of inherited TRUC") - utxo_v3_parent = self.wallet.get_utxo() - tx_v3_parent = self.wallet.send_self_transfer( - from_node=node, - fee_rate=DEFAULT_FEE, - utxo_to_spend=utxo_v3_parent, - version=3 - ) - tx_v3_child = self.wallet.send_self_transfer( - from_node=node, - fee_rate=DEFAULT_FEE, - utxo_to_spend=tx_v3_parent["new_utxo"], - version=3 - ) - self.check_mempool([tx_v3_bip125_rbf_v2["txid"], tx_v3_parent["txid"], tx_v3_child["txid"]]) - - tx_v3_child_rbf_v2 = self.wallet.create_self_transfer( - fee_rate=DEFAULT_FEE * 2, - utxo_to_spend=tx_v3_parent["new_utxo"], - version=2 - ) - expected_error_v2_v3 = f"TRUC-violation, non-version=3 tx {tx_v3_child_rbf_v2['txid']} 
(wtxid={tx_v3_child_rbf_v2['wtxid']}) cannot spend from version=3 tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']})" - assert_raises_rpc_error(-26, expected_error_v2_v3, node.sendrawtransaction, tx_v3_child_rbf_v2["hex"]) - self.check_mempool([tx_v3_bip125_rbf_v2["txid"], tx_v3_parent["txid"], tx_v3_child["txid"]]) - - - @cleanup(extra_args=["-datacarriersize=40000"]) - def test_truc_reorg(self): - node = self.nodes[0] - self.log.info("Test that, during a reorg, TRUC rules are not enforced") - tx_v2_block = self.wallet.send_self_transfer(from_node=node, version=2) - tx_v3_block = self.wallet.send_self_transfer(from_node=node, version=3) - tx_v3_block2 = self.wallet.send_self_transfer(from_node=node, version=3) - self.check_mempool([tx_v3_block["txid"], tx_v2_block["txid"], tx_v3_block2["txid"]]) - - block = self.generate(node, 1) - self.check_mempool([]) - tx_v2_from_v3 = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v3_block["new_utxo"], version=2) - tx_v3_from_v2 = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v2_block["new_utxo"], version=3) - tx_v3_child_large = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v3_block2["new_utxo"], target_vsize=1250, version=3) - assert_greater_than(node.getmempoolentry(tx_v3_child_large["txid"])["vsize"], TRUC_CHILD_MAX_VSIZE) - self.check_mempool([tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"]]) - node.invalidateblock(block[0]) - self.check_mempool([tx_v3_block["txid"], tx_v2_block["txid"], tx_v3_block2["txid"], tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"]]) - # This is needed because generate() will create the exact same block again. - node.reconsiderblock(block[0]) - - - @cleanup(extra_args=["-limitdescendantsize=10", "-datacarriersize=40000"]) - def test_nondefault_package_limits(self): - """ - Max standard tx size + TRUC rules imply the ancestor/descendant rules (at their default - values), but those checks must not be skipped. Ensure both sets of checks are done by - changing the ancestor/descendant limit configurations. 
- """ - node = self.nodes[0] - self.log.info("Test that a decreased limitdescendantsize also applies to TRUC child") - parent_target_vsize = 9990 - child_target_vsize = 500 - tx_v3_parent_large1 = self.wallet.send_self_transfer( - from_node=node, - target_vsize=parent_target_vsize, - version=3 - ) - tx_v3_child_large1 = self.wallet.create_self_transfer( - utxo_to_spend=tx_v3_parent_large1["new_utxo"], - target_vsize=child_target_vsize, - version=3 - ) - - # Parent and child are within v3 limits, but parent's 10kvB descendant limit is exceeded - assert_greater_than_or_equal(TRUC_MAX_VSIZE, tx_v3_parent_large1["tx"].get_vsize()) - assert_greater_than_or_equal(TRUC_CHILD_MAX_VSIZE, tx_v3_child_large1["tx"].get_vsize()) - assert_greater_than(tx_v3_parent_large1["tx"].get_vsize() + tx_v3_child_large1["tx"].get_vsize(), 10000) - - assert_raises_rpc_error(-26, f"too-long-mempool-chain, exceeds descendant size limit for tx {tx_v3_parent_large1['txid']}", node.sendrawtransaction, tx_v3_child_large1["hex"]) - self.check_mempool([tx_v3_parent_large1["txid"]]) - assert_equal(node.getmempoolentry(tx_v3_parent_large1["txid"])["descendantcount"], 1) - self.generate(node, 1) - - self.log.info("Test that a decreased limitancestorsize also applies to v3 parent") - self.restart_node(0, extra_args=["-limitancestorsize=10", "-datacarriersize=40000"]) - tx_v3_parent_large2 = self.wallet.send_self_transfer( - from_node=node, - target_vsize=parent_target_vsize, - version=3 - ) - tx_v3_child_large2 = self.wallet.create_self_transfer( - utxo_to_spend=tx_v3_parent_large2["new_utxo"], - target_vsize=child_target_vsize, - version=3 - ) - - # Parent and child are within TRUC limits - assert_greater_than_or_equal(TRUC_MAX_VSIZE, tx_v3_parent_large2["tx"].get_vsize()) - assert_greater_than_or_equal(TRUC_CHILD_MAX_VSIZE, tx_v3_child_large2["tx"].get_vsize()) - assert_greater_than(tx_v3_parent_large2["tx"].get_vsize() + tx_v3_child_large2["tx"].get_vsize(), 10000) - - assert_raises_rpc_error(-26, f"too-long-mempool-chain, exceeds ancestor size limit", node.sendrawtransaction, tx_v3_child_large2["hex"]) - self.check_mempool([tx_v3_parent_large2["txid"]]) - - @cleanup(extra_args=["-datacarriersize=1000"]) - def test_truc_ancestors_package(self): - self.log.info("Test that TRUC ancestor limits are checked within the package") - node = self.nodes[0] - tx_v3_parent_normal = self.wallet.create_self_transfer( - fee_rate=0, - target_vsize=1001, - version=3 - ) - tx_v3_parent_2_normal = self.wallet.create_self_transfer( - fee_rate=0, - target_vsize=1001, - version=3 - ) - tx_v3_child_multiparent = self.wallet.create_self_transfer_multi( - utxos_to_spend=[tx_v3_parent_normal["new_utxo"], tx_v3_parent_2_normal["new_utxo"]], - fee_per_output=10000, - version=3 - ) - tx_v3_child_heavy = self.wallet.create_self_transfer_multi( - utxos_to_spend=[tx_v3_parent_normal["new_utxo"]], - target_vsize=TRUC_CHILD_MAX_VSIZE + 1, - fee_per_output=10000, - version=3 - ) - - self.check_mempool([]) - result = node.submitpackage([tx_v3_parent_normal["hex"], tx_v3_parent_2_normal["hex"], tx_v3_child_multiparent["hex"]]) - assert_equal(result['package_msg'], f"TRUC-violation, tx {tx_v3_child_multiparent['txid']} (wtxid={tx_v3_child_multiparent['wtxid']}) would have too many ancestors") - self.check_mempool([]) - - self.check_mempool([]) - result = node.submitpackage([tx_v3_parent_normal["hex"], tx_v3_child_heavy["hex"]]) - # tx_v3_child_heavy is heavy based on vsize, not sigops. 
- assert_equal(result['package_msg'], f"TRUC-violation, version=3 child tx {tx_v3_child_heavy['txid']} (wtxid={tx_v3_child_heavy['wtxid']}) is too big: {tx_v3_child_heavy['tx'].get_vsize()} > 1000 virtual bytes") - self.check_mempool([]) - - tx_v3_parent = self.wallet.create_self_transfer(version=3) - tx_v3_child = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_parent["new_utxo"], version=3) - tx_v3_grandchild = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_child["new_utxo"], version=3) - result = node.testmempoolaccept([tx_v3_parent["hex"], tx_v3_child["hex"], tx_v3_grandchild["hex"]]) - assert all([txresult["package-error"] == f"TRUC-violation, tx {tx_v3_grandchild['txid']} (wtxid={tx_v3_grandchild['wtxid']}) would have too many ancestors" for txresult in result]) - - @cleanup(extra_args=None) - def test_truc_ancestors_package_and_mempool(self): - """ - A TRUC transaction in a package cannot have 2 TRUC parents. - Test that if we have a transaction graph A -> B -> C, where A, B, C are - all TRUC transactions, that we cannot use submitpackage to get the - transactions all into the mempool. - - Verify, in particular, that if A is already in the mempool, then - submitpackage(B, C) will fail. - """ - node = self.nodes[0] - self.log.info("Test that TRUC ancestor limits include transactions within the package and all in-mempool ancestors") - # This is our transaction "A": - tx_in_mempool = self.wallet.send_self_transfer(from_node=node, version=3) - - # Verify that A is in the mempool - self.check_mempool([tx_in_mempool["txid"]]) - - # tx_0fee_parent is our transaction "B"; just create it. - tx_0fee_parent = self.wallet.create_self_transfer(utxo_to_spend=tx_in_mempool["new_utxo"], fee=0, fee_rate=0, version=3) - - # tx_child_violator is our transaction "C"; create it: - tx_child_violator = self.wallet.create_self_transfer_multi(utxos_to_spend=[tx_0fee_parent["new_utxo"]], version=3) - - # submitpackage(B, C) should fail - result = node.submitpackage([tx_0fee_parent["hex"], tx_child_violator["hex"]]) - assert_equal(result['package_msg'], f"TRUC-violation, tx {tx_child_violator['txid']} (wtxid={tx_child_violator['wtxid']}) would have too many ancestors") - self.check_mempool([tx_in_mempool["txid"]]) - - @cleanup(extra_args=None) - def test_sibling_eviction_package(self): - """ - When a transaction has a mempool sibling, it may be eligible for sibling eviction. - However, this option is only available in single transaction acceptance. It doesn't work in - a multi-testmempoolaccept (where RBF is disabled) or when doing package CPFP. 
- """ - self.log.info("Test TRUC sibling eviction in submitpackage and multi-testmempoolaccept") - node = self.nodes[0] - # Add a parent + child to mempool - tx_mempool_parent = self.wallet.send_self_transfer_multi( - from_node=node, - utxos_to_spend=[self.wallet.get_utxo()], - num_outputs=2, - version=3 - ) - tx_mempool_sibling = self.wallet.send_self_transfer( - from_node=node, - utxo_to_spend=tx_mempool_parent["new_utxos"][0], - version=3 - ) - self.check_mempool([tx_mempool_parent["txid"], tx_mempool_sibling["txid"]]) - - tx_sibling_1 = self.wallet.create_self_transfer( - utxo_to_spend=tx_mempool_parent["new_utxos"][1], - version=3, - fee_rate=DEFAULT_FEE*100, - ) - tx_has_mempool_uncle = self.wallet.create_self_transfer(utxo_to_spend=tx_sibling_1["new_utxo"], version=3) - - tx_sibling_2 = self.wallet.create_self_transfer( - utxo_to_spend=tx_mempool_parent["new_utxos"][0], - version=3, - fee_rate=DEFAULT_FEE*200, - ) - - tx_sibling_3 = self.wallet.create_self_transfer( - utxo_to_spend=tx_mempool_parent["new_utxos"][1], - version=3, - fee_rate=0, - ) - tx_bumps_parent_with_sibling = self.wallet.create_self_transfer( - utxo_to_spend=tx_sibling_3["new_utxo"], - version=3, - fee_rate=DEFAULT_FEE*300, - ) - - # Fails with another non-related transaction via testmempoolaccept - tx_unrelated = self.wallet.create_self_transfer(version=3) - result_test_unrelated = node.testmempoolaccept([tx_sibling_1["hex"], tx_unrelated["hex"]]) - assert_equal(result_test_unrelated[0]["reject-reason"], "TRUC-violation") - - # Fails in a package via testmempoolaccept - result_test_1p1c = node.testmempoolaccept([tx_sibling_1["hex"], tx_has_mempool_uncle["hex"]]) - assert_equal(result_test_1p1c[0]["reject-reason"], "TRUC-violation") - - # Allowed when tx is submitted in a package and evaluated individually. - # Note that the child failed since it would be the 3rd generation. - result_package_indiv = node.submitpackage([tx_sibling_1["hex"], tx_has_mempool_uncle["hex"]]) - self.check_mempool([tx_mempool_parent["txid"], tx_sibling_1["txid"]]) - expected_error_gen3 = f"TRUC-violation, tx {tx_has_mempool_uncle['txid']} (wtxid={tx_has_mempool_uncle['wtxid']}) would have too many ancestors" - - assert_equal(result_package_indiv["tx-results"][tx_has_mempool_uncle['wtxid']]['error'], expected_error_gen3) - - # Allowed when tx is submitted in a package with in-mempool parent (which is deduplicated). 
- node.submitpackage([tx_mempool_parent["hex"], tx_sibling_2["hex"]]) - self.check_mempool([tx_mempool_parent["txid"], tx_sibling_2["txid"]]) - - # Child cannot pay for sibling eviction for parent, as it violates TRUC topology limits - result_package_cpfp = node.submitpackage([tx_sibling_3["hex"], tx_bumps_parent_with_sibling["hex"]]) - self.check_mempool([tx_mempool_parent["txid"], tx_sibling_2["txid"]]) - expected_error_cpfp = f"TRUC-violation, tx {tx_mempool_parent['txid']} (wtxid={tx_mempool_parent['wtxid']}) would exceed descendant count limit" - - assert_equal(result_package_cpfp["tx-results"][tx_sibling_3['wtxid']]['error'], expected_error_cpfp) - - - @cleanup(extra_args=["-datacarriersize=1000"]) - def test_truc_package_inheritance(self): - self.log.info("Test that TRUC inheritance is checked within package") - node = self.nodes[0] - tx_v3_parent = self.wallet.create_self_transfer( - fee_rate=0, - target_vsize=1001, - version=3 - ) - tx_v2_child = self.wallet.create_self_transfer_multi( - utxos_to_spend=[tx_v3_parent["new_utxo"]], - fee_per_output=10000, - version=2 - ) - self.check_mempool([]) - result = node.submitpackage([tx_v3_parent["hex"], tx_v2_child["hex"]]) - assert_equal(result['package_msg'], f"TRUC-violation, non-version=3 tx {tx_v2_child['txid']} (wtxid={tx_v2_child['wtxid']}) cannot spend from version=3 tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']})") - self.check_mempool([]) - - @cleanup(extra_args=None) - def test_truc_in_testmempoolaccept(self): - node = self.nodes[0] - - self.log.info("Test that TRUC inheritance is accurately assessed in testmempoolaccept") - tx_v2 = self.wallet.create_self_transfer(version=2) - tx_v2_from_v2 = self.wallet.create_self_transfer(utxo_to_spend=tx_v2["new_utxo"], version=2) - tx_v3_from_v2 = self.wallet.create_self_transfer(utxo_to_spend=tx_v2["new_utxo"], version=3) - tx_v3 = self.wallet.create_self_transfer(version=3) - tx_v2_from_v3 = self.wallet.create_self_transfer(utxo_to_spend=tx_v3["new_utxo"], version=2) - tx_v3_from_v3 = self.wallet.create_self_transfer(utxo_to_spend=tx_v3["new_utxo"], version=3) - - # testmempoolaccept paths don't require child-with-parents topology. Ensure that topology - # assumptions aren't made in inheritance checks. 
- test_accept_v2_and_v3 = node.testmempoolaccept([tx_v2["hex"], tx_v3["hex"]]) - assert all([result["allowed"] for result in test_accept_v2_and_v3]) - - test_accept_v3_from_v2 = node.testmempoolaccept([tx_v2["hex"], tx_v3_from_v2["hex"]]) - expected_error_v3_from_v2 = f"TRUC-violation, version=3 tx {tx_v3_from_v2['txid']} (wtxid={tx_v3_from_v2['wtxid']}) cannot spend from non-version=3 tx {tx_v2['txid']} (wtxid={tx_v2['wtxid']})" - assert all([result["package-error"] == expected_error_v3_from_v2 for result in test_accept_v3_from_v2]) - - test_accept_v2_from_v3 = node.testmempoolaccept([tx_v3["hex"], tx_v2_from_v3["hex"]]) - expected_error_v2_from_v3 = f"TRUC-violation, non-version=3 tx {tx_v2_from_v3['txid']} (wtxid={tx_v2_from_v3['wtxid']}) cannot spend from version=3 tx {tx_v3['txid']} (wtxid={tx_v3['wtxid']})" - assert all([result["package-error"] == expected_error_v2_from_v3 for result in test_accept_v2_from_v3]) - - test_accept_pairs = node.testmempoolaccept([tx_v2["hex"], tx_v3["hex"], tx_v2_from_v2["hex"], tx_v3_from_v3["hex"]]) - assert all([result["allowed"] for result in test_accept_pairs]) - - self.log.info("Test that descendant violations are caught in testmempoolaccept") - tx_v3_independent = self.wallet.create_self_transfer(version=3) - tx_v3_parent = self.wallet.create_self_transfer_multi(num_outputs=2, version=3) - tx_v3_child_1 = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_parent["new_utxos"][0], version=3) - tx_v3_child_2 = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_parent["new_utxos"][1], version=3) - test_accept_2children = node.testmempoolaccept([tx_v3_parent["hex"], tx_v3_child_1["hex"], tx_v3_child_2["hex"]]) - expected_error_2children = f"TRUC-violation, tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']}) would exceed descendant count limit" - assert all([result["package-error"] == expected_error_2children for result in test_accept_2children]) - - # Extra TRUC transaction does not get incorrectly marked as extra descendant - test_accept_1child_with_exra = node.testmempoolaccept([tx_v3_parent["hex"], tx_v3_child_1["hex"], tx_v3_independent["hex"]]) - assert all([result["allowed"] for result in test_accept_1child_with_exra]) - - # Extra TRUC transaction does not make us ignore the extra descendant - test_accept_2children_with_exra = node.testmempoolaccept([tx_v3_parent["hex"], tx_v3_child_1["hex"], tx_v3_child_2["hex"], tx_v3_independent["hex"]]) - expected_error_extra = f"TRUC-violation, tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']}) would exceed descendant count limit" - assert all([result["package-error"] == expected_error_extra for result in test_accept_2children_with_exra]) - # Same result if the parent is already in mempool - node.sendrawtransaction(tx_v3_parent["hex"]) - test_accept_2children_with_in_mempool_parent = node.testmempoolaccept([tx_v3_child_1["hex"], tx_v3_child_2["hex"]]) - assert all([result["package-error"] == expected_error_extra for result in test_accept_2children_with_in_mempool_parent]) - - @cleanup(extra_args=None) - def test_reorg_2child_rbf(self): - node = self.nodes[0] - self.log.info("Test that children of a TRUC transaction can be replaced individually, even if there are multiple due to reorg") - - ancestor_tx = self.wallet.send_self_transfer_multi(from_node=node, num_outputs=2, version=3) - self.check_mempool([ancestor_tx["txid"]]) - - block = self.generate(node, 1)[0] - self.check_mempool([]) - - child_1 = self.wallet.send_self_transfer(from_node=node, version=3, 
utxo_to_spend=ancestor_tx["new_utxos"][0]) - child_2 = self.wallet.send_self_transfer(from_node=node, version=3, utxo_to_spend=ancestor_tx["new_utxos"][1]) - self.check_mempool([child_1["txid"], child_2["txid"]]) - - self.generate(node, 1) - self.check_mempool([]) - - # Create a reorg, causing ancestor_tx to exceed the 1-child limit - node.invalidateblock(block) - self.check_mempool([ancestor_tx["txid"], child_1["txid"], child_2["txid"]]) - assert_equal(node.getmempoolentry(ancestor_tx["txid"])["descendantcount"], 3) - - # Create a replacement of child_1. It does not conflict with child_2. - child_1_conflict = self.wallet.send_self_transfer(from_node=node, version=3, utxo_to_spend=ancestor_tx["new_utxos"][0], fee_rate=Decimal("0.01")) - - # Ensure child_1 and child_1_conflict are different transactions - assert (child_1_conflict["txid"] != child_1["txid"]) - self.check_mempool([ancestor_tx["txid"], child_1_conflict["txid"], child_2["txid"]]) - assert_equal(node.getmempoolentry(ancestor_tx["txid"])["descendantcount"], 3) - - @cleanup(extra_args=None) - def test_truc_sibling_eviction(self): - self.log.info("Test sibling eviction for TRUC") - node = self.nodes[0] - tx_v3_parent = self.wallet.send_self_transfer_multi(from_node=node, num_outputs=2, version=3) - # This is the sibling to replace - tx_v3_child_1 = self.wallet.send_self_transfer( - from_node=node, utxo_to_spend=tx_v3_parent["new_utxos"][0], fee_rate=DEFAULT_FEE * 2, version=3 - ) - assert tx_v3_child_1["txid"] in node.getrawmempool() - - self.log.info("Test tx must be higher feerate than sibling to evict it") - tx_v3_child_2_rule6 = self.wallet.create_self_transfer( - utxo_to_spend=tx_v3_parent["new_utxos"][1], fee_rate=DEFAULT_FEE, version=3 - ) - rule6_str = f"insufficient fee (including sibling eviction), rejecting replacement {tx_v3_child_2_rule6['txid']}; new feerate" - assert_raises_rpc_error(-26, rule6_str, node.sendrawtransaction, tx_v3_child_2_rule6["hex"]) - self.check_mempool([tx_v3_parent['txid'], tx_v3_child_1['txid']]) - - self.log.info("Test tx must meet absolute fee rules to evict sibling") - tx_v3_child_2_rule4 = self.wallet.create_self_transfer( - utxo_to_spend=tx_v3_parent["new_utxos"][1], fee_rate=2 * DEFAULT_FEE + Decimal("0.00000001"), version=3 - ) - rule4_str = f"insufficient fee (including sibling eviction), rejecting replacement {tx_v3_child_2_rule4['txid']}, not enough additional fees to relay" - assert_raises_rpc_error(-26, rule4_str, node.sendrawtransaction, tx_v3_child_2_rule4["hex"]) - self.check_mempool([tx_v3_parent['txid'], tx_v3_child_1['txid']]) - - self.log.info("Test tx cannot cause more than 100 evictions including RBF and sibling eviction") - # First add 4 groups of 25 transactions. - utxos_for_conflict = [] - txids_v2_100 = [] - for _ in range(4): - confirmed_utxo = self.wallet.get_utxo(confirmed_only=True) - utxos_for_conflict.append(confirmed_utxo) - # 25 is within descendant limits - chain_length = int(MAX_REPLACEMENT_CANDIDATES / 4) - chain = self.wallet.create_self_transfer_chain(chain_length=chain_length, utxo_to_spend=confirmed_utxo) - for item in chain: - txids_v2_100.append(item["txid"]) - node.sendrawtransaction(item["hex"]) - self.check_mempool(txids_v2_100 + [tx_v3_parent["txid"], tx_v3_child_1["txid"]]) - - # Replacing 100 transactions is fine - tx_v3_replacement_only = self.wallet.create_self_transfer_multi(utxos_to_spend=utxos_for_conflict, fee_per_output=4000000) - # Override maxfeerate - it costs a lot to replace these 100 transactions. 
- assert node.testmempoolaccept([tx_v3_replacement_only["hex"]], maxfeerate=0)[0]["allowed"] - # Adding another one exceeds the limit. - utxos_for_conflict.append(tx_v3_parent["new_utxos"][1]) - tx_v3_child_2_rule5 = self.wallet.create_self_transfer_multi(utxos_to_spend=utxos_for_conflict, fee_per_output=4000000, version=3) - rule5_str = f"too many potential replacements (including sibling eviction), rejecting replacement {tx_v3_child_2_rule5['txid']}; too many potential replacements (101 > 100)" - assert_raises_rpc_error(-26, rule5_str, node.sendrawtransaction, tx_v3_child_2_rule5["hex"]) - self.check_mempool(txids_v2_100 + [tx_v3_parent["txid"], tx_v3_child_1["txid"]]) - - self.log.info("Test sibling eviction is successful if it meets all RBF rules") - tx_v3_child_2 = self.wallet.create_self_transfer( - utxo_to_spend=tx_v3_parent["new_utxos"][1], fee_rate=DEFAULT_FEE*10, version=3 - ) - node.sendrawtransaction(tx_v3_child_2["hex"]) - self.check_mempool(txids_v2_100 + [tx_v3_parent["txid"], tx_v3_child_2["txid"]]) - - self.log.info("Test that it's possible to do a sibling eviction and RBF at the same time") - utxo_unrelated_conflict = self.wallet.get_utxo(confirmed_only=True) - tx_unrelated_replacee = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=utxo_unrelated_conflict) - assert tx_unrelated_replacee["txid"] in node.getrawmempool() - - fee_to_beat = max(int(tx_v3_child_2["fee"] * COIN), int(tx_unrelated_replacee["fee"]*COIN)) - - tx_v3_child_3 = self.wallet.create_self_transfer_multi( - utxos_to_spend=[tx_v3_parent["new_utxos"][0], utxo_unrelated_conflict], fee_per_output=fee_to_beat*2, version=3 - ) - node.sendrawtransaction(tx_v3_child_3["hex"]) - self.check_mempool(txids_v2_100 + [tx_v3_parent["txid"], tx_v3_child_3["txid"]]) - - @cleanup(extra_args=None) - def test_reorg_sibling_eviction_1p2c(self): - node = self.nodes[0] - self.log.info("Test that sibling eviction is not allowed when multiple siblings exist") - - tx_with_multi_children = self.wallet.send_self_transfer_multi(from_node=node, num_outputs=3, version=3, confirmed_only=True) - self.check_mempool([tx_with_multi_children["txid"]]) - - block_to_disconnect = self.generate(node, 1)[0] - self.check_mempool([]) - - tx_with_sibling1 = self.wallet.send_self_transfer(from_node=node, version=3, utxo_to_spend=tx_with_multi_children["new_utxos"][0]) - tx_with_sibling2 = self.wallet.send_self_transfer(from_node=node, version=3, utxo_to_spend=tx_with_multi_children["new_utxos"][1]) - self.check_mempool([tx_with_sibling1["txid"], tx_with_sibling2["txid"]]) - - # Create a reorg, bringing tx_with_multi_children back into the mempool with a descendant count of 3. 
- node.invalidateblock(block_to_disconnect) - self.check_mempool([tx_with_multi_children["txid"], tx_with_sibling1["txid"], tx_with_sibling2["txid"]]) - assert_equal(node.getmempoolentry(tx_with_multi_children["txid"])["descendantcount"], 3) - - # Sibling eviction is not allowed because there are two siblings - tx_with_sibling3 = self.wallet.create_self_transfer( - version=3, - utxo_to_spend=tx_with_multi_children["new_utxos"][2], - fee_rate=DEFAULT_FEE*50 - ) - expected_error_2siblings = f"TRUC-violation, tx {tx_with_multi_children['txid']} (wtxid={tx_with_multi_children['wtxid']}) would exceed descendant count limit" - assert_raises_rpc_error(-26, expected_error_2siblings, node.sendrawtransaction, tx_with_sibling3["hex"]) - - # However, an RBF (with conflicting inputs) is possible even if the resulting cluster size exceeds 2 - tx_with_sibling3_rbf = self.wallet.send_self_transfer( - from_node=node, - version=3, - utxo_to_spend=tx_with_multi_children["new_utxos"][0], - fee_rate=DEFAULT_FEE*50 - ) - self.check_mempool([tx_with_multi_children["txid"], tx_with_sibling3_rbf["txid"], tx_with_sibling2["txid"]]) - - - def run_test(self): - self.log.info("Generate blocks to create UTXOs") - node = self.nodes[0] - self.wallet = MiniWallet(node) - self.generate(self.wallet, 120) - self.test_truc_max_vsize() - self.test_truc_acceptance() - self.test_truc_replacement() - self.test_truc_reorg() - self.test_nondefault_package_limits() - self.test_truc_ancestors_package() - self.test_truc_ancestors_package_and_mempool() - self.test_sibling_eviction_package() - self.test_truc_package_inheritance() - self.test_truc_in_testmempoolaccept() - self.test_reorg_2child_rbf() - self.test_truc_sibling_eviction() - self.test_reorg_sibling_eviction_1p2c() - - -if __name__ == "__main__": - MempoolTRUC(__file__).main() +#!/usr/bin/env python3) +# Copyright (c) 2024 The Bitcoin Core developers) +# Distributed under the MIT software license, see the accompanying) +# file COPYING or http://www.opensource.org/licenses/mit-license.php.) 
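+"""Tests for TRUC (version=3) mempool policy: maximum transaction and child
+vsize, ancestor/descendant topology and inheritance rules, replacements,
+reorg behaviour, and sibling eviction."""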
+from decimal import Decimal) +) +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.util import () + assert_not_equal,) + assert_equal,) + assert_greater_than,) + assert_greater_than_or_equal,) + assert_raises_rpc_error,) +)) +from test_framework.wallet import () + COIN,) + DEFAULT_FEE,) + MiniWallet,) +)) +) +MAX_REPLACEMENT_CANDIDATES = 100) +TRUC_MAX_VSIZE = 10000) +TRUC_CHILD_MAX_VSIZE = 1000) +) +def cleanup(extra_args=None):) + def decorator(func):) + def wrapper(self):) + try:) + if extra_args is not None:) + self.restart_node(0, extra_args=extra_args)) + func(self)) + finally:) + # Clear mempool again after test) + self.generate(self.nodes[0], 1)) + if extra_args is not None:) + self.restart_node(0)) + return wrapper) + return decorator) +) +class MempoolTRUC(BitcoinTestFramework):) + def set_test_params(self):) + self.num_nodes = 1) + self.extra_args = [[]]) + self.setup_clean_chain = True) +) + def check_mempool(self, txids):) + """Assert exact contents of the node's mempool (by txid).""") + mempool_contents = self.nodes[0].getrawmempool()) + assert_equal(len(txids), len(mempool_contents))) + assert all([txid in txids for txid in mempool_contents])) +) + @cleanup(extra_args=["-datacarriersize=20000"])) + def test_truc_max_vsize(self):) + node = self.nodes[0]) + self.log.info("Test TRUC-specific maximum transaction vsize")) + tx_v3_heavy = self.wallet.create_self_transfer(target_vsize=TRUC_MAX_VSIZE + 1, version=3)) + assert_greater_than_or_equal(tx_v3_heavy["tx"].get_vsize(), TRUC_MAX_VSIZE)) + expected_error_heavy = f"TRUC-violation, version=3 tx {tx_v3_heavy['txid']} (wtxid={tx_v3_heavy['wtxid']}) is too big") + assert_raises_rpc_error(-26, expected_error_heavy, node.sendrawtransaction, tx_v3_heavy["hex"])) + self.check_mempool([])) +) + # Ensure we are hitting the TRUC-specific limit and not something else) + tx_v2_heavy = self.wallet.send_self_transfer(from_node=node, target_vsize=TRUC_MAX_VSIZE + 1, version=2)) + self.check_mempool([tx_v2_heavy["txid"]])) +) + @cleanup(extra_args=["-datacarriersize=1000"])) + def test_truc_acceptance(self):) + node = self.nodes[0]) + self.log.info("Test a child of a TRUC transaction cannot be more than 1000vB")) + tx_v3_parent_normal = self.wallet.send_self_transfer(from_node=node, version=3)) + self.check_mempool([tx_v3_parent_normal["txid"]])) + tx_v3_child_heavy = self.wallet.create_self_transfer() + utxo_to_spend=tx_v3_parent_normal["new_utxo"],) + target_vsize=TRUC_CHILD_MAX_VSIZE + 1,) + version=3) + )) + assert_greater_than_or_equal(tx_v3_child_heavy["tx"].get_vsize(), TRUC_CHILD_MAX_VSIZE)) + expected_error_child_heavy = f"TRUC-violation, version=3 child tx {tx_v3_child_heavy['txid']} (wtxid={tx_v3_child_heavy['wtxid']}) is too big") + assert_raises_rpc_error(-26, expected_error_child_heavy, node.sendrawtransaction, tx_v3_child_heavy["hex"])) + self.check_mempool([tx_v3_parent_normal["txid"]])) + # tx has no descendants) + assert_equal(node.getmempoolentry(tx_v3_parent_normal["txid"])["descendantcount"], 1)) +) + self.log.info("Test that, during replacements, only the new transaction counts for TRUC descendant limit")) + tx_v3_child_almost_heavy = self.wallet.send_self_transfer() + from_node=node,) + fee_rate=DEFAULT_FEE,) + utxo_to_spend=tx_v3_parent_normal["new_utxo"],) + target_vsize=TRUC_CHILD_MAX_VSIZE - 3,) + version=3) + )) + assert_greater_than_or_equal(TRUC_CHILD_MAX_VSIZE, tx_v3_child_almost_heavy["tx"].get_vsize())) + self.check_mempool([tx_v3_parent_normal["txid"], 
+            tx_v3_child_almost_heavy["txid"]])
+        assert_equal(node.getmempoolentry(tx_v3_parent_normal["txid"])["descendantcount"], 2)
+        tx_v3_child_almost_heavy_rbf = self.wallet.send_self_transfer(
+            from_node=node,
+            fee_rate=DEFAULT_FEE * 2,
+            utxo_to_spend=tx_v3_parent_normal["new_utxo"],
+            target_vsize=875,
+            version=3
+        )
+        assert_greater_than_or_equal(tx_v3_child_almost_heavy["tx"].get_vsize() + tx_v3_child_almost_heavy_rbf["tx"].get_vsize(),
+                                     TRUC_CHILD_MAX_VSIZE)
+        self.check_mempool([tx_v3_parent_normal["txid"], tx_v3_child_almost_heavy_rbf["txid"]])
+        assert_equal(node.getmempoolentry(tx_v3_parent_normal["txid"])["descendantcount"], 2)
+
+    @cleanup(extra_args=None)
+    def test_truc_replacement(self):
+        node = self.nodes[0]
+        self.log.info("Test TRUC transactions may be replaced by TRUC transactions")
+        utxo_v3_bip125 = self.wallet.get_utxo()
+        tx_v3_bip125 = self.wallet.send_self_transfer(
+            from_node=node,
+            fee_rate=DEFAULT_FEE,
+            utxo_to_spend=utxo_v3_bip125,
+            version=3
+        )
+        self.check_mempool([tx_v3_bip125["txid"]])
+
+        tx_v3_bip125_rbf = self.wallet.send_self_transfer(
+            from_node=node,
+            fee_rate=DEFAULT_FEE * 2,
+            utxo_to_spend=utxo_v3_bip125,
+            version=3
+        )
+        self.check_mempool([tx_v3_bip125_rbf["txid"]])
+
+        self.log.info("Test TRUC transactions may be replaced by non-TRUC (BIP125) transactions")
+        tx_v3_bip125_rbf_v2 = self.wallet.send_self_transfer(
+            from_node=node,
+            fee_rate=DEFAULT_FEE * 3,
+            utxo_to_spend=utxo_v3_bip125,
+            version=2
+        )
+        self.check_mempool([tx_v3_bip125_rbf_v2["txid"]])
+
+        self.log.info("Test that replacements cannot cause violation of inherited TRUC")
+        utxo_v3_parent = self.wallet.get_utxo()
+        tx_v3_parent = self.wallet.send_self_transfer(
+            from_node=node,
+            fee_rate=DEFAULT_FEE,
+            utxo_to_spend=utxo_v3_parent,
+            version=3
+        )
+        tx_v3_child = self.wallet.send_self_transfer(
+            from_node=node,
+            fee_rate=DEFAULT_FEE,
+            utxo_to_spend=tx_v3_parent["new_utxo"],
+            version=3
+        )
+        self.check_mempool([tx_v3_bip125_rbf_v2["txid"], tx_v3_parent["txid"], tx_v3_child["txid"]])
+
+        tx_v3_child_rbf_v2 = self.wallet.create_self_transfer(
+            fee_rate=DEFAULT_FEE * 2,
+            utxo_to_spend=tx_v3_parent["new_utxo"],
+            version=2
+        )
+        expected_error_v2_v3 = f"TRUC-violation, non-version=3 tx {tx_v3_child_rbf_v2['txid']} (wtxid={tx_v3_child_rbf_v2['wtxid']}) cannot spend from version=3 tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']})"
+        assert_raises_rpc_error(-26, expected_error_v2_v3, node.sendrawtransaction, tx_v3_child_rbf_v2["hex"])
+        self.check_mempool([tx_v3_bip125_rbf_v2["txid"], tx_v3_parent["txid"], tx_v3_child["txid"]])
+
+
+    @cleanup(extra_args=["-datacarriersize=40000"])
+    def test_truc_reorg(self):
+        node = self.nodes[0]
+        self.log.info("Test that, during a reorg, TRUC rules are not enforced")
+        tx_v2_block = self.wallet.send_self_transfer(from_node=node, version=2)
+        tx_v3_block = self.wallet.send_self_transfer(from_node=node, version=3)
+        tx_v3_block2 = self.wallet.send_self_transfer(from_node=node, version=3)
+        self.check_mempool([tx_v3_block["txid"], tx_v2_block["txid"], tx_v3_block2["txid"]])
+
+        block = self.generate(node, 1)
+        self.check_mempool([])
+        tx_v2_from_v3 = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v3_block["new_utxo"], version=2)
+        tx_v3_from_v2 = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v2_block["new_utxo"], version=3)
+        tx_v3_child_large = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v3_block2["new_utxo"], target_vsize=1250, version=3)
+        assert_greater_than(node.getmempoolentry(tx_v3_child_large["txid"])["vsize"], TRUC_CHILD_MAX_VSIZE)
+        self.check_mempool([tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"]])
+        node.invalidateblock(block[0])
+        self.check_mempool([tx_v3_block["txid"], tx_v2_block["txid"], tx_v3_block2["txid"], tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"]])
+        # This is needed because generate() will create the exact same block again.
+        node.reconsiderblock(block[0])
+
+
+    @cleanup(extra_args=["-limitdescendantsize=10", "-datacarriersize=40000"])
+    def test_nondefault_package_limits(self):
+        """
+        Max standard tx size + TRUC rules imply the ancestor/descendant rules (at their default
+        values), but those checks must not be skipped. Ensure both sets of checks are done by
+        changing the ancestor/descendant limit configurations.
+        """
+        node = self.nodes[0]
+        self.log.info("Test that a decreased limitdescendantsize also applies to TRUC child")
+        parent_target_vsize = 9990
+        child_target_vsize = 500
+        tx_v3_parent_large1 = self.wallet.send_self_transfer(
+            from_node=node,
+            target_vsize=parent_target_vsize,
+            version=3
+        )
+        tx_v3_child_large1 = self.wallet.create_self_transfer(
+            utxo_to_spend=tx_v3_parent_large1["new_utxo"],
+            target_vsize=child_target_vsize,
+            version=3
+        )
+
+        # Parent and child are within v3 limits, but parent's 10kvB descendant limit is exceeded
+        assert_greater_than_or_equal(TRUC_MAX_VSIZE, tx_v3_parent_large1["tx"].get_vsize())
+        assert_greater_than_or_equal(TRUC_CHILD_MAX_VSIZE, tx_v3_child_large1["tx"].get_vsize())
+        assert_greater_than(tx_v3_parent_large1["tx"].get_vsize() + tx_v3_child_large1["tx"].get_vsize(), 10000)
+
+        assert_raises_rpc_error(-26, f"too-long-mempool-chain, exceeds descendant size limit for tx {tx_v3_parent_large1['txid']}", node.sendrawtransaction, tx_v3_child_large1["hex"])
+        self.check_mempool([tx_v3_parent_large1["txid"]])
+        assert_equal(node.getmempoolentry(tx_v3_parent_large1["txid"])["descendantcount"], 1)
+        self.generate(node, 1)
+
+        self.log.info("Test that a decreased limitancestorsize also applies to v3 parent")
+        self.restart_node(0, extra_args=["-limitancestorsize=10", "-datacarriersize=40000"])
+        tx_v3_parent_large2 = self.wallet.send_self_transfer(
+            from_node=node,
+            target_vsize=parent_target_vsize,
+            version=3
+        )
+        tx_v3_child_large2 = self.wallet.create_self_transfer(
+            utxo_to_spend=tx_v3_parent_large2["new_utxo"],
+            target_vsize=child_target_vsize,
+            version=3
+        )
+
+        # Parent and child are within TRUC limits
+        assert_greater_than_or_equal(TRUC_MAX_VSIZE, tx_v3_parent_large2["tx"].get_vsize())
+        assert_greater_than_or_equal(TRUC_CHILD_MAX_VSIZE, tx_v3_child_large2["tx"].get_vsize())
+        assert_greater_than(tx_v3_parent_large2["tx"].get_vsize() + tx_v3_child_large2["tx"].get_vsize(), 10000)
+
+        assert_raises_rpc_error(-26, f"too-long-mempool-chain, exceeds ancestor size limit", node.sendrawtransaction, tx_v3_child_large2["hex"])
+        self.check_mempool([tx_v3_parent_large2["txid"]])
+
+    @cleanup(extra_args=["-datacarriersize=1000"])
+    def test_truc_ancestors_package(self):
+        self.log.info("Test that TRUC ancestor limits are checked within the package")
+        node = self.nodes[0]
+        tx_v3_parent_normal = self.wallet.create_self_transfer(
+            fee_rate=0,
+            target_vsize=1001,
+            version=3
+        )
+        tx_v3_parent_2_normal = self.wallet.create_self_transfer(
+            fee_rate=0,
+            target_vsize=1001,
+            version=3
+        )
+        tx_v3_child_multiparent = self.wallet.create_self_transfer_multi(
+            utxos_to_spend=[tx_v3_parent_normal["new_utxo"], tx_v3_parent_2_normal["new_utxo"]],
+            fee_per_output=10000,
+            version=3
+        )
+        tx_v3_child_heavy = self.wallet.create_self_transfer_multi(
+            utxos_to_spend=[tx_v3_parent_normal["new_utxo"]],
+            target_vsize=TRUC_CHILD_MAX_VSIZE + 1,
+            fee_per_output=10000,
+            version=3
+        )
+
+        self.check_mempool([])
+        result = node.submitpackage([tx_v3_parent_normal["hex"], tx_v3_parent_2_normal["hex"], tx_v3_child_multiparent["hex"]])
+        assert_equal(result['package_msg'], f"TRUC-violation, tx {tx_v3_child_multiparent['txid']} (wtxid={tx_v3_child_multiparent['wtxid']}) would have too many ancestors")
+        self.check_mempool([])
+
+        self.check_mempool([])
+        result = node.submitpackage([tx_v3_parent_normal["hex"], tx_v3_child_heavy["hex"]])
+        # tx_v3_child_heavy is heavy based on vsize, not sigops.
+        assert_equal(result['package_msg'], f"TRUC-violation, version=3 child tx {tx_v3_child_heavy['txid']} (wtxid={tx_v3_child_heavy['wtxid']}) is too big: {tx_v3_child_heavy['tx'].get_vsize()} > 1000 virtual bytes")
+        self.check_mempool([])
+
+        tx_v3_parent = self.wallet.create_self_transfer(version=3)
+        tx_v3_child = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_parent["new_utxo"], version=3)
+        tx_v3_grandchild = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_child["new_utxo"], version=3)
+        result = node.testmempoolaccept([tx_v3_parent["hex"], tx_v3_child["hex"], tx_v3_grandchild["hex"]])
+        assert all([txresult["package-error"] == f"TRUC-violation, tx {tx_v3_grandchild['txid']} (wtxid={tx_v3_grandchild['wtxid']}) would have too many ancestors" for txresult in result])
+
+    @cleanup(extra_args=None)
+    def test_truc_ancestors_package_and_mempool(self):
+        """
+        A TRUC transaction in a package cannot have 2 TRUC parents.
+        Test that if we have a transaction graph A -> B -> C, where A, B, C are
+        all TRUC transactions, that we cannot use submitpackage to get the
+        transactions all into the mempool.
+
+        Verify, in particular, that if A is already in the mempool, then
+        submitpackage(B, C) will fail.
+        """
+        node = self.nodes[0]
+        self.log.info("Test that TRUC ancestor limits include transactions within the package and all in-mempool ancestors")
+        # This is our transaction "A":
+        tx_in_mempool = self.wallet.send_self_transfer(from_node=node, version=3)
+
+        # Verify that A is in the mempool
+        self.check_mempool([tx_in_mempool["txid"]])
+
+        # tx_0fee_parent is our transaction "B"; just create it.
+        tx_0fee_parent = self.wallet.create_self_transfer(utxo_to_spend=tx_in_mempool["new_utxo"], fee=0, fee_rate=0, version=3)
+
+        # tx_child_violator is our transaction "C"; create it:
+        tx_child_violator = self.wallet.create_self_transfer_multi(utxos_to_spend=[tx_0fee_parent["new_utxo"]], version=3)
+
+        # submitpackage(B, C) should fail
+        result = node.submitpackage([tx_0fee_parent["hex"], tx_child_violator["hex"]])
+        assert_equal(result['package_msg'], f"TRUC-violation, tx {tx_child_violator['txid']} (wtxid={tx_child_violator['wtxid']}) would have too many ancestors")
+        self.check_mempool([tx_in_mempool["txid"]])
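+
+    # Illustrative sketch, not part of the upstream test: both "too many ancestors"
+    # failures above stem from the TRUC topology rule, which caps an unconfirmed
+    # cluster at two transactions (one parent, one child). A chain A -> B -> C is
+    # therefore rejected at C whether A arrives in the package or already sits in
+    # the mempool. A hypothetical helper expressing that bound:
+    def _truc_cluster_ok(self, cluster_size):
+        """Hypothetical helper: TRUC allows at most two unconfirmed txs per cluster."""
+        return cluster_size <= 2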
+
+    @cleanup(extra_args=None)
+    def test_sibling_eviction_package(self):
+        """
+        When a transaction has a mempool sibling, it may be eligible for sibling eviction.
+        However, this option is only available in single transaction acceptance. It doesn't work in
+        a multi-testmempoolaccept (where RBF is disabled) or when doing package CPFP.
+        """
+        self.log.info("Test TRUC sibling eviction in submitpackage and multi-testmempoolaccept")
+        node = self.nodes[0]
+        # Add a parent + child to mempool
+        tx_mempool_parent = self.wallet.send_self_transfer_multi(
+            from_node=node,
+            utxos_to_spend=[self.wallet.get_utxo()],
+            num_outputs=2,
+            version=3
+        )
+        tx_mempool_sibling = self.wallet.send_self_transfer(
+            from_node=node,
+            utxo_to_spend=tx_mempool_parent["new_utxos"][0],
+            version=3
+        )
+        self.check_mempool([tx_mempool_parent["txid"], tx_mempool_sibling["txid"]])
+
+        tx_sibling_1 = self.wallet.create_self_transfer(
+            utxo_to_spend=tx_mempool_parent["new_utxos"][1],
+            version=3,
+            fee_rate=DEFAULT_FEE*100,
+        )
+        tx_has_mempool_uncle = self.wallet.create_self_transfer(utxo_to_spend=tx_sibling_1["new_utxo"], version=3)
+
+        tx_sibling_2 = self.wallet.create_self_transfer(
+            utxo_to_spend=tx_mempool_parent["new_utxos"][0],
+            version=3,
+            fee_rate=DEFAULT_FEE*200,
+        )
+
+        tx_sibling_3 = self.wallet.create_self_transfer(
+            utxo_to_spend=tx_mempool_parent["new_utxos"][1],
+            version=3,
+            fee_rate=0,
+        )
+        tx_bumps_parent_with_sibling = self.wallet.create_self_transfer(
+            utxo_to_spend=tx_sibling_3["new_utxo"],
+            version=3,
+            fee_rate=DEFAULT_FEE*300,
+        )
+
+        # Fails with another unrelated transaction via testmempoolaccept
+        tx_unrelated = self.wallet.create_self_transfer(version=3)
+        result_test_unrelated = node.testmempoolaccept([tx_sibling_1["hex"], tx_unrelated["hex"]])
+        assert_equal(result_test_unrelated[0]["reject-reason"], "TRUC-violation")
+
+        # Fails in a package via testmempoolaccept
+        result_test_1p1c = node.testmempoolaccept([tx_sibling_1["hex"], tx_has_mempool_uncle["hex"]])
+        assert_equal(result_test_1p1c[0]["reject-reason"], "TRUC-violation")
+
+        # Allowed when tx is submitted in a package and evaluated individually.
+        # Note that the child failed since it would be the 3rd generation.
+        result_package_indiv = node.submitpackage([tx_sibling_1["hex"], tx_has_mempool_uncle["hex"]])
+        self.check_mempool([tx_mempool_parent["txid"], tx_sibling_1["txid"]])
+        expected_error_gen3 = f"TRUC-violation, tx {tx_has_mempool_uncle['txid']} (wtxid={tx_has_mempool_uncle['wtxid']}) would have too many ancestors"
+
+        assert_equal(result_package_indiv["tx-results"][tx_has_mempool_uncle['wtxid']]['error'], expected_error_gen3)
+
+        # Allowed when tx is submitted in a package with in-mempool parent (which is deduplicated).
+        node.submitpackage([tx_mempool_parent["hex"], tx_sibling_2["hex"]])
+        self.check_mempool([tx_mempool_parent["txid"], tx_sibling_2["txid"]])
+
+        # Child cannot pay for sibling eviction for parent, as it violates TRUC topology limits
+        result_package_cpfp = node.submitpackage([tx_sibling_3["hex"], tx_bumps_parent_with_sibling["hex"]])
+        self.check_mempool([tx_mempool_parent["txid"], tx_sibling_2["txid"]])
+        expected_error_cpfp = f"TRUC-violation, tx {tx_mempool_parent['txid']} (wtxid={tx_mempool_parent['wtxid']}) would exceed descendant count limit"
+
+        assert_equal(result_package_cpfp["tx-results"][tx_sibling_3['wtxid']]['error'], expected_error_cpfp)
+
+
+    @cleanup(extra_args=["-datacarriersize=1000"])
+    def test_truc_package_inheritance(self):
+        self.log.info("Test that TRUC inheritance is checked within package")
+        node = self.nodes[0]
+        tx_v3_parent = self.wallet.create_self_transfer(
+            fee_rate=0,
+            target_vsize=1001,
+            version=3
+        )
+        tx_v2_child = self.wallet.create_self_transfer_multi(
+            utxos_to_spend=[tx_v3_parent["new_utxo"]],
+            fee_per_output=10000,
+            version=2
+        )
+        self.check_mempool([])
+        result = node.submitpackage([tx_v3_parent["hex"], tx_v2_child["hex"]])
+        assert_equal(result['package_msg'], f"TRUC-violation, non-version=3 tx {tx_v2_child['txid']} (wtxid={tx_v2_child['wtxid']}) cannot spend from version=3 tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']})")
+        self.check_mempool([])
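+
+    # Illustrative sketch, not part of the upstream test: the inheritance rule
+    # checked above says TRUC (version=3) and non-TRUC transactions may not spend
+    # each other while unconfirmed. A hypothetical predicate over version numbers:
+    def _truc_inheritance_ok(self, parent_version, child_version):
+        """Hypothetical helper: unconfirmed parent and child must agree on TRUC-ness."""
+        return (parent_version == 3) == (child_version == 3)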
+
+    @cleanup(extra_args=None)
+    def test_truc_in_testmempoolaccept(self):
+        node = self.nodes[0]
+
+        self.log.info("Test that TRUC inheritance is accurately assessed in testmempoolaccept")
+        tx_v2 = self.wallet.create_self_transfer(version=2)
+        tx_v2_from_v2 = self.wallet.create_self_transfer(utxo_to_spend=tx_v2["new_utxo"], version=2)
+        tx_v3_from_v2 = self.wallet.create_self_transfer(utxo_to_spend=tx_v2["new_utxo"], version=3)
+        tx_v3 = self.wallet.create_self_transfer(version=3)
+        tx_v2_from_v3 = self.wallet.create_self_transfer(utxo_to_spend=tx_v3["new_utxo"], version=2)
+        tx_v3_from_v3 = self.wallet.create_self_transfer(utxo_to_spend=tx_v3["new_utxo"], version=3)
+
+        # testmempoolaccept paths don't require child-with-parents topology. Ensure that topology
+        # assumptions aren't made in inheritance checks.
+        test_accept_v2_and_v3 = node.testmempoolaccept([tx_v2["hex"], tx_v3["hex"]])
+        assert all([result["allowed"] for result in test_accept_v2_and_v3])
+
+        test_accept_v3_from_v2 = node.testmempoolaccept([tx_v2["hex"], tx_v3_from_v2["hex"]])
+        expected_error_v3_from_v2 = f"TRUC-violation, version=3 tx {tx_v3_from_v2['txid']} (wtxid={tx_v3_from_v2['wtxid']}) cannot spend from non-version=3 tx {tx_v2['txid']} (wtxid={tx_v2['wtxid']})"
+        assert all([result["package-error"] == expected_error_v3_from_v2 for result in test_accept_v3_from_v2])
+
+        test_accept_v2_from_v3 = node.testmempoolaccept([tx_v3["hex"], tx_v2_from_v3["hex"]])
+        expected_error_v2_from_v3 = f"TRUC-violation, non-version=3 tx {tx_v2_from_v3['txid']} (wtxid={tx_v2_from_v3['wtxid']}) cannot spend from version=3 tx {tx_v3['txid']} (wtxid={tx_v3['wtxid']})"
+        assert all([result["package-error"] == expected_error_v2_from_v3 for result in test_accept_v2_from_v3])
+
+        test_accept_pairs = node.testmempoolaccept([tx_v2["hex"], tx_v3["hex"], tx_v2_from_v2["hex"], tx_v3_from_v3["hex"]])
+        assert all([result["allowed"] for result in test_accept_pairs])
+
+        self.log.info("Test that descendant violations are caught in testmempoolaccept")
+        tx_v3_independent = self.wallet.create_self_transfer(version=3)
+        tx_v3_parent = self.wallet.create_self_transfer_multi(num_outputs=2, version=3)
+        tx_v3_child_1 = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_parent["new_utxos"][0], version=3)
+        tx_v3_child_2 = self.wallet.create_self_transfer(utxo_to_spend=tx_v3_parent["new_utxos"][1], version=3)
+        test_accept_2children = node.testmempoolaccept([tx_v3_parent["hex"], tx_v3_child_1["hex"], tx_v3_child_2["hex"]])
+        expected_error_2children = f"TRUC-violation, tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']}) would exceed descendant count limit"
+        assert all([result["package-error"] == expected_error_2children for result in test_accept_2children])
+
+        # Extra TRUC transaction does not get incorrectly marked as extra descendant
+        test_accept_1child_with_extra = node.testmempoolaccept([tx_v3_parent["hex"], tx_v3_child_1["hex"], tx_v3_independent["hex"]])
+        assert all([result["allowed"] for result in test_accept_1child_with_extra])
+
+        # Extra TRUC transaction does not make us ignore the extra descendant
+        test_accept_2children_with_extra = node.testmempoolaccept([tx_v3_parent["hex"], tx_v3_child_1["hex"], tx_v3_child_2["hex"], tx_v3_independent["hex"]])
+        expected_error_extra = f"TRUC-violation, tx {tx_v3_parent['txid']} (wtxid={tx_v3_parent['wtxid']}) would exceed descendant count limit"
+        assert all([result["package-error"] == expected_error_extra for result in test_accept_2children_with_extra])
+        # Same result if the parent is already in mempool
+        node.sendrawtransaction(tx_v3_parent["hex"])
+        test_accept_2children_with_in_mempool_parent = node.testmempoolaccept([tx_v3_child_1["hex"], tx_v3_child_2["hex"]])
+        assert all([result["package-error"] == expected_error_extra for result in test_accept_2children_with_in_mempool_parent])
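+
+    # Illustrative sketch, not part of the upstream test: the descendant-count
+    # failures above are the mirror image of the ancestor rule, i.e. a TRUC parent
+    # may have at most one unconfirmed child, no matter how the candidates are
+    # split between the package and the mempool. A hypothetical predicate:
+    def _truc_child_count_ok(self, num_unconfirmed_children):
+        """Hypothetical helper: a TRUC parent tolerates at most one unconfirmed child."""
+        return num_unconfirmed_children <= 1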
+
+    @cleanup(extra_args=None)
+    def test_reorg_2child_rbf(self):
+        node = self.nodes[0]
+        self.log.info("Test that children of a TRUC transaction can be replaced individually, even if there are multiple due to reorg")
+
+        ancestor_tx = self.wallet.send_self_transfer_multi(from_node=node, num_outputs=2, version=3)
+        self.check_mempool([ancestor_tx["txid"]])
+
+        block = self.generate(node, 1)[0]
+        self.check_mempool([])
+
+        child_1 = self.wallet.send_self_transfer(from_node=node, version=3, utxo_to_spend=ancestor_tx["new_utxos"][0])
+        child_2 = self.wallet.send_self_transfer(from_node=node, version=3, utxo_to_spend=ancestor_tx["new_utxos"][1])
+        self.check_mempool([child_1["txid"], child_2["txid"]])
+
+        self.generate(node, 1)
+        self.check_mempool([])
+
+        # Create a reorg, causing ancestor_tx to exceed the 1-child limit
+        node.invalidateblock(block)
+        self.check_mempool([ancestor_tx["txid"], child_1["txid"], child_2["txid"]])
+        assert_equal(node.getmempoolentry(ancestor_tx["txid"])["descendantcount"], 3)
+
+        # Create a replacement of child_1. It does not conflict with child_2.
+        child_1_conflict = self.wallet.send_self_transfer(from_node=node, version=3, utxo_to_spend=ancestor_tx["new_utxos"][0], fee_rate=Decimal("0.01"))
+
+        # Ensure child_1 and child_1_conflict are different transactions
+        assert_not_equal(child_1_conflict["txid"], child_1["txid"])
+        self.check_mempool([ancestor_tx["txid"], child_1_conflict["txid"], child_2["txid"]])
+        assert_equal(node.getmempoolentry(ancestor_tx["txid"])["descendantcount"], 3)
+
+    @cleanup(extra_args=None)
+    def test_truc_sibling_eviction(self):
+        self.log.info("Test sibling eviction for TRUC")
+        node = self.nodes[0]
+        tx_v3_parent = self.wallet.send_self_transfer_multi(from_node=node, num_outputs=2, version=3)
+        # This is the sibling to replace
+        tx_v3_child_1 = self.wallet.send_self_transfer(
+            from_node=node, utxo_to_spend=tx_v3_parent["new_utxos"][0], fee_rate=DEFAULT_FEE * 2, version=3
+        )
+        assert tx_v3_child_1["txid"] in node.getrawmempool()
+
+        self.log.info("Test tx must be higher feerate than sibling to evict it")
+        tx_v3_child_2_rule6 = self.wallet.create_self_transfer(
+            utxo_to_spend=tx_v3_parent["new_utxos"][1], fee_rate=DEFAULT_FEE, version=3
+        )
+        rule6_str = f"insufficient fee (including sibling eviction), rejecting replacement {tx_v3_child_2_rule6['txid']}; new feerate"
+        assert_raises_rpc_error(-26, rule6_str, node.sendrawtransaction, tx_v3_child_2_rule6["hex"])
+        self.check_mempool([tx_v3_parent['txid'], tx_v3_child_1['txid']])
+
+        self.log.info("Test tx must meet absolute fee rules to evict sibling")
+        tx_v3_child_2_rule4 = self.wallet.create_self_transfer(
+            utxo_to_spend=tx_v3_parent["new_utxos"][1], fee_rate=2 * DEFAULT_FEE + Decimal("0.00000001"), version=3
+        )
+        rule4_str = f"insufficient fee (including sibling eviction), rejecting replacement {tx_v3_child_2_rule4['txid']}, not enough additional fees to relay"
+        assert_raises_rpc_error(-26, rule4_str, node.sendrawtransaction, tx_v3_child_2_rule4["hex"])
+        self.check_mempool([tx_v3_parent['txid'], tx_v3_child_1['txid']])
+
+        self.log.info("Test tx cannot cause more than 100 evictions including RBF and sibling eviction")
+        # First add 4 groups of 25 transactions.
+        utxos_for_conflict = []
+        txids_v2_100 = []
+        for _ in range(4):
+            confirmed_utxo = self.wallet.get_utxo(confirmed_only=True)
+            utxos_for_conflict.append(confirmed_utxo)
+            # 25 is within descendant limits
+            chain_length = int(MAX_REPLACEMENT_CANDIDATES / 4)
+            chain = self.wallet.create_self_transfer_chain(chain_length=chain_length, utxo_to_spend=confirmed_utxo)
+            for item in chain:
+                txids_v2_100.append(item["txid"])
+                node.sendrawtransaction(item["hex"])
+        self.check_mempool(txids_v2_100 + [tx_v3_parent["txid"], tx_v3_child_1["txid"]])
+
+        # Replacing 100 transactions is fine
+        tx_v3_replacement_only = self.wallet.create_self_transfer_multi(utxos_to_spend=utxos_for_conflict, fee_per_output=4000000)
+        # Override maxfeerate - it costs a lot to replace these 100 transactions.
+        assert node.testmempoolaccept([tx_v3_replacement_only["hex"]], maxfeerate=0)[0]["allowed"]
+        # Adding another one exceeds the limit.
+        utxos_for_conflict.append(tx_v3_parent["new_utxos"][1])
+        tx_v3_child_2_rule5 = self.wallet.create_self_transfer_multi(utxos_to_spend=utxos_for_conflict, fee_per_output=4000000, version=3)
+        rule5_str = f"too many potential replacements (including sibling eviction), rejecting replacement {tx_v3_child_2_rule5['txid']}; too many potential replacements (101 > 100)"
+        assert_raises_rpc_error(-26, rule5_str, node.sendrawtransaction, tx_v3_child_2_rule5["hex"])
+        self.check_mempool(txids_v2_100 + [tx_v3_parent["txid"], tx_v3_child_1["txid"]])
+
+        self.log.info("Test sibling eviction is successful if it meets all RBF rules")
+        tx_v3_child_2 = self.wallet.create_self_transfer(
+            utxo_to_spend=tx_v3_parent["new_utxos"][1], fee_rate=DEFAULT_FEE*10, version=3
+        )
+        node.sendrawtransaction(tx_v3_child_2["hex"])
+        self.check_mempool(txids_v2_100 + [tx_v3_parent["txid"], tx_v3_child_2["txid"]])
+
+        self.log.info("Test that it's possible to do a sibling eviction and RBF at the same time")
+        utxo_unrelated_conflict = self.wallet.get_utxo(confirmed_only=True)
+        tx_unrelated_replacee = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=utxo_unrelated_conflict)
+        assert tx_unrelated_replacee["txid"] in node.getrawmempool()
+
+        fee_to_beat = max(int(tx_v3_child_2["fee"] * COIN), int(tx_unrelated_replacee["fee"]*COIN))
+
+        tx_v3_child_3 = self.wallet.create_self_transfer_multi(
+            utxos_to_spend=[tx_v3_parent["new_utxos"][0], utxo_unrelated_conflict], fee_per_output=fee_to_beat*2, version=3
+        )
+        node.sendrawtransaction(tx_v3_child_3["hex"])
+        self.check_mempool(txids_v2_100 + [tx_v3_parent["txid"], tx_v3_child_3["txid"]])
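+
+    # Illustrative sketch, not part of the upstream test: sibling eviction reuses
+    # the ordinary RBF checks exercised above, so a replacement must beat the
+    # evicted sibling on feerate (rule 6), pay for its own relay on top of the
+    # replaced fees (rule 4), and conflict with at most 100 entries (rule 5).
+    # A hypothetical rule-4 bound, with fees in satoshis and sizes in vbytes,
+    # assuming the default incremental relay feerate of 1 sat/vB:
+    def _min_rule4_fee(self, replaced_fees_sat, replacement_vsize):
+        """Hypothetical helper: smallest absolute fee a replacement may pay."""
+        return sum(replaced_fees_sat) + replacement_vsize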
+
+    @cleanup(extra_args=None)
+    def test_reorg_sibling_eviction_1p2c(self):
+        node = self.nodes[0]
+        self.log.info("Test that sibling eviction is not allowed when multiple siblings exist")
+
+        tx_with_multi_children = self.wallet.send_self_transfer_multi(from_node=node, num_outputs=3, version=3, confirmed_only=True)
+        self.check_mempool([tx_with_multi_children["txid"]])
+
+        block_to_disconnect = self.generate(node, 1)[0]
+        self.check_mempool([])
+
+        tx_with_sibling1 = self.wallet.send_self_transfer(from_node=node, version=3, utxo_to_spend=tx_with_multi_children["new_utxos"][0])
+        tx_with_sibling2 = self.wallet.send_self_transfer(from_node=node, version=3, utxo_to_spend=tx_with_multi_children["new_utxos"][1])
+        self.check_mempool([tx_with_sibling1["txid"], tx_with_sibling2["txid"]])
+
+        # Create a reorg, bringing tx_with_multi_children back into the mempool with a descendant count of 3.
+        node.invalidateblock(block_to_disconnect)
+        self.check_mempool([tx_with_multi_children["txid"], tx_with_sibling1["txid"], tx_with_sibling2["txid"]])
+        assert_equal(node.getmempoolentry(tx_with_multi_children["txid"])["descendantcount"], 3)
+
+        # Sibling eviction is not allowed because there are two siblings
+        tx_with_sibling3 = self.wallet.create_self_transfer(
+            version=3,
+            utxo_to_spend=tx_with_multi_children["new_utxos"][2],
+            fee_rate=DEFAULT_FEE*50
+        )
+        expected_error_2siblings = f"TRUC-violation, tx {tx_with_multi_children['txid']} (wtxid={tx_with_multi_children['wtxid']}) would exceed descendant count limit"
+        assert_raises_rpc_error(-26, expected_error_2siblings, node.sendrawtransaction, tx_with_sibling3["hex"])
+
+        # However, an RBF (with conflicting inputs) is possible even if the resulting cluster size exceeds 2
+        tx_with_sibling3_rbf = self.wallet.send_self_transfer(
+            from_node=node,
+            version=3,
+            utxo_to_spend=tx_with_multi_children["new_utxos"][0],
+            fee_rate=DEFAULT_FEE*50
+        )
+        self.check_mempool([tx_with_multi_children["txid"], tx_with_sibling3_rbf["txid"], tx_with_sibling2["txid"]])
+
+
+    def run_test(self):
+        self.log.info("Generate blocks to create UTXOs")
+        node = self.nodes[0]
+        self.wallet = MiniWallet(node)
+        self.generate(self.wallet, 120)
+        self.test_truc_max_vsize()
+        self.test_truc_acceptance()
+        self.test_truc_replacement()
+        self.test_truc_reorg()
+        self.test_nondefault_package_limits()
+        self.test_truc_ancestors_package()
+        self.test_truc_ancestors_package_and_mempool()
+        self.test_sibling_eviction_package()
+        self.test_truc_package_inheritance()
+        self.test_truc_in_testmempoolaccept()
+        self.test_reorg_2child_rbf()
+        self.test_truc_sibling_eviction()
+        self.test_reorg_sibling_eviction_1p2c()
+
+
+if __name__ == "__main__":
+    MempoolTRUC(__file__).main()
diff --git a/test/functional/mining_prioritisetransaction.py b/test/functional/mining_prioritisetransaction.py index c83d5c05a4b325..e7f85c3f9ccce7 100755 --- a/test/functional/mining_prioritisetransaction.py +++ b/test/functional/mining_prioritisetransaction.py @@ -1,309 +1,309 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2022 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test the prioritisetransaction mining RPC.""" - -from decimal import Decimal -import time - -from test_framework.messages import ( - COIN, - MAX_BLOCK_WEIGHT, -) -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_not_equal, - assert_equal, - assert_raises_rpc_error, - create_lots_of_big_transactions, - gen_return_txouts, -) -from test_framework.wallet import MiniWallet - - -class PrioritiseTransactionTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.extra_args = [[ - "-printpriority=1", - "-datacarriersize=100000", - ]] * self.num_nodes - self.supports_cli = False - - def clear_prioritisation(self, node): - for txid, info in node.getprioritisedtransactions().items(): - delta = info["fee_delta"] - node.prioritisetransaction(txid, 0, -delta) - assert_equal(node.getprioritisedtransactions(), {}) - - def test_replacement(self): - self.log.info("Test tx prioritisation stays after a tx is replaced") - conflicting_input = self.wallet.get_utxo() - tx_replacee = self.wallet.create_self_transfer(utxo_to_spend=conflicting_input, fee_rate=Decimal("0.0001")) - tx_replacement = self.wallet.create_self_transfer(utxo_to_spend=conflicting_input, fee_rate=Decimal("0.005")) - # Add 1 satoshi fee delta to replacee - self.nodes[0].prioritisetransaction(tx_replacee["txid"], 0, 100) - assert_equal(self.nodes[0].getprioritisedtransactions(), { tx_replacee["txid"] : { "fee_delta" : 100, "in_mempool" : False}}) - self.nodes[0].sendrawtransaction(tx_replacee["hex"]) - assert_equal(self.nodes[0].getprioritisedtransactions(), { tx_replacee["txid"] : { "fee_delta" : 100, "in_mempool" : True, "modified_fee": int(tx_replacee["fee"] * COIN + 100)}}) - self.nodes[0].sendrawtransaction(tx_replacement["hex"]) - assert tx_replacee["txid"] not in self.nodes[0].getrawmempool() - assert_equal(self.nodes[0].getprioritisedtransactions(), { tx_replacee["txid"] : { "fee_delta" : 100, "in_mempool" : False}}) - - # PrioritiseTransaction is additive - self.nodes[0].prioritisetransaction(tx_replacee["txid"], 0, COIN) - self.nodes[0].sendrawtransaction(tx_replacee["hex"]) - assert_equal(self.nodes[0].getprioritisedtransactions(), { tx_replacee["txid"] : { "fee_delta" : COIN + 100, "in_mempool" : True, "modified_fee": int(tx_replacee["fee"] * COIN + COIN + 100)}}) - self.generate(self.nodes[0], 1) - assert_equal(self.nodes[0].getprioritisedtransactions(), {}) - - def test_diamond(self): - self.log.info("Test diamond-shape package with priority") - mock_time = int(time.time()) - self.nodes[0].setmocktime(mock_time) - - # tx_a - # / \ - # / \ - # tx_b tx_c - # \ / - # \ / - # tx_d - - tx_o_a = self.wallet.send_self_transfer_multi( - from_node=self.nodes[0], - num_outputs=2, - ) - txid_a = tx_o_a["txid"] - - tx_o_b, tx_o_c = [self.wallet.send_self_transfer( - from_node=self.nodes[0], - utxo_to_spend=u, - ) for u in tx_o_a["new_utxos"]] - txid_b = tx_o_b["txid"] - txid_c = tx_o_c["txid"] - - tx_o_d = self.wallet.send_self_transfer_multi( - from_node=self.nodes[0], - utxos_to_spend=[ - self.wallet.get_utxo(txid=txid_b), - self.wallet.get_utxo(txid=txid_c), - ], - ) - txid_d = tx_o_d["txid"] - - self.log.info("Test priority while txs are in mempool") - raw_before = self.nodes[0].getrawmempool(verbose=True) - fee_delta_b = Decimal(9999) / COIN - fee_delta_c_1 = Decimal(-1234) / COIN - fee_delta_c_2 = Decimal(8888) / COIN - self.nodes[0].prioritisetransaction(txid=txid_b, fee_delta=int(fee_delta_b * COIN)) - 
self.nodes[0].prioritisetransaction(txid=txid_c, fee_delta=int(fee_delta_c_1 * COIN)) - self.nodes[0].prioritisetransaction(txid=txid_c, fee_delta=int(fee_delta_c_2 * COIN)) - raw_before[txid_a]["fees"]["descendant"] += fee_delta_b + fee_delta_c_1 + fee_delta_c_2 - raw_before[txid_b]["fees"]["modified"] += fee_delta_b - raw_before[txid_b]["fees"]["ancestor"] += fee_delta_b - raw_before[txid_b]["fees"]["descendant"] += fee_delta_b - raw_before[txid_c]["fees"]["modified"] += fee_delta_c_1 + fee_delta_c_2 - raw_before[txid_c]["fees"]["ancestor"] += fee_delta_c_1 + fee_delta_c_2 - raw_before[txid_c]["fees"]["descendant"] += fee_delta_c_1 + fee_delta_c_2 - raw_before[txid_d]["fees"]["ancestor"] += fee_delta_b + fee_delta_c_1 + fee_delta_c_2 - raw_after = self.nodes[0].getrawmempool(verbose=True) - assert_equal(raw_before[txid_a], raw_after[txid_a]) - assert_equal(raw_before, raw_after) - assert_equal(self.nodes[0].getprioritisedtransactions(), {txid_b: {"fee_delta" : fee_delta_b*COIN, "in_mempool" : True, "modified_fee": int(fee_delta_b*COIN + COIN * tx_o_b["fee"])}, txid_c: {"fee_delta" : (fee_delta_c_1 + fee_delta_c_2)*COIN, "in_mempool" : True, "modified_fee": int((fee_delta_c_1 + fee_delta_c_2 ) * COIN + COIN * tx_o_c["fee"])}}) - # Clear prioritisation, otherwise the transactions' fee deltas are persisted to mempool.dat and loaded again when the node - # is restarted at the end of this subtest. Deltas are removed when a transaction is mined, but only at that time. We do - # not check whether mapDeltas transactions were mined when loading from mempool.dat. - self.clear_prioritisation(node=self.nodes[0]) - - self.log.info("Test priority while txs are not in mempool") - self.restart_node(0, extra_args=["-nopersistmempool"]) - self.nodes[0].setmocktime(mock_time) - assert_equal(self.nodes[0].getmempoolinfo()["size"], 0) - self.nodes[0].prioritisetransaction(txid=txid_b, fee_delta=int(fee_delta_b * COIN)) - self.nodes[0].prioritisetransaction(txid=txid_c, fee_delta=int(fee_delta_c_1 * COIN)) - self.nodes[0].prioritisetransaction(txid=txid_c, fee_delta=int(fee_delta_c_2 * COIN)) - assert_equal(self.nodes[0].getprioritisedtransactions(), {txid_b: {"fee_delta" : fee_delta_b*COIN, "in_mempool" : False}, txid_c: {"fee_delta" : (fee_delta_c_1 + fee_delta_c_2)*COIN, "in_mempool" : False}}) - for t in [tx_o_a["hex"], tx_o_b["hex"], tx_o_c["hex"], tx_o_d["hex"]]: - self.nodes[0].sendrawtransaction(t) - raw_after = self.nodes[0].getrawmempool(verbose=True) - assert_equal(raw_before[txid_a], raw_after[txid_a]) - assert_equal(raw_before, raw_after) - assert_equal(self.nodes[0].getprioritisedtransactions(), {txid_b: {"fee_delta" : fee_delta_b*COIN, "in_mempool" : True, "modified_fee": int(fee_delta_b*COIN + COIN * tx_o_b["fee"])}, txid_c: {"fee_delta" : (fee_delta_c_1 + fee_delta_c_2)*COIN, "in_mempool" : True, "modified_fee": int((fee_delta_c_1 + fee_delta_c_2 ) * COIN + COIN * tx_o_c["fee"])}}) - - # Clear mempool - self.generate(self.nodes[0], 1) - # Prioritisation for transactions is automatically deleted after they are mined. 
- assert_equal(self.nodes[0].getprioritisedtransactions(), {}) - - # Use default extra_args - self.restart_node(0) - assert_equal(self.nodes[0].getprioritisedtransactions(), {}) - - def run_test(self): - self.wallet = MiniWallet(self.nodes[0]) - - # Test `prioritisetransaction` required parameters - assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction) - assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '') - assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0) - - # Test `prioritisetransaction` invalid extra parameters - assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0, 0, 0) - - # Test `getprioritisedtransactions` invalid parameters - assert_raises_rpc_error(-1, "getprioritisedtransactions", - self.nodes[0].getprioritisedtransactions, True) - - # Test `prioritisetransaction` invalid `txid` - assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].prioritisetransaction, txid='foo', fee_delta=0) - assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000')", self.nodes[0].prioritisetransaction, txid='Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000', fee_delta=0) - - # Test `prioritisetransaction` invalid `dummy` - txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000' - assert_raises_rpc_error(-3, "JSON value of type string is not of expected type number", self.nodes[0].prioritisetransaction, txid, 'foo', 0) - assert_raises_rpc_error(-8, "Priority is no longer supported, dummy argument to prioritisetransaction must be 0.", self.nodes[0].prioritisetransaction, txid, 1, 0) - - # Test `prioritisetransaction` invalid `fee_delta` - assert_raises_rpc_error(-3, "JSON value of type string is not of expected type number", self.nodes[0].prioritisetransaction, txid=txid, fee_delta='foo') - - self.test_replacement() - self.test_diamond() - - self.txouts = gen_return_txouts() - self.relayfee = self.nodes[0].getnetworkinfo()['relayfee'] - - utxo_count = 90 - utxos = self.wallet.send_self_transfer_multi(from_node=self.nodes[0], num_outputs=utxo_count)['new_utxos'] - self.generate(self.wallet, 1) - assert_equal(len(self.nodes[0].getrawmempool()), 0) - - base_fee = self.relayfee*100 # our transactions are smaller than 100kb - txids = [] - - # Create 3 batches of transactions at 3 different fee rate levels - range_size = utxo_count // 3 - for i in range(3): - txids.append([]) - start_range = i * range_size - end_range = start_range + range_size - txids[i] = create_lots_of_big_transactions( - self.wallet, - self.nodes[0], - (i+1) * base_fee, - end_range - start_range, - self.txouts, - utxos[start_range:end_range]) - - # Make sure that the size of each group of transactions exceeds - # MAX_BLOCK_WEIGHT // 4 -- otherwise the test needs to be revised to - # create more transactions. 
- mempool = self.nodes[0].getrawmempool(True) - sizes = [0, 0, 0] - for i in range(3): - for j in txids[i]: - assert j in mempool - sizes[i] += mempool[j]['vsize'] - assert sizes[i] > MAX_BLOCK_WEIGHT // 4 # Fail => raise utxo_count - - assert_equal(self.nodes[0].getprioritisedtransactions(), {}) - # add a fee delta to something in the cheapest bucket and make sure it gets mined - # also check that a different entry in the cheapest bucket is NOT mined - self.nodes[0].prioritisetransaction(txid=txids[0][0], fee_delta=int(3*base_fee*COIN)) - assert_equal(self.nodes[0].getprioritisedtransactions(), {txids[0][0] : { "fee_delta" : 3*base_fee*COIN, "in_mempool" : True, "modified_fee": int(3*base_fee*COIN + COIN * 1 * base_fee)}}) - - # Priority disappears when prioritisetransaction is called with an inverse value... - self.nodes[0].prioritisetransaction(txid=txids[0][0], fee_delta=int(-3*base_fee*COIN)) - assert txids[0][0] not in self.nodes[0].getprioritisedtransactions() - # ... and reappears when prioritisetransaction is called again. - self.nodes[0].prioritisetransaction(txid=txids[0][0], fee_delta=int(3*base_fee*COIN)) - assert txids[0][0] in self.nodes[0].getprioritisedtransactions() - - self.generate(self.nodes[0], 1) - - mempool = self.nodes[0].getrawmempool() - self.log.info("Assert that prioritised transaction was mined") - assert txids[0][0] not in mempool - assert txids[0][1] in mempool - - high_fee_tx = None - for x in txids[2]: - if x not in mempool: - high_fee_tx = x - - # Something high-fee should have been mined! - assert high_fee_tx is not None - - # Add a prioritisation before a tx is in the mempool (de-prioritising a - # high-fee transaction so that it's now low fee). - self.nodes[0].prioritisetransaction(txid=high_fee_tx, fee_delta=-int(2*base_fee*COIN)) - assert_equal(self.nodes[0].getprioritisedtransactions()[high_fee_tx], { "fee_delta" : -2*base_fee*COIN, "in_mempool" : False}) - - # Add everything back to mempool - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - - # Check to make sure our high fee rate tx is back in the mempool - mempool = self.nodes[0].getrawmempool() - assert high_fee_tx in mempool - - # Now verify the modified-high feerate transaction isn't mined before - # the other high fee transactions. Keep mining until our mempool has - # decreased by all the high fee size that we calculated above. - while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]): - self.generate(self.nodes[0], 1, sync_fun=self.no_op) - - # High fee transaction should not have been mined, but other high fee rate - # transactions should have been. - mempool = self.nodes[0].getrawmempool() - self.log.info("Assert that de-prioritised transaction is still in mempool") - assert high_fee_tx in mempool - assert_equal(self.nodes[0].getprioritisedtransactions()[high_fee_tx], { "fee_delta" : -2*base_fee*COIN, "in_mempool" : True, "modified_fee": int(-2*base_fee*COIN + COIN * 3 * base_fee)}) - for x in txids[2]: - if (x != high_fee_tx): - assert x not in mempool - - - self.log.info("Assert that 0 delta is never added to mapDeltas") - tx_id_zero_del = self.wallet.create_self_transfer()['txid'] - self.nodes[0].prioritisetransaction(txid=tx_id_zero_del, fee_delta=0) - assert tx_id_zero_del not in self.nodes[0].getprioritisedtransactions() - - # Create a free transaction. Should be rejected. 
- tx_res = self.wallet.create_self_transfer(fee_rate=0) - tx_hex = tx_res['hex'] - tx_id = tx_res['txid'] - - # This will raise an exception due to min relay fee not being met - assert_raises_rpc_error(-26, "min relay fee not met", self.nodes[0].sendrawtransaction, tx_hex) - assert tx_id not in self.nodes[0].getrawmempool() - - # This is a less than 1000-byte transaction, so just set the fee - # to be the minimum for a 1000-byte transaction and check that it is - # accepted. - self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=int(self.relayfee*COIN)) - assert_equal(self.nodes[0].getprioritisedtransactions()[tx_id], { "fee_delta" : self.relayfee*COIN, "in_mempool" : False}) - - self.log.info("Assert that prioritised free transaction is accepted to mempool") - assert_equal(self.nodes[0].sendrawtransaction(tx_hex), tx_id) - assert tx_id in self.nodes[0].getrawmempool() - assert_equal(self.nodes[0].getprioritisedtransactions()[tx_id], { "fee_delta" : self.relayfee*COIN, "in_mempool" : True, "modified_fee": int(self.relayfee*COIN + COIN * tx_res["fee"])}) - - # Test that calling prioritisetransaction is sufficient to trigger - # getblocktemplate to (eventually) return a new block. - mock_time = int(time.time()) - self.nodes[0].setmocktime(mock_time) - template = self.nodes[0].getblocktemplate({'rules': ['segwit']}) - self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=-int(self.relayfee*COIN)) - - # Calling prioritisetransaction with the inverse amount should delete its prioritisation entry - assert tx_id not in self.nodes[0].getprioritisedtransactions() - - self.nodes[0].setmocktime(mock_time+10) - new_template = self.nodes[0].getblocktemplate({'rules': ['segwit']}) - - assert template != new_template - -if __name__ == '__main__': - PrioritiseTransactionTest(__file__).main()
+#!/usr/bin/env python3
+# Copyright (c) 2015-2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the prioritisetransaction mining RPC."""
+
+from decimal import Decimal
+import time
+
+from test_framework.messages import (
+    COIN,
+    MAX_BLOCK_WEIGHT,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+    assert_not_equal,
+    assert_equal,
+    assert_raises_rpc_error,
+    create_lots_of_big_transactions,
+    gen_return_txouts,
+)
+from test_framework.wallet import MiniWallet
+
+
+class PrioritiseTransactionTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.num_nodes = 1
+        self.extra_args = [[
+            "-printpriority=1",
+            "-datacarriersize=100000",
+        ]] * self.num_nodes
+        self.supports_cli = False
+
+    def clear_prioritisation(self, node):
+        for txid, info in node.getprioritisedtransactions().items():
+            delta = info["fee_delta"]
+            node.prioritisetransaction(txid, 0, -delta)
+        assert_equal(node.getprioritisedtransactions(), {})
+
+    def test_replacement(self):
+        self.log.info("Test tx prioritisation stays after a tx is replaced")
+        conflicting_input = self.wallet.get_utxo()
+        tx_replacee = self.wallet.create_self_transfer(utxo_to_spend=conflicting_input, fee_rate=Decimal("0.0001"))
+        tx_replacement = self.wallet.create_self_transfer(utxo_to_spend=conflicting_input, fee_rate=Decimal("0.005"))
+        # Add 1 satoshi fee delta to replacee
+        self.nodes[0].prioritisetransaction(tx_replacee["txid"], 0, 100)
+        assert_equal(self.nodes[0].getprioritisedtransactions(), { tx_replacee["txid"] : { "fee_delta" : 100, "in_mempool" : False}})
+        self.nodes[0].sendrawtransaction(tx_replacee["hex"])
+        assert_equal(self.nodes[0].getprioritisedtransactions(), { tx_replacee["txid"] : { "fee_delta" : 100, "in_mempool" : True, "modified_fee": int(tx_replacee["fee"] * COIN + 100)}})
+        self.nodes[0].sendrawtransaction(tx_replacement["hex"])
+        assert tx_replacee["txid"] not in self.nodes[0].getrawmempool()
+        assert_equal(self.nodes[0].getprioritisedtransactions(), { tx_replacee["txid"] : { "fee_delta" : 100, "in_mempool" : False}})
+
+        # PrioritiseTransaction is additive
+        self.nodes[0].prioritisetransaction(tx_replacee["txid"], 0, COIN)
+        self.nodes[0].sendrawtransaction(tx_replacee["hex"])
+        assert_equal(self.nodes[0].getprioritisedtransactions(), { tx_replacee["txid"] : { "fee_delta" : COIN + 100, "in_mempool" : True, "modified_fee": int(tx_replacee["fee"] * COIN + COIN + 100)}})
+        self.generate(self.nodes[0], 1)
+        assert_equal(self.nodes[0].getprioritisedtransactions(), {})
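+
+    # Illustrative sketch, not part of the upstream test: deltas are additive, and
+    # the modified_fee reported above is just the base fee plus the running sum of
+    # deltas, all in satoshis. A hypothetical helper mirroring that bookkeeping:
+    def _expected_modified_fee(self, base_fee_sat, deltas_sat):
+        """Hypothetical helper: modified fee after a series of prioritisations."""
+        return base_fee_sat + sum(deltas_sat)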
+
+    def test_diamond(self):
+        self.log.info("Test diamond-shape package with priority")
+        mock_time = int(time.time())
+        self.nodes[0].setmocktime(mock_time)
+
+        #      tx_a
+        #      / \
+        #     /   \
+        #  tx_b   tx_c
+        #     \   /
+        #      \ /
+        #      tx_d
+
+        tx_o_a = self.wallet.send_self_transfer_multi(
+            from_node=self.nodes[0],
+            num_outputs=2,
+        )
+        txid_a = tx_o_a["txid"]
+
+        tx_o_b, tx_o_c = [self.wallet.send_self_transfer(
+            from_node=self.nodes[0],
+            utxo_to_spend=u,
+        ) for u in tx_o_a["new_utxos"]]
+        txid_b = tx_o_b["txid"]
+        txid_c = tx_o_c["txid"]
+
+        tx_o_d = self.wallet.send_self_transfer_multi(
+            from_node=self.nodes[0],
+            utxos_to_spend=[
+                self.wallet.get_utxo(txid=txid_b),
+                self.wallet.get_utxo(txid=txid_c),
+            ],
+        )
+        txid_d = tx_o_d["txid"]
+
+        self.log.info("Test priority while txs are in mempool")
+        raw_before = self.nodes[0].getrawmempool(verbose=True)
+        fee_delta_b = Decimal(9999) / COIN
+        fee_delta_c_1 = Decimal(-1234) / COIN
+        fee_delta_c_2 = Decimal(8888) / COIN
+        self.nodes[0].prioritisetransaction(txid=txid_b, fee_delta=int(fee_delta_b * COIN))
+        self.nodes[0].prioritisetransaction(txid=txid_c, fee_delta=int(fee_delta_c_1 * COIN))
+        self.nodes[0].prioritisetransaction(txid=txid_c, fee_delta=int(fee_delta_c_2 * COIN))
+        raw_before[txid_a]["fees"]["descendant"] += fee_delta_b + fee_delta_c_1 + fee_delta_c_2
+        raw_before[txid_b]["fees"]["modified"] += fee_delta_b
+        raw_before[txid_b]["fees"]["ancestor"] += fee_delta_b
+        raw_before[txid_b]["fees"]["descendant"] += fee_delta_b
+        raw_before[txid_c]["fees"]["modified"] += fee_delta_c_1 + fee_delta_c_2
+        raw_before[txid_c]["fees"]["ancestor"] += fee_delta_c_1 + fee_delta_c_2
+        raw_before[txid_c]["fees"]["descendant"] += fee_delta_c_1 + fee_delta_c_2
+        raw_before[txid_d]["fees"]["ancestor"] += fee_delta_b + fee_delta_c_1 + fee_delta_c_2
+        raw_after = self.nodes[0].getrawmempool(verbose=True)
+        assert_equal(raw_before[txid_a], raw_after[txid_a])
+        assert_equal(raw_before, raw_after)
+        assert_equal(self.nodes[0].getprioritisedtransactions(), {txid_b: {"fee_delta" : fee_delta_b*COIN, "in_mempool" : True, "modified_fee": int(fee_delta_b*COIN + COIN * tx_o_b["fee"])}, txid_c: {"fee_delta" : (fee_delta_c_1 + fee_delta_c_2)*COIN, "in_mempool" : True, "modified_fee": int((fee_delta_c_1 + fee_delta_c_2) * COIN + COIN * tx_o_c["fee"])}})
+        # Clear prioritisation, otherwise the transactions' fee deltas are persisted to mempool.dat and loaded again when the node
+        # is restarted at the end of this subtest. Deltas are removed when a transaction is mined, but only at that time. We do
+        # not check whether mapDeltas transactions were mined when loading from mempool.dat.
+        self.clear_prioritisation(node=self.nodes[0])
+
+        self.log.info("Test priority while txs are not in mempool")
+        self.restart_node(0, extra_args=["-nopersistmempool"])
+        self.nodes[0].setmocktime(mock_time)
+        assert_equal(self.nodes[0].getmempoolinfo()["size"], 0)
+        self.nodes[0].prioritisetransaction(txid=txid_b, fee_delta=int(fee_delta_b * COIN))
+        self.nodes[0].prioritisetransaction(txid=txid_c, fee_delta=int(fee_delta_c_1 * COIN))
+        self.nodes[0].prioritisetransaction(txid=txid_c, fee_delta=int(fee_delta_c_2 * COIN))
+        assert_equal(self.nodes[0].getprioritisedtransactions(), {txid_b: {"fee_delta" : fee_delta_b*COIN, "in_mempool" : False}, txid_c: {"fee_delta" : (fee_delta_c_1 + fee_delta_c_2)*COIN, "in_mempool" : False}})
+        for t in [tx_o_a["hex"], tx_o_b["hex"], tx_o_c["hex"], tx_o_d["hex"]]:
+            self.nodes[0].sendrawtransaction(t)
+        raw_after = self.nodes[0].getrawmempool(verbose=True)
+        assert_equal(raw_before[txid_a], raw_after[txid_a])
+        assert_equal(raw_before, raw_after)
+        assert_equal(self.nodes[0].getprioritisedtransactions(), {txid_b: {"fee_delta" : fee_delta_b*COIN, "in_mempool" : True, "modified_fee": int(fee_delta_b*COIN + COIN * tx_o_b["fee"])}, txid_c: {"fee_delta" : (fee_delta_c_1 + fee_delta_c_2)*COIN, "in_mempool" : True, "modified_fee": int((fee_delta_c_1 + fee_delta_c_2) * COIN + COIN * tx_o_c["fee"])}})
+
+        # Clear mempool
+        self.generate(self.nodes[0], 1)
+        # Prioritisation for transactions is automatically deleted after they are mined.
+        assert_equal(self.nodes[0].getprioritisedtransactions(), {})
+
+        # Use default extra_args
+        self.restart_node(0)
+        assert_equal(self.nodes[0].getprioritisedtransactions(), {})
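+
+    # Illustrative sketch, not part of the upstream test: as the bookkeeping in
+    # test_diamond shows, a delta on one entry shifts its own "modified",
+    # "ancestor" and "descendant" fees, while relatives see it only in the one
+    # package total that contains the prioritised tx. A hypothetical helper for
+    # the prioritised entry itself:
+    def _shift_own_fees(self, fees, delta_btc):
+        """Hypothetical helper: apply a delta to an entry's own fee fields."""
+        for key in ("modified", "ancestor", "descendant"):
+            fees[key] += delta_btc
+        return fees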
+
+    def run_test(self):
+        self.wallet = MiniWallet(self.nodes[0])
+
+        # Test `prioritisetransaction` required parameters
+        assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction)
+        assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '')
+        assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0)
+
+        # Test `prioritisetransaction` invalid extra parameters
+        assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0, 0, 0)
+
+        # Test `getprioritisedtransactions` invalid parameters
+        assert_raises_rpc_error(-1, "getprioritisedtransactions",
+            self.nodes[0].getprioritisedtransactions, True)
+
+        # Test `prioritisetransaction` invalid `txid`
+        assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].prioritisetransaction, txid='foo', fee_delta=0)
+        assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000')", self.nodes[0].prioritisetransaction, txid='Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000', fee_delta=0)
+
+        # Test `prioritisetransaction` invalid `dummy`
+        txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
+        assert_raises_rpc_error(-3, "JSON value of type string is not of expected type number", self.nodes[0].prioritisetransaction, txid, 'foo', 0)
+        assert_raises_rpc_error(-8, "Priority is no longer supported, dummy argument to prioritisetransaction must be 0.", self.nodes[0].prioritisetransaction, txid, 1, 0)
+
+        # Test `prioritisetransaction` invalid `fee_delta`
+        assert_raises_rpc_error(-3, "JSON value of type string is not of expected type number", self.nodes[0].prioritisetransaction, txid=txid, fee_delta='foo')
+
+        self.test_replacement()
+        self.test_diamond()
+
+        self.txouts = gen_return_txouts()
+        self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
+
+        utxo_count = 90
+        utxos = self.wallet.send_self_transfer_multi(from_node=self.nodes[0], num_outputs=utxo_count)['new_utxos']
+        self.generate(self.wallet, 1)
+        assert_equal(len(self.nodes[0].getrawmempool()), 0)
+
+        base_fee = self.relayfee*100 # our transactions are smaller than 100kb
+        txids = []
+
+        # Create 3 batches of transactions at 3 different fee rate levels
+        range_size = utxo_count // 3
+        for i in range(3):
+            txids.append([])
+            start_range = i * range_size
+            end_range = start_range + range_size
+            txids[i] = create_lots_of_big_transactions(
+                self.wallet,
+                self.nodes[0],
+                (i+1) * base_fee,
+                end_range - start_range,
+                self.txouts,
+                utxos[start_range:end_range])
+
+        # Make sure that the size of each group of transactions exceeds
+        # MAX_BLOCK_WEIGHT // 4 -- otherwise the test needs to be revised to
+        # create more transactions.
+        mempool = self.nodes[0].getrawmempool(True)
+        sizes = [0, 0, 0]
+        for i in range(3):
+            for j in txids[i]:
+                assert j in mempool
+                sizes[i] += mempool[j]['vsize']
+            assert sizes[i] > MAX_BLOCK_WEIGHT // 4  # Fail => raise utxo_count
+
+        assert_equal(self.nodes[0].getprioritisedtransactions(), {})
+        # add a fee delta to something in the cheapest bucket and make sure it gets mined
+        # also check that a different entry in the cheapest bucket is NOT mined
+        self.nodes[0].prioritisetransaction(txid=txids[0][0], fee_delta=int(3*base_fee*COIN))
+        assert_equal(self.nodes[0].getprioritisedtransactions(), {txids[0][0] : { "fee_delta" : 3*base_fee*COIN, "in_mempool" : True, "modified_fee": int(3*base_fee*COIN + COIN * 1 * base_fee)}})
+
+        # Priority disappears when prioritisetransaction is called with an inverse value...
+        self.nodes[0].prioritisetransaction(txid=txids[0][0], fee_delta=int(-3*base_fee*COIN))
+        assert txids[0][0] not in self.nodes[0].getprioritisedtransactions()
+        # ... and reappears when prioritisetransaction is called again.
+        self.nodes[0].prioritisetransaction(txid=txids[0][0], fee_delta=int(3*base_fee*COIN))
+        assert txids[0][0] in self.nodes[0].getprioritisedtransactions()
+
+        self.generate(self.nodes[0], 1)
+
+        mempool = self.nodes[0].getrawmempool()
+        self.log.info("Assert that prioritised transaction was mined")
+        assert txids[0][0] not in mempool
+        assert txids[0][1] in mempool
+
+        high_fee_tx = None
+        for x in txids[2]:
+            if x not in mempool:
+                high_fee_tx = x
+
+        # Something high-fee should have been mined!
+        assert high_fee_tx is not None
+
+        # Add a prioritisation before a tx is in the mempool (de-prioritising a
+        # high-fee transaction so that it's now low fee).
+        self.nodes[0].prioritisetransaction(txid=high_fee_tx, fee_delta=-int(2*base_fee*COIN))
+        assert_equal(self.nodes[0].getprioritisedtransactions()[high_fee_tx], { "fee_delta" : -2*base_fee*COIN, "in_mempool" : False})
+
+        # Add everything back to mempool
+        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+
+        # Check to make sure our high fee rate tx is back in the mempool
+        mempool = self.nodes[0].getrawmempool()
+        assert high_fee_tx in mempool
+
+        # Now verify the modified-high feerate transaction isn't mined before
+        # the other high fee transactions. Keep mining until our mempool has
+        # decreased by all the high fee size that we calculated above.
+        while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]):
+            self.generate(self.nodes[0], 1, sync_fun=self.no_op)
+
+        # High fee transaction should not have been mined, but other high fee rate
+        # transactions should have been.
+        mempool = self.nodes[0].getrawmempool()
+        self.log.info("Assert that de-prioritised transaction is still in mempool")
+        assert high_fee_tx in mempool
+        assert_equal(self.nodes[0].getprioritisedtransactions()[high_fee_tx], { "fee_delta" : -2*base_fee*COIN, "in_mempool" : True, "modified_fee": int(-2*base_fee*COIN + COIN * 3 * base_fee)})
+        for x in txids[2]:
+            if x != high_fee_tx:
+                assert x not in mempool
+
+
+        self.log.info("Assert that 0 delta is never added to mapDeltas")
+        tx_id_zero_del = self.wallet.create_self_transfer()['txid']
+        self.nodes[0].prioritisetransaction(txid=tx_id_zero_del, fee_delta=0)
+        assert tx_id_zero_del not in self.nodes[0].getprioritisedtransactions()
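+
+        # Illustrative note, not part of the upstream test: relayfee from
+        # getnetworkinfo() is denominated in BTC per 1000 vbytes, so for a tx of
+        # vsize <= 1000 a delta of int(self.relayfee * COIN) satoshis is exactly
+        # enough to lift a zero-fee transaction to the minimum relay feerate,
+        # which is what the next checks rely on.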
+
+        # Create a free transaction. Should be rejected.
+        tx_res = self.wallet.create_self_transfer(fee_rate=0)
+        tx_hex = tx_res['hex']
+        tx_id = tx_res['txid']
+
+        # This will raise an exception due to min relay fee not being met
+        assert_raises_rpc_error(-26, "min relay fee not met", self.nodes[0].sendrawtransaction, tx_hex)
+        assert tx_id not in self.nodes[0].getrawmempool()
+
+        # This is a less than 1000-byte transaction, so just set the fee
+        # to be the minimum for a 1000-byte transaction and check that it is
+        # accepted.
+        self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=int(self.relayfee*COIN))
+        assert_equal(self.nodes[0].getprioritisedtransactions()[tx_id], { "fee_delta" : self.relayfee*COIN, "in_mempool" : False})
+
+        self.log.info("Assert that prioritised free transaction is accepted to mempool")
+        assert_equal(self.nodes[0].sendrawtransaction(tx_hex), tx_id)
+        assert tx_id in self.nodes[0].getrawmempool()
+        assert_equal(self.nodes[0].getprioritisedtransactions()[tx_id], { "fee_delta" : self.relayfee*COIN, "in_mempool" : True, "modified_fee": int(self.relayfee*COIN + COIN * tx_res["fee"])})
+
+        # Test that calling prioritisetransaction is sufficient to trigger
+        # getblocktemplate to (eventually) return a new block.
+        mock_time = int(time.time())
+        self.nodes[0].setmocktime(mock_time)
+        template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
+        self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=-int(self.relayfee*COIN))
+
+        # Calling prioritisetransaction with the inverse amount should delete its prioritisation entry
+        assert tx_id not in self.nodes[0].getprioritisedtransactions()
+
+        self.nodes[0].setmocktime(mock_time+10)
+        new_template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
+
+        assert_not_equal(template, new_template)
+
+if __name__ == '__main__':
+    PrioritiseTransactionTest(__file__).main()
diff --git a/test/functional/p2p_blockfilters.py b/test/functional/p2p_blockfilters.py index 028f14d9c0892f..64b0516b944868 100755 --- a/test/functional/p2p_blockfilters.py +++ b/test/functional/p2p_blockfilters.py @@ -1,286 +1,286 @@ -#!/usr/bin/env python3 -# Copyright (c) 2019-2022 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Tests NODE_COMPACT_FILTERS (BIP 157/158). - -Tests that a node configured with -blockfilterindex and -peerblockfilters signals -NODE_COMPACT_FILTERS and can serve cfilters, cfheaders and cfcheckpts. -""" - -from test_framework.messages import ( - FILTER_TYPE_BASIC, - NODE_COMPACT_FILTERS, - hash256, - msg_getcfcheckpt, - msg_getcfheaders, - msg_getcfilters, - ser_uint256, - uint256_from_str, -) -from test_framework.p2p import P2PInterface -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_not_equal, - assert_equal, -) - -class FiltersClient(P2PInterface): - def __init__(self): - super().__init__() - # Store the cfilters received.
- self.cfilters = [] - - def pop_cfilters(self): - cfilters = self.cfilters - self.cfilters = [] - return cfilters - - def on_cfilter(self, message): - """Store cfilters received in a list.""" - self.cfilters.append(message) - - -class CompactFiltersTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.rpc_timeout = 480 - self.num_nodes = 2 - self.extra_args = [ - ["-blockfilterindex", "-peerblockfilters"], - ["-blockfilterindex"], - ] - - def run_test(self): - # Node 0 supports COMPACT_FILTERS, node 1 does not. - peer_0 = self.nodes[0].add_p2p_connection(FiltersClient()) - peer_1 = self.nodes[1].add_p2p_connection(FiltersClient()) - - # Nodes 0 & 1 share the same first 999 blocks in the chain. - self.generate(self.nodes[0], 999) - - # Stale blocks by disconnecting nodes 0 & 1, mining, then reconnecting - self.disconnect_nodes(0, 1) - - stale_block_hash = self.generate(self.nodes[0], 1, sync_fun=self.no_op)[0] - self.nodes[0].syncwithvalidationinterfacequeue() - assert_equal(self.nodes[0].getblockcount(), 1000) - - self.generate(self.nodes[1], 1001, sync_fun=self.no_op) - assert_equal(self.nodes[1].getblockcount(), 2000) - - # Check that nodes have signalled NODE_COMPACT_FILTERS correctly. - assert peer_0.nServices & NODE_COMPACT_FILTERS != 0 - assert peer_1.nServices & NODE_COMPACT_FILTERS == 0 - - # Check that the localservices is as expected. - assert int(self.nodes[0].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS != 0 - assert int(self.nodes[1].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS == 0 - - self.log.info("get cfcheckpt on chain to be re-orged out.") - request = msg_getcfcheckpt( - filter_type=FILTER_TYPE_BASIC, - stop_hash=int(stale_block_hash, 16), - ) - peer_0.send_and_ping(message=request) - response = peer_0.last_message['cfcheckpt'] - assert_equal(response.filter_type, request.filter_type) - assert_equal(response.stop_hash, request.stop_hash) - assert_equal(len(response.headers), 1) - - self.log.info("Reorg node 0 to a new chain.") - self.connect_nodes(0, 1) - self.sync_blocks(timeout=600) - self.nodes[0].syncwithvalidationinterfacequeue() - - main_block_hash = self.nodes[0].getblockhash(1000) - assert main_block_hash != stale_block_hash, "node 0 chain did not reorganize" - - self.log.info("Check that peers can fetch cfcheckpt on active chain.") - tip_hash = self.nodes[0].getbestblockhash() - request = msg_getcfcheckpt( - filter_type=FILTER_TYPE_BASIC, - stop_hash=int(tip_hash, 16), - ) - peer_0.send_and_ping(request) - response = peer_0.last_message['cfcheckpt'] - assert_equal(response.filter_type, request.filter_type) - assert_equal(response.stop_hash, request.stop_hash) - - main_cfcheckpt = self.nodes[0].getblockfilter(main_block_hash, 'basic')['header'] - tip_cfcheckpt = self.nodes[0].getblockfilter(tip_hash, 'basic')['header'] - assert_equal( - response.headers, - [int(header, 16) for header in (main_cfcheckpt, tip_cfcheckpt)], - ) - - self.log.info("Check that peers can fetch cfcheckpt on stale chain.") - request = msg_getcfcheckpt( - filter_type=FILTER_TYPE_BASIC, - stop_hash=int(stale_block_hash, 16), - ) - peer_0.send_and_ping(request) - response = peer_0.last_message['cfcheckpt'] - - stale_cfcheckpt = self.nodes[0].getblockfilter(stale_block_hash, 'basic')['header'] - assert_equal( - response.headers, - [int(header, 16) for header in (stale_cfcheckpt, )], - ) - - self.log.info("Check that peers can fetch cfheaders on active chain.") - request = msg_getcfheaders( - 
filter_type=FILTER_TYPE_BASIC, - start_height=1, - stop_hash=int(main_block_hash, 16), - ) - peer_0.send_and_ping(request) - response = peer_0.last_message['cfheaders'] - main_cfhashes = response.hashes - assert_equal(len(main_cfhashes), 1000) - assert_equal( - compute_last_header(response.prev_header, response.hashes), - int(main_cfcheckpt, 16), - ) - - self.log.info("Check that peers can fetch cfheaders on stale chain.") - request = msg_getcfheaders( - filter_type=FILTER_TYPE_BASIC, - start_height=1, - stop_hash=int(stale_block_hash, 16), - ) - peer_0.send_and_ping(request) - response = peer_0.last_message['cfheaders'] - stale_cfhashes = response.hashes - assert_equal(len(stale_cfhashes), 1000) - assert_equal( - compute_last_header(response.prev_header, response.hashes), - int(stale_cfcheckpt, 16), - ) - - self.log.info("Check that peers can fetch cfilters.") - stop_hash = self.nodes[0].getblockhash(10) - request = msg_getcfilters( - filter_type=FILTER_TYPE_BASIC, - start_height=1, - stop_hash=int(stop_hash, 16), - ) - peer_0.send_and_ping(request) - response = peer_0.pop_cfilters() - assert_equal(len(response), 10) - - self.log.info("Check that cfilter responses are correct.") - for cfilter, cfhash, height in zip(response, main_cfhashes, range(1, 11)): - block_hash = self.nodes[0].getblockhash(height) - assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC) - assert_equal(cfilter.block_hash, int(block_hash, 16)) - computed_cfhash = uint256_from_str(hash256(cfilter.filter_data)) - assert_equal(computed_cfhash, cfhash) - - self.log.info("Check that peers can fetch cfilters for stale blocks.") - request = msg_getcfilters( - filter_type=FILTER_TYPE_BASIC, - start_height=1000, - stop_hash=int(stale_block_hash, 16), - ) - peer_0.send_and_ping(request) - response = peer_0.pop_cfilters() - assert_equal(len(response), 1) - - cfilter = response[0] - assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC) - assert_equal(cfilter.block_hash, int(stale_block_hash, 16)) - computed_cfhash = uint256_from_str(hash256(cfilter.filter_data)) - assert_equal(computed_cfhash, stale_cfhashes[999]) - - self.log.info("Requests to node 1 without NODE_COMPACT_FILTERS results in disconnection.") - requests = [ - msg_getcfcheckpt( - filter_type=FILTER_TYPE_BASIC, - stop_hash=int(main_block_hash, 16), - ), - msg_getcfheaders( - filter_type=FILTER_TYPE_BASIC, - start_height=1000, - stop_hash=int(main_block_hash, 16), - ), - msg_getcfilters( - filter_type=FILTER_TYPE_BASIC, - start_height=1000, - stop_hash=int(main_block_hash, 16), - ), - ] - for request in requests: - peer_1 = self.nodes[1].add_p2p_connection(P2PInterface()) - with self.nodes[1].assert_debug_log(expected_msgs=["requested unsupported block filter type"]): - peer_1.send_message(request) - peer_1.wait_for_disconnect() - - self.log.info("Check that invalid requests result in disconnection.") - requests = [ - # Requesting too many filters results in disconnection. - ( - msg_getcfilters( - filter_type=FILTER_TYPE_BASIC, - start_height=0, - stop_hash=int(main_block_hash, 16), - ), "requested too many cfilters/cfheaders" - ), - # Requesting too many filter headers results in disconnection. - ( - msg_getcfheaders( - filter_type=FILTER_TYPE_BASIC, - start_height=0, - stop_hash=int(tip_hash, 16), - ), "requested too many cfilters/cfheaders" - ), - # Requesting unknown filter type results in disconnection. 
- (
- msg_getcfcheckpt(
- filter_type=255,
- stop_hash=int(main_block_hash, 16),
- ), "requested unsupported block filter type"
- ),
- # Requesting unknown hash results in disconnection.
- (
- msg_getcfcheckpt(
- filter_type=FILTER_TYPE_BASIC,
- stop_hash=123456789,
- ), "requested invalid block hash"
- ),
- (
- # Request with (start block height > stop block height) results in disconnection.
- msg_getcfheaders(
- filter_type=FILTER_TYPE_BASIC,
- start_height=1000,
- stop_hash=int(self.nodes[0].getblockhash(999), 16),
- ), "sent invalid getcfilters/getcfheaders with start height 1000 and stop height 999"
- ),
- ]
- for request, expected_log_msg in requests:
- peer_0 = self.nodes[0].add_p2p_connection(P2PInterface())
- with self.nodes[0].assert_debug_log(expected_msgs=[expected_log_msg]):
- peer_0.send_message(request)
- peer_0.wait_for_disconnect()
-
- self.log.info("Test -peerblockfilters without -blockfilterindex raises an error")
- self.stop_node(0)
- self.nodes[0].extra_args = ["-peerblockfilters"]
- msg = "Error: Cannot set -peerblockfilters without -blockfilterindex."
- self.nodes[0].assert_start_raises_init_error(expected_msg=msg)
-
- self.log.info("Test unknown value to -blockfilterindex raises an error")
- self.nodes[0].extra_args = ["-blockfilterindex=abc"]
- msg = "Error: Unknown -blockfilterindex value abc."
- self.nodes[0].assert_start_raises_init_error(expected_msg=msg)
-
-def compute_last_header(prev_header, hashes):
- """Compute the last filter header from a starting header and a sequence of filter hashes."""
- header = ser_uint256(prev_header)
- for filter_hash in hashes:
- header = hash256(ser_uint256(filter_hash) + header)
- return uint256_from_str(header)
-
-
-if __name__ == '__main__':
- CompactFiltersTest(__file__).main()
+#!/usr/bin/env python3
+# Copyright (c) 2019-2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Tests NODE_COMPACT_FILTERS (BIP 157/158).
+
+Tests that a node configured with -blockfilterindex and -peerblockfilters signals
+NODE_COMPACT_FILTERS and can serve cfilters, cfheaders and cfcheckpts.
+"""
+
+from test_framework.messages import (
+ FILTER_TYPE_BASIC,
+ NODE_COMPACT_FILTERS,
+ hash256,
+ msg_getcfcheckpt,
+ msg_getcfheaders,
+ msg_getcfilters,
+ ser_uint256,
+ uint256_from_str,
+)
+from test_framework.p2p import P2PInterface
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_not_equal,
+ assert_equal,
+)
+
+class FiltersClient(P2PInterface):
+ def __init__(self):
+ super().__init__()
+ # Store the cfilters received.
+ self.cfilters = []
+
+ def pop_cfilters(self):
+ cfilters = self.cfilters
+ self.cfilters = []
+ return cfilters
+
+ def on_cfilter(self, message):
+ """Store cfilters received in a list."""
+ self.cfilters.append(message)
+
+
+class CompactFiltersTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.rpc_timeout = 480
+ self.num_nodes = 2
+ self.extra_args = [
+ ["-blockfilterindex", "-peerblockfilters"],
+ ["-blockfilterindex"],
+ ]
+
+ def run_test(self):
+ # Node 0 supports COMPACT_FILTERS, node 1 does not.
+ peer_0 = self.nodes[0].add_p2p_connection(FiltersClient())
+ peer_1 = self.nodes[1].add_p2p_connection(FiltersClient())
+
+ # Nodes 0 & 1 share the same first 999 blocks in the chain.
+ self.generate(self.nodes[0], 999)
+
+ # Stale blocks by disconnecting nodes 0 & 1, mining, then reconnecting
+ self.disconnect_nodes(0, 1)
+
+ stale_block_hash = self.generate(self.nodes[0], 1, sync_fun=self.no_op)[0]
+ self.nodes[0].syncwithvalidationinterfacequeue()
+ assert_equal(self.nodes[0].getblockcount(), 1000)
+
+ self.generate(self.nodes[1], 1001, sync_fun=self.no_op)
+ assert_equal(self.nodes[1].getblockcount(), 2000)
+
+ # Check that nodes have signalled NODE_COMPACT_FILTERS correctly.
+ assert_not_equal(peer_0.nServices & NODE_COMPACT_FILTERS, 0)
+ assert peer_1.nServices & NODE_COMPACT_FILTERS == 0
+
+ # Check that the localservices is as expected.
+ assert_not_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS, 0)
+ assert int(self.nodes[1].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS == 0
+
+ self.log.info("get cfcheckpt on chain to be re-orged out.")
+ request = msg_getcfcheckpt(
+ filter_type=FILTER_TYPE_BASIC,
+ stop_hash=int(stale_block_hash, 16),
+ )
+ peer_0.send_and_ping(message=request)
+ response = peer_0.last_message['cfcheckpt']
+ assert_equal(response.filter_type, request.filter_type)
+ assert_equal(response.stop_hash, request.stop_hash)
+ assert_equal(len(response.headers), 1)
+
+ self.log.info("Reorg node 0 to a new chain.")
+ self.connect_nodes(0, 1)
+ self.sync_blocks(timeout=600)
+ self.nodes[0].syncwithvalidationinterfacequeue()
+
+ main_block_hash = self.nodes[0].getblockhash(1000)
+ assert_not_equal(main_block_hash, stale_block_hash, "node 0 chain did not reorganize")
+
+ self.log.info("Check that peers can fetch cfcheckpt on active chain.")
+ tip_hash = self.nodes[0].getbestblockhash()
+ request = msg_getcfcheckpt(
+ filter_type=FILTER_TYPE_BASIC,
+ stop_hash=int(tip_hash, 16),
+ )
+ peer_0.send_and_ping(request)
+ response = peer_0.last_message['cfcheckpt']
+ assert_equal(response.filter_type, request.filter_type)
+ assert_equal(response.stop_hash, request.stop_hash)
+
+ main_cfcheckpt = self.nodes[0].getblockfilter(main_block_hash, 'basic')['header']
+ tip_cfcheckpt = self.nodes[0].getblockfilter(tip_hash, 'basic')['header']
+ assert_equal(
+ response.headers,
+ [int(header, 16) for header in (main_cfcheckpt, tip_cfcheckpt)],
+ )
+
+ self.log.info("Check that peers can fetch cfcheckpt on stale chain.")
+ request = msg_getcfcheckpt(
+ filter_type=FILTER_TYPE_BASIC,
+ stop_hash=int(stale_block_hash, 16),
+ )
+ peer_0.send_and_ping(request)
+ response = peer_0.last_message['cfcheckpt']
+
+ stale_cfcheckpt = self.nodes[0].getblockfilter(stale_block_hash, 'basic')['header']
+ assert_equal(
+ response.headers,
+ [int(header, 16) for header in (stale_cfcheckpt, )],
+ )
+
+ self.log.info("Check that peers can fetch cfheaders on active chain.")
+ request = msg_getcfheaders(
+ filter_type=FILTER_TYPE_BASIC,
+ start_height=1,
+ stop_hash=int(main_block_hash, 16),
+ )
+ peer_0.send_and_ping(request)
+ response = peer_0.last_message['cfheaders']
+ main_cfhashes = response.hashes
+ assert_equal(len(main_cfhashes), 1000)
+ assert_equal(
+ compute_last_header(response.prev_header, response.hashes),
+ int(main_cfcheckpt, 16),
+ )
+
+ self.log.info("Check that peers can fetch cfheaders on stale chain.")
+ request = msg_getcfheaders(
+ filter_type=FILTER_TYPE_BASIC,
+ start_height=1,
+ stop_hash=int(stale_block_hash, 16),
+ )
+ peer_0.send_and_ping(request)
+ response = peer_0.last_message['cfheaders']
+ stale_cfhashes =
response.hashes
+ assert_equal(len(stale_cfhashes), 1000)
+ assert_equal(
+ compute_last_header(response.prev_header, response.hashes),
+ int(stale_cfcheckpt, 16),
+ )
+
+ self.log.info("Check that peers can fetch cfilters.")
+ stop_hash = self.nodes[0].getblockhash(10)
+ request = msg_getcfilters(
+ filter_type=FILTER_TYPE_BASIC,
+ start_height=1,
+ stop_hash=int(stop_hash, 16),
+ )
+ peer_0.send_and_ping(request)
+ response = peer_0.pop_cfilters()
+ assert_equal(len(response), 10)
+
+ self.log.info("Check that cfilter responses are correct.")
+ for cfilter, cfhash, height in zip(response, main_cfhashes, range(1, 11)):
+ block_hash = self.nodes[0].getblockhash(height)
+ assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
+ assert_equal(cfilter.block_hash, int(block_hash, 16))
+ computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
+ assert_equal(computed_cfhash, cfhash)
+
+ self.log.info("Check that peers can fetch cfilters for stale blocks.")
+ request = msg_getcfilters(
+ filter_type=FILTER_TYPE_BASIC,
+ start_height=1000,
+ stop_hash=int(stale_block_hash, 16),
+ )
+ peer_0.send_and_ping(request)
+ response = peer_0.pop_cfilters()
+ assert_equal(len(response), 1)
+
+ cfilter = response[0]
+ assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
+ assert_equal(cfilter.block_hash, int(stale_block_hash, 16))
+ computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
+ assert_equal(computed_cfhash, stale_cfhashes[999])
+
+ self.log.info("Requests to node 1 without NODE_COMPACT_FILTERS results in disconnection.")
+ requests = [
+ msg_getcfcheckpt(
+ filter_type=FILTER_TYPE_BASIC,
+ stop_hash=int(main_block_hash, 16),
+ ),
+ msg_getcfheaders(
+ filter_type=FILTER_TYPE_BASIC,
+ start_height=1000,
+ stop_hash=int(main_block_hash, 16),
+ ),
+ msg_getcfilters(
+ filter_type=FILTER_TYPE_BASIC,
+ start_height=1000,
+ stop_hash=int(main_block_hash, 16),
+ ),
+ ]
+ for request in requests:
+ peer_1 = self.nodes[1].add_p2p_connection(P2PInterface())
+ with self.nodes[1].assert_debug_log(expected_msgs=["requested unsupported block filter type"]):
+ peer_1.send_message(request)
+ peer_1.wait_for_disconnect()
+
+ self.log.info("Check that invalid requests result in disconnection.")
+ requests = [
+ # Requesting too many filters results in disconnection.
+ (
+ msg_getcfilters(
+ filter_type=FILTER_TYPE_BASIC,
+ start_height=0,
+ stop_hash=int(main_block_hash, 16),
+ ), "requested too many cfilters/cfheaders"
+ ),
+ # Requesting too many filter headers results in disconnection.
+ (
+ msg_getcfheaders(
+ filter_type=FILTER_TYPE_BASIC,
+ start_height=0,
+ stop_hash=int(tip_hash, 16),
+ ), "requested too many cfilters/cfheaders"
+ ),
+ # Requesting unknown filter type results in disconnection.
+ (
+ msg_getcfcheckpt(
+ filter_type=255,
+ stop_hash=int(main_block_hash, 16),
+ ), "requested unsupported block filter type"
+ ),
+ # Requesting unknown hash results in disconnection.
+ (
+ msg_getcfcheckpt(
+ filter_type=FILTER_TYPE_BASIC,
+ stop_hash=123456789,
+ ), "requested invalid block hash"
+ ),
+ (
+ # Request with (start block height > stop block height) results in disconnection.
+ msg_getcfheaders(
+ filter_type=FILTER_TYPE_BASIC,
+ start_height=1000,
+ stop_hash=int(self.nodes[0].getblockhash(999), 16),
+ ), "sent invalid getcfilters/getcfheaders with start height 1000 and stop height 999"
+ ),
+ ]
+ for request, expected_log_msg in requests:
+ peer_0 = self.nodes[0].add_p2p_connection(P2PInterface())
+ with self.nodes[0].assert_debug_log(expected_msgs=[expected_log_msg]):
+ peer_0.send_message(request)
+ peer_0.wait_for_disconnect()
+
+ self.log.info("Test -peerblockfilters without -blockfilterindex raises an error")
+ self.stop_node(0)
+ self.nodes[0].extra_args = ["-peerblockfilters"]
+ msg = "Error: Cannot set -peerblockfilters without -blockfilterindex."
+ self.nodes[0].assert_start_raises_init_error(expected_msg=msg)
+
+ self.log.info("Test unknown value to -blockfilterindex raises an error")
+ self.nodes[0].extra_args = ["-blockfilterindex=abc"]
+ msg = "Error: Unknown -blockfilterindex value abc."
+ self.nodes[0].assert_start_raises_init_error(expected_msg=msg)
+
+def compute_last_header(prev_header, hashes):
+ """Compute the last filter header from a starting header and a sequence of filter hashes."""
+ header = ser_uint256(prev_header)
+ for filter_hash in hashes:
+ header = hash256(ser_uint256(filter_hash) + header)
+ return uint256_from_str(header)
+
+
+if __name__ == '__main__':
+ CompactFiltersTest(__file__).main()
diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py
index accaea54ba45dd..351d0b8121477e 100755
--- a/test/functional/p2p_compactblocks.py
+++ b/test/functional/p2p_compactblocks.py
@@ -1,969 +1,969 @@
-#!/usr/bin/env python3
-# Copyright (c) 2016-2022 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test compact blocks (BIP 152)."""
-import random
-
-from test_framework.blocktools import (
- COINBASE_MATURITY,
- NORMAL_GBT_REQUEST_PARAMS,
- add_witness_commitment,
- create_block,
-)
-from test_framework.messages import (
- BlockTransactions,
- BlockTransactionsRequest,
- CBlock,
- CBlockHeader,
- CInv,
- COutPoint,
- CTransaction,
- CTxIn,
- CTxInWitness,
- CTxOut,
- from_hex,
- HeaderAndShortIDs,
- MSG_BLOCK,
- MSG_CMPCT_BLOCK,
- MSG_WITNESS_FLAG,
- P2PHeaderAndShortIDs,
- PrefilledTransaction,
- calculate_shortid,
- msg_block,
- msg_blocktxn,
- msg_cmpctblock,
- msg_getblocktxn,
- msg_getdata,
- msg_getheaders,
- msg_headers,
- msg_inv,
- msg_no_witness_block,
- msg_no_witness_blocktxn,
- msg_sendcmpct,
- msg_sendheaders,
- msg_tx,
- ser_uint256,
- tx_from_hex,
-)
-from test_framework.p2p import (
- P2PInterface,
- p2p_lock,
-)
-from test_framework.script import (
- CScript,
- OP_DROP,
- OP_TRUE,
-)
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (
- assert_not_equal,
- assert_equal,
- softfork_active,
-)
-from test_framework.wallet import MiniWallet
-
-
-# TestP2PConn: A peer we use to send messages to bitcoind, and store responses.
-class TestP2PConn(P2PInterface):
- def __init__(self):
- super().__init__()
- self.last_sendcmpct = []
- self.block_announced = False
- # Store the hashes of blocks we've seen announced.
- # This is for synchronizing the p2p message traffic,
- # so we can eg wait until a particular block is announced.
- self.announced_blockhashes = set() - - def on_sendcmpct(self, message): - self.last_sendcmpct.append(message) - - def on_cmpctblock(self, message): - self.block_announced = True - self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256() - self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256) - - def on_headers(self, message): - self.block_announced = True - for x in self.last_message["headers"].headers: - x.calc_sha256() - self.announced_blockhashes.add(x.sha256) - - def on_inv(self, message): - for x in self.last_message["inv"].inv: - if x.type == MSG_BLOCK: - self.block_announced = True - self.announced_blockhashes.add(x.hash) - - # Requires caller to hold p2p_lock - def received_block_announcement(self): - return self.block_announced - - def clear_block_announcement(self): - with p2p_lock: - self.block_announced = False - self.last_message.pop("inv", None) - self.last_message.pop("headers", None) - self.last_message.pop("cmpctblock", None) - - def clear_getblocktxn(self): - with p2p_lock: - self.last_message.pop("getblocktxn", None) - - def get_headers(self, locator, hashstop): - msg = msg_getheaders() - msg.locator.vHave = locator - msg.hashstop = hashstop - self.send_message(msg) - - def send_header_for_blocks(self, new_blocks): - headers_message = msg_headers() - headers_message.headers = [CBlockHeader(b) for b in new_blocks] - self.send_message(headers_message) - - def request_headers_and_sync(self, locator, hashstop=0): - self.clear_block_announcement() - self.get_headers(locator, hashstop) - self.wait_until(self.received_block_announcement, timeout=30) - self.clear_block_announcement() - - # Block until a block announcement for a particular block hash is - # received. - def wait_for_block_announcement(self, block_hash, timeout=30): - def received_hash(): - return (block_hash in self.announced_blockhashes) - self.wait_until(received_hash, timeout=timeout) - - def send_await_disconnect(self, message, timeout=30): - """Sends a message to the node and wait for disconnect. - - This is used when we want to send a message into the node that we expect - will get us disconnected, eg an invalid block.""" - self.send_message(message) - self.wait_for_disconnect(timeout=timeout) - -class CompactBlocksTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - self.extra_args = [[ - "-acceptnonstdtxn=1", - ]] - self.utxos = [] - - def build_block_on_tip(self, node): - block = create_block(tmpl=node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)) - block.solve() - return block - - # Create 10 more anyone-can-spend utxo's for testing. 
- def make_utxos(self): - block = self.build_block_on_tip(self.nodes[0]) - self.segwit_node.send_and_ping(msg_no_witness_block(block)) - assert int(self.nodes[0].getbestblockhash(), 16) == block.sha256 - self.generate(self.wallet, COINBASE_MATURITY) - - total_value = block.vtx[0].vout[0].nValue - out_value = total_value // 10 - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b'')) - for _ in range(10): - tx.vout.append(CTxOut(out_value, CScript([OP_TRUE]))) - tx.rehash() - - block2 = self.build_block_on_tip(self.nodes[0]) - block2.vtx.append(tx) - block2.hashMerkleRoot = block2.calc_merkle_root() - block2.solve() - self.segwit_node.send_and_ping(msg_no_witness_block(block2)) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256) - self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)]) - - - # Test "sendcmpct" (between peers preferring the same version): - # - No compact block announcements unless sendcmpct is sent. - # - If sendcmpct is sent with version = 1, the message is ignored. - # - If sendcmpct is sent with version > 2, the message is ignored. - # - If sendcmpct is sent with boolean 0, then block announcements are not - # made with compact blocks. - # - If sendcmpct is then sent with boolean 1, then new block announcements - # are made with compact blocks. - def test_sendcmpct(self, test_node): - node = self.nodes[0] - - # Make sure we get a SENDCMPCT message from our peer - def received_sendcmpct(): - return (len(test_node.last_sendcmpct) > 0) - test_node.wait_until(received_sendcmpct, timeout=30) - with p2p_lock: - # Check that version 2 is received. - assert_equal(test_node.last_sendcmpct[0].version, 2) - test_node.last_sendcmpct = [] - - tip = int(node.getbestblockhash(), 16) - - def check_announcement_of_new_block(node, peer, predicate): - peer.clear_block_announcement() - block_hash = int(self.generate(node, 1)[0], 16) - peer.wait_for_block_announcement(block_hash, timeout=30) - assert peer.block_announced - - with p2p_lock: - assert predicate(peer), ( - "block_hash={!r}, cmpctblock={!r}, inv={!r}".format( - block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None))) - - # We shouldn't get any block announcements via cmpctblock yet. - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message) - - # Try one more time, this time after requesting headers. - test_node.request_headers_and_sync(locator=[tip]) - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message) - - # Test a few ways of using sendcmpct that should NOT - # result in compact block announcements. - # Before each test, sync the headers chain. - test_node.request_headers_and_sync(locator=[tip]) - - # Now try a SENDCMPCT message with too-low version - test_node.send_and_ping(msg_sendcmpct(announce=True, version=1)) - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message) - - # Headers sync before next test. - test_node.request_headers_and_sync(locator=[tip]) - - # Now try a SENDCMPCT message with too-high version - test_node.send_and_ping(msg_sendcmpct(announce=True, version=3)) - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message) - - # Headers sync before next test. 
- test_node.request_headers_and_sync(locator=[tip]) - - # Now try a SENDCMPCT message with valid version, but announce=False - test_node.send_and_ping(msg_sendcmpct(announce=False, version=2)) - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message) - - # Headers sync before next test. - test_node.request_headers_and_sync(locator=[tip]) - - # Finally, try a SENDCMPCT message with announce=True - test_node.send_and_ping(msg_sendcmpct(announce=True, version=2)) - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message) - - # Try one more time (no headers sync should be needed!) - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message) - - # Try one more time, after turning on sendheaders - test_node.send_and_ping(msg_sendheaders()) - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message) - - # Try one more time, after sending a version=1, announce=false message. - test_node.send_and_ping(msg_sendcmpct(announce=False, version=1)) - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message) - - # Now turn off announcements - test_node.send_and_ping(msg_sendcmpct(announce=False, version=2)) - check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message) - - # This test actually causes bitcoind to (reasonably!) disconnect us, so do this last. - def test_invalid_cmpctblock_message(self): - self.generate(self.nodes[0], COINBASE_MATURITY + 1) - block = self.build_block_on_tip(self.nodes[0]) - - cmpct_block = P2PHeaderAndShortIDs() - cmpct_block.header = CBlockHeader(block) - cmpct_block.prefilled_txn_length = 1 - # This index will be too high - prefilled_txn = PrefilledTransaction(1, block.vtx[0]) - cmpct_block.prefilled_txn = [prefilled_txn] - self.segwit_node.send_await_disconnect(msg_cmpctblock(cmpct_block)) - assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock) - - # Compare the generated shortids to what we expect based on BIP 152, given - # bitcoind's choice of nonce. - def test_compactblock_construction(self, test_node): - node = self.nodes[0] - # Generate a bunch of transactions. - self.generate(node, COINBASE_MATURITY + 1) - num_transactions = 25 - - segwit_tx_generated = False - for _ in range(num_transactions): - hex_tx = self.wallet.send_self_transfer(from_node=self.nodes[0])['hex'] - tx = tx_from_hex(hex_tx) - if not tx.wit.is_null(): - segwit_tx_generated = True - - assert segwit_tx_generated # check that our test is not broken - - # Wait until we've seen the block announcement for the resulting tip - tip = int(node.getbestblockhash(), 16) - test_node.wait_for_block_announcement(tip) - - # Make sure we will receive a fast-announce compact block - self.request_cb_announcements(test_node) - - # Now mine a block, and look at the resulting compact block. - test_node.clear_block_announcement() - block_hash = int(self.generate(node, 1)[0], 16) - - # Store the raw block in our internal format. 
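# (Editor's note) The test framework passes block hashes around as integers:
# "%064x" % block_hash re-encodes one as the zero-padded, 64-character hex
# string that the getblock RPC expects, and int(hex_hash, 16) converts back.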
- block = from_hex(CBlock(), node.getblock("%064x" % block_hash, False)) - for tx in block.vtx: - tx.calc_sha256() - block.rehash() - - # Wait until the block was announced (via compact blocks) - test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30) - - # Now fetch and check the compact block - header_and_shortids = None - with p2p_lock: - # Convert the on-the-wire representation to absolute indexes - header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids) - self.check_compactblock_construction_from_block(header_and_shortids, block_hash, block) - - # Now fetch the compact block using a normal non-announce getdata - test_node.clear_block_announcement() - inv = CInv(MSG_CMPCT_BLOCK, block_hash) - test_node.send_message(msg_getdata([inv])) - - test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30) - - # Now fetch and check the compact block - header_and_shortids = None - with p2p_lock: - # Convert the on-the-wire representation to absolute indexes - header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids) - self.check_compactblock_construction_from_block(header_and_shortids, block_hash, block) - - def check_compactblock_construction_from_block(self, header_and_shortids, block_hash, block): - # Check that we got the right block! - header_and_shortids.header.calc_sha256() - assert_equal(header_and_shortids.header.sha256, block_hash) - - # Make sure the prefilled_txn appears to have included the coinbase - assert len(header_and_shortids.prefilled_txn) >= 1 - assert_equal(header_and_shortids.prefilled_txn[0].index, 0) - - # Check that all prefilled_txn entries match what's in the block. - for entry in header_and_shortids.prefilled_txn: - entry.tx.calc_sha256() - # This checks the non-witness parts of the tx agree - assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256) - - # And this checks the witness - wtxid = entry.tx.calc_sha256(True) - assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True)) - - # Check that the cmpctblock message announced all the transactions. - assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx)) - - # And now check that all the shortids are as expected as well. - # Determine the siphash keys to use. - [k0, k1] = header_and_shortids.get_siphash_keys() - - index = 0 - while index < len(block.vtx): - if (len(header_and_shortids.prefilled_txn) > 0 and - header_and_shortids.prefilled_txn[0].index == index): - # Already checked prefilled transactions above - header_and_shortids.prefilled_txn.pop(0) - else: - tx_hash = block.vtx[index].calc_sha256(True) - shortid = calculate_shortid(k0, k1, tx_hash) - assert_equal(shortid, header_and_shortids.shortids[0]) - header_and_shortids.shortids.pop(0) - index += 1 - - # Test that bitcoind requests compact blocks when we announce new blocks - # via header or inv, and that responding to getblocktxn causes the block - # to be successfully reconstructed. 
- def test_compactblock_requests(self, test_node): - node = self.nodes[0] - # Try announcing a block with an inv or header, expect a compactblock - # request - for announce in ["inv", "header"]: - block = self.build_block_on_tip(node) - - if announce == "inv": - test_node.send_message(msg_inv([CInv(MSG_BLOCK, block.sha256)])) - test_node.wait_for_getheaders(timeout=30) - test_node.send_header_for_blocks([block]) - else: - test_node.send_header_for_blocks([block]) - test_node.wait_for_getdata([block.sha256], timeout=30) - assert_equal(test_node.last_message["getdata"].inv[0].type, 4) - - # Send back a compactblock message that omits the coinbase - comp_block = HeaderAndShortIDs() - comp_block.header = CBlockHeader(block) - comp_block.nonce = 0 - [k0, k1] = comp_block.get_siphash_keys() - coinbase_hash = block.vtx[0].calc_sha256(True) - comp_block.shortids = [calculate_shortid(k0, k1, coinbase_hash)] - test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) - assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) - # Expect a getblocktxn message. - with p2p_lock: - assert "getblocktxn" in test_node.last_message - absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute() - assert_equal(absolute_indexes, [0]) # should be a coinbase request - - # Send the coinbase, and verify that the tip advances. - msg = msg_blocktxn() - msg.block_transactions.blockhash = block.sha256 - msg.block_transactions.transactions = [block.vtx[0]] - test_node.send_and_ping(msg) - assert_equal(int(node.getbestblockhash(), 16), block.sha256) - - # Create a chain of transactions from given utxo, and add to a new block. - def build_block_with_transactions(self, node, utxo, num_transactions): - block = self.build_block_on_tip(node) - - for _ in range(num_transactions): - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b'')) - tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) - tx.rehash() - utxo = [tx.sha256, 0, tx.vout[0].nValue] - block.vtx.append(tx) - - block.hashMerkleRoot = block.calc_merkle_root() - block.solve() - return block - - # Test that we only receive getblocktxn requests for transactions that the - # node needs, and that responding to them causes the block to be - # reconstructed. - def test_getblocktxn_requests(self, test_node): - node = self.nodes[0] - - def test_getblocktxn_response(compact_block, peer, expected_result): - msg = msg_cmpctblock(compact_block.to_p2p()) - peer.send_and_ping(msg) - with p2p_lock: - assert "getblocktxn" in peer.last_message - absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute() - assert_equal(absolute_indexes, expected_result) - - def test_tip_after_message(node, peer, msg, tip): - peer.send_and_ping(msg) - assert_equal(int(node.getbestblockhash(), 16), tip) - - # First try announcing compactblocks that won't reconstruct, and verify - # that we receive getblocktxn messages back. 
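# (Editor's sketch, not part of the patch.) The getblocktxn round-trips below
# rely on BIP 152's differential index encoding, which the framework's
# BlockTransactionsRequest (imported at the top of this file) converts with
# from_absolute()/to_absolute():
sketch_request = BlockTransactionsRequest(0, [])
sketch_request.from_absolute([1, 2, 3, 4, 5])
assert sketch_request.indexes == [1, 0, 0, 0, 0]  # wire form is differential
assert sketch_request.to_absolute() == [1, 2, 3, 4, 5]  # back to absolute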
- utxo = self.utxos.pop(0) - - block = self.build_block_with_transactions(node, utxo, 5) - self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) - comp_block = HeaderAndShortIDs() - comp_block.initialize_from_block(block, use_witness=True) - - test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5]) - - msg_bt = msg_no_witness_blocktxn() - msg_bt = msg_blocktxn() # serialize with witnesses - msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:]) - test_tip_after_message(node, test_node, msg_bt, block.sha256) - - utxo = self.utxos.pop(0) - block = self.build_block_with_transactions(node, utxo, 5) - self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) - - # Now try interspersing the prefilled transactions - comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=True) - test_getblocktxn_response(comp_block, test_node, [2, 3, 4]) - msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5]) - test_tip_after_message(node, test_node, msg_bt, block.sha256) - - # Now try giving one transaction ahead of time. - utxo = self.utxos.pop(0) - block = self.build_block_with_transactions(node, utxo, 5) - self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) - test_node.send_and_ping(msg_tx(block.vtx[1])) - assert block.vtx[1].hash in node.getrawmempool() - - # Prefill 4 out of the 6 transactions, and verify that only the one - # that was not in the mempool is requested. - comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=True) - test_getblocktxn_response(comp_block, test_node, [5]) - - msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]]) - test_tip_after_message(node, test_node, msg_bt, block.sha256) - - # Now provide all transactions to the node before the block is - # announced and verify reconstruction happens immediately. - utxo = self.utxos.pop(0) - block = self.build_block_with_transactions(node, utxo, 10) - self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) - for tx in block.vtx[1:]: - test_node.send_message(msg_tx(tx)) - test_node.sync_with_ping() - # Make sure all transactions were accepted. - mempool = node.getrawmempool() - for tx in block.vtx[1:]: - assert tx.hash in mempool - - # Clear out last request. - with p2p_lock: - test_node.last_message.pop("getblocktxn", None) - - # Send compact block - comp_block.initialize_from_block(block, prefill_list=[0], use_witness=True) - test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256) - with p2p_lock: - # Shouldn't have gotten a request for any transaction - assert "getblocktxn" not in test_node.last_message - - # Incorrectly responding to a getblocktxn shouldn't cause the block to be - # permanently failed. - def test_incorrect_blocktxn_response(self, test_node): - node = self.nodes[0] - utxo = self.utxos.pop(0) - - block = self.build_block_with_transactions(node, utxo, 10) - self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) - # Relay the first 5 transactions from the block in advance - for tx in block.vtx[1:6]: - test_node.send_message(msg_tx(tx)) - test_node.sync_with_ping() - # Make sure all transactions were accepted. 
- mempool = node.getrawmempool() - for tx in block.vtx[1:6]: - assert tx.hash in mempool - - # Send compact block - comp_block = HeaderAndShortIDs() - comp_block.initialize_from_block(block, prefill_list=[0], use_witness=True) - test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) - absolute_indexes = [] - with p2p_lock: - assert "getblocktxn" in test_node.last_message - absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute() - assert_equal(absolute_indexes, [6, 7, 8, 9, 10]) - - # Now give an incorrect response. - # Note that it's possible for bitcoind to be smart enough to know we're - # lying, since it could check to see if the shortid matches what we're - # sending, and eg disconnect us for misbehavior. If that behavior - # change was made, we could just modify this test by having a - # different peer provide the block further down, so that we're still - # verifying that the block isn't marked bad permanently. This is good - # enough for now. - msg = msg_blocktxn() - msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:]) - test_node.send_and_ping(msg) - - # Tip should not have updated - assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) - - # We should receive a getdata request - test_node.wait_for_getdata([block.sha256], timeout=10) - assert test_node.last_message["getdata"].inv[0].type == MSG_BLOCK or \ - test_node.last_message["getdata"].inv[0].type == MSG_BLOCK | MSG_WITNESS_FLAG - - # Deliver the block - test_node.send_and_ping(msg_block(block)) - assert_equal(int(node.getbestblockhash(), 16), block.sha256) - - def test_getblocktxn_handler(self, test_node): - node = self.nodes[0] - # bitcoind will not send blocktxn responses for blocks whose height is - # more than 10 blocks deep. - MAX_GETBLOCKTXN_DEPTH = 10 - chain_height = node.getblockcount() - current_height = chain_height - while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH): - block_hash = node.getblockhash(current_height) - block = from_hex(CBlock(), node.getblock(block_hash, False)) - - msg = msg_getblocktxn() - msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), []) - num_to_request = random.randint(1, len(block.vtx)) - msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request))) - test_node.send_message(msg) - test_node.wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10) - - [tx.calc_sha256() for tx in block.vtx] - with p2p_lock: - assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16)) - all_indices = msg.block_txn_request.to_absolute() - for index in all_indices: - tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0) - tx.calc_sha256() - assert_equal(tx.sha256, block.vtx[index].sha256) - # Check that the witness matches - assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True)) - test_node.last_message.pop("blocktxn", None) - current_height -= 1 - - # Next request should send a full block response, as we're past the - # allowed depth for a blocktxn response. 
- block_hash = node.getblockhash(current_height) - msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0]) - with p2p_lock: - test_node.last_message.pop("block", None) - test_node.last_message.pop("blocktxn", None) - test_node.send_and_ping(msg) - with p2p_lock: - test_node.last_message["block"].block.calc_sha256() - assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16)) - assert "blocktxn" not in test_node.last_message - - # Request with out-of-bounds tx index results in disconnect - bad_peer = self.nodes[0].add_p2p_connection(TestP2PConn()) - block_hash = node.getblockhash(chain_height) - block = from_hex(CBlock(), node.getblock(block_hash, False)) - msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [len(block.vtx)]) - with node.assert_debug_log(['getblocktxn with out-of-bounds tx indices']): - bad_peer.send_message(msg) - bad_peer.wait_for_disconnect() - - def test_low_work_compactblocks(self, test_node): - # A compactblock with insufficient work won't get its header included - node = self.nodes[0] - hashPrevBlock = int(node.getblockhash(node.getblockcount() - 150), 16) - block = self.build_block_on_tip(node) - block.hashPrevBlock = hashPrevBlock - block.solve() - - comp_block = HeaderAndShortIDs() - comp_block.initialize_from_block(block) - with self.nodes[0].assert_debug_log(['[net] Ignoring low-work compact block from peer 0']): - test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) - - tips = node.getchaintips() - found = False - for x in tips: - if x["hash"] == block.hash: - found = True - break - assert not found - - def test_compactblocks_not_at_tip(self, test_node): - node = self.nodes[0] - # Test that requesting old compactblocks doesn't work. - MAX_CMPCTBLOCK_DEPTH = 5 - new_blocks = [] - for _ in range(MAX_CMPCTBLOCK_DEPTH + 1): - test_node.clear_block_announcement() - new_blocks.append(self.generate(node, 1)[0]) - test_node.wait_until(test_node.received_block_announcement, timeout=30) - - test_node.clear_block_announcement() - test_node.send_message(msg_getdata([CInv(MSG_CMPCT_BLOCK, int(new_blocks[0], 16))])) - test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30) - - test_node.clear_block_announcement() - self.generate(node, 1) - test_node.wait_until(test_node.received_block_announcement, timeout=30) - test_node.clear_block_announcement() - with p2p_lock: - test_node.last_message.pop("block", None) - test_node.send_message(msg_getdata([CInv(MSG_CMPCT_BLOCK, int(new_blocks[0], 16))])) - test_node.wait_until(lambda: "block" in test_node.last_message, timeout=30) - with p2p_lock: - test_node.last_message["block"].block.calc_sha256() - assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16)) - - # Generate an old compactblock, and verify that it's not accepted. - cur_height = node.getblockcount() - hashPrevBlock = int(node.getblockhash(cur_height - 5), 16) - block = self.build_block_on_tip(node) - block.hashPrevBlock = hashPrevBlock - block.solve() - - comp_block = HeaderAndShortIDs() - comp_block.initialize_from_block(block) - test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) - - tips = node.getchaintips() - found = False - for x in tips: - if x["hash"] == block.hash: - assert_equal(x["status"], "headers-only") - found = True - break - assert found - - # Requesting this block via getblocktxn should silently fail - # (to avoid fingerprinting attacks). 
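# (Editor's note) Serving such a request would reveal that this node recently
# had the stale block in its active chain, which a peer could use to
# fingerprint the node across different addresses or restarts.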
- msg = msg_getblocktxn() - msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0]) - with p2p_lock: - test_node.last_message.pop("blocktxn", None) - test_node.send_and_ping(msg) - with p2p_lock: - assert "blocktxn" not in test_node.last_message - - def test_end_to_end_block_relay(self, listeners): - node = self.nodes[0] - utxo = self.utxos.pop(0) - - block = self.build_block_with_transactions(node, utxo, 10) - - [l.clear_block_announcement() for l in listeners] - - # serialize without witness (this block has no witnesses anyway). - # TODO: repeat this test with witness tx's to a segwit node. - node.submitblock(block.serialize().hex()) - - for l in listeners: - l.wait_until(lambda: "cmpctblock" in l.last_message, timeout=30) - with p2p_lock: - for l in listeners: - l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256() - assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256) - - # Test that we don't get disconnected if we relay a compact block with valid header, - # but invalid transactions. - def test_invalid_tx_in_compactblock(self, test_node): - node = self.nodes[0] - assert len(self.utxos) - utxo = self.utxos[0] - - block = self.build_block_with_transactions(node, utxo, 5) - del block.vtx[3] - block.hashMerkleRoot = block.calc_merkle_root() - # Drop the coinbase witness but include the witness commitment. - add_witness_commitment(block) - block.vtx[0].wit.vtxinwit = [] - block.solve() - - # Now send the compact block with all transactions prefilled, and - # verify that we don't get disconnected. - comp_block = HeaderAndShortIDs() - comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=True) - msg = msg_cmpctblock(comp_block.to_p2p()) - test_node.send_and_ping(msg) - - # Check that the tip didn't advance - assert int(node.getbestblockhash(), 16) is not block.sha256 - test_node.sync_with_ping() - - # Helper for enabling cb announcements - # Send the sendcmpct request and sync headers - def request_cb_announcements(self, peer): - node = self.nodes[0] - tip = node.getbestblockhash() - peer.get_headers(locator=[int(tip, 16)], hashstop=0) - peer.send_and_ping(msg_sendcmpct(announce=True, version=2)) - - def test_compactblock_reconstruction_stalling_peer(self, stalling_peer, delivery_peer): - node = self.nodes[0] - assert len(self.utxos) - - def announce_cmpct_block(node, peer): - utxo = self.utxos.pop(0) - block = self.build_block_with_transactions(node, utxo, 5) - - cmpct_block = HeaderAndShortIDs() - cmpct_block.initialize_from_block(block) - msg = msg_cmpctblock(cmpct_block.to_p2p()) - peer.send_and_ping(msg) - with p2p_lock: - assert "getblocktxn" in peer.last_message - return block, cmpct_block - - block, cmpct_block = announce_cmpct_block(node, stalling_peer) - - for tx in block.vtx[1:]: - delivery_peer.send_message(msg_tx(tx)) - delivery_peer.sync_with_ping() - mempool = node.getrawmempool() - for tx in block.vtx[1:]: - assert tx.hash in mempool - - delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) - assert_equal(int(node.getbestblockhash(), 16), block.sha256) - - self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) - - # Now test that delivering an invalid compact block won't break relay - - block, cmpct_block = announce_cmpct_block(node, stalling_peer) - for tx in block.vtx[1:]: - delivery_peer.send_message(msg_tx(tx)) - delivery_peer.sync_with_ping() - - cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [CTxInWitness()] - 
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)] - - cmpct_block.use_witness = True - delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) - assert int(node.getbestblockhash(), 16) != block.sha256 - - msg = msg_no_witness_blocktxn() - msg.block_transactions.blockhash = block.sha256 - msg.block_transactions.transactions = block.vtx[1:] - stalling_peer.send_and_ping(msg) - assert_equal(int(node.getbestblockhash(), 16), block.sha256) - - def test_highbandwidth_mode_states_via_getpeerinfo(self): - # create new p2p connection for a fresh state w/o any prior sendcmpct messages sent - hb_test_node = self.nodes[0].add_p2p_connection(TestP2PConn()) - - # assert the RPC getpeerinfo boolean fields `bip152_hb_{to, from}` - # match the given parameters for the last peer of a given node - def assert_highbandwidth_states(node, hb_to, hb_from): - peerinfo = node.getpeerinfo()[-1] - assert_equal(peerinfo['bip152_hb_to'], hb_to) - assert_equal(peerinfo['bip152_hb_from'], hb_from) - - # initially, neither node has selected the other peer as high-bandwidth yet - assert_highbandwidth_states(self.nodes[0], hb_to=False, hb_from=False) - - # peer requests high-bandwidth mode by sending sendcmpct(1) - hb_test_node.send_and_ping(msg_sendcmpct(announce=True, version=2)) - assert_highbandwidth_states(self.nodes[0], hb_to=False, hb_from=True) - - # peer generates a block and sends it to node, which should - # select the peer as high-bandwidth (up to 3 peers according to BIP 152) - block = self.build_block_on_tip(self.nodes[0]) - hb_test_node.send_and_ping(msg_block(block)) - assert_highbandwidth_states(self.nodes[0], hb_to=True, hb_from=True) - - # peer requests low-bandwidth mode by sending sendcmpct(0) - hb_test_node.send_and_ping(msg_sendcmpct(announce=False, version=2)) - assert_highbandwidth_states(self.nodes[0], hb_to=True, hb_from=False) - - def test_compactblock_reconstruction_parallel_reconstruction(self, stalling_peer, delivery_peer, inbound_peer, outbound_peer): - """ All p2p connections are inbound except outbound_peer. We test that ultimate parallel slot - can only be taken by an outbound node unless prior attempts were done by an outbound - """ - node = self.nodes[0] - assert len(self.utxos) - - def announce_cmpct_block(node, peer, txn_count): - utxo = self.utxos.pop(0) - block = self.build_block_with_transactions(node, utxo, txn_count) - - cmpct_block = HeaderAndShortIDs() - cmpct_block.initialize_from_block(block) - msg = msg_cmpctblock(cmpct_block.to_p2p()) - peer.send_and_ping(msg) - with p2p_lock: - assert "getblocktxn" in peer.last_message - return block, cmpct_block - - for name, peer in [("delivery", delivery_peer), ("inbound", inbound_peer), ("outbound", outbound_peer)]: - self.log.info(f"Setting {name} as high bandwidth peer") - block, cmpct_block = announce_cmpct_block(node, peer, 1) - msg = msg_blocktxn() - msg.block_transactions.blockhash = block.sha256 - msg.block_transactions.transactions = block.vtx[1:] - peer.send_and_ping(msg) - assert_equal(int(node.getbestblockhash(), 16), block.sha256) - peer.clear_getblocktxn() - - # Test the simple parallel download case... 
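# (Editor's note) Per the docstring above: several peers may be asked to fill
# in the same block in parallel, but the final parallel getblocktxn slot is
# reserved for an outbound peer; a third inbound announcer is not queried.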
- for num_missing in [1, 5, 20]: - - # Remaining low-bandwidth peer is stalling_peer, who announces first - assert_equal([peer['bip152_hb_to'] for peer in node.getpeerinfo()], [False, True, True, True]) - - block, cmpct_block = announce_cmpct_block(node, stalling_peer, num_missing) - - delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) - with p2p_lock: - # The second peer to announce should still get a getblocktxn - assert "getblocktxn" in delivery_peer.last_message - assert int(node.getbestblockhash(), 16) != block.sha256 - - inbound_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) - with p2p_lock: - # The third inbound peer to announce should *not* get a getblocktxn - assert "getblocktxn" not in inbound_peer.last_message - assert int(node.getbestblockhash(), 16) != block.sha256 - - outbound_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) - with p2p_lock: - # The third peer to announce should get a getblocktxn if outbound - assert "getblocktxn" in outbound_peer.last_message - assert int(node.getbestblockhash(), 16) != block.sha256 - - # Second peer completes the compact block first - msg = msg_blocktxn() - msg.block_transactions.blockhash = block.sha256 - msg.block_transactions.transactions = block.vtx[1:] - delivery_peer.send_and_ping(msg) - assert_equal(int(node.getbestblockhash(), 16), block.sha256) - - # Nothing bad should happen if we get a late fill from the first peer... - stalling_peer.send_and_ping(msg) - self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) - - delivery_peer.clear_getblocktxn() - inbound_peer.clear_getblocktxn() - outbound_peer.clear_getblocktxn() - - - def run_test(self): - self.wallet = MiniWallet(self.nodes[0]) - - # Setup the p2p connections - self.segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn()) - self.additional_segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn()) - self.onemore_inbound_node = self.nodes[0].add_p2p_connection(TestP2PConn()) - self.outbound_node = self.nodes[0].add_outbound_p2p_connection(TestP2PConn(), p2p_idx=3, connection_type="outbound-full-relay") - - # We will need UTXOs to construct transactions in later tests. - self.make_utxos() - - assert softfork_active(self.nodes[0], "segwit") - - self.log.info("Testing SENDCMPCT p2p message... ") - self.test_sendcmpct(self.segwit_node) - self.test_sendcmpct(self.additional_segwit_node) - self.test_sendcmpct(self.onemore_inbound_node) - self.test_sendcmpct(self.outbound_node) - - self.log.info("Testing compactblock construction...") - self.test_compactblock_construction(self.segwit_node) - - self.log.info("Testing compactblock requests (segwit node)... 
") - self.test_compactblock_requests(self.segwit_node) - - self.log.info("Testing getblocktxn requests (segwit node)...") - self.test_getblocktxn_requests(self.segwit_node) - - self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...") - self.test_getblocktxn_handler(self.segwit_node) - - self.log.info("Testing compactblock requests/announcements not at chain tip...") - self.test_compactblocks_not_at_tip(self.segwit_node) - - self.log.info("Testing handling of low-work compact blocks...") - self.test_low_work_compactblocks(self.segwit_node) - - self.log.info("Testing handling of incorrect blocktxn responses...") - self.test_incorrect_blocktxn_response(self.segwit_node) - - self.log.info("Testing reconstructing compact blocks with a stalling peer...") - self.test_compactblock_reconstruction_stalling_peer(self.segwit_node, self.additional_segwit_node) - - self.log.info("Testing reconstructing compact blocks from multiple peers...") - self.test_compactblock_reconstruction_parallel_reconstruction(stalling_peer=self.segwit_node, inbound_peer=self.onemore_inbound_node, delivery_peer=self.additional_segwit_node, outbound_peer=self.outbound_node) - - # Test that if we submitblock to node1, we'll get a compact block - # announcement to all peers. - # (Post-segwit activation, blocks won't propagate from node0 to node1 - # automatically, so don't bother testing a block announced to node0.) - self.log.info("Testing end-to-end block relay...") - self.request_cb_announcements(self.segwit_node) - self.request_cb_announcements(self.additional_segwit_node) - self.test_end_to_end_block_relay([self.segwit_node, self.additional_segwit_node]) - - self.log.info("Testing handling of invalid compact blocks...") - self.test_invalid_tx_in_compactblock(self.segwit_node) - - self.log.info("Testing invalid index in cmpctblock message...") - self.test_invalid_cmpctblock_message() - - self.log.info("Testing high-bandwidth mode states via getpeerinfo...") - self.test_highbandwidth_mode_states_via_getpeerinfo() - - -if __name__ == '__main__': - CompactBlocksTest(__file__).main() +#!/usr/bin/env python3) +# Copyright (c) 2016-2022 The Bitcoin Core developers) +# Distributed under the MIT software license, see the accompanying) +# file COPYING or http://www.opensource.org/licenses/mit-license.php.) 
+"""Test compact blocks (BIP 152).""") +import random) +) +from test_framework.blocktools import () + COINBASE_MATURITY,) + NORMAL_GBT_REQUEST_PARAMS,) + add_witness_commitment,) + create_block,) +)) +from test_framework.messages import () + BlockTransactions,) + BlockTransactionsRequest,) + CBlock,) + CBlockHeader,) + CInv,) + COutPoint,) + CTransaction,) + CTxIn,) + CTxInWitness,) + CTxOut,) + from_hex,) + HeaderAndShortIDs,) + MSG_BLOCK,) + MSG_CMPCT_BLOCK,) + MSG_WITNESS_FLAG,) + P2PHeaderAndShortIDs,) + PrefilledTransaction,) + calculate_shortid,) + msg_block,) + msg_blocktxn,) + msg_cmpctblock,) + msg_getblocktxn,) + msg_getdata,) + msg_getheaders,) + msg_headers,) + msg_inv,) + msg_no_witness_block,) + msg_no_witness_blocktxn,) + msg_sendcmpct,) + msg_sendheaders,) + msg_tx,) + ser_uint256,) + tx_from_hex,) +)) +from test_framework.p2p import () + P2PInterface,) + p2p_lock,) +)) +from test_framework.script import () + CScript,) + OP_DROP,) + OP_TRUE,) +)) +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.util import () + assert_not_equal,) + assert_equal,) + softfork_active,) +)) +from test_framework.wallet import MiniWallet) +) +) +# TestP2PConn: A peer we use to send messages to bitcoind, and store responses.) +class TestP2PConn(P2PInterface):) + def __init__(self):) + super().__init__()) + self.last_sendcmpct = []) + self.block_announced = False) + # Store the hashes of blocks we've seen announced.) + # This is for synchronizing the p2p message traffic,) + # so we can eg wait until a particular block is announced.) + self.announced_blockhashes = set()) +) + def on_sendcmpct(self, message):) + self.last_sendcmpct.append(message)) +) + def on_cmpctblock(self, message):) + self.block_announced = True) + self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()) + self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)) +) + def on_headers(self, message):) + self.block_announced = True) + for x in self.last_message["headers"].headers:) + x.calc_sha256()) + self.announced_blockhashes.add(x.sha256)) +) + def on_inv(self, message):) + for x in self.last_message["inv"].inv:) + if x.type == MSG_BLOCK:) + self.block_announced = True) + self.announced_blockhashes.add(x.hash)) +) + # Requires caller to hold p2p_lock) + def received_block_announcement(self):) + return self.block_announced) +) + def clear_block_announcement(self):) + with p2p_lock:) + self.block_announced = False) + self.last_message.pop("inv", None)) + self.last_message.pop("headers", None)) + self.last_message.pop("cmpctblock", None)) +) + def clear_getblocktxn(self):) + with p2p_lock:) + self.last_message.pop("getblocktxn", None)) +) + def get_headers(self, locator, hashstop):) + msg = msg_getheaders()) + msg.locator.vHave = locator) + msg.hashstop = hashstop) + self.send_message(msg)) +) + def send_header_for_blocks(self, new_blocks):) + headers_message = msg_headers()) + headers_message.headers = [CBlockHeader(b) for b in new_blocks]) + self.send_message(headers_message)) +) + def request_headers_and_sync(self, locator, hashstop=0):) + self.clear_block_announcement()) + self.get_headers(locator, hashstop)) + self.wait_until(self.received_block_announcement, timeout=30)) + self.clear_block_announcement()) +) + # Block until a block announcement for a particular block hash is) + # received.) 
+ def wait_for_block_announcement(self, block_hash, timeout=30):
+ def received_hash():
+ return (block_hash in self.announced_blockhashes)
+ self.wait_until(received_hash, timeout=timeout)
+
+ def send_await_disconnect(self, message, timeout=30):
+ """Sends a message to the node and wait for disconnect.
+
+ This is used when we want to send a message into the node that we expect
+ will get us disconnected, eg an invalid block."""
+ self.send_message(message)
+ self.wait_for_disconnect(timeout=timeout)
+
+class CompactBlocksTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+ self.extra_args = [[
+ "-acceptnonstdtxn=1",
+ ]]
+ self.utxos = []
+
+ def build_block_on_tip(self, node):
+ block = create_block(tmpl=node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS))
+ block.solve()
+ return block
+
+ # Create 10 more anyone-can-spend utxo's for testing.
+ def make_utxos(self):
+ block = self.build_block_on_tip(self.nodes[0])
+ self.segwit_node.send_and_ping(msg_no_witness_block(block))
+ assert int(self.nodes[0].getbestblockhash(), 16) == block.sha256
+ self.generate(self.wallet, COINBASE_MATURITY)
+
+ total_value = block.vtx[0].vout[0].nValue
+ out_value = total_value // 10
+ tx = CTransaction()
+ tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
+ for _ in range(10):
+ tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
+ tx.rehash()
+
+ block2 = self.build_block_on_tip(self.nodes[0])
+ block2.vtx.append(tx)
+ block2.hashMerkleRoot = block2.calc_merkle_root()
+ block2.solve()
+ self.segwit_node.send_and_ping(msg_no_witness_block(block2))
+ assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
+ self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
+
+
+ # Test "sendcmpct" (between peers preferring the same version):
+ # - No compact block announcements unless sendcmpct is sent.
+ # - If sendcmpct is sent with version = 1, the message is ignored.
+ # - If sendcmpct is sent with version > 2, the message is ignored.
+ # - If sendcmpct is sent with boolean 0, then block announcements are not
+ # made with compact blocks.
+ # - If sendcmpct is then sent with boolean 1, then new block announcements
+ # are made with compact blocks.
+ def test_sendcmpct(self, test_node):
+ node = self.nodes[0]
+
+ # Make sure we get a SENDCMPCT message from our peer
+ def received_sendcmpct():
+ return (len(test_node.last_sendcmpct) > 0)
+ test_node.wait_until(received_sendcmpct, timeout=30)
+ with p2p_lock:
+ # Check that version 2 is received.
+ assert_equal(test_node.last_sendcmpct[0].version, 2)
+ test_node.last_sendcmpct = []
+
+ tip = int(node.getbestblockhash(), 16)
+
+ def check_announcement_of_new_block(node, peer, predicate):
+ peer.clear_block_announcement()
+ block_hash = int(self.generate(node, 1)[0], 16)
+ peer.wait_for_block_announcement(block_hash, timeout=30)
+ assert peer.block_announced
+
+ with p2p_lock:
+ assert predicate(peer), (
+ "block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
+ block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))
+
+ # We shouldn't get any block announcements via cmpctblock yet.
+ check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
+
+ # Try one more time, this time after requesting headers.
+        test_node.request_headers_and_sync(locator=[tip])
+        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message)
+
+        # Test a few ways of using sendcmpct that should NOT
+        # result in compact block announcements.
+        # Before each test, sync the headers chain.
+        test_node.request_headers_and_sync(locator=[tip])
+
+        # Now try a SENDCMPCT message with too-low version
+        test_node.send_and_ping(msg_sendcmpct(announce=True, version=1))
+        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
+
+        # Headers sync before next test.
+        test_node.request_headers_and_sync(locator=[tip])
+
+        # Now try a SENDCMPCT message with too-high version
+        test_node.send_and_ping(msg_sendcmpct(announce=True, version=3))
+        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
+
+        # Headers sync before next test.
+        test_node.request_headers_and_sync(locator=[tip])
+
+        # Now try a SENDCMPCT message with valid version, but announce=False
+        test_node.send_and_ping(msg_sendcmpct(announce=False, version=2))
+        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
+
+        # Headers sync before next test.
+        test_node.request_headers_and_sync(locator=[tip])
+
+        # Finally, try a SENDCMPCT message with announce=True
+        test_node.send_and_ping(msg_sendcmpct(announce=True, version=2))
+        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
+
+        # Try one more time (no headers sync should be needed!)
+        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
+
+        # Try one more time, after turning on sendheaders
+        test_node.send_and_ping(msg_sendheaders())
+        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
+
+        # Try one more time, after sending a version=1, announce=false message.
+        test_node.send_and_ping(msg_sendcmpct(announce=False, version=1))
+        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
+
+        # Now turn off announcements
+        test_node.send_and_ping(msg_sendcmpct(announce=False, version=2))
+        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message)
+
+    # This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
+    def test_invalid_cmpctblock_message(self):
+        self.generate(self.nodes[0], COINBASE_MATURITY + 1)
+        block = self.build_block_on_tip(self.nodes[0])
+
+        cmpct_block = P2PHeaderAndShortIDs()
+        cmpct_block.header = CBlockHeader(block)
+        cmpct_block.prefilled_txn_length = 1
+        # This index will be too high
+        prefilled_txn = PrefilledTransaction(1, block.vtx[0])
+        cmpct_block.prefilled_txn = [prefilled_txn]
+        self.segwit_node.send_await_disconnect(msg_cmpctblock(cmpct_block))
+        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
+
+    # Compare the generated shortids to what we expect based on BIP 152, given
+    # bitcoind's choice of nonce.
+    def test_compactblock_construction(self, test_node):
+        node = self.nodes[0]
+        # Generate a bunch of transactions.
+        self.generate(node, COINBASE_MATURITY + 1)
+        num_transactions = 25
+
+        segwit_tx_generated = False
+        for _ in range(num_transactions):
+            hex_tx = self.wallet.send_self_transfer(from_node=self.nodes[0])['hex']
+            tx = tx_from_hex(hex_tx)
+            if not tx.wit.is_null():
+                segwit_tx_generated = True
+
+        assert segwit_tx_generated  # check that our test is not broken
+
+        # Wait until we've seen the block announcement for the resulting tip
+        tip = int(node.getbestblockhash(), 16)
+        test_node.wait_for_block_announcement(tip)
+
+        # Make sure we will receive a fast-announce compact block
+        self.request_cb_announcements(test_node)
+
+        # Now mine a block, and look at the resulting compact block.
+        test_node.clear_block_announcement()
+        block_hash = int(self.generate(node, 1)[0], 16)
+
+        # Store the raw block in our internal format.
+        block = from_hex(CBlock(), node.getblock("%064x" % block_hash, False))
+        for tx in block.vtx:
+            tx.calc_sha256()
+        block.rehash()
+
+        # Wait until the block was announced (via compact blocks)
+        test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30)
+
+        # Now fetch and check the compact block
+        header_and_shortids = None
+        with p2p_lock:
+            # Convert the on-the-wire representation to absolute indexes
+            header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
+        self.check_compactblock_construction_from_block(header_and_shortids, block_hash, block)
+
+        # Now fetch the compact block using a normal non-announce getdata
+        test_node.clear_block_announcement()
+        inv = CInv(MSG_CMPCT_BLOCK, block_hash)
+        test_node.send_message(msg_getdata([inv]))
+
+        test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30)
+
+        # Now fetch and check the compact block
+        header_and_shortids = None
+        with p2p_lock:
+            # Convert the on-the-wire representation to absolute indexes
+            header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
+        self.check_compactblock_construction_from_block(header_and_shortids, block_hash, block)
+
+    def check_compactblock_construction_from_block(self, header_and_shortids, block_hash, block):
+        # Check that we got the right block!
+        header_and_shortids.header.calc_sha256()
+        assert_equal(header_and_shortids.header.sha256, block_hash)
+
+        # Make sure the prefilled_txn appears to have included the coinbase
+        assert len(header_and_shortids.prefilled_txn) >= 1
+        assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
+
+        # Check that all prefilled_txn entries match what's in the block.
+        for entry in header_and_shortids.prefilled_txn:
+            entry.tx.calc_sha256()
+            # This checks the non-witness parts of the tx agree
+            assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
+
+            # And this checks the witness
+            wtxid = entry.tx.calc_sha256(True)
+            assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
+
+        # Check that the cmpctblock message announced all the transactions.
+        assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
+
+        # And now check that all the shortids are as expected as well.
+        # Determine the siphash keys to use.
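+        # (Per BIP 152, the two siphash keys are derived from the single-SHA256
+        # of the serialized block header with the nonce appended; each short ID
+        # is the low 6 bytes of SipHash-2-4 of the transaction's wtxid under
+        # those keys.)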
+        [k0, k1] = header_and_shortids.get_siphash_keys()
+
+        index = 0
+        while index < len(block.vtx):
+            if (len(header_and_shortids.prefilled_txn) > 0 and
+                    header_and_shortids.prefilled_txn[0].index == index):
+                # Already checked prefilled transactions above
+                header_and_shortids.prefilled_txn.pop(0)
+            else:
+                tx_hash = block.vtx[index].calc_sha256(True)
+                shortid = calculate_shortid(k0, k1, tx_hash)
+                assert_equal(shortid, header_and_shortids.shortids[0])
+                header_and_shortids.shortids.pop(0)
+            index += 1
+
+    # Test that bitcoind requests compact blocks when we announce new blocks
+    # via header or inv, and that responding to getblocktxn causes the block
+    # to be successfully reconstructed.
+    def test_compactblock_requests(self, test_node):
+        node = self.nodes[0]
+        # Try announcing a block with an inv or header, expect a compactblock
+        # request
+        for announce in ["inv", "header"]:
+            block = self.build_block_on_tip(node)
+
+            if announce == "inv":
+                test_node.send_message(msg_inv([CInv(MSG_BLOCK, block.sha256)]))
+                test_node.wait_for_getheaders(timeout=30)
+                test_node.send_header_for_blocks([block])
+            else:
+                test_node.send_header_for_blocks([block])
+            test_node.wait_for_getdata([block.sha256], timeout=30)
+            assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
+
+            # Send back a compactblock message that omits the coinbase
+            comp_block = HeaderAndShortIDs()
+            comp_block.header = CBlockHeader(block)
+            comp_block.nonce = 0
+            [k0, k1] = comp_block.get_siphash_keys()
+            coinbase_hash = block.vtx[0].calc_sha256(True)
+            comp_block.shortids = [calculate_shortid(k0, k1, coinbase_hash)]
+            test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+            assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
+            # Expect a getblocktxn message.
+            with p2p_lock:
+                assert "getblocktxn" in test_node.last_message
+                absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
+            assert_equal(absolute_indexes, [0])  # should be a coinbase request
+
+            # Send the coinbase, and verify that the tip advances.
+            msg = msg_blocktxn()
+            msg.block_transactions.blockhash = block.sha256
+            msg.block_transactions.transactions = [block.vtx[0]]
+            test_node.send_and_ping(msg)
+            assert_equal(int(node.getbestblockhash(), 16), block.sha256)
+
+    # Create a chain of transactions from given utxo, and add to a new block.
+    def build_block_with_transactions(self, node, utxo, num_transactions):
+        block = self.build_block_on_tip(node)
+
+        for _ in range(num_transactions):
+            tx = CTransaction()
+            tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
+            tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
+            tx.rehash()
+            utxo = [tx.sha256, 0, tx.vout[0].nValue]
+            block.vtx.append(tx)
+
+        block.hashMerkleRoot = block.calc_merkle_root()
+        block.solve()
+        return block
+
+    # Test that we only receive getblocktxn requests for transactions that the
+    # node needs, and that responding to them causes the block to be
+    # reconstructed.
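+    # (Note: per BIP 152, getblocktxn carries the missing-transaction indexes
+    # differentially encoded; the framework's to_absolute() used below converts
+    # them back to absolute positions in the block.)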
+    def test_getblocktxn_requests(self, test_node):
+        node = self.nodes[0]
+
+        def test_getblocktxn_response(compact_block, peer, expected_result):
+            msg = msg_cmpctblock(compact_block.to_p2p())
+            peer.send_and_ping(msg)
+            with p2p_lock:
+                assert "getblocktxn" in peer.last_message
+                absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute()
+            assert_equal(absolute_indexes, expected_result)
+
+        def test_tip_after_message(node, peer, msg, tip):
+            peer.send_and_ping(msg)
+            assert_equal(int(node.getbestblockhash(), 16), tip)
+
+        # First try announcing compactblocks that won't reconstruct, and verify
+        # that we receive getblocktxn messages back.
+        utxo = self.utxos.pop(0)
+
+        block = self.build_block_with_transactions(node, utxo, 5)
+        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+        comp_block = HeaderAndShortIDs()
+        comp_block.initialize_from_block(block, use_witness=True)
+
+        test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])
+
+        msg_bt = msg_blocktxn()  # serialize with witnesses
+        msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
+        test_tip_after_message(node, test_node, msg_bt, block.sha256)
+
+        utxo = self.utxos.pop(0)
+        block = self.build_block_with_transactions(node, utxo, 5)
+        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+
+        # Now try interspersing the prefilled transactions
+        comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=True)
+        test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
+        msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
+        test_tip_after_message(node, test_node, msg_bt, block.sha256)
+
+        # Now try giving one transaction ahead of time.
+        utxo = self.utxos.pop(0)
+        block = self.build_block_with_transactions(node, utxo, 5)
+        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+        test_node.send_and_ping(msg_tx(block.vtx[1]))
+        assert block.vtx[1].hash in node.getrawmempool()
+
+        # Prefill 4 out of the 6 transactions, and verify that only the one
+        # that was not in the mempool is requested.
+        comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=True)
+        test_getblocktxn_response(comp_block, test_node, [5])
+
+        msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
+        test_tip_after_message(node, test_node, msg_bt, block.sha256)
+
+        # Now provide all transactions to the node before the block is
+        # announced and verify reconstruction happens immediately.
+        utxo = self.utxos.pop(0)
+        block = self.build_block_with_transactions(node, utxo, 10)
+        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+        for tx in block.vtx[1:]:
+            test_node.send_message(msg_tx(tx))
+        test_node.sync_with_ping()
+        # Make sure all transactions were accepted.
+        mempool = node.getrawmempool()
+        for tx in block.vtx[1:]:
+            assert tx.hash in mempool
+
+        # Clear out last request.
+        with p2p_lock:
+            test_node.last_message.pop("getblocktxn", None)
+
+        # Send compact block
+        comp_block.initialize_from_block(block, prefill_list=[0], use_witness=True)
+        test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
+        with p2p_lock:
+            # Shouldn't have gotten a request for any transaction
+            assert "getblocktxn" not in test_node.last_message
+
+    # Incorrectly responding to a getblocktxn shouldn't cause the block to be
+    # permanently failed.
+    def test_incorrect_blocktxn_response(self, test_node):
+        node = self.nodes[0]
+        utxo = self.utxos.pop(0)
+
+        block = self.build_block_with_transactions(node, utxo, 10)
+        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+        # Relay the first 5 transactions from the block in advance
+        for tx in block.vtx[1:6]:
+            test_node.send_message(msg_tx(tx))
+        test_node.sync_with_ping()
+        # Make sure all transactions were accepted.
+        mempool = node.getrawmempool()
+        for tx in block.vtx[1:6]:
+            assert tx.hash in mempool
+
+        # Send compact block
+        comp_block = HeaderAndShortIDs()
+        comp_block.initialize_from_block(block, prefill_list=[0], use_witness=True)
+        test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+        absolute_indexes = []
+        with p2p_lock:
+            assert "getblocktxn" in test_node.last_message
+            absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
+        assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
+
+        # Now give an incorrect response.
+        # Note that it's possible for bitcoind to be smart enough to know we're
+        # lying, since it could check to see if the shortid matches what we're
+        # sending, and eg disconnect us for misbehavior. If that behavior
+        # change was made, we could just modify this test by having a
+        # different peer provide the block further down, so that we're still
+        # verifying that the block isn't marked bad permanently. This is good
+        # enough for now.
+        msg = msg_blocktxn()
+        msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
+        test_node.send_and_ping(msg)
+
+        # Tip should not have updated
+        assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
+
+        # We should receive a getdata request
+        test_node.wait_for_getdata([block.sha256], timeout=10)
+        assert test_node.last_message["getdata"].inv[0].type == MSG_BLOCK or \
+            test_node.last_message["getdata"].inv[0].type == MSG_BLOCK | MSG_WITNESS_FLAG
+
+        # Deliver the block
+        test_node.send_and_ping(msg_block(block))
+        assert_equal(int(node.getbestblockhash(), 16), block.sha256)
+
+    def test_getblocktxn_handler(self, test_node):
+        node = self.nodes[0]
+        # bitcoind will not send blocktxn responses for blocks whose height is
+        # more than 10 blocks deep.
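+        # (Such deeper requests are answered with a full block instead, which
+        # the end of this test exercises.)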
+        MAX_GETBLOCKTXN_DEPTH = 10
+        chain_height = node.getblockcount()
+        current_height = chain_height
+        while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
+            block_hash = node.getblockhash(current_height)
+            block = from_hex(CBlock(), node.getblock(block_hash, False))
+
+            msg = msg_getblocktxn()
+            msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
+            num_to_request = random.randint(1, len(block.vtx))
+            msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
+            test_node.send_message(msg)
+            test_node.wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10)
+
+            [tx.calc_sha256() for tx in block.vtx]
+            with p2p_lock:
+                assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16))
+                all_indices = msg.block_txn_request.to_absolute()
+                for index in all_indices:
+                    tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0)
+                    tx.calc_sha256()
+                    assert_equal(tx.sha256, block.vtx[index].sha256)
+                    # Check that the witness matches
+                    assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
+                test_node.last_message.pop("blocktxn", None)
+            current_height -= 1
+
+        # Next request should send a full block response, as we're past the
+        # allowed depth for a blocktxn response.
+        block_hash = node.getblockhash(current_height)
+        msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
+        with p2p_lock:
+            test_node.last_message.pop("block", None)
+            test_node.last_message.pop("blocktxn", None)
+        test_node.send_and_ping(msg)
+        with p2p_lock:
+            test_node.last_message["block"].block.calc_sha256()
+            assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
+            assert "blocktxn" not in test_node.last_message
+
+        # Request with out-of-bounds tx index results in disconnect
+        bad_peer = self.nodes[0].add_p2p_connection(TestP2PConn())
+        block_hash = node.getblockhash(chain_height)
+        block = from_hex(CBlock(), node.getblock(block_hash, False))
+        msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [len(block.vtx)])
+        with node.assert_debug_log(['getblocktxn with out-of-bounds tx indices']):
+            bad_peer.send_message(msg)
+        bad_peer.wait_for_disconnect()
+
+    def test_low_work_compactblocks(self, test_node):
+        # A compactblock with insufficient work won't get its header included
+        node = self.nodes[0]
+        hashPrevBlock = int(node.getblockhash(node.getblockcount() - 150), 16)
+        block = self.build_block_on_tip(node)
+        block.hashPrevBlock = hashPrevBlock
+        block.solve()
+
+        comp_block = HeaderAndShortIDs()
+        comp_block.initialize_from_block(block)
+        with self.nodes[0].assert_debug_log(['[net] Ignoring low-work compact block from peer 0']):
+            test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+
+        tips = node.getchaintips()
+        found = False
+        for x in tips:
+            if x["hash"] == block.hash:
+                found = True
+                break
+        assert not found
+
+    def test_compactblocks_not_at_tip(self, test_node):
+        node = self.nodes[0]
+        # Test that requesting old compactblocks doesn't work.
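+        # (As this test demonstrates, the node only serves cmpctblock for
+        # blocks near the tip; older getdata(MSG_CMPCT_BLOCK) requests are
+        # answered with a full block instead.)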
+        MAX_CMPCTBLOCK_DEPTH = 5
+        new_blocks = []
+        for _ in range(MAX_CMPCTBLOCK_DEPTH + 1):
+            test_node.clear_block_announcement()
+            new_blocks.append(self.generate(node, 1)[0])
+            test_node.wait_until(test_node.received_block_announcement, timeout=30)
+
+        test_node.clear_block_announcement()
+        test_node.send_message(msg_getdata([CInv(MSG_CMPCT_BLOCK, int(new_blocks[0], 16))]))
+        test_node.wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30)
+
+        test_node.clear_block_announcement()
+        self.generate(node, 1)
+        test_node.wait_until(test_node.received_block_announcement, timeout=30)
+        test_node.clear_block_announcement()
+        with p2p_lock:
+            test_node.last_message.pop("block", None)
+        test_node.send_message(msg_getdata([CInv(MSG_CMPCT_BLOCK, int(new_blocks[0], 16))]))
+        test_node.wait_until(lambda: "block" in test_node.last_message, timeout=30)
+        with p2p_lock:
+            test_node.last_message["block"].block.calc_sha256()
+            assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))
+
+        # Generate an old compactblock, and verify that it's not accepted.
+        cur_height = node.getblockcount()
+        hashPrevBlock = int(node.getblockhash(cur_height - 5), 16)
+        block = self.build_block_on_tip(node)
+        block.hashPrevBlock = hashPrevBlock
+        block.solve()
+
+        comp_block = HeaderAndShortIDs()
+        comp_block.initialize_from_block(block)
+        test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
+
+        tips = node.getchaintips()
+        found = False
+        for x in tips:
+            if x["hash"] == block.hash:
+                assert_equal(x["status"], "headers-only")
+                found = True
+                break
+        assert found
+
+        # Requesting this block via getblocktxn should silently fail
+        # (to avoid fingerprinting attacks).
+        msg = msg_getblocktxn()
+        msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
+        with p2p_lock:
+            test_node.last_message.pop("blocktxn", None)
+        test_node.send_and_ping(msg)
+        with p2p_lock:
+            assert "blocktxn" not in test_node.last_message
+
+    def test_end_to_end_block_relay(self, listeners):
+        node = self.nodes[0]
+        utxo = self.utxos.pop(0)
+
+        block = self.build_block_with_transactions(node, utxo, 10)
+
+        [l.clear_block_announcement() for l in listeners]
+
+        # serialize without witness (this block has no witnesses anyway).
+        # TODO: repeat this test with witness tx's to a segwit node.
+        node.submitblock(block.serialize().hex())
+
+        for l in listeners:
+            l.wait_until(lambda: "cmpctblock" in l.last_message, timeout=30)
+        with p2p_lock:
+            for l in listeners:
+                l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
+                assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256)
+
+    # Test that we don't get disconnected if we relay a compact block with valid header,
+    # but invalid transactions.
+    def test_invalid_tx_in_compactblock(self, test_node):
+        node = self.nodes[0]
+        assert len(self.utxos)
+        utxo = self.utxos[0]
+
+        block = self.build_block_with_transactions(node, utxo, 5)
+        del block.vtx[3]
+        block.hashMerkleRoot = block.calc_merkle_root()
+        # Drop the coinbase witness but include the witness commitment.
+        add_witness_commitment(block)
+        block.vtx[0].wit.vtxinwit = []
+        block.solve()
+
+        # Now send the compact block with all transactions prefilled, and
+        # verify that we don't get disconnected.
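+        # (The header itself is valid and has sufficient work, so relaying the
+        # block is not treated as misbehaviour; only the transaction data
+        # fails validation.)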
+        comp_block = HeaderAndShortIDs()
+        comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=True)
+        msg = msg_cmpctblock(comp_block.to_p2p())
+        test_node.send_and_ping(msg)
+
+        # Check that the tip didn't advance
+        assert_not_equal(int(node.getbestblockhash(), 16), block.sha256)
+        test_node.sync_with_ping()
+
+    # Helper for enabling cb announcements
+    # Send the sendcmpct request and sync headers
+    def request_cb_announcements(self, peer):
+        node = self.nodes[0]
+        tip = node.getbestblockhash()
+        peer.get_headers(locator=[int(tip, 16)], hashstop=0)
+        peer.send_and_ping(msg_sendcmpct(announce=True, version=2))
+
+    def test_compactblock_reconstruction_stalling_peer(self, stalling_peer, delivery_peer):
+        node = self.nodes[0]
+        assert len(self.utxos)
+
+        def announce_cmpct_block(node, peer):
+            utxo = self.utxos.pop(0)
+            block = self.build_block_with_transactions(node, utxo, 5)
+
+            cmpct_block = HeaderAndShortIDs()
+            cmpct_block.initialize_from_block(block)
+            msg = msg_cmpctblock(cmpct_block.to_p2p())
+            peer.send_and_ping(msg)
+            with p2p_lock:
+                assert "getblocktxn" in peer.last_message
+            return block, cmpct_block
+
+        block, cmpct_block = announce_cmpct_block(node, stalling_peer)
+
+        for tx in block.vtx[1:]:
+            delivery_peer.send_message(msg_tx(tx))
+        delivery_peer.sync_with_ping()
+        mempool = node.getrawmempool()
+        for tx in block.vtx[1:]:
+            assert tx.hash in mempool
+
+        delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
+        assert_equal(int(node.getbestblockhash(), 16), block.sha256)
+
+        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+
+        # Now test that delivering an invalid compact block won't break relay
+
+        block, cmpct_block = announce_cmpct_block(node, stalling_peer)
+        for tx in block.vtx[1:]:
+            delivery_peer.send_message(msg_tx(tx))
+        delivery_peer.sync_with_ping()
+
+        cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [CTxInWitness()]
+        cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
+
+        cmpct_block.use_witness = True
+        delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
+        assert_not_equal(int(node.getbestblockhash(), 16), block.sha256)
+
+        msg = msg_no_witness_blocktxn()
+        msg.block_transactions.blockhash = block.sha256
+        msg.block_transactions.transactions = block.vtx[1:]
+        stalling_peer.send_and_ping(msg)
+        assert_equal(int(node.getbestblockhash(), 16), block.sha256)
+
+    def test_highbandwidth_mode_states_via_getpeerinfo(self):
+        # create new p2p connection for a fresh state w/o any prior sendcmpct messages sent
+        hb_test_node = self.nodes[0].add_p2p_connection(TestP2PConn())
+
+        # assert the RPC getpeerinfo boolean fields `bip152_hb_{to, from}`
+        # match the given parameters for the last peer of a given node
+        def assert_highbandwidth_states(node, hb_to, hb_from):
+            peerinfo = node.getpeerinfo()[-1]
+            assert_equal(peerinfo['bip152_hb_to'], hb_to)
+            assert_equal(peerinfo['bip152_hb_from'], hb_from)
+
+        # initially, neither node has selected the other peer as high-bandwidth yet
+        assert_highbandwidth_states(self.nodes[0], hb_to=False, hb_from=False)
+
+        # peer requests high-bandwidth mode by sending sendcmpct(1)
+        hb_test_node.send_and_ping(msg_sendcmpct(announce=True, version=2))
+        assert_highbandwidth_states(self.nodes[0], hb_to=False, hb_from=True)
+
+        # peer generates a block and sends it to node, which should
+        # select the peer as high-bandwidth (up to 3 peers according to BIP 152)
+        block = self.build_block_on_tip(self.nodes[0])
+        hb_test_node.send_and_ping(msg_block(block))
+        assert_highbandwidth_states(self.nodes[0], hb_to=True, hb_from=True)
+
+        # peer requests low-bandwidth mode by sending sendcmpct(0)
+        hb_test_node.send_and_ping(msg_sendcmpct(announce=False, version=2))
+        assert_highbandwidth_states(self.nodes[0], hb_to=True, hb_from=False)
+
+    def test_compactblock_reconstruction_parallel_reconstruction(self, stalling_peer, delivery_peer, inbound_peer, outbound_peer):
+        """All p2p connections are inbound except outbound_peer. Test that the
+        final parallel download slot can only be taken by an outbound peer,
+        unless prior attempts were made by outbound peers.
+        """
+        node = self.nodes[0]
+        assert len(self.utxos)
+
+        def announce_cmpct_block(node, peer, txn_count):
+            utxo = self.utxos.pop(0)
+            block = self.build_block_with_transactions(node, utxo, txn_count)
+
+            cmpct_block = HeaderAndShortIDs()
+            cmpct_block.initialize_from_block(block)
+            msg = msg_cmpctblock(cmpct_block.to_p2p())
+            peer.send_and_ping(msg)
+            with p2p_lock:
+                assert "getblocktxn" in peer.last_message
+            return block, cmpct_block
+
+        for name, peer in [("delivery", delivery_peer), ("inbound", inbound_peer), ("outbound", outbound_peer)]:
+            self.log.info(f"Setting {name} as high bandwidth peer")
+            block, cmpct_block = announce_cmpct_block(node, peer, 1)
+            msg = msg_blocktxn()
+            msg.block_transactions.blockhash = block.sha256
+            msg.block_transactions.transactions = block.vtx[1:]
+            peer.send_and_ping(msg)
+            assert_equal(int(node.getbestblockhash(), 16), block.sha256)
+            peer.clear_getblocktxn()
+
+        # Test the simple parallel download case...
+        for num_missing in [1, 5, 20]:
+
+            # Remaining low-bandwidth peer is stalling_peer, who announces first
+            assert_equal([peer['bip152_hb_to'] for peer in node.getpeerinfo()], [False, True, True, True])
+
+            block, cmpct_block = announce_cmpct_block(node, stalling_peer, num_missing)
+
+            delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
+            with p2p_lock:
+                # The second peer to announce should still get a getblocktxn
+                assert "getblocktxn" in delivery_peer.last_message
+            assert_not_equal(int(node.getbestblockhash(), 16), block.sha256)
+
+            inbound_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
+            with p2p_lock:
+                # The third inbound peer to announce should *not* get a getblocktxn
+                assert "getblocktxn" not in inbound_peer.last_message
+            assert_not_equal(int(node.getbestblockhash(), 16), block.sha256)
+
+            outbound_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
+            with p2p_lock:
+                # The third peer to announce should get a getblocktxn if outbound
+                assert "getblocktxn" in outbound_peer.last_message
+            assert_not_equal(int(node.getbestblockhash(), 16), block.sha256)
+
+            # Second peer completes the compact block first
+            msg = msg_blocktxn()
+            msg.block_transactions.blockhash = block.sha256
+            msg.block_transactions.transactions = block.vtx[1:]
+            delivery_peer.send_and_ping(msg)
+            assert_equal(int(node.getbestblockhash(), 16), block.sha256)
+
+            # Nothing bad should happen if we get a late fill from the first peer...
+            stalling_peer.send_and_ping(msg)
+            self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+
+            delivery_peer.clear_getblocktxn()
+            inbound_peer.clear_getblocktxn()
+            outbound_peer.clear_getblocktxn()
+
+
+    def run_test(self):
+        self.wallet = MiniWallet(self.nodes[0])
+
+        # Setup the p2p connections
+        self.segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn())
+        self.additional_segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn())
+        self.onemore_inbound_node = self.nodes[0].add_p2p_connection(TestP2PConn())
+        self.outbound_node = self.nodes[0].add_outbound_p2p_connection(TestP2PConn(), p2p_idx=3, connection_type="outbound-full-relay")
+
+        # We will need UTXOs to construct transactions in later tests.
+        self.make_utxos()
+
+        assert softfork_active(self.nodes[0], "segwit")
+
+        self.log.info("Testing SENDCMPCT p2p message... ")
+        self.test_sendcmpct(self.segwit_node)
+        self.test_sendcmpct(self.additional_segwit_node)
+        self.test_sendcmpct(self.onemore_inbound_node)
+        self.test_sendcmpct(self.outbound_node)
+
+        self.log.info("Testing compactblock construction...")
+        self.test_compactblock_construction(self.segwit_node)
+
+        self.log.info("Testing compactblock requests (segwit node)... ")
+        self.test_compactblock_requests(self.segwit_node)
+
+        self.log.info("Testing getblocktxn requests (segwit node)...")
+        self.test_getblocktxn_requests(self.segwit_node)
+
+        self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...")
+        self.test_getblocktxn_handler(self.segwit_node)
+
+        self.log.info("Testing compactblock requests/announcements not at chain tip...")
+        self.test_compactblocks_not_at_tip(self.segwit_node)
+
+        self.log.info("Testing handling of low-work compact blocks...")
+        self.test_low_work_compactblocks(self.segwit_node)
+
+        self.log.info("Testing handling of incorrect blocktxn responses...")
+        self.test_incorrect_blocktxn_response(self.segwit_node)
+
+        self.log.info("Testing reconstructing compact blocks with a stalling peer...")
+        self.test_compactblock_reconstruction_stalling_peer(self.segwit_node, self.additional_segwit_node)
+
+        self.log.info("Testing reconstructing compact blocks from multiple peers...")
+        self.test_compactblock_reconstruction_parallel_reconstruction(stalling_peer=self.segwit_node, inbound_peer=self.onemore_inbound_node, delivery_peer=self.additional_segwit_node, outbound_peer=self.outbound_node)
+
+        # Test that if we submitblock to node1, we'll get a compact block
+        # announcement to all peers.
+        # (Post-segwit activation, blocks won't propagate from node0 to node1
+        # automatically, so don't bother testing a block announced to node0.)
+        self.log.info("Testing end-to-end block relay...")
+        self.request_cb_announcements(self.segwit_node)
+        self.request_cb_announcements(self.additional_segwit_node)
+        self.test_end_to_end_block_relay([self.segwit_node, self.additional_segwit_node])
+
+        self.log.info("Testing handling of invalid compact blocks...")
+        self.test_invalid_tx_in_compactblock(self.segwit_node)
+
+        self.log.info("Testing invalid index in cmpctblock message...")
+        self.test_invalid_cmpctblock_message()
+
+        self.log.info("Testing high-bandwidth mode states via getpeerinfo...")
+        self.test_highbandwidth_mode_states_via_getpeerinfo()
+
+
+if __name__ == '__main__':
+    CompactBlocksTest(__file__).main()
diff --git a/test/functional/p2p_getaddr_caching.py b/test/functional/p2p_getaddr_caching.py
index 3dce1c5d946082..5955f963ddc185 100755
--- a/test/functional/p2p_getaddr_caching.py
+++ b/test/functional/p2p_getaddr_caching.py
@@ -1,123 +1,123 @@
-#!/usr/bin/env python3
-# Copyright (c) 2020-2022 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test addr response caching"""
-
-import time
-
-from test_framework.p2p import (
-    P2PInterface,
-    p2p_lock
-)
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (
-    assert_not_equal,
-    assert_equal,
-    p2p_port,
-)
-
-# As defined in net_processing.
-MAX_ADDR_TO_SEND = 1000
-MAX_PCT_ADDR_TO_SEND = 23
-
-
-class AddrReceiver(P2PInterface):
-
-    def __init__(self):
-        super().__init__()
-        self.received_addrs = None
-
-    def get_received_addrs(self):
-        with p2p_lock:
-            return self.received_addrs
-
-    def on_addr(self, message):
-        self.received_addrs = []
-        for addr in message.addrs:
-            self.received_addrs.append(addr.ip)
-
-    def addr_received(self):
-        return self.received_addrs is not None
-
-
-class AddrTest(BitcoinTestFramework):
-    def set_test_params(self):
-        self.num_nodes = 1
-        # Use some of the remaining p2p ports for the onion binds.
-        self.onion_port1 = p2p_port(self.num_nodes)
-        self.onion_port2 = p2p_port(self.num_nodes + 1)
-        self.extra_args = [
-            [f"-bind=127.0.0.1:{self.onion_port1}=onion", f"-bind=127.0.0.1:{self.onion_port2}=onion"],
-        ]
-
-    def run_test(self):
-        self.log.info('Fill peer AddrMan with a lot of records')
-        for i in range(10000):
-            first_octet = i >> 8
-            second_octet = i % 256
-            a = "{}.{}.1.1".format(first_octet, second_octet)
-            self.nodes[0].addpeeraddress(a, 8333)
-
-        # Need to make sure we hit MAX_ADDR_TO_SEND records in the addr response later because
-        # only a fraction of all known addresses can be cached and returned.
-        assert len(self.nodes[0].getnodeaddresses(0)) > int(MAX_ADDR_TO_SEND / (MAX_PCT_ADDR_TO_SEND / 100))
-
-        last_response_on_local_bind = None
-        last_response_on_onion_bind1 = None
-        last_response_on_onion_bind2 = None
-        self.log.info('Send many addr requests within short time to receive same response')
-        N = 5
-        cur_mock_time = int(time.time())
-        for i in range(N):
-            addr_receiver_local = self.nodes[0].add_p2p_connection(AddrReceiver())
-            addr_receiver_onion1 = self.nodes[0].add_p2p_connection(AddrReceiver(), dstport=self.onion_port1)
-            addr_receiver_onion2 = self.nodes[0].add_p2p_connection(AddrReceiver(), dstport=self.onion_port2)
-
-            # Trigger response
-            cur_mock_time += 5 * 60
-            self.nodes[0].setmocktime(cur_mock_time)
-            addr_receiver_local.wait_until(addr_receiver_local.addr_received)
-            addr_receiver_onion1.wait_until(addr_receiver_onion1.addr_received)
-            addr_receiver_onion2.wait_until(addr_receiver_onion2.addr_received)
-
-            if i > 0:
-                # Responses from different binds should be unique
-                assert last_response_on_local_bind != addr_receiver_onion1.get_received_addrs()
-                assert last_response_on_local_bind != addr_receiver_onion2.get_received_addrs()
-                assert last_response_on_onion_bind1 != addr_receiver_onion2.get_received_addrs()
-                # Responses on from the same bind should be the same
-                assert_equal(last_response_on_local_bind, addr_receiver_local.get_received_addrs())
-                assert_equal(last_response_on_onion_bind1, addr_receiver_onion1.get_received_addrs())
-                assert_equal(last_response_on_onion_bind2, addr_receiver_onion2.get_received_addrs())
-
-            last_response_on_local_bind = addr_receiver_local.get_received_addrs()
-            last_response_on_onion_bind1 = addr_receiver_onion1.get_received_addrs()
-            last_response_on_onion_bind2 = addr_receiver_onion2.get_received_addrs()
-
-        for response in [last_response_on_local_bind, last_response_on_onion_bind1, last_response_on_onion_bind2]:
-            assert_equal(len(response), MAX_ADDR_TO_SEND)
-
-        cur_mock_time += 3 * 24 * 60 * 60
-        self.nodes[0].setmocktime(cur_mock_time)
-
-        self.log.info('After time passed, see a new response to addr request')
-        addr_receiver_local = self.nodes[0].add_p2p_connection(AddrReceiver())
-        addr_receiver_onion1 = self.nodes[0].add_p2p_connection(AddrReceiver(), dstport=self.onion_port1)
-        addr_receiver_onion2 = self.nodes[0].add_p2p_connection(AddrReceiver(), dstport=self.onion_port2)
-
-        # Trigger response
-        cur_mock_time += 5 * 60
-        self.nodes[0].setmocktime(cur_mock_time)
-        addr_receiver_local.wait_until(addr_receiver_local.addr_received)
-        addr_receiver_onion1.wait_until(addr_receiver_onion1.addr_received)
-        addr_receiver_onion2.wait_until(addr_receiver_onion2.addr_received)
-
-        # new response is different
-        assert set(last_response_on_local_bind) != set(addr_receiver_local.get_received_addrs())
-        assert set(last_response_on_onion_bind1) != set(addr_receiver_onion1.get_received_addrs())
-        assert set(last_response_on_onion_bind2) != set(addr_receiver_onion2.get_received_addrs())
-
-
-if __name__ == '__main__':
-    AddrTest(__file__).main()
+#!/usr/bin/env python3
+# Copyright (c) 2020-2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test addr response caching""") +) +import time) +) +from test_framework.p2p import () + P2PInterface,) + p2p_lock) +)) +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.util import () + assert_not_equal,) + assert_equal,) + p2p_port,) +)) +) +# As defined in net_processing.) +MAX_ADDR_TO_SEND = 1000) +MAX_PCT_ADDR_TO_SEND = 23) +) +) +class AddrReceiver(P2PInterface):) +) + def __init__(self):) + super().__init__()) + self.received_addrs = None) +) + def get_received_addrs(self):) + with p2p_lock:) + return self.received_addrs) +) + def on_addr(self, message):) + self.received_addrs = []) + for addr in message.addrs:) + self.received_addrs.append(addr.ip)) +) + def addr_received(self):) + return self.received_addrs is not None) +) +) +class AddrTest(BitcoinTestFramework):) + def set_test_params(self):) + self.num_nodes = 1) + # Use some of the remaining p2p ports for the onion binds.) + self.onion_port1 = p2p_port(self.num_nodes)) + self.onion_port2 = p2p_port(self.num_nodes + 1)) + self.extra_args = [) + [f"-bind=127.0.0.1:{self.onion_port1}=onion", f"-bind=127.0.0.1:{self.onion_port2}=onion"],) + ]) +) + def run_test(self):) + self.log.info('Fill peer AddrMan with a lot of records')) + for i in range(10000):) + first_octet = i >> 8) + second_octet = i % 256) + a = "{}.{}.1.1".format(first_octet, second_octet)) + self.nodes[0].addpeeraddress(a, 8333)) +) + # Need to make sure we hit MAX_ADDR_TO_SEND records in the addr response later because) + # only a fraction of all known addresses can be cached and returned.) + assert len(self.nodes[0].getnodeaddresses(0)) > int(MAX_ADDR_TO_SEND / (MAX_PCT_ADDR_TO_SEND / 100))) +) + last_response_on_local_bind = None) + last_response_on_onion_bind1 = None) + last_response_on_onion_bind2 = None) + self.log.info('Send many addr requests within short time to receive same response')) + N = 5) + cur_mock_time = int(time.time())) + for i in range(N):) + addr_receiver_local = self.nodes[0].add_p2p_connection(AddrReceiver())) + addr_receiver_onion1 = self.nodes[0].add_p2p_connection(AddrReceiver(), dstport=self.onion_port1)) + addr_receiver_onion2 = self.nodes[0].add_p2p_connection(AddrReceiver(), dstport=self.onion_port2)) +) + # Trigger response) + cur_mock_time += 5 * 60) + self.nodes[0].setmocktime(cur_mock_time)) + addr_receiver_local.wait_until(addr_receiver_local.addr_received)) + addr_receiver_onion1.wait_until(addr_receiver_onion1.addr_received)) + addr_receiver_onion2.wait_until(addr_receiver_onion2.addr_received)) +) + if i > 0:) + # Responses from different binds should be unique) + assert_not_equal(last_response_on_local_bind, addr_receiver_onion1.get_received_addrs())) + assert_not_equal(last_response_on_local_bind, addr_receiver_onion2.get_received_addrs())) + assert_not_equal(last_response_on_onion_bind1, addr_receiver_onion2.get_received_addrs())) + # Responses on from the same bind should be the same) + assert_equal(last_response_on_local_bind, addr_receiver_local.get_received_addrs())) + assert_equal(last_response_on_onion_bind1, addr_receiver_onion1.get_received_addrs())) + assert_equal(last_response_on_onion_bind2, addr_receiver_onion2.get_received_addrs())) +) + last_response_on_local_bind = addr_receiver_local.get_received_addrs()) + last_response_on_onion_bind1 = addr_receiver_onion1.get_received_addrs()) + last_response_on_onion_bind2 = addr_receiver_onion2.get_received_addrs()) +) + for response in [last_response_on_local_bind, last_response_on_onion_bind1, last_response_on_onion_bind2]:) + 
+            assert_equal(len(response), MAX_ADDR_TO_SEND)
+
+        cur_mock_time += 3 * 24 * 60 * 60
+        self.nodes[0].setmocktime(cur_mock_time)
+
+        self.log.info('After time passed, see a new response to addr request')
+        addr_receiver_local = self.nodes[0].add_p2p_connection(AddrReceiver())
+        addr_receiver_onion1 = self.nodes[0].add_p2p_connection(AddrReceiver(), dstport=self.onion_port1)
+        addr_receiver_onion2 = self.nodes[0].add_p2p_connection(AddrReceiver(), dstport=self.onion_port2)
+
+        # Trigger response
+        cur_mock_time += 5 * 60
+        self.nodes[0].setmocktime(cur_mock_time)
+        addr_receiver_local.wait_until(addr_receiver_local.addr_received)
+        addr_receiver_onion1.wait_until(addr_receiver_onion1.addr_received)
+        addr_receiver_onion2.wait_until(addr_receiver_onion2.addr_received)
+
+        # new response is different
+        assert_not_equal(set(last_response_on_local_bind), set(addr_receiver_local.get_received_addrs()))
+        assert_not_equal(set(last_response_on_onion_bind1), set(addr_receiver_onion1.get_received_addrs()))
+        assert_not_equal(set(last_response_on_onion_bind2), set(addr_receiver_onion2.get_received_addrs()))
+
+
+if __name__ == '__main__':
+    AddrTest(__file__).main()
diff --git a/test/functional/p2p_handshake.py b/test/functional/p2p_handshake.py
index 9a9630d5ac6b67..47b2f500bb1ae3 100755
--- a/test/functional/p2p_handshake.py
+++ b/test/functional/p2p_handshake.py
@@ -1,101 +1,101 @@
-#!/usr/bin/env python3
-# Copyright (c) 2024 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""
-Test P2P behaviour during the handshake phase (VERSION, VERACK messages).
-"""
-import itertools
-import time
-
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_not_equal
-from test_framework.messages import (
-    NODE_NETWORK,
-    NODE_NETWORK_LIMITED,
-    NODE_NONE,
-    NODE_P2P_V2,
-    NODE_WITNESS,
+#!/usr/bin/env python3
+# Copyright (c) 2024 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""
+Test P2P behaviour during the handshake phase (VERSION, VERACK messages).
+"""
+import itertools
+import time
 )
-from test_framework.p2p import P2PInterface
-from test_framework.util import p2p_port
-
-
-# Desirable service flags for outbound non-pruned and pruned peers. Note that
-# the desirable service flags for pruned peers are dynamic and only apply if
-# 1. the peer's service flag NODE_NETWORK_LIMITED is set *and*
-# 2. the local chain is close to the tip (<24h)
-DESIRABLE_SERVICE_FLAGS_FULL = NODE_NETWORK | NODE_WITNESS
-DESIRABLE_SERVICE_FLAGS_PRUNED = NODE_NETWORK_LIMITED | NODE_WITNESS
-
-
-class P2PHandshakeTest(BitcoinTestFramework):
-    def set_test_params(self):
-        self.num_nodes = 1
-
-    def add_outbound_connection(self, node, connection_type, services, wait_for_disconnect):
-        peer = node.add_outbound_p2p_connection(
-            P2PInterface(), p2p_idx=0, wait_for_disconnect=wait_for_disconnect,
-            connection_type=connection_type, services=services,
-            supports_v2_p2p=self.options.v2transport, advertise_v2_p2p=self.options.v2transport)
-        if not wait_for_disconnect:
-            # check that connection is alive past the version handshake and disconnect manually
-            peer.sync_with_ping()
-            peer.peer_disconnect()
-            peer.wait_for_disconnect()
-        self.wait_until(lambda: len(node.getpeerinfo()) == 0)
-
-    def test_desirable_service_flags(self, node, service_flag_tests, desirable_service_flags, expect_disconnect):
-        """Check that connecting to a peer either fails or succeeds depending on its offered
-        service flags in the VERSION message. The test is exercised for all relevant
-        outbound connection types where the desirable service flags check is done."""
-        CONNECTION_TYPES = ["outbound-full-relay", "block-relay-only", "addr-fetch"]
-        for conn_type, services in itertools.product(CONNECTION_TYPES, service_flag_tests):
-            if self.options.v2transport:
-                services |= NODE_P2P_V2
-            expected_result = "disconnect" if expect_disconnect else "connect"
-            self.log.info(f' - services 0x{services:08x}, type "{conn_type}" [{expected_result}]')
-            if expect_disconnect:
-                assert (services & desirable_service_flags) != desirable_service_flags
-                expected_debug_log = f'does not offer the expected services ' \
-                    f'({services:08x} offered, {desirable_service_flags:08x} expected)'
-                with node.assert_debug_log([expected_debug_log]):
-                    self.add_outbound_connection(node, conn_type, services, wait_for_disconnect=True)
-            else:
-                assert (services & desirable_service_flags) == desirable_service_flags
-                self.add_outbound_connection(node, conn_type, services, wait_for_disconnect=False)
-
-    def generate_at_mocktime(self, time):
-        self.nodes[0].setmocktime(time)
-        self.generate(self.nodes[0], 1)
-        self.nodes[0].setmocktime(0)
-
-    def run_test(self):
-        node = self.nodes[0]
-        self.log.info("Check that lacking desired service flags leads to disconnect (non-pruned peers)")
-        self.test_desirable_service_flags(node, [NODE_NONE, NODE_NETWORK, NODE_WITNESS],
-                                          DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=True)
-        self.test_desirable_service_flags(node, [NODE_NETWORK | NODE_WITNESS],
-                                          DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=False)
-
-        self.log.info("Check that limited peers are only desired if the local chain is close to the tip (<24h)")
-        self.generate_at_mocktime(int(time.time()) - 25 * 3600)  # tip outside the 24h window, should fail
-        self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS],
-                                          DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=True)
-        self.generate_at_mocktime(int(time.time()) - 23 * 3600)  # tip inside the 24h window, should succeed
-        self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS],
-                                          DESIRABLE_SERVICE_FLAGS_PRUNED, expect_disconnect=False)
-
-        self.log.info("Check that feeler connections get disconnected immediately")
-        with node.assert_debug_log([f"feeler connection completed"]):
-            self.add_outbound_connection(node, "feeler", NODE_NONE, wait_for_disconnect=True)
-
-        self.log.info("Check that connecting to ourself leads to immediate disconnect")
-        with node.assert_debug_log(["connected to self", "disconnecting"]):
-            node_listen_addr = f"127.0.0.1:{p2p_port(0)}"
-            node.addconnection(node_listen_addr, "outbound-full-relay", self.options.v2transport)
-        self.wait_until(lambda: len(node.getpeerinfo()) == 0)
-
-
-if __name__ == '__main__':
-    P2PHandshakeTest(__file__).main()
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import assert_not_equal
+from test_framework.messages import (
+    NODE_NETWORK,
+    NODE_NETWORK_LIMITED,
+    NODE_NONE,
+    NODE_P2P_V2,
+    NODE_WITNESS,
+)
+from test_framework.p2p import P2PInterface
+from test_framework.util import p2p_port
+
+
+# Desirable service flags for outbound non-pruned and pruned peers. Note that
+# the desirable service flags for pruned peers are dynamic and only apply if
+# 1. the peer's service flag NODE_NETWORK_LIMITED is set *and*
+# 2. the local chain is close to the tip (<24h)
+DESIRABLE_SERVICE_FLAGS_FULL = NODE_NETWORK | NODE_WITNESS
+DESIRABLE_SERVICE_FLAGS_PRUNED = NODE_NETWORK_LIMITED | NODE_WITNESS
+
+
+class P2PHandshakeTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.num_nodes = 1
+
+    def add_outbound_connection(self, node, connection_type, services, wait_for_disconnect):
+        peer = node.add_outbound_p2p_connection(
+            P2PInterface(), p2p_idx=0, wait_for_disconnect=wait_for_disconnect,
+            connection_type=connection_type, services=services,
+            supports_v2_p2p=self.options.v2transport, advertise_v2_p2p=self.options.v2transport)
+        if not wait_for_disconnect:
+            # check that connection is alive past the version handshake and disconnect manually
+            peer.sync_with_ping()
+            peer.peer_disconnect()
+            peer.wait_for_disconnect()
+        self.wait_until(lambda: len(node.getpeerinfo()) == 0)
+
+    def test_desirable_service_flags(self, node, service_flag_tests, desirable_service_flags, expect_disconnect):
+        """Check that connecting to a peer either fails or succeeds depending on its offered
+        service flags in the VERSION message. The test is exercised for all relevant
+        outbound connection types where the desirable service flags check is done."""
+        CONNECTION_TYPES = ["outbound-full-relay", "block-relay-only", "addr-fetch"]
+        for conn_type, services in itertools.product(CONNECTION_TYPES, service_flag_tests):
+            if self.options.v2transport:
+                services |= NODE_P2P_V2
+            expected_result = "disconnect" if expect_disconnect else "connect"
+            self.log.info(f' - services 0x{services:08x}, type "{conn_type}" [{expected_result}]')
+            if expect_disconnect:
+                assert_not_equal((services & desirable_service_flags), desirable_service_flags)
+                expected_debug_log = f'does not offer the expected services ' \
+                    f'({services:08x} offered, {desirable_service_flags:08x} expected)'
+                with node.assert_debug_log([expected_debug_log]):
+                    self.add_outbound_connection(node, conn_type, services, wait_for_disconnect=True)
+            else:
+                assert (services & desirable_service_flags) == desirable_service_flags
+                self.add_outbound_connection(node, conn_type, services, wait_for_disconnect=False)
+
+    def generate_at_mocktime(self, time):
+        self.nodes[0].setmocktime(time)
+        self.generate(self.nodes[0], 1)
+        self.nodes[0].setmocktime(0)
+
+    def run_test(self):
+        node = self.nodes[0]
+        self.log.info("Check that lacking desired service flags leads to disconnect (non-pruned peers)")
+        self.test_desirable_service_flags(node, [NODE_NONE, NODE_NETWORK, NODE_WITNESS],
+                                          DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=True)
+        self.test_desirable_service_flags(node, [NODE_NETWORK | NODE_WITNESS],
+                                          DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=False)
+
+        self.log.info("Check that limited peers are only desired if the local chain is close to the tip (<24h)")
+        self.generate_at_mocktime(int(time.time()) - 25 * 3600)  # tip outside the 24h window, should fail
+        self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS],
+                                          DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=True)
+        self.generate_at_mocktime(int(time.time()) - 23 * 3600)  # tip inside the 24h window, should succeed
+        self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS],
+                                          DESIRABLE_SERVICE_FLAGS_PRUNED, expect_disconnect=False)
+
+        self.log.info("Check that feeler connections get disconnected immediately")
+        with node.assert_debug_log([f"feeler connection completed"]):
+            self.add_outbound_connection(node, "feeler", NODE_NONE, wait_for_disconnect=True)
+
+        self.log.info("Check that connecting to ourself leads to immediate disconnect")
+        with node.assert_debug_log(["connected to self", "disconnecting"]):
+            node_listen_addr = f"127.0.0.1:{p2p_port(0)}"
+            node.addconnection(node_listen_addr, "outbound-full-relay", self.options.v2transport)
+        self.wait_until(lambda: len(node.getpeerinfo()) == 0)
+
+
+if __name__ == '__main__':
+    P2PHandshakeTest(__file__).main()
diff --git a/test/functional/p2p_invalid_block.py b/test/functional/p2p_invalid_block.py
index 68307fd13de4e2..c443aca90bae0e 100755
--- a/test/functional/p2p_invalid_block.py
+++ b/test/functional/p2p_invalid_block.py
@@ -1,144 +1,144 @@
-#!/usr/bin/env python3
-# Copyright (c) 2015-2021 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test node responses to invalid blocks.
-
-In this test we connect to one node over p2p, and test block requests:
-1) Valid blocks should be requested and become chain tip.
-2) Invalid block with duplicated transaction should be re-requested. -3) Invalid block with bad coinbase value should be rejected and not -re-requested. -4) Invalid block due to future timestamp is later accepted when that timestamp -becomes valid. -""" -import copy -import time - -from test_framework.blocktools import ( - MAX_FUTURE_BLOCK_TIME, - create_block, - create_coinbase, - create_tx_with_script, -) -from test_framework.messages import COIN -from test_framework.p2p import P2PDataStore -from test_framework.script import OP_TRUE -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, - assert_not_equal, -) - - -class InvalidBlockRequestTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.setup_clean_chain = True - # whitelist peers to speed up tx relay / mempool sync - self.noban_tx_relay = True - - def run_test(self): - # Add p2p connection to node0 - node = self.nodes[0] # convenience reference to the node - peer = node.add_p2p_connection(P2PDataStore()) - - best_block = node.getblock(node.getbestblockhash()) - tip = int(node.getbestblockhash(), 16) - height = best_block["height"] + 1 - block_time = best_block["time"] + 1 - - self.log.info("Create a new block with an anyone-can-spend coinbase") - - block = create_block(tip, create_coinbase(height), block_time) - block.solve() - # Save the coinbase for later - block1 = block - peer.send_blocks_and_test([block1], node, success=True) - - self.log.info("Mature the block.") - self.generatetoaddress(node, 100, node.get_deterministic_priv_key().address) - - best_block = node.getblock(node.getbestblockhash()) - tip = int(node.getbestblockhash(), 16) - height = best_block["height"] + 1 - block_time = best_block["time"] + 1 - - # Use merkle-root malleability to generate an invalid block with - # same blockheader (CVE-2012-2459). - # Manufacture a block with 3 transactions (coinbase, spend of prior - # coinbase, spend of that spend). Duplicate the 3rd transaction to - # leave merkle root and blockheader unchanged but invalidate the block. - # For more information on merkle-root malleability see src/consensus/merkle.cpp. 
-        self.log.info("Test merkle root malleability.")
-
-        tx1 = create_tx_with_script(block1.vtx[0], 0, script_sig=bytes([OP_TRUE]), amount=50 * COIN)
-        tx2 = create_tx_with_script(tx1, 0, script_sig=bytes([OP_TRUE]), amount=50 * COIN)
-        block2 = create_block(tip, create_coinbase(height), block_time, txlist=[tx1, tx2])
-        block_time += 1
-        block2.solve()
-        orig_hash = block2.sha256
-        block2_orig = copy.deepcopy(block2)
-
-        # Mutate block 2
-        block2.vtx.append(tx2)
-        assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
-        assert_equal(orig_hash, block2.rehash())
-        assert block2_orig.vtx != block2.vtx
-
-        peer.send_blocks_and_test([block2], node, success=False, reject_reason='bad-txns-duplicate')
-
-        # Check transactions for duplicate inputs (CVE-2018-17144)
-        self.log.info("Test duplicate input block.")
-
-        block2_dup = copy.deepcopy(block2_orig)
-        block2_dup.vtx[2].vin.append(block2_dup.vtx[2].vin[0])
-        block2_dup.vtx[2].rehash()
-        block2_dup.hashMerkleRoot = block2_dup.calc_merkle_root()
-        block2_dup.solve()
-        peer.send_blocks_and_test([block2_dup], node, success=False, reject_reason='bad-txns-inputs-duplicate')
-
-        self.log.info("Test very broken block.")
-
-        block3 = create_block(tip, create_coinbase(height, nValue=100), block_time)
-        block_time += 1
-        block3.solve()
-
-        peer.send_blocks_and_test([block3], node, success=False, reject_reason='bad-cb-amount')
-
-
-        # Complete testing of CVE-2012-2459 by sending the original block.
-        # It should be accepted even though it has the same hash as the mutated one.
-
-        self.log.info("Test accepting original block after rejecting its mutated version.")
-        peer.send_blocks_and_test([block2_orig], node, success=True, timeout=5)
-
-        # Update tip info
-        height += 1
-        block_time += 1
-        tip = int(block2_orig.hash, 16)
-
-        # Complete testing of CVE-2018-17144, by checking for the inflation bug.
-        # Create a block that spends the output of a tx in a previous block.
-        tx3 = create_tx_with_script(tx2, 0, script_sig=bytes([OP_TRUE]), amount=50 * COIN)
-        tx3.vin.append(tx3.vin[0])  # Duplicates input
-        tx3.rehash()
-        block4 = create_block(tip, create_coinbase(height), block_time, txlist=[tx3])
-        block4.solve()
-        self.log.info("Test inflation by duplicating input")
-        peer.send_blocks_and_test([block4], node, success=False, reject_reason='bad-txns-inputs-duplicate')
-
-        self.log.info("Test accepting identical block after rejecting it due to a future timestamp.")
-        t = int(time.time())
-        node.setmocktime(t)
-        # Set block time +1 second past max future validity
-        block = create_block(tip, create_coinbase(height), t + MAX_FUTURE_BLOCK_TIME + 1)
-        block.solve()
-        # Need force_send because the block will get rejected without a getdata otherwise
-        peer.send_blocks_and_test([block], node, force_send=True, success=False, reject_reason='time-too-new')
-        node.setmocktime(t + 1)
-        peer.send_blocks_and_test([block], node, success=True)
-
-
-if __name__ == '__main__':
-    InvalidBlockRequestTest(__file__).main()
+#!/usr/bin/env python3
+# Copyright (c) 2015-2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test node responses to invalid blocks.
+
+In this test we connect to one node over p2p, and test block requests:
+1) Valid blocks should be requested and become chain tip.
+2) Invalid block with duplicated transaction should be re-requested.
+3) Invalid block with bad coinbase value should be rejected and not
+re-requested.
+4) Invalid block due to future timestamp is later accepted when that timestamp
+becomes valid.
+"""
+import copy
+import time
+
+from test_framework.blocktools import (
+    MAX_FUTURE_BLOCK_TIME,
+    create_block,
+    create_coinbase,
+    create_tx_with_script,
+)
+from test_framework.messages import COIN
+from test_framework.p2p import P2PDataStore
+from test_framework.script import OP_TRUE
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+    assert_equal,
+    assert_not_equal,
+)
+
+
+class InvalidBlockRequestTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.num_nodes = 1
+        self.setup_clean_chain = True
+        # whitelist peers to speed up tx relay / mempool sync
+        self.noban_tx_relay = True
+
+    def run_test(self):
+        # Add p2p connection to node0
+        node = self.nodes[0] # convenience reference to the node
+        peer = node.add_p2p_connection(P2PDataStore())
+
+        best_block = node.getblock(node.getbestblockhash())
+        tip = int(node.getbestblockhash(), 16)
+        height = best_block["height"] + 1
+        block_time = best_block["time"] + 1
+
+        self.log.info("Create a new block with an anyone-can-spend coinbase")
+
+        block = create_block(tip, create_coinbase(height), block_time)
+        block.solve()
+        # Save the coinbase for later
+        block1 = block
+        peer.send_blocks_and_test([block1], node, success=True)
+
+        self.log.info("Mature the block.")
+        self.generatetoaddress(node, 100, node.get_deterministic_priv_key().address)
+
+        best_block = node.getblock(node.getbestblockhash())
+        tip = int(node.getbestblockhash(), 16)
+        height = best_block["height"] + 1
+        block_time = best_block["time"] + 1
+
+        # Use merkle-root malleability to generate an invalid block with
+        # same blockheader (CVE-2012-2459).
+        # Manufacture a block with 3 transactions (coinbase, spend of prior
+        # coinbase, spend of that spend). Duplicate the 3rd transaction to
+        # leave merkle root and blockheader unchanged but invalidate the block.
+        # For more information on merkle-root malleability see src/consensus/merkle.cpp.
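+        # Illustrative sketch of why this works: Bitcoin's merkle computation
+        # duplicates the last hash of any odd-length level, so the tx lists
+        # [cb, tx1, tx2] and [cb, tx1, tx2, tx2] pair up identically as
+        # ((cb, tx1), (tx2, tx2)) and yield the same root, even though the
+        # second list makes the block invalid.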
+        self.log.info("Test merkle root malleability.")
+
+        tx1 = create_tx_with_script(block1.vtx[0], 0, script_sig=bytes([OP_TRUE]), amount=50 * COIN)
+        tx2 = create_tx_with_script(tx1, 0, script_sig=bytes([OP_TRUE]), amount=50 * COIN)
+        block2 = create_block(tip, create_coinbase(height), block_time, txlist=[tx1, tx2])
+        block_time += 1
+        block2.solve()
+        orig_hash = block2.sha256
+        block2_orig = copy.deepcopy(block2)
+
+        # Mutate block 2
+        block2.vtx.append(tx2)
+        assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
+        assert_equal(orig_hash, block2.rehash())
+        assert_not_equal(block2_orig.vtx, block2.vtx)
+
+        peer.send_blocks_and_test([block2], node, success=False, reject_reason='bad-txns-duplicate')
+
+        # Check transactions for duplicate inputs (CVE-2018-17144)
+        self.log.info("Test duplicate input block.")
+
+        block2_dup = copy.deepcopy(block2_orig)
+        block2_dup.vtx[2].vin.append(block2_dup.vtx[2].vin[0])
+        block2_dup.vtx[2].rehash()
+        block2_dup.hashMerkleRoot = block2_dup.calc_merkle_root()
+        block2_dup.solve()
+        peer.send_blocks_and_test([block2_dup], node, success=False, reject_reason='bad-txns-inputs-duplicate')
+
+        self.log.info("Test very broken block.")
+
+        block3 = create_block(tip, create_coinbase(height, nValue=100), block_time)
+        block_time += 1
+        block3.solve()
+
+        peer.send_blocks_and_test([block3], node, success=False, reject_reason='bad-cb-amount')
+
+
+        # Complete testing of CVE-2012-2459 by sending the original block.
+        # It should be accepted even though it has the same hash as the mutated one.
+
+        self.log.info("Test accepting original block after rejecting its mutated version.")
+        peer.send_blocks_and_test([block2_orig], node, success=True, timeout=5)
+
+        # Update tip info
+        height += 1
+        block_time += 1
+        tip = int(block2_orig.hash, 16)
+
+        # Complete testing of CVE-2018-17144, by checking for the inflation bug.
+        # Create a block that spends the output of a tx in a previous block.
+        tx3 = create_tx_with_script(tx2, 0, script_sig=bytes([OP_TRUE]), amount=50 * COIN)
+        tx3.vin.append(tx3.vin[0]) # Duplicates input
+        tx3.rehash()
+        block4 = create_block(tip, create_coinbase(height), block_time, txlist=[tx3])
+        block4.solve()
+        self.log.info("Test inflation by duplicating input")
+        peer.send_blocks_and_test([block4], node, success=False, reject_reason='bad-txns-inputs-duplicate')
+
+        self.log.info("Test accepting identical block after rejecting it due to a future timestamp.")
+        t = int(time.time())
+        node.setmocktime(t)
+        # Set block time +1 second past max future validity
+        block = create_block(tip, create_coinbase(height), t + MAX_FUTURE_BLOCK_TIME + 1)
+        block.solve()
+        # Need force_send because the block will get rejected without a getdata otherwise
+        peer.send_blocks_and_test([block], node, force_send=True, success=False, reject_reason='time-too-new')
+        node.setmocktime(t + 1)
+        peer.send_blocks_and_test([block], node, success=True)
+
+
+if __name__ == '__main__':
+    InvalidBlockRequestTest(__file__).main()
diff --git a/test/functional/p2p_orphan_handling.py b/test/functional/p2p_orphan_handling.py
index 4f8502dbba9beb..33998a3668b4ed 100755
--- a/test/functional/p2p_orphan_handling.py
+++ b/test/functional/p2p_orphan_handling.py
@@ -1,648 +1,648 @@
-#!/usr/bin/env python3
-# Copyright (c) 2023 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
- -import time - -from test_framework.mempool_util import tx_in_orphanage -from test_framework.messages import ( - CInv, - CTxInWitness, - MSG_TX, - MSG_WITNESS_TX, - MSG_WTX, - msg_getdata, - msg_inv, - msg_notfound, - msg_tx, - tx_from_hex, -) -from test_framework.p2p import ( - GETDATA_TX_INTERVAL, - NONPREF_PEER_TX_DELAY, - OVERLOADED_PEER_TX_DELAY, - p2p_lock, - P2PInterface, - P2PTxInvStore, - TXID_RELAY_DELAY, -) -from test_framework.util import ( - assert_not_equal, - assert_equal, -) -from test_framework.test_framework import BitcoinTestFramework -from test_framework.wallet import ( - MiniWallet, - MiniWalletMode, -) - -# Time to bump forward (using setmocktime) before waiting for the node to send getdata(tx) in response -# to an inv(tx), in seconds. This delay includes all possible delays + 1, so it should only be used -# when the value of the delay is not interesting. If we want to test that the node waits x seconds -# for one peer and y seconds for another, use specific values instead. -TXREQUEST_TIME_SKIP = NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY + OVERLOADED_PEER_TX_DELAY + 1 - -DEFAULT_MAX_ORPHAN_TRANSACTIONS = 100 - -def cleanup(func): - # Time to fastfoward (using setmocktime) in between subtests to ensure they do not interfere with - # one another, in seconds. Equal to 12 hours, which is enough to expire anything that may exist - # (though nothing should since state should be cleared) in p2p data structures. - LONG_TIME_SKIP = 12 * 60 * 60 - - def wrapper(self): - try: - func(self) - finally: - # Clear mempool - self.generate(self.nodes[0], 1) - self.nodes[0].disconnect_p2ps() - self.nodes[0].bumpmocktime(LONG_TIME_SKIP) - return wrapper - -class PeerTxRelayer(P2PTxInvStore): - """A P2PTxInvStore that also remembers all of the getdata and tx messages it receives.""" - def __init__(self): - super().__init__() - self._tx_received = [] - self._getdata_received = [] - - @property - def tx_received(self): - with p2p_lock: - return self._tx_received - - @property - def getdata_received(self): - with p2p_lock: - return self._getdata_received - - def on_tx(self, message): - self._tx_received.append(message) - - def on_getdata(self, message): - self._getdata_received.append(message) - - def wait_for_parent_requests(self, txids): - """Wait for requests for missing parents by txid with witness data (MSG_WITNESS_TX or - WitnessTx). Requires that the getdata message match these txids exactly; all txids must be - requested and no additional requests are allowed.""" - def test_function(): - last_getdata = self.last_message.get('getdata') - if not last_getdata: - return False - return len(last_getdata.inv) == len(txids) and all([item.type == MSG_WITNESS_TX and item.hash in txids for item in last_getdata.inv]) - self.wait_until(test_function, timeout=10) - - def assert_no_immediate_response(self, message): - """Check that the node does not immediately respond to this message with any of getdata, - inv, tx. The node may respond later. 
- """ - prev_lastmessage = self.last_message - self.send_and_ping(message) - after_lastmessage = self.last_message - for msgtype in ["getdata", "inv", "tx"]: - if msgtype not in prev_lastmessage: - assert msgtype not in after_lastmessage - else: - assert_equal(prev_lastmessage[msgtype], after_lastmessage[msgtype]) - - def assert_never_requested(self, txhash): - """Check that the node has never sent us a getdata for this hash (int type)""" - for getdata in self.getdata_received: - for request in getdata.inv: - assert request.hash != txhash - -class OrphanHandlingTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.extra_args = [[]] - - def create_parent_and_child(self): - """Create package with 1 parent and 1 child, normal fees (no cpfp).""" - parent = self.wallet.create_self_transfer() - child = self.wallet.create_self_transfer(utxo_to_spend=parent['new_utxo']) - return child["tx"].getwtxid(), child["tx"], parent["tx"] - - def relay_transaction(self, peer, tx): - """Relay transaction using MSG_WTX""" - wtxid = int(tx.getwtxid(), 16) - peer.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=wtxid)])) - self.nodes[0].bumpmocktime(TXREQUEST_TIME_SKIP) - peer.wait_for_getdata([wtxid]) - peer.send_and_ping(msg_tx(tx)) - - def create_malleated_version(self, tx): - """ - Create a malleated version of the tx where the witness is replaced with garbage data. - Returns a CTransaction object. - """ - tx_bad_wit = tx_from_hex(tx["hex"]) - tx_bad_wit.wit.vtxinwit = [CTxInWitness()] - # Add garbage data to witness 0. We cannot simply strip the witness, as the node would - # classify it as a transaction in which the witness was missing rather than wrong. - tx_bad_wit.wit.vtxinwit[0].scriptWitness.stack = [b'garbage'] - - assert_equal(tx["txid"], tx_bad_wit.rehash()) - assert tx["wtxid"] != tx_bad_wit.getwtxid() - - return tx_bad_wit - - @cleanup - def test_arrival_timing_orphan(self): - self.log.info("Test missing parents that arrive during delay are not requested") - node = self.nodes[0] - tx_parent_arrives = self.wallet.create_self_transfer() - tx_parent_doesnt_arrive = self.wallet.create_self_transfer() - # Fake orphan spends nonexistent outputs of the two parents - tx_fake_orphan = self.wallet.create_self_transfer_multi(utxos_to_spend=[ - {"txid": tx_parent_doesnt_arrive["txid"], "vout": 10, "value": tx_parent_doesnt_arrive["new_utxo"]["value"]}, - {"txid": tx_parent_arrives["txid"], "vout": 10, "value": tx_parent_arrives["new_utxo"]["value"]} - ]) - - peer_spy = node.add_p2p_connection(PeerTxRelayer()) - peer_normal = node.add_p2p_connection(PeerTxRelayer()) - # This transaction is an orphan because it is missing inputs. It is a "fake" orphan that the - # spy peer has crafted to learn information about tx_parent_arrives even though it isn't - # able to spend a real output of it, but it could also just be a normal, real child tx. - # The node should not immediately respond with a request for orphan parents. - # Also, no request should be sent later because it will be resolved by - # the time the request is scheduled to be sent. - peer_spy.assert_no_immediate_response(msg_tx(tx_fake_orphan["tx"])) - - # Node receives transaction. It attempts to obfuscate the exact timing at which this - # transaction entered its mempool. Send unsolicited because otherwise we need to wait for - # request delays. 
- peer_normal.send_and_ping(msg_tx(tx_parent_arrives["tx"])) - assert tx_parent_arrives["txid"] in node.getrawmempool() - - # Spy peer should not be able to query the node for the parent yet, since it hasn't been - # announced / insufficient time has elapsed. - parent_inv = CInv(t=MSG_WTX, h=int(tx_parent_arrives["tx"].getwtxid(), 16)) - assert_equal(len(peer_spy.get_invs()), 0) - peer_spy.assert_no_immediate_response(msg_getdata([parent_inv])) - - # Request would be scheduled with this delay because it is not a preferred relay peer. - self.nodes[0].bumpmocktime(NONPREF_PEER_TX_DELAY) - peer_spy.assert_never_requested(int(tx_parent_arrives["txid"], 16)) - peer_spy.assert_never_requested(int(tx_parent_doesnt_arrive["txid"], 16)) - # Request would be scheduled with this delay because it is by txid. - self.nodes[0].bumpmocktime(TXID_RELAY_DELAY) - peer_spy.wait_for_parent_requests([int(tx_parent_doesnt_arrive["txid"], 16)]) - peer_spy.assert_never_requested(int(tx_parent_arrives["txid"], 16)) - - @cleanup - def test_orphan_rejected_parents_exceptions(self): - node = self.nodes[0] - peer1 = node.add_p2p_connection(PeerTxRelayer()) - peer2 = node.add_p2p_connection(PeerTxRelayer()) - - self.log.info("Test orphan handling when a nonsegwit parent is known to be invalid") - parent_low_fee_nonsegwit = self.wallet_nonsegwit.create_self_transfer(fee_rate=0) - assert_equal(parent_low_fee_nonsegwit["txid"], parent_low_fee_nonsegwit["tx"].getwtxid()) - parent_other = self.wallet_nonsegwit.create_self_transfer() - child_nonsegwit = self.wallet_nonsegwit.create_self_transfer_multi( - utxos_to_spend=[parent_other["new_utxo"], parent_low_fee_nonsegwit["new_utxo"]]) - - # Relay the parent. It should be rejected because it pays 0 fees. - self.relay_transaction(peer1, parent_low_fee_nonsegwit["tx"]) - assert parent_low_fee_nonsegwit["txid"] not in node.getrawmempool() - - # Relay the child. It should not be accepted because it has missing inputs. - # Its parent should not be requested because its hash (txid == wtxid) has been added to the rejection filter. - self.relay_transaction(peer2, child_nonsegwit["tx"]) - assert child_nonsegwit["txid"] not in node.getrawmempool() - assert not tx_in_orphanage(node, child_nonsegwit["tx"]) - - # No parents are requested. - self.nodes[0].bumpmocktime(GETDATA_TX_INTERVAL) - peer1.assert_never_requested(int(parent_other["txid"], 16)) - peer2.assert_never_requested(int(parent_other["txid"], 16)) - peer2.assert_never_requested(int(parent_low_fee_nonsegwit["txid"], 16)) - - self.log.info("Test orphan handling when a segwit parent was invalid but may be retried with another witness") - parent_low_fee = self.wallet.create_self_transfer(fee_rate=0) - child_low_fee = self.wallet.create_self_transfer(utxo_to_spend=parent_low_fee["new_utxo"]) - - # Relay the low fee parent. It should not be accepted. - self.relay_transaction(peer1, parent_low_fee["tx"]) - assert parent_low_fee["txid"] not in node.getrawmempool() - - # Relay the child. It should not be accepted because it has missing inputs. - self.relay_transaction(peer2, child_low_fee["tx"]) - assert child_low_fee["txid"] not in node.getrawmempool() - assert tx_in_orphanage(node, child_low_fee["tx"]) - - # The parent should be requested because even though the txid commits to the fee, it doesn't - # commit to the feerate. Delayed because it's by txid and this is not a preferred relay peer. 
- self.nodes[0].bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY) - peer2.wait_for_getdata([int(parent_low_fee["tx"].rehash(), 16)]) - - self.log.info("Test orphan handling when a parent was previously downloaded with witness stripped") - parent_normal = self.wallet.create_self_transfer() - parent1_witness_stripped = tx_from_hex(parent_normal["tx"].serialize_without_witness().hex()) - child_invalid_witness = self.wallet.create_self_transfer(utxo_to_spend=parent_normal["new_utxo"]) - - # Relay the parent with witness stripped. It should not be accepted. - self.relay_transaction(peer1, parent1_witness_stripped) - assert_equal(parent_normal["txid"], parent1_witness_stripped.rehash()) - assert parent1_witness_stripped.rehash() not in node.getrawmempool() - - # Relay the child. It should not be accepted because it has missing inputs. - self.relay_transaction(peer2, child_invalid_witness["tx"]) - assert child_invalid_witness["txid"] not in node.getrawmempool() - assert tx_in_orphanage(node, child_invalid_witness["tx"]) - - # The parent should be requested since the unstripped wtxid would differ. Delayed because - # it's by txid and this is not a preferred relay peer. - self.nodes[0].bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY) - peer2.wait_for_getdata([int(parent_normal["tx"].rehash(), 16)]) - - # parent_normal can be relayed again even though parent1_witness_stripped was rejected - self.relay_transaction(peer1, parent_normal["tx"]) - assert_equal(set(node.getrawmempool()), set([parent_normal["txid"], child_invalid_witness["txid"]])) - - @cleanup - def test_orphan_multiple_parents(self): - node = self.nodes[0] - peer = node.add_p2p_connection(PeerTxRelayer()) - - self.log.info("Test orphan parent requests with a mixture of confirmed, in-mempool and missing parents") - # This UTXO confirmed a long time ago. - utxo_conf_old = self.wallet.send_self_transfer(from_node=node)["new_utxo"] - txid_conf_old = utxo_conf_old["txid"] - self.generate(self.wallet, 10) - - # Create a fake reorg to trigger BlockDisconnected, which resets the rolling bloom filter. - # The alternative is to mine thousands of transactions to push it out of the filter. - last_block = node.getbestblockhash() - node.invalidateblock(last_block) - node.preciousblock(last_block) - node.syncwithvalidationinterfacequeue() - - # This UTXO confirmed recently. - utxo_conf_recent = self.wallet.send_self_transfer(from_node=node)["new_utxo"] - self.generate(node, 1) - - # This UTXO is unconfirmed and in the mempool. - assert_equal(len(node.getrawmempool()), 0) - mempool_tx = self.wallet.send_self_transfer(from_node=node) - utxo_unconf_mempool = mempool_tx["new_utxo"] - - # This UTXO is unconfirmed and missing. - missing_tx = self.wallet.create_self_transfer() - utxo_unconf_missing = missing_tx["new_utxo"] - assert missing_tx["txid"] not in node.getrawmempool() - - orphan = self.wallet.create_self_transfer_multi(utxos_to_spend=[utxo_conf_old, - utxo_conf_recent, utxo_unconf_mempool, utxo_unconf_missing]) - - self.relay_transaction(peer, orphan["tx"]) - self.nodes[0].bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY) - peer.sync_with_ping() - assert tx_in_orphanage(node, orphan["tx"]) - assert_equal(len(peer.last_message["getdata"].inv), 2) - peer.wait_for_parent_requests([int(txid_conf_old, 16), int(missing_tx["txid"], 16)]) - - # Even though the peer would send a notfound for the "old" confirmed transaction, the node - # doesn't give up on the orphan. 
Once all of the missing parents are received, it should be - # submitted to mempool. - peer.send_message(msg_notfound(vec=[CInv(MSG_WITNESS_TX, int(txid_conf_old, 16))])) - # Sync with ping to ensure orphans are reconsidered - peer.send_and_ping(msg_tx(missing_tx["tx"])) - assert_equal(node.getmempoolentry(orphan["txid"])["ancestorcount"], 3) - - @cleanup - def test_orphans_overlapping_parents(self): - node = self.nodes[0] - # In the process of relaying inflight_parent_AB - peer_txrequest = node.add_p2p_connection(PeerTxRelayer()) - # Sends the orphans - peer_orphans = node.add_p2p_connection(PeerTxRelayer()) - - confirmed_utxos = [self.wallet_nonsegwit.get_utxo() for _ in range(4)] - assert all([utxo["confirmations"] > 0 for utxo in confirmed_utxos]) - self.log.info("Test handling of multiple orphans with missing parents that are already being requested") - # Parent of child_A only - missing_parent_A = self.wallet_nonsegwit.create_self_transfer(utxo_to_spend=confirmed_utxos[0]) - # Parents of child_A and child_B - missing_parent_AB = self.wallet_nonsegwit.create_self_transfer(utxo_to_spend=confirmed_utxos[1]) - inflight_parent_AB = self.wallet_nonsegwit.create_self_transfer(utxo_to_spend=confirmed_utxos[2]) - # Parent of child_B only - missing_parent_B = self.wallet_nonsegwit.create_self_transfer(utxo_to_spend=confirmed_utxos[3]) - child_A = self.wallet_nonsegwit.create_self_transfer_multi( - utxos_to_spend=[missing_parent_A["new_utxo"], missing_parent_AB["new_utxo"], inflight_parent_AB["new_utxo"]] - ) - child_B = self.wallet_nonsegwit.create_self_transfer_multi( - utxos_to_spend=[missing_parent_B["new_utxo"], missing_parent_AB["new_utxo"], inflight_parent_AB["new_utxo"]] - ) - - # The wtxid and txid need to be the same for the node to recognize that the missing input - # and in-flight request for inflight_parent_AB are the same transaction. - assert_equal(inflight_parent_AB["txid"], inflight_parent_AB["tx"].getwtxid()) - - # Announce inflight_parent_AB and wait for getdata - peer_txrequest.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=int(inflight_parent_AB["tx"].getwtxid(), 16))])) - self.nodes[0].bumpmocktime(NONPREF_PEER_TX_DELAY) - peer_txrequest.wait_for_getdata([int(inflight_parent_AB["tx"].getwtxid(), 16)]) - - self.log.info("Test that the node does not request a parent if it has an in-flight txrequest") - # Relay orphan child_A - self.relay_transaction(peer_orphans, child_A["tx"]) - self.nodes[0].bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY) - assert tx_in_orphanage(node, child_A["tx"]) - # There are 3 missing parents. missing_parent_A and missing_parent_AB should be requested. - # But inflight_parent_AB should not, because there is already an in-flight request for it. - peer_orphans.wait_for_parent_requests([int(missing_parent_A["txid"], 16), int(missing_parent_AB["txid"], 16)]) - - self.log.info("Test that the node does not request a parent if it has an in-flight orphan parent request") - # Relay orphan child_B - self.relay_transaction(peer_orphans, child_B["tx"]) - self.nodes[0].bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY) - assert tx_in_orphanage(node, child_B["tx"]) - # Only missing_parent_B should be requested. Not inflight_parent_AB or missing_parent_AB - # because they are already being requested from peer_txrequest and peer_orphans respectively. 
- peer_orphans.wait_for_parent_requests([int(missing_parent_B["txid"], 16)]) - peer_orphans.assert_never_requested(int(inflight_parent_AB["txid"], 16)) - - @cleanup - def test_orphan_of_orphan(self): - node = self.nodes[0] - peer = node.add_p2p_connection(PeerTxRelayer()) - - self.log.info("Test handling of an orphan with a parent who is another orphan") - missing_grandparent = self.wallet_nonsegwit.create_self_transfer() - missing_parent_orphan = self.wallet_nonsegwit.create_self_transfer(utxo_to_spend=missing_grandparent["new_utxo"]) - missing_parent = self.wallet_nonsegwit.create_self_transfer() - orphan = self.wallet_nonsegwit.create_self_transfer_multi(utxos_to_spend=[missing_parent["new_utxo"], missing_parent_orphan["new_utxo"]]) - - # The node should put missing_parent_orphan into the orphanage and request missing_grandparent - self.relay_transaction(peer, missing_parent_orphan["tx"]) - self.nodes[0].bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY) - assert tx_in_orphanage(node, missing_parent_orphan["tx"]) - peer.wait_for_parent_requests([int(missing_grandparent["txid"], 16)]) - - # The node should put the orphan into the orphanage and request missing_parent, skipping - # missing_parent_orphan because it already has it in the orphanage. - self.relay_transaction(peer, orphan["tx"]) - self.nodes[0].bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY) - assert tx_in_orphanage(node, orphan["tx"]) - peer.wait_for_parent_requests([int(missing_parent["txid"], 16)]) - - @cleanup - def test_orphan_inherit_rejection(self): - node = self.nodes[0] - peer1 = node.add_p2p_connection(PeerTxRelayer()) - peer2 = node.add_p2p_connection(PeerTxRelayer()) - peer3 = node.add_p2p_connection(PeerTxRelayer()) - - self.log.info("Test that an orphan with rejected parents, along with any descendants, cannot be retried with an alternate witness") - parent_low_fee_nonsegwit = self.wallet_nonsegwit.create_self_transfer(fee_rate=0) - assert_equal(parent_low_fee_nonsegwit["txid"], parent_low_fee_nonsegwit["tx"].getwtxid()) - child = self.wallet.create_self_transfer(utxo_to_spend=parent_low_fee_nonsegwit["new_utxo"]) - grandchild = self.wallet.create_self_transfer(utxo_to_spend=child["new_utxo"]) - assert child["txid"] != child["tx"].getwtxid() - assert grandchild["txid"] != grandchild["tx"].getwtxid() - - # Relay the parent. It should be rejected because it pays 0 fees. - self.relay_transaction(peer1, parent_low_fee_nonsegwit["tx"]) - assert parent_low_fee_nonsegwit["txid"] not in node.getrawmempool() - - # Relay the child. It should be rejected for having missing parents, and this rejection is - # cached by txid and wtxid. - self.relay_transaction(peer1, child["tx"]) - assert_equal(0, len(node.getrawmempool())) - assert not tx_in_orphanage(node, child["tx"]) - peer1.assert_never_requested(parent_low_fee_nonsegwit["txid"]) - - # Grandchild should also not be kept in orphanage because its parent has been rejected. - self.relay_transaction(peer2, grandchild["tx"]) - assert_equal(0, len(node.getrawmempool())) - assert not tx_in_orphanage(node, grandchild["tx"]) - peer2.assert_never_requested(child["txid"]) - peer2.assert_never_requested(child["tx"].getwtxid()) - - # The child should never be requested, even if announced again with potentially different witness. 
- # Sync with ping to ensure orphans are reconsidered - peer3.send_and_ping(msg_inv([CInv(t=MSG_TX, h=int(child["txid"], 16))])) - self.nodes[0].bumpmocktime(TXREQUEST_TIME_SKIP) - peer3.assert_never_requested(child["txid"]) - - @cleanup - def test_same_txid_orphan(self): - self.log.info("Check what happens when orphan with same txid is already in orphanage") - node = self.nodes[0] - - tx_parent = self.wallet.create_self_transfer() - - # Create the real child - tx_child = self.wallet.create_self_transfer(utxo_to_spend=tx_parent["new_utxo"]) - - # Create a fake version of the child - tx_orphan_bad_wit = self.create_malleated_version(tx_child) - - bad_peer = node.add_p2p_connection(P2PInterface()) - honest_peer = node.add_p2p_connection(P2PInterface()) - - # 1. Fake orphan is received first. It is missing an input. - bad_peer.send_and_ping(msg_tx(tx_orphan_bad_wit)) - assert tx_in_orphanage(node, tx_orphan_bad_wit) - - # 2. Node requests the missing parent by txid. - parent_txid_int = int(tx_parent["txid"], 16) - node.bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY) - bad_peer.wait_for_getdata([parent_txid_int]) - - # 3. Honest peer relays the real child, which is also missing parents and should be placed - # in the orphanage. - with node.assert_debug_log(["missingorspent"]): - honest_peer.send_and_ping(msg_tx(tx_child["tx"])) - assert tx_in_orphanage(node, tx_child["tx"]) - - # Time out the previous request for the parent (node will not request the same transaction - # from multiple nodes at the same time) - node.bumpmocktime(GETDATA_TX_INTERVAL) - - # 4. The parent is requested. Honest peer sends it. - honest_peer.wait_for_getdata([parent_txid_int]) - # Sync with ping to ensure orphans are reconsidered - honest_peer.send_and_ping(msg_tx(tx_parent["tx"])) - - # 5. After parent is accepted, orphans should be reconsidered. - # The real child should be accepted and the fake one rejected. - node_mempool = node.getrawmempool() - assert tx_parent["txid"] in node_mempool - assert tx_child["txid"] in node_mempool - assert_equal(node.getmempoolentry(tx_child["txid"])["wtxid"], tx_child["wtxid"]) - - @cleanup - def test_same_txid_orphan_of_orphan(self): - self.log.info("Check what happens when orphan's parent with same txid is already in orphanage") - node = self.nodes[0] - - tx_grandparent = self.wallet.create_self_transfer() - - # Create middle tx (both parent and child) which will be in orphanage. - tx_middle = self.wallet.create_self_transfer(utxo_to_spend=tx_grandparent["new_utxo"]) - - # Create a fake version of the middle tx - tx_orphan_bad_wit = self.create_malleated_version(tx_middle) - - # Create grandchild spending from tx_middle (and spending from tx_orphan_bad_wit since they - # have the same txid). - tx_grandchild = self.wallet.create_self_transfer(utxo_to_spend=tx_middle["new_utxo"]) - - bad_peer = node.add_p2p_connection(P2PInterface()) - honest_peer = node.add_p2p_connection(P2PInterface()) - - # 1. Fake orphan is received first. It is missing an input. - bad_peer.send_and_ping(msg_tx(tx_orphan_bad_wit)) - assert tx_in_orphanage(node, tx_orphan_bad_wit) - - # 2. Node requests missing tx_grandparent by txid. - grandparent_txid_int = int(tx_grandparent["txid"], 16) - node.bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY) - bad_peer.wait_for_getdata([grandparent_txid_int]) - - # 3. Honest peer relays the grandchild, which is missing a parent. 
The parent by txid already - # exists in orphanage, but should be re-requested because the node shouldn't assume that the - # witness data is the same. In this case, a same-txid-different-witness transaction exists! - honest_peer.send_and_ping(msg_tx(tx_grandchild["tx"])) - assert tx_in_orphanage(node, tx_grandchild["tx"]) - middle_txid_int = int(tx_middle["txid"], 16) - node.bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY) - honest_peer.wait_for_getdata([middle_txid_int]) - - # 4. Honest peer relays the real child, which is also missing parents and should be placed - # in the orphanage. - honest_peer.send_and_ping(msg_tx(tx_middle["tx"])) - assert tx_in_orphanage(node, tx_middle["tx"]) - assert_equal(len(node.getrawmempool()), 0) - - # 5. Honest peer sends tx_grandparent - honest_peer.send_and_ping(msg_tx(tx_grandparent["tx"])) - - # 6. After parent is accepted, orphans should be reconsidered. - # The real child should be accepted and the fake one rejected. - node_mempool = node.getrawmempool() - assert tx_grandparent["txid"] in node_mempool - assert tx_middle["txid"] in node_mempool - assert tx_grandchild["txid"] in node_mempool - assert_equal(node.getmempoolentry(tx_middle["txid"])["wtxid"], tx_middle["wtxid"]) - assert_equal(len(node.getorphantxs()), 0) - - @cleanup - def test_orphan_txid_inv(self): - self.log.info("Check node does not ignore announcement with same txid as tx in orphanage") - node = self.nodes[0] - - tx_parent = self.wallet.create_self_transfer() - - # Create the real child and fake version - tx_child = self.wallet.create_self_transfer(utxo_to_spend=tx_parent["new_utxo"]) - tx_orphan_bad_wit = self.create_malleated_version(tx_child) - - bad_peer = node.add_p2p_connection(PeerTxRelayer()) - # Must not send wtxidrelay because otherwise the inv(TX) will be ignored later - honest_peer = node.add_p2p_connection(P2PInterface(wtxidrelay=False)) - - # 1. Fake orphan is received first. It is missing an input. - bad_peer.send_and_ping(msg_tx(tx_orphan_bad_wit)) - assert tx_in_orphanage(node, tx_orphan_bad_wit) - - # 2. Node requests the missing parent by txid. - parent_txid_int = int(tx_parent["txid"], 16) - node.bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY) - bad_peer.wait_for_getdata([parent_txid_int]) - - # 3. Honest peer announces the real child, by txid (this isn't common but the node should - # still keep track of it). - child_txid_int = int(tx_child["txid"], 16) - honest_peer.send_and_ping(msg_inv([CInv(t=MSG_TX, h=child_txid_int)])) - - # 4. The child is requested. Honest peer sends it. - node.bumpmocktime(TXREQUEST_TIME_SKIP) - honest_peer.wait_for_getdata([child_txid_int]) - honest_peer.send_and_ping(msg_tx(tx_child["tx"])) - assert tx_in_orphanage(node, tx_child["tx"]) - - # 5. After first parent request times out, the node sends another one for the missing parent - # of the real orphan child. - node.bumpmocktime(GETDATA_TX_INTERVAL) - honest_peer.wait_for_getdata([parent_txid_int]) - honest_peer.send_and_ping(msg_tx(tx_parent["tx"])) - - # 6. After parent is accepted, orphans should be reconsidered. - # The real child should be accepted and the fake one rejected. This may happen in either - # order since the message-processing is randomized. If tx_orphan_bad_wit is validated first, - # its consensus error leads to disconnection of bad_peer. If tx_child is validated first, - # tx_orphan_bad_wit is rejected for txn-same-nonwitness-data-in-mempool (no punishment). 
-        node_mempool = node.getrawmempool()
-        assert tx_parent["txid"] in node_mempool
-        assert tx_child["txid"] in node_mempool
-        assert_equal(node.getmempoolentry(tx_child["txid"])["wtxid"], tx_child["wtxid"])
-        assert_equal(len(node.getorphantxs()), 0)
-
-    @cleanup
-    def test_max_orphan_amount(self):
-        self.log.info("Check that we never exceed our storage limits for orphans")
-
-        node = self.nodes[0]
-        self.generate(self.wallet, 1)
-        peer_1 = node.add_p2p_connection(P2PInterface())
-
-        self.log.info("Check that orphanage is empty on start of test")
-        assert len(node.getorphantxs()) == 0
-
-        self.log.info("Filling up orphanage with " + str(DEFAULT_MAX_ORPHAN_TRANSACTIONS) + "(DEFAULT_MAX_ORPHAN_TRANSACTIONS) orphans")
-        orphans = []
-        parent_orphans = []
-        for _ in range(DEFAULT_MAX_ORPHAN_TRANSACTIONS):
-            tx_parent_1 = self.wallet.create_self_transfer()
-            tx_child_1 = self.wallet.create_self_transfer(utxo_to_spend=tx_parent_1["new_utxo"])
-            parent_orphans.append(tx_parent_1["tx"])
-            orphans.append(tx_child_1["tx"])
-            peer_1.send_message(msg_tx(tx_child_1["tx"]))
-
-        peer_1.sync_with_ping()
-        orphanage = node.getorphantxs()
-        assert_equal(len(orphanage), DEFAULT_MAX_ORPHAN_TRANSACTIONS)
-
-        for orphan in orphans:
-            assert tx_in_orphanage(node, orphan)
-
-        self.log.info("Check that we do not add more than the max orphan amount")
-        tx_parent_1 = self.wallet.create_self_transfer()
-        tx_child_1 = self.wallet.create_self_transfer(utxo_to_spend=tx_parent_1["new_utxo"])
-        peer_1.send_and_ping(msg_tx(tx_child_1["tx"]))
-        parent_orphans.append(tx_parent_1["tx"])
-        orphanage = node.getorphantxs()
-        assert_equal(len(orphanage), DEFAULT_MAX_ORPHAN_TRANSACTIONS)
-
-        self.log.info("Clearing the orphanage")
-        for index, parent_orphan in enumerate(parent_orphans):
-            peer_1.send_and_ping(msg_tx(parent_orphan))
-        assert_equal(len(node.getorphantxs()),0)
-
-
-    def run_test(self):
-        self.nodes[0].setmocktime(int(time.time()))
-        self.wallet_nonsegwit = MiniWallet(self.nodes[0], mode=MiniWalletMode.RAW_P2PK)
-        self.generate(self.wallet_nonsegwit, 10)
-        self.wallet = MiniWallet(self.nodes[0])
-        self.generate(self.wallet, 160)
-        self.test_arrival_timing_orphan()
-        self.test_orphan_rejected_parents_exceptions()
-        self.test_orphan_multiple_parents()
-        self.test_orphans_overlapping_parents()
-        self.test_orphan_of_orphan()
-        self.test_orphan_inherit_rejection()
-        self.test_same_txid_orphan()
-        self.test_same_txid_orphan_of_orphan()
-        self.test_orphan_txid_inv()
-        self.test_max_orphan_amount()
-
-
-if __name__ == '__main__':
-    OrphanHandlingTest(__file__).main()
+#!/usr/bin/env python3
+# Copyright (c) 2023 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
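+"""Test p2p handling of orphan transactions (transactions whose parents are
+unknown): missing-parent requests, rejection caching, and orphanage limits."""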
+
+import time
+
+from test_framework.mempool_util import tx_in_orphanage
+from test_framework.messages import (
+    CInv,
+    CTxInWitness,
+    MSG_TX,
+    MSG_WITNESS_TX,
+    MSG_WTX,
+    msg_getdata,
+    msg_inv,
+    msg_notfound,
+    msg_tx,
+    tx_from_hex,
+)
+from test_framework.p2p import (
+    GETDATA_TX_INTERVAL,
+    NONPREF_PEER_TX_DELAY,
+    OVERLOADED_PEER_TX_DELAY,
+    p2p_lock,
+    P2PInterface,
+    P2PTxInvStore,
+    TXID_RELAY_DELAY,
+)
+from test_framework.util import (
+    assert_not_equal,
+    assert_equal,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.wallet import (
+    MiniWallet,
+    MiniWalletMode,
+)
+
+# Time to bump forward (using setmocktime) before waiting for the node to send getdata(tx) in response
+# to an inv(tx), in seconds. This delay includes all possible delays + 1, so it should only be used
+# when the value of the delay is not interesting. If we want to test that the node waits x seconds
+# for one peer and y seconds for another, use specific values instead.
+TXREQUEST_TIME_SKIP = NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY + OVERLOADED_PEER_TX_DELAY + 1
+
+DEFAULT_MAX_ORPHAN_TRANSACTIONS = 100
+
+def cleanup(func):
+    # Time to fast-forward (using setmocktime) in between subtests to ensure they do not interfere with
+    # one another, in seconds. Equal to 12 hours, which is enough to expire anything that may exist
+    # (though nothing should since state should be cleared) in p2p data structures.
+    LONG_TIME_SKIP = 12 * 60 * 60
+
+    def wrapper(self):
+        try:
+            func(self)
+        finally:
+            # Clear mempool
+            self.generate(self.nodes[0], 1)
+            self.nodes[0].disconnect_p2ps()
+            self.nodes[0].bumpmocktime(LONG_TIME_SKIP)
+    return wrapper
+
+class PeerTxRelayer(P2PTxInvStore):
+    """A P2PTxInvStore that also remembers all of the getdata and tx messages it receives."""
+    def __init__(self):
+        super().__init__()
+        self._tx_received = []
+        self._getdata_received = []
+
+    @property
+    def tx_received(self):
+        with p2p_lock:
+            return self._tx_received
+
+    @property
+    def getdata_received(self):
+        with p2p_lock:
+            return self._getdata_received
+
+    def on_tx(self, message):
+        self._tx_received.append(message)
+
+    def on_getdata(self, message):
+        self._getdata_received.append(message)
+
+    def wait_for_parent_requests(self, txids):
+        """Wait for requests for missing parents by txid with witness data (MSG_WITNESS_TX or
+        WitnessTx). Requires that the getdata message match these txids exactly; all txids must be
+        requested and no additional requests are allowed."""
+        def test_function():
+            last_getdata = self.last_message.get('getdata')
+            if not last_getdata:
+                return False
+            return len(last_getdata.inv) == len(txids) and all([item.type == MSG_WITNESS_TX and item.hash in txids for item in last_getdata.inv])
+        self.wait_until(test_function, timeout=10)
+
+    def assert_no_immediate_response(self, message):
+        """Check that the node does not immediately respond to this message with any of getdata,
+        inv, tx. The node may respond later.
+        """
+        prev_lastmessage = self.last_message
+        self.send_and_ping(message)
+        after_lastmessage = self.last_message
+        for msgtype in ["getdata", "inv", "tx"]:
+            if msgtype not in prev_lastmessage:
+                assert msgtype not in after_lastmessage
+            else:
+                assert_equal(prev_lastmessage[msgtype], after_lastmessage[msgtype])
+
+    def assert_never_requested(self, txhash):
+        """Check that the node has never sent us a getdata for this hash (int type)"""
+        for getdata in self.getdata_received:
+            for request in getdata.inv:
+                assert_not_equal(request.hash, txhash)
+
+class OrphanHandlingTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.num_nodes = 1
+        self.extra_args = [[]]
+
+    def create_parent_and_child(self):
+        """Create package with 1 parent and 1 child, normal fees (no cpfp)."""
+        parent = self.wallet.create_self_transfer()
+        child = self.wallet.create_self_transfer(utxo_to_spend=parent['new_utxo'])
+        return child["tx"].getwtxid(), child["tx"], parent["tx"]
+
+    def relay_transaction(self, peer, tx):
+        """Relay transaction using MSG_WTX"""
+        wtxid = int(tx.getwtxid(), 16)
+        peer.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=wtxid)]))
+        self.nodes[0].bumpmocktime(TXREQUEST_TIME_SKIP)
+        peer.wait_for_getdata([wtxid])
+        peer.send_and_ping(msg_tx(tx))
+
+    def create_malleated_version(self, tx):
+        """
+        Create a malleated version of the tx where the witness is replaced with garbage data.
+        Returns a CTransaction object.
+        """
+        tx_bad_wit = tx_from_hex(tx["hex"])
+        tx_bad_wit.wit.vtxinwit = [CTxInWitness()]
+        # Add garbage data to witness 0. We cannot simply strip the witness, as the node would
+        # classify it as a transaction in which the witness was missing rather than wrong.
+        tx_bad_wit.wit.vtxinwit[0].scriptWitness.stack = [b'garbage']
+
+        assert_equal(tx["txid"], tx_bad_wit.rehash())
+        assert_not_equal(tx["wtxid"], tx_bad_wit.getwtxid())
+
+        return tx_bad_wit
+
+    @cleanup
+    def test_arrival_timing_orphan(self):
+        self.log.info("Test missing parents that arrive during delay are not requested")
+        node = self.nodes[0]
+        tx_parent_arrives = self.wallet.create_self_transfer()
+        tx_parent_doesnt_arrive = self.wallet.create_self_transfer()
+        # Fake orphan spends nonexistent outputs of the two parents
+        tx_fake_orphan = self.wallet.create_self_transfer_multi(utxos_to_spend=[
+            {"txid": tx_parent_doesnt_arrive["txid"], "vout": 10, "value": tx_parent_doesnt_arrive["new_utxo"]["value"]},
+            {"txid": tx_parent_arrives["txid"], "vout": 10, "value": tx_parent_arrives["new_utxo"]["value"]}
+        ])
+
+        peer_spy = node.add_p2p_connection(PeerTxRelayer())
+        peer_normal = node.add_p2p_connection(PeerTxRelayer())
+        # This transaction is an orphan because it is missing inputs. It is a "fake" orphan that the
+        # spy peer has crafted to learn information about tx_parent_arrives even though it isn't
+        # able to spend a real output of it, but it could also just be a normal, real child tx.
+        # The node should not immediately respond with a request for orphan parents.
+        # Also, no request should be sent later because it will be resolved by
+        # the time the request is scheduled to be sent.
+        peer_spy.assert_no_immediate_response(msg_tx(tx_fake_orphan["tx"]))
+
+        # Node receives transaction. It attempts to obfuscate the exact timing at which this
+        # transaction entered its mempool. Send unsolicited because otherwise we need to wait for
+        # request delays.
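+        # ("Unsolicited" here means the tx message is pushed without a prior
+        # inv/getdata exchange, so no request-scheduling delay applies.)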
+        peer_normal.send_and_ping(msg_tx(tx_parent_arrives["tx"]))
+        assert tx_parent_arrives["txid"] in node.getrawmempool()
+
+        # Spy peer should not be able to query the node for the parent yet, since it hasn't been
+        # announced / insufficient time has elapsed.
+        parent_inv = CInv(t=MSG_WTX, h=int(tx_parent_arrives["tx"].getwtxid(), 16))
+        assert_equal(len(peer_spy.get_invs()), 0)
+        peer_spy.assert_no_immediate_response(msg_getdata([parent_inv]))
+
+        # Request would be scheduled with this delay because it is not a preferred relay peer.
+        self.nodes[0].bumpmocktime(NONPREF_PEER_TX_DELAY)
+        peer_spy.assert_never_requested(int(tx_parent_arrives["txid"], 16))
+        peer_spy.assert_never_requested(int(tx_parent_doesnt_arrive["txid"], 16))
+        # Request would be scheduled with this delay because it is by txid.
+        self.nodes[0].bumpmocktime(TXID_RELAY_DELAY)
+        peer_spy.wait_for_parent_requests([int(tx_parent_doesnt_arrive["txid"], 16)])
+        peer_spy.assert_never_requested(int(tx_parent_arrives["txid"], 16))
+
+    @cleanup
+    def test_orphan_rejected_parents_exceptions(self):
+        node = self.nodes[0]
+        peer1 = node.add_p2p_connection(PeerTxRelayer())
+        peer2 = node.add_p2p_connection(PeerTxRelayer())
+
+        self.log.info("Test orphan handling when a nonsegwit parent is known to be invalid")
+        parent_low_fee_nonsegwit = self.wallet_nonsegwit.create_self_transfer(fee_rate=0)
+        assert_equal(parent_low_fee_nonsegwit["txid"], parent_low_fee_nonsegwit["tx"].getwtxid())
+        parent_other = self.wallet_nonsegwit.create_self_transfer()
+        child_nonsegwit = self.wallet_nonsegwit.create_self_transfer_multi(
+            utxos_to_spend=[parent_other["new_utxo"], parent_low_fee_nonsegwit["new_utxo"]])
+
+        # Relay the parent. It should be rejected because it pays 0 fees.
+        self.relay_transaction(peer1, parent_low_fee_nonsegwit["tx"])
+        assert parent_low_fee_nonsegwit["txid"] not in node.getrawmempool()
+
+        # Relay the child. It should not be accepted because it has missing inputs.
+        # Its parent should not be requested because its hash (txid == wtxid) has been added to the rejection filter.
+        self.relay_transaction(peer2, child_nonsegwit["tx"])
+        assert child_nonsegwit["txid"] not in node.getrawmempool()
+        assert not tx_in_orphanage(node, child_nonsegwit["tx"])
+
+        # No parents are requested.
+        self.nodes[0].bumpmocktime(GETDATA_TX_INTERVAL)
+        peer1.assert_never_requested(int(parent_other["txid"], 16))
+        peer2.assert_never_requested(int(parent_other["txid"], 16))
+        peer2.assert_never_requested(int(parent_low_fee_nonsegwit["txid"], 16))
+
+        self.log.info("Test orphan handling when a segwit parent was invalid but may be retried with another witness")
+        parent_low_fee = self.wallet.create_self_transfer(fee_rate=0)
+        child_low_fee = self.wallet.create_self_transfer(utxo_to_spend=parent_low_fee["new_utxo"])
+
+        # Relay the low fee parent. It should not be accepted.
+        self.relay_transaction(peer1, parent_low_fee["tx"])
+        assert parent_low_fee["txid"] not in node.getrawmempool()
+
+        # Relay the child. It should not be accepted because it has missing inputs.
+        self.relay_transaction(peer2, child_low_fee["tx"])
+        assert child_low_fee["txid"] not in node.getrawmempool()
+        assert tx_in_orphanage(node, child_low_fee["tx"])
+
+        # The parent should be requested because even though the txid commits to the fee, it doesn't
+        # commit to the feerate. Delayed because it's by txid and this is not a preferred relay peer.
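+        # (The txid commits to inputs and outputs, and therefore to the fee,
+        # but not to the witness; a different witness changes the vsize and
+        # hence the feerate, so a retry with another witness could succeed.)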
+        self.nodes[0].bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY)
+        peer2.wait_for_getdata([int(parent_low_fee["tx"].rehash(), 16)])
+
+        self.log.info("Test orphan handling when a parent was previously downloaded with witness stripped")
+        parent_normal = self.wallet.create_self_transfer()
+        parent1_witness_stripped = tx_from_hex(parent_normal["tx"].serialize_without_witness().hex())
+        child_invalid_witness = self.wallet.create_self_transfer(utxo_to_spend=parent_normal["new_utxo"])
+
+        # Relay the parent with witness stripped. It should not be accepted.
+        self.relay_transaction(peer1, parent1_witness_stripped)
+        assert_equal(parent_normal["txid"], parent1_witness_stripped.rehash())
+        assert parent1_witness_stripped.rehash() not in node.getrawmempool()
+
+        # Relay the child. It should not be accepted because it has missing inputs.
+        self.relay_transaction(peer2, child_invalid_witness["tx"])
+        assert child_invalid_witness["txid"] not in node.getrawmempool()
+        assert tx_in_orphanage(node, child_invalid_witness["tx"])
+
+        # The parent should be requested since the unstripped wtxid would differ. Delayed because
+        # it's by txid and this is not a preferred relay peer.
+        self.nodes[0].bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY)
+        peer2.wait_for_getdata([int(parent_normal["tx"].rehash(), 16)])
+
+        # parent_normal can be relayed again even though parent1_witness_stripped was rejected
+        self.relay_transaction(peer1, parent_normal["tx"])
+        assert_equal(set(node.getrawmempool()), set([parent_normal["txid"], child_invalid_witness["txid"]]))
+
+    @cleanup
+    def test_orphan_multiple_parents(self):
+        node = self.nodes[0]
+        peer = node.add_p2p_connection(PeerTxRelayer())
+
+        self.log.info("Test orphan parent requests with a mixture of confirmed, in-mempool and missing parents")
+        # This UTXO confirmed a long time ago.
+        utxo_conf_old = self.wallet.send_self_transfer(from_node=node)["new_utxo"]
+        txid_conf_old = utxo_conf_old["txid"]
+        self.generate(self.wallet, 10)
+
+        # Create a fake reorg to trigger BlockDisconnected, which resets the rolling bloom filter.
+        # The alternative is to mine thousands of transactions to push it out of the filter.
+        last_block = node.getbestblockhash()
+        node.invalidateblock(last_block)
+        node.preciousblock(last_block)
+        node.syncwithvalidationinterfacequeue()
+
+        # This UTXO confirmed recently.
+        utxo_conf_recent = self.wallet.send_self_transfer(from_node=node)["new_utxo"]
+        self.generate(node, 1)
+
+        # This UTXO is unconfirmed and in the mempool.
+        assert_equal(len(node.getrawmempool()), 0)
+        mempool_tx = self.wallet.send_self_transfer(from_node=node)
+        utxo_unconf_mempool = mempool_tx["new_utxo"]
+
+        # This UTXO is unconfirmed and missing.
+        missing_tx = self.wallet.create_self_transfer()
+        utxo_unconf_missing = missing_tx["new_utxo"]
+        assert missing_tx["txid"] not in node.getrawmempool()
+
+        orphan = self.wallet.create_self_transfer_multi(utxos_to_spend=[utxo_conf_old,
+            utxo_conf_recent, utxo_unconf_mempool, utxo_unconf_missing])
+
+        self.relay_transaction(peer, orphan["tx"])
+        self.nodes[0].bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY)
+        peer.sync_with_ping()
+        assert tx_in_orphanage(node, orphan["tx"])
+        assert_equal(len(peer.last_message["getdata"].inv), 2)
+        peer.wait_for_parent_requests([int(txid_conf_old, 16), int(missing_tx["txid"], 16)])
+
+        # Even though the peer would send a notfound for the "old" confirmed transaction, the node
+        # doesn't give up on the orphan. Once all of the missing parents are received, it should be
+        # submitted to mempool.
+        peer.send_message(msg_notfound(vec=[CInv(MSG_WITNESS_TX, int(txid_conf_old, 16))]))
+        # Sync with ping to ensure orphans are reconsidered
+        peer.send_and_ping(msg_tx(missing_tx["tx"]))
+        assert_equal(node.getmempoolentry(orphan["txid"])["ancestorcount"], 3)
+
+    @cleanup
+    def test_orphans_overlapping_parents(self):
+        node = self.nodes[0]
+        # In the process of relaying inflight_parent_AB
+        peer_txrequest = node.add_p2p_connection(PeerTxRelayer())
+        # Sends the orphans
+        peer_orphans = node.add_p2p_connection(PeerTxRelayer())
+
+        confirmed_utxos = [self.wallet_nonsegwit.get_utxo() for _ in range(4)]
+        assert all([utxo["confirmations"] > 0 for utxo in confirmed_utxos])
+        self.log.info("Test handling of multiple orphans with missing parents that are already being requested")
+        # Parent of child_A only
+        missing_parent_A = self.wallet_nonsegwit.create_self_transfer(utxo_to_spend=confirmed_utxos[0])
+        # Parents of child_A and child_B
+        missing_parent_AB = self.wallet_nonsegwit.create_self_transfer(utxo_to_spend=confirmed_utxos[1])
+        inflight_parent_AB = self.wallet_nonsegwit.create_self_transfer(utxo_to_spend=confirmed_utxos[2])
+        # Parent of child_B only
+        missing_parent_B = self.wallet_nonsegwit.create_self_transfer(utxo_to_spend=confirmed_utxos[3])
+        child_A = self.wallet_nonsegwit.create_self_transfer_multi(
+            utxos_to_spend=[missing_parent_A["new_utxo"], missing_parent_AB["new_utxo"], inflight_parent_AB["new_utxo"]]
+        )
+        child_B = self.wallet_nonsegwit.create_self_transfer_multi(
+            utxos_to_spend=[missing_parent_B["new_utxo"], missing_parent_AB["new_utxo"], inflight_parent_AB["new_utxo"]]
+        )
+
+        # The wtxid and txid need to be the same for the node to recognize that the missing input
+        # and in-flight request for inflight_parent_AB are the same transaction.
+        assert_equal(inflight_parent_AB["txid"], inflight_parent_AB["tx"].getwtxid())
+
+        # Announce inflight_parent_AB and wait for getdata
+        peer_txrequest.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=int(inflight_parent_AB["tx"].getwtxid(), 16))]))
+        self.nodes[0].bumpmocktime(NONPREF_PEER_TX_DELAY)
+        peer_txrequest.wait_for_getdata([int(inflight_parent_AB["tx"].getwtxid(), 16)])
+
+        self.log.info("Test that the node does not request a parent if it has an in-flight txrequest")
+        # Relay orphan child_A
+        self.relay_transaction(peer_orphans, child_A["tx"])
+        self.nodes[0].bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY)
+        assert tx_in_orphanage(node, child_A["tx"])
+        # There are 3 missing parents. missing_parent_A and missing_parent_AB should be requested.
+        # But inflight_parent_AB should not, because there is already an in-flight request for it.
+        peer_orphans.wait_for_parent_requests([int(missing_parent_A["txid"], 16), int(missing_parent_AB["txid"], 16)])
+
+        self.log.info("Test that the node does not request a parent if it has an in-flight orphan parent request")
+        # Relay orphan child_B
+        self.relay_transaction(peer_orphans, child_B["tx"])
+        self.nodes[0].bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY)
+        assert tx_in_orphanage(node, child_B["tx"])
+        # Only missing_parent_B should be requested. Not inflight_parent_AB or missing_parent_AB
+        # because they are already being requested from peer_txrequest and peer_orphans respectively.
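+        # (In other words, the node keeps at most one outstanding request per
+        # txid at a time, regardless of which peer or orphan triggered it.)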
+        peer_orphans.wait_for_parent_requests([int(missing_parent_B["txid"], 16)])
+        peer_orphans.assert_never_requested(int(inflight_parent_AB["txid"], 16))
+
+    @cleanup
+    def test_orphan_of_orphan(self):
+        node = self.nodes[0]
+        peer = node.add_p2p_connection(PeerTxRelayer())
+
+        self.log.info("Test handling of an orphan with a parent who is another orphan")
+        missing_grandparent = self.wallet_nonsegwit.create_self_transfer()
+        missing_parent_orphan = self.wallet_nonsegwit.create_self_transfer(utxo_to_spend=missing_grandparent["new_utxo"])
+        missing_parent = self.wallet_nonsegwit.create_self_transfer()
+        orphan = self.wallet_nonsegwit.create_self_transfer_multi(utxos_to_spend=[missing_parent["new_utxo"], missing_parent_orphan["new_utxo"]])
+
+        # The node should put missing_parent_orphan into the orphanage and request missing_grandparent
+        self.relay_transaction(peer, missing_parent_orphan["tx"])
+        self.nodes[0].bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY)
+        assert tx_in_orphanage(node, missing_parent_orphan["tx"])
+        peer.wait_for_parent_requests([int(missing_grandparent["txid"], 16)])
+
+        # The node should put the orphan into the orphanage and request missing_parent, skipping
+        # missing_parent_orphan because it already has it in the orphanage.
+        self.relay_transaction(peer, orphan["tx"])
+        self.nodes[0].bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY)
+        assert tx_in_orphanage(node, orphan["tx"])
+        peer.wait_for_parent_requests([int(missing_parent["txid"], 16)])
+
+    @cleanup
+    def test_orphan_inherit_rejection(self):
+        node = self.nodes[0]
+        peer1 = node.add_p2p_connection(PeerTxRelayer())
+        peer2 = node.add_p2p_connection(PeerTxRelayer())
+        peer3 = node.add_p2p_connection(PeerTxRelayer())
+
+        self.log.info("Test that an orphan with rejected parents, along with any descendants, cannot be retried with an alternate witness")
+        parent_low_fee_nonsegwit = self.wallet_nonsegwit.create_self_transfer(fee_rate=0)
+        assert_equal(parent_low_fee_nonsegwit["txid"], parent_low_fee_nonsegwit["tx"].getwtxid())
+        child = self.wallet.create_self_transfer(utxo_to_spend=parent_low_fee_nonsegwit["new_utxo"])
+        grandchild = self.wallet.create_self_transfer(utxo_to_spend=child["new_utxo"])
+        assert_not_equal(child["txid"], child["tx"].getwtxid())
+        assert_not_equal(grandchild["txid"], grandchild["tx"].getwtxid())
+
+        # Relay the parent. It should be rejected because it pays 0 fees.
+        self.relay_transaction(peer1, parent_low_fee_nonsegwit["tx"])
+        assert parent_low_fee_nonsegwit["txid"] not in node.getrawmempool()
+
+        # Relay the child. It should be rejected for having missing parents, and this rejection is
+        # cached by txid and wtxid.
+        self.relay_transaction(peer1, child["tx"])
+        assert_equal(0, len(node.getrawmempool()))
+        assert not tx_in_orphanage(node, child["tx"])
+        peer1.assert_never_requested(parent_low_fee_nonsegwit["txid"])
+
+        # Grandchild should also not be kept in orphanage because its parent has been rejected.
+        self.relay_transaction(peer2, grandchild["tx"])
+        assert_equal(0, len(node.getrawmempool()))
+        assert not tx_in_orphanage(node, grandchild["tx"])
+        peer2.assert_never_requested(child["txid"])
+        peer2.assert_never_requested(child["tx"].getwtxid())
+
+        # The child should never be requested, even if announced again with potentially different witness.
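+        # (The earlier rejection was cached under both its txid and wtxid, so
+        # the txid-based announcement below also hits the rejection filter.)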
+        # Sync with ping to ensure orphans are reconsidered
+        peer3.send_and_ping(msg_inv([CInv(t=MSG_TX, h=int(child["txid"], 16))]))
+        self.nodes[0].bumpmocktime(TXREQUEST_TIME_SKIP)
+        peer3.assert_never_requested(child["txid"])
+
+    @cleanup
+    def test_same_txid_orphan(self):
+        self.log.info("Check what happens when orphan with same txid is already in orphanage")
+        node = self.nodes[0]
+
+        tx_parent = self.wallet.create_self_transfer()
+
+        # Create the real child
+        tx_child = self.wallet.create_self_transfer(utxo_to_spend=tx_parent["new_utxo"])
+
+        # Create a fake version of the child
+        tx_orphan_bad_wit = self.create_malleated_version(tx_child)
+
+        bad_peer = node.add_p2p_connection(P2PInterface())
+        honest_peer = node.add_p2p_connection(P2PInterface())
+
+        # 1. Fake orphan is received first. It is missing an input.
+        bad_peer.send_and_ping(msg_tx(tx_orphan_bad_wit))
+        assert tx_in_orphanage(node, tx_orphan_bad_wit)
+
+        # 2. Node requests the missing parent by txid.
+        parent_txid_int = int(tx_parent["txid"], 16)
+        node.bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY)
+        bad_peer.wait_for_getdata([parent_txid_int])
+
+        # 3. Honest peer relays the real child, which is also missing parents and should be placed
+        # in the orphanage.
+        with node.assert_debug_log(["missingorspent"]):
+            honest_peer.send_and_ping(msg_tx(tx_child["tx"]))
+        assert tx_in_orphanage(node, tx_child["tx"])
+
+        # Time out the previous request for the parent (node will not request the same transaction
+        # from multiple nodes at the same time)
+        node.bumpmocktime(GETDATA_TX_INTERVAL)
+
+        # 4. The parent is requested. Honest peer sends it.
+        honest_peer.wait_for_getdata([parent_txid_int])
+        # Sync with ping to ensure orphans are reconsidered
+        honest_peer.send_and_ping(msg_tx(tx_parent["tx"]))
+
+        # 5. After parent is accepted, orphans should be reconsidered.
+        # The real child should be accepted and the fake one rejected.
+        node_mempool = node.getrawmempool()
+        assert tx_parent["txid"] in node_mempool
+        assert tx_child["txid"] in node_mempool
+        assert_equal(node.getmempoolentry(tx_child["txid"])["wtxid"], tx_child["wtxid"])
+
+    @cleanup
+    def test_same_txid_orphan_of_orphan(self):
+        self.log.info("Check what happens when orphan's parent with same txid is already in orphanage")
+        node = self.nodes[0]
+
+        tx_grandparent = self.wallet.create_self_transfer()
+
+        # Create middle tx (both parent and child) which will be in orphanage.
+        tx_middle = self.wallet.create_self_transfer(utxo_to_spend=tx_grandparent["new_utxo"])
+
+        # Create a fake version of the middle tx
+        tx_orphan_bad_wit = self.create_malleated_version(tx_middle)
+
+        # Create grandchild spending from tx_middle (and spending from tx_orphan_bad_wit since they
+        # have the same txid).
+        tx_grandchild = self.wallet.create_self_transfer(utxo_to_spend=tx_middle["new_utxo"])
+
+        bad_peer = node.add_p2p_connection(P2PInterface())
+        honest_peer = node.add_p2p_connection(P2PInterface())
+
+        # 1. Fake orphan is received first. It is missing an input.
+        bad_peer.send_and_ping(msg_tx(tx_orphan_bad_wit))
+        assert tx_in_orphanage(node, tx_orphan_bad_wit)
+
+        # 2. Node requests missing tx_grandparent by txid.
+        grandparent_txid_int = int(tx_grandparent["txid"], 16)
+        node.bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY)
+        bad_peer.wait_for_getdata([grandparent_txid_int])
+
+        # 3. Honest peer relays the grandchild, which is missing a parent. The parent by txid already
+        # exists in orphanage, but should be re-requested because the node shouldn't assume that the
+        # witness data is the same. In this case, a same-txid-different-witness transaction exists!
+        honest_peer.send_and_ping(msg_tx(tx_grandchild["tx"]))
+        assert tx_in_orphanage(node, tx_grandchild["tx"])
+        middle_txid_int = int(tx_middle["txid"], 16)
+        node.bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY)
+        honest_peer.wait_for_getdata([middle_txid_int])
+
+        # 4. Honest peer relays the real child, which is also missing parents and should be placed
+        # in the orphanage.
+        honest_peer.send_and_ping(msg_tx(tx_middle["tx"]))
+        assert tx_in_orphanage(node, tx_middle["tx"])
+        assert_equal(len(node.getrawmempool()), 0)
+
+        # 5. Honest peer sends tx_grandparent
+        honest_peer.send_and_ping(msg_tx(tx_grandparent["tx"]))
+
+        # 6. After parent is accepted, orphans should be reconsidered.
+        # The real child should be accepted and the fake one rejected.
+        node_mempool = node.getrawmempool()
+        assert tx_grandparent["txid"] in node_mempool
+        assert tx_middle["txid"] in node_mempool
+        assert tx_grandchild["txid"] in node_mempool
+        assert_equal(node.getmempoolentry(tx_middle["txid"])["wtxid"], tx_middle["wtxid"])
+        assert_equal(len(node.getorphantxs()), 0)
+
+    @cleanup
+    def test_orphan_txid_inv(self):
+        self.log.info("Check node does not ignore announcement with same txid as tx in orphanage")
+        node = self.nodes[0]
+
+        tx_parent = self.wallet.create_self_transfer()
+
+        # Create the real child and fake version
+        tx_child = self.wallet.create_self_transfer(utxo_to_spend=tx_parent["new_utxo"])
+        tx_orphan_bad_wit = self.create_malleated_version(tx_child)
+
+        bad_peer = node.add_p2p_connection(PeerTxRelayer())
+        # Must not send wtxidrelay because otherwise the inv(TX) will be ignored later
+        honest_peer = node.add_p2p_connection(P2PInterface(wtxidrelay=False))
+
+        # 1. Fake orphan is received first. It is missing an input.
+        bad_peer.send_and_ping(msg_tx(tx_orphan_bad_wit))
+        assert tx_in_orphanage(node, tx_orphan_bad_wit)
+
+        # 2. Node requests the missing parent by txid.
+        parent_txid_int = int(tx_parent["txid"], 16)
+        node.bumpmocktime(NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY)
+        bad_peer.wait_for_getdata([parent_txid_int])
+
+        # 3. Honest peer announces the real child, by txid (this isn't common but the node should
+        # still keep track of it).
+        child_txid_int = int(tx_child["txid"], 16)
+        honest_peer.send_and_ping(msg_inv([CInv(t=MSG_TX, h=child_txid_int)]))
+
+        # 4. The child is requested. Honest peer sends it.
+        node.bumpmocktime(TXREQUEST_TIME_SKIP)
+        honest_peer.wait_for_getdata([child_txid_int])
+        honest_peer.send_and_ping(msg_tx(tx_child["tx"]))
+        assert tx_in_orphanage(node, tx_child["tx"])
+
+        # 5. After first parent request times out, the node sends another one for the missing parent
+        # of the real orphan child.
+        node.bumpmocktime(GETDATA_TX_INTERVAL)
+        honest_peer.wait_for_getdata([parent_txid_int])
+        honest_peer.send_and_ping(msg_tx(tx_parent["tx"]))
+
+        # 6. After parent is accepted, orphans should be reconsidered.
+        # The real child should be accepted and the fake one rejected. This may happen in either
+        # order since the message-processing is randomized. If tx_orphan_bad_wit is validated first,
+        # its consensus error leads to disconnection of bad_peer. If tx_child is validated first,
+        # tx_orphan_bad_wit is rejected for txn-same-nonwitness-data-in-mempool (no punishment).
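# Toy model (illustrative hashing only, not BIP141 serialization) of why the
# node above re-requests a parent it already has in the orphanage: the txid
# commits to the transaction without witness data, while the wtxid includes it,
# so two transactions can share a txid yet carry different witnesses.
import hashlib

def dsha256(data: bytes) -> bytes:
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

core = b"version|inputs|outputs|locktime"   # stand-in for the non-witness fields
real_wit, malleated_wit = b"witness-A", b"witness-B"

assert dsha256(core) == dsha256(core)                             # identical txids
assert dsha256(core + real_wit) != dsha256(core + malleated_wit)  # distinct wtxids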
+        node_mempool = node.getrawmempool()
+        assert tx_parent["txid"] in node_mempool
+        assert tx_child["txid"] in node_mempool
+        assert_equal(node.getmempoolentry(tx_child["txid"])["wtxid"], tx_child["wtxid"])
+        assert_equal(len(node.getorphantxs()), 0)
+
+    @cleanup
+    def test_max_orphan_amount(self):
+        self.log.info("Check that we never exceed our storage limits for orphans")
+
+        node = self.nodes[0]
+        self.generate(self.wallet, 1)
+        peer_1 = node.add_p2p_connection(P2PInterface())
+
+        self.log.info("Check that orphanage is empty on start of test")
+        assert len(node.getorphantxs()) == 0
+
+        self.log.info(f"Filling up orphanage with {DEFAULT_MAX_ORPHAN_TRANSACTIONS} (DEFAULT_MAX_ORPHAN_TRANSACTIONS) orphans")
+        orphans = []
+        parent_orphans = []
+        for _ in range(DEFAULT_MAX_ORPHAN_TRANSACTIONS):
+            tx_parent_1 = self.wallet.create_self_transfer()
+            tx_child_1 = self.wallet.create_self_transfer(utxo_to_spend=tx_parent_1["new_utxo"])
+            parent_orphans.append(tx_parent_1["tx"])
+            orphans.append(tx_child_1["tx"])
+            peer_1.send_message(msg_tx(tx_child_1["tx"]))
+
+        peer_1.sync_with_ping()
+        orphanage = node.getorphantxs()
+        assert_equal(len(orphanage), DEFAULT_MAX_ORPHAN_TRANSACTIONS)
+
+        for orphan in orphans:
+            assert tx_in_orphanage(node, orphan)
+
+        self.log.info("Check that we do not add more than the max orphan amount")
+        tx_parent_1 = self.wallet.create_self_transfer()
+        tx_child_1 = self.wallet.create_self_transfer(utxo_to_spend=tx_parent_1["new_utxo"])
+        peer_1.send_and_ping(msg_tx(tx_child_1["tx"]))
+        parent_orphans.append(tx_parent_1["tx"])
+        orphanage = node.getorphantxs()
+        assert_equal(len(orphanage), DEFAULT_MAX_ORPHAN_TRANSACTIONS)
+
+        self.log.info("Clearing the orphanage")
+        for parent_orphan in parent_orphans:
+            peer_1.send_and_ping(msg_tx(parent_orphan))
+        assert_equal(len(node.getorphantxs()), 0)
+
+
+    def run_test(self):
+        self.nodes[0].setmocktime(int(time.time()))
+        self.wallet_nonsegwit = MiniWallet(self.nodes[0], mode=MiniWalletMode.RAW_P2PK)
+        self.generate(self.wallet_nonsegwit, 10)
+        self.wallet = MiniWallet(self.nodes[0])
+        self.generate(self.wallet, 160)
+        self.test_arrival_timing_orphan()
+        self.test_orphan_rejected_parents_exceptions()
+        self.test_orphan_multiple_parents()
+        self.test_orphans_overlapping_parents()
+        self.test_orphan_of_orphan()
+        self.test_orphan_inherit_rejection()
+        self.test_same_txid_orphan()
+        self.test_same_txid_orphan_of_orphan()
+        self.test_orphan_txid_inv()
+        self.test_max_orphan_amount()
+
+
+if __name__ == '__main__':
+    OrphanHandlingTest(__file__).main()
diff --git a/test/functional/p2p_ping.py b/test/functional/p2p_ping.py
index 992ee8bfa76a1e..e8fb764814d21c 100755
--- a/test/functional/p2p_ping.py
+++ b/test/functional/p2p_ping.py
@@ -1,123 +1,123 @@
-#!/usr/bin/env python3
-# Copyright (c) 2020-2022 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test ping message -""" - -import time - -from test_framework.messages import msg_pong -from test_framework.p2p import P2PInterface -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, - assert_not_equal, -) - - -PING_INTERVAL = 2 * 60 -TIMEOUT_INTERVAL = 20 * 60 - - -class msg_pong_corrupt(msg_pong): - def serialize(self): - return b"" - - -class NodeNoPong(P2PInterface): - def on_ping(self, message): - pass - - -class PingPongTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - # Set the peer connection timeout low. It does not matter for this - # test, as long as it is less than TIMEOUT_INTERVAL. - self.extra_args = [['-peertimeout=1']] - - def check_peer_info(self, *, pingtime, minping, pingwait): - stats = self.nodes[0].getpeerinfo()[0] - assert_equal(stats.pop('pingtime', None), pingtime) - assert_equal(stats.pop('minping', None), minping) - assert_equal(stats.pop('pingwait', None), pingwait) - - def mock_forward(self, delta): - self.mock_time += delta - self.nodes[0].setmocktime(self.mock_time) - - def run_test(self): - self.mock_time = int(time.time()) - self.mock_forward(0) - - self.log.info('Check that ping is sent after connection is established') - no_pong_node = self.nodes[0].add_p2p_connection(NodeNoPong()) - self.mock_forward(3) - assert no_pong_node.last_message.pop('ping').nonce != 0 - self.check_peer_info(pingtime=None, minping=None, pingwait=3) - - self.log.info('Reply without nonce cancels ping') - with self.nodes[0].assert_debug_log(['pong peer=0: Short payload']): - no_pong_node.send_and_ping(msg_pong_corrupt()) - self.check_peer_info(pingtime=None, minping=None, pingwait=None) - - self.log.info('Reply without ping') - with self.nodes[0].assert_debug_log([ - 'pong peer=0: Unsolicited pong without ping, 0 expected, 0 received, 8 bytes', - ]): - no_pong_node.send_and_ping(msg_pong()) - self.check_peer_info(pingtime=None, minping=None, pingwait=None) - - self.log.info('Reply with wrong nonce does not cancel ping') - assert 'ping' not in no_pong_node.last_message - with self.nodes[0].assert_debug_log(['pong peer=0: Nonce mismatch']): - # mock time PING_INTERVAL ahead to trigger node into sending a ping - self.mock_forward(PING_INTERVAL + 1) - no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) - self.mock_forward(9) - # Send the wrong pong - no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce - 1)) - self.check_peer_info(pingtime=None, minping=None, pingwait=9) - - self.log.info('Reply with zero nonce does cancel ping') - with self.nodes[0].assert_debug_log(['pong peer=0: Nonce zero']): - no_pong_node.send_and_ping(msg_pong(0)) - self.check_peer_info(pingtime=None, minping=None, pingwait=None) - - self.log.info('Check that ping is properly reported on RPC') - assert 'ping' not in no_pong_node.last_message - # mock time PING_INTERVAL ahead to trigger node into sending a ping - self.mock_forward(PING_INTERVAL + 1) - no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) - ping_delay = 29 - self.mock_forward(ping_delay) - no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) - no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce)) - self.check_peer_info(pingtime=ping_delay, minping=ping_delay, pingwait=None) - - self.log.info('Check that minping is decreased after a fast roundtrip') - # mock time PING_INTERVAL ahead to trigger node into sending a ping - 
self.mock_forward(PING_INTERVAL + 1)
-        no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
-        ping_delay = 9
-        self.mock_forward(ping_delay)
-        no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
-        no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce))
-        self.check_peer_info(pingtime=ping_delay, minping=ping_delay, pingwait=None)
-
-        self.log.info('Check that peer is disconnected after ping timeout')
-        assert 'ping' not in no_pong_node.last_message
-        self.nodes[0].ping()
-        no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
-        with self.nodes[0].assert_debug_log(['ping timeout: 1201.000000s']):
-            self.mock_forward(TIMEOUT_INTERVAL // 2)
-            # Check that sending a ping does not prevent the disconnect
-            no_pong_node.sync_with_ping()
-            self.mock_forward(TIMEOUT_INTERVAL // 2 + 1)
-            no_pong_node.wait_for_disconnect()
-
-
-if __name__ == '__main__':
-    PingPongTest(__file__).main()
+#!/usr/bin/env python3
+# Copyright (c) 2020-2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test ping message
+"""
+
+import time
+
+from test_framework.messages import msg_pong
+from test_framework.p2p import P2PInterface
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+    assert_equal,
+    assert_not_equal,
+)
+
+
+PING_INTERVAL = 2 * 60
+TIMEOUT_INTERVAL = 20 * 60
+
+
+class msg_pong_corrupt(msg_pong):
+    def serialize(self):
+        return b""
+
+
+class NodeNoPong(P2PInterface):
+    def on_ping(self, message):
+        pass
+
+
+class PingPongTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.setup_clean_chain = True
+        self.num_nodes = 1
+        # Set the peer connection timeout low. It does not matter for this
+        # test, as long as it is less than TIMEOUT_INTERVAL.
+        self.extra_args = [['-peertimeout=1']]
+
+    def check_peer_info(self, *, pingtime, minping, pingwait):
+        stats = self.nodes[0].getpeerinfo()[0]
+        assert_equal(stats.pop('pingtime', None), pingtime)
+        assert_equal(stats.pop('minping', None), minping)
+        assert_equal(stats.pop('pingwait', None), pingwait)
+
+    def mock_forward(self, delta):
+        self.mock_time += delta
+        self.nodes[0].setmocktime(self.mock_time)
+
+    def run_test(self):
+        self.mock_time = int(time.time())
+        self.mock_forward(0)
+
+        self.log.info('Check that ping is sent after connection is established')
+        no_pong_node = self.nodes[0].add_p2p_connection(NodeNoPong())
+        self.mock_forward(3)
+        assert_not_equal(no_pong_node.last_message.pop('ping').nonce, 0)
+        self.check_peer_info(pingtime=None, minping=None, pingwait=3)
+
+        self.log.info('Reply without nonce cancels ping')
+        with self.nodes[0].assert_debug_log(['pong peer=0: Short payload']):
+            no_pong_node.send_and_ping(msg_pong_corrupt())
+        self.check_peer_info(pingtime=None, minping=None, pingwait=None)
+
+        self.log.info('Reply without ping')
+        with self.nodes[0].assert_debug_log([
+            'pong peer=0: Unsolicited pong without ping, 0 expected, 0 received, 8 bytes',
+        ]):
+            no_pong_node.send_and_ping(msg_pong())
+        self.check_peer_info(pingtime=None, minping=None, pingwait=None)
+
+        self.log.info('Reply with wrong nonce does not cancel ping')
+        assert 'ping' not in no_pong_node.last_message
+        with self.nodes[0].assert_debug_log(['pong peer=0: Nonce mismatch']):
+            # mock time PING_INTERVAL ahead to trigger node into sending a ping
+            self.mock_forward(PING_INTERVAL + 1)
+            no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
+            self.mock_forward(9)
+            # Send the wrong pong
+            no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce - 1))
+        self.check_peer_info(pingtime=None, minping=None, pingwait=9)
+
+        self.log.info('Reply with zero nonce does cancel ping')
+        with self.nodes[0].assert_debug_log(['pong peer=0: Nonce zero']):
+            no_pong_node.send_and_ping(msg_pong(0))
+        self.check_peer_info(pingtime=None, minping=None, pingwait=None)
+
+        self.log.info('Check that ping is properly reported on RPC')
+        assert 'ping' not in no_pong_node.last_message
+        # mock time PING_INTERVAL ahead to trigger node into sending a ping
+        self.mock_forward(PING_INTERVAL + 1)
+        no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
+        ping_delay = 29
+        self.mock_forward(ping_delay)
+        no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
+        no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce))
+        self.check_peer_info(pingtime=ping_delay, minping=ping_delay, pingwait=None)
+
+        self.log.info('Check that minping is decreased after a fast roundtrip')
+        # mock time PING_INTERVAL ahead to trigger node into sending a ping
+        self.mock_forward(PING_INTERVAL + 1)
+        no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
+        ping_delay = 9
+        self.mock_forward(ping_delay)
+        no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
+        no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce))
+        self.check_peer_info(pingtime=ping_delay, minping=ping_delay, pingwait=None)
+
+        self.log.info('Check that peer is disconnected after ping timeout')
+        assert 'ping' not in no_pong_node.last_message
+        self.nodes[0].ping()
+        no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
+        with self.nodes[0].assert_debug_log(['ping timeout: 1201.000000s']):
+            self.mock_forward(TIMEOUT_INTERVAL // 2)
+            # Check that sending a ping does not prevent the disconnect
+            no_pong_node.sync_with_ping()
+            self.mock_forward(TIMEOUT_INTERVAL // 2 + 1)
+            no_pong_node.wait_for_disconnect()
+
+
+if __name__ == '__main__':
+    PingPongTest(__file__).main()
diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py
index 723d0fd797f7de..cfdf24aadbf10e 100755
--- a/test/functional/p2p_segwit.py
+++ b/test/functional/p2p_segwit.py
@@ -1,2071 +1,2071 @@
-#!/usr/bin/env python3
-# Copyright (c) 2016-2022 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test segwit transactions and blocks on P2P network."""
-from decimal import Decimal
-import random
-import time
-
-from test_framework.blocktools import (
-    WITNESS_COMMITMENT_HEADER,
-    add_witness_commitment,
-    create_block,
-    create_coinbase,
-)
-from test_framework.messages import (
-    MAX_BIP125_RBF_SEQUENCE,
-    CBlockHeader,
-    CInv,
-    COutPoint,
-    CTransaction,
-    CTxIn,
-    CTxInWitness,
-    CTxOut,
-    CTxWitness,
-    MAX_BLOCK_WEIGHT,
-    MSG_BLOCK,
-    MSG_TX,
-    MSG_WITNESS_FLAG,
-    MSG_WITNESS_TX,
-    MSG_WTX,
-    NODE_NETWORK,
-    NODE_WITNESS,
-    msg_no_witness_block,
-    msg_getdata,
-    msg_headers,
-    msg_inv,
-    msg_tx,
-    msg_block,
-    msg_no_witness_tx,
-    ser_uint256,
-    ser_vector,
-    sha256,
-)
-from test_framework.p2p import (
-    P2PInterface,
-    p2p_lock,
-    P2P_SERVICES,
-)
-from test_framework.script import (
-    CScript,
-    CScriptNum,
-    CScriptOp,
-    MAX_SCRIPT_ELEMENT_SIZE,
-    OP_0,
-    OP_1,
-    OP_2,
-    OP_16,
-    OP_2DROP,
-    OP_CHECKMULTISIG,
-    OP_CHECKSIG,
-    OP_DROP,
-    OP_ELSE,
-    OP_ENDIF,
-    OP_IF,
-    OP_RETURN,
-    OP_TRUE,
-    SIGHASH_ALL,
-    SIGHASH_ANYONECANPAY,
-    SIGHASH_NONE,
-    SIGHASH_SINGLE,
-    hash160,
-    sign_input_legacy,
-    sign_input_segwitv0,
-)
-from test_framework.script_util import (
-    key_to_p2pk_script,
-    key_to_p2wpkh_script,
-    keyhash_to_p2pkh_script,
-    script_to_p2sh_script,
-    script_to_p2wsh_script,
-)
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (
-    assert_not_equal,
-    assert_equal,
-    softfork_active,
-    assert_raises_rpc_error,
-)
-from test_framework.wallet import MiniWallet
-from test_framework.wallet_util import generate_keypair
-
-
-MAX_SIGOP_COST = 80000
-
-SEGWIT_HEIGHT = 120
-
-class UTXO():
-    """Used to keep track of anyone-can-spend outputs that we can use in the tests."""
-    def __init__(self, sha256, n, value):
-        self.sha256 = sha256
-        self.n = n
-        self.nValue = value
-
-
-def subtest(func):
-    """Wraps the subtests for logging and state assertions."""
-    def func_wrapper(self, *args, **kwargs):
-        self.log.info("Subtest: {} (Segwit active = {})".format(func.__name__, self.segwit_active))
-        # Assert segwit status is as expected
-        assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)
-        func(self, *args, **kwargs)
-        # Each subtest should leave some utxos for the next subtest
-        assert self.utxo
-        self.sync_blocks()
-        # Assert segwit status is as expected at end of subtest
-        assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)
-
-    return func_wrapper
-
-
-def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
-    """Add signature for a P2PK witness script."""
-    tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [script]
-    sign_input_segwitv0(tx_to, in_idx, script, value, key, hashtype)
-
-def 
test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None): - """Send a transaction to the node and check that it's accepted to the mempool - - - Submit the transaction over the p2p interface - - use the getrawmempool rpc to check for acceptance.""" - reason = [reason] if reason else [] - with node.assert_debug_log(expected_msgs=reason): - p2p.send_and_ping(msg_tx(tx) if with_witness else msg_no_witness_tx(tx)) - assert_equal(tx.hash in node.getrawmempool(), accepted) - - -def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None): - """Send a block to the node and check that it's accepted - - - Submit the block over the p2p interface - - use the getbestblockhash rpc to check for acceptance.""" - reason = [reason] if reason else [] - with node.assert_debug_log(expected_msgs=reason): - p2p.send_and_ping(msg_block(block) if with_witness else msg_no_witness_block(block)) - assert_equal(node.getbestblockhash() == block.hash, accepted) - - -class TestP2PConn(P2PInterface): - def __init__(self, wtxidrelay=False): - super().__init__(wtxidrelay=wtxidrelay) - self.getdataset = set() - self.last_wtxidrelay = [] - self.lastgetdata = [] - self.wtxidrelay = wtxidrelay - - # Don't send getdata message replies to invs automatically. - # We'll send the getdata messages explicitly in the test logic. - def on_inv(self, message): - pass - - def on_getdata(self, message): - self.lastgetdata = message.inv - for inv in message.inv: - self.getdataset.add(inv.hash) - - def on_wtxidrelay(self, message): - self.last_wtxidrelay.append(message) - - def announce_tx_and_wait_for_getdata(self, tx, success=True, use_wtxid=False): - if success: - # sanity check - assert (self.wtxidrelay and use_wtxid) or (not self.wtxidrelay and not use_wtxid) - with p2p_lock: - self.last_message.pop("getdata", None) - if use_wtxid: - wtxid = tx.calc_sha256(True) - self.send_message(msg_inv(inv=[CInv(MSG_WTX, wtxid)])) - else: - self.send_message(msg_inv(inv=[CInv(MSG_TX, tx.sha256)])) - - if success: - if use_wtxid: - self.wait_for_getdata([wtxid]) - else: - self.wait_for_getdata([tx.sha256]) - else: - time.sleep(5) - assert not self.last_message.get("getdata") - - def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60): - with p2p_lock: - self.last_message.pop("getdata", None) - msg = msg_headers() - msg.headers = [CBlockHeader(block)] - if use_header: - self.send_message(msg) - else: - self.send_message(msg_inv(inv=[CInv(MSG_BLOCK, block.sha256)])) - self.wait_for_getheaders(block_hash=block.hashPrevBlock, timeout=timeout) - self.send_message(msg) - self.wait_for_getdata([block.sha256], timeout=timeout) - - def request_block(self, blockhash, inv_type, timeout=60): - with p2p_lock: - self.last_message.pop("block", None) - self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)])) - self.wait_for_block(blockhash, timeout=timeout) - return self.last_message["block"].block - -class SegWitTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 2 - # whitelist peers to speed up tx relay / mempool sync - self.noban_tx_relay = True - # This test tests SegWit both pre and post-activation, so use the normal BIP9 activation. 
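# The two helpers above share one shape: drive the node over P2P, then verify
# the outcome out-of-band over RPC, optionally pinning a reject reason via the
# debug log. A hedged sketch of that pattern (assuming the same test_framework
# primitives; assert_p2p_result itself is illustrative, not an existing helper):
def assert_p2p_result(node, peer, message, *, check, expected_msgs=None):
    with node.assert_debug_log(expected_msgs=expected_msgs or []):
        peer.send_and_ping(message)  # send_and_ping syncs, so RPC state is settled
    assert check(node)

# Usage, mirroring test_transaction_acceptance:
#   assert_p2p_result(node, p2p, msg_tx(tx),
#                     check=lambda n: tx.hash in n.getrawmempool())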
- self.extra_args = [ - ["-acceptnonstdtxn=1", f"-testactivationheight=segwit@{SEGWIT_HEIGHT}", "-par=1"], - ["-acceptnonstdtxn=0", f"-testactivationheight=segwit@{SEGWIT_HEIGHT}"], - ] - self.supports_cli = False - - # Helper functions - - def build_next_block(self): - """Build a block on top of node0's tip.""" - tip = self.nodes[0].getbestblockhash() - height = self.nodes[0].getblockcount() + 1 - block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1 - block = create_block(int(tip, 16), create_coinbase(height), block_time) - block.rehash() - return block - - def update_witness_block_with_transactions(self, block, tx_list, nonce=0): - """Add list of transactions to block, adds witness commitment, then solves.""" - block.vtx.extend(tx_list) - add_witness_commitment(block, nonce) - block.solve() - - def run_test(self): - # Setup the p2p connections - # self.test_node sets P2P_SERVICES, i.e. NODE_WITNESS | NODE_NETWORK - self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=P2P_SERVICES) - # self.old_node sets only NODE_NETWORK - self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK) - # self.std_node is for testing node1 (requires standard txs) - self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=P2P_SERVICES) - # self.std_wtx_node is for testing node1 with wtxid relay - self.std_wtx_node = self.nodes[1].add_p2p_connection(TestP2PConn(wtxidrelay=True), services=P2P_SERVICES) - - assert self.test_node.nServices & NODE_WITNESS != 0 - - # Keep a place to store utxo's that can be used in later tests - self.utxo = [] - - self.log.info("Starting tests before segwit activation") - self.segwit_active = False - self.wallet = MiniWallet(self.nodes[0]) - - self.test_non_witness_transaction() - self.test_v0_outputs_arent_spendable() - self.test_block_relay() - self.test_unnecessary_witness_before_segwit_activation() - self.test_witness_tx_relay_before_segwit_activation() - self.test_standardness_v0() - - self.log.info("Advancing to segwit activation") - self.advance_to_segwit_active() - - # Segwit status 'active' - - self.test_p2sh_witness() - self.test_witness_commitments() - self.test_block_malleability() - self.test_witness_block_size() - self.test_submit_block() - self.test_extra_witness_data() - self.test_max_witness_push_length() - self.test_max_witness_script_length() - self.test_witness_input_length() - self.test_block_relay() - self.test_tx_relay_after_segwit_activation() - self.test_standardness_v0() - self.test_segwit_versions() - self.test_premature_coinbase_witness_spend() - self.test_uncompressed_pubkey() - self.test_signature_version_1() - self.test_non_standard_witness_blinding() - self.test_non_standard_witness() - self.test_witness_sigops() - self.test_superfluous_witness() - self.test_wtxid_relay() - - # Individual tests - - @subtest - def test_non_witness_transaction(self): - """See if sending a regular transaction works, and create a utxo to use in later tests.""" - # Mine a block with an anyone-can-spend coinbase, - # let it mature, then try to spend it. 
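# Maturity arithmetic the next subtest relies on: a coinbase output becomes
# spendable at COINBASE_MATURITY = 100 confirmations, so after the block that
# creates it, generating 99 more blocks is exactly enough.
COINBASE_MATURITY = 100
assert 1 + 99 >= COINBASE_MATURITY  # the coinbase's own block + generate(..., 99)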
- - block = self.build_next_block() - block.solve() - self.test_node.send_and_ping(msg_no_witness_block(block)) # make sure the block was processed - txid = block.vtx[0].sha256 - - self.generate(self.wallet, 99) # let the block mature - - # Create a transaction that spends the coinbase - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(txid, 0), b"")) - tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) - tx.calc_sha256() - - # Check that serializing it with or without witness is the same - # This is a sanity check of our testing framework. - assert_equal(msg_no_witness_tx(tx).serialize(), msg_tx(tx).serialize()) - - self.test_node.send_and_ping(msg_tx(tx)) # make sure the block was processed - assert tx.hash in self.nodes[0].getrawmempool() - # Save this transaction for later - self.utxo.append(UTXO(tx.sha256, 0, 49 * 100000000)) - self.generate(self.nodes[0], 1) - - @subtest - def test_unnecessary_witness_before_segwit_activation(self): - """Verify that blocks with witnesses are rejected before activation.""" - - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE]))) - tx.wit.vtxinwit.append(CTxInWitness()) - tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])] - - # Verify the hash with witness differs from the txid - # (otherwise our testing framework must be broken!) - tx.rehash() - assert tx.sha256 != tx.calc_sha256(with_witness=True) - - # Construct a block that includes the transaction. - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx]) - # Sending witness data before activation is not allowed (anti-spam - # rule). - test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='unexpected-witness') - - # But it should not be permanently marked bad... - # Resend without witness information. - self.test_node.send_and_ping(msg_no_witness_block(block)) # make sure the block was processed - assert_equal(self.nodes[0].getbestblockhash(), block.hash) - - # Update our utxo list; we spent the first entry. - self.utxo.pop(0) - self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue)) - - @subtest - def test_block_relay(self): - """Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG. - - This is true regardless of segwit activation. - Also test that we don't ask for blocks from unupgraded peers.""" - - blocktype = 2 | MSG_WITNESS_FLAG - - # test_node has set NODE_WITNESS, so all getdata requests should be for - # witness blocks. - # Test announcing a block via inv results in a getdata, and that - # announcing a block with a header results in a getdata - block1 = self.build_next_block() - block1.solve() - - # Send an empty headers message, to clear out any prior getheaders - # messages that our peer may be waiting for us on. - self.test_node.send_message(msg_headers()) - - self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False) - assert self.test_node.last_message["getdata"].inv[0].type == blocktype - test_witness_block(self.nodes[0], self.test_node, block1, True) - - block2 = self.build_next_block() - block2.solve() - - self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True) - assert self.test_node.last_message["getdata"].inv[0].type == blocktype - test_witness_block(self.nodes[0], self.test_node, block2, True) - - # Check that we can getdata for witness blocks or regular blocks, - # and the right thing happens. 
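# The 'blocktype' assertions above rely on inv types being bit-composable: the
# witness variant of a getdata type is the base type with the witness flag OR'd
# in. Values below mirror test_framework.messages (shown as literals for
# illustration):
MSG_BLOCK = 2
MSG_WITNESS_FLAG = 1 << 30
MSG_WITNESS_BLOCK = MSG_BLOCK | MSG_WITNESS_FLAG

assert MSG_WITNESS_BLOCK == 2 | MSG_WITNESS_FLAG            # the blocktype used above
assert MSG_WITNESS_BLOCK & ~MSG_WITNESS_FLAG == MSG_BLOCK   # stripping the flag recovers the base type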
- if not self.segwit_active: - # Before activation, we should be able to request old blocks with - # or without witness, and they should be the same. - chain_height = self.nodes[0].getblockcount() - # Pick 10 random blocks on main chain, and verify that getdata's - # for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal. - all_heights = list(range(chain_height + 1)) - random.shuffle(all_heights) - all_heights = all_heights[0:10] - for height in all_heights: - block_hash = self.nodes[0].getblockhash(height) - rpc_block = self.nodes[0].getblock(block_hash, False) - block_hash = int(block_hash, 16) - block = self.test_node.request_block(block_hash, 2) - wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG) - assert_equal(block.serialize(), wit_block.serialize()) - assert_equal(block.serialize(), bytes.fromhex(rpc_block)) - else: - # After activation, witness blocks and non-witness blocks should - # be different. Verify rpc getblock() returns witness blocks, while - # getdata respects the requested type. - block = self.build_next_block() - self.update_witness_block_with_transactions(block, []) - # This gives us a witness commitment. - assert len(block.vtx[0].wit.vtxinwit) == 1 - assert len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1 - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - # Now try to retrieve it... - rpc_block = self.nodes[0].getblock(block.hash, False) - non_wit_block = self.test_node.request_block(block.sha256, 2) - wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG) - assert_equal(wit_block.serialize(), bytes.fromhex(rpc_block)) - assert_equal(wit_block.serialize(False), non_wit_block.serialize()) - assert_equal(wit_block.serialize(), block.serialize()) - - # Test size, vsize, weight - rpc_details = self.nodes[0].getblock(block.hash, True) - assert_equal(rpc_details["size"], len(block.serialize())) - assert_equal(rpc_details["strippedsize"], len(block.serialize(False))) - assert_equal(rpc_details["weight"], block.get_weight()) - - # Upgraded node should not ask for blocks from unupgraded - block4 = self.build_next_block() - block4.solve() - self.old_node.getdataset = set() - - # Blocks can be requested via direct-fetch (immediately upon processing the announcement) - # or via parallel download (with an indeterminate delay from processing the announcement) - # so to test that a block is NOT requested, we could guess a time period to sleep for, - # and then check. We can avoid the sleep() by taking advantage of transaction getdata's - # being processed after block getdata's, and announce a transaction as well, - # and then check to see if that particular getdata has been received. - # Since 0.14, inv's will only be responded to with a getheaders, so send a header - # to announce this block. - msg = msg_headers() - msg.headers = [CBlockHeader(block4)] - self.old_node.send_message(msg) - self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0]) - assert block4.sha256 not in self.old_node.getdataset - - @subtest - def test_v0_outputs_arent_spendable(self): - """Test that v0 outputs aren't spendable before segwit activation. - - ~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was - backdated so that it applies to all blocks, going back to the genesis - block. 
- - Consequently, version 0 witness outputs are never spendable without - witness, and so can't be spent before segwit activation (the point at which - blocks are permitted to contain witnesses).""" - - # Create two outputs, a p2wsh and p2sh-p2wsh - witness_script = CScript([OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_script) - p2sh_script_pubkey = script_to_p2sh_script(script_pubkey) - - value = self.utxo[0].nValue // 3 - - tx = CTransaction() - tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')] - tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)] - tx.vout.append(CTxOut(value, CScript([OP_TRUE]))) - tx.rehash() - txid = tx.sha256 - - # Add it to a block - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx]) - # Verify that segwit isn't activated. A block serialized with witness - # should be rejected prior to activation. - test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness') - # Now send the block without witness. It should be accepted - test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False) - - # Now try to spend the outputs. This should fail since SCRIPT_VERIFY_WITNESS is always enabled. - p2wsh_tx = CTransaction() - p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')] - p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))] - p2wsh_tx.wit.vtxinwit.append(CTxInWitness()) - p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])] - p2wsh_tx.rehash() - - p2sh_p2wsh_tx = CTransaction() - p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))] - p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))] - p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness()) - p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])] - p2sh_p2wsh_tx.rehash() - - for tx in [p2wsh_tx, p2sh_p2wsh_tx]: - - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx]) - - # When the block is serialized with a witness, the block will be rejected because witness - # data isn't allowed in blocks that don't commit to witness data. - test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness') - - # When the block is serialized without witness, validation fails because the transaction is - # invalid (transactions are always validated with SCRIPT_VERIFY_WITNESS so a segwit v0 transaction - # without a witness is invalid). - # Note: The reject reason for this failure could be - # 'block-validation-failed' (if script check threads > 1) or - # 'mandatory-script-verify-flag-failed (Witness program was passed an - # empty witness)' (otherwise). - test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=False, - reason='mandatory-script-verify-flag-failed (Witness program was passed an empty witness)') - - self.utxo.pop(0) - self.utxo.append(UTXO(txid, 2, value)) - - @subtest - def test_witness_tx_relay_before_segwit_activation(self): - - # Generate a transaction that doesn't require a witness, but send it - # with a witness. Should be rejected for premature-witness, but should - # not be added to recently rejected list. 
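# For reference, the v0 P2WSH scriptPubKey produced by script_to_p2wsh_script
# throughout these subtests is OP_0 followed by a 32-byte push of
# sha256(witness_script). A self-contained sketch:
import hashlib

def p2wsh_script_pubkey(witness_script: bytes) -> bytes:
    witness_program = hashlib.sha256(witness_script).digest()
    return bytes([0x00, 0x20]) + witness_program  # OP_0, push-32, program

assert len(p2wsh_script_pubkey(bytes([0x51]))) == 34  # 0x51 = OP_TRUE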
- tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) - tx.wit.vtxinwit.append(CTxInWitness()) - tx.wit.vtxinwit[0].scriptWitness.stack = [b'a'] - tx.rehash() - - tx_hash = tx.sha256 - tx_value = tx.vout[0].nValue - - # Verify that if a peer doesn't set nServices to include NODE_WITNESS, - # the getdata is just for the non-witness portion. - self.old_node.announce_tx_and_wait_for_getdata(tx) - assert self.old_node.last_message["getdata"].inv[0].type == MSG_TX - - # Since we haven't delivered the tx yet, inv'ing the same tx from - # a witness transaction ought not result in a getdata. - self.test_node.announce_tx_and_wait_for_getdata(tx, success=False) - - # Delivering this transaction with witness should fail (no matter who - # its from) - assert_equal(len(self.nodes[0].getrawmempool()), 0) - assert_equal(len(self.nodes[1].getrawmempool()), 0) - test_transaction_acceptance(self.nodes[0], self.old_node, tx, with_witness=True, accepted=False) - test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False) - - # But eliminating the witness should fix it - test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True) - - # Cleanup: mine the first transaction and update utxo - self.generate(self.nodes[0], 1) - assert_equal(len(self.nodes[0].getrawmempool()), 0) - - self.utxo.pop(0) - self.utxo.append(UTXO(tx_hash, 0, tx_value)) - - @subtest - def test_standardness_v0(self): - """Test V0 txout standardness. - - V0 segwit outputs and inputs are always standard. - V0 segwit inputs may only be mined after activation, but not before.""" - - witness_script = CScript([OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_script) - p2sh_script_pubkey = script_to_p2sh_script(witness_script) - - # First prepare a p2sh output (so that spending it will pass standardness) - p2sh_tx = CTransaction() - p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")] - p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 1000, p2sh_script_pubkey)] - p2sh_tx.rehash() - - # Mine it on test_node to create the confirmed output. - test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_tx, with_witness=True, accepted=True) - self.generate(self.nodes[0], 1) - - # Now test standardness of v0 P2WSH outputs. - # Start by creating a transaction with two outputs. - tx = CTransaction() - tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_script]))] - tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)] - tx.vout.append(CTxOut(8000, script_pubkey)) # Might burn this later - tx.vin[0].nSequence = MAX_BIP125_RBF_SEQUENCE # Just to have the option to bump this tx from the mempool - tx.rehash() - - # This is always accepted, since the mempool policy is to consider segwit as always active - # and thus allow segwit outputs - test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True) - - # Now create something that looks like a P2PKH output. This won't be spendable. - witness_hash = sha256(witness_script) - script_pubkey = CScript([OP_0, hash160(witness_hash)]) - tx2 = CTransaction() - # tx was accepted, so we spend the second output. 
- tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")] - tx2.vout = [CTxOut(7000, script_pubkey)] - tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_script] - tx2.rehash() - - test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True) - - # Now update self.utxo for later tests. - tx3 = CTransaction() - # tx and tx2 were both accepted. Don't bother trying to reclaim the - # P2PKH output; just send tx's first output back to an anyone-can-spend. - self.sync_mempools([self.nodes[0], self.nodes[1]]) - tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")] - tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))] - tx3.wit.vtxinwit.append(CTxInWitness()) - tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_script] - tx3.rehash() - if not self.segwit_active: - # Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed - # in blocks and the tx is impossible to mine right now. - testres3 = self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]) - testres3[0]["fees"].pop("effective-feerate") - testres3[0]["fees"].pop("effective-includes") - assert_equal(testres3, - [{ - 'txid': tx3.hash, - 'wtxid': tx3.getwtxid(), - 'allowed': True, - 'vsize': tx3.get_vsize(), - 'fees': { - 'base': Decimal('0.00001000'), - }, - }], - ) - # Create the same output as tx3, but by replacing tx - tx3_out = tx3.vout[0] - tx3 = tx - tx3.vout = [tx3_out] - tx3.rehash() - testres3_replaced = self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]) - testres3_replaced[0]["fees"].pop("effective-feerate") - testres3_replaced[0]["fees"].pop("effective-includes") - assert_equal(testres3_replaced, - [{ - 'txid': tx3.hash, - 'wtxid': tx3.getwtxid(), - 'allowed': True, - 'vsize': tx3.get_vsize(), - 'fees': { - 'base': Decimal('0.00011000'), - }, - }], - ) - test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True) - - self.generate(self.nodes[0], 1) - self.utxo.pop(0) - self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) - assert_equal(len(self.nodes[1].getrawmempool()), 0) - - @subtest - def advance_to_segwit_active(self): - """Mine enough blocks to activate segwit.""" - assert not softfork_active(self.nodes[0], 'segwit') - height = self.nodes[0].getblockcount() - self.generate(self.nodes[0], SEGWIT_HEIGHT - height - 2) - assert not softfork_active(self.nodes[0], 'segwit') - self.generate(self.nodes[0], 1) - assert softfork_active(self.nodes[0], 'segwit') - self.segwit_active = True - - @subtest - def test_p2sh_witness(self): - """Test P2SH wrapped witness programs.""" - - # Prepare the p2sh-wrapped witness output - witness_script = CScript([OP_DROP, OP_TRUE]) - p2wsh_pubkey = script_to_p2wsh_script(witness_script) - script_pubkey = script_to_p2sh_script(p2wsh_pubkey) - script_sig = CScript([p2wsh_pubkey]) # a push of the redeem script - - # Fund the P2SH output - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey)) - tx.rehash() - - # Verify mempool acceptance and block validity - test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True) - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=True) - self.sync_blocks() - - # Now test 
attempts to spend the output. - spend_tx = CTransaction() - spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig)) - spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE]))) - spend_tx.rehash() - - # This transaction should not be accepted into the mempool pre- or - # post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which - # will require a witness to spend a witness program regardless of - # segwit activation. Note that older bitcoind's that are not - # segwit-aware would also reject this for failing CLEANSTACK. - with self.nodes[0].assert_debug_log( - expected_msgs=[spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Witness program was passed an empty witness)']): - test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False) - - # Try to put the witness script in the scriptSig, should also fail. - spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a']) - spend_tx.rehash() - with self.nodes[0].assert_debug_log( - expected_msgs=[spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)']): - test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False) - - # Now put the witness script in the witness, should succeed after - # segwit activates. - spend_tx.vin[0].scriptSig = script_sig - spend_tx.rehash() - spend_tx.wit.vtxinwit.append(CTxInWitness()) - spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_script] - - # Verify mempool acceptance - test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True) - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [spend_tx]) - - # If we're after activation, then sending this with witnesses should be valid. - # This no longer works before activation, because SCRIPT_VERIFY_WITNESS - # is always set. - # TODO: rewrite this test to make clear that it only works after activation. - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - # Update self.utxo - self.utxo.pop(0) - self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue)) - - @subtest - def test_witness_commitments(self): - """Test witness commitments. - - This test can only be run after segwit has activated.""" - - # First try a correct witness commitment. - block = self.build_next_block() - add_witness_commitment(block) - block.solve() - - # Test the test -- witness serialization should be different - assert msg_block(block).serialize() != msg_no_witness_block(block).serialize() - - # This empty block should be valid. - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - # Try to tweak the nonce - block_2 = self.build_next_block() - add_witness_commitment(block_2, nonce=28) - block_2.solve() - - # The commitment should have changed! - assert block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1] - - # This should also be valid. 
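# Per BIP141, the commitment exercised in test_witness_commitments below is a
# double-SHA256 of the witness merkle root concatenated with the 32-byte
# witness reserved value (the "nonce"), carried in an OP_RETURN output whose
# payload starts with WITNESS_COMMITMENT_HEADER (0xaa21a9ed). A sketch of just
# that output script:
import hashlib

def dsha256(data: bytes) -> bytes:
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

def witness_commitment_script(witness_merkle_root: bytes, nonce: bytes) -> bytes:
    assert len(witness_merkle_root) == 32 and len(nonce) == 32
    commitment = dsha256(witness_merkle_root + nonce)
    # OP_RETURN, push-36, 4-byte header, 32-byte commitment
    return bytes([0x6a, 0x24]) + bytes.fromhex("aa21a9ed") + commitment

assert len(witness_commitment_script(b"\x00" * 32, b"\x00" * 32)) == 38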
- test_witness_block(self.nodes[0], self.test_node, block_2, accepted=True) - - # Now test commitments with actual transactions - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - - # Let's construct a witness script - witness_script = CScript([OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_script) - tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey)) - tx.rehash() - - # tx2 will spend tx1, and send back to a regular anyone-can-spend address - tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) - tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_script)) - tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_script] - tx2.rehash() - - block_3 = self.build_next_block() - self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1) - # Add an extra OP_RETURN output that matches the witness commitment template, - # even though it has extra data after the incorrect commitment. - # This block should fail. - block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10]))) - block_3.vtx[0].rehash() - block_3.hashMerkleRoot = block_3.calc_merkle_root() - block_3.solve() - - test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False, reason='bad-witness-merkle-match') - - # Add a different commitment with different nonce, but in the - # right location, and with some funds burned(!). - # This should succeed (nValue shouldn't affect finding the - # witness commitment). - add_witness_commitment(block_3, nonce=0) - block_3.vtx[0].vout[0].nValue -= 1 - block_3.vtx[0].vout[-1].nValue += 1 - block_3.vtx[0].rehash() - block_3.hashMerkleRoot = block_3.calc_merkle_root() - assert len(block_3.vtx[0].vout) == 4 # 3 OP_returns - block_3.solve() - test_witness_block(self.nodes[0], self.test_node, block_3, accepted=True) - - # Finally test that a block with no witness transactions can - # omit the commitment. - block_4 = self.build_next_block() - tx3 = CTransaction() - tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) - tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_script)) - tx3.rehash() - block_4.vtx.append(tx3) - block_4.hashMerkleRoot = block_4.calc_merkle_root() - block_4.solve() - test_witness_block(self.nodes[0], self.test_node, block_4, with_witness=False, accepted=True) - - # Update available utxo's for use in later test. - self.utxo.pop(0) - self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) - - @subtest - def test_block_malleability(self): - - # Make sure that a block that has too big a virtual size - # because of a too-large coinbase witness is not permanently - # marked bad. 
- block = self.build_next_block() - add_witness_commitment(block) - block.solve() - - block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000) - assert block.get_weight() > MAX_BLOCK_WEIGHT - - # We can't send over the p2p network, because this is too big to relay - # TODO: repeat this test with a block that can be relayed - assert_equal('bad-witness-nonce-size', self.nodes[0].submitblock(block.serialize().hex())) - - assert self.nodes[0].getbestblockhash() != block.hash - - block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop() - assert block.get_weight() < MAX_BLOCK_WEIGHT - assert_equal(None, self.nodes[0].submitblock(block.serialize().hex())) - - assert self.nodes[0].getbestblockhash() == block.hash - - # Now make sure that malleating the witness reserved value doesn't - # result in a block permanently marked bad. - block = self.build_next_block() - add_witness_commitment(block) - block.solve() - - # Change the nonce -- should not cause the block to be permanently - # failed - block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)] - test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='bad-witness-merkle-match') - - # Changing the witness reserved value doesn't change the block hash - block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)] - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - @subtest - def test_witness_block_size(self): - # TODO: Test that non-witness carrying blocks can't exceed 1MB - # Skipping this test for now; this is covered in feature_block.py - - # Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB. - block = self.build_next_block() - - assert len(self.utxo) > 0 - - # Create a P2WSH transaction. - # The witness script will be a bunch of OP_2DROP's, followed by OP_TRUE. - # This should give us plenty of room to tweak the spending tx's - # virtual size. - NUM_DROPS = 200 # 201 max ops per script! 
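# Weight bookkeeping used below, per BIP141: weight = 3 * stripped_size +
# total_size, capped at MAX_BLOCK_WEIGHT = 4,000,000. A byte of witness data
# counts only in total_size, so growing a witness push by one byte adds exactly
# one weight unit; that is what lets the test overshoot the cap by a single
# byte and then shrink back under it.
MAX_BLOCK_WEIGHT = 4_000_000

def block_weight(stripped_size: int, total_size: int) -> int:
    return 3 * stripped_size + total_size

assert block_weight(1_000, 2_001) == block_weight(1_000, 2_000) + 1  # +1 witness byte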
- NUM_OUTPUTS = 50 - - witness_script = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_script) - - prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n) - value = self.utxo[0].nValue - - parent_tx = CTransaction() - parent_tx.vin.append(CTxIn(prevout, b"")) - child_value = int(value / NUM_OUTPUTS) - for _ in range(NUM_OUTPUTS): - parent_tx.vout.append(CTxOut(child_value, script_pubkey)) - parent_tx.vout[0].nValue -= 50000 - assert parent_tx.vout[0].nValue > 0 - parent_tx.rehash() - - child_tx = CTransaction() - for i in range(NUM_OUTPUTS): - child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b"")) - child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))] - for _ in range(NUM_OUTPUTS): - child_tx.wit.vtxinwit.append(CTxInWitness()) - child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_script] - child_tx.rehash() - self.update_witness_block_with_transactions(block, [parent_tx, child_tx]) - - additional_bytes = MAX_BLOCK_WEIGHT - block.get_weight() - i = 0 - while additional_bytes > 0: - # Add some more bytes to each input until we hit MAX_BLOCK_WEIGHT+1 - extra_bytes = min(additional_bytes + 1, 55) - block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes) - additional_bytes -= extra_bytes - i += 1 - - block.vtx[0].vout.pop() # Remove old commitment - add_witness_commitment(block) - block.solve() - assert_equal(block.get_weight(), MAX_BLOCK_WEIGHT + 1) - # Make sure that our test case would exceed the old max-network-message - # limit - assert len(block.serialize()) > 2 * 1024 * 1024 - - test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='bad-blk-weight') - - # Now resize the second transaction to make the block fit. - cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0]) - block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1) - block.vtx[0].vout.pop() - add_witness_commitment(block) - block.solve() - assert block.get_weight() == MAX_BLOCK_WEIGHT - - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - # Update available utxo's - self.utxo.pop(0) - self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue)) - - @subtest - def test_submit_block(self): - """Test that submitblock adds the nonce automatically when possible.""" - block = self.build_next_block() - - # Try using a custom nonce and then don't supply it. - # This shouldn't possibly work. - add_witness_commitment(block, nonce=1) - block.vtx[0].wit = CTxWitness() # drop the nonce - block.solve() - assert_equal('bad-witness-merkle-match', self.nodes[0].submitblock(block.serialize().hex())) - assert self.nodes[0].getbestblockhash() != block.hash - - # Now redo commitment with the standard nonce, but let bitcoind fill it in. - add_witness_commitment(block, nonce=0) - block.vtx[0].wit = CTxWitness() - block.solve() - assert_equal(None, self.nodes[0].submitblock(block.serialize().hex())) - assert_equal(self.nodes[0].getbestblockhash(), block.hash) - - # This time, add a tx with non-empty witness, but don't supply - # the commitment. - block_2 = self.build_next_block() - - add_witness_commitment(block_2) - - block_2.solve() - - # Drop commitment and nonce -- submitblock should not fill in. - block_2.vtx[0].vout.pop() - block_2.vtx[0].wit = CTxWitness() - - assert_equal('bad-txnmrklroot', self.nodes[0].submitblock(block_2.serialize().hex())) - # Tip should not advance! 
- assert self.nodes[0].getbestblockhash() != block_2.hash - - @subtest - def test_extra_witness_data(self): - """Test extra witness data in a transaction.""" - - block = self.build_next_block() - - witness_script = CScript([OP_DROP, OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_script) - - # First try extra witness data on a tx that doesn't require a witness - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey)) - tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output - tx.wit.vtxinwit.append(CTxInWitness()) - tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])] - tx.rehash() - self.update_witness_block_with_transactions(block, [tx]) - - # Extra witness data should not be allowed. - test_witness_block(self.nodes[0], self.test_node, block, accepted=False, - reason='mandatory-script-verify-flag-failed (Witness provided for non-witness script)') - - # Try extra signature data. Ok if we're not spending a witness output. - block.vtx[1].wit.vtxinwit = [] - block.vtx[1].vin[0].scriptSig = CScript([OP_0]) - block.vtx[1].rehash() - add_witness_commitment(block) - block.solve() - - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - # Now try extra witness/signature data on an input that DOES require a - # witness - tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness - tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE]))) - tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()]) - tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_script] - tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])] - - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx2]) - - # This has extra witness data, so it should fail. - test_witness_block(self.nodes[0], self.test_node, block, accepted=False, - reason='mandatory-script-verify-flag-failed (Stack size must be exactly one after execution)') - - # Now get rid of the extra witness, but add extra scriptSig data - tx2.vin[0].scriptSig = CScript([OP_TRUE]) - tx2.vin[1].scriptSig = CScript([OP_TRUE]) - tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0) - tx2.wit.vtxinwit[1].scriptWitness.stack = [] - tx2.rehash() - add_witness_commitment(block) - block.solve() - - # This has extra signature data for a witness input, so it should fail. 
- test_witness_block(self.nodes[0], self.test_node, block, accepted=False, - reason='mandatory-script-verify-flag-failed (Witness requires empty scriptSig)') - - # Now get rid of the extra scriptsig on the witness input, and verify - # success (even with extra scriptsig data in the non-witness input) - tx2.vin[0].scriptSig = b"" - tx2.rehash() - add_witness_commitment(block) - block.solve() - - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - # Update utxo for later tests - self.utxo.pop(0) - self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) - - @subtest - def test_max_witness_push_length(self): - """Test that witness stack can only allow up to MAX_SCRIPT_ELEMENT_SIZE byte pushes.""" - - block = self.build_next_block() - - witness_script = CScript([OP_DROP, OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_script) - - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey)) - tx.rehash() - - tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) - tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE]))) - tx2.wit.vtxinwit.append(CTxInWitness()) - # First try a 521-byte stack element - tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_script] - tx2.rehash() - - self.update_witness_block_with_transactions(block, [tx, tx2]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=False, - reason='mandatory-script-verify-flag-failed (Push value size limit exceeded)') - - # Now reduce the length of the stack element - tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE) - - add_witness_commitment(block) - block.solve() - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - # Update the utxo for later tests - self.utxo.pop() - self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) - - @subtest - def test_max_witness_script_length(self): - """Test that witness outputs greater than 10kB can't be spent.""" - - MAX_WITNESS_SCRIPT_LENGTH = 10000 - - # This script is 19 max pushes (9937 bytes), then 64 more opcode-bytes. 
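# Arithmetic behind the "19 max pushes (9937 bytes)" comment above, assuming
# MAX_SCRIPT_ELEMENT_SIZE = 520: each 520-byte element serializes as
# OP_PUSHDATA2 plus a 2-byte length plus the data (523 bytes), and 63 OP_DROPs
# plus OP_TRUE supply the remaining 64 opcode bytes, landing exactly one byte
# over the 10000-byte witness script limit.
MAX_SCRIPT_ELEMENT_SIZE = 520
MAX_WITNESS_SCRIPT_LENGTH = 10000

push_bytes = 19 * (1 + 2 + MAX_SCRIPT_ELEMENT_SIZE)  # opcode + len16 + payload
assert push_bytes == 9937
assert push_bytes + 63 + 1 == MAX_WITNESS_SCRIPT_LENGTH + 1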
- long_witness_script = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 63 + [OP_TRUE]) - assert len(long_witness_script) == MAX_WITNESS_SCRIPT_LENGTH + 1 - long_script_pubkey = script_to_p2wsh_script(long_witness_script) - - block = self.build_next_block() - - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, long_script_pubkey)) - tx.rehash() - - tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) - tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE]))) - tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_script] - tx2.rehash() - - self.update_witness_block_with_transactions(block, [tx, tx2]) - - test_witness_block(self.nodes[0], self.test_node, block, accepted=False, - reason='mandatory-script-verify-flag-failed (Script is too big)') - - # Try again with one less byte in the witness script - witness_script = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 62 + [OP_TRUE]) - assert len(witness_script) == MAX_WITNESS_SCRIPT_LENGTH - script_pubkey = script_to_p2wsh_script(witness_script) - - tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey) - tx.rehash() - tx2.vin[0].prevout.hash = tx.sha256 - tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_script] - tx2.rehash() - block.vtx = [block.vtx[0]] - self.update_witness_block_with_transactions(block, [tx, tx2]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - self.utxo.pop() - self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) - - @subtest - def test_witness_input_length(self): - """Test that vin length must match vtxinwit length.""" - - witness_script = CScript([OP_DROP, OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_script) - - # Create a transaction that splits our utxo into many outputs - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - value = self.utxo[0].nValue - for _ in range(10): - tx.vout.append(CTxOut(int(value / 10), script_pubkey)) - tx.vout[0].nValue -= 1000 - assert tx.vout[0].nValue >= 0 - - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - # Try various ways to spend tx that should all break. - # This "broken" transaction serializer will not normalize - # the length of vtxinwit. 
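# --- Illustrative aside (editor's sketch, not part of the patch) ---
# Why a "broken" serializer is needed below: in the BIP144 wire format
#   [version][marker=0x00][flag=0x01][vin][vout][witness][locktime]
# the witness section carries no count of its own; a parser reads exactly one
# script witness per input. CTransaction.serialize_with_witness() normally
# pads vtxinwit out to len(vin), so producing a mismatch requires bypassing
# that normalization, which is what the subclass that follows does.
def witness_count_consistent(num_inputs: int, num_script_witnesses: int) -> bool:
    # Any mismatch breaks deserialization on the receiving side.
    return num_inputs == num_script_witnesses
# --- end aside ---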
- class BrokenCTransaction(CTransaction): - def serialize_with_witness(self): - flags = 0 - if not self.wit.is_null(): - flags |= 1 - r = b"" - r += self.version.to_bytes(4, "little") - if flags: - dummy = [] - r += ser_vector(dummy) - r += flags.to_bytes(1, "little") - r += ser_vector(self.vin) - r += ser_vector(self.vout) - if flags & 1: - r += self.wit.serialize() - r += self.nLockTime.to_bytes(4, "little") - return r - - tx2 = BrokenCTransaction() - for i in range(10): - tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b"")) - tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE]))) - - # First try using a too long vtxinwit - for i in range(11): - tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_script] - - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx2]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='bad-txnmrklroot') - - # Now try using a too short vtxinwit - tx2.wit.vtxinwit.pop() - tx2.wit.vtxinwit.pop() - - block.vtx = [block.vtx[0]] - self.update_witness_block_with_transactions(block, [tx2]) - # This block doesn't result in a specific reject reason, but an iostream exception: - # "Exception 'CDataStream::read(): end of data: unspecified iostream_category error' (...) caught" - test_witness_block(self.nodes[0], self.test_node, block, accepted=False) - - # Now make one of the intermediate witnesses be incorrect - tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_script] - tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_script] - - block.vtx = [block.vtx[0]] - self.update_witness_block_with_transactions(block, [tx2]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=False, - reason='mandatory-script-verify-flag-failed (Operation not valid with the current stack size)') - - # Fix the broken witness and the block should be accepted. - tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_script] - block.vtx = [block.vtx[0]] - self.update_witness_block_with_transactions(block, [tx2]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - self.utxo.pop() - self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) - - @subtest - def test_tx_relay_after_segwit_activation(self): - """Test transaction relay after segwit activation. - - After segwit activates, verify that mempool: - - rejects transactions with unnecessary/extra witnesses - - accepts transactions with valid witnesses - and that witness transactions are relayed to non-upgraded peers.""" - - # Generate a transaction that doesn't require a witness, but send it - # with a witness. Should be rejected because we can't use a witness - # when spending a non-witness output. - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) - tx.wit.vtxinwit.append(CTxInWitness()) - tx.wit.vtxinwit[0].scriptWitness.stack = [b'a'] - tx.rehash() - - tx_hash = tx.sha256 - - # Verify that unnecessary witnesses are rejected. - self.test_node.announce_tx_and_wait_for_getdata(tx) - assert_equal(len(self.nodes[0].getrawmempool()), 0) - test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False) - - # Verify that removing the witness succeeds. 
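# --- Illustrative aside (editor's sketch, not part of the patch) ---
# The relay checks below rely on the txid/wtxid distinction: the txid hashes
# the serialization without witness data, the wtxid hashes it with witness
# data, so stripping a witness changes the wtxid but never the txid. The
# byte strings here are placeholders, not a real transaction.
import hashlib

def dsha256(b: bytes) -> bytes:
    return hashlib.sha256(hashlib.sha256(b).digest()).digest()

ser_no_witness = bytes.fromhex("0100000000000000")  # placeholder bytes
ser_with_witness = ser_no_witness + b"\x01\x02"     # placeholder witness
assert dsha256(ser_no_witness) != dsha256(ser_with_witness)
# --- end aside ---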
- test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True) - - # Now try to add extra witness data to a valid witness tx. - witness_script = CScript([OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_script) - tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b"")) - tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey)) - tx2.rehash() - - tx3 = CTransaction() - tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) - tx3.wit.vtxinwit.append(CTxInWitness()) - - # Add too-large for IsStandard witness and check that it does not enter reject filter - p2sh_script = CScript([OP_TRUE]) - witness_script2 = CScript([b'a' * 400000]) - tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_to_p2sh_script(p2sh_script))) - tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_script2] - tx3.rehash() - - # Node will not be blinded to the transaction, requesting it any number of times - # if it is being announced via txid relay. - # Node will be blinded to the transaction via wtxid, however. - self.std_node.announce_tx_and_wait_for_getdata(tx3) - self.std_wtx_node.announce_tx_and_wait_for_getdata(tx3, use_wtxid=True) - test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size') - self.std_node.announce_tx_and_wait_for_getdata(tx3) - self.std_wtx_node.announce_tx_and_wait_for_getdata(tx3, use_wtxid=True, success=False) - - # Remove witness stuffing, instead add extra witness push on stack - tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])) - tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_script] - tx3.rehash() - - test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True) - test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False) - - # Get rid of the extra witness, and verify acceptance. - tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_script] - # Also check that old_node gets a tx announcement, even though this is - # a witness transaction. - self.old_node.wait_for_inv([CInv(MSG_TX, tx2.sha256)]) # wait until tx2 was inv'ed - test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True) - self.old_node.wait_for_inv([CInv(MSG_TX, tx3.sha256)]) - - # Test that getrawtransaction returns correct witness information - # hash, size, vsize - raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1) - assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True)) - assert_equal(raw_tx["size"], len(tx3.serialize_with_witness())) - vsize = tx3.get_vsize() - assert_equal(raw_tx["vsize"], vsize) - assert_equal(raw_tx["weight"], tx3.get_weight()) - assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1) - assert_equal(raw_tx["vin"][0]["txinwitness"][0], witness_script.hex()) - assert vsize != raw_tx["size"] - - # Cleanup: mine the transactions and update utxo for next test - self.generate(self.nodes[0], 1) - assert_equal(len(self.nodes[0].getrawmempool()), 0) - - self.utxo.pop(0) - self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) - - @subtest - def test_segwit_versions(self): - """Test validity of future segwit version transactions. - - Future segwit versions are non-standard to spend, but valid in blocks. - Sending to future segwit versions is always allowed. 
-        Can run this before and after segwit activation."""
-
-        NUM_SEGWIT_VERSIONS = 17  # will test OP_0, OP_1, ..., OP_16
-        if len(self.utxo) < NUM_SEGWIT_VERSIONS:
-            tx = CTransaction()
-            tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
-            split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS
-            for _ in range(NUM_SEGWIT_VERSIONS):
-                tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
-            tx.rehash()
-            block = self.build_next_block()
-            self.update_witness_block_with_transactions(block, [tx])
-            test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
-            self.utxo.pop(0)
-            for i in range(NUM_SEGWIT_VERSIONS):
-                self.utxo.append(UTXO(tx.sha256, i, split_value))
-
-        self.sync_blocks()
-        temp_utxo = []
-        tx = CTransaction()
-        witness_script = CScript([OP_TRUE])
-        witness_hash = sha256(witness_script)
-        assert_equal(len(self.nodes[1].getrawmempool()), 0)
-        for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
-            # First try to spend to a future version segwit script_pubkey.
-            if version == OP_1:
-                # Don't use 32-byte v1 witness (used by Taproot; see BIP 341)
-                script_pubkey = CScript([CScriptOp(version), witness_hash + b'\x00'])
-            else:
-                script_pubkey = CScript([CScriptOp(version), witness_hash])
-            tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
-            tx.vout = [CTxOut(self.utxo[0].nValue - 1000, script_pubkey)]
-            tx.rehash()
-            test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=False)
-            test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
-            self.utxo.pop(0)
-            temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
-
-        self.generate(self.nodes[0], 1)  # Mine all the transactions
-        assert len(self.nodes[0].getrawmempool()) == 0
-
-        # Finally, verify that version 0 -> version 2 transactions
-        # are standard
-        script_pubkey = CScript([CScriptOp(OP_2), witness_hash])
-        tx2 = CTransaction()
-        tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
-        tx2.vout = [CTxOut(tx.vout[0].nValue - 1000, script_pubkey)]
-        tx2.wit.vtxinwit.append(CTxInWitness())
-        tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_script]
-        tx2.rehash()
-        # Gets accepted by both the policy-enforcing node and the other node.
-        test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
-        test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
-        temp_utxo.pop()  # last entry in temp_utxo was the output we just spent
-        temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
-
-        # Spend everything in temp_utxo into a segwit v2 output.
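# --- Illustrative aside (editor's sketch, not part of the patch) ---
# How a version-N witness program is formed: one version opcode (OP_0, or
# OP_1 through OP_16, i.e. bytes 0x51..0x60) followed by a single push of a
# 2-to-40-byte program. Version 1 with a 32-byte program is Taproot (BIP341),
# which is why the loop above pads its v1 program to 33 bytes.
def witness_program_script(version: int, program: bytes) -> bytes:
    assert version == 0 or 1 <= version <= 16
    assert 2 <= len(program) <= 40
    version_byte = 0x00 if version == 0 else 0x50 + version  # OP_1 == 0x51
    return bytes([version_byte, len(program)]) + program

v0_p2wsh = witness_program_script(0, bytes(32))   # spendable and standard today
v2_future = witness_program_script(2, bytes(32))  # creatable now; spending is policy-restricted
# --- end aside ---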
- tx3 = CTransaction() - total_value = 0 - for i in temp_utxo: - tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b"")) - tx3.wit.vtxinwit.append(CTxInWitness()) - total_value += i.nValue - tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_script] - tx3.vout.append(CTxOut(total_value - 1000, script_pubkey)) - tx3.rehash() - - # First we test this transaction against std_node - # making sure the txid is added to the reject filter - self.std_node.announce_tx_and_wait_for_getdata(tx3) - test_transaction_acceptance(self.nodes[1], self.std_node, tx3, with_witness=True, accepted=False, reason="bad-txns-nonstandard-inputs") - # Now the node will no longer ask for getdata of this transaction when advertised by same txid - self.std_node.announce_tx_and_wait_for_getdata(tx3, success=False) - - # Spending a higher version witness output is not allowed by policy, - # even with the node that accepts non-standard txs. - test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades") - - # Building a block with the transaction must be valid, however. - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx2, tx3]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - self.sync_blocks() - - # Add utxo to our list - self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) - - @subtest - def test_premature_coinbase_witness_spend(self): - - block = self.build_next_block() - # Change the output of the block to be a witness output. - witness_script = CScript([OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_script) - block.vtx[0].vout[0].scriptPubKey = script_pubkey - # This next line will rehash the coinbase and update the merkle - # root, and solve. - self.update_witness_block_with_transactions(block, []) - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - spend_tx = CTransaction() - spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")] - spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_script)] - spend_tx.wit.vtxinwit.append(CTxInWitness()) - spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_script] - spend_tx.rehash() - - # Now test a premature spend. - self.generate(self.nodes[0], 98) - block2 = self.build_next_block() - self.update_witness_block_with_transactions(block2, [spend_tx]) - test_witness_block(self.nodes[0], self.test_node, block2, accepted=False, reason='bad-txns-premature-spend-of-coinbase') - - # Advancing one more block should allow the spend. - self.generate(self.nodes[0], 1) - block2 = self.build_next_block() - self.update_witness_block_with_transactions(block2, [spend_tx]) - test_witness_block(self.nodes[0], self.test_node, block2, accepted=True) - self.sync_blocks() - - @subtest - def test_uncompressed_pubkey(self): - """Test uncompressed pubkey validity in segwit transactions. - - Uncompressed pubkeys are no longer supported in default relay policy, - but (for now) are still valid in blocks.""" - - # Segwit transactions using uncompressed pubkeys are not accepted - # under default policy, but should still pass consensus. 
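# --- Illustrative aside (editor's sketch, not part of the patch) ---
# Key encodings the next checks distinguish: a compressed pubkey is 33 bytes
# with an 0x02/0x03 prefix, an uncompressed one 65 bytes with an 0x04 prefix.
# Policy (the WITNESS_PUBKEYTYPE script flag) rejects uncompressed keys in
# segwit spends, while consensus still accepts them.
def is_compressed_pubkey(pubkey: bytes) -> bool:
    return len(pubkey) == 33 and pubkey[0] in (2, 3)

def is_uncompressed_pubkey(pubkey: bytes) -> bool:
    return len(pubkey) == 65 and pubkey[0] == 4

assert is_compressed_pubkey(b"\x02" + bytes(32))
assert is_uncompressed_pubkey(b"\x04" + bytes(64))
# --- end aside ---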
- key, pubkey = generate_keypair(compressed=False) - assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey - - utxo = self.utxo.pop(0) - - # Test 1: P2WPKH - # First create a P2WPKH output that uses an uncompressed pubkey - pubkeyhash = hash160(pubkey) - script_pkh = key_to_p2wpkh_script(pubkey) - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b"")) - tx.vout.append(CTxOut(utxo.nValue - 1000, script_pkh)) - tx.rehash() - - # Confirm it in a block. - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - # Now try to spend it. Send it to a P2WSH output, which we'll - # use in the next test. - witness_script = key_to_p2pk_script(pubkey) - script_wsh = script_to_p2wsh_script(witness_script) - - tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) - tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_wsh)) - script = keyhash_to_p2pkh_script(pubkeyhash) - tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[0].scriptWitness.stack = [pubkey] - sign_input_segwitv0(tx2, 0, script, tx.vout[0].nValue, key) - - # Should fail policy test. - test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') - # But passes consensus. - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx2]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - # Test 2: P2WSH - # Try to spend the P2WSH output created in last test. - # Send it to a P2SH(P2WSH) output, which we'll use in the next test. - script_p2sh = script_to_p2sh_script(script_wsh) - script_sig = CScript([script_wsh]) - - tx3 = CTransaction() - tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) - tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_p2sh)) - tx3.wit.vtxinwit.append(CTxInWitness()) - sign_p2pk_witness_input(witness_script, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key) - - # Should fail policy test. - test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') - # But passes consensus. - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx3]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - # Test 3: P2SH(P2WSH) - # Try to spend the P2SH output created in the last test. - # Send it to a P2PKH output, which we'll use in the next test. - script_pubkey = keyhash_to_p2pkh_script(pubkeyhash) - tx4 = CTransaction() - tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig)) - tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, script_pubkey)) - tx4.wit.vtxinwit.append(CTxInWitness()) - sign_p2pk_witness_input(witness_script, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key) - - # Should fail policy test. - test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx4]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - # Test 4: Uncompressed pubkeys should still be valid in non-segwit - # transactions. 
- tx5 = CTransaction() - tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b"")) - tx5.vout.append(CTxOut(tx4.vout[0].nValue - 1000, CScript([OP_TRUE]))) - tx5.vin[0].scriptSig = CScript([pubkey]) - sign_input_legacy(tx5, 0, script_pubkey, key) - # Should pass policy and consensus. - test_transaction_acceptance(self.nodes[0], self.test_node, tx5, True, True) - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx5]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue)) - - @subtest - def test_signature_version_1(self): - key, pubkey = generate_keypair() - witness_script = key_to_p2pk_script(pubkey) - script_pubkey = script_to_p2wsh_script(witness_script) - - # First create a witness output for use in the tests. - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey)) - tx.rehash() - - test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True) - # Mine this transaction in preparation for following tests. - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - self.sync_blocks() - self.utxo.pop(0) - - # Test each hashtype - prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue) - for sigflag in [0, SIGHASH_ANYONECANPAY]: - for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]: - hashtype |= sigflag - block = self.build_next_block() - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b"")) - tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey)) - tx.wit.vtxinwit.append(CTxInWitness()) - # Too-large input value - sign_p2pk_witness_input(witness_script, tx, 0, hashtype, prev_utxo.nValue + 1, key) - self.update_witness_block_with_transactions(block, [tx]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=False, - reason='mandatory-script-verify-flag-failed (Script evaluated without error ' - 'but finished with a false/empty top stack element') - - # Too-small input value - sign_p2pk_witness_input(witness_script, tx, 0, hashtype, prev_utxo.nValue - 1, key) - block.vtx.pop() # remove last tx - self.update_witness_block_with_transactions(block, [tx]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=False, - reason='mandatory-script-verify-flag-failed (Script evaluated without error ' - 'but finished with a false/empty top stack element') - - # Now try correct value - sign_p2pk_witness_input(witness_script, tx, 0, hashtype, prev_utxo.nValue, key) - block.vtx.pop() - self.update_witness_block_with_transactions(block, [tx]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue) - - # Test combinations of signature hashes. - # Split the utxo into a lot of outputs. - # Randomly choose up to 10 to spend, sign with different hashtypes, and - # output to a random number of outputs. Repeat NUM_SIGHASH_TESTS times. - # Ensure that we've tested a situation where we use SIGHASH_SINGLE with - # an input index > number of outputs. 
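# --- Illustrative aside (editor's sketch, not part of the patch) ---
# The hashtype byte exercised below is composed of a base mode plus an
# optional ANYONECANPAY bit, giving six combinations. Note that under BIP143,
# SIGHASH_SINGLE with an input index past the last output commits to a zeroed
# hashOutputs instead of the legacy "digest of 1" quirk, which is why the
# loop tracks whether that case was hit.
SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE = 1, 2, 3
SIGHASH_ANYONECANPAY = 0x80
combos = [base | acp
          for acp in (0, SIGHASH_ANYONECANPAY)
          for base in (SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE)]
assert combos == [0x01, 0x02, 0x03, 0x81, 0x82, 0x83]
# --- end aside ---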
- NUM_SIGHASH_TESTS = 500 - temp_utxos = [] - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b"")) - split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS - for _ in range(NUM_SIGHASH_TESTS): - tx.vout.append(CTxOut(split_value, script_pubkey)) - tx.wit.vtxinwit.append(CTxInWitness()) - sign_p2pk_witness_input(witness_script, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key) - for i in range(NUM_SIGHASH_TESTS): - temp_utxos.append(UTXO(tx.sha256, i, split_value)) - - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - block = self.build_next_block() - used_sighash_single_out_of_bounds = False - for i in range(NUM_SIGHASH_TESTS): - # Ping regularly to keep the connection alive - if (not i % 100): - self.test_node.sync_with_ping() - # Choose random number of inputs to use. - num_inputs = random.randint(1, 10) - # Create a slight bias for producing more utxos - num_outputs = random.randint(1, 11) - random.shuffle(temp_utxos) - assert len(temp_utxos) > num_inputs - tx = CTransaction() - total_value = 0 - for i in range(num_inputs): - tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b"")) - tx.wit.vtxinwit.append(CTxInWitness()) - total_value += temp_utxos[i].nValue - split_value = total_value // num_outputs - for _ in range(num_outputs): - tx.vout.append(CTxOut(split_value, script_pubkey)) - for i in range(num_inputs): - # Now try to sign each input, using a random hashtype. - anyonecanpay = 0 - if random.randint(0, 1): - anyonecanpay = SIGHASH_ANYONECANPAY - hashtype = random.randint(1, 3) | anyonecanpay - sign_p2pk_witness_input(witness_script, tx, i, hashtype, temp_utxos[i].nValue, key) - if (hashtype == SIGHASH_SINGLE and i >= num_outputs): - used_sighash_single_out_of_bounds = True - tx.rehash() - for i in range(num_outputs): - temp_utxos.append(UTXO(tx.sha256, i, split_value)) - temp_utxos = temp_utxos[num_inputs:] - - block.vtx.append(tx) - - # Test the block periodically, if we're close to maxblocksize - if block.get_weight() > MAX_BLOCK_WEIGHT - 4000: - self.update_witness_block_with_transactions(block, []) - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - block = self.build_next_block() - - if (not used_sighash_single_out_of_bounds): - self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value") - # Test the transactions we've added to the block - if (len(block.vtx) > 1): - self.update_witness_block_with_transactions(block, []) - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - # Now test witness version 0 P2PKH transactions - pubkeyhash = hash160(pubkey) - script_pkh = key_to_p2wpkh_script(pubkey) - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b"")) - tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh)) - tx.wit.vtxinwit.append(CTxInWitness()) - sign_p2pk_witness_input(witness_script, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key) - tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) - tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE]))) - - script = keyhash_to_p2pkh_script(pubkeyhash) - tx2.wit.vtxinwit.append(CTxInWitness()) - sign_input_segwitv0(tx2, 0, script, tx.vout[0].nValue, key) - signature = tx2.wit.vtxinwit[0].scriptWitness.stack.pop() - - # Check that we can't have a scriptSig - tx2.vin[0].scriptSig = CScript([signature, pubkey]) - 
tx2.rehash() - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx, tx2]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=False, - reason='mandatory-script-verify-flag-failed (Witness requires empty scriptSig)') - - # Move the signature to the witness. - block.vtx.pop() - tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey] - tx2.vin[0].scriptSig = b"" - tx2.rehash() - - self.update_witness_block_with_transactions(block, [tx2]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - temp_utxos.pop(0) - - # Update self.utxos for later tests by creating two outputs - # that consolidate all the coins in temp_utxos. - output_value = sum(i.nValue for i in temp_utxos) // 2 - - tx = CTransaction() - index = 0 - # Just spend to our usual anyone-can-spend output - tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2 - for i in temp_utxos: - # Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up - # the signatures as we go. - tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b"")) - tx.wit.vtxinwit.append(CTxInWitness()) - sign_p2pk_witness_input(witness_script, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key) - index += 1 - block = self.build_next_block() - self.update_witness_block_with_transactions(block, [tx]) - test_witness_block(self.nodes[0], self.test_node, block, accepted=True) - - for i in range(len(tx.vout)): - self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue)) - - @subtest - def test_non_standard_witness_blinding(self): - """Test behavior of unnecessary witnesses in transactions does not blind the node for the transaction""" - - # Create a p2sh output -- this is so we can pass the standardness - # rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped - # in P2SH). - p2sh_program = CScript([OP_TRUE]) - script_pubkey = script_to_p2sh_script(p2sh_program) - - # Now check that unnecessary witnesses can't be used to blind a node - # to a transaction, eg by violating standardness checks. - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey)) - tx.rehash() - test_transaction_acceptance(self.nodes[0], self.test_node, tx, False, True) - self.generate(self.nodes[0], 1) - - # We'll add an unnecessary witness to this transaction that would cause - # it to be non-standard, to test that violating policy with a witness - # doesn't blind a node to a transaction. Transactions - # rejected for having a witness shouldn't be added - # to the rejection cache. - tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program]))) - tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey)) - tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400] - tx2.rehash() - # This will be rejected due to a policy check: - # No witness is allowed, since it is not a witness program but a p2sh program - test_transaction_acceptance(self.nodes[1], self.std_node, tx2, True, False, 'bad-witness-nonstandard') - - # If we send without witness, it should be accepted. - test_transaction_acceptance(self.nodes[1], self.std_node, tx2, False, True) - - # Now create a new anyone-can-spend utxo for the next test. 
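# --- Illustrative aside (editor's sketch, not part of the patch) ---
# The next subtest probes Bitcoin Core's P2WSH standardness limits (policy
# only; consensus accepts more): at most 100 stack items besides the
# witnessScript, at most 80 bytes per item, and a witnessScript of at most
# 3600 bytes. Violations are rejected as 'bad-witness-nonstandard'.
MAX_STANDARD_P2WSH_STACK_ITEMS = 100
MAX_STANDARD_P2WSH_STACK_ITEM_SIZE = 80
MAX_STANDARD_P2WSH_SCRIPT_SIZE = 3600

def p2wsh_witness_is_standard(stack):
    *elements, witness_script = stack
    return (len(elements) <= MAX_STANDARD_P2WSH_STACK_ITEMS
            and all(len(e) <= MAX_STANDARD_P2WSH_STACK_ITEM_SIZE for e in elements)
            and len(witness_script) <= MAX_STANDARD_P2WSH_SCRIPT_SIZE)

assert p2wsh_witness_is_standard([b"\x01" * 80] * 100 + [bytes(3600)])
assert not p2wsh_witness_is_standard([b"\x01" * 81, bytes(3600)])
# --- end aside ---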
- tx3 = CTransaction() - tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program]))) - tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) - tx3.rehash() - test_transaction_acceptance(self.nodes[0], self.test_node, tx2, False, True) - test_transaction_acceptance(self.nodes[0], self.test_node, tx3, False, True) - - self.generate(self.nodes[0], 1) - - # Update our utxo list; we spent the first entry. - self.utxo.pop(0) - self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) - - @subtest - def test_non_standard_witness(self): - """Test detection of non-standard P2WSH witness""" - pad = chr(1).encode('latin-1') - - # Create scripts for tests - scripts = [] - scripts.append(CScript([OP_DROP] * 100)) - scripts.append(CScript([OP_DROP] * 99)) - scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60)) - scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61)) - - p2wsh_scripts = [] - - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - - # For each script, generate a pair of P2WSH and P2SH-P2WSH output. - outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2) - for i in scripts: - p2wsh = script_to_p2wsh_script(i) - p2wsh_scripts.append(p2wsh) - tx.vout.append(CTxOut(outputvalue, p2wsh)) - tx.vout.append(CTxOut(outputvalue, script_to_p2sh_script(p2wsh))) - tx.rehash() - txid = tx.sha256 - test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True) - - self.generate(self.nodes[0], 1) - - # Creating transactions for tests - p2wsh_txs = [] - p2sh_txs = [] - for i in range(len(scripts)): - p2wsh_tx = CTransaction() - p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2))) - p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(b"")]))) - p2wsh_tx.wit.vtxinwit.append(CTxInWitness()) - p2wsh_tx.rehash() - p2wsh_txs.append(p2wsh_tx) - p2sh_tx = CTransaction() - p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]]))) - p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(b"")]))) - p2sh_tx.wit.vtxinwit.append(CTxInWitness()) - p2sh_tx.rehash() - p2sh_txs.append(p2sh_tx) - - # Testing native P2WSH - # Witness stack size, excluding witnessScript, over 100 is non-standard - p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]] - test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[0], True, False, 'bad-witness-nonstandard') - # Non-standard nodes should accept - test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[0], True, True) - - # Stack element size over 80 bytes is non-standard - p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]] - test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, False, 'bad-witness-nonstandard') - # Non-standard nodes should accept - test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[1], True, True) - # Standard nodes should accept if element size is not over 80 bytes - p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]] - test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, True) - - # witnessScript size at 3600 bytes is standard - p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]] - test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[2], True, True) - test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[2], True, True) - - # 
witnessScript size at 3601 bytes is non-standard - p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]] - test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[3], True, False, 'bad-witness-nonstandard') - # Non-standard nodes should accept - test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[3], True, True) - - # Repeating the same tests with P2SH-P2WSH - p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]] - test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[0], True, False, 'bad-witness-nonstandard') - test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[0], True, True) - p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]] - test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, False, 'bad-witness-nonstandard') - test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[1], True, True) - p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]] - test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, True) - p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]] - test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[2], True, True) - test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[2], True, True) - p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]] - test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[3], True, False, 'bad-witness-nonstandard') - test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[3], True, True) - - self.generate(self.nodes[0], 1) # Mine and clean up the mempool of non-standard node - # Valid but non-standard transactions in a block should be accepted by standard node - self.sync_blocks() - assert_equal(len(self.nodes[0].getrawmempool()), 0) - assert_equal(len(self.nodes[1].getrawmempool()), 0) - - self.utxo.pop(0) - - @subtest - def test_witness_sigops(self): - """Test sigop counting is correct inside witnesses.""" - - # Keep this under MAX_OPS_PER_SCRIPT (201) - witness_script = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF]) - script_pubkey = script_to_p2wsh_script(witness_script) - - sigops_per_script = 20 * 5 + 193 * 1 - # We'll produce 2 extra outputs, one with a program that would take us - # over max sig ops, and one with a program that would exactly reach max - # sig ops - outputs = (MAX_SIGOP_COST // sigops_per_script) + 2 - extra_sigops_available = MAX_SIGOP_COST % sigops_per_script - - # We chose the number of checkmultisigs/checksigs to make this work: - assert extra_sigops_available < 100 # steer clear of MAX_OPS_PER_SCRIPT - - # This script, when spent with the first - # N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction, - # would push us just over the block sigop limit. - witness_script_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF]) - script_pubkey_toomany = script_to_p2wsh_script(witness_script_toomany) - - # If we spend this script instead, we would exactly reach our sigop - # limit (for witness sigops). 
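# --- Illustrative aside (editor's sketch, not part of the patch) ---
# Arithmetic behind the sigop budget used here: BIP141 caps a block at
# MAX_SIGOP_COST = 80000, where a witness-script OP_CHECKSIG costs 1 and an
# OP_CHECKMULTISIG costs 20 (legacy sigops count 4x instead). With 293 sigops
# per spend, 273 full spends fit, leaving 11 sigops of headroom, so the
# "toomany" script uses 12 CHECKSIGs and the "justright" script uses 11.
MAX_SIGOP_COST = 80000
sigops_per_script = 20 * 5 + 193 * 1  # 5 CHECKMULTISIGs + 193 CHECKSIGs
assert MAX_SIGOP_COST // sigops_per_script == 273
assert MAX_SIGOP_COST % sigops_per_script == 11
# --- end aside ---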
- witness_script_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF]) - script_pubkey_justright = script_to_p2wsh_script(witness_script_justright) - - # First split our available utxo into a bunch of outputs - split_value = self.utxo[0].nValue // outputs - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - for _ in range(outputs): - tx.vout.append(CTxOut(split_value, script_pubkey)) - tx.vout[-2].scriptPubKey = script_pubkey_toomany - tx.vout[-1].scriptPubKey = script_pubkey_justright - tx.rehash() - - block_1 = self.build_next_block() - self.update_witness_block_with_transactions(block_1, [tx]) - test_witness_block(self.nodes[0], self.test_node, block_1, accepted=True) - - tx2 = CTransaction() - # If we try to spend the first n-1 outputs from tx, that should be - # too many sigops. - total_value = 0 - for i in range(outputs - 1): - tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b"")) - tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_script] - total_value += tx.vout[i].nValue - tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_script_toomany] - tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE]))) - tx2.rehash() - - block_2 = self.build_next_block() - self.update_witness_block_with_transactions(block_2, [tx2]) - test_witness_block(self.nodes[0], self.test_node, block_2, accepted=False, reason='bad-blk-sigops') - - # Try dropping the last input in tx2, and add an output that has - # too many sigops (contributing to legacy sigop count). - checksig_count = (extra_sigops_available // 4) + 1 - script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count) - tx2.vout.append(CTxOut(0, script_pubkey_checksigs)) - tx2.vin.pop() - tx2.wit.vtxinwit.pop() - tx2.vout[0].nValue -= tx.vout[-2].nValue - tx2.rehash() - block_3 = self.build_next_block() - self.update_witness_block_with_transactions(block_3, [tx2]) - test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False, reason='bad-blk-sigops') - - # If we drop the last checksig in this output, the tx should succeed. 
- block_4 = self.build_next_block() - tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1)) - tx2.rehash() - self.update_witness_block_with_transactions(block_4, [tx2]) - test_witness_block(self.nodes[0], self.test_node, block_4, accepted=True) - - # Reset the tip back down for the next test - self.sync_blocks() - for x in self.nodes: - x.invalidateblock(block_4.hash) - - # Try replacing the last input of tx2 to be spending the last - # output of tx - block_5 = self.build_next_block() - tx2.vout.pop() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b"")) - tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_script_justright] - tx2.rehash() - self.update_witness_block_with_transactions(block_5, [tx2]) - test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True) - - # TODO: test p2sh sigop counting - - # Cleanup and prep for next test - self.utxo.pop(0) - self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) - - @subtest - def test_superfluous_witness(self): - # Serialization of tx that puts witness flag to 3 always - def serialize_with_bogus_witness(tx): - flags = 3 - r = b"" - r += tx.version.to_bytes(4, "little") - if flags: - dummy = [] - r += ser_vector(dummy) - r += flags.to_bytes(1, "little") - r += ser_vector(tx.vin) - r += ser_vector(tx.vout) - if flags & 1: - if (len(tx.wit.vtxinwit) != len(tx.vin)): - # vtxinwit must have the same length as vin - tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)] - for _ in range(len(tx.wit.vtxinwit), len(tx.vin)): - tx.wit.vtxinwit.append(CTxInWitness()) - r += tx.wit.serialize() - r += tx.nLockTime.to_bytes(4, "little") - return r - - class msg_bogus_tx(msg_tx): - def serialize(self): - return serialize_with_bogus_witness(self.tx) - - tx = self.wallet.create_self_transfer()['tx'] - assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, hexstring=serialize_with_bogus_witness(tx).hex(), iswitness=True) - with self.nodes[0].assert_debug_log(['Unknown transaction optional data']): - self.test_node.send_and_ping(msg_bogus_tx(tx)) - tx.wit.vtxinwit = [] # drop witness - assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, hexstring=serialize_with_bogus_witness(tx).hex(), iswitness=True) - with self.nodes[0].assert_debug_log(['Superfluous witness record']): - self.test_node.send_and_ping(msg_bogus_tx(tx)) - - @subtest - def test_wtxid_relay(self): - # Use brand new nodes to avoid contamination from earlier tests - self.wtx_node = self.nodes[0].add_p2p_connection(TestP2PConn(wtxidrelay=True), services=P2P_SERVICES) - self.tx_node = self.nodes[0].add_p2p_connection(TestP2PConn(wtxidrelay=False), services=P2P_SERVICES) - - # Check wtxidrelay feature negotiation message through connecting a new peer - def received_wtxidrelay(): - return (len(self.wtx_node.last_wtxidrelay) > 0) - self.wtx_node.wait_until(received_wtxidrelay) - - # Create a Segwit output from the latest UTXO - # and announce it to the network - witness_script = CScript([OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_script) - - tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey)) - tx.rehash() - - # Create a Segwit transaction - tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) - tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey)) - tx2.wit.vtxinwit.append(CTxInWitness()) - 
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_script] - tx2.rehash() - - # Announce Segwit transaction with wtxid - # and wait for getdata - self.wtx_node.announce_tx_and_wait_for_getdata(tx2, use_wtxid=True) - with p2p_lock: - lgd = self.wtx_node.lastgetdata[:] - assert_equal(lgd, [CInv(MSG_WTX, tx2.calc_sha256(True))]) - - # Announce Segwit transaction from non wtxidrelay peer - # and wait for getdata - self.tx_node.announce_tx_and_wait_for_getdata(tx2, use_wtxid=False) - with p2p_lock: - lgd = self.tx_node.lastgetdata[:] - assert_equal(lgd, [CInv(MSG_TX|MSG_WITNESS_FLAG, tx2.sha256)]) - - # Send tx2 through; it's an orphan so won't be accepted - with p2p_lock: - self.wtx_node.last_message.pop("getdata", None) - test_transaction_acceptance(self.nodes[0], self.wtx_node, tx2, with_witness=True, accepted=False) - - # Expect a request for parent (tx) by txid despite use of WTX peer - self.wtx_node.wait_for_getdata([tx.sha256], timeout=60) - with p2p_lock: - lgd = self.wtx_node.lastgetdata[:] - assert_equal(lgd, [CInv(MSG_WITNESS_TX, tx.sha256)]) - - # Send tx through - test_transaction_acceptance(self.nodes[0], self.wtx_node, tx, with_witness=False, accepted=True) - - # Check tx2 is there now - assert_equal(tx2.hash in self.nodes[0].getrawmempool(), True) - - -if __name__ == '__main__': - SegWitTest(__file__).main() +#!/usr/bin/env python3) +# Copyright (c) 2016-2022 The Bitcoin Core developers) +# Distributed under the MIT software license, see the accompanying) +# file COPYING or http://www.opensource.org/licenses/mit-license.php.) +"""Test segwit transactions and blocks on P2P network.""") +from decimal import Decimal) +import random) +import time) +) +from test_framework.blocktools import () + WITNESS_COMMITMENT_HEADER,) + add_witness_commitment,) + create_block,) + create_coinbase,) +)) +from test_framework.messages import () + MAX_BIP125_RBF_SEQUENCE,) + CBlockHeader,) + CInv,) + COutPoint,) + CTransaction,) + CTxIn,) + CTxInWitness,) + CTxOut,) + CTxWitness,) + MAX_BLOCK_WEIGHT,) + MSG_BLOCK,) + MSG_TX,) + MSG_WITNESS_FLAG,) + MSG_WITNESS_TX,) + MSG_WTX,) + NODE_NETWORK,) + NODE_WITNESS,) + msg_no_witness_block,) + msg_getdata,) + msg_headers,) + msg_inv,) + msg_tx,) + msg_block,) + msg_no_witness_tx,) + ser_uint256,) + ser_vector,) + sha256,) +)) +from test_framework.p2p import () + P2PInterface,) + p2p_lock,) + P2P_SERVICES,) +)) +from test_framework.script import () + CScript,) + CScriptNum,) + CScriptOp,) + MAX_SCRIPT_ELEMENT_SIZE,) + OP_0,) + OP_1,) + OP_2,) + OP_16,) + OP_2DROP,) + OP_CHECKMULTISIG,) + OP_CHECKSIG,) + OP_DROP,) + OP_ELSE,) + OP_ENDIF,) + OP_IF,) + OP_RETURN,) + OP_TRUE,) + SIGHASH_ALL,) + SIGHASH_ANYONECANPAY,) + SIGHASH_NONE,) + SIGHASH_SINGLE,) + hash160,) + sign_input_legacy,) + sign_input_segwitv0,) +)) +from test_framework.script_util import () + key_to_p2pk_script,) + key_to_p2wpkh_script,) + keyhash_to_p2pkh_script,) + script_to_p2sh_script,) + script_to_p2wsh_script,) +)) +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.util import () + assert_not_equal,) + assert_equal,) + softfork_active,) + assert_raises_rpc_error,) +)) +from test_framework.wallet import MiniWallet) +from test_framework.wallet_util import generate_keypair) +) +) +MAX_SIGOP_COST = 80000) +) +SEGWIT_HEIGHT = 120) +) +class UTXO():) + """Used to keep track of anyone-can-spend outputs that we can use in the tests.""") + def __init__(self, sha256, n, value):) + self.sha256 = sha256) + self.n = n) + self.nValue = value) +) +) +def subtest(func):) + 
"""Wraps the subtests for logging and state assertions.""") + def func_wrapper(self, *args, **kwargs):) + self.log.info("Subtest: {} (Segwit active = {})".format(func.__name__, self.segwit_active))) + # Assert segwit status is as expected) + assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)) + func(self, *args, **kwargs)) + # Each subtest should leave some utxos for the next subtest) + assert self.utxo) + self.sync_blocks()) + # Assert segwit status is as expected at end of subtest) + assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)) +) + return func_wrapper) +) +) +def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):) + """Add signature for a P2PK witness script.""") + tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [script]) + sign_input_segwitv0(tx_to, in_idx, script, value, key, hashtype)) +) +def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None):) + """Send a transaction to the node and check that it's accepted to the mempool) +) + - Submit the transaction over the p2p interface) + - use the getrawmempool rpc to check for acceptance.""") + reason = [reason] if reason else []) + with node.assert_debug_log(expected_msgs=reason):) + p2p.send_and_ping(msg_tx(tx) if with_witness else msg_no_witness_tx(tx))) + assert_equal(tx.hash in node.getrawmempool(), accepted)) +) +) +def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None):) + """Send a block to the node and check that it's accepted) +) + - Submit the block over the p2p interface) + - use the getbestblockhash rpc to check for acceptance.""") + reason = [reason] if reason else []) + with node.assert_debug_log(expected_msgs=reason):) + p2p.send_and_ping(msg_block(block) if with_witness else msg_no_witness_block(block))) + assert_equal(node.getbestblockhash() == block.hash, accepted)) +) +) +class TestP2PConn(P2PInterface):) + def __init__(self, wtxidrelay=False):) + super().__init__(wtxidrelay=wtxidrelay)) + self.getdataset = set()) + self.last_wtxidrelay = []) + self.lastgetdata = []) + self.wtxidrelay = wtxidrelay) +) + # Don't send getdata message replies to invs automatically.) + # We'll send the getdata messages explicitly in the test logic.) 
+ def on_inv(self, message):) + pass) +) + def on_getdata(self, message):) + self.lastgetdata = message.inv) + for inv in message.inv:) + self.getdataset.add(inv.hash)) +) + def on_wtxidrelay(self, message):) + self.last_wtxidrelay.append(message)) +) + def announce_tx_and_wait_for_getdata(self, tx, success=True, use_wtxid=False):) + if success:) + # sanity check) + assert (self.wtxidrelay and use_wtxid) or (not self.wtxidrelay and not use_wtxid)) + with p2p_lock:) + self.last_message.pop("getdata", None)) + if use_wtxid:) + wtxid = tx.calc_sha256(True)) + self.send_message(msg_inv(inv=[CInv(MSG_WTX, wtxid)]))) + else:) + self.send_message(msg_inv(inv=[CInv(MSG_TX, tx.sha256)]))) +) + if success:) + if use_wtxid:) + self.wait_for_getdata([wtxid])) + else:) + self.wait_for_getdata([tx.sha256])) + else:) + time.sleep(5)) + assert not self.last_message.get("getdata")) +) + def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):) + with p2p_lock:) + self.last_message.pop("getdata", None)) + msg = msg_headers()) + msg.headers = [CBlockHeader(block)]) + if use_header:) + self.send_message(msg)) + else:) + self.send_message(msg_inv(inv=[CInv(MSG_BLOCK, block.sha256)]))) + self.wait_for_getheaders(block_hash=block.hashPrevBlock, timeout=timeout)) + self.send_message(msg)) + self.wait_for_getdata([block.sha256], timeout=timeout)) +) + def request_block(self, blockhash, inv_type, timeout=60):) + with p2p_lock:) + self.last_message.pop("block", None)) + self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))) + self.wait_for_block(blockhash, timeout=timeout)) + return self.last_message["block"].block) +) +class SegWitTest(BitcoinTestFramework):) + def set_test_params(self):) + self.setup_clean_chain = True) + self.num_nodes = 2) + # whitelist peers to speed up tx relay / mempool sync) + self.noban_tx_relay = True) + # This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.) + self.extra_args = [) + ["-acceptnonstdtxn=1", f"-testactivationheight=segwit@{SEGWIT_HEIGHT}", "-par=1"],) + ["-acceptnonstdtxn=0", f"-testactivationheight=segwit@{SEGWIT_HEIGHT}"],) + ]) + self.supports_cli = False) +) + # Helper functions) +) + def build_next_block(self):) + """Build a block on top of node0's tip.""") + tip = self.nodes[0].getbestblockhash()) + height = self.nodes[0].getblockcount() + 1) + block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1) + block = create_block(int(tip, 16), create_coinbase(height), block_time)) + block.rehash()) + return block) +) + def update_witness_block_with_transactions(self, block, tx_list, nonce=0):) + """Add list of transactions to block, adds witness commitment, then solves.""") + block.vtx.extend(tx_list)) + add_witness_commitment(block, nonce)) + block.solve()) +) + def run_test(self):) + # Setup the p2p connections) + # self.test_node sets P2P_SERVICES, i.e. 
NODE_WITNESS | NODE_NETWORK) + self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=P2P_SERVICES)) + # self.old_node sets only NODE_NETWORK) + self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)) + # self.std_node is for testing node1 (requires standard txs)) + self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=P2P_SERVICES)) + # self.std_wtx_node is for testing node1 with wtxid relay) + self.std_wtx_node = self.nodes[1].add_p2p_connection(TestP2PConn(wtxidrelay=True), services=P2P_SERVICES)) +) + assert_not_equal(self.test_node.nServices & NODE_WITNESS, 0)) +) + # Keep a place to store utxo's that can be used in later tests) + self.utxo = []) +) + self.log.info("Starting tests before segwit activation")) + self.segwit_active = False) + self.wallet = MiniWallet(self.nodes[0])) +) + self.test_non_witness_transaction()) + self.test_v0_outputs_arent_spendable()) + self.test_block_relay()) + self.test_unnecessary_witness_before_segwit_activation()) + self.test_witness_tx_relay_before_segwit_activation()) + self.test_standardness_v0()) +) + self.log.info("Advancing to segwit activation")) + self.advance_to_segwit_active()) +) + # Segwit status 'active') +) + self.test_p2sh_witness()) + self.test_witness_commitments()) + self.test_block_malleability()) + self.test_witness_block_size()) + self.test_submit_block()) + self.test_extra_witness_data()) + self.test_max_witness_push_length()) + self.test_max_witness_script_length()) + self.test_witness_input_length()) + self.test_block_relay()) + self.test_tx_relay_after_segwit_activation()) + self.test_standardness_v0()) + self.test_segwit_versions()) + self.test_premature_coinbase_witness_spend()) + self.test_uncompressed_pubkey()) + self.test_signature_version_1()) + self.test_non_standard_witness_blinding()) + self.test_non_standard_witness()) + self.test_witness_sigops()) + self.test_superfluous_witness()) + self.test_wtxid_relay()) +) + # Individual tests) +) + @subtest) + def test_non_witness_transaction(self):) + """See if sending a regular transaction works, and create a utxo to use in later tests.""") + # Mine a block with an anyone-can-spend coinbase,) + # let it mature, then try to spend it.) +) + block = self.build_next_block()) + block.solve()) + self.test_node.send_and_ping(msg_no_witness_block(block)) # make sure the block was processed) + txid = block.vtx[0].sha256) +) + self.generate(self.wallet, 99) # let the block mature) +) + # Create a transaction that spends the coinbase) + tx = CTransaction()) + tx.vin.append(CTxIn(COutPoint(txid, 0), b""))) + tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))) + tx.calc_sha256()) +) + # Check that serializing it with or without witness is the same) + # This is a sanity check of our testing framework.) 
+ assert_equal(msg_no_witness_tx(tx).serialize(), msg_tx(tx).serialize())) +) + self.test_node.send_and_ping(msg_tx(tx)) # make sure the block was processed) + assert tx.hash in self.nodes[0].getrawmempool()) + # Save this transaction for later) + self.utxo.append(UTXO(tx.sha256, 0, 49 * 100000000))) + self.generate(self.nodes[0], 1)) +) + @subtest) + def test_unnecessary_witness_before_segwit_activation(self):) + """Verify that blocks with witnesses are rejected before activation.""") +) + tx = CTransaction()) + tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))) + tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE])))) + tx.wit.vtxinwit.append(CTxInWitness())) + tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]) +) + # Verify the hash with witness differs from the txid) + # (otherwise our testing framework must be broken!)) + tx.rehash()) + assert_not_equal(tx.sha256, tx.calc_sha256(with_witness=True))) +) + # Construct a block that includes the transaction.) + block = self.build_next_block()) + self.update_witness_block_with_transactions(block, [tx])) + # Sending witness data before activation is not allowed (anti-spam) + # rule).) + test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='unexpected-witness')) +) + # But it should not be permanently marked bad...) + # Resend without witness information.) + self.test_node.send_and_ping(msg_no_witness_block(block)) # make sure the block was processed) + assert_equal(self.nodes[0].getbestblockhash(), block.hash)) +) + # Update our utxo list; we spent the first entry.) + self.utxo.pop(0)) + self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))) +) + @subtest) + def test_block_relay(self):) + """Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG.) +) + This is true regardless of segwit activation.) + Also test that we don't ask for blocks from unupgraded peers.""") +) + blocktype = 2 | MSG_WITNESS_FLAG) +) + # test_node has set NODE_WITNESS, so all getdata requests should be for) + # witness blocks.) + # Test announcing a block via inv results in a getdata, and that) + # announcing a block with a header results in a getdata) + block1 = self.build_next_block()) + block1.solve()) +) + # Send an empty headers message, to clear out any prior getheaders) + # messages that our peer may be waiting for us on.) + self.test_node.send_message(msg_headers())) +) + self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)) + assert self.test_node.last_message["getdata"].inv[0].type == blocktype) + test_witness_block(self.nodes[0], self.test_node, block1, True)) +) + block2 = self.build_next_block()) + block2.solve()) +) + self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)) + assert self.test_node.last_message["getdata"].inv[0].type == blocktype) + test_witness_block(self.nodes[0], self.test_node, block2, True)) +) + # Check that we can getdata for witness blocks or regular blocks,) + # and the right thing happens.) + if not self.segwit_active:) + # Before activation, we should be able to request old blocks with) + # or without witness, and they should be the same.) + chain_height = self.nodes[0].getblockcount()) + # Pick 10 random blocks on main chain, and verify that getdata's) + # for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.) 
+ all_heights = list(range(chain_height + 1))) + random.shuffle(all_heights)) + all_heights = all_heights[0:10]) + for height in all_heights:) + block_hash = self.nodes[0].getblockhash(height)) + rpc_block = self.nodes[0].getblock(block_hash, False)) + block_hash = int(block_hash, 16)) + block = self.test_node.request_block(block_hash, 2)) + wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG)) + assert_equal(block.serialize(), wit_block.serialize())) + assert_equal(block.serialize(), bytes.fromhex(rpc_block))) + else:) + # After activation, witness blocks and non-witness blocks should) + # be different. Verify rpc getblock() returns witness blocks, while) + # getdata respects the requested type.) + block = self.build_next_block()) + self.update_witness_block_with_transactions(block, [])) + # This gives us a witness commitment.) + assert len(block.vtx[0].wit.vtxinwit) == 1) + assert len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1) + test_witness_block(self.nodes[0], self.test_node, block, accepted=True)) + # Now try to retrieve it...) + rpc_block = self.nodes[0].getblock(block.hash, False)) + non_wit_block = self.test_node.request_block(block.sha256, 2)) + wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG)) + assert_equal(wit_block.serialize(), bytes.fromhex(rpc_block))) + assert_equal(wit_block.serialize(False), non_wit_block.serialize())) + assert_equal(wit_block.serialize(), block.serialize())) +) + # Test size, vsize, weight) + rpc_details = self.nodes[0].getblock(block.hash, True)) + assert_equal(rpc_details["size"], len(block.serialize()))) + assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))) + assert_equal(rpc_details["weight"], block.get_weight())) +) + # Upgraded node should not ask for blocks from unupgraded) + block4 = self.build_next_block()) + block4.solve()) + self.old_node.getdataset = set()) +) + # Blocks can be requested via direct-fetch (immediately upon processing the announcement)) + # or via parallel download (with an indeterminate delay from processing the announcement)) + # so to test that a block is NOT requested, we could guess a time period to sleep for,) + # and then check. We can avoid the sleep() by taking advantage of transaction getdata's) + # being processed after block getdata's, and announce a transaction as well,) + # and then check to see if that particular getdata has been received.) + # Since 0.14, inv's will only be responded to with a getheaders, so send a header) + # to announce this block.) + msg = msg_headers()) + msg.headers = [CBlockHeader(block4)]) + self.old_node.send_message(msg)) + self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])) + assert block4.sha256 not in self.old_node.getdataset) +) + @subtest) + def test_v0_outputs_arent_spendable(self):) + """Test that v0 outputs aren't spendable before segwit activation.) +) + ~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was) + backdated so that it applies to all blocks, going back to the genesis) + block.) 
+
+        Consequently, version 0 witness outputs are never spendable without
+        witness, and so can't be spent before segwit activation (the point at which
+        blocks are permitted to contain witnesses)."""
+
+        # Create two outputs, a p2wsh and p2sh-p2wsh
+        witness_script = CScript([OP_TRUE])
+        script_pubkey = script_to_p2wsh_script(witness_script)
+        p2sh_script_pubkey = script_to_p2sh_script(script_pubkey)
+
+        value = self.utxo[0].nValue // 3
+
+        tx = CTransaction()
+        tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')]
+        tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)]
+        tx.vout.append(CTxOut(value, CScript([OP_TRUE])))
+        tx.rehash()
+        txid = tx.sha256
+
+        # Add it to a block
+        block = self.build_next_block()
+        self.update_witness_block_with_transactions(block, [tx])
+        # Verify that segwit isn't activated. A block serialized with witness
+        # should be rejected prior to activation.
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
+        # Now send the block without witness. It should be accepted
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
+
+        # Now try to spend the outputs. This should fail since SCRIPT_VERIFY_WITNESS is always enabled.
+        p2wsh_tx = CTransaction()
+        p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')]
+        p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
+        p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
+        p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
+        p2wsh_tx.rehash()
+
+        p2sh_p2wsh_tx = CTransaction()
+        p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))]
+        p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
+        p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
+        p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
+        p2sh_p2wsh_tx.rehash()
+
+        for tx in [p2wsh_tx, p2sh_p2wsh_tx]:
+
+            block = self.build_next_block()
+            self.update_witness_block_with_transactions(block, [tx])
+
+            # When the block is serialized with a witness, the block will be rejected because witness
+            # data isn't allowed in blocks that don't commit to witness data.
+            test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
+
+            # When the block is serialized without witness, validation fails because the transaction is
+            # invalid (transactions are always validated with SCRIPT_VERIFY_WITNESS so a segwit v0 transaction
+            # without a witness is invalid).
+            # Note: The reject reason for this failure could be
+            # 'block-validation-failed' (if script check threads > 1) or
+            # 'mandatory-script-verify-flag-failed (Witness program was passed an
+            # empty witness)' (otherwise).
+            test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=False,
+                               reason='mandatory-script-verify-flag-failed (Witness program was passed an empty witness)')
+
+        self.utxo.pop(0)
+        self.utxo.append(UTXO(txid, 2, value))
+
+    @subtest
+    def test_witness_tx_relay_before_segwit_activation(self):
+
+        # Generate a transaction that doesn't require a witness, but send it
+        # with a witness. Should be rejected for premature-witness, but should
+        # not be added to recently rejected list.
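+        # (Caching such a rejection by txid would let an attacker blind nodes
+        # to a valid transaction simply by relaying it with a junk witness.)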
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
+        tx.wit.vtxinwit.append(CTxInWitness())
+        tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
+        tx.rehash()
+
+        tx_hash = tx.sha256
+        tx_value = tx.vout[0].nValue
+
+        # Verify that if a peer doesn't set nServices to include NODE_WITNESS,
+        # the getdata is just for the non-witness portion.
+        self.old_node.announce_tx_and_wait_for_getdata(tx)
+        assert self.old_node.last_message["getdata"].inv[0].type == MSG_TX
+
+        # Since we haven't delivered the tx yet, inv'ing the same tx from
+        # a witness transaction ought not result in a getdata.
+        self.test_node.announce_tx_and_wait_for_getdata(tx, success=False)
+
+        # Delivering this transaction with witness should fail (no matter who
+        # it's from)
+        assert_equal(len(self.nodes[0].getrawmempool()), 0)
+        assert_equal(len(self.nodes[1].getrawmempool()), 0)
+        test_transaction_acceptance(self.nodes[0], self.old_node, tx, with_witness=True, accepted=False)
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
+
+        # But eliminating the witness should fix it
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
+
+        # Cleanup: mine the first transaction and update utxo
+        self.generate(self.nodes[0], 1)
+        assert_equal(len(self.nodes[0].getrawmempool()), 0)
+
+        self.utxo.pop(0)
+        self.utxo.append(UTXO(tx_hash, 0, tx_value))
+
+    @subtest
+    def test_standardness_v0(self):
+        """Test V0 txout standardness.
+
+        V0 segwit outputs and inputs are always standard.
+        V0 segwit inputs may only be mined after activation, but not before."""
+
+        witness_script = CScript([OP_TRUE])
+        script_pubkey = script_to_p2wsh_script(witness_script)
+        p2sh_script_pubkey = script_to_p2sh_script(witness_script)
+
+        # First prepare a p2sh output (so that spending it will pass standardness)
+        p2sh_tx = CTransaction()
+        p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
+        p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 1000, p2sh_script_pubkey)]
+        p2sh_tx.rehash()
+
+        # Mine it on test_node to create the confirmed output.
+        test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_tx, with_witness=True, accepted=True)
+        self.generate(self.nodes[0], 1)
+
+        # Now test standardness of v0 P2WSH outputs.
+        # Start by creating a transaction with two outputs.
+        tx = CTransaction()
+        tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_script]))]
+        tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)]
+        tx.vout.append(CTxOut(8000, script_pubkey))  # Might burn this later
+        tx.vin[0].nSequence = MAX_BIP125_RBF_SEQUENCE  # Just to have the option to bump this tx from the mempool
+        tx.rehash()
+
+        # This is always accepted, since the mempool policy is to consider segwit as always active
+        # and thus allow segwit outputs
+        test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True)
+
+        # Now create something that looks like a P2PKH output. This won't be spendable.
+        witness_hash = sha256(witness_script)
+        script_pubkey = CScript([OP_0, hash160(witness_hash)])
+        tx2 = CTransaction()
+        # tx was accepted, so we spend the second output.
+        tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
+        tx2.vout = [CTxOut(7000, script_pubkey)]
+        tx2.wit.vtxinwit.append(CTxInWitness())
+        tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_script]
+        tx2.rehash()
+
+        test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
+
+        # Now update self.utxo for later tests.
+        tx3 = CTransaction()
+        # tx and tx2 were both accepted. Don't bother trying to reclaim the
+        # P2PKH output; just send tx's first output back to an anyone-can-spend.
+        self.sync_mempools([self.nodes[0], self.nodes[1]])
+        tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
+        tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
+        tx3.wit.vtxinwit.append(CTxInWitness())
+        tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_script]
+        tx3.rehash()
+        if not self.segwit_active:
+            # Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
+            # in blocks and the tx is impossible to mine right now.
+            testres3 = self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()])
+            testres3[0]["fees"].pop("effective-feerate")
+            testres3[0]["fees"].pop("effective-includes")
+            assert_equal(testres3,
+                [{
+                    'txid': tx3.hash,
+                    'wtxid': tx3.getwtxid(),
+                    'allowed': True,
+                    'vsize': tx3.get_vsize(),
+                    'fees': {
+                        'base': Decimal('0.00001000'),
+                    },
+                }],
+            )
+            # Create the same output as tx3, but by replacing tx
+            tx3_out = tx3.vout[0]
+            tx3 = tx
+            tx3.vout = [tx3_out]
+            tx3.rehash()
+            testres3_replaced = self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()])
+            testres3_replaced[0]["fees"].pop("effective-feerate")
+            testres3_replaced[0]["fees"].pop("effective-includes")
+            assert_equal(testres3_replaced,
+                [{
+                    'txid': tx3.hash,
+                    'wtxid': tx3.getwtxid(),
+                    'allowed': True,
+                    'vsize': tx3.get_vsize(),
+                    'fees': {
+                        'base': Decimal('0.00011000'),
+                    },
+                }],
+            )
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
+
+        self.generate(self.nodes[0], 1)
+        self.utxo.pop(0)
+        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
+        assert_equal(len(self.nodes[1].getrawmempool()), 0)
+
+    @subtest
+    def advance_to_segwit_active(self):
+        """Mine enough blocks to activate segwit."""
+        assert not softfork_active(self.nodes[0], 'segwit')
+        height = self.nodes[0].getblockcount()
+        self.generate(self.nodes[0], SEGWIT_HEIGHT - height - 2)
+        assert not softfork_active(self.nodes[0], 'segwit')
+        self.generate(self.nodes[0], 1)
+        assert softfork_active(self.nodes[0], 'segwit')
+        self.segwit_active = True
+
+    @subtest
+    def test_p2sh_witness(self):
+        """Test P2SH wrapped witness programs."""
+
+        # Prepare the p2sh-wrapped witness output
+        witness_script = CScript([OP_DROP, OP_TRUE])
+        p2wsh_pubkey = script_to_p2wsh_script(witness_script)
+        script_pubkey = script_to_p2sh_script(p2wsh_pubkey)
+        script_sig = CScript([p2wsh_pubkey])  # a push of the redeem script
+
+        # Fund the P2SH output
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
+        tx.rehash()
+
+        # Verify mempool acceptance and block validity
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
+        block = self.build_next_block()
+        self.update_witness_block_with_transactions(block, [tx])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=True)
+        self.sync_blocks()
+
+        # Now test attempts to spend the output.
+        spend_tx = CTransaction()
+        spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig))
+        spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
+        spend_tx.rehash()
+
+        # This transaction should not be accepted into the mempool pre- or
+        # post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
+        # will require a witness to spend a witness program regardless of
+        # segwit activation. Note that older bitcoind's that are not
+        # segwit-aware would also reject this for failing CLEANSTACK.
+        with self.nodes[0].assert_debug_log(
+                expected_msgs=[spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Witness program was passed an empty witness)']):
+            test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
+
+        # Try to put the witness script in the scriptSig, should also fail.
+        spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
+        spend_tx.rehash()
+        with self.nodes[0].assert_debug_log(
+                expected_msgs=[spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)']):
+            test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
+
+        # Now put the witness script in the witness, should succeed after
+        # segwit activates.
+        spend_tx.vin[0].scriptSig = script_sig
+        spend_tx.rehash()
+        spend_tx.wit.vtxinwit.append(CTxInWitness())
+        spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_script]
+
+        # Verify mempool acceptance
+        test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True)
+        block = self.build_next_block()
+        self.update_witness_block_with_transactions(block, [spend_tx])
+
+        # If we're after activation, then sending this with witnesses should be valid.
+        # This no longer works before activation, because SCRIPT_VERIFY_WITNESS
+        # is always set.
+        # TODO: rewrite this test to make clear that it only works after activation.
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+        # Update self.utxo
+        self.utxo.pop(0)
+        self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
+
+    @subtest
+    def test_witness_commitments(self):
+        """Test witness commitments.
+
+        This test can only be run after segwit has activated."""
+
+        # First try a correct witness commitment.
+        block = self.build_next_block()
+        add_witness_commitment(block)
+        block.solve()
+
+        # Test the test -- witness serialization should be different
+        assert_not_equal(msg_block(block).serialize(), msg_no_witness_block(block).serialize())
+
+        # This empty block should be valid.
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+        # Try to tweak the nonce
+        block_2 = self.build_next_block()
+        add_witness_commitment(block_2, nonce=28)
+        block_2.solve()
+
+        # The commitment should have changed!
+        assert_not_equal(block_2.vtx[0].vout[-1], block.vtx[0].vout[-1])
+
+        # This should also be valid.
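+        # (The nonce here is the BIP141 "witness reserved value"; any 32-byte
+        # value is consensus-valid as long as the coinbase commitment matches.)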
+        test_witness_block(self.nodes[0], self.test_node, block_2, accepted=True)
+
+        # Now test commitments with actual transactions
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+
+        # Let's construct a witness script
+        witness_script = CScript([OP_TRUE])
+        script_pubkey = script_to_p2wsh_script(witness_script)
+        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
+        tx.rehash()
+
+        # tx2 will spend tx1, and send back to a regular anyone-can-spend address
+        tx2 = CTransaction()
+        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
+        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_script))
+        tx2.wit.vtxinwit.append(CTxInWitness())
+        tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_script]
+        tx2.rehash()
+
+        block_3 = self.build_next_block()
+        self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
+        # Add an extra OP_RETURN output that matches the witness commitment template,
+        # even though it has extra data after the incorrect commitment.
+        # This block should fail.
+        block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
+        block_3.vtx[0].rehash()
+        block_3.hashMerkleRoot = block_3.calc_merkle_root()
+        block_3.solve()
+
+        test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False, reason='bad-witness-merkle-match')
+
+        # Add a different commitment with different nonce, but in the
+        # right location, and with some funds burned(!).
+        # This should succeed (nValue shouldn't affect finding the
+        # witness commitment).
+        add_witness_commitment(block_3, nonce=0)
+        block_3.vtx[0].vout[0].nValue -= 1
+        block_3.vtx[0].vout[-1].nValue += 1
+        block_3.vtx[0].rehash()
+        block_3.hashMerkleRoot = block_3.calc_merkle_root()
+        assert len(block_3.vtx[0].vout) == 4  # 3 OP_returns
+        block_3.solve()
+        test_witness_block(self.nodes[0], self.test_node, block_3, accepted=True)
+
+        # Finally test that a block with no witness transactions can
+        # omit the commitment.
+        block_4 = self.build_next_block()
+        tx3 = CTransaction()
+        tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
+        tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_script))
+        tx3.rehash()
+        block_4.vtx.append(tx3)
+        block_4.hashMerkleRoot = block_4.calc_merkle_root()
+        block_4.solve()
+        test_witness_block(self.nodes[0], self.test_node, block_4, with_witness=False, accepted=True)
+
+        # Update available utxo's for use in later test.
+        self.utxo.pop(0)
+        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
+
+    @subtest
+    def test_block_malleability(self):
+
+        # Make sure that a block that has too big a virtual size
+        # because of a too-large coinbase witness is not permanently
+        # marked bad.
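+        # (The witness is only committed to via the coinbase output, not the
+        # block header, so a peer could malleate witness data in transit;
+        # witness-related failures must not poison the block hash.)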
+        block = self.build_next_block()
+        add_witness_commitment(block)
+        block.solve()
+
+        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000)
+        assert block.get_weight() > MAX_BLOCK_WEIGHT
+
+        # We can't send over the p2p network, because this is too big to relay
+        # TODO: repeat this test with a block that can be relayed
+        assert_equal('bad-witness-nonce-size', self.nodes[0].submitblock(block.serialize().hex()))
+
+        assert_not_equal(self.nodes[0].getbestblockhash(), block.hash)
+
+        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
+        assert block.get_weight() < MAX_BLOCK_WEIGHT
+        assert_equal(None, self.nodes[0].submitblock(block.serialize().hex()))
+
+        assert self.nodes[0].getbestblockhash() == block.hash
+
+        # Now make sure that malleating the witness reserved value doesn't
+        # result in a block permanently marked bad.
+        block = self.build_next_block()
+        add_witness_commitment(block)
+        block.solve()
+
+        # Change the nonce -- should not cause the block to be permanently
+        # failed
+        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)]
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='bad-witness-merkle-match')
+
+        # Changing the witness reserved value doesn't change the block hash
+        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+    @subtest
+    def test_witness_block_size(self):
+        # TODO: Test that non-witness carrying blocks can't exceed 1MB
+        # Skipping this test for now; this is covered in feature_block.py
+
+        # Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
+        block = self.build_next_block()
+
+        assert len(self.utxo) > 0
+
+        # Create a P2WSH transaction.
+        # The witness script will be a bunch of OP_2DROP's, followed by OP_TRUE.
+        # This should give us plenty of room to tweak the spending tx's
+        # virtual size.
+        NUM_DROPS = 200  # 201 max ops per script!
+        NUM_OUTPUTS = 50
+
+        witness_script = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
+        script_pubkey = script_to_p2wsh_script(witness_script)
+
+        prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
+        value = self.utxo[0].nValue
+
+        parent_tx = CTransaction()
+        parent_tx.vin.append(CTxIn(prevout, b""))
+        child_value = int(value / NUM_OUTPUTS)
+        for _ in range(NUM_OUTPUTS):
+            parent_tx.vout.append(CTxOut(child_value, script_pubkey))
+        parent_tx.vout[0].nValue -= 50000
+        assert parent_tx.vout[0].nValue > 0
+        parent_tx.rehash()
+
+        child_tx = CTransaction()
+        for i in range(NUM_OUTPUTS):
+            child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
+        child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
+        for _ in range(NUM_OUTPUTS):
+            child_tx.wit.vtxinwit.append(CTxInWitness())
+            child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_script]
+        child_tx.rehash()
+        self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
+
+        additional_bytes = MAX_BLOCK_WEIGHT - block.get_weight()
+        i = 0
+        while additional_bytes > 0:
+            # Add some more bytes to each input until we hit MAX_BLOCK_WEIGHT+1
+            extra_bytes = min(additional_bytes + 1, 55)
+            block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
+            additional_bytes -= extra_bytes
+            i += 1
+
+        block.vtx[0].vout.pop()  # Remove old commitment
+        add_witness_commitment(block)
+        block.solve()
+        assert_equal(block.get_weight(), MAX_BLOCK_WEIGHT + 1)
+        # Make sure that our test case would exceed the old max-network-message
+        # limit
+        assert len(block.serialize()) > 2 * 1024 * 1024
+
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='bad-blk-weight')
+
+        # Now resize the second transaction to make the block fit.
+        cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
+        block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1)
+        block.vtx[0].vout.pop()
+        add_witness_commitment(block)
+        block.solve()
+        assert block.get_weight() == MAX_BLOCK_WEIGHT
+
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+        # Update available utxo's
+        self.utxo.pop(0)
+        self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
+
+    @subtest
+    def test_submit_block(self):
+        """Test that submitblock adds the nonce automatically when possible."""
+        block = self.build_next_block()
+
+        # Try using a custom nonce and then don't supply it.
+        # This shouldn't possibly work.
+        add_witness_commitment(block, nonce=1)
+        block.vtx[0].wit = CTxWitness()  # drop the nonce
+        block.solve()
+        assert_equal('bad-witness-merkle-match', self.nodes[0].submitblock(block.serialize().hex()))
+        assert_not_equal(self.nodes[0].getbestblockhash(), block.hash)
+
+        # Now redo commitment with the standard nonce, but let bitcoind fill it in.
+        add_witness_commitment(block, nonce=0)
+        block.vtx[0].wit = CTxWitness()
+        block.solve()
+        assert_equal(None, self.nodes[0].submitblock(block.serialize().hex()))
+        assert_equal(self.nodes[0].getbestblockhash(), block.hash)
+
+        # This time, add a tx with non-empty witness, but don't supply
+        # the commitment.
+        block_2 = self.build_next_block()
+        add_witness_commitment(block_2)
+        block_2.solve()
+
+        # Drop commitment and nonce -- submitblock should not fill in.
+        block_2.vtx[0].vout.pop()
+        block_2.vtx[0].wit = CTxWitness()
+
+        assert_equal('bad-txnmrklroot', self.nodes[0].submitblock(block_2.serialize().hex()))
+        # Tip should not advance!
+        assert_not_equal(self.nodes[0].getbestblockhash(), block_2.hash)
+
+    @subtest
+    def test_extra_witness_data(self):
+        """Test extra witness data in a transaction."""
+
+        block = self.build_next_block()
+
+        witness_script = CScript([OP_DROP, OP_TRUE])
+        script_pubkey = script_to_p2wsh_script(witness_script)
+
+        # First try extra witness data on a tx that doesn't require a witness
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+        tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey))
+        tx.vout.append(CTxOut(1000, CScript([OP_TRUE])))  # non-witness output
+        tx.wit.vtxinwit.append(CTxInWitness())
+        tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
+        tx.rehash()
+        self.update_witness_block_with_transactions(block, [tx])
+
+        # Extra witness data should not be allowed.
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=False,
+                           reason='mandatory-script-verify-flag-failed (Witness provided for non-witness script)')
+
+        # Try extra signature data. Ok if we're not spending a witness output.
+        block.vtx[1].wit.vtxinwit = []
+        block.vtx[1].vin[0].scriptSig = CScript([OP_0])
+        block.vtx[1].rehash()
+        add_witness_commitment(block)
+        block.solve()
+
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+        # Now try extra witness/signature data on an input that DOES require a
+        # witness
+        tx2 = CTransaction()
+        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))  # witness output
+        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b""))  # non-witness
+        tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
+        tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
+        tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_script]
+        tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])]
+
+        block = self.build_next_block()
+        self.update_witness_block_with_transactions(block, [tx2])
+
+        # This has extra witness data, so it should fail.
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=False,
+                           reason='mandatory-script-verify-flag-failed (Stack size must be exactly one after execution)')
+
+        # Now get rid of the extra witness, but add extra scriptSig data
+        tx2.vin[0].scriptSig = CScript([OP_TRUE])
+        tx2.vin[1].scriptSig = CScript([OP_TRUE])
+        tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
+        tx2.wit.vtxinwit[1].scriptWitness.stack = []
+        tx2.rehash()
+        add_witness_commitment(block)
+        block.solve()
+
+        # This has extra signature data for a witness input, so it should fail.
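+        # (BIP141 requires the scriptSig of an input spending a native witness
+        # program to be completely empty; all data belongs in the witness.)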
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=False,
+                           reason='mandatory-script-verify-flag-failed (Witness requires empty scriptSig)')
+
+        # Now get rid of the extra scriptsig on the witness input, and verify
+        # success (even with extra scriptsig data in the non-witness input)
+        tx2.vin[0].scriptSig = b""
+        tx2.rehash()
+        add_witness_commitment(block)
+        block.solve()
+
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+        # Update utxo for later tests
+        self.utxo.pop(0)
+        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
+
+    @subtest
+    def test_max_witness_push_length(self):
+        """Test that witness stack can only allow up to MAX_SCRIPT_ELEMENT_SIZE byte pushes."""
+
+        block = self.build_next_block()
+
+        witness_script = CScript([OP_DROP, OP_TRUE])
+        script_pubkey = script_to_p2wsh_script(witness_script)
+
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
+        tx.rehash()
+
+        tx2 = CTransaction()
+        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
+        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
+        tx2.wit.vtxinwit.append(CTxInWitness())
+        # First try a 521-byte stack element
+        tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_script]
+        tx2.rehash()
+
+        self.update_witness_block_with_transactions(block, [tx, tx2])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=False,
+                           reason='mandatory-script-verify-flag-failed (Push value size limit exceeded)')
+
+        # Now reduce the length of the stack element
+        tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE)
+
+        add_witness_commitment(block)
+        block.solve()
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+        # Update the utxo for later tests
+        self.utxo.pop()
+        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
+
+    @subtest
+    def test_max_witness_script_length(self):
+        """Test that witness outputs greater than 10kB can't be spent."""
+
+        MAX_WITNESS_SCRIPT_LENGTH = 10000
+
+        # This script is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
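+        # (Each max push costs 523 bytes: a 1-byte OP_PUSHDATA2, a 2-byte
+        # length, and 520 bytes of data. 19 * 523 = 9937; adding 63 OP_DROPs
+        # and one OP_TRUE brings the script to 10001 bytes, one over the limit.)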
+        long_witness_script = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 63 + [OP_TRUE])
+        assert len(long_witness_script) == MAX_WITNESS_SCRIPT_LENGTH + 1
+        long_script_pubkey = script_to_p2wsh_script(long_witness_script)
+
+        block = self.build_next_block()
+
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, long_script_pubkey))
+        tx.rehash()
+
+        tx2 = CTransaction()
+        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
+        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
+        tx2.wit.vtxinwit.append(CTxInWitness())
+        tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_script]
+        tx2.rehash()
+
+        self.update_witness_block_with_transactions(block, [tx, tx2])
+
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=False,
+                           reason='mandatory-script-verify-flag-failed (Script is too big)')
+
+        # Try again with one less byte in the witness script
+        witness_script = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 62 + [OP_TRUE])
+        assert len(witness_script) == MAX_WITNESS_SCRIPT_LENGTH
+        script_pubkey = script_to_p2wsh_script(witness_script)
+
+        tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey)
+        tx.rehash()
+        tx2.vin[0].prevout.hash = tx.sha256
+        tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_script]
+        tx2.rehash()
+        block.vtx = [block.vtx[0]]
+        self.update_witness_block_with_transactions(block, [tx, tx2])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+        self.utxo.pop()
+        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
+
+    @subtest
+    def test_witness_input_length(self):
+        """Test that vin length must match vtxinwit length."""
+
+        witness_script = CScript([OP_DROP, OP_TRUE])
+        script_pubkey = script_to_p2wsh_script(witness_script)
+
+        # Create a transaction that splits our utxo into many outputs
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+        value = self.utxo[0].nValue
+        for _ in range(10):
+            tx.vout.append(CTxOut(int(value / 10), script_pubkey))
+        tx.vout[0].nValue -= 1000
+        assert tx.vout[0].nValue >= 0
+
+        block = self.build_next_block()
+        self.update_witness_block_with_transactions(block, [tx])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+        # Try various ways to spend tx that should all break.
+        # This "broken" transaction serializer will not normalize
+        # the length of vtxinwit.
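+        # (The framework's standard serializer pads or trims vtxinwit so that
+        # exactly one witness is emitted per input; this subclass writes
+        # self.wit verbatim, letting the counts deliberately disagree.)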
+        class BrokenCTransaction(CTransaction):
+            def serialize_with_witness(self):
+                flags = 0
+                if not self.wit.is_null():
+                    flags |= 1
+                r = b""
+                r += self.version.to_bytes(4, "little")
+                if flags:
+                    dummy = []
+                    r += ser_vector(dummy)
+                    r += flags.to_bytes(1, "little")
+                r += ser_vector(self.vin)
+                r += ser_vector(self.vout)
+                if flags & 1:
+                    r += self.wit.serialize()
+                r += self.nLockTime.to_bytes(4, "little")
+                return r
+
+        tx2 = BrokenCTransaction()
+        for i in range(10):
+            tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
+        tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE])))
+
+        # First try using a too long vtxinwit
+        for i in range(11):
+            tx2.wit.vtxinwit.append(CTxInWitness())
+            tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_script]
+
+        block = self.build_next_block()
+        self.update_witness_block_with_transactions(block, [tx2])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='bad-txnmrklroot')
+
+        # Now try using a too short vtxinwit
+        tx2.wit.vtxinwit.pop()
+        tx2.wit.vtxinwit.pop()
+
+        block.vtx = [block.vtx[0]]
+        self.update_witness_block_with_transactions(block, [tx2])
+        # This block doesn't result in a specific reject reason, but an iostream exception:
+        # "Exception 'CDataStream::read(): end of data: unspecified iostream_category error' (...) caught"
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
+
+        # Now make one of the intermediate witnesses be incorrect
+        tx2.wit.vtxinwit.append(CTxInWitness())
+        tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_script]
+        tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_script]
+
+        block.vtx = [block.vtx[0]]
+        self.update_witness_block_with_transactions(block, [tx2])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=False,
+                           reason='mandatory-script-verify-flag-failed (Operation not valid with the current stack size)')
+
+        # Fix the broken witness and the block should be accepted.
+        tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_script]
+        block.vtx = [block.vtx[0]]
+        self.update_witness_block_with_transactions(block, [tx2])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+        self.utxo.pop()
+        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
+
+    @subtest
+    def test_tx_relay_after_segwit_activation(self):
+        """Test transaction relay after segwit activation.
+
+        After segwit activates, verify that mempool:
+        - rejects transactions with unnecessary/extra witnesses
+        - accepts transactions with valid witnesses
+        and that witness transactions are relayed to non-upgraded peers."""
+
+        # Generate a transaction that doesn't require a witness, but send it
+        # with a witness. Should be rejected because we can't use a witness
+        # when spending a non-witness output.
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
+        tx.wit.vtxinwit.append(CTxInWitness())
+        tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
+        tx.rehash()
+
+        tx_hash = tx.sha256
+
+        # Verify that unnecessary witnesses are rejected.
+        self.test_node.announce_tx_and_wait_for_getdata(tx)
+        assert_equal(len(self.nodes[0].getrawmempool()), 0)
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
+
+        # Verify that removing the witness succeeds.
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
+
+        # Now try to add extra witness data to a valid witness tx.
+        witness_script = CScript([OP_TRUE])
+        script_pubkey = script_to_p2wsh_script(witness_script)
+        tx2 = CTransaction()
+        tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
+        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
+        tx2.rehash()
+
+        tx3 = CTransaction()
+        tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
+        tx3.wit.vtxinwit.append(CTxInWitness())
+
+        # Add too-large for IsStandard witness and check that it does not enter reject filter
+        p2sh_script = CScript([OP_TRUE])
+        witness_script2 = CScript([b'a' * 400000])
+        tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_to_p2sh_script(p2sh_script)))
+        tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_script2]
+        tx3.rehash()
+
+        # Node will not be blinded to the transaction, requesting it any number of times
+        # if it is being announced via txid relay.
+        # Node will be blinded to the transaction via wtxid, however.
+        self.std_node.announce_tx_and_wait_for_getdata(tx3)
+        self.std_wtx_node.announce_tx_and_wait_for_getdata(tx3, use_wtxid=True)
+        test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
+        self.std_node.announce_tx_and_wait_for_getdata(tx3)
+        self.std_wtx_node.announce_tx_and_wait_for_getdata(tx3, use_wtxid=True, success=False)
+
+        # Remove witness stuffing, instead add extra witness push on stack
+        tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
+        tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_script]
+        tx3.rehash()
+
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)
+
+        # Get rid of the extra witness, and verify acceptance.
+        tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_script]
+        # Also check that old_node gets a tx announcement, even though this is
+        # a witness transaction.
+        self.old_node.wait_for_inv([CInv(MSG_TX, tx2.sha256)])  # wait until tx2 was inv'ed
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
+        self.old_node.wait_for_inv([CInv(MSG_TX, tx3.sha256)])
+
+        # Test that getrawtransaction returns correct witness information
+        # hash, size, vsize
+        raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
+        assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
+        assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
+        vsize = tx3.get_vsize()
+        assert_equal(raw_tx["vsize"], vsize)
+        assert_equal(raw_tx["weight"], tx3.get_weight())
+        assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
+        assert_equal(raw_tx["vin"][0]["txinwitness"][0], witness_script.hex())
+        assert_not_equal(vsize, raw_tx["size"])
+
+        # Cleanup: mine the transactions and update utxo for next test
+        self.generate(self.nodes[0], 1)
+        assert_equal(len(self.nodes[0].getrawmempool()), 0)
+
+        self.utxo.pop(0)
+        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
+
+    @subtest
+    def test_segwit_versions(self):
+        """Test validity of future segwit version transactions.
+
+        Future segwit versions are non-standard to spend, but valid in blocks.
+        Sending to future segwit versions is always allowed.
+        Can run this before and after segwit activation."""
+
+        NUM_SEGWIT_VERSIONS = 17  # will test OP_0, OP_1, ..., OP_16
+        if len(self.utxo) < NUM_SEGWIT_VERSIONS:
+            tx = CTransaction()
+            tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+            split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS
+            for _ in range(NUM_SEGWIT_VERSIONS):
+                tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
+            tx.rehash()
+            block = self.build_next_block()
+            self.update_witness_block_with_transactions(block, [tx])
+            test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+            self.utxo.pop(0)
+            for i in range(NUM_SEGWIT_VERSIONS):
+                self.utxo.append(UTXO(tx.sha256, i, split_value))
+
+        self.sync_blocks()
+        temp_utxo = []
+        tx = CTransaction()
+        witness_script = CScript([OP_TRUE])
+        witness_hash = sha256(witness_script)
+        assert_equal(len(self.nodes[1].getrawmempool()), 0)
+        for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
+            # First try to spend to a future version segwit script_pubkey.
+            if version == OP_1:
+                # Don't use 32-byte v1 witness (used by Taproot; see BIP 341)
+                script_pubkey = CScript([CScriptOp(version), witness_hash + b'\x00'])
+            else:
+                script_pubkey = CScript([CScriptOp(version), witness_hash])
+            tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
+            tx.vout = [CTxOut(self.utxo[0].nValue - 1000, script_pubkey)]
+            tx.rehash()
+            test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=False)
+            test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
+            self.utxo.pop(0)
+            temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
+
+        self.generate(self.nodes[0], 1)  # Mine all the transactions
+        assert len(self.nodes[0].getrawmempool()) == 0
+
+        # Finally, verify that version 0 -> version 2 transactions
+        # are standard
+        script_pubkey = CScript([CScriptOp(OP_2), witness_hash])
+        tx2 = CTransaction()
+        tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
+        tx2.vout = [CTxOut(tx.vout[0].nValue - 1000, script_pubkey)]
+        tx2.wit.vtxinwit.append(CTxInWitness())
+        tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_script]
+        tx2.rehash()
+        # Gets accepted to both policy-enforcing nodes and others.
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
+        test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
+        temp_utxo.pop()  # last entry in temp_utxo was the output we just spent
+        temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
+
+        # Spend everything in temp_utxo into a future segwit version output.
+        tx3 = CTransaction()
+        total_value = 0
+        for i in temp_utxo:
+            tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
+            tx3.wit.vtxinwit.append(CTxInWitness())
+            total_value += i.nValue
+        tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_script]
+        tx3.vout.append(CTxOut(total_value - 1000, script_pubkey))
+        tx3.rehash()
+
+        # First we test this transaction against std_node
+        # making sure the txid is added to the reject filter
+        self.std_node.announce_tx_and_wait_for_getdata(tx3)
+        test_transaction_acceptance(self.nodes[1], self.std_node, tx3, with_witness=True, accepted=False, reason="bad-txns-nonstandard-inputs")
+        # Now the node will no longer ask for getdata of this transaction when advertised by same txid
+        self.std_node.announce_tx_and_wait_for_getdata(tx3, success=False)
+
+        # Spending a higher version witness output is not allowed by policy,
+        # even with the node that accepts non-standard txs.
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades")
+
+        # Building a block with the transaction must be valid, however.
+        block = self.build_next_block()
+        self.update_witness_block_with_transactions(block, [tx2, tx3])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+        self.sync_blocks()
+
+        # Add utxo to our list
+        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
+
+    @subtest
+    def test_premature_coinbase_witness_spend(self):
+
+        block = self.build_next_block()
+        # Change the output of the block to be a witness output.
+        witness_script = CScript([OP_TRUE])
+        script_pubkey = script_to_p2wsh_script(witness_script)
+        block.vtx[0].vout[0].scriptPubKey = script_pubkey
+        # This next line will rehash the coinbase and update the merkle
+        # root, and solve.
+        self.update_witness_block_with_transactions(block, [])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+        spend_tx = CTransaction()
+        spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
+        spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_script)]
+        spend_tx.wit.vtxinwit.append(CTxInWitness())
+        spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_script]
+        spend_tx.rehash()
+
+        # Now test a premature spend.
+        self.generate(self.nodes[0], 98)
+        block2 = self.build_next_block()
+        self.update_witness_block_with_transactions(block2, [spend_tx])
+        test_witness_block(self.nodes[0], self.test_node, block2, accepted=False, reason='bad-txns-premature-spend-of-coinbase')
+
+        # Advancing one more block should allow the spend.
+        self.generate(self.nodes[0], 1)
+        block2 = self.build_next_block()
+        self.update_witness_block_with_transactions(block2, [spend_tx])
+        test_witness_block(self.nodes[0], self.test_node, block2, accepted=True)
+        self.sync_blocks()
+
+    @subtest
+    def test_uncompressed_pubkey(self):
+        """Test uncompressed pubkey validity in segwit transactions.
+
+        Uncompressed pubkeys are no longer supported in default relay policy,
+        but (for now) are still valid in blocks."""
+
+        # Segwit transactions using uncompressed pubkeys are not accepted
+        # under default policy, but should still pass consensus.
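+        # (The relevant policy check is SCRIPT_VERIFY_WITNESS_PUBKEYTYPE, a
+        # non-mandatory verification flag, hence the 'non-mandatory-script-
+        # verify-flag' reject reasons below.)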
+        key, pubkey = generate_keypair(compressed=False)
+        assert_equal(len(pubkey), 65)  # This should be an uncompressed pubkey
+
+        utxo = self.utxo.pop(0)
+
+        # Test 1: P2WPKH
+        # First create a P2WPKH output that uses an uncompressed pubkey
+        pubkeyhash = hash160(pubkey)
+        script_pkh = key_to_p2wpkh_script(pubkey)
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
+        tx.vout.append(CTxOut(utxo.nValue - 1000, script_pkh))
+        tx.rehash()
+
+        # Confirm it in a block.
+        block = self.build_next_block()
+        self.update_witness_block_with_transactions(block, [tx])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+        # Now try to spend it. Send it to a P2WSH output, which we'll
+        # use in the next test.
+        witness_script = key_to_p2pk_script(pubkey)
+        script_wsh = script_to_p2wsh_script(witness_script)
+
+        tx2 = CTransaction()
+        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
+        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_wsh))
+        script = keyhash_to_p2pkh_script(pubkeyhash)
+        tx2.wit.vtxinwit.append(CTxInWitness())
+        tx2.wit.vtxinwit[0].scriptWitness.stack = [pubkey]
+        sign_input_segwitv0(tx2, 0, script, tx.vout[0].nValue, key)
+
+        # Should fail policy test.
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
+        # But passes consensus.
+        block = self.build_next_block()
+        self.update_witness_block_with_transactions(block, [tx2])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+        # Test 2: P2WSH
+        # Try to spend the P2WSH output created in last test.
+        # Send it to a P2SH(P2WSH) output, which we'll use in the next test.
+        script_p2sh = script_to_p2sh_script(script_wsh)
+        script_sig = CScript([script_wsh])
+
+        tx3 = CTransaction()
+        tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
+        tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_p2sh))
+        tx3.wit.vtxinwit.append(CTxInWitness())
+        sign_p2pk_witness_input(witness_script, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
+
+        # Should fail policy test.
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
+        # But passes consensus.
+        block = self.build_next_block()
+        self.update_witness_block_with_transactions(block, [tx3])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+        # Test 3: P2SH(P2WSH)
+        # Try to spend the P2SH output created in the last test.
+        # Send it to a P2PKH output, which we'll use in the next test.
+        script_pubkey = keyhash_to_p2pkh_script(pubkeyhash)
+        tx4 = CTransaction()
+        tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig))
+        tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, script_pubkey))
+        tx4.wit.vtxinwit.append(CTxInWitness())
+        sign_p2pk_witness_input(witness_script, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
+
+        # Should fail policy test.
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
+        block = self.build_next_block()
+        self.update_witness_block_with_transactions(block, [tx4])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+        # Test 4: Uncompressed pubkeys should still be valid in non-segwit
+        # transactions.
+        tx5 = CTransaction()
+        tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
+        tx5.vout.append(CTxOut(tx4.vout[0].nValue - 1000, CScript([OP_TRUE])))
+        tx5.vin[0].scriptSig = CScript([pubkey])
+        sign_input_legacy(tx5, 0, script_pubkey, key)
+        # Should pass policy and consensus.
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx5, True, True)
+        block = self.build_next_block()
+        self.update_witness_block_with_transactions(block, [tx5])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+        self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
+
+    @subtest
+    def test_signature_version_1(self):
+        key, pubkey = generate_keypair()
+        witness_script = key_to_p2pk_script(pubkey)
+        script_pubkey = script_to_p2wsh_script(witness_script)
+
+        # First create a witness output for use in the tests.
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
+        tx.rehash()
+
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
+        # Mine this transaction in preparation for following tests.
+        block = self.build_next_block()
+        self.update_witness_block_with_transactions(block, [tx])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+        self.sync_blocks()
+        self.utxo.pop(0)
+
+        # Test each hashtype
+        prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
+        for sigflag in [0, SIGHASH_ANYONECANPAY]:
+            for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
+                hashtype |= sigflag
+                block = self.build_next_block()
+                tx = CTransaction()
+                tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
+                tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey))
+                tx.wit.vtxinwit.append(CTxInWitness())
+                # Too-large input value
+                sign_p2pk_witness_input(witness_script, tx, 0, hashtype, prev_utxo.nValue + 1, key)
+                self.update_witness_block_with_transactions(block, [tx])
+                test_witness_block(self.nodes[0], self.test_node, block, accepted=False,
+                                   reason='mandatory-script-verify-flag-failed (Script evaluated without error '
+                                          'but finished with a false/empty top stack element)')
+
+                # Too-small input value
+                sign_p2pk_witness_input(witness_script, tx, 0, hashtype, prev_utxo.nValue - 1, key)
+                block.vtx.pop()  # remove last tx
+                self.update_witness_block_with_transactions(block, [tx])
+                test_witness_block(self.nodes[0], self.test_node, block, accepted=False,
+                                   reason='mandatory-script-verify-flag-failed (Script evaluated without error '
+                                          'but finished with a false/empty top stack element)')
+
+                # Now try correct value
+                sign_p2pk_witness_input(witness_script, tx, 0, hashtype, prev_utxo.nValue, key)
+                block.vtx.pop()
+                self.update_witness_block_with_transactions(block, [tx])
+                test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+                prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
+
+        # Test combinations of signature hashes.
+        # Split the utxo into a lot of outputs.
+        # Randomly choose up to 10 to spend, sign with different hashtypes, and
+        # output to a random number of outputs. Repeat NUM_SIGHASH_TESTS times.
+        # Ensure that we've tested a situation where we use SIGHASH_SINGLE with
+        # an input index > number of outputs.
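+        # (Under legacy sighash rules, SIGHASH_SINGLE with an input index past
+        # the last output signs the constant 1 -- the well-known SIGHASH_SINGLE
+        # bug. BIP143 removed that quirk, which is why the out-of-bounds case
+        # is worth exercising here.)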
+        NUM_SIGHASH_TESTS = 500
+        temp_utxos = []
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
+        split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
+        for _ in range(NUM_SIGHASH_TESTS):
+            tx.vout.append(CTxOut(split_value, script_pubkey))
+        tx.wit.vtxinwit.append(CTxInWitness())
+        sign_p2pk_witness_input(witness_script, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
+        for i in range(NUM_SIGHASH_TESTS):
+            temp_utxos.append(UTXO(tx.sha256, i, split_value))
+
+        block = self.build_next_block()
+        self.update_witness_block_with_transactions(block, [tx])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+        block = self.build_next_block()
+        used_sighash_single_out_of_bounds = False
+        for i in range(NUM_SIGHASH_TESTS):
+            # Ping regularly to keep the connection alive
+            if (not i % 100):
+                self.test_node.sync_with_ping()
+            # Choose random number of inputs to use.
+            num_inputs = random.randint(1, 10)
+            # Create a slight bias for producing more utxos
+            num_outputs = random.randint(1, 11)
+            random.shuffle(temp_utxos)
+            assert len(temp_utxos) > num_inputs
+            tx = CTransaction()
+            total_value = 0
+            for i in range(num_inputs):
+                tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
+                tx.wit.vtxinwit.append(CTxInWitness())
+                total_value += temp_utxos[i].nValue
+            split_value = total_value // num_outputs
+            for _ in range(num_outputs):
+                tx.vout.append(CTxOut(split_value, script_pubkey))
+            for i in range(num_inputs):
+                # Now try to sign each input, using a random hashtype.
+                anyonecanpay = 0
+                if random.randint(0, 1):
+                    anyonecanpay = SIGHASH_ANYONECANPAY
+                hashtype = random.randint(1, 3) | anyonecanpay
+                sign_p2pk_witness_input(witness_script, tx, i, hashtype, temp_utxos[i].nValue, key)
+                if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
+                    used_sighash_single_out_of_bounds = True
+            tx.rehash()
+            for i in range(num_outputs):
+                temp_utxos.append(UTXO(tx.sha256, i, split_value))
+            temp_utxos = temp_utxos[num_inputs:]
+
+            block.vtx.append(tx)
+
+            # Test the block periodically, if we're close to maxblocksize
+            if block.get_weight() > MAX_BLOCK_WEIGHT - 4000:
+                self.update_witness_block_with_transactions(block, [])
+                test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+                block = self.build_next_block()
+
+        if (not used_sighash_single_out_of_bounds):
+            self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
+        # Test the transactions we've added to the block
+        if (len(block.vtx) > 1):
+            self.update_witness_block_with_transactions(block, [])
+            test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+        # Now test witness version 0 P2PKH transactions
+        pubkeyhash = hash160(pubkey)
+        script_pkh = key_to_p2wpkh_script(pubkey)
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
+        tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh))
+        tx.wit.vtxinwit.append(CTxInWitness())
+        sign_p2pk_witness_input(witness_script, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
+        tx2 = CTransaction()
+        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
+        tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
+
+        script = keyhash_to_p2pkh_script(pubkeyhash)
+        tx2.wit.vtxinwit.append(CTxInWitness())
+        sign_input_segwitv0(tx2, 0, script, tx.vout[0].nValue, key)
+        signature = tx2.wit.vtxinwit[0].scriptWitness.stack.pop()
+
+        # Check that we can't have a scriptSig
+        tx2.vin[0].scriptSig = CScript([signature, pubkey])
+        tx2.rehash()
+        block = self.build_next_block()
+        self.update_witness_block_with_transactions(block, [tx, tx2])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=False,
+                           reason='mandatory-script-verify-flag-failed (Witness requires empty scriptSig)')
+
+        # Move the signature to the witness.
+        block.vtx.pop()
+        tx2.wit.vtxinwit.append(CTxInWitness())
+        tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
+        tx2.vin[0].scriptSig = b""
+        tx2.rehash()
+
+        self.update_witness_block_with_transactions(block, [tx2])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+        temp_utxos.pop(0)
+
+        # Update self.utxos for later tests by creating two outputs
+        # that consolidate all the coins in temp_utxos.
+        output_value = sum(i.nValue for i in temp_utxos) // 2
+
+        tx = CTransaction()
+        index = 0
+        # Just spend to our usual anyone-can-spend output
+        tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
+        for i in temp_utxos:
+            # Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
+            # the signatures as we go.
+            tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
+            tx.wit.vtxinwit.append(CTxInWitness())
+            sign_p2pk_witness_input(witness_script, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
+            index += 1
+        block = self.build_next_block()
+        self.update_witness_block_with_transactions(block, [tx])
+        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
+
+        for i in range(len(tx.vout)):
+            self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
+
+    @subtest
+    def test_non_standard_witness_blinding(self):
+        """Test that an unnecessary witness on a transaction does not blind the node to it"""
+
+        # Create a p2sh output -- this is so we can pass the standardness
+        # rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
+        # in P2SH).
+        p2sh_program = CScript([OP_TRUE])
+        script_pubkey = script_to_p2sh_script(p2sh_program)
+
+        # Now check that unnecessary witnesses can't be used to blind a node
+        # to a transaction, eg by violating standardness checks.
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
+        tx.rehash()
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx, False, True)
+        self.generate(self.nodes[0], 1)
+
+        # We'll add an unnecessary witness to this transaction that would cause
+        # it to be non-standard, to test that violating policy with a witness
+        # doesn't blind a node to a transaction. Transactions
+        # rejected for having a witness shouldn't be added
+        # to the rejection cache.
+        tx2 = CTransaction()
+        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
+        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
+        tx2.wit.vtxinwit.append(CTxInWitness())
+        tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
+        tx2.rehash()
+        # This will be rejected due to a policy check:
+        # No witness is allowed, since it is not a witness program but a p2sh program
+        test_transaction_acceptance(self.nodes[1], self.std_node, tx2, True, False, 'bad-witness-nonstandard')
+
+        # If we send without witness, it should be accepted.
+        test_transaction_acceptance(self.nodes[1], self.std_node, tx2, False, True)
+
+        # Now create a new anyone-can-spend utxo for the next test.
+        tx3 = CTransaction()
+        tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
+        tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
+        tx3.rehash()
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx2, False, True)
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx3, False, True)
+
+        self.generate(self.nodes[0], 1)
+
+        # Update our utxo list; we spent the first entry.
+        self.utxo.pop(0)
+        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
+
+    @subtest
+    def test_non_standard_witness(self):
+        """Test detection of non-standard P2WSH witness"""
+        pad = chr(1).encode('latin-1')
+
+        # Create scripts for tests
+        scripts = []
+        scripts.append(CScript([OP_DROP] * 100))
+        scripts.append(CScript([OP_DROP] * 99))
+        scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
+        scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
+
+        p2wsh_scripts = []
+
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+
+        # For each script, generate a pair of P2WSH and P2SH-P2WSH output.
+        outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
+        for i in scripts:
+            p2wsh = script_to_p2wsh_script(i)
+            p2wsh_scripts.append(p2wsh)
+            tx.vout.append(CTxOut(outputvalue, p2wsh))
+            tx.vout.append(CTxOut(outputvalue, script_to_p2sh_script(p2wsh)))
+        tx.rehash()
+        txid = tx.sha256
+        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
+
+        self.generate(self.nodes[0], 1)
+
+        # Creating transactions for tests
+        p2wsh_txs = []
+        p2sh_txs = []
+        for i in range(len(scripts)):
+            p2wsh_tx = CTransaction()
+            p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2)))
+            p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(b"")])))
+            p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
+            p2wsh_tx.rehash()
+            p2wsh_txs.append(p2wsh_tx)
+            p2sh_tx = CTransaction()
+            p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]])))
+            p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(b"")])))
+            p2sh_tx.wit.vtxinwit.append(CTxInWitness())
+            p2sh_tx.rehash()
+            p2sh_txs.append(p2sh_tx)
+
+        # Testing native P2WSH
+        # Witness stack size, excluding witnessScript, over 100 is non-standard
+        p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
+        test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[0], True, False, 'bad-witness-nonstandard')
+        # Non-standard nodes should accept
+        test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[0], True, True)
+
+        # Stack element size over 80 bytes is non-standard
+        p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
+        test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, False, 'bad-witness-nonstandard')
+        # Non-standard nodes should accept
+        test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[1], True, True)
+        # Standard nodes should accept if element size is not over 80 bytes
+        p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
+        test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, True)
+
+        # witnessScript size at 3600 bytes is standard
+        p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
+        test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[2], True, True)
+        # witnessScript size at 3601 bytes is non-standard
+        p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
+        test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[3], True, False, 'bad-witness-nonstandard')
+        # Non-standard nodes should accept
+        test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[3], True, True)
+
+        # Repeating the same tests with P2SH-P2WSH
+        p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
+        test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[0], True, False, 'bad-witness-nonstandard')
+        test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[0], True, True)
+        p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
+        test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, False, 'bad-witness-nonstandard')
+        test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[1], True, True)
+        p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
+        test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, True)
+        p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
+        test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[2], True, True)
+        test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[2], True, True)
+        p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
+        test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[3], True, False, 'bad-witness-nonstandard')
+        test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[3], True, True)
+
+        self.generate(self.nodes[0], 1)  # Mine and clean up the mempool of non-standard node
+        # Valid but non-standard transactions in a block should be accepted by standard node
+        self.sync_blocks()
+        assert_equal(len(self.nodes[0].getrawmempool()), 0)
+        assert_equal(len(self.nodes[1].getrawmempool()), 0)
+
+        self.utxo.pop(0)
+
+    @subtest
+    def test_witness_sigops(self):
+        """Test sigop counting is correct inside witnesses."""
+
+        # Keep this under MAX_OPS_PER_SCRIPT (201)
+        witness_script = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
+        script_pubkey = script_to_p2wsh_script(witness_script)
+
+        sigops_per_script = 20 * 5 + 193 * 1
+        # We'll produce 2 extra outputs, one with a program that would take us
+        # over max sig ops, and one with a program that would exactly reach max
+        # sig ops
+        outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
+        extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
+
+        # We chose the number of checkmultisigs/checksigs to make this work:
+        assert extra_sigops_available < 100  # steer clear of MAX_OPS_PER_SCRIPT
+
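With the consensus value MAX_SIGOP_COST = 80000 (matching MAX_BLOCK_SIGOPS_COST in src/consensus/consensus.h), the budget works out concretely; inside a witness, OP_CHECKMULTISIG counts as 20 sigops and OP_CHECKSIG as 1:

```python
MAX_SIGOP_COST = 80000                  # block-wide sigop cost budget
sigops_per_script = 20 * 5 + 193 * 1    # = 293 per spend of witness_script

outputs = MAX_SIGOP_COST // sigops_per_script + 2   # 273 + 2 = 275
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script

# 273 spends consume 273 * 293 = 79989 sigops, leaving 11 in the budget:
assert extra_sigops_available == 80000 - 273 * 293 == 11
# so "toomany" (11 + 1 checksigs) overshoots by one, and "justright" (11) lands exactly.
```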
+        # This script, when spent with the first
+        # N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
+        # would push us just over the block sigop limit.
+        witness_script_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
+        script_pubkey_toomany = script_to_p2wsh_script(witness_script_toomany)
+
+        # If we spend this script instead, we would exactly reach our sigop
+        # limit (for witness sigops).
+        witness_script_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
+        script_pubkey_justright = script_to_p2wsh_script(witness_script_justright)
+
+        # First split our available utxo into a bunch of outputs
+        split_value = self.utxo[0].nValue // outputs
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+        for _ in range(outputs):
+            tx.vout.append(CTxOut(split_value, script_pubkey))
+        tx.vout[-2].scriptPubKey = script_pubkey_toomany
+        tx.vout[-1].scriptPubKey = script_pubkey_justright
+        tx.rehash()
+
+        block_1 = self.build_next_block()
+        self.update_witness_block_with_transactions(block_1, [tx])
+        test_witness_block(self.nodes[0], self.test_node, block_1, accepted=True)
+
+        tx2 = CTransaction()
+        # If we try to spend the first n-1 outputs from tx, that should be
+        # too many sigops.
+        total_value = 0
+        for i in range(outputs - 1):
+            tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
+            tx2.wit.vtxinwit.append(CTxInWitness())
+            tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_script]
+            total_value += tx.vout[i].nValue
+        tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_script_toomany]
+        tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
+        tx2.rehash()
+
+        block_2 = self.build_next_block()
+        self.update_witness_block_with_transactions(block_2, [tx2])
+        test_witness_block(self.nodes[0], self.test_node, block_2, accepted=False, reason='bad-blk-sigops')
+
+        # Try dropping the last input in tx2, and add an output that has
+        # too many sigops (contributing to legacy sigop count).
+        checksig_count = (extra_sigops_available // 4) + 1
+        script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count)
+        tx2.vout.append(CTxOut(0, script_pubkey_checksigs))
+        tx2.vin.pop()
+        tx2.wit.vtxinwit.pop()
+        tx2.vout[0].nValue -= tx.vout[-2].nValue
+        tx2.rehash()
+        block_3 = self.build_next_block()
+        self.update_witness_block_with_transactions(block_3, [tx2])
+        test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False, reason='bad-blk-sigops')
+
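The `extra_sigops_available // 4` above comes from sigop *cost* accounting: a bare OP_CHECKSIG in a scriptPubKey is a legacy sigop, scaled by the witness scale factor (4), while a checksig executed inside a witness costs 1. Continuing the worked numbers (11 units of budget left over):

```python
WITNESS_SCALE_FACTOR = 4   # legacy sigops are scaled up by this factor

checksig_count = (11 // 4) + 1                           # = 3 bare OP_CHECKSIGs
assert checksig_count * WITNESS_SCALE_FACTOR == 12       # 12 > 11: block_3 is over budget
assert (checksig_count - 1) * WITNESS_SCALE_FACTOR == 8  # 8 <= 11: block_4 fits
```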
+        # If we drop the last checksig in this output, the tx should succeed.
+        block_4 = self.build_next_block()
+        tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1))
+        tx2.rehash()
+        self.update_witness_block_with_transactions(block_4, [tx2])
+        test_witness_block(self.nodes[0], self.test_node, block_4, accepted=True)
+
+        # Reset the tip back down for the next test
+        self.sync_blocks()
+        for x in self.nodes:
+            x.invalidateblock(block_4.hash)
+
+        # Try replacing the last input of tx2 to be spending the last
+        # output of tx
+        block_5 = self.build_next_block()
+        tx2.vout.pop()
+        tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b""))
+        tx2.wit.vtxinwit.append(CTxInWitness())
+        tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_script_justright]
+        tx2.rehash()
+        self.update_witness_block_with_transactions(block_5, [tx2])
+        test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True)
+
+        # TODO: test p2sh sigop counting
+
+        # Cleanup and prep for next test
+        self.utxo.pop(0)
+        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
+
+    @subtest
+    def test_superfluous_witness(self):
+        # Serialization of tx that puts witness flag to 3 always
+        def serialize_with_bogus_witness(tx):
+            flags = 3
+            r = b""
+            r += tx.version.to_bytes(4, "little")
+            if flags:
+                dummy = []
+                r += ser_vector(dummy)
+                r += flags.to_bytes(1, "little")
+            r += ser_vector(tx.vin)
+            r += ser_vector(tx.vout)
+            if flags & 1:
+                if (len(tx.wit.vtxinwit) != len(tx.vin)):
+                    # vtxinwit must have the same length as vin
+                    tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)]
+                    for _ in range(len(tx.wit.vtxinwit), len(tx.vin)):
+                        tx.wit.vtxinwit.append(CTxInWitness())
+                r += tx.wit.serialize()
+            r += tx.nLockTime.to_bytes(4, "little")
+            return r
+
+        class msg_bogus_tx(msg_tx):
+            def serialize(self):
+                return serialize_with_bogus_witness(self.tx)
+
+        tx = self.wallet.create_self_transfer()['tx']
+        assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, hexstring=serialize_with_bogus_witness(tx).hex(), iswitness=True)
+        with self.nodes[0].assert_debug_log(['Unknown transaction optional data']):
+            self.test_node.send_and_ping(msg_bogus_tx(tx))
+        tx.wit.vtxinwit = []  # drop witness
+        assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, hexstring=serialize_with_bogus_witness(tx).hex(), iswitness=True)
+        with self.nodes[0].assert_debug_log(['Superfluous witness record']):
+            self.test_node.send_and_ping(msg_bogus_tx(tx))
+
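For contrast with the corrupted serializer above, here is a sketch of the valid BIP144 layout it deviates from: marker byte 0x00 (encoded via the empty vector), flags byte 0x01. Any flag bits other than bit 0, such as the `flags = 3` used above, are "unknown optional data" to the deserializer. This reuses `ser_vector` from test_framework.messages and assumes `tx.wit.vtxinwit` is already padded to `len(tx.vin)`:

```python
def serialize_segwit(tx):
    """Sketch of the valid BIP144 layout (marker=0x00, flags=0x01)."""
    r = b""
    r += tx.version.to_bytes(4, "little")    # 4-byte version
    r += b"\x00"                             # marker: distinguishes from a legacy vin count
    r += b"\x01"                             # flags: only bit 0 is currently defined
    r += ser_vector(tx.vin)                  # inputs
    r += ser_vector(tx.vout)                 # outputs
    r += tx.wit.serialize()                  # one witness stack per input
    r += tx.nLockTime.to_bytes(4, "little")  # 4-byte locktime
    return r
```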
+    @subtest
+    def test_wtxid_relay(self):
+        # Use brand new nodes to avoid contamination from earlier tests
+        self.wtx_node = self.nodes[0].add_p2p_connection(TestP2PConn(wtxidrelay=True), services=P2P_SERVICES)
+        self.tx_node = self.nodes[0].add_p2p_connection(TestP2PConn(wtxidrelay=False), services=P2P_SERVICES)
+
+        # Check wtxidrelay feature negotiation message through connecting a new peer
+        def received_wtxidrelay():
+            return (len(self.wtx_node.last_wtxidrelay) > 0)
+        self.wtx_node.wait_until(received_wtxidrelay)
+
+        # Create a Segwit output from the latest UTXO
+        # and announce it to the network
+        witness_script = CScript([OP_TRUE])
+        script_pubkey = script_to_p2wsh_script(witness_script)
+
+        tx = CTransaction()
+        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
+        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
+        tx.rehash()
+
+        # Create a Segwit transaction
+        tx2 = CTransaction()
+        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
+        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
+        tx2.wit.vtxinwit.append(CTxInWitness())
+        tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_script]
+        tx2.rehash()
+
+        # Announce Segwit transaction with wtxid
+        # and wait for getdata
+        self.wtx_node.announce_tx_and_wait_for_getdata(tx2, use_wtxid=True)
+        with p2p_lock:
+            lgd = self.wtx_node.lastgetdata[:]
+        assert_equal(lgd, [CInv(MSG_WTX, tx2.calc_sha256(True))])
+
+        # Announce Segwit transaction from non wtxidrelay peer
+        # and wait for getdata
+        self.tx_node.announce_tx_and_wait_for_getdata(tx2, use_wtxid=False)
+        with p2p_lock:
+            lgd = self.tx_node.lastgetdata[:]
+        assert_equal(lgd, [CInv(MSG_TX|MSG_WITNESS_FLAG, tx2.sha256)])
+
+        # Send tx2 through; it's an orphan so won't be accepted
+        with p2p_lock:
+            self.wtx_node.last_message.pop("getdata", None)
+        test_transaction_acceptance(self.nodes[0], self.wtx_node, tx2, with_witness=True, accepted=False)
+
+        # Expect a request for parent (tx) by txid despite use of WTX peer
+        self.wtx_node.wait_for_getdata([tx.sha256], timeout=60)
+        with p2p_lock:
+            lgd = self.wtx_node.lastgetdata[:]
+        assert_equal(lgd, [CInv(MSG_WITNESS_TX, tx.sha256)])
+
+        # Send tx through
+        test_transaction_acceptance(self.nodes[0], self.wtx_node, tx, with_witness=False, accepted=True)
+
+        # Check tx2 is there now
+        assert_equal(tx2.hash in self.nodes[0].getrawmempool(), True)
+
+
+if __name__ == '__main__':
+    SegWitTest(__file__).main()
diff --git a/test/functional/p2p_sendtxrcncl.py b/test/functional/p2p_sendtxrcncl.py
index c3ad407189e892..f98988e367f126 100755
--- a/test/functional/p2p_sendtxrcncl.py
+++ b/test/functional/p2p_sendtxrcncl.py
@@ -1,238 +1,238 @@
-#!/usr/bin/env python3
-# Copyright (c) 2022 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test SENDTXRCNCL message
-"""
-
-from test_framework.messages import (
-    msg_sendtxrcncl,
-    msg_verack,
-    msg_version,
-    msg_wtxidrelay,
-    NODE_BLOOM,
-)
-from test_framework.p2p import (
-    P2PInterface,
-    P2P_SERVICES,
-    P2P_SUBVERSION,
-    P2P_VERSION,
-)
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (
-    assert_equal,
-    assert_not_equal,
-)
-
-class PeerNoVerack(P2PInterface):
-    def __init__(self, wtxidrelay=True):
-        super().__init__(wtxidrelay=wtxidrelay)
-
-    def on_version(self, message):
-        # Avoid sending verack in response to version.
-        # When calling add_p2p_connection, wait_for_verack=False must be set (see
-        # comment in add_p2p_connection).
- self.send_version() - if message.nVersion >= 70016 and self.wtxidrelay: - self.send_message(msg_wtxidrelay()) - -class SendTxrcnclReceiver(P2PInterface): - def __init__(self): - super().__init__() - self.sendtxrcncl_msg_received = None - - def on_sendtxrcncl(self, message): - self.sendtxrcncl_msg_received = message - - -class P2PFeelerReceiver(SendTxrcnclReceiver): - def on_version(self, message): - # feeler connections can not send any message other than their own version - self.send_version() - - -class PeerTrackMsgOrder(P2PInterface): - def __init__(self): - super().__init__() - self.messages = [] - - def on_message(self, message): - super().on_message(message) - self.messages.append(message) - -def create_sendtxrcncl_msg(): - sendtxrcncl_msg = msg_sendtxrcncl() - sendtxrcncl_msg.version = 1 - sendtxrcncl_msg.salt = 2 - return sendtxrcncl_msg - -class SendTxRcnclTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - self.extra_args = [['-txreconciliation']] - - def run_test(self): - # Check everything concerning *sending* SENDTXRCNCL - # First, *sending* to *inbound*. - self.log.info('SENDTXRCNCL sent to an inbound') - peer = self.nodes[0].add_p2p_connection(SendTxrcnclReceiver(), send_version=True, wait_for_verack=True) - assert peer.sendtxrcncl_msg_received - assert_equal(peer.sendtxrcncl_msg_received.version, 1) - self.nodes[0].disconnect_p2ps() - - self.log.info('SENDTXRCNCL should be sent before VERACK') - peer = self.nodes[0].add_p2p_connection(PeerTrackMsgOrder(), send_version=True, wait_for_verack=True) - peer.wait_for_verack() - verack_index = [i for i, msg in enumerate(peer.messages) if msg.msgtype == b'verack'][0] - sendtxrcncl_index = [i for i, msg in enumerate(peer.messages) if msg.msgtype == b'sendtxrcncl'][0] - assert sendtxrcncl_index < verack_index - self.nodes[0].disconnect_p2ps() - - self.log.info('SENDTXRCNCL on pre-WTXID version should not be sent') - peer = self.nodes[0].add_p2p_connection(SendTxrcnclReceiver(), send_version=False, wait_for_verack=False) - pre_wtxid_version_msg = msg_version() - pre_wtxid_version_msg.nVersion = 70015 - pre_wtxid_version_msg.strSubVer = P2P_SUBVERSION - pre_wtxid_version_msg.nServices = P2P_SERVICES - pre_wtxid_version_msg.relay = 1 - peer.send_message(pre_wtxid_version_msg) - peer.wait_for_verack() - assert not peer.sendtxrcncl_msg_received - self.nodes[0].disconnect_p2ps() - - self.log.info('SENDTXRCNCL for fRelay=false should not be sent') - peer = self.nodes[0].add_p2p_connection(SendTxrcnclReceiver(), send_version=False, wait_for_verack=False) - no_txrelay_version_msg = msg_version() - no_txrelay_version_msg.nVersion = P2P_VERSION - no_txrelay_version_msg.strSubVer = P2P_SUBVERSION - no_txrelay_version_msg.nServices = P2P_SERVICES - no_txrelay_version_msg.relay = 0 - peer.send_message(no_txrelay_version_msg) - peer.wait_for_verack() - assert not peer.sendtxrcncl_msg_received - self.nodes[0].disconnect_p2ps() - - self.log.info('SENDTXRCNCL for fRelay=false should not be sent (with NODE_BLOOM offered)') - self.restart_node(0, ["-peerbloomfilters", "-txreconciliation"]) - peer = self.nodes[0].add_p2p_connection(SendTxrcnclReceiver(), send_version=False, wait_for_verack=False) - no_txrelay_version_msg = msg_version() - no_txrelay_version_msg.nVersion = P2P_VERSION - no_txrelay_version_msg.strSubVer = P2P_SUBVERSION - no_txrelay_version_msg.nServices = P2P_SERVICES - no_txrelay_version_msg.relay = 0 - peer.send_message(no_txrelay_version_msg) - peer.wait_for_verack() - assert peer.nServices & NODE_BLOOM != 
0 - assert not peer.sendtxrcncl_msg_received - self.nodes[0].disconnect_p2ps() - - # Now, *sending* to *outbound*. - self.log.info('SENDTXRCNCL sent to an outbound') - peer = self.nodes[0].add_outbound_p2p_connection( - SendTxrcnclReceiver(), wait_for_verack=True, p2p_idx=0, connection_type="outbound-full-relay") - assert peer.sendtxrcncl_msg_received - assert_equal(peer.sendtxrcncl_msg_received.version, 1) - self.nodes[0].disconnect_p2ps() - - self.log.info('SENDTXRCNCL should not be sent if block-relay-only') - peer = self.nodes[0].add_outbound_p2p_connection( - SendTxrcnclReceiver(), wait_for_verack=True, p2p_idx=0, connection_type="block-relay-only") - assert not peer.sendtxrcncl_msg_received - self.nodes[0].disconnect_p2ps() - - self.log.info("SENDTXRCNCL should not be sent if feeler") - peer = self.nodes[0].add_outbound_p2p_connection(P2PFeelerReceiver(), p2p_idx=0, connection_type="feeler") - assert not peer.sendtxrcncl_msg_received - self.nodes[0].disconnect_p2ps() - - self.log.info("SENDTXRCNCL should not be sent if addrfetch") - peer = self.nodes[0].add_outbound_p2p_connection( - SendTxrcnclReceiver(), wait_for_verack=True, p2p_idx=0, connection_type="addr-fetch") - assert not peer.sendtxrcncl_msg_received - self.nodes[0].disconnect_p2ps() - - self.log.info('SENDTXRCNCL not sent if -txreconciliation flag is not set') - self.restart_node(0, []) - peer = self.nodes[0].add_p2p_connection(SendTxrcnclReceiver(), send_version=True, wait_for_verack=True) - assert not peer.sendtxrcncl_msg_received - self.nodes[0].disconnect_p2ps() - - self.log.info('SENDTXRCNCL not sent if blocksonly is set') - self.restart_node(0, ["-txreconciliation", "-blocksonly"]) - peer = self.nodes[0].add_p2p_connection(SendTxrcnclReceiver(), send_version=True, wait_for_verack=True) - assert not peer.sendtxrcncl_msg_received - self.nodes[0].disconnect_p2ps() - - # Check everything concerning *receiving* SENDTXRCNCL - # First, receiving from *inbound*. 
- self.restart_node(0, ["-txreconciliation"]) - self.log.info('valid SENDTXRCNCL received') - peer = self.nodes[0].add_p2p_connection(PeerNoVerack(), send_version=True, wait_for_verack=False) - with self.nodes[0].assert_debug_log(["received: sendtxrcncl"]): - peer.send_message(create_sendtxrcncl_msg()) - self.log.info('second SENDTXRCNCL triggers a disconnect') - with self.nodes[0].assert_debug_log(["(sendtxrcncl received from already registered peer); disconnecting"]): - peer.send_message(create_sendtxrcncl_msg()) - peer.wait_for_disconnect() - - self.restart_node(0, []) - self.log.info('SENDTXRCNCL if no txreconciliation supported is ignored') - peer = self.nodes[0].add_p2p_connection(PeerNoVerack(), send_version=True, wait_for_verack=False) - with self.nodes[0].assert_debug_log(['ignored, as our node does not have txreconciliation enabled']): - peer.send_message(create_sendtxrcncl_msg()) - self.nodes[0].disconnect_p2ps() - - self.restart_node(0, ["-txreconciliation"]) - - self.log.info('SENDTXRCNCL with version=0 triggers a disconnect') - sendtxrcncl_low_version = create_sendtxrcncl_msg() - sendtxrcncl_low_version.version = 0 - peer = self.nodes[0].add_p2p_connection(PeerNoVerack(), send_version=True, wait_for_verack=False) - with self.nodes[0].assert_debug_log(["txreconciliation protocol violation"]): - peer.send_message(sendtxrcncl_low_version) - peer.wait_for_disconnect() - - self.log.info('SENDTXRCNCL with version=2 is valid') - sendtxrcncl_higher_version = create_sendtxrcncl_msg() - sendtxrcncl_higher_version.version = 2 - peer = self.nodes[0].add_p2p_connection(PeerNoVerack(), send_version=True, wait_for_verack=False) - with self.nodes[0].assert_debug_log(['Register peer=1']): - peer.send_message(sendtxrcncl_higher_version) - self.nodes[0].disconnect_p2ps() - - self.log.info('unexpected SENDTXRCNCL is ignored') - peer = self.nodes[0].add_p2p_connection(PeerNoVerack(), send_version=False, wait_for_verack=False) - old_version_msg = msg_version() - old_version_msg.nVersion = 70015 - old_version_msg.strSubVer = P2P_SUBVERSION - old_version_msg.nServices = P2P_SERVICES - old_version_msg.relay = 1 - peer.send_message(old_version_msg) - with self.nodes[0].assert_debug_log(['Ignore unexpected txreconciliation signal']): - peer.send_message(create_sendtxrcncl_msg()) - self.nodes[0].disconnect_p2ps() - - self.log.info('sending SENDTXRCNCL after sending VERACK triggers a disconnect') - peer = self.nodes[0].add_p2p_connection(P2PInterface()) - with self.nodes[0].assert_debug_log(["sendtxrcncl received after verack"]): - peer.send_message(create_sendtxrcncl_msg()) - peer.wait_for_disconnect() - - self.log.info('SENDTXRCNCL without WTXIDRELAY is ignored (recon state is erased after VERACK)') - peer = self.nodes[0].add_p2p_connection(PeerNoVerack(wtxidrelay=False), send_version=True, wait_for_verack=False) - with self.nodes[0].assert_debug_log(['Forget txreconciliation state of peer']): - peer.send_message(create_sendtxrcncl_msg()) - peer.send_message(msg_verack()) - self.nodes[0].disconnect_p2ps() - - # Now, *receiving* from *outbound*. 
-        self.log.info('SENDTXRCNCL if block-relay-only triggers a disconnect')
-        peer = self.nodes[0].add_outbound_p2p_connection(
-            PeerNoVerack(), wait_for_verack=False, p2p_idx=0, connection_type="block-relay-only")
-        with self.nodes[0].assert_debug_log(["we indicated no tx relay; disconnecting"]):
-            peer.send_message(create_sendtxrcncl_msg())
-        peer.wait_for_disconnect()
-
-
-if __name__ == '__main__':
-    SendTxRcnclTest(__file__).main()
+#!/usr/bin/env python3
+# Copyright (c) 2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test SENDTXRCNCL message
+"""
+
+from test_framework.messages import (
+    msg_sendtxrcncl,
+    msg_verack,
+    msg_version,
+    msg_wtxidrelay,
+    NODE_BLOOM,
+)
+from test_framework.p2p import (
+    P2PInterface,
+    P2P_SERVICES,
+    P2P_SUBVERSION,
+    P2P_VERSION,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+    assert_equal,
+    assert_not_equal,
+)
+
+class PeerNoVerack(P2PInterface):
+    def __init__(self, wtxidrelay=True):
+        super().__init__(wtxidrelay=wtxidrelay)
+
+    def on_version(self, message):
+        # Avoid sending verack in response to version.
+        # When calling add_p2p_connection, wait_for_verack=False must be set (see
+        # comment in add_p2p_connection).
+        self.send_version()
+        if message.nVersion >= 70016 and self.wtxidrelay:
+            self.send_message(msg_wtxidrelay())
+
+class SendTxrcnclReceiver(P2PInterface):
+    def __init__(self):
+        super().__init__()
+        self.sendtxrcncl_msg_received = None
+
+    def on_sendtxrcncl(self, message):
+        self.sendtxrcncl_msg_received = message
+
+
+class P2PFeelerReceiver(SendTxrcnclReceiver):
+    def on_version(self, message):
+        # feeler connections can not send any message other than their own version
+        self.send_version()
+
+
+class PeerTrackMsgOrder(P2PInterface):
+    def __init__(self):
+        super().__init__()
+        self.messages = []
+
+    def on_message(self, message):
+        super().on_message(message)
+        self.messages.append(message)
+
+def create_sendtxrcncl_msg():
+    sendtxrcncl_msg = msg_sendtxrcncl()
+    sendtxrcncl_msg.version = 1
+    sendtxrcncl_msg.salt = 2
+    return sendtxrcncl_msg
+
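Per BIP330, SENDTXRCNCL carries just two fields: a uint32 protocol version and a uint64 salt, each peer contributing its own salt half that is later combined to derive transaction short IDs. A hedged round-trip sketch of the payload built by `create_sendtxrcncl_msg` above (assuming the framework serializes these as little-endian fixed-width fields, as BIP330 specifies):

```python
from test_framework.messages import msg_sendtxrcncl

# The whole payload is 12 bytes: uint32 version || uint64 salt (little-endian).
msg = msg_sendtxrcncl()
msg.version = 1   # reconciliation protocol version
msg.salt = 2      # this side's half of the short-ID salt
assert msg.serialize() == (1).to_bytes(4, "little") + (2).to_bytes(8, "little")
```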
+class SendTxRcnclTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.num_nodes = 1
+        self.extra_args = [['-txreconciliation']]
+
+    def run_test(self):
+        # Check everything concerning *sending* SENDTXRCNCL
+        # First, *sending* to *inbound*.
+        self.log.info('SENDTXRCNCL sent to an inbound')
+        peer = self.nodes[0].add_p2p_connection(SendTxrcnclReceiver(), send_version=True, wait_for_verack=True)
+        assert peer.sendtxrcncl_msg_received
+        assert_equal(peer.sendtxrcncl_msg_received.version, 1)
+        self.nodes[0].disconnect_p2ps()
+
+        self.log.info('SENDTXRCNCL should be sent before VERACK')
+        peer = self.nodes[0].add_p2p_connection(PeerTrackMsgOrder(), send_version=True, wait_for_verack=True)
+        peer.wait_for_verack()
+        verack_index = [i for i, msg in enumerate(peer.messages) if msg.msgtype == b'verack'][0]
+        sendtxrcncl_index = [i for i, msg in enumerate(peer.messages) if msg.msgtype == b'sendtxrcncl'][0]
+        assert sendtxrcncl_index < verack_index
+        self.nodes[0].disconnect_p2ps()
+
+        self.log.info('SENDTXRCNCL on pre-WTXID version should not be sent')
+        peer = self.nodes[0].add_p2p_connection(SendTxrcnclReceiver(), send_version=False, wait_for_verack=False)
+        pre_wtxid_version_msg = msg_version()
+        pre_wtxid_version_msg.nVersion = 70015
+        pre_wtxid_version_msg.strSubVer = P2P_SUBVERSION
+        pre_wtxid_version_msg.nServices = P2P_SERVICES
+        pre_wtxid_version_msg.relay = 1
+        peer.send_message(pre_wtxid_version_msg)
+        peer.wait_for_verack()
+        assert not peer.sendtxrcncl_msg_received
+        self.nodes[0].disconnect_p2ps()
+
+        self.log.info('SENDTXRCNCL for fRelay=false should not be sent')
+        peer = self.nodes[0].add_p2p_connection(SendTxrcnclReceiver(), send_version=False, wait_for_verack=False)
+        no_txrelay_version_msg = msg_version()
+        no_txrelay_version_msg.nVersion = P2P_VERSION
+        no_txrelay_version_msg.strSubVer = P2P_SUBVERSION
+        no_txrelay_version_msg.nServices = P2P_SERVICES
+        no_txrelay_version_msg.relay = 0
+        peer.send_message(no_txrelay_version_msg)
+        peer.wait_for_verack()
+        assert not peer.sendtxrcncl_msg_received
+        self.nodes[0].disconnect_p2ps()
+
+        self.log.info('SENDTXRCNCL for fRelay=false should not be sent (with NODE_BLOOM offered)')
+        self.restart_node(0, ["-peerbloomfilters", "-txreconciliation"])
+        peer = self.nodes[0].add_p2p_connection(SendTxrcnclReceiver(), send_version=False, wait_for_verack=False)
+        no_txrelay_version_msg = msg_version()
+        no_txrelay_version_msg.nVersion = P2P_VERSION
+        no_txrelay_version_msg.strSubVer = P2P_SUBVERSION
+        no_txrelay_version_msg.nServices = P2P_SERVICES
+        no_txrelay_version_msg.relay = 0
+        peer.send_message(no_txrelay_version_msg)
+        peer.wait_for_verack()
+        assert_not_equal(peer.nServices & NODE_BLOOM, 0)
+        assert not peer.sendtxrcncl_msg_received
+        self.nodes[0].disconnect_p2ps()
+
+        # Now, *sending* to *outbound*.
+        self.log.info('SENDTXRCNCL sent to an outbound')
+        peer = self.nodes[0].add_outbound_p2p_connection(
+            SendTxrcnclReceiver(), wait_for_verack=True, p2p_idx=0, connection_type="outbound-full-relay")
+        assert peer.sendtxrcncl_msg_received
+        assert_equal(peer.sendtxrcncl_msg_received.version, 1)
+        self.nodes[0].disconnect_p2ps()
+
+        self.log.info('SENDTXRCNCL should not be sent if block-relay-only')
+        peer = self.nodes[0].add_outbound_p2p_connection(
+            SendTxrcnclReceiver(), wait_for_verack=True, p2p_idx=0, connection_type="block-relay-only")
+        assert not peer.sendtxrcncl_msg_received
+        self.nodes[0].disconnect_p2ps()
+
+        self.log.info("SENDTXRCNCL should not be sent if feeler")
+        peer = self.nodes[0].add_outbound_p2p_connection(P2PFeelerReceiver(), p2p_idx=0, connection_type="feeler")
+        assert not peer.sendtxrcncl_msg_received
+        self.nodes[0].disconnect_p2ps()
+
+        self.log.info("SENDTXRCNCL should not be sent if addrfetch")
+        peer = self.nodes[0].add_outbound_p2p_connection(
+            SendTxrcnclReceiver(), wait_for_verack=True, p2p_idx=0, connection_type="addr-fetch")
+        assert not peer.sendtxrcncl_msg_received
+        self.nodes[0].disconnect_p2ps()
+
+        self.log.info('SENDTXRCNCL not sent if -txreconciliation flag is not set')
+        self.restart_node(0, [])
+        peer = self.nodes[0].add_p2p_connection(SendTxrcnclReceiver(), send_version=True, wait_for_verack=True)
+        assert not peer.sendtxrcncl_msg_received
+        self.nodes[0].disconnect_p2ps()
+
+        self.log.info('SENDTXRCNCL not sent if blocksonly is set')
+        self.restart_node(0, ["-txreconciliation", "-blocksonly"])
+        peer = self.nodes[0].add_p2p_connection(SendTxrcnclReceiver(), send_version=True, wait_for_verack=True)
+        assert not peer.sendtxrcncl_msg_received
+        self.nodes[0].disconnect_p2ps()
+
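Taken together, the sending-side cases above pin down when the node announces SENDTXRCNCL. A hedged distillation as a predicate (the function and parameter names are mine, not Core's):

```python
def would_send_sendtxrcncl(*, txreconciliation_enabled, blocksonly, conn_type,
                           peer_version, peer_relay):
    """Summarizes the sending-side behavior exercised above (a sketch, not Core code)."""
    if not txreconciliation_enabled or blocksonly:
        return False   # -txreconciliation off, or -blocksonly set
    if conn_type in ("block-relay-only", "feeler", "addr-fetch"):
        return False   # no tx relay expected on these connection types
    if peer_version < 70016:
        return False   # peer predates WTXID relay
    if not peer_relay:
        return False   # peer set fRelay=false in its VERSION message
    return True        # inbound and outbound-full-relay peers

assert would_send_sendtxrcncl(txreconciliation_enabled=True, blocksonly=False,
                              conn_type="inbound", peer_version=70016, peer_relay=True)
```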
+ self.restart_node(0, ["-txreconciliation"])) + self.log.info('valid SENDTXRCNCL received')) + peer = self.nodes[0].add_p2p_connection(PeerNoVerack(), send_version=True, wait_for_verack=False)) + with self.nodes[0].assert_debug_log(["received: sendtxrcncl"]):) + peer.send_message(create_sendtxrcncl_msg())) + self.log.info('second SENDTXRCNCL triggers a disconnect')) + with self.nodes[0].assert_debug_log(["(sendtxrcncl received from already registered peer); disconnecting"]):) + peer.send_message(create_sendtxrcncl_msg())) + peer.wait_for_disconnect()) +) + self.restart_node(0, [])) + self.log.info('SENDTXRCNCL if no txreconciliation supported is ignored')) + peer = self.nodes[0].add_p2p_connection(PeerNoVerack(), send_version=True, wait_for_verack=False)) + with self.nodes[0].assert_debug_log(['ignored, as our node does not have txreconciliation enabled']):) + peer.send_message(create_sendtxrcncl_msg())) + self.nodes[0].disconnect_p2ps()) +) + self.restart_node(0, ["-txreconciliation"])) +) + self.log.info('SENDTXRCNCL with version=0 triggers a disconnect')) + sendtxrcncl_low_version = create_sendtxrcncl_msg()) + sendtxrcncl_low_version.version = 0) + peer = self.nodes[0].add_p2p_connection(PeerNoVerack(), send_version=True, wait_for_verack=False)) + with self.nodes[0].assert_debug_log(["txreconciliation protocol violation"]):) + peer.send_message(sendtxrcncl_low_version)) + peer.wait_for_disconnect()) +) + self.log.info('SENDTXRCNCL with version=2 is valid')) + sendtxrcncl_higher_version = create_sendtxrcncl_msg()) + sendtxrcncl_higher_version.version = 2) + peer = self.nodes[0].add_p2p_connection(PeerNoVerack(), send_version=True, wait_for_verack=False)) + with self.nodes[0].assert_debug_log(['Register peer=1']):) + peer.send_message(sendtxrcncl_higher_version)) + self.nodes[0].disconnect_p2ps()) +) + self.log.info('unexpected SENDTXRCNCL is ignored')) + peer = self.nodes[0].add_p2p_connection(PeerNoVerack(), send_version=False, wait_for_verack=False)) + old_version_msg = msg_version()) + old_version_msg.nVersion = 70015) + old_version_msg.strSubVer = P2P_SUBVERSION) + old_version_msg.nServices = P2P_SERVICES) + old_version_msg.relay = 1) + peer.send_message(old_version_msg)) + with self.nodes[0].assert_debug_log(['Ignore unexpected txreconciliation signal']):) + peer.send_message(create_sendtxrcncl_msg())) + self.nodes[0].disconnect_p2ps()) +) + self.log.info('sending SENDTXRCNCL after sending VERACK triggers a disconnect')) + peer = self.nodes[0].add_p2p_connection(P2PInterface())) + with self.nodes[0].assert_debug_log(["sendtxrcncl received after verack"]):) + peer.send_message(create_sendtxrcncl_msg())) + peer.wait_for_disconnect()) +) + self.log.info('SENDTXRCNCL without WTXIDRELAY is ignored (recon state is erased after VERACK)')) + peer = self.nodes[0].add_p2p_connection(PeerNoVerack(wtxidrelay=False), send_version=True, wait_for_verack=False)) + with self.nodes[0].assert_debug_log(['Forget txreconciliation state of peer']):) + peer.send_message(create_sendtxrcncl_msg())) + peer.send_message(msg_verack())) + self.nodes[0].disconnect_p2ps()) +) + # Now, *receiving* from *outbound*.) 
+        self.log.info('SENDTXRCNCL if block-relay-only triggers a disconnect')
+        peer = self.nodes[0].add_outbound_p2p_connection(
+            PeerNoVerack(), wait_for_verack=False, p2p_idx=0, connection_type="block-relay-only")
+        with self.nodes[0].assert_debug_log(["we indicated no tx relay; disconnecting"]):
+            peer.send_message(create_sendtxrcncl_msg())
+        peer.wait_for_disconnect()
+
+
+if __name__ == '__main__':
+    SendTxRcnclTest(__file__).main()
diff --git a/test/functional/p2p_v2_transport.py b/test/functional/p2p_v2_transport.py
index 9ae8575e84caf0..b899a63ad0147b 100755
--- a/test/functional/p2p_v2_transport.py
+++ b/test/functional/p2p_v2_transport.py
@@ -1,172 +1,172 @@
-#!/usr/bin/env python3
-# Copyright (c) 2021-present The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""
-Test v2 transport
-"""
-import socket
-
-from test_framework.messages import MAGIC_BYTES, NODE_P2P_V2
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (
-    assert_not_equal,
-    assert_equal,
-    p2p_port,
-    assert_raises_rpc_error
-)
-
-
-class V2TransportTest(BitcoinTestFramework):
-    def set_test_params(self):
-        self.setup_clean_chain = True
-        self.num_nodes = 5
-        self.extra_args = [["-v2transport=1"], ["-v2transport=1"], ["-v2transport=0"], ["-v2transport=0"], ["-v2transport=0"]]
-
-    def run_test(self):
-        sending_handshake = "start sending v2 handshake to peer"
-        downgrading_to_v1 = "retrying with v1 transport protocol for peer"
-        self.disconnect_nodes(0, 1)
-        self.disconnect_nodes(1, 2)
-        self.disconnect_nodes(2, 3)
-        self.disconnect_nodes(3, 4)
-
-        # verify local services
-        network_info = self.nodes[2].getnetworkinfo()
-        assert_equal(int(network_info["localservices"], 16) & NODE_P2P_V2, 0)
-        assert "P2P_V2" not in network_info["localservicesnames"]
-        network_info = self.nodes[1].getnetworkinfo()
-        assert_equal(int(network_info["localservices"], 16) & NODE_P2P_V2, NODE_P2P_V2)
-        assert "P2P_V2" in network_info["localservicesnames"]
-
-        # V2 nodes can sync with V2 nodes
-        assert_equal(self.nodes[0].getblockcount(), 0)
-        assert_equal(self.nodes[1].getblockcount(), 0)
-        with self.nodes[0].assert_debug_log(expected_msgs=[sending_handshake],
-                                            unexpected_msgs=[downgrading_to_v1]):
-            self.connect_nodes(0, 1, peer_advertises_v2=True)
-        self.generate(self.nodes[0], 5, sync_fun=lambda: self.sync_all(self.nodes[0:2]))
-        assert_equal(self.nodes[1].getblockcount(), 5)
-        # verify there is a v2 connection between node 0 and 1
-        node_0_info = self.nodes[0].getpeerinfo()
-        node_1_info = self.nodes[1].getpeerinfo()
-        assert_equal(len(node_0_info), 1)
-        assert_equal(len(node_1_info), 1)
-        assert_equal(node_0_info[0]["transport_protocol_type"], "v2")
-        assert_equal(node_1_info[0]["transport_protocol_type"], "v2")
-        assert_equal(len(node_0_info[0]["session_id"]), 64)
-        assert_equal(len(node_1_info[0]["session_id"]), 64)
-        assert_equal(node_0_info[0]["session_id"], node_1_info[0]["session_id"])
-
-        # V1 nodes can sync with each other
-        assert_equal(self.nodes[2].getblockcount(), 0)
-        assert_equal(self.nodes[3].getblockcount(), 0)
-
-        # addnode rpc error when v2transport requested but not enabled
-        ip_port = "127.0.0.1:{}".format(p2p_port(3))
-        assert_raises_rpc_error(-8, "Error: v2transport requested but not enabled (see -v2transport)", self.nodes[2].addnode, node=ip_port, command='add', v2transport=True)
-
-        with self.nodes[2].assert_debug_log(expected_msgs=[],
-
unexpected_msgs=[sending_handshake, downgrading_to_v1]): - self.connect_nodes(2, 3, peer_advertises_v2=False) - self.generate(self.nodes[2], 8, sync_fun=lambda: self.sync_all(self.nodes[2:4])) - assert_equal(self.nodes[3].getblockcount(), 8) - assert self.nodes[0].getbestblockhash() != self.nodes[2].getbestblockhash() - # verify there is a v1 connection between node 2 and 3 - node_2_info = self.nodes[2].getpeerinfo() - node_3_info = self.nodes[3].getpeerinfo() - assert_equal(len(node_2_info), 1) - assert_equal(len(node_3_info), 1) - assert_equal(node_2_info[0]["transport_protocol_type"], "v1") - assert_equal(node_3_info[0]["transport_protocol_type"], "v1") - assert_equal(len(node_2_info[0]["session_id"]), 0) - assert_equal(len(node_3_info[0]["session_id"]), 0) - - # V1 nodes can sync with V2 nodes - self.disconnect_nodes(0, 1) - self.disconnect_nodes(2, 3) - with self.nodes[2].assert_debug_log(expected_msgs=[], - unexpected_msgs=[sending_handshake, downgrading_to_v1]): - self.connect_nodes(2, 1, peer_advertises_v2=False) # cannot enable v2 on v1 node - self.sync_all(self.nodes[1:3]) - assert_equal(self.nodes[1].getblockcount(), 8) - assert self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash() - # verify there is a v1 connection between node 1 and 2 - node_1_info = self.nodes[1].getpeerinfo() - node_2_info = self.nodes[2].getpeerinfo() - assert_equal(len(node_1_info), 1) - assert_equal(len(node_2_info), 1) - assert_equal(node_1_info[0]["transport_protocol_type"], "v1") - assert_equal(node_2_info[0]["transport_protocol_type"], "v1") - assert_equal(len(node_1_info[0]["session_id"]), 0) - assert_equal(len(node_2_info[0]["session_id"]), 0) - - # V2 nodes can sync with V1 nodes - self.disconnect_nodes(1, 2) - with self.nodes[0].assert_debug_log(expected_msgs=[], - unexpected_msgs=[sending_handshake, downgrading_to_v1]): - self.connect_nodes(0, 3, peer_advertises_v2=False) - self.sync_all([self.nodes[0], self.nodes[3]]) - assert_equal(self.nodes[0].getblockcount(), 8) - # verify there is a v1 connection between node 0 and 3 - node_0_info = self.nodes[0].getpeerinfo() - node_3_info = self.nodes[3].getpeerinfo() - assert_equal(len(node_0_info), 1) - assert_equal(len(node_3_info), 1) - assert_equal(node_0_info[0]["transport_protocol_type"], "v1") - assert_equal(node_3_info[0]["transport_protocol_type"], "v1") - assert_equal(len(node_0_info[0]["session_id"]), 0) - assert_equal(len(node_3_info[0]["session_id"]), 0) - - # V2 node mines another block and everyone gets it - self.connect_nodes(0, 1, peer_advertises_v2=True) - self.connect_nodes(1, 2, peer_advertises_v2=False) - self.generate(self.nodes[1], 1, sync_fun=lambda: self.sync_all(self.nodes[0:4])) - assert_equal(self.nodes[0].getblockcount(), 9) # sync_all() verifies tip hashes match - - # V1 node mines another block and everyone gets it - self.generate(self.nodes[3], 2, sync_fun=lambda: self.sync_all(self.nodes[0:4])) - assert_equal(self.nodes[2].getblockcount(), 11) # sync_all() verifies tip hashes match - - assert_equal(self.nodes[4].getblockcount(), 0) - # Peer 4 is v1 p2p, but is falsely advertised as v2. 
-        with self.nodes[1].assert_debug_log(expected_msgs=[sending_handshake, downgrading_to_v1]):
-            self.connect_nodes(1, 4, peer_advertises_v2=True)
-        self.sync_all()
-        assert_equal(self.nodes[4].getblockcount(), 11)
-
-        # Check v1 prefix detection
-        V1_PREFIX = MAGIC_BYTES["regtest"] + b"version\x00\x00\x00\x00\x00"
-        assert_equal(len(V1_PREFIX), 16)
-        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-            with self.nodes[0].wait_for_new_peer():
-                s.connect(("127.0.0.1", p2p_port(0)))
-            s.sendall(V1_PREFIX[:-1])
-            assert_equal(self.nodes[0].getpeerinfo()[-1]["transport_protocol_type"], "detecting")
-            s.sendall(bytes([V1_PREFIX[-1]]))  # send out last prefix byte
-            self.wait_until(lambda: self.nodes[0].getpeerinfo()[-1]["transport_protocol_type"] == "v1")
-
-        # Check wrong network prefix detection (hits if the next 12 bytes correspond to a v1 version message)
-        wrong_network_magic_prefix = MAGIC_BYTES["signet"] + V1_PREFIX[4:]
-        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-            with self.nodes[0].wait_for_new_peer():
-                s.connect(("127.0.0.1", p2p_port(0)))
-            with self.nodes[0].assert_debug_log(["V2 transport error: V1 peer with wrong MessageStart"]):
-                s.sendall(wrong_network_magic_prefix + b"somepayload")
-
-        # Check detection of missing garbage terminator (hits after fixed amount of data if terminator never matches garbage)
-        MAX_KEY_GARB_AND_GARBTERM_LEN = 64 + 4095 + 16
-        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-            with self.nodes[0].wait_for_new_peer():
-                s.connect(("127.0.0.1", p2p_port(0)))
-            s.sendall(b'\x00' * (MAX_KEY_GARB_AND_GARBTERM_LEN - 1))
-            self.wait_until(lambda: self.nodes[0].getpeerinfo()[-1]["bytesrecv"] == MAX_KEY_GARB_AND_GARBTERM_LEN - 1)
-            with self.nodes[0].assert_debug_log(["V2 transport error: missing garbage terminator"]):
-                peer_id = self.nodes[0].getpeerinfo()[-1]["id"]
-                s.sendall(b'\x00')  # send out last byte
-                # should disconnect immediately
-                self.wait_until(lambda: not peer_id in [p["id"] for p in self.nodes[0].getpeerinfo()])
-
-
-if __name__ == '__main__':
-    V2TransportTest(__file__).main()
+#!/usr/bin/env python3
+# Copyright (c) 2021-present The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+""") +Test v2 transport) +""") +import socket) +) +from test_framework.messages import MAGIC_BYTES, NODE_P2P_V2) +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.util import () + assert_not_equal,) + assert_equal,) + p2p_port,) + assert_raises_rpc_error) +)) +) +) +class V2TransportTest(BitcoinTestFramework):) + def set_test_params(self):) + self.setup_clean_chain = True) + self.num_nodes = 5) + self.extra_args = [["-v2transport=1"], ["-v2transport=1"], ["-v2transport=0"], ["-v2transport=0"], ["-v2transport=0"]]) +) + def run_test(self):) + sending_handshake = "start sending v2 handshake to peer") + downgrading_to_v1 = "retrying with v1 transport protocol for peer") + self.disconnect_nodes(0, 1)) + self.disconnect_nodes(1, 2)) + self.disconnect_nodes(2, 3)) + self.disconnect_nodes(3, 4)) +) + # verify local services) + network_info = self.nodes[2].getnetworkinfo()) + assert_equal(int(network_info["localservices"], 16) & NODE_P2P_V2, 0)) + assert "P2P_V2" not in network_info["localservicesnames"]) + network_info = self.nodes[1].getnetworkinfo()) + assert_equal(int(network_info["localservices"], 16) & NODE_P2P_V2, NODE_P2P_V2)) + assert "P2P_V2" in network_info["localservicesnames"]) +) + # V2 nodes can sync with V2 nodes) + assert_equal(self.nodes[0].getblockcount(), 0)) + assert_equal(self.nodes[1].getblockcount(), 0)) + with self.nodes[0].assert_debug_log(expected_msgs=[sending_handshake],) + unexpected_msgs=[downgrading_to_v1]):) + self.connect_nodes(0, 1, peer_advertises_v2=True)) + self.generate(self.nodes[0], 5, sync_fun=lambda: self.sync_all(self.nodes[0:2]))) + assert_equal(self.nodes[1].getblockcount(), 5)) + # verify there is a v2 connection between node 0 and 1) + node_0_info = self.nodes[0].getpeerinfo()) + node_1_info = self.nodes[1].getpeerinfo()) + assert_equal(len(node_0_info), 1)) + assert_equal(len(node_1_info), 1)) + assert_equal(node_0_info[0]["transport_protocol_type"], "v2")) + assert_equal(node_1_info[0]["transport_protocol_type"], "v2")) + assert_equal(len(node_0_info[0]["session_id"]), 64)) + assert_equal(len(node_1_info[0]["session_id"]), 64)) + assert_equal(node_0_info[0]["session_id"], node_1_info[0]["session_id"])) +) + # V1 nodes can sync with each other) + assert_equal(self.nodes[2].getblockcount(), 0)) + assert_equal(self.nodes[3].getblockcount(), 0)) +) + # addnode rpc error when v2transport requested but not enabled) + ip_port = "127.0.0.1:{}".format(p2p_port(3))) + assert_raises_rpc_error(-8, "Error: v2transport requested but not enabled (see -v2transport)", self.nodes[2].addnode, node=ip_port, command='add', v2transport=True)) +) + with self.nodes[2].assert_debug_log(expected_msgs=[],) + unexpected_msgs=[sending_handshake, downgrading_to_v1]):) + self.connect_nodes(2, 3, peer_advertises_v2=False)) + self.generate(self.nodes[2], 8, sync_fun=lambda: self.sync_all(self.nodes[2:4]))) + assert_equal(self.nodes[3].getblockcount(), 8)) + assert_not_equal(self.nodes[0].getbestblockhash(), self.nodes[2].getbestblockhash())) + # verify there is a v1 connection between node 2 and 3) + node_2_info = self.nodes[2].getpeerinfo()) + node_3_info = self.nodes[3].getpeerinfo()) + assert_equal(len(node_2_info), 1)) + assert_equal(len(node_3_info), 1)) + assert_equal(node_2_info[0]["transport_protocol_type"], "v1")) + assert_equal(node_3_info[0]["transport_protocol_type"], "v1")) + assert_equal(len(node_2_info[0]["session_id"]), 0)) + assert_equal(len(node_3_info[0]["session_id"]), 0)) +) + # V1 nodes can sync with V2 nodes) + 
+        # V1 nodes can sync with V2 nodes
+        self.disconnect_nodes(0, 1)
+        self.disconnect_nodes(2, 3)
+        with self.nodes[2].assert_debug_log(expected_msgs=[],
+                                            unexpected_msgs=[sending_handshake, downgrading_to_v1]):
+            self.connect_nodes(2, 1, peer_advertises_v2=False)  # cannot enable v2 on v1 node
+        self.sync_all(self.nodes[1:3])
+        assert_equal(self.nodes[1].getblockcount(), 8)
+        assert_not_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
+        # verify there is a v1 connection between node 1 and 2
+        node_1_info = self.nodes[1].getpeerinfo()
+        node_2_info = self.nodes[2].getpeerinfo()
+        assert_equal(len(node_1_info), 1)
+        assert_equal(len(node_2_info), 1)
+        assert_equal(node_1_info[0]["transport_protocol_type"], "v1")
+        assert_equal(node_2_info[0]["transport_protocol_type"], "v1")
+        assert_equal(len(node_1_info[0]["session_id"]), 0)
+        assert_equal(len(node_2_info[0]["session_id"]), 0)
+
+        # V2 nodes can sync with V1 nodes
+        self.disconnect_nodes(1, 2)
+        with self.nodes[0].assert_debug_log(expected_msgs=[],
+                                            unexpected_msgs=[sending_handshake, downgrading_to_v1]):
+            self.connect_nodes(0, 3, peer_advertises_v2=False)
+        self.sync_all([self.nodes[0], self.nodes[3]])
+        assert_equal(self.nodes[0].getblockcount(), 8)
+        # verify there is a v1 connection between node 0 and 3
+        node_0_info = self.nodes[0].getpeerinfo()
+        node_3_info = self.nodes[3].getpeerinfo()
+        assert_equal(len(node_0_info), 1)
+        assert_equal(len(node_3_info), 1)
+        assert_equal(node_0_info[0]["transport_protocol_type"], "v1")
+        assert_equal(node_3_info[0]["transport_protocol_type"], "v1")
+        assert_equal(len(node_0_info[0]["session_id"]), 0)
+        assert_equal(len(node_3_info[0]["session_id"]), 0)
+
+        # V2 node mines another block and everyone gets it
+        self.connect_nodes(0, 1, peer_advertises_v2=True)
+        self.connect_nodes(1, 2, peer_advertises_v2=False)
+        self.generate(self.nodes[1], 1, sync_fun=lambda: self.sync_all(self.nodes[0:4]))
+        assert_equal(self.nodes[0].getblockcount(), 9)  # sync_all() verifies tip hashes match
+
+        # V1 node mines another block and everyone gets it
+        self.generate(self.nodes[3], 2, sync_fun=lambda: self.sync_all(self.nodes[0:4]))
+        assert_equal(self.nodes[2].getblockcount(), 11)  # sync_all() verifies tip hashes match
+
+        assert_equal(self.nodes[4].getblockcount(), 0)
+        # Peer 4 is v1 p2p, but is falsely advertised as v2.
+        with self.nodes[1].assert_debug_log(expected_msgs=[sending_handshake, downgrading_to_v1]):
+            self.connect_nodes(1, 4, peer_advertises_v2=True)
+        self.sync_all()
+        assert_equal(self.nodes[4].getblockcount(), 11)
+
+        # Check v1 prefix detection
+        V1_PREFIX = MAGIC_BYTES["regtest"] + b"version\x00\x00\x00\x00\x00"
+        assert_equal(len(V1_PREFIX), 16)
+        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+            with self.nodes[0].wait_for_new_peer():
+                s.connect(("127.0.0.1", p2p_port(0)))
+            s.sendall(V1_PREFIX[:-1])
+            assert_equal(self.nodes[0].getpeerinfo()[-1]["transport_protocol_type"], "detecting")
+            s.sendall(bytes([V1_PREFIX[-1]]))  # send out last prefix byte
+            self.wait_until(lambda: self.nodes[0].getpeerinfo()[-1]["transport_protocol_type"] == "v1")
+
+        # Check wrong network prefix detection (hits if the next 12 bytes correspond to a v1 version message)
+        wrong_network_magic_prefix = MAGIC_BYTES["signet"] + V1_PREFIX[4:]
+        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+            with self.nodes[0].wait_for_new_peer():
+                s.connect(("127.0.0.1", p2p_port(0)))
+            with self.nodes[0].assert_debug_log(["V2 transport error: V1 peer with wrong MessageStart"]):
+                s.sendall(wrong_network_magic_prefix + b"somepayload")
+
+        # Check detection of missing garbage terminator (hits after fixed amount of data if terminator never matches garbage)
+        MAX_KEY_GARB_AND_GARBTERM_LEN = 64 + 4095 + 16
+        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+            with self.nodes[0].wait_for_new_peer():
+                s.connect(("127.0.0.1", p2p_port(0)))
+            s.sendall(b'\x00' * (MAX_KEY_GARB_AND_GARBTERM_LEN - 1))
+            self.wait_until(lambda: self.nodes[0].getpeerinfo()[-1]["bytesrecv"] == MAX_KEY_GARB_AND_GARBTERM_LEN - 1)
+            with self.nodes[0].assert_debug_log(["V2 transport error: missing garbage terminator"]):
+                peer_id = self.nodes[0].getpeerinfo()[-1]["id"]
+                s.sendall(b'\x00')  # send out last byte
+                # should disconnect immediately
+                self.wait_until(lambda: not peer_id in [p["id"] for p in self.nodes[0].getpeerinfo()])
+
+
+if __name__ == '__main__':
+    V2TransportTest(__file__).main()
diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py
index cca71b0c37e41c..dee88301b68d5b 100755
--- a/test/functional/rpc_blockchain.py
+++ b/test/functional/rpc_blockchain.py
@@ -1,697 +1,697 @@
-#!/usr/bin/env python3
-# Copyright (c) 2014-2022 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test RPCs related to blockchainstate.
-
-Test the following RPCs:
-    - getblockchaininfo
-    - getdeploymentinfo
-    - getchaintxstats
-    - gettxoutsetinfo
-    - getblockheader
-    - getdifficulty
-    - getnetworkhashps
-    - waitforblockheight
-    - getblock
-    - getblockhash
-    - getbestblockhash
-    - verifychain
-
-Tests correspond to code in rpc/blockchain.cpp.
-""" - -from decimal import Decimal -import http.client -import os -import subprocess -import textwrap - -from test_framework.blocktools import ( - MAX_FUTURE_BLOCK_TIME, - TIME_GENESIS_BLOCK, - create_block, - create_coinbase, - create_tx_with_script, -) -from test_framework.messages import ( - CBlockHeader, - COIN, - from_hex, - msg_block, -) -from test_framework.p2p import P2PInterface -from test_framework.script import hash256, OP_TRUE -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_not_equal, - assert_equal, - assert_greater_than, - assert_greater_than_or_equal, - assert_raises, - assert_raises_rpc_error, - assert_is_hex_string, - assert_is_hash_string, -) -from test_framework.wallet import MiniWallet - - -HEIGHT = 200 # blocks mined -TIME_RANGE_STEP = 600 # ten-minute steps -TIME_RANGE_MTP = TIME_GENESIS_BLOCK + (HEIGHT - 6) * TIME_RANGE_STEP -TIME_RANGE_TIP = TIME_GENESIS_BLOCK + (HEIGHT - 1) * TIME_RANGE_STEP -TIME_RANGE_END = TIME_GENESIS_BLOCK + HEIGHT * TIME_RANGE_STEP -DIFFICULTY_ADJUSTMENT_INTERVAL = 144 - - -class BlockchainTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - self.supports_cli = False - - def run_test(self): - self.wallet = MiniWallet(self.nodes[0]) - self._test_prune_disk_space() - self.mine_chain() - self._test_max_future_block_time() - self.restart_node( - 0, - extra_args=[ - "-stopatheight=207", - "-checkblocks=-1", # Check all blocks - "-prune=1", # Set pruning after rescan is complete - ], - ) - - self._test_getblockchaininfo() - self._test_getchaintxstats() - self._test_gettxoutsetinfo() - self._test_getblockheader() - self._test_getdifficulty() - self._test_getnetworkhashps() - self._test_stopatheight() - self._test_waitforblock() # also tests waitfornewblock - self._test_waitforblockheight() - self._test_getblock() - self._test_getdeploymentinfo() - self._test_y2106() - assert self.nodes[0].verifychain(4, 0) - - def mine_chain(self): - self.log.info(f"Generate {HEIGHT} blocks after the genesis block in ten-minute steps") - for t in range(TIME_GENESIS_BLOCK, TIME_RANGE_END, TIME_RANGE_STEP): - self.nodes[0].setmocktime(t) - self.generate(self.wallet, 1) - assert_equal(self.nodes[0].getblockchaininfo()['blocks'], HEIGHT) - - def _test_prune_disk_space(self): - self.log.info("Test that a manually pruned node does not run into " - "integer overflow on first start up") - self.restart_node(0, extra_args=["-prune=1"]) - self.log.info("Avoid warning when assumed chain size is enough") - self.restart_node(0, extra_args=["-prune=123456789"]) - - def _test_max_future_block_time(self): - self.stop_node(0) - self.log.info("A block tip of more than MAX_FUTURE_BLOCK_TIME in the future raises an error") - self.nodes[0].assert_start_raises_init_error( - extra_args=[f"-mocktime={TIME_RANGE_TIP - MAX_FUTURE_BLOCK_TIME - 1}"], - expected_msg=": The block database contains a block which appears to be from the future." - " This may be due to your computer's date and time being set incorrectly." 
- f" Only rebuild the block database if you are sure that your computer's date and time are correct.{os.linesep}" - "Please restart with -reindex or -reindex-chainstate to recover.", - ) - self.log.info("A block tip of MAX_FUTURE_BLOCK_TIME in the future is fine") - self.start_node(0, extra_args=[f"-mocktime={TIME_RANGE_TIP - MAX_FUTURE_BLOCK_TIME}"]) - - def _test_getblockchaininfo(self): - self.log.info("Test getblockchaininfo") - - keys = [ - 'bestblockhash', - 'blocks', - 'chain', - 'chainwork', - 'difficulty', - 'headers', - 'initialblockdownload', - 'mediantime', - 'pruned', - 'size_on_disk', - 'time', - 'verificationprogress', - 'warnings', - ] - res = self.nodes[0].getblockchaininfo() - - assert_equal(res['time'], TIME_RANGE_END - TIME_RANGE_STEP) - assert_equal(res['mediantime'], TIME_RANGE_MTP) - - # result should have these additional pruning keys if manual pruning is enabled - assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys)) - - # size_on_disk should be > 0 - assert_greater_than(res['size_on_disk'], 0) - - # pruneheight should be greater or equal to 0 - assert_greater_than_or_equal(res['pruneheight'], 0) - - # check other pruning fields given that prune=1 - assert res['pruned'] - assert not res['automatic_pruning'] - - self.restart_node(0, ['-stopatheight=207']) - res = self.nodes[0].getblockchaininfo() - # should have exact keys - assert_equal(sorted(res.keys()), keys) - - self.stop_node(0) - self.nodes[0].assert_start_raises_init_error( - extra_args=['-testactivationheight=name@2'], - expected_msg='Error: Invalid name (name@2) for -testactivationheight=name@height.', - ) - self.nodes[0].assert_start_raises_init_error( - extra_args=['-testactivationheight=bip34@-2'], - expected_msg='Error: Invalid height value (bip34@-2) for -testactivationheight=name@height.', - ) - self.nodes[0].assert_start_raises_init_error( - extra_args=['-testactivationheight='], - expected_msg='Error: Invalid format () for -testactivationheight=name@height.', - ) - self.start_node(0, extra_args=[ - '-stopatheight=207', - '-prune=550', - ]) - - res = self.nodes[0].getblockchaininfo() - # result should have these additional pruning keys if prune=550 - assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning', 'prune_target_size'] + keys)) - - # check related fields - assert res['pruned'] - assert_equal(res['pruneheight'], 0) - assert res['automatic_pruning'] - assert_equal(res['prune_target_size'], 576716800) - assert_greater_than(res['size_on_disk'], 0) - - def check_signalling_deploymentinfo_result(self, gdi_result, height, blockhash, status_next): - assert height >= 144 and height <= 287 - - assert_equal(gdi_result, { - "hash": blockhash, - "height": height, - "deployments": { - 'bip34': {'type': 'buried', 'active': True, 'height': 2}, - 'bip66': {'type': 'buried', 'active': True, 'height': 3}, - 'bip65': {'type': 'buried', 'active': True, 'height': 4}, - 'csv': {'type': 'buried', 'active': True, 'height': 5}, - 'segwit': {'type': 'buried', 'active': True, 'height': 6}, - 'testdummy': { - 'type': 'bip9', - 'bip9': { - 'bit': 28, - 'start_time': 0, - 'timeout': 0x7fffffffffffffff, # testdummy does not have a timeout so is set to the max int64 value - 'min_activation_height': 0, - 'status': 'started', - 'status_next': status_next, - 'since': 144, - 'statistics': { - 'period': 144, - 'threshold': 108, - 'elapsed': height - 143, - 'count': height - 143, - 'possible': True, - }, - 'signalling': '#'*(height-143), - }, - 'active': False - }, - 'taproot': 
{
-            'type': 'bip9',
-            'bip9': {
-                'start_time': -1,
-                'timeout': 9223372036854775807,
-                'min_activation_height': 0,
-                'status': 'active',
-                'status_next': 'active',
-                'since': 0,
-            },
-            'height': 0,
-            'active': True
-        }
-    }
-})
-
-    def _test_getdeploymentinfo(self):
-        # Note: continues past -stopatheight height, so must be invoked
-        # after _test_stopatheight
-
-        self.log.info("Test getdeploymentinfo")
-        self.stop_node(0)
-        self.start_node(0, extra_args=[
-            '-testactivationheight=bip34@2',
-            '-testactivationheight=dersig@3',
-            '-testactivationheight=cltv@4',
-            '-testactivationheight=csv@5',
-            '-testactivationheight=segwit@6',
+#!/usr/bin/env python3
+# Copyright (c) 2014-2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test RPCs related to blockchainstate.
+
+Test the following RPCs:
+    - getblockchaininfo
+    - getdeploymentinfo
+    - getchaintxstats
+    - gettxoutsetinfo
+    - getblockheader
+    - getdifficulty
+    - getnetworkhashps
+    - waitforblockheight
+    - getblock
+    - getblockhash
+    - getbestblockhash
+    - verifychain
+
+Tests correspond to code in rpc/blockchain.cpp.
+"""
+
+from decimal import Decimal
+import http.client
+import os
+import subprocess
+import textwrap
+
+from test_framework.blocktools import (
+    MAX_FUTURE_BLOCK_TIME,
+    TIME_GENESIS_BLOCK,
+    create_block,
+    create_coinbase,
+    create_tx_with_script,
+)
+from test_framework.messages import (
+    CBlockHeader,
+    COIN,
+    from_hex,
+    msg_block,
+)
+from test_framework.p2p import P2PInterface
+from test_framework.script import hash256, OP_TRUE
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+    assert_not_equal,
+    assert_equal,
+    assert_greater_than,
+    assert_greater_than_or_equal,
+    assert_raises,
+    assert_raises_rpc_error,
+    assert_is_hex_string,
+    assert_is_hash_string,
+)
+from test_framework.wallet import MiniWallet
+
+
+HEIGHT = 200  # blocks mined
+TIME_RANGE_STEP = 600  # ten-minute steps
+TIME_RANGE_MTP = TIME_GENESIS_BLOCK + (HEIGHT - 6) * TIME_RANGE_STEP
+TIME_RANGE_TIP = TIME_GENESIS_BLOCK + (HEIGHT - 1) * TIME_RANGE_STEP
+TIME_RANGE_END = TIME_GENESIS_BLOCK + HEIGHT * TIME_RANGE_STEP
+DIFFICULTY_ADJUSTMENT_INTERVAL = 144
+
+
+class BlockchainTest(BitcoinTestFramework):
+    def set_test_params(self):
+        self.setup_clean_chain = True
+        self.num_nodes = 1
+        self.supports_cli = False
+
+    def run_test(self):
+        self.wallet = MiniWallet(self.nodes[0])
+        self._test_prune_disk_space()
+        self.mine_chain()
+        self._test_max_future_block_time()
+        self.restart_node(
+            0,
+            extra_args=[
+                "-stopatheight=207",
+                "-checkblocks=-1",  # Check all blocks
+                "-prune=1",  # Set pruning after rescan is complete
+            ],
+        )
+
+        self._test_getblockchaininfo()
+        self._test_getchaintxstats()
+        self._test_gettxoutsetinfo()
+        self._test_getblockheader()
+        self._test_getdifficulty()
+        self._test_getnetworkhashps()
+        self._test_stopatheight()
+        self._test_waitforblock()  # also tests waitfornewblock
+        self._test_waitforblockheight()
+        self._test_getblock()
+        self._test_getdeploymentinfo()
+        self._test_y2106()
+        assert self.nodes[0].verifychain(4, 0)
+
+    def mine_chain(self):
+        self.log.info(f"Generate {HEIGHT} blocks after the genesis block in ten-minute steps")
+        for t in range(TIME_GENESIS_BLOCK, TIME_RANGE_END, TIME_RANGE_STEP):
+            self.nodes[0].setmocktime(t)
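The TIME_RANGE_MTP constant above encodes the median-time-past rule: MTP is the median of the last 11 block timestamps, which for a perfectly spaced chain is the 6th-newest block. A quick check of the arithmetic (standalone sketch, reusing the constants defined above):

```python
HEIGHT = 200
TIME_RANGE_STEP = 600

# mine_chain() gives the block at height h the time TIME_GENESIS_BLOCK + (h - 1) * TIME_RANGE_STEP,
# so the last 11 blocks (heights 190..200) carry offsets of 189..199 steps.
last_11 = [(h - 1) * TIME_RANGE_STEP for h in range(HEIGHT - 10, HEIGHT + 1)]
median = sorted(last_11)[5]
assert median == (HEIGHT - 6) * TIME_RANGE_STEP   # hence TIME_RANGE_MTP = genesis + (HEIGHT - 6) steps
```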
1)) + assert_equal(self.nodes[0].getblockchaininfo()['blocks'], HEIGHT)) +) + def _test_prune_disk_space(self):) + self.log.info("Test that a manually pruned node does not run into ") + "integer overflow on first start up")) + self.restart_node(0, extra_args=["-prune=1"])) + self.log.info("Avoid warning when assumed chain size is enough")) + self.restart_node(0, extra_args=["-prune=123456789"])) +) + def _test_max_future_block_time(self):) + self.stop_node(0)) + self.log.info("A block tip of more than MAX_FUTURE_BLOCK_TIME in the future raises an error")) + self.nodes[0].assert_start_raises_init_error() + extra_args=[f"-mocktime={TIME_RANGE_TIP - MAX_FUTURE_BLOCK_TIME - 1}"],) + expected_msg=": The block database contains a block which appears to be from the future.") + " This may be due to your computer's date and time being set incorrectly.") + f" Only rebuild the block database if you are sure that your computer's date and time are correct.{os.linesep}") + "Please restart with -reindex or -reindex-chainstate to recover.",) + )) + self.log.info("A block tip of MAX_FUTURE_BLOCK_TIME in the future is fine")) + self.start_node(0, extra_args=[f"-mocktime={TIME_RANGE_TIP - MAX_FUTURE_BLOCK_TIME}"])) +) + def _test_getblockchaininfo(self):) + self.log.info("Test getblockchaininfo")) +) + keys = [) + 'bestblockhash',) + 'blocks',) + 'chain',) + 'chainwork',) + 'difficulty',) + 'headers',) + 'initialblockdownload',) + 'mediantime',) + 'pruned',) + 'size_on_disk',) + 'time',) + 'verificationprogress',) + 'warnings',) ]) - - gbci207 = self.nodes[0].getblockchaininfo() - self.check_signalling_deploymentinfo_result(self.nodes[0].getdeploymentinfo(), gbci207["blocks"], gbci207["bestblockhash"], "started") - - # block just prior to lock in - self.generate(self.wallet, 287 - gbci207["blocks"]) - gbci287 = self.nodes[0].getblockchaininfo() - self.check_signalling_deploymentinfo_result(self.nodes[0].getdeploymentinfo(), gbci287["blocks"], gbci287["bestblockhash"], "locked_in") - - # calling with an explicit hash works - self.check_signalling_deploymentinfo_result(self.nodes[0].getdeploymentinfo(gbci207["bestblockhash"]), gbci207["blocks"], gbci207["bestblockhash"], "started") - - def _test_y2106(self): - self.log.info("Check that block timestamps work until year 2106") - self.generate(self.nodes[0], 8)[-1] - time_2106 = 2**32 - 1 - self.nodes[0].setmocktime(time_2106) - last = self.generate(self.nodes[0], 6)[-1] - assert_equal(self.nodes[0].getblockheader(last)["mediantime"], time_2106) - - def _test_getchaintxstats(self): - self.log.info("Test getchaintxstats") - - # Test `getchaintxstats` invalid extra parameters - assert_raises_rpc_error(-1, 'getchaintxstats', self.nodes[0].getchaintxstats, 0, '', 0) - - # Test `getchaintxstats` invalid `nblocks` - assert_raises_rpc_error(-3, "JSON value of type string is not of expected type number", self.nodes[0].getchaintxstats, '') - assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, -1) - assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, self.nodes[0].getblockcount()) - - # Test `getchaintxstats` invalid `blockhash` - assert_raises_rpc_error(-3, "JSON value of type number is not of expected type string", self.nodes[0].getchaintxstats, blockhash=0) - assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 1, for '0')", self.nodes[0].getchaintxstats, blockhash='0') - assert_raises_rpc_error(-8, 
"blockhash must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getchaintxstats, blockhash='ZZZ0000000000000000000000000000000000000000000000000000000000000') - assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getchaintxstats, blockhash='0000000000000000000000000000000000000000000000000000000000000000') - blockhash = self.nodes[0].getblockhash(HEIGHT) - self.nodes[0].invalidateblock(blockhash) - assert_raises_rpc_error(-8, "Block is not in main chain", self.nodes[0].getchaintxstats, blockhash=blockhash) - self.nodes[0].reconsiderblock(blockhash) - - chaintxstats = self.nodes[0].getchaintxstats(nblocks=1) - # 200 txs plus genesis tx - assert_equal(chaintxstats['txcount'], HEIGHT + 1) - # tx rate should be 1 per 10 minutes, or 1/600 - # we have to round because of binary math - assert_equal(round(chaintxstats['txrate'] * TIME_RANGE_STEP, 10), Decimal(1)) - - b1_hash = self.nodes[0].getblockhash(1) - b1 = self.nodes[0].getblock(b1_hash) - b200_hash = self.nodes[0].getblockhash(HEIGHT) - b200 = self.nodes[0].getblock(b200_hash) - time_diff = b200['mediantime'] - b1['mediantime'] - - chaintxstats = self.nodes[0].getchaintxstats() - assert_equal(chaintxstats['time'], b200['time']) - assert_equal(chaintxstats['txcount'], HEIGHT + 1) - assert_equal(chaintxstats['window_final_block_hash'], b200_hash) - assert_equal(chaintxstats['window_final_block_height'], HEIGHT ) - assert_equal(chaintxstats['window_block_count'], HEIGHT - 1) - assert_equal(chaintxstats['window_tx_count'], HEIGHT - 1) - assert_equal(chaintxstats['window_interval'], time_diff) - assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(HEIGHT - 1)) - - chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1_hash) - assert_equal(chaintxstats['time'], b1['time']) - assert_equal(chaintxstats['txcount'], 2) - assert_equal(chaintxstats['window_final_block_hash'], b1_hash) - assert_equal(chaintxstats['window_final_block_height'], 1) - assert_equal(chaintxstats['window_block_count'], 0) - assert 'window_tx_count' not in chaintxstats - assert 'window_interval' not in chaintxstats - assert 'txrate' not in chaintxstats - - def _test_gettxoutsetinfo(self): - node = self.nodes[0] - res = node.gettxoutsetinfo() - - assert_equal(res['total_amount'], Decimal('8725.00000000')) - assert_equal(res['transactions'], HEIGHT) - assert_equal(res['height'], HEIGHT) - assert_equal(res['txouts'], HEIGHT) - assert_equal(res['bogosize'], 16800), - assert_equal(res['bestblock'], node.getblockhash(HEIGHT)) - size = res['disk_size'] - assert size > 6400 - assert size < 64000 - assert_equal(len(res['bestblock']), 64) - assert_equal(len(res['hash_serialized_3']), 64) - - self.log.info("Test gettxoutsetinfo works for blockchain with just the genesis block") - b1hash = node.getblockhash(1) - node.invalidateblock(b1hash) - - res2 = node.gettxoutsetinfo() - assert_equal(res2['transactions'], 0) - assert_equal(res2['total_amount'], Decimal('0')) - assert_equal(res2['height'], 0) - assert_equal(res2['txouts'], 0) - assert_equal(res2['bogosize'], 0), - assert_equal(res2['bestblock'], node.getblockhash(0)) - assert_equal(len(res2['hash_serialized_3']), 64) - - self.log.info("Test gettxoutsetinfo returns the same result after invalidate/reconsider block") - node.reconsiderblock(b1hash) - - res3 = node.gettxoutsetinfo() - # The field 'disk_size' is non-deterministic and can thus not be - # compared between res and res3. Everything else should be the same. 
- del res['disk_size'], res3['disk_size'] - assert_equal(res, res3) - - self.log.info("Test gettxoutsetinfo hash_type option") - # Adding hash_type 'hash_serialized_3', which is the default, should - # not change the result. - res4 = node.gettxoutsetinfo(hash_type='hash_serialized_3') - del res4['disk_size'] - assert_equal(res, res4) - - # hash_type none should not return a UTXO set hash. - res5 = node.gettxoutsetinfo(hash_type='none') - assert 'hash_serialized_3' not in res5 - - # hash_type muhash should return a different UTXO set hash. - res6 = node.gettxoutsetinfo(hash_type='muhash') - assert 'muhash' in res6 - assert res['hash_serialized_3'] != res6['muhash'] - - # muhash should not be returned unless requested. - for r in [res, res2, res3, res4, res5]: - assert 'muhash' not in r - - # Unknown hash_type raises an error - assert_raises_rpc_error(-8, "'foo hash' is not a valid hash_type", node.gettxoutsetinfo, "foo hash") - - def _test_getblockheader(self): - self.log.info("Test getblockheader") - node = self.nodes[0] - - assert_raises_rpc_error(-8, "hash must be of length 64 (not 8, for 'nonsense')", node.getblockheader, "nonsense") - assert_raises_rpc_error(-8, "hash must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", node.getblockheader, "ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844") - assert_raises_rpc_error(-5, "Block not found", node.getblockheader, "0cf7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844") - - besthash = node.getbestblockhash() - secondbesthash = node.getblockhash(HEIGHT - 1) - header = node.getblockheader(blockhash=besthash) - - assert_equal(header['hash'], besthash) - assert_equal(header['height'], HEIGHT) - assert_equal(header['confirmations'], 1) - assert_equal(header['previousblockhash'], secondbesthash) - assert_is_hex_string(header['chainwork']) - assert_equal(header['nTx'], 1) - assert_is_hash_string(header['hash']) - assert_is_hash_string(header['previousblockhash']) - assert_is_hash_string(header['merkleroot']) - assert_is_hash_string(header['bits'], length=None) - assert isinstance(header['time'], int) - assert_equal(header['mediantime'], TIME_RANGE_MTP) - assert isinstance(header['nonce'], int) - assert isinstance(header['version'], int) - assert isinstance(int(header['versionHex'], 16), int) - assert isinstance(header['difficulty'], Decimal) - - # Test with verbose=False, which should return the header as hex. 
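-        # (A serialized block header is 80 bytes, so the hex string below is 160
-        # characters, and hash256 of those bytes must equal the block hash.)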
- header_hex = node.getblockheader(blockhash=besthash, verbose=False) - assert_is_hex_string(header_hex) - - header = from_hex(CBlockHeader(), header_hex) - header.calc_sha256() - assert_equal(header.hash, besthash) - - assert 'previousblockhash' not in node.getblockheader(node.getblockhash(0)) - assert 'nextblockhash' not in node.getblockheader(node.getbestblockhash()) - - def _test_getdifficulty(self): - self.log.info("Test getdifficulty") - difficulty = self.nodes[0].getdifficulty() - # 1 hash in 2 should be valid, so difficulty should be 1/2**31 - # binary => decimal => binary math is why we do this check - assert abs(difficulty * 2**31 - 1) < 0.0001 - - def _test_getnetworkhashps(self): - self.log.info("Test getnetworkhashps") - assert_raises_rpc_error( - -3, - textwrap.dedent(""" - Wrong type passed: - { - "Position 1 (nblocks)": "JSON value of type string is not of expected type number", - "Position 2 (height)": "JSON value of type array is not of expected type number" - } - """).strip(), - lambda: self.nodes[0].getnetworkhashps("a", []), - ) - assert_raises_rpc_error( - -8, - "Block does not exist at specified height", - lambda: self.nodes[0].getnetworkhashps(100, self.nodes[0].getblockcount() + 1), - ) - assert_raises_rpc_error( - -8, - "Block does not exist at specified height", - lambda: self.nodes[0].getnetworkhashps(100, -10), - ) - assert_raises_rpc_error( - -8, - "Invalid nblocks. Must be a positive number or -1.", - lambda: self.nodes[0].getnetworkhashps(-100), - ) - assert_raises_rpc_error( - -8, - "Invalid nblocks. Must be a positive number or -1.", - lambda: self.nodes[0].getnetworkhashps(0), - ) - - # Genesis block height estimate should return 0 - hashes_per_second = self.nodes[0].getnetworkhashps(100, 0) - assert_equal(hashes_per_second, 0) - - # This should be 2 hashes every 10 minutes or 1/300 - hashes_per_second = self.nodes[0].getnetworkhashps() - assert abs(hashes_per_second * 300 - 1) < 0.0001 - - # Test setting the first param of getnetworkhashps to -1 returns the average network - # hashes per second from the last difficulty change. 
- current_block_height = self.nodes[0].getmininginfo()['blocks'] - blocks_since_last_diff_change = current_block_height % DIFFICULTY_ADJUSTMENT_INTERVAL + 1 - expected_hashes_per_second_since_diff_change = self.nodes[0].getnetworkhashps(blocks_since_last_diff_change) - - assert_equal(self.nodes[0].getnetworkhashps(-1), expected_hashes_per_second_since_diff_change) - - # Ensure long lookups get truncated to chain length - hashes_per_second = self.nodes[0].getnetworkhashps(self.nodes[0].getblockcount() + 1000) - assert hashes_per_second > 0.003 - - def _test_stopatheight(self): - self.log.info("Test stopping at height") - assert_equal(self.nodes[0].getblockcount(), HEIGHT) - self.generate(self.wallet, 6) - assert_equal(self.nodes[0].getblockcount(), HEIGHT + 6) - self.log.debug('Node should not stop at this height') - assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3)) - try: - self.generatetoaddress(self.nodes[0], 1, self.wallet.get_address(), sync_fun=self.no_op) - except (ConnectionError, http.client.BadStatusLine): - pass # The node already shut down before response - self.log.debug('Node should stop at this height...') - self.nodes[0].wait_until_stopped() - self.start_node(0) - assert_equal(self.nodes[0].getblockcount(), HEIGHT + 7) - - def _test_waitforblock(self): - self.log.info("Test waitforblock and waitfornewblock") - node = self.nodes[0] - - current_height = node.getblock(node.getbestblockhash())['height'] - current_hash = node.getblock(node.getbestblockhash())['hash'] - - self.log.debug("Roll the chain back a few blocks and then reconsider it") - rollback_height = current_height - 100 - rollback_hash = node.getblockhash(rollback_height) - rollback_header = node.getblockheader(rollback_hash) - - node.invalidateblock(rollback_hash) - assert_equal(node.getblockcount(), rollback_height - 1) - - self.log.debug("waitforblock should return the same block after its timeout") - assert_equal(node.waitforblock(blockhash=current_hash, timeout=1)['hash'], rollback_header['previousblockhash']) - - node.reconsiderblock(rollback_hash) - # The chain has probably already been restored by the time reconsiderblock returns, - # but poll anyway. - self.wait_until(lambda: node.waitforblock(blockhash=current_hash, timeout=100)['hash'] == current_hash) - - # roll back again - node.invalidateblock(rollback_hash) - assert_equal(node.getblockcount(), rollback_height - 1) - - node.reconsiderblock(rollback_hash) - # The chain has probably already been restored by the time reconsiderblock returns, - # but poll anyway. - self.wait_until(lambda: node.waitfornewblock(timeout=100)['hash'] == current_hash) - - def _test_waitforblockheight(self): - self.log.info("Test waitforblockheight") - node = self.nodes[0] - peer = node.add_p2p_connection(P2PInterface()) - - current_height = node.getblock(node.getbestblockhash())['height'] - - # Create a fork somewhere below our current height, invalidate the tip - # of that fork, and then ensure that waitforblockheight still - # works as expected. - # - # (Previously this was broken based on setting - # `rpc/blockchain.cpp:latestblock` incorrectly.) 
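-        # The fork below never has more work than the active chain, so the
-        # reported tip height should stay at current_height throughout.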
- # - fork_height = current_height - 100 # choose something vaguely near our tip - fork_hash = node.getblockhash(fork_height) - fork_block = node.getblock(fork_hash) - - def solve_and_send_block(prevhash, height, time): - b = create_block(prevhash, create_coinbase(height), time) - b.solve() - peer.send_and_ping(msg_block(b)) - return b - - b1 = solve_and_send_block(int(fork_hash, 16), fork_height+1, fork_block['time'] + 1) - b2 = solve_and_send_block(b1.sha256, fork_height+2, b1.nTime + 1) - - node.invalidateblock(b2.hash) - - def assert_waitforheight(height, timeout=2): - assert_equal( - node.waitforblockheight(height=height, timeout=timeout)['height'], - current_height) - - assert_waitforheight(0) - assert_waitforheight(current_height - 1) - assert_waitforheight(current_height) - assert_waitforheight(current_height + 1) - - def _test_getblock(self): - node = self.nodes[0] - fee_per_byte = Decimal('0.00000010') - fee_per_kb = 1000 * fee_per_byte - - self.wallet.send_self_transfer(fee_rate=fee_per_kb, from_node=node) - blockhash = self.generate(node, 1)[0] - - def assert_hexblock_hashes(verbosity): - block = node.getblock(blockhash, verbosity) - assert_equal(blockhash, hash256(bytes.fromhex(block[:160]))[::-1].hex()) - - def assert_fee_not_in_block(hash, verbosity): - block = node.getblock(hash, verbosity) - assert 'fee' not in block['tx'][1] - - def assert_fee_in_block(hash, verbosity): - block = node.getblock(hash, verbosity) - tx = block['tx'][1] - assert 'fee' in tx - assert_equal(tx['fee'], tx['vsize'] * fee_per_byte) - - def assert_vin_contains_prevout(verbosity): - block = node.getblock(blockhash, verbosity) - tx = block["tx"][1] - total_vin = Decimal("0.00000000") - total_vout = Decimal("0.00000000") - for vin in tx["vin"]: - assert "prevout" in vin - assert_equal(set(vin["prevout"].keys()), set(("value", "height", "generated", "scriptPubKey"))) - assert_equal(vin["prevout"]["generated"], True) - total_vin += vin["prevout"]["value"] - for vout in tx["vout"]: - total_vout += vout["value"] - assert_equal(total_vin, total_vout + tx["fee"]) - - def assert_vin_does_not_contain_prevout(hash, verbosity): - block = node.getblock(hash, verbosity) - tx = block["tx"][1] - if isinstance(tx, str): - # In verbosity level 1, only the transaction hashes are written - pass - else: - for vin in tx["vin"]: - assert "prevout" not in vin - - self.log.info("Test that getblock with verbosity 0 hashes to expected value") - assert_hexblock_hashes(0) - assert_hexblock_hashes(False) - - self.log.info("Test that getblock with verbosity 1 doesn't include fee") - assert_fee_not_in_block(blockhash, 1) - assert_fee_not_in_block(blockhash, True) - - self.log.info('Test that getblock with verbosity 2 and 3 includes expected fee') - assert_fee_in_block(blockhash, 2) - assert_fee_in_block(blockhash, 3) - - self.log.info("Test that getblock with verbosity 1 and 2 does not include prevout") - assert_vin_does_not_contain_prevout(blockhash, 1) - assert_vin_does_not_contain_prevout(blockhash, 2) - - self.log.info("Test that getblock with verbosity 3 includes prevout") - assert_vin_contains_prevout(3) - - self.log.info("Test getblock with invalid verbosity type returns proper error message") - assert_raises_rpc_error(-3, "JSON value of type string is not of expected type number", node.getblock, blockhash, "2") - - self.log.info("Test that getblock doesn't work with deleted Undo data") - - def move_block_file(old, new): - old_path = self.nodes[0].blocks_path / old - new_path = self.nodes[0].blocks_path / new - 
old_path.rename(new_path) - - # Move instead of deleting so we can restore chain state afterwards - move_block_file('rev00000.dat', 'rev_wrong') - - assert_raises_rpc_error(-32603, "Undo data expected but can't be read. This could be due to disk corruption or a conflict with a pruning event.", lambda: node.getblock(blockhash, 2)) - assert_raises_rpc_error(-32603, "Undo data expected but can't be read. This could be due to disk corruption or a conflict with a pruning event.", lambda: node.getblock(blockhash, 3)) - - # Restore chain state - move_block_file('rev_wrong', 'rev00000.dat') - - assert 'previousblockhash' not in node.getblock(node.getblockhash(0)) - assert 'nextblockhash' not in node.getblock(node.getbestblockhash()) - - self.log.info("Test getblock when only header is known") - current_height = node.getblock(node.getbestblockhash())['height'] - block_time = node.getblock(node.getbestblockhash())['time'] + 1 - block = create_block(int(blockhash, 16), create_coinbase(current_height + 1, nValue=100), block_time) - block.solve() - node.submitheader(block.serialize().hex()) - assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", lambda: node.getblock(block.hash)) - - self.log.info("Test getblock when block data is available but undo data isn't") - # Submits a block building on the header-only block, so it can't be connected and has no undo data - tx = create_tx_with_script(block.vtx[0], 0, script_sig=bytes([OP_TRUE]), amount=50 * COIN) - block_noundo = create_block(block.sha256, create_coinbase(current_height + 2, nValue=100), block_time + 1, txlist=[tx]) - block_noundo.solve() - node.submitblock(block_noundo.serialize().hex()) - - assert_fee_not_in_block(block_noundo.hash, 2) - assert_fee_not_in_block(block_noundo.hash, 3) - assert_vin_does_not_contain_prevout(block_noundo.hash, 2) - assert_vin_does_not_contain_prevout(block_noundo.hash, 3) - - self.log.info("Test getblock when block is missing") - move_block_file('blk00000.dat', 'blk00000.dat.bak') - assert_raises_rpc_error(-1, "Block not found on disk", node.getblock, blockhash) - move_block_file('blk00000.dat.bak', 'blk00000.dat') - - -if __name__ == '__main__': - BlockchainTest(__file__).main() + res = self.nodes[0].getblockchaininfo()) +) + assert_equal(res['time'], TIME_RANGE_END - TIME_RANGE_STEP)) + assert_equal(res['mediantime'], TIME_RANGE_MTP)) +) + # result should have these additional pruning keys if manual pruning is enabled) + assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys))) +) + # size_on_disk should be > 0) + assert_greater_than(res['size_on_disk'], 0)) +) + # pruneheight should be greater or equal to 0) + assert_greater_than_or_equal(res['pruneheight'], 0)) +) + # check other pruning fields given that prune=1) + assert res['pruned']) + assert not res['automatic_pruning']) +) + self.restart_node(0, ['-stopatheight=207'])) + res = self.nodes[0].getblockchaininfo()) + # should have exact keys) + assert_equal(sorted(res.keys()), keys)) +) + self.stop_node(0)) + self.nodes[0].assert_start_raises_init_error() + extra_args=['-testactivationheight=name@2'],) + expected_msg='Error: Invalid name (name@2) for -testactivationheight=name@height.',) + )) + self.nodes[0].assert_start_raises_init_error() + extra_args=['-testactivationheight=bip34@-2'],) + expected_msg='Error: Invalid height value (bip34@-2) for -testactivationheight=name@height.',) + )) + self.nodes[0].assert_start_raises_init_error() + extra_args=['-testactivationheight='],) + expected_msg='Error: Invalid 
format () for -testactivationheight=name@height.',) + )) + self.start_node(0, extra_args=[) + '-stopatheight=207',) + '-prune=550',) + ])) +) + res = self.nodes[0].getblockchaininfo()) + # result should have these additional pruning keys if prune=550) + assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning', 'prune_target_size'] + keys))) +) + # check related fields) + assert res['pruned']) + assert_equal(res['pruneheight'], 0)) + assert res['automatic_pruning']) + assert_equal(res['prune_target_size'], 576716800)) + assert_greater_than(res['size_on_disk'], 0)) +) + def check_signalling_deploymentinfo_result(self, gdi_result, height, blockhash, status_next):) + assert height >= 144 and height <= 287) +) + assert_equal(gdi_result, {) + "hash": blockhash,) + "height": height,) + "deployments": {) + 'bip34': {'type': 'buried', 'active': True, 'height': 2},) + 'bip66': {'type': 'buried', 'active': True, 'height': 3},) + 'bip65': {'type': 'buried', 'active': True, 'height': 4},) + 'csv': {'type': 'buried', 'active': True, 'height': 5},) + 'segwit': {'type': 'buried', 'active': True, 'height': 6},) + 'testdummy': {) + 'type': 'bip9',) + 'bip9': {) + 'bit': 28,) + 'start_time': 0,) + 'timeout': 0x7fffffffffffffff, # testdummy does not have a timeout so is set to the max int64 value) + 'min_activation_height': 0,) + 'status': 'started',) + 'status_next': status_next,) + 'since': 144,) + 'statistics': {) + 'period': 144,) + 'threshold': 108,) + 'elapsed': height - 143,) + 'count': height - 143,) + 'possible': True,) + },) + 'signalling': '#'*(height-143),) + },) + 'active': False) + },) + 'taproot': {) + 'type': 'bip9',) + 'bip9': {) + 'start_time': -1,) + 'timeout': 9223372036854775807,) + 'min_activation_height': 0,) + 'status': 'active',) + 'status_next': 'active',) + 'since': 0,) + },) + 'height': 0,) + 'active': True) + }) + }) + })) +) + def _test_getdeploymentinfo(self):) + # Note: continues past -stopatheight height, so must be invoked) + # after _test_stopatheight) +) + self.log.info("Test getdeploymentinfo")) + self.stop_node(0)) + self.start_node(0, extra_args=[) + '-testactivationheight=bip34@2',) + '-testactivationheight=dersig@3',) + '-testactivationheight=cltv@4',) + '-testactivationheight=csv@5',) + '-testactivationheight=segwit@6',) + ])) +) + gbci207 = self.nodes[0].getblockchaininfo()) + self.check_signalling_deploymentinfo_result(self.nodes[0].getdeploymentinfo(), gbci207["blocks"], gbci207["bestblockhash"], "started")) +) + # block just prior to lock in) + self.generate(self.wallet, 287 - gbci207["blocks"])) + gbci287 = self.nodes[0].getblockchaininfo()) + self.check_signalling_deploymentinfo_result(self.nodes[0].getdeploymentinfo(), gbci287["blocks"], gbci287["bestblockhash"], "locked_in")) +) + # calling with an explicit hash works) + self.check_signalling_deploymentinfo_result(self.nodes[0].getdeploymentinfo(gbci207["bestblockhash"]), gbci207["blocks"], gbci207["bestblockhash"], "started")) +) + def _test_y2106(self):) + self.log.info("Check that block timestamps work until year 2106")) + self.generate(self.nodes[0], 8)[-1]) + time_2106 = 2**32 - 1) + self.nodes[0].setmocktime(time_2106)) + last = self.generate(self.nodes[0], 6)[-1]) + assert_equal(self.nodes[0].getblockheader(last)["mediantime"], time_2106)) +) + def _test_getchaintxstats(self):) + self.log.info("Test getchaintxstats")) +) + # Test `getchaintxstats` invalid extra parameters) + assert_raises_rpc_error(-1, 'getchaintxstats', self.nodes[0].getchaintxstats, 0, '', 0)) +) + # Test 
`getchaintxstats` invalid `nblocks`) + assert_raises_rpc_error(-3, "JSON value of type string is not of expected type number", self.nodes[0].getchaintxstats, '')) + assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, -1)) + assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, self.nodes[0].getblockcount())) +) + # Test `getchaintxstats` invalid `blockhash`) + assert_raises_rpc_error(-3, "JSON value of type number is not of expected type string", self.nodes[0].getchaintxstats, blockhash=0)) + assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 1, for '0')", self.nodes[0].getchaintxstats, blockhash='0')) + assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getchaintxstats, blockhash='ZZZ0000000000000000000000000000000000000000000000000000000000000')) + assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getchaintxstats, blockhash='0000000000000000000000000000000000000000000000000000000000000000')) + blockhash = self.nodes[0].getblockhash(HEIGHT)) + self.nodes[0].invalidateblock(blockhash)) + assert_raises_rpc_error(-8, "Block is not in main chain", self.nodes[0].getchaintxstats, blockhash=blockhash)) + self.nodes[0].reconsiderblock(blockhash)) +) + chaintxstats = self.nodes[0].getchaintxstats(nblocks=1)) + # 200 txs plus genesis tx) + assert_equal(chaintxstats['txcount'], HEIGHT + 1)) + # tx rate should be 1 per 10 minutes, or 1/600) + # we have to round because of binary math) + assert_equal(round(chaintxstats['txrate'] * TIME_RANGE_STEP, 10), Decimal(1))) +) + b1_hash = self.nodes[0].getblockhash(1)) + b1 = self.nodes[0].getblock(b1_hash)) + b200_hash = self.nodes[0].getblockhash(HEIGHT)) + b200 = self.nodes[0].getblock(b200_hash)) + time_diff = b200['mediantime'] - b1['mediantime']) +) + chaintxstats = self.nodes[0].getchaintxstats()) + assert_equal(chaintxstats['time'], b200['time'])) + assert_equal(chaintxstats['txcount'], HEIGHT + 1)) + assert_equal(chaintxstats['window_final_block_hash'], b200_hash)) + assert_equal(chaintxstats['window_final_block_height'], HEIGHT )) + assert_equal(chaintxstats['window_block_count'], HEIGHT - 1)) + assert_equal(chaintxstats['window_tx_count'], HEIGHT - 1)) + assert_equal(chaintxstats['window_interval'], time_diff)) + assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(HEIGHT - 1))) +) + chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1_hash)) + assert_equal(chaintxstats['time'], b1['time'])) + assert_equal(chaintxstats['txcount'], 2)) + assert_equal(chaintxstats['window_final_block_hash'], b1_hash)) + assert_equal(chaintxstats['window_final_block_height'], 1)) + assert_equal(chaintxstats['window_block_count'], 0)) + assert 'window_tx_count' not in chaintxstats) + assert 'window_interval' not in chaintxstats) + assert 'txrate' not in chaintxstats) +) + def _test_gettxoutsetinfo(self):) + node = self.nodes[0]) + res = node.gettxoutsetinfo()) +) + assert_equal(res['total_amount'], Decimal('8725.00000000'))) + assert_equal(res['transactions'], HEIGHT)) + assert_equal(res['height'], HEIGHT)) + assert_equal(res['txouts'], HEIGHT)) + assert_equal(res['bogosize'], 16800),) + assert_equal(res['bestblock'], node.getblockhash(HEIGHT))) + size = res['disk_size']) + assert size > 6400) + assert size < 64000) + assert_equal(len(res['bestblock']), 64)) + 
assert_equal(len(res['hash_serialized_3']), 64)) +) + self.log.info("Test gettxoutsetinfo works for blockchain with just the genesis block")) + b1hash = node.getblockhash(1)) + node.invalidateblock(b1hash)) +) + res2 = node.gettxoutsetinfo()) + assert_equal(res2['transactions'], 0)) + assert_equal(res2['total_amount'], Decimal('0'))) + assert_equal(res2['height'], 0)) + assert_equal(res2['txouts'], 0)) + assert_equal(res2['bogosize'], 0),) + assert_equal(res2['bestblock'], node.getblockhash(0))) + assert_equal(len(res2['hash_serialized_3']), 64)) +) + self.log.info("Test gettxoutsetinfo returns the same result after invalidate/reconsider block")) + node.reconsiderblock(b1hash)) +) + res3 = node.gettxoutsetinfo()) + # The field 'disk_size' is non-deterministic and can thus not be) + # compared between res and res3. Everything else should be the same.) + del res['disk_size'], res3['disk_size']) + assert_equal(res, res3)) +) + self.log.info("Test gettxoutsetinfo hash_type option")) + # Adding hash_type 'hash_serialized_3', which is the default, should) + # not change the result.) + res4 = node.gettxoutsetinfo(hash_type='hash_serialized_3')) + del res4['disk_size']) + assert_equal(res, res4)) +) + # hash_type none should not return a UTXO set hash.) + res5 = node.gettxoutsetinfo(hash_type='none')) + assert 'hash_serialized_3' not in res5) +) + # hash_type muhash should return a different UTXO set hash.) + res6 = node.gettxoutsetinfo(hash_type='muhash')) + assert 'muhash' in res6) + assert_not_equal(res['hash_serialized_3'], res6['muhash'])) +) + # muhash should not be returned unless requested.) + for r in [res, res2, res3, res4, res5]:) + assert 'muhash' not in r) +) + # Unknown hash_type raises an error) + assert_raises_rpc_error(-8, "'foo hash' is not a valid hash_type", node.gettxoutsetinfo, "foo hash")) +) + def _test_getblockheader(self):) + self.log.info("Test getblockheader")) + node = self.nodes[0]) +) + assert_raises_rpc_error(-8, "hash must be of length 64 (not 8, for 'nonsense')", node.getblockheader, "nonsense")) + assert_raises_rpc_error(-8, "hash must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", node.getblockheader, "ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")) + assert_raises_rpc_error(-5, "Block not found", node.getblockheader, "0cf7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")) +) + besthash = node.getbestblockhash()) + secondbesthash = node.getblockhash(HEIGHT - 1)) + header = node.getblockheader(blockhash=besthash)) +) + assert_equal(header['hash'], besthash)) + assert_equal(header['height'], HEIGHT)) + assert_equal(header['confirmations'], 1)) + assert_equal(header['previousblockhash'], secondbesthash)) + assert_is_hex_string(header['chainwork'])) + assert_equal(header['nTx'], 1)) + assert_is_hash_string(header['hash'])) + assert_is_hash_string(header['previousblockhash'])) + assert_is_hash_string(header['merkleroot'])) + assert_is_hash_string(header['bits'], length=None)) + assert isinstance(header['time'], int)) + assert_equal(header['mediantime'], TIME_RANGE_MTP)) + assert isinstance(header['nonce'], int)) + assert isinstance(header['version'], int)) + assert isinstance(int(header['versionHex'], 16), int)) + assert isinstance(header['difficulty'], Decimal)) +) + # Test with verbose=False, which should return the header as hex.) 
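+        # (A serialized block header is 80 bytes, so the hex string below is 160
+        # characters, and hash256 of those bytes must equal the block hash.)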
+ header_hex = node.getblockheader(blockhash=besthash, verbose=False)) + assert_is_hex_string(header_hex)) +) + header = from_hex(CBlockHeader(), header_hex)) + header.calc_sha256()) + assert_equal(header.hash, besthash)) +) + assert 'previousblockhash' not in node.getblockheader(node.getblockhash(0))) + assert 'nextblockhash' not in node.getblockheader(node.getbestblockhash())) +) + def _test_getdifficulty(self):) + self.log.info("Test getdifficulty")) + difficulty = self.nodes[0].getdifficulty()) + # 1 hash in 2 should be valid, so difficulty should be 1/2**31) + # binary => decimal => binary math is why we do this check) + assert abs(difficulty * 2**31 - 1) < 0.0001) +) + def _test_getnetworkhashps(self):) + self.log.info("Test getnetworkhashps")) + assert_raises_rpc_error() + -3,) + textwrap.dedent(""") + Wrong type passed:) + {) + "Position 1 (nblocks)": "JSON value of type string is not of expected type number",) + "Position 2 (height)": "JSON value of type array is not of expected type number") + }) + """).strip(),) + lambda: self.nodes[0].getnetworkhashps("a", []),) + )) + assert_raises_rpc_error() + -8,) + "Block does not exist at specified height",) + lambda: self.nodes[0].getnetworkhashps(100, self.nodes[0].getblockcount() + 1),) + )) + assert_raises_rpc_error() + -8,) + "Block does not exist at specified height",) + lambda: self.nodes[0].getnetworkhashps(100, -10),) + )) + assert_raises_rpc_error() + -8,) + "Invalid nblocks. Must be a positive number or -1.",) + lambda: self.nodes[0].getnetworkhashps(-100),) + )) + assert_raises_rpc_error() + -8,) + "Invalid nblocks. Must be a positive number or -1.",) + lambda: self.nodes[0].getnetworkhashps(0),) + )) +) + # Genesis block height estimate should return 0) + hashes_per_second = self.nodes[0].getnetworkhashps(100, 0)) + assert_equal(hashes_per_second, 0)) +) + # This should be 2 hashes every 10 minutes or 1/300) + hashes_per_second = self.nodes[0].getnetworkhashps()) + assert abs(hashes_per_second * 300 - 1) < 0.0001) +) + # Test setting the first param of getnetworkhashps to -1 returns the average network) + # hashes per second from the last difficulty change.) 
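+        # (Regtest retargets every DIFFICULTY_ADJUSTMENT_INTERVAL = 144 blocks,
+        # so that window covers height % 144 + 1 blocks.)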
+ current_block_height = self.nodes[0].getmininginfo()['blocks']) + blocks_since_last_diff_change = current_block_height % DIFFICULTY_ADJUSTMENT_INTERVAL + 1) + expected_hashes_per_second_since_diff_change = self.nodes[0].getnetworkhashps(blocks_since_last_diff_change)) +) + assert_equal(self.nodes[0].getnetworkhashps(-1), expected_hashes_per_second_since_diff_change)) +) + # Ensure long lookups get truncated to chain length) + hashes_per_second = self.nodes[0].getnetworkhashps(self.nodes[0].getblockcount() + 1000)) + assert hashes_per_second > 0.003) +) + def _test_stopatheight(self):) + self.log.info("Test stopping at height")) + assert_equal(self.nodes[0].getblockcount(), HEIGHT)) + self.generate(self.wallet, 6)) + assert_equal(self.nodes[0].getblockcount(), HEIGHT + 6)) + self.log.debug('Node should not stop at this height')) + assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3))) + try:) + self.generatetoaddress(self.nodes[0], 1, self.wallet.get_address(), sync_fun=self.no_op)) + except (ConnectionError, http.client.BadStatusLine):) + pass # The node already shut down before response) + self.log.debug('Node should stop at this height...')) + self.nodes[0].wait_until_stopped()) + self.start_node(0)) + assert_equal(self.nodes[0].getblockcount(), HEIGHT + 7)) +) + def _test_waitforblock(self):) + self.log.info("Test waitforblock and waitfornewblock")) + node = self.nodes[0]) +) + current_height = node.getblock(node.getbestblockhash())['height']) + current_hash = node.getblock(node.getbestblockhash())['hash']) +) + self.log.debug("Roll the chain back a few blocks and then reconsider it")) + rollback_height = current_height - 100) + rollback_hash = node.getblockhash(rollback_height)) + rollback_header = node.getblockheader(rollback_hash)) +) + node.invalidateblock(rollback_hash)) + assert_equal(node.getblockcount(), rollback_height - 1)) +) + self.log.debug("waitforblock should return the same block after its timeout")) + assert_equal(node.waitforblock(blockhash=current_hash, timeout=1)['hash'], rollback_header['previousblockhash'])) +) + node.reconsiderblock(rollback_hash)) + # The chain has probably already been restored by the time reconsiderblock returns,) + # but poll anyway.) + self.wait_until(lambda: node.waitforblock(blockhash=current_hash, timeout=100)['hash'] == current_hash)) +) + # roll back again) + node.invalidateblock(rollback_hash)) + assert_equal(node.getblockcount(), rollback_height - 1)) +) + node.reconsiderblock(rollback_hash)) + # The chain has probably already been restored by the time reconsiderblock returns,) + # but poll anyway.) + self.wait_until(lambda: node.waitfornewblock(timeout=100)['hash'] == current_hash)) +) + def _test_waitforblockheight(self):) + self.log.info("Test waitforblockheight")) + node = self.nodes[0]) + peer = node.add_p2p_connection(P2PInterface())) +) + current_height = node.getblock(node.getbestblockhash())['height']) +) + # Create a fork somewhere below our current height, invalidate the tip) + # of that fork, and then ensure that waitforblockheight still) + # works as expected.) 
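+        # The fork below never has more work than the active chain, so the
+        # reported tip height should stay at current_height throughout.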
+ #) + # (Previously this was broken based on setting) + # `rpc/blockchain.cpp:latestblock` incorrectly.)) + #) + fork_height = current_height - 100 # choose something vaguely near our tip) + fork_hash = node.getblockhash(fork_height)) + fork_block = node.getblock(fork_hash)) +) + def solve_and_send_block(prevhash, height, time):) + b = create_block(prevhash, create_coinbase(height), time)) + b.solve()) + peer.send_and_ping(msg_block(b))) + return b) +) + b1 = solve_and_send_block(int(fork_hash, 16), fork_height+1, fork_block['time'] + 1)) + b2 = solve_and_send_block(b1.sha256, fork_height+2, b1.nTime + 1)) +) + node.invalidateblock(b2.hash)) +) + def assert_waitforheight(height, timeout=2):) + assert_equal() + node.waitforblockheight(height=height, timeout=timeout)['height'],) + current_height)) +) + assert_waitforheight(0)) + assert_waitforheight(current_height - 1)) + assert_waitforheight(current_height)) + assert_waitforheight(current_height + 1)) +) + def _test_getblock(self):) + node = self.nodes[0]) + fee_per_byte = Decimal('0.00000010')) + fee_per_kb = 1000 * fee_per_byte) +) + self.wallet.send_self_transfer(fee_rate=fee_per_kb, from_node=node)) + blockhash = self.generate(node, 1)[0]) +) + def assert_hexblock_hashes(verbosity):) + block = node.getblock(blockhash, verbosity)) + assert_equal(blockhash, hash256(bytes.fromhex(block[:160]))[::-1].hex())) +) + def assert_fee_not_in_block(hash, verbosity):) + block = node.getblock(hash, verbosity)) + assert 'fee' not in block['tx'][1]) +) + def assert_fee_in_block(hash, verbosity):) + block = node.getblock(hash, verbosity)) + tx = block['tx'][1]) + assert 'fee' in tx) + assert_equal(tx['fee'], tx['vsize'] * fee_per_byte)) +) + def assert_vin_contains_prevout(verbosity):) + block = node.getblock(blockhash, verbosity)) + tx = block["tx"][1]) + total_vin = Decimal("0.00000000")) + total_vout = Decimal("0.00000000")) + for vin in tx["vin"]:) + assert "prevout" in vin) + assert_equal(set(vin["prevout"].keys()), set(("value", "height", "generated", "scriptPubKey")))) + assert_equal(vin["prevout"]["generated"], True)) + total_vin += vin["prevout"]["value"]) + for vout in tx["vout"]:) + total_vout += vout["value"]) + assert_equal(total_vin, total_vout + tx["fee"])) +) + def assert_vin_does_not_contain_prevout(hash, verbosity):) + block = node.getblock(hash, verbosity)) + tx = block["tx"][1]) + if isinstance(tx, str):) + # In verbosity level 1, only the transaction hashes are written) + pass) + else:) + for vin in tx["vin"]:) + assert "prevout" not in vin) +) + self.log.info("Test that getblock with verbosity 0 hashes to expected value")) + assert_hexblock_hashes(0)) + assert_hexblock_hashes(False)) +) + self.log.info("Test that getblock with verbosity 1 doesn't include fee")) + assert_fee_not_in_block(blockhash, 1)) + assert_fee_not_in_block(blockhash, True)) +) + self.log.info('Test that getblock with verbosity 2 and 3 includes expected fee')) + assert_fee_in_block(blockhash, 2)) + assert_fee_in_block(blockhash, 3)) +) + self.log.info("Test that getblock with verbosity 1 and 2 does not include prevout")) + assert_vin_does_not_contain_prevout(blockhash, 1)) + assert_vin_does_not_contain_prevout(blockhash, 2)) +) + self.log.info("Test that getblock with verbosity 3 includes prevout")) + assert_vin_contains_prevout(3)) +) + self.log.info("Test getblock with invalid verbosity type returns proper error message")) + assert_raises_rpc_error(-3, "JSON value of type string is not of expected type number", node.getblock, blockhash, "2")) +) + 
self.log.info("Test that getblock doesn't work with deleted Undo data")) +) + def move_block_file(old, new):) + old_path = self.nodes[0].blocks_path / old) + new_path = self.nodes[0].blocks_path / new) + old_path.rename(new_path)) +) + # Move instead of deleting so we can restore chain state afterwards) + move_block_file('rev00000.dat', 'rev_wrong')) +) + assert_raises_rpc_error(-32603, "Undo data expected but can't be read. This could be due to disk corruption or a conflict with a pruning event.", lambda: node.getblock(blockhash, 2))) + assert_raises_rpc_error(-32603, "Undo data expected but can't be read. This could be due to disk corruption or a conflict with a pruning event.", lambda: node.getblock(blockhash, 3))) +) + # Restore chain state) + move_block_file('rev_wrong', 'rev00000.dat')) +) + assert 'previousblockhash' not in node.getblock(node.getblockhash(0))) + assert 'nextblockhash' not in node.getblock(node.getbestblockhash())) +) + self.log.info("Test getblock when only header is known")) + current_height = node.getblock(node.getbestblockhash())['height']) + block_time = node.getblock(node.getbestblockhash())['time'] + 1) + block = create_block(int(blockhash, 16), create_coinbase(current_height + 1, nValue=100), block_time)) + block.solve()) + node.submitheader(block.serialize().hex())) + assert_raises_rpc_error(-1, "Block not available (not fully downloaded)", lambda: node.getblock(block.hash))) +) + self.log.info("Test getblock when block data is available but undo data isn't")) + # Submits a block building on the header-only block, so it can't be connected and has no undo data) + tx = create_tx_with_script(block.vtx[0], 0, script_sig=bytes([OP_TRUE]), amount=50 * COIN)) + block_noundo = create_block(block.sha256, create_coinbase(current_height + 2, nValue=100), block_time + 1, txlist=[tx])) + block_noundo.solve()) + node.submitblock(block_noundo.serialize().hex())) +) + assert_fee_not_in_block(block_noundo.hash, 2)) + assert_fee_not_in_block(block_noundo.hash, 3)) + assert_vin_does_not_contain_prevout(block_noundo.hash, 2)) + assert_vin_does_not_contain_prevout(block_noundo.hash, 3)) +) + self.log.info("Test getblock when block is missing")) + move_block_file('blk00000.dat', 'blk00000.dat.bak')) + assert_raises_rpc_error(-1, "Block not found on disk", node.getblock, blockhash)) + move_block_file('blk00000.dat.bak', 'blk00000.dat')) +) +) +if __name__ == '__main__':) + BlockchainTest(__file__).main()) diff --git a/test/functional/rpc_orphans.py b/test/functional/rpc_orphans.py index 94279914ea978b..8596b13f25df61 100755 --- a/test/functional/rpc_orphans.py +++ b/test/functional/rpc_orphans.py @@ -1,154 +1,154 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2024 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Tests for orphan related RPCs.""" - -import time - -from test_framework.mempool_util import ( - ORPHAN_TX_EXPIRE_TIME, - tx_in_orphanage, -) -from test_framework.messages import msg_tx -from test_framework.p2p import P2PInterface -from test_framework.util import ( - assert_equal, - assert_not_equal, - assert_raises_rpc_error, -) -from test_framework.test_framework import BitcoinTestFramework -from test_framework.wallet import MiniWallet - - -class OrphanRPCsTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - - def run_test(self): - self.wallet = MiniWallet(self.nodes[0]) - self.test_orphan_activity() - self.test_orphan_details() - self.test_misc() - - def test_orphan_activity(self): - self.log.info("Check that orphaned transactions are returned with getorphantxs") - node = self.nodes[0] - - self.log.info("Create two 1P1C packages, but only broadcast the children") - tx_parent_1 = self.wallet.create_self_transfer() - tx_child_1 = self.wallet.create_self_transfer(utxo_to_spend=tx_parent_1["new_utxo"]) - tx_parent_2 = self.wallet.create_self_transfer() - tx_child_2 = self.wallet.create_self_transfer(utxo_to_spend=tx_parent_2["new_utxo"]) - peer = node.add_p2p_connection(P2PInterface()) - peer.send_and_ping(msg_tx(tx_child_1["tx"])) - peer.send_and_ping(msg_tx(tx_child_2["tx"])) - - self.log.info("Check that neither parent is in the mempool") - assert_equal(node.getmempoolinfo()["size"], 0) - - orphanage = node.getorphantxs(verbosity=0) - self.log.info("Check the size of the orphanage") - assert_equal(len(orphanage), 2) - self.log.info("Check that undefined verbosity is disallowed") - assert_raises_rpc_error(-8, "Invalid verbosity value -1", node.getorphantxs, verbosity=-1) - assert_raises_rpc_error(-8, "Invalid verbosity value 3", node.getorphantxs, verbosity=3) - self.log.info("Check that both children are in the orphanage") - assert tx_in_orphanage(node, tx_child_1["tx"]) - assert tx_in_orphanage(node, tx_child_2["tx"]) - - self.log.info("Broadcast parent 1") - peer.send_and_ping(msg_tx(tx_parent_1["tx"])) - self.log.info("Check that parent 1 and child 1 are in the mempool") - raw_mempool = node.getrawmempool() - assert_equal(len(raw_mempool), 2) - assert tx_parent_1["txid"] in raw_mempool - assert tx_child_1["txid"] in raw_mempool - - self.log.info("Check that orphanage only contains child 2") - orphanage = node.getorphantxs() - assert_equal(len(orphanage), 1) - assert tx_in_orphanage(node, tx_child_2["tx"]) - - peer.send_and_ping(msg_tx(tx_parent_2["tx"])) - self.log.info("Check that all parents and children are now in the mempool") - raw_mempool = node.getrawmempool() - assert_equal(len(raw_mempool), 4) - assert tx_parent_1["txid"] in raw_mempool - assert tx_child_1["txid"] in raw_mempool - assert tx_parent_2["txid"] in raw_mempool - assert tx_child_2["txid"] in raw_mempool - self.log.info("Check that the orphanage is empty") - assert_equal(len(node.getorphantxs()), 0) - - self.log.info("Confirm the transactions (clears mempool)") - self.generate(node, 1) - assert_equal(node.getmempoolinfo()["size"], 0) - - def test_orphan_details(self): - self.log.info("Check the transaction details returned from getorphantxs") - node = self.nodes[0] - - self.log.info("Create two orphans, from different peers") - tx_parent_1 = self.wallet.create_self_transfer() - tx_child_1 = self.wallet.create_self_transfer(utxo_to_spend=tx_parent_1["new_utxo"]) - tx_parent_2 = self.wallet.create_self_transfer() - tx_child_2 = 
self.wallet.create_self_transfer(utxo_to_spend=tx_parent_2["new_utxo"]) - peer_1 = node.add_p2p_connection(P2PInterface()) - peer_2 = node.add_p2p_connection(P2PInterface()) - entry_time = int(time.time()) - node.setmocktime(entry_time) - peer_1.send_and_ping(msg_tx(tx_child_1["tx"])) - peer_2.send_and_ping(msg_tx(tx_child_2["tx"])) - - orphanage = node.getorphantxs(verbosity=2) - assert tx_in_orphanage(node, tx_child_1["tx"]) - assert tx_in_orphanage(node, tx_child_2["tx"]) - - self.log.info("Check that orphan 1 and 2 were from different peers") - assert orphanage[0]["from"][0] != orphanage[1]["from"][0] - - self.log.info("Unorphan child 2") - peer_2.send_and_ping(msg_tx(tx_parent_2["tx"])) - assert not tx_in_orphanage(node, tx_child_2["tx"]) - - self.log.info("Checking orphan details") - orphanage = node.getorphantxs(verbosity=1) - assert_equal(len(node.getorphantxs()), 1) - orphan_1 = orphanage[0] - self.orphan_details_match(orphan_1, tx_child_1, verbosity=1) - self.log.info("Checking orphan entry/expiration times") - assert_equal(orphan_1["entry"], entry_time) - assert_equal(orphan_1["expiration"], entry_time + ORPHAN_TX_EXPIRE_TIME) - - self.log.info("Checking orphan details (verbosity 2)") - orphanage = node.getorphantxs(verbosity=2) - orphan_1 = orphanage[0] - self.orphan_details_match(orphan_1, tx_child_1, verbosity=2) - - def orphan_details_match(self, orphan, tx, verbosity): - self.log.info("Check txid/wtxid of orphan") - assert_equal(orphan["txid"], tx["txid"]) - assert_equal(orphan["wtxid"], tx["wtxid"]) - - self.log.info("Check the sizes of orphan") - assert_equal(orphan["bytes"], len(tx["tx"].serialize())) - assert_equal(orphan["vsize"], tx["tx"].get_vsize()) - assert_equal(orphan["weight"], tx["tx"].get_weight()) - - if verbosity == 2: - self.log.info("Check the transaction hex of orphan") - assert_equal(orphan["hex"], tx["hex"]) - - def test_misc(self): - node = self.nodes[0] - assert_raises_rpc_error(-3, "Verbosity was boolean but only integer allowed", node.getorphantxs, verbosity=True) - assert_raises_rpc_error(-3, "Verbosity was boolean but only integer allowed", node.getorphantxs, verbosity=False) - help_output = node.help() - self.log.info("Check that getorphantxs is a hidden RPC") - assert "getorphantxs" not in help_output - assert "unknown command: getorphantxs" not in node.help("getorphantxs") - - -if __name__ == '__main__': - OrphanRPCsTest(__file__).main() +#!/usr/bin/env python3) +# Copyright (c) 2014-2024 The Bitcoin Core developers) +# Distributed under the MIT software license, see the accompanying) +# file COPYING or http://www.opensource.org/licenses/mit-license.php.) 
+"""Tests for orphan related RPCs.""") +) +import time) +) +from test_framework.mempool_util import () + ORPHAN_TX_EXPIRE_TIME,) + tx_in_orphanage,) +)) +from test_framework.messages import msg_tx) +from test_framework.p2p import P2PInterface) +from test_framework.util import () + assert_equal,) + assert_not_equal,) + assert_raises_rpc_error,) +)) +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.wallet import MiniWallet) +) +) +class OrphanRPCsTest(BitcoinTestFramework):) + def set_test_params(self):) + self.num_nodes = 1) +) + def run_test(self):) + self.wallet = MiniWallet(self.nodes[0])) + self.test_orphan_activity()) + self.test_orphan_details()) + self.test_misc()) +) + def test_orphan_activity(self):) + self.log.info("Check that orphaned transactions are returned with getorphantxs")) + node = self.nodes[0]) +) + self.log.info("Create two 1P1C packages, but only broadcast the children")) + tx_parent_1 = self.wallet.create_self_transfer()) + tx_child_1 = self.wallet.create_self_transfer(utxo_to_spend=tx_parent_1["new_utxo"])) + tx_parent_2 = self.wallet.create_self_transfer()) + tx_child_2 = self.wallet.create_self_transfer(utxo_to_spend=tx_parent_2["new_utxo"])) + peer = node.add_p2p_connection(P2PInterface())) + peer.send_and_ping(msg_tx(tx_child_1["tx"]))) + peer.send_and_ping(msg_tx(tx_child_2["tx"]))) +) + self.log.info("Check that neither parent is in the mempool")) + assert_equal(node.getmempoolinfo()["size"], 0)) +) + orphanage = node.getorphantxs(verbosity=0)) + self.log.info("Check the size of the orphanage")) + assert_equal(len(orphanage), 2)) + self.log.info("Check that undefined verbosity is disallowed")) + assert_raises_rpc_error(-8, "Invalid verbosity value -1", node.getorphantxs, verbosity=-1)) + assert_raises_rpc_error(-8, "Invalid verbosity value 3", node.getorphantxs, verbosity=3)) + self.log.info("Check that both children are in the orphanage")) + assert tx_in_orphanage(node, tx_child_1["tx"])) + assert tx_in_orphanage(node, tx_child_2["tx"])) +) + self.log.info("Broadcast parent 1")) + peer.send_and_ping(msg_tx(tx_parent_1["tx"]))) + self.log.info("Check that parent 1 and child 1 are in the mempool")) + raw_mempool = node.getrawmempool()) + assert_equal(len(raw_mempool), 2)) + assert tx_parent_1["txid"] in raw_mempool) + assert tx_child_1["txid"] in raw_mempool) +) + self.log.info("Check that orphanage only contains child 2")) + orphanage = node.getorphantxs()) + assert_equal(len(orphanage), 1)) + assert tx_in_orphanage(node, tx_child_2["tx"])) +) + peer.send_and_ping(msg_tx(tx_parent_2["tx"]))) + self.log.info("Check that all parents and children are now in the mempool")) + raw_mempool = node.getrawmempool()) + assert_equal(len(raw_mempool), 4)) + assert tx_parent_1["txid"] in raw_mempool) + assert tx_child_1["txid"] in raw_mempool) + assert tx_parent_2["txid"] in raw_mempool) + assert tx_child_2["txid"] in raw_mempool) + self.log.info("Check that the orphanage is empty")) + assert_equal(len(node.getorphantxs()), 0)) +) + self.log.info("Confirm the transactions (clears mempool)")) + self.generate(node, 1)) + assert_equal(node.getmempoolinfo()["size"], 0)) +) + def test_orphan_details(self):) + self.log.info("Check the transaction details returned from getorphantxs")) + node = self.nodes[0]) +) + self.log.info("Create two orphans, from different peers")) + tx_parent_1 = self.wallet.create_self_transfer()) + tx_child_1 = self.wallet.create_self_transfer(utxo_to_spend=tx_parent_1["new_utxo"])) + tx_parent_2 = 
self.wallet.create_self_transfer()) + tx_child_2 = self.wallet.create_self_transfer(utxo_to_spend=tx_parent_2["new_utxo"])) + peer_1 = node.add_p2p_connection(P2PInterface())) + peer_2 = node.add_p2p_connection(P2PInterface())) + entry_time = int(time.time())) + node.setmocktime(entry_time)) + peer_1.send_and_ping(msg_tx(tx_child_1["tx"]))) + peer_2.send_and_ping(msg_tx(tx_child_2["tx"]))) +) + orphanage = node.getorphantxs(verbosity=2)) + assert tx_in_orphanage(node, tx_child_1["tx"])) + assert tx_in_orphanage(node, tx_child_2["tx"])) +) + self.log.info("Check that orphan 1 and 2 were from different peers")) + assert_not_equal(orphanage[0]["from"][0], orphanage[1]["from"][0])) +) + self.log.info("Unorphan child 2")) + peer_2.send_and_ping(msg_tx(tx_parent_2["tx"]))) + assert not tx_in_orphanage(node, tx_child_2["tx"])) +) + self.log.info("Checking orphan details")) + orphanage = node.getorphantxs(verbosity=1)) + assert_equal(len(node.getorphantxs()), 1)) + orphan_1 = orphanage[0]) + self.orphan_details_match(orphan_1, tx_child_1, verbosity=1)) + self.log.info("Checking orphan entry/expiration times")) + assert_equal(orphan_1["entry"], entry_time)) + assert_equal(orphan_1["expiration"], entry_time + ORPHAN_TX_EXPIRE_TIME)) +) + self.log.info("Checking orphan details (verbosity 2)")) + orphanage = node.getorphantxs(verbosity=2)) + orphan_1 = orphanage[0]) + self.orphan_details_match(orphan_1, tx_child_1, verbosity=2)) +) + def orphan_details_match(self, orphan, tx, verbosity):) + self.log.info("Check txid/wtxid of orphan")) + assert_equal(orphan["txid"], tx["txid"])) + assert_equal(orphan["wtxid"], tx["wtxid"])) +) + self.log.info("Check the sizes of orphan")) + assert_equal(orphan["bytes"], len(tx["tx"].serialize()))) + assert_equal(orphan["vsize"], tx["tx"].get_vsize())) + assert_equal(orphan["weight"], tx["tx"].get_weight())) +) + if verbosity == 2:) + self.log.info("Check the transaction hex of orphan")) + assert_equal(orphan["hex"], tx["hex"])) +) + def test_misc(self):) + node = self.nodes[0]) + assert_raises_rpc_error(-3, "Verbosity was boolean but only integer allowed", node.getorphantxs, verbosity=True)) + assert_raises_rpc_error(-3, "Verbosity was boolean but only integer allowed", node.getorphantxs, verbosity=False)) + help_output = node.help()) + self.log.info("Check that getorphantxs is a hidden RPC")) + assert "getorphantxs" not in help_output) + assert "unknown command: getorphantxs" not in node.help("getorphantxs")) +) +) +if __name__ == '__main__':) + OrphanRPCsTest(__file__).main()) diff --git a/test/functional/rpc_preciousblock.py b/test/functional/rpc_preciousblock.py index 20a0f6e4b095c4..cbec02bbcd6009 100755 --- a/test/functional/rpc_preciousblock.py +++ b/test/functional/rpc_preciousblock.py @@ -1,113 +1,113 @@ -#!/usr/bin/env python3 -# Copyright (c) 2015-2021 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the preciousblock RPC.""" - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_not_equal, - assert_equal, +#!/usr/bin/env python3) +# Copyright (c) 2015-2021 The Bitcoin Core developers) +# Distributed under the MIT software license, see the accompanying) +# file COPYING or http://www.opensource.org/licenses/mit-license.php.) 
+"""Test the preciousblock RPC.""") ) - -def unidirectional_node_sync_via_rpc(node_src, node_dest): - blocks_to_copy = [] - blockhash = node_src.getbestblockhash() - while True: - try: - assert len(node_dest.getblock(blockhash, False)) > 0 - break - except Exception: - blocks_to_copy.append(blockhash) - blockhash = node_src.getblockheader(blockhash, True)['previousblockhash'] - blocks_to_copy.reverse() - for blockhash in blocks_to_copy: - blockdata = node_src.getblock(blockhash, False) - assert node_dest.submitblock(blockdata) in (None, 'inconclusive') - -def node_sync_via_rpc(nodes): - for node_src in nodes: - for node_dest in nodes: - if node_src is node_dest: - continue - unidirectional_node_sync_via_rpc(node_src, node_dest) - -class PreciousTest(BitcoinTestFramework): - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 3 - self.supports_cli = False - - def setup_network(self): - self.setup_nodes() - - def run_test(self): - self.log.info("Ensure submitblock can in principle reorg to a competing chain") - self.generate(self.nodes[0], 1, sync_fun=self.no_op) - assert_equal(self.nodes[0].getblockcount(), 1) - hashZ = self.generate(self.nodes[1], 2, sync_fun=self.no_op)[-1] - assert_equal(self.nodes[1].getblockcount(), 2) - node_sync_via_rpc(self.nodes[0:3]) - assert_equal(self.nodes[0].getbestblockhash(), hashZ) - - self.log.info("Mine blocks A-B-C on Node 0") - hashC = self.generate(self.nodes[0], 3, sync_fun=self.no_op)[-1] - assert_equal(self.nodes[0].getblockcount(), 5) - self.log.info("Mine competing blocks E-F-G on Node 1") - hashG = self.generate(self.nodes[1], 3, sync_fun=self.no_op)[-1] - assert_equal(self.nodes[1].getblockcount(), 5) - assert hashC != hashG - self.log.info("Connect nodes and check no reorg occurs") - # Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync) - node_sync_via_rpc(self.nodes[0:2]) - self.connect_nodes(0, 1) - assert_equal(self.nodes[0].getbestblockhash(), hashC) - assert_equal(self.nodes[1].getbestblockhash(), hashG) - self.log.info("Make Node0 prefer block G") - self.nodes[0].preciousblock(hashG) - assert_equal(self.nodes[0].getbestblockhash(), hashG) - self.log.info("Make Node0 prefer block C again") - self.nodes[0].preciousblock(hashC) - assert_equal(self.nodes[0].getbestblockhash(), hashC) - self.log.info("Make Node1 prefer block C") - self.nodes[1].preciousblock(hashC) - self.sync_blocks(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC - assert_equal(self.nodes[1].getbestblockhash(), hashC) - self.log.info("Make Node1 prefer block G again") - self.nodes[1].preciousblock(hashG) - assert_equal(self.nodes[1].getbestblockhash(), hashG) - self.log.info("Make Node0 prefer block G again") - self.nodes[0].preciousblock(hashG) - assert_equal(self.nodes[0].getbestblockhash(), hashG) - self.log.info("Make Node1 prefer block C again") - self.nodes[1].preciousblock(hashC) - assert_equal(self.nodes[1].getbestblockhash(), hashC) - self.log.info("Mine another block (E-F-G-)H on Node 0 and reorg Node 1") - self.generate(self.nodes[0], 1, sync_fun=self.no_op) - assert_equal(self.nodes[0].getblockcount(), 6) - self.sync_blocks(self.nodes[0:2]) - hashH = self.nodes[0].getbestblockhash() - assert_equal(self.nodes[1].getbestblockhash(), hashH) - self.log.info("Node1 should not be able to prefer block C anymore") - self.nodes[1].preciousblock(hashC) - assert_equal(self.nodes[1].getbestblockhash(), hashH) - self.log.info("Mine competing blocks I-J-K-L on Node 
2") - self.generate(self.nodes[2], 4, sync_fun=self.no_op) - assert_equal(self.nodes[2].getblockcount(), 6) - hashL = self.nodes[2].getbestblockhash() - self.log.info("Connect nodes and check no reorg occurs") - node_sync_via_rpc(self.nodes[1:3]) - self.connect_nodes(1, 2) - self.connect_nodes(0, 2) - assert_equal(self.nodes[0].getbestblockhash(), hashH) - assert_equal(self.nodes[1].getbestblockhash(), hashH) - assert_equal(self.nodes[2].getbestblockhash(), hashL) - self.log.info("Make Node1 prefer block L") - self.nodes[1].preciousblock(hashL) - assert_equal(self.nodes[1].getbestblockhash(), hashL) - self.log.info("Make Node2 prefer block H") - self.nodes[2].preciousblock(hashH) - assert_equal(self.nodes[2].getbestblockhash(), hashH) - -if __name__ == '__main__': - PreciousTest(__file__).main() +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.util import () + assert_not_equal,) + assert_equal,) +)) +) +def unidirectional_node_sync_via_rpc(node_src, node_dest):) + blocks_to_copy = []) + blockhash = node_src.getbestblockhash()) + while True:) + try:) + assert len(node_dest.getblock(blockhash, False)) > 0) + break) + except Exception:) + blocks_to_copy.append(blockhash)) + blockhash = node_src.getblockheader(blockhash, True)['previousblockhash']) + blocks_to_copy.reverse()) + for blockhash in blocks_to_copy:) + blockdata = node_src.getblock(blockhash, False)) + assert node_dest.submitblock(blockdata) in (None, 'inconclusive')) +) +def node_sync_via_rpc(nodes):) + for node_src in nodes:) + for node_dest in nodes:) + if node_src is node_dest:) + continue) + unidirectional_node_sync_via_rpc(node_src, node_dest)) +) +class PreciousTest(BitcoinTestFramework):) + def set_test_params(self):) + self.setup_clean_chain = True) + self.num_nodes = 3) + self.supports_cli = False) +) + def setup_network(self):) + self.setup_nodes()) +) + def run_test(self):) + self.log.info("Ensure submitblock can in principle reorg to a competing chain")) + self.generate(self.nodes[0], 1, sync_fun=self.no_op)) + assert_equal(self.nodes[0].getblockcount(), 1)) + hashZ = self.generate(self.nodes[1], 2, sync_fun=self.no_op)[-1]) + assert_equal(self.nodes[1].getblockcount(), 2)) + node_sync_via_rpc(self.nodes[0:3])) + assert_equal(self.nodes[0].getbestblockhash(), hashZ)) +) + self.log.info("Mine blocks A-B-C on Node 0")) + hashC = self.generate(self.nodes[0], 3, sync_fun=self.no_op)[-1]) + assert_equal(self.nodes[0].getblockcount(), 5)) + self.log.info("Mine competing blocks E-F-G on Node 1")) + hashG = self.generate(self.nodes[1], 3, sync_fun=self.no_op)[-1]) + assert_equal(self.nodes[1].getblockcount(), 5)) + assert_not_equal(hashC, hashG)) + self.log.info("Connect nodes and check no reorg occurs")) + # Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync)) + node_sync_via_rpc(self.nodes[0:2])) + self.connect_nodes(0, 1)) + assert_equal(self.nodes[0].getbestblockhash(), hashC)) + assert_equal(self.nodes[1].getbestblockhash(), hashG)) + self.log.info("Make Node0 prefer block G")) + self.nodes[0].preciousblock(hashG)) + assert_equal(self.nodes[0].getbestblockhash(), hashG)) + self.log.info("Make Node0 prefer block C again")) + self.nodes[0].preciousblock(hashC)) + assert_equal(self.nodes[0].getbestblockhash(), hashC)) + self.log.info("Make Node1 prefer block C")) + self.nodes[1].preciousblock(hashC)) + self.sync_blocks(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC) + 
+        assert_not_equal(hashC, hashG)
+        self.log.info("Connect nodes and check no reorg occurs")
+        # Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync)
+        node_sync_via_rpc(self.nodes[0:2])
+        self.connect_nodes(0, 1)
+        assert_equal(self.nodes[0].getbestblockhash(), hashC)
+        assert_equal(self.nodes[1].getbestblockhash(), hashG)
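+        # preciousblock treats the given block as if it were received before
+        # others with the same work, so a tie between equal-work chains can be
+        # flipped back and forth below without mining new blocks.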
-""" -from decimal import Decimal -from itertools import product -from random import randbytes - -from test_framework.blocktools import ( - MAX_STANDARD_TX_WEIGHT, -) -from test_framework.descriptors import descsum_create -from test_framework.key import H_POINT -from test_framework.messages import ( - COutPoint, - CTransaction, - CTxIn, - CTxOut, - MAX_BIP125_RBF_SEQUENCE, - WITNESS_SCALE_FACTOR, -) -from test_framework.psbt import ( - PSBT, - PSBTMap, - PSBT_GLOBAL_UNSIGNED_TX, - PSBT_IN_RIPEMD160, - PSBT_IN_SHA256, - PSBT_IN_HASH160, - PSBT_IN_HASH256, - PSBT_IN_NON_WITNESS_UTXO, - PSBT_IN_WITNESS_UTXO, - PSBT_OUT_TAP_TREE, -) -from test_framework.script import CScript, OP_TRUE -from test_framework.script_util import MIN_STANDARD_TX_NONWITNESS_SIZE -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_not_equal, - assert_approx, - assert_equal, - assert_greater_than, - assert_greater_than_or_equal, - assert_raises_rpc_error, - find_vout_for_address, -) -from test_framework.wallet_util import ( - calculate_input_weight, - generate_keypair, - get_generate_key, -) - -import json -import os - - -class PSBTTest(BitcoinTestFramework): - def add_options(self, parser): - self.add_wallet_options(parser) - - def set_test_params(self): - self.num_nodes = 3 - self.extra_args = [ - ["-walletrbf=1", "-addresstype=bech32", "-changetype=bech32"], #TODO: Remove address type restrictions once taproot has psbt extensions - ["-walletrbf=0", "-changetype=legacy"], - [] - ] - # whitelist peers to speed up tx relay / mempool sync - for args in self.extra_args: - args.append("-whitelist=noban@127.0.0.1") - self.supports_cli = False - - def skip_test_if_missing_module(self): - self.skip_if_no_wallet() - - def test_psbt_incomplete_after_invalid_modification(self): - self.log.info("Check that PSBT is correctly marked as incomplete after invalid modification") - node = self.nodes[2] - wallet = node.get_wallet_rpc(self.default_wallet_name) - address = wallet.getnewaddress() - wallet.sendtoaddress(address=address, amount=1.0) - self.generate(node, nblocks=1, sync_fun=lambda: self.sync_all(self.nodes[:2])) - - utxos = wallet.listunspent(addresses=[address]) - psbt = wallet.createpsbt([{"txid": utxos[0]["txid"], "vout": utxos[0]["vout"]}], [{wallet.getnewaddress(): 0.9999}]) - signed_psbt = wallet.walletprocesspsbt(psbt)["psbt"] - - # Modify the raw transaction by changing the output address, so the signature is no longer valid - signed_psbt_obj = PSBT.from_base64(signed_psbt) - substitute_addr = wallet.getnewaddress() - raw = wallet.createrawtransaction([{"txid": utxos[0]["txid"], "vout": utxos[0]["vout"]}], [{substitute_addr: 0.9999}]) - signed_psbt_obj.g.map[PSBT_GLOBAL_UNSIGNED_TX] = bytes.fromhex(raw) - - # Check that the walletprocesspsbt call succeeds but also recognizes that the transaction is not complete - signed_psbt_incomplete = wallet.walletprocesspsbt(signed_psbt_obj.to_base64(), finalize=False) - assert signed_psbt_incomplete["complete"] is False - - def test_utxo_conversion(self): - self.log.info("Check that non-witness UTXOs are removed for segwit v1+ inputs") - mining_node = self.nodes[2] - offline_node = self.nodes[0] - online_node = self.nodes[1] - - # Disconnect offline node from others - # Topology of test network is linear, so this one call is enough - self.disconnect_nodes(0, 1) - - # Create watchonly on online_node - online_node.createwallet(wallet_name='wonline', disable_private_keys=True) - wonline = online_node.get_wallet_rpc('wonline') - 
w2 = online_node.get_wallet_rpc(self.default_wallet_name) - - # Mine a transaction that credits the offline address - offline_addr = offline_node.getnewaddress(address_type="bech32m") - online_addr = w2.getnewaddress(address_type="bech32m") - wonline.importaddress(offline_addr, "", False) - mining_wallet = mining_node.get_wallet_rpc(self.default_wallet_name) - mining_wallet.sendtoaddress(address=offline_addr, amount=1.0) - self.generate(mining_node, nblocks=1, sync_fun=lambda: self.sync_all([online_node, mining_node])) - - # Construct an unsigned PSBT on the online node - utxos = wonline.listunspent(addresses=[offline_addr]) - raw = wonline.createrawtransaction([{"txid":utxos[0]["txid"], "vout":utxos[0]["vout"]}],[{online_addr:0.9999}]) - psbt = wonline.walletprocesspsbt(online_node.converttopsbt(raw))["psbt"] - assert not "not_witness_utxo" in mining_node.decodepsbt(psbt)["inputs"][0] - - # add non-witness UTXO manually - psbt_new = PSBT.from_base64(psbt) - prev_tx = wonline.gettransaction(utxos[0]["txid"])["hex"] - psbt_new.i[0].map[PSBT_IN_NON_WITNESS_UTXO] = bytes.fromhex(prev_tx) - assert "non_witness_utxo" in mining_node.decodepsbt(psbt_new.to_base64())["inputs"][0] - - # Have the offline node sign the PSBT (which will remove the non-witness UTXO) - signed_psbt = offline_node.walletprocesspsbt(psbt_new.to_base64()) - assert not "non_witness_utxo" in mining_node.decodepsbt(signed_psbt["psbt"])["inputs"][0] - - # Make sure we can mine the resulting transaction - txid = mining_node.sendrawtransaction(signed_psbt["hex"]) - self.generate(mining_node, nblocks=1, sync_fun=lambda: self.sync_all([online_node, mining_node])) - assert_equal(online_node.gettxout(txid,0)["confirmations"], 1) - - wonline.unloadwallet() - - # Reconnect - self.connect_nodes(1, 0) - self.connect_nodes(0, 2) - - def test_input_confs_control(self): - self.nodes[0].createwallet("minconf") - wallet = self.nodes[0].get_wallet_rpc("minconf") - - # Fund the wallet with different chain heights - for _ in range(2): - self.nodes[1].sendmany("", {wallet.getnewaddress():1, wallet.getnewaddress():1}) - self.generate(self.nodes[1], 1) - - unconfirmed_txid = wallet.sendtoaddress(wallet.getnewaddress(), 0.5) - - self.log.info("Crafting PSBT using an unconfirmed input") - target_address = self.nodes[1].getnewaddress() - psbtx1 = wallet.walletcreatefundedpsbt([], {target_address: 0.1}, 0, {'fee_rate': 1, 'maxconf': 0})['psbt'] - - # Make sure we only had the one input - tx1_inputs = self.nodes[0].decodepsbt(psbtx1)['tx']['vin'] - assert_equal(len(tx1_inputs), 1) - - utxo1 = tx1_inputs[0] - assert_equal(unconfirmed_txid, utxo1['txid']) - - signed_tx1 = wallet.walletprocesspsbt(psbtx1) - txid1 = self.nodes[0].sendrawtransaction(signed_tx1['hex']) - - mempool = self.nodes[0].getrawmempool() - assert txid1 in mempool - - self.log.info("Fail to craft a new PSBT that sends more funds with add_inputs = False") - assert_raises_rpc_error(-4, "The preselected coins total amount does not cover the transaction target. 
Please allow other inputs to be automatically selected or include more coins manually", wallet.walletcreatefundedpsbt, [{'txid': utxo1['txid'], 'vout': utxo1['vout']}], {target_address: 1}, 0, {'add_inputs': False}) - - self.log.info("Fail to craft a new PSBT with minconf above highest one") - assert_raises_rpc_error(-4, "Insufficient funds", wallet.walletcreatefundedpsbt, [{'txid': utxo1['txid'], 'vout': utxo1['vout']}], {target_address: 1}, 0, {'add_inputs': True, 'minconf': 3, 'fee_rate': 10}) - - self.log.info("Fail to broadcast a new PSBT with maxconf 0 due to BIP125 rules to verify it actually chose unconfirmed outputs") - psbt_invalid = wallet.walletcreatefundedpsbt([{'txid': utxo1['txid'], 'vout': utxo1['vout']}], {target_address: 1}, 0, {'add_inputs': True, 'maxconf': 0, 'fee_rate': 10})['psbt'] - signed_invalid = wallet.walletprocesspsbt(psbt_invalid) - assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, signed_invalid['hex']) - - self.log.info("Craft a replacement adding inputs with highest confs possible") - psbtx2 = wallet.walletcreatefundedpsbt([{'txid': utxo1['txid'], 'vout': utxo1['vout']}], {target_address: 1}, 0, {'add_inputs': True, 'minconf': 2, 'fee_rate': 10})['psbt'] - tx2_inputs = self.nodes[0].decodepsbt(psbtx2)['tx']['vin'] - assert_greater_than_or_equal(len(tx2_inputs), 2) - for vin in tx2_inputs: - if vin['txid'] != unconfirmed_txid: - assert_greater_than_or_equal(self.nodes[0].gettxout(vin['txid'], vin['vout'])['confirmations'], 2) - - signed_tx2 = wallet.walletprocesspsbt(psbtx2) - txid2 = self.nodes[0].sendrawtransaction(signed_tx2['hex']) - - mempool = self.nodes[0].getrawmempool() - assert txid1 not in mempool - assert txid2 in mempool - - wallet.unloadwallet() - - def assert_change_type(self, psbtx, expected_type): - """Assert that the given PSBT has a change output with the given type.""" - - # The decodepsbt RPC is stateless and independent of any settings, we can always just call it on the first node - decoded_psbt = self.nodes[0].decodepsbt(psbtx["psbt"]) - changepos = psbtx["changepos"] - assert_equal(decoded_psbt["tx"]["vout"][changepos]["scriptPubKey"]["type"], expected_type) - - def run_test(self): - # Create and fund a raw tx for sending 10 BTC - psbtx1 = self.nodes[0].walletcreatefundedpsbt([], {self.nodes[2].getnewaddress():10})['psbt'] - - self.log.info("Test for invalid maximum transaction weights") - dest_arg = [{self.nodes[0].getnewaddress(): 1}] - min_tx_weight = MIN_STANDARD_TX_NONWITNESS_SIZE * WITNESS_SCALE_FACTOR - assert_raises_rpc_error(-4, f"Maximum transaction weight must be between {min_tx_weight} and {MAX_STANDARD_TX_WEIGHT}", self.nodes[0].walletcreatefundedpsbt, [], dest_arg, 0, {"max_tx_weight": -1}) - assert_raises_rpc_error(-4, f"Maximum transaction weight must be between {min_tx_weight} and {MAX_STANDARD_TX_WEIGHT}", self.nodes[0].walletcreatefundedpsbt, [], dest_arg, 0, {"max_tx_weight": 0}) - assert_raises_rpc_error(-4, f"Maximum transaction weight must be between {min_tx_weight} and {MAX_STANDARD_TX_WEIGHT}", self.nodes[0].walletcreatefundedpsbt, [], dest_arg, 0, {"max_tx_weight": MAX_STANDARD_TX_WEIGHT + 1}) - - # Base transaction vsize: version (4) + locktime (4) + input count (1) + witness overhead (1) = 10 vbytes - base_tx_vsize = 10 - # One P2WPKH output vsize: outpoint (31 vbytes) - p2wpkh_output_vsize = 31 - # 1 vbyte for output count - output_count = 1 - tx_weight_without_inputs = (base_tx_vsize + output_count + p2wpkh_output_vsize) * WITNESS_SCALE_FACTOR - # 
min_tx_weight is greater than transaction weight without inputs - assert_greater_than(min_tx_weight, tx_weight_without_inputs) - - # In order to test for when the passed max weight is less than the transaction weight without inputs - # Define destination with two outputs. - dest_arg_large = [{self.nodes[0].getnewaddress(): 1}, {self.nodes[0].getnewaddress(): 1}] - large_tx_vsize_without_inputs = base_tx_vsize + output_count + (p2wpkh_output_vsize * 2) - large_tx_weight_without_inputs = large_tx_vsize_without_inputs * WITNESS_SCALE_FACTOR - assert_greater_than(large_tx_weight_without_inputs, min_tx_weight) - # Test for max_tx_weight less than Transaction weight without inputs - assert_raises_rpc_error(-4, "Maximum transaction weight is less than transaction weight without inputs", self.nodes[0].walletcreatefundedpsbt, [], dest_arg_large, 0, {"max_tx_weight": min_tx_weight}) - assert_raises_rpc_error(-4, "Maximum transaction weight is less than transaction weight without inputs", self.nodes[0].walletcreatefundedpsbt, [], dest_arg_large, 0, {"max_tx_weight": large_tx_weight_without_inputs}) - - # Test for max_tx_weight just enough to include inputs but not change output - assert_raises_rpc_error(-4, "Maximum transaction weight is too low, can not accommodate change output", self.nodes[0].walletcreatefundedpsbt, [], dest_arg_large, 0, {"max_tx_weight": (large_tx_vsize_without_inputs + 1) * WITNESS_SCALE_FACTOR}) - self.log.info("Test that a funded PSBT is always faithful to max_tx_weight option") - large_tx_vsize_with_change = large_tx_vsize_without_inputs + p2wpkh_output_vsize - # It's enough but won't accommodate selected input size - assert_raises_rpc_error(-4, "The inputs size exceeds the maximum weight", self.nodes[0].walletcreatefundedpsbt, [], dest_arg_large, 0, {"max_tx_weight": (large_tx_vsize_with_change) * WITNESS_SCALE_FACTOR}) - - max_tx_weight_sufficient = 1000 # 1k vbytes is enough - psbt = self.nodes[0].walletcreatefundedpsbt(outputs=dest_arg,locktime=0, options={"max_tx_weight": max_tx_weight_sufficient})["psbt"] - weight = self.nodes[0].decodepsbt(psbt)["tx"]["weight"] - # ensure the transaction's weight is below the specified max_tx_weight. - assert_greater_than_or_equal(max_tx_weight_sufficient, weight) - - # If inputs are specified, do not automatically add more: - utxo1 = self.nodes[0].listunspent()[0] - assert_raises_rpc_error(-4, "The preselected coins total amount does not cover the transaction target. 
" - "Please allow other inputs to be automatically selected or include more coins manually", - self.nodes[0].walletcreatefundedpsbt, [{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():90}) - - psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():90}, 0, {"add_inputs": True})['psbt'] - assert_equal(len(self.nodes[0].decodepsbt(psbtx1)['tx']['vin']), 2) - - # Inputs argument can be null - self.nodes[0].walletcreatefundedpsbt(None, {self.nodes[2].getnewaddress():10}) - - # Node 1 should not be able to add anything to it but still return the psbtx same as before - psbtx = self.nodes[1].walletprocesspsbt(psbtx1)['psbt'] - assert_equal(psbtx1, psbtx) - - # Node 0 should not be able to sign the transaction with the wallet is locked - self.nodes[0].encryptwallet("password") - assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].walletprocesspsbt, psbtx) - - # Node 0 should be able to process without signing though - unsigned_tx = self.nodes[0].walletprocesspsbt(psbtx, False) - assert_equal(unsigned_tx['complete'], False) - - self.nodes[0].walletpassphrase(passphrase="password", timeout=1000000) - - # Sign the transaction but don't finalize - processed_psbt = self.nodes[0].walletprocesspsbt(psbt=psbtx, finalize=False) - assert "hex" not in processed_psbt - signed_psbt = processed_psbt['psbt'] - - # Finalize and send - finalized_hex = self.nodes[0].finalizepsbt(signed_psbt)['hex'] - self.nodes[0].sendrawtransaction(finalized_hex) - - # Alternative method: sign AND finalize in one command - processed_finalized_psbt = self.nodes[0].walletprocesspsbt(psbt=psbtx, finalize=True) - finalized_psbt = processed_finalized_psbt['psbt'] - finalized_psbt_hex = processed_finalized_psbt['hex'] - assert signed_psbt != finalized_psbt - assert finalized_psbt_hex == finalized_hex - - # Manually selected inputs can be locked: - assert_equal(len(self.nodes[0].listlockunspent()), 0) - utxo1 = self.nodes[0].listunspent()[0] - psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0,{"lockUnspents": True})["psbt"] - assert_equal(len(self.nodes[0].listlockunspent()), 1) - - # Locks are ignored for manually selected inputs - self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0) - - # Create p2sh, p2wpkh, and p2wsh addresses - pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey'] - pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey'] - pubkey2 = self.nodes[2].getaddressinfo(self.nodes[2].getnewaddress())['pubkey'] - - # Setup watchonly wallets - self.nodes[2].createwallet(wallet_name='wmulti', disable_private_keys=True) - wmulti = self.nodes[2].get_wallet_rpc('wmulti') - - # Create all the addresses - p2sh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "legacy")['address'] - p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "bech32")['address'] - p2sh_p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "p2sh-segwit")['address'] - if not self.options.descriptors: - wmulti.importaddress(p2sh) - wmulti.importaddress(p2wsh) - wmulti.importaddress(p2sh_p2wsh) - p2wpkh = self.nodes[1].getnewaddress("", "bech32") - p2pkh = self.nodes[1].getnewaddress("", "legacy") - p2sh_p2wpkh = self.nodes[1].getnewaddress("", "p2sh-segwit") 
- - # fund those addresses - rawtx = self.nodes[0].createrawtransaction([], {p2sh:10, p2wsh:10, p2wpkh:10, p2sh_p2wsh:10, p2sh_p2wpkh:10, p2pkh:10}) - rawtx = self.nodes[0].fundrawtransaction(rawtx, {"changePosition":3}) - signed_tx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])['hex'] - txid = self.nodes[0].sendrawtransaction(signed_tx) - self.generate(self.nodes[0], 6) - - # Find the output pos - p2sh_pos = -1 - p2wsh_pos = -1 - p2wpkh_pos = -1 - p2pkh_pos = -1 - p2sh_p2wsh_pos = -1 - p2sh_p2wpkh_pos = -1 - decoded = self.nodes[0].decoderawtransaction(signed_tx) - for out in decoded['vout']: - if out['scriptPubKey']['address'] == p2sh: - p2sh_pos = out['n'] - elif out['scriptPubKey']['address'] == p2wsh: - p2wsh_pos = out['n'] - elif out['scriptPubKey']['address'] == p2wpkh: - p2wpkh_pos = out['n'] - elif out['scriptPubKey']['address'] == p2sh_p2wsh: - p2sh_p2wsh_pos = out['n'] - elif out['scriptPubKey']['address'] == p2sh_p2wpkh: - p2sh_p2wpkh_pos = out['n'] - elif out['scriptPubKey']['address'] == p2pkh: - p2pkh_pos = out['n'] - - inputs = [{"txid": txid, "vout": p2wpkh_pos}, {"txid": txid, "vout": p2sh_p2wpkh_pos}, {"txid": txid, "vout": p2pkh_pos}] - outputs = [{self.nodes[1].getnewaddress(): 29.99}] - - # spend single key from node 1 - created_psbt = self.nodes[1].walletcreatefundedpsbt(inputs, outputs) - walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(created_psbt['psbt']) - # Make sure it has both types of UTXOs - decoded = self.nodes[1].decodepsbt(walletprocesspsbt_out['psbt']) - assert 'non_witness_utxo' in decoded['inputs'][0] - assert 'witness_utxo' in decoded['inputs'][0] - # Check decodepsbt fee calculation (input values shall only be counted once per UTXO) - assert_equal(decoded['fee'], created_psbt['fee']) - assert_equal(walletprocesspsbt_out['complete'], True) - self.nodes[1].sendrawtransaction(walletprocesspsbt_out['hex']) - - self.log.info("Test walletcreatefundedpsbt fee rate of 10000 sat/vB and 0.1 BTC/kvB produces a total fee at or slightly below -maxtxfee (~0.05290000)") - res1 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": 10000, "add_inputs": True}) - assert_approx(res1["fee"], 0.055, 0.005) - res2 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": "0.1", "add_inputs": True}) - assert_approx(res2["fee"], 0.055, 0.005) - - self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed, e.g. a fee_rate under 1 sat/vB is allowed") - res3 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": "0.999", "add_inputs": True}) - assert_approx(res3["fee"], 0.00000381, 0.0000001) - res4 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": 0.00000999, "add_inputs": True}) - assert_approx(res4["fee"], 0.00000381, 0.0000001) - - self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed and that funding non-standard 'zero-fee' transactions is valid") - for param, zero_value in product(["fee_rate", "feeRate"], [0, 0.000, 0.00000000, "0", "0.000", "0.00000000"]): - assert_equal(0, self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {param: zero_value, "add_inputs": True})["fee"]) - - self.log.info("Test invalid fee rate settings") - for param, value in {("fee_rate", 100000), ("feeRate", 1)}: - assert_raises_rpc_error(-4, "Fee exceeds maximum configured by user (e.g. 
-maxtxfee, maxfeerate)", - self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: value, "add_inputs": True}) - assert_raises_rpc_error(-3, "Amount out of range", - self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: -1, "add_inputs": True}) - assert_raises_rpc_error(-3, "Amount is not a number or string", - self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: {"foo": "bar"}, "add_inputs": True}) - # Test fee rate values that don't pass fixed-point parsing checks. - for invalid_value in ["", 0.000000001, 1e-09, 1.111111111, 1111111111111111, "31.999999999999999999999"]: - assert_raises_rpc_error(-3, "Invalid amount", - self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: invalid_value, "add_inputs": True}) - # Test fee_rate values that cannot be represented in sat/vB. - for invalid_value in [0.0001, 0.00000001, 0.00099999, 31.99999999]: - assert_raises_rpc_error(-3, "Invalid amount", - self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": invalid_value, "add_inputs": True}) - - self.log.info("- raises RPC error if both feeRate and fee_rate are passed") - assert_raises_rpc_error(-8, "Cannot specify both fee_rate (sat/vB) and feeRate (BTC/kvB)", - self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": 0.1, "feeRate": 0.1, "add_inputs": True}) - - self.log.info("- raises RPC error if both feeRate and estimate_mode passed") - assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and feeRate", - self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": "economical", "feeRate": 0.1, "add_inputs": True}) - - for param in ["feeRate", "fee_rate"]: - self.log.info("- raises RPC error if both {} and conf_target are passed".format(param)) - assert_raises_rpc_error(-8, "Cannot specify both conf_target and {}. 
Please provide either a confirmation " - "target in blocks for automatic fee estimation, or an explicit fee rate.".format(param), - self.nodes[1].walletcreatefundedpsbt ,inputs, outputs, 0, {param: 1, "conf_target": 1, "add_inputs": True}) - - self.log.info("- raises RPC error if both fee_rate and estimate_mode are passed") - assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and fee_rate", - self.nodes[1].walletcreatefundedpsbt ,inputs, outputs, 0, {"fee_rate": 1, "estimate_mode": "economical", "add_inputs": True}) - - self.log.info("- raises RPC error with invalid estimate_mode settings") - for k, v in {"number": 42, "object": {"foo": "bar"}}.items(): - assert_raises_rpc_error(-3, f"JSON value of type {k} for field estimate_mode is not of expected type string", - self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": v, "conf_target": 0.1, "add_inputs": True}) - for mode in ["", "foo", Decimal("3.141592")]: - assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"', - self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": 0.1, "add_inputs": True}) - - self.log.info("- raises RPC error with invalid conf_target settings") - for mode in ["unset", "economical", "conservative"]: - self.log.debug("{}".format(mode)) - for k, v in {"string": "", "object": {"foo": "bar"}}.items(): - assert_raises_rpc_error(-3, f"JSON value of type {k} for field conf_target is not of expected type number", - self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": v, "add_inputs": True}) - for n in [-1, 0, 1009]: - assert_raises_rpc_error(-8, "Invalid conf_target, must be between 1 and 1008", # max value of 1008 per src/policy/fees.h - self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": n, "add_inputs": True}) - - self.log.info("Test walletcreatefundedpsbt with too-high fee rate produces total fee well above -maxtxfee and raises RPC error") - # previously this was silently capped at -maxtxfee - for bool_add, outputs_array in {True: outputs, False: [{self.nodes[1].getnewaddress(): 1}]}.items(): - msg = "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)" - assert_raises_rpc_error(-4, msg, self.nodes[1].walletcreatefundedpsbt, inputs, outputs_array, 0, {"fee_rate": 1000000, "add_inputs": bool_add}) - assert_raises_rpc_error(-4, msg, self.nodes[1].walletcreatefundedpsbt, inputs, outputs_array, 0, {"feeRate": 1, "add_inputs": bool_add}) - - self.log.info("Test various PSBT operations") - # partially sign multisig things with node 1 - psbtx = wmulti.walletcreatefundedpsbt(inputs=[{"txid":txid,"vout":p2wsh_pos},{"txid":txid,"vout":p2sh_pos},{"txid":txid,"vout":p2sh_p2wsh_pos}], outputs={self.nodes[1].getnewaddress():29.99}, changeAddress=self.nodes[1].getrawchangeaddress())['psbt'] - walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(psbtx) - psbtx = walletprocesspsbt_out['psbt'] - assert_equal(walletprocesspsbt_out['complete'], False) - - # Unload wmulti, we don't need it anymore - wmulti.unloadwallet() - - # partially sign with node 2. 
This should be complete and sendable - walletprocesspsbt_out = self.nodes[2].walletprocesspsbt(psbtx) - assert_equal(walletprocesspsbt_out['complete'], True) - self.nodes[2].sendrawtransaction(walletprocesspsbt_out['hex']) - - # check that walletprocesspsbt fails to decode a non-psbt - rawtx = self.nodes[1].createrawtransaction([{"txid":txid,"vout":p2wpkh_pos}], {self.nodes[1].getnewaddress():9.99}) - assert_raises_rpc_error(-22, "TX decode failed", self.nodes[1].walletprocesspsbt, rawtx) - - # Convert a non-psbt to psbt and make sure we can decode it - rawtx = self.nodes[0].createrawtransaction([], {self.nodes[1].getnewaddress():10}) - rawtx = self.nodes[0].fundrawtransaction(rawtx) - new_psbt = self.nodes[0].converttopsbt(rawtx['hex']) - self.nodes[0].decodepsbt(new_psbt) - - # Make sure that a non-psbt with signatures cannot be converted - signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex']) - assert_raises_rpc_error(-22, "Inputs must not have scriptSigs and scriptWitnesses", - self.nodes[0].converttopsbt, hexstring=signedtx['hex']) # permitsigdata=False by default - assert_raises_rpc_error(-22, "Inputs must not have scriptSigs and scriptWitnesses", - self.nodes[0].converttopsbt, hexstring=signedtx['hex'], permitsigdata=False) - assert_raises_rpc_error(-22, "Inputs must not have scriptSigs and scriptWitnesses", - self.nodes[0].converttopsbt, hexstring=signedtx['hex'], permitsigdata=False, iswitness=True) - # Unless we allow it to convert and strip signatures - self.nodes[0].converttopsbt(hexstring=signedtx['hex'], permitsigdata=True) - - # Create outputs to nodes 1 and 2 - # (note that we intentionally create two different txs here, as we want - # to check that each node is missing prevout data for one of the two - # utxos, see "should only have data for one input" test below) - node1_addr = self.nodes[1].getnewaddress() - node2_addr = self.nodes[2].getnewaddress() - utxo1 = self.create_outpoints(self.nodes[0], outputs=[{node1_addr: 13}])[0] - utxo2 = self.create_outpoints(self.nodes[0], outputs=[{node2_addr: 13}])[0] - self.generate(self.nodes[0], 6)[0] - - # Create a psbt spending outputs from nodes 1 and 2 - psbt_orig = self.nodes[0].createpsbt([utxo1, utxo2], {self.nodes[0].getnewaddress():25.999}) - - # Update psbts, should only have data for one input and not the other - psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig, False, "ALL")['psbt'] - psbt1_decoded = self.nodes[0].decodepsbt(psbt1) - assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1] - # Check that BIP32 path was added - assert "bip32_derivs" in psbt1_decoded['inputs'][0] - psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig, False, "ALL", False)['psbt'] - psbt2_decoded = self.nodes[0].decodepsbt(psbt2) - assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1] - # Check that BIP32 paths were not added - assert "bip32_derivs" not in psbt2_decoded['inputs'][1] - - # Sign PSBTs (workaround issue #18039) - psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt'] - psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt'] - - # Combine, finalize, and send the psbts - combined = self.nodes[0].combinepsbt([psbt1, psbt2]) - finalized = self.nodes[0].finalizepsbt(combined)['hex'] - self.nodes[0].sendrawtransaction(finalized) - self.generate(self.nodes[0], 6) - - # Test additional args in walletcreatepsbt - # Make sure both pre-included and funded inputs - # have the correct sequence numbers based on - # replaceable arg - block_height = self.nodes[0].getblockcount() - unspent = 
self.nodes[0].listunspent()[0] - psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"replaceable": False, "add_inputs": True}, False) - decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"]) - for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]): - assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE) - assert "bip32_derivs" not in psbt_in - assert_equal(decoded_psbt["tx"]["locktime"], block_height+2) - - # Same construction with only locktime set and RBF explicitly enabled - psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height, {"replaceable": True, "add_inputs": True}, True) - decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"]) - for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]): - assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE) - assert "bip32_derivs" in psbt_in - assert_equal(decoded_psbt["tx"]["locktime"], block_height) - - # Same construction without optional arguments - psbtx_info = self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}]) - decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"]) - for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]): - assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE) - assert "bip32_derivs" in psbt_in - assert_equal(decoded_psbt["tx"]["locktime"], 0) - - # Same construction without optional arguments, for a node with -walletrbf=0 - unspent1 = self.nodes[1].listunspent()[0] - psbtx_info = self.nodes[1].walletcreatefundedpsbt([{"txid":unspent1["txid"], "vout":unspent1["vout"]}], [{self.nodes[2].getnewaddress():unspent1["amount"]+1}], block_height, {"add_inputs": True}) - decoded_psbt = self.nodes[1].decodepsbt(psbtx_info["psbt"]) - for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]): - assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE) - assert "bip32_derivs" in psbt_in - - # Make sure change address wallet does not have P2SH innerscript access to results in success - # when attempting BnB coin selection - self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"changeAddress":self.nodes[1].getnewaddress()}, False) - - # Make sure the wallet's change type is respected by default - small_output = {self.nodes[0].getnewaddress():0.1} - psbtx_native = self.nodes[0].walletcreatefundedpsbt([], [small_output]) - self.assert_change_type(psbtx_native, "witness_v0_keyhash") - psbtx_legacy = self.nodes[1].walletcreatefundedpsbt([], [small_output]) - self.assert_change_type(psbtx_legacy, "pubkeyhash") - - # Make sure the change type of the wallet can also be overwritten - psbtx_np2wkh = self.nodes[1].walletcreatefundedpsbt([], [small_output], 0, {"change_type":"p2sh-segwit"}) - self.assert_change_type(psbtx_np2wkh, "scripthash") - - # Make sure the change type cannot be specified if a change address is given - invalid_options = {"change_type":"legacy","changeAddress":self.nodes[0].getnewaddress()} - assert_raises_rpc_error(-8, "both change address and address type options", self.nodes[0].walletcreatefundedpsbt, [], [small_output], 0, invalid_options) - - # Regression test for 14473 (mishandling of already-signed witness transaction): - psbtx_info = 
self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], 0, {"add_inputs": True}) - complete_psbt = self.nodes[0].walletprocesspsbt(psbtx_info["psbt"]) - double_processed_psbt = self.nodes[0].walletprocesspsbt(complete_psbt["psbt"]) - assert_equal(complete_psbt, double_processed_psbt) - # We don't care about the decode result, but decoding must succeed. - self.nodes[0].decodepsbt(double_processed_psbt["psbt"]) - - # Make sure unsafe inputs are included if specified - self.nodes[2].createwallet(wallet_name="unsafe") - wunsafe = self.nodes[2].get_wallet_rpc("unsafe") - self.nodes[0].sendtoaddress(wunsafe.getnewaddress(), 2) - self.sync_mempools() - assert_raises_rpc_error(-4, "Insufficient funds", wunsafe.walletcreatefundedpsbt, [], [{self.nodes[0].getnewaddress(): 1}]) - wunsafe.walletcreatefundedpsbt([], [{self.nodes[0].getnewaddress(): 1}], 0, {"include_unsafe": True}) - - # BIP 174 Test Vectors - - # Check that unknown values are just passed through - unknown_psbt = "cHNidP8BAD8CAAAAAf//////////////////////////////////////////AAAAAAD/////AQAAAAAAAAAAA2oBAAAAAAAACg8BAgMEBQYHCAkPAQIDBAUGBwgJCgsMDQ4PAAA=" - unknown_out = self.nodes[0].walletprocesspsbt(unknown_psbt)['psbt'] - assert_equal(unknown_psbt, unknown_out) - - # Open the data file - with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_psbt.json'), encoding='utf-8') as f: - d = json.load(f) - invalids = d['invalid'] - invalid_with_msgs = d["invalid_with_msg"] - valids = d['valid'] - creators = d['creator'] - signers = d['signer'] - combiners = d['combiner'] - finalizers = d['finalizer'] - extractors = d['extractor'] - - # Invalid PSBTs - for invalid in invalids: - assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decodepsbt, invalid) - for invalid in invalid_with_msgs: - psbt, msg = invalid - assert_raises_rpc_error(-22, f"TX decode failed {msg}", self.nodes[0].decodepsbt, psbt) - - # Valid PSBTs - for valid in valids: - self.nodes[0].decodepsbt(valid) - - # Creator Tests - for creator in creators: - created_tx = self.nodes[0].createpsbt(inputs=creator['inputs'], outputs=creator['outputs'], replaceable=False) - assert_equal(created_tx, creator['result']) - - # Signer tests - for i, signer in enumerate(signers): - self.nodes[2].createwallet(wallet_name="wallet{}".format(i)) - wrpc = self.nodes[2].get_wallet_rpc("wallet{}".format(i)) - for key in signer['privkeys']: - wrpc.importprivkey(key) - signed_tx = wrpc.walletprocesspsbt(signer['psbt'], True, "ALL")['psbt'] - assert_equal(signed_tx, signer['result']) - - # Combiner test - for combiner in combiners: - combined = self.nodes[2].combinepsbt(combiner['combine']) - assert_equal(combined, combiner['result']) - - # Empty combiner test - assert_raises_rpc_error(-8, "Parameter 'txs' cannot be empty", self.nodes[0].combinepsbt, []) - - # Finalizer test - for finalizer in finalizers: - finalized = self.nodes[2].finalizepsbt(finalizer['finalize'], False)['psbt'] - assert_equal(finalized, finalizer['result']) - - # Extractor test - for extractor in extractors: - extracted = self.nodes[2].finalizepsbt(extractor['extract'], True)['hex'] - assert_equal(extracted, extractor['result']) - - # Unload extra wallets - for i, signer in enumerate(signers): - self.nodes[2].unloadwallet("wallet{}".format(i)) - - if self.options.descriptors: - self.test_utxo_conversion() - self.test_psbt_incomplete_after_invalid_modification() - - self.test_input_confs_control() - - # Test that 
psbts with p2pkh outputs are created properly - p2pkh = self.nodes[0].getnewaddress(address_type='legacy') - psbt = self.nodes[1].walletcreatefundedpsbt([], [{p2pkh : 1}], 0, {"includeWatching" : True}, True) - self.nodes[0].decodepsbt(psbt['psbt']) - - # Test decoding error: invalid base64 - assert_raises_rpc_error(-22, "TX decode failed invalid base64", self.nodes[0].decodepsbt, ";definitely not base64;") - - # Send to all types of addresses - addr1 = self.nodes[1].getnewaddress("", "bech32") - addr2 = self.nodes[1].getnewaddress("", "legacy") - addr3 = self.nodes[1].getnewaddress("", "p2sh-segwit") - utxo1, utxo2, utxo3 = self.create_outpoints(self.nodes[1], outputs=[{addr1: 11}, {addr2: 11}, {addr3: 11}]) - self.sync_all() - - def test_psbt_input_keys(psbt_input, keys): - """Check that the psbt input has only the expected keys.""" - assert_equal(set(keys), set(psbt_input.keys())) - - # Create a PSBT. None of the inputs are filled initially - psbt = self.nodes[1].createpsbt([utxo1, utxo2, utxo3], {self.nodes[0].getnewaddress():32.999}) - decoded = self.nodes[1].decodepsbt(psbt) - test_psbt_input_keys(decoded['inputs'][0], []) - test_psbt_input_keys(decoded['inputs'][1], []) - test_psbt_input_keys(decoded['inputs'][2], []) - - # Update a PSBT with UTXOs from the node - # Bech32 inputs should be filled with witness UTXO. Other inputs should not be filled because they are non-witness - updated = self.nodes[1].utxoupdatepsbt(psbt) - decoded = self.nodes[1].decodepsbt(updated) - test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'non_witness_utxo']) - test_psbt_input_keys(decoded['inputs'][1], ['non_witness_utxo']) - test_psbt_input_keys(decoded['inputs'][2], ['non_witness_utxo']) - - # Try again, now while providing descriptors, making P2SH-segwit work, and causing bip32_derivs and redeem_script to be filled in - descs = [self.nodes[1].getaddressinfo(addr)['desc'] for addr in [addr1,addr2,addr3]] - updated = self.nodes[1].utxoupdatepsbt(psbt=psbt, descriptors=descs) - decoded = self.nodes[1].decodepsbt(updated) - test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'non_witness_utxo', 'bip32_derivs']) - test_psbt_input_keys(decoded['inputs'][1], ['non_witness_utxo', 'bip32_derivs']) - test_psbt_input_keys(decoded['inputs'][2], ['non_witness_utxo','witness_utxo', 'bip32_derivs', 'redeem_script']) - - # Two PSBTs with a common input should not be joinable - psbt1 = self.nodes[1].createpsbt([utxo1], {self.nodes[0].getnewaddress():Decimal('10.999')}) - assert_raises_rpc_error(-8, "exists in multiple PSBTs", self.nodes[1].joinpsbts, [psbt1, updated]) - - # Join two distinct PSBTs - addr4 = self.nodes[1].getnewaddress("", "p2sh-segwit") - utxo4 = self.create_outpoints(self.nodes[0], outputs=[{addr4: 5}])[0] - self.generate(self.nodes[0], 6) - psbt2 = self.nodes[1].createpsbt([utxo4], {self.nodes[0].getnewaddress():Decimal('4.999')}) - psbt2 = self.nodes[1].walletprocesspsbt(psbt2)['psbt'] - psbt2_decoded = self.nodes[0].decodepsbt(psbt2) - assert "final_scriptwitness" in psbt2_decoded['inputs'][0] and "final_scriptSig" in psbt2_decoded['inputs'][0] - joined = self.nodes[0].joinpsbts([psbt, psbt2]) - joined_decoded = self.nodes[0].decodepsbt(joined) - assert len(joined_decoded['inputs']) == 4 and len(joined_decoded['outputs']) == 2 and "final_scriptwitness" not in joined_decoded['inputs'][3] and "final_scriptSig" not in joined_decoded['inputs'][3] - - # Check that joining shuffles the inputs and outputs - # 10 attempts should be enough to get a shuffled join - shuffled = False - 
for _ in range(10): - shuffled_joined = self.nodes[0].joinpsbts([psbt, psbt2]) - shuffled |= joined != shuffled_joined - if shuffled: - break - assert shuffled - - # Newly created PSBT needs UTXOs and updating - addr = self.nodes[1].getnewaddress("", "p2sh-segwit") - utxo = self.create_outpoints(self.nodes[0], outputs=[{addr: 7}])[0] - addrinfo = self.nodes[1].getaddressinfo(addr) - self.generate(self.nodes[0], 6)[0] - psbt = self.nodes[1].createpsbt([utxo], {self.nodes[0].getnewaddress("", "p2sh-segwit"):Decimal('6.999')}) - analyzed = self.nodes[0].analyzepsbt(psbt) - assert not analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'updater' and analyzed['next'] == 'updater' - - # After update with wallet, only needs signing - updated = self.nodes[1].walletprocesspsbt(psbt, False, 'ALL', True)['psbt'] - analyzed = self.nodes[0].analyzepsbt(updated) - assert analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'signer' and analyzed['next'] == 'signer' and analyzed['inputs'][0]['missing']['signatures'][0] == addrinfo['embedded']['witness_program'] - - # Check fee and size things - assert analyzed['fee'] == Decimal('0.001') and analyzed['estimated_vsize'] == 134 and analyzed['estimated_feerate'] == Decimal('0.00746268') - - # After signing and finalizing, needs extracting - signed = self.nodes[1].walletprocesspsbt(updated)['psbt'] - analyzed = self.nodes[0].analyzepsbt(signed) - assert analyzed['inputs'][0]['has_utxo'] and analyzed['inputs'][0]['is_final'] and analyzed['next'] == 'extractor' - - self.log.info("PSBT spending unspendable outputs should have error message and Creator as next") - analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWAEHYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFv8/wADXYP/7//////8JxOh0LR2HAI8AAAAAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHEAABAACAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHENkMak8AAAAA') - assert_equal(analysis['next'], 'creator') - assert_equal(analysis['error'], 'PSBT is not valid. Input 0 spends unspendable output') - - self.log.info("PSBT with invalid values should have error message and Creator as next") - analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8AgIFq49AHABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA') - assert_equal(analysis['next'], 'creator') - assert_equal(analysis['error'], 'PSBT is not valid. 
Input 0 has invalid value') - - self.log.info("PSBT with signed, but not finalized, inputs should have Finalizer as next") - analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAZYezcxdnbXoQCmrD79t/LzDgtUo9ERqixk8wgioAobrAAAAAAD9////AlDDAAAAAAAAFgAUy/UxxZuzZswcmFnN/E9DGSiHLUsuGPUFAAAAABYAFLsH5o0R38wXx+X2cCosTMCZnQ4baAAAAAABAR8A4fUFAAAAABYAFOBI2h5thf3+Lflb2LGCsVSZwsltIgIC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnJHMEQCIGx7zKcMIGr7cEES9BR4Kdt/pzPTK3fKWcGyCJXb7MVnAiALOBgqlMH4GbC1HDh/HmylmO54fyEy4lKde7/BT/PWxwEBAwQBAAAAIgYC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnIYDwVpQ1QAAIABAACAAAAAgAAAAAAAAAAAAAAiAgL+CIiB59NSCssOJRGiMYQK1chahgAaaJpIXE41Cyir+xgPBWlDVAAAgAEAAIAAAACAAQAAAAAAAAAA') - assert_equal(analysis['next'], 'finalizer') - - analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgCAgWrj0AcAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8A8gUqAQAAABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA') - assert_equal(analysis['next'], 'creator') - assert_equal(analysis['error'], 'PSBT is not valid. Output amount invalid') - - assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].analyzepsbt, "cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==") - - assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].walletprocesspsbt, "cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==") - - self.log.info("Test that we can fund psbts with external inputs specified") - - privkey, _ = generate_keypair(wif=True) - - self.nodes[1].createwallet("extfund") - wallet = self.nodes[1].get_wallet_rpc("extfund") - - # Make a weird but signable script. 
sh(wsh(pkh())) descriptor accomplishes this - desc = descsum_create("sh(wsh(pkh({})))".format(privkey)) - if self.options.descriptors: - res = self.nodes[0].importdescriptors([{"desc": desc, "timestamp": "now"}]) - else: - res = self.nodes[0].importmulti([{"desc": desc, "timestamp": "now"}]) - assert res[0]["success"] - addr = self.nodes[0].deriveaddresses(desc)[0] - addr_info = self.nodes[0].getaddressinfo(addr) - - self.nodes[0].sendtoaddress(addr, 10) - self.nodes[0].sendtoaddress(wallet.getnewaddress(), 10) - self.generate(self.nodes[0], 6) - ext_utxo = self.nodes[0].listunspent(addresses=[addr])[0] - - # An external input without solving data should result in an error - assert_raises_rpc_error(-4, "Not solvable pre-selected input COutPoint(%s, %s)" % (ext_utxo["txid"][0:10], ext_utxo["vout"]), wallet.walletcreatefundedpsbt, [ext_utxo], {self.nodes[0].getnewaddress(): 15}) - - # But funding should work when the solving data is provided - psbt = wallet.walletcreatefundedpsbt([ext_utxo], {self.nodes[0].getnewaddress(): 15}, 0, {"add_inputs": True, "solving_data": {"pubkeys": [addr_info['pubkey']], "scripts": [addr_info["embedded"]["scriptPubKey"], addr_info["embedded"]["embedded"]["scriptPubKey"]]}}) - signed = wallet.walletprocesspsbt(psbt['psbt']) - assert not signed['complete'] - signed = self.nodes[0].walletprocesspsbt(signed['psbt']) - assert signed['complete'] - - psbt = wallet.walletcreatefundedpsbt([ext_utxo], {self.nodes[0].getnewaddress(): 15}, 0, {"add_inputs": True, "solving_data":{"descriptors": [desc]}}) - signed = wallet.walletprocesspsbt(psbt['psbt']) - assert not signed['complete'] - signed = self.nodes[0].walletprocesspsbt(signed['psbt']) - assert signed['complete'] - final = signed['hex'] - - dec = self.nodes[0].decodepsbt(signed["psbt"]) - for i, txin in enumerate(dec["tx"]["vin"]): - if txin["txid"] == ext_utxo["txid"] and txin["vout"] == ext_utxo["vout"]: - input_idx = i - break - psbt_in = dec["inputs"][input_idx] - scriptsig_hex = psbt_in["final_scriptSig"]["hex"] if "final_scriptSig" in psbt_in else "" - witness_stack_hex = psbt_in["final_scriptwitness"] if "final_scriptwitness" in psbt_in else None - input_weight = calculate_input_weight(scriptsig_hex, witness_stack_hex) - low_input_weight = input_weight // 2 - high_input_weight = input_weight * 2 - - # Input weight error conditions - assert_raises_rpc_error( - -8, - "Input weights should be specified in inputs rather than in options.", - wallet.walletcreatefundedpsbt, - inputs=[ext_utxo], - outputs={self.nodes[0].getnewaddress(): 15}, - options={"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 1000}]} - ) - - # Funding should also work if the input weight is provided - psbt = wallet.walletcreatefundedpsbt( - inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": input_weight}], - outputs={self.nodes[0].getnewaddress(): 15}, - add_inputs=True, - ) - signed = wallet.walletprocesspsbt(psbt["psbt"]) - signed = self.nodes[0].walletprocesspsbt(signed["psbt"]) - final = signed["hex"] - assert self.nodes[0].testmempoolaccept([final])[0]["allowed"] - # Reducing the weight should have a lower fee - psbt2 = wallet.walletcreatefundedpsbt( - inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": low_input_weight}], - outputs={self.nodes[0].getnewaddress(): 15}, - add_inputs=True, - ) - assert_greater_than(psbt["fee"], psbt2["fee"]) - # Increasing the weight should have a higher fee - psbt2 = wallet.walletcreatefundedpsbt( - inputs=[{"txid": ext_utxo["txid"], 
"vout": ext_utxo["vout"], "weight": high_input_weight}], - outputs={self.nodes[0].getnewaddress(): 15}, - add_inputs=True, - ) - assert_greater_than(psbt2["fee"], psbt["fee"]) - # The provided weight should override the calculated weight when solving data is provided - psbt3 = wallet.walletcreatefundedpsbt( - inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}], - outputs={self.nodes[0].getnewaddress(): 15}, - add_inputs=True, solving_data={"descriptors": [desc]}, - ) - assert_equal(psbt2["fee"], psbt3["fee"]) - - # Import the external utxo descriptor so that we can sign for it from the test wallet - if self.options.descriptors: - res = wallet.importdescriptors([{"desc": desc, "timestamp": "now"}]) - else: - res = wallet.importmulti([{"desc": desc, "timestamp": "now"}]) - assert res[0]["success"] - # The provided weight should override the calculated weight for a wallet input - psbt3 = wallet.walletcreatefundedpsbt( - inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}], - outputs={self.nodes[0].getnewaddress(): 15}, - add_inputs=True, - ) - assert_equal(psbt2["fee"], psbt3["fee"]) - - self.log.info("Test signing inputs that the wallet has keys for but is not watching the scripts") - self.nodes[1].createwallet(wallet_name="scriptwatchonly", disable_private_keys=True) - watchonly = self.nodes[1].get_wallet_rpc("scriptwatchonly") - - privkey, pubkey = generate_keypair(wif=True) - - desc = descsum_create("wsh(pkh({}))".format(pubkey.hex())) - if self.options.descriptors: - res = watchonly.importdescriptors([{"desc": desc, "timestamp": "now"}]) - else: - res = watchonly.importmulti([{"desc": desc, "timestamp": "now"}]) - assert res[0]["success"] - addr = self.nodes[0].deriveaddresses(desc)[0] - self.nodes[0].sendtoaddress(addr, 10) - self.generate(self.nodes[0], 1) - self.nodes[0].importprivkey(privkey) - - psbt = watchonly.sendall([wallet.getnewaddress()])["psbt"] - signed_tx = self.nodes[0].walletprocesspsbt(psbt) - self.nodes[0].sendrawtransaction(signed_tx["hex"]) - - # Same test but for taproot - if self.options.descriptors: - privkey, pubkey = generate_keypair(wif=True) - - desc = descsum_create("tr({},pk({}))".format(H_POINT, pubkey.hex())) - res = watchonly.importdescriptors([{"desc": desc, "timestamp": "now"}]) - assert res[0]["success"] - addr = self.nodes[0].deriveaddresses(desc)[0] - self.nodes[0].sendtoaddress(addr, 10) - self.generate(self.nodes[0], 1) - self.nodes[0].importdescriptors([{"desc": descsum_create("tr({})".format(privkey)), "timestamp":"now"}]) - - psbt = watchonly.sendall([wallet.getnewaddress(), addr])["psbt"] - processed_psbt = self.nodes[0].walletprocesspsbt(psbt) - txid = self.nodes[0].sendrawtransaction(processed_psbt["hex"]) - vout = find_vout_for_address(self.nodes[0], txid, addr) - - # Make sure tap tree is in psbt - parsed_psbt = PSBT.from_base64(psbt) - assert_greater_than(len(parsed_psbt.o[vout].map[PSBT_OUT_TAP_TREE]), 0) - assert "taproot_tree" in self.nodes[0].decodepsbt(psbt)["outputs"][vout] - parsed_psbt.make_blank() - comb_psbt = self.nodes[0].combinepsbt([psbt, parsed_psbt.to_base64()]) - assert_equal(comb_psbt, psbt) - - self.log.info("Test that walletprocesspsbt both updates and signs a non-updated psbt containing Taproot inputs") - addr = self.nodes[0].getnewaddress("", "bech32m") - utxo = self.create_outpoints(self.nodes[0], outputs=[{addr: 1}])[0] - psbt = self.nodes[0].createpsbt([utxo], [{self.nodes[0].getnewaddress(): 0.9999}]) - signed = 
self.nodes[0].walletprocesspsbt(psbt) - rawtx = signed["hex"] - self.nodes[0].sendrawtransaction(rawtx) - self.generate(self.nodes[0], 1) - - # Make sure tap tree is not in psbt - parsed_psbt = PSBT.from_base64(psbt) - assert PSBT_OUT_TAP_TREE not in parsed_psbt.o[0].map - assert "taproot_tree" not in self.nodes[0].decodepsbt(psbt)["outputs"][0] - parsed_psbt.make_blank() - comb_psbt = self.nodes[0].combinepsbt([psbt, parsed_psbt.to_base64()]) - assert_equal(comb_psbt, psbt) - - self.log.info("Test walletprocesspsbt raises if an invalid sighashtype is passed") - assert_raises_rpc_error(-8, "'all' is not a valid sighash parameter.", self.nodes[0].walletprocesspsbt, psbt, sighashtype="all") - - self.log.info("Test decoding PSBT with per-input preimage types") - # note that the decodepsbt RPC doesn't check whether preimages and hashes match - hash_ripemd160, preimage_ripemd160 = randbytes(20), randbytes(50) - hash_sha256, preimage_sha256 = randbytes(32), randbytes(50) - hash_hash160, preimage_hash160 = randbytes(20), randbytes(50) - hash_hash256, preimage_hash256 = randbytes(32), randbytes(50) - - tx = CTransaction() - tx.vin = [CTxIn(outpoint=COutPoint(hash=int('aa' * 32, 16), n=0), scriptSig=b""), - CTxIn(outpoint=COutPoint(hash=int('bb' * 32, 16), n=0), scriptSig=b""), - CTxIn(outpoint=COutPoint(hash=int('cc' * 32, 16), n=0), scriptSig=b""), - CTxIn(outpoint=COutPoint(hash=int('dd' * 32, 16), n=0), scriptSig=b"")] - tx.vout = [CTxOut(nValue=0, scriptPubKey=b"")] - psbt = PSBT() - psbt.g = PSBTMap({PSBT_GLOBAL_UNSIGNED_TX: tx.serialize()}) - psbt.i = [PSBTMap({bytes([PSBT_IN_RIPEMD160]) + hash_ripemd160: preimage_ripemd160}), - PSBTMap({bytes([PSBT_IN_SHA256]) + hash_sha256: preimage_sha256}), - PSBTMap({bytes([PSBT_IN_HASH160]) + hash_hash160: preimage_hash160}), - PSBTMap({bytes([PSBT_IN_HASH256]) + hash_hash256: preimage_hash256})] - psbt.o = [PSBTMap()] - res_inputs = self.nodes[0].decodepsbt(psbt.to_base64())["inputs"] - assert_equal(len(res_inputs), 4) - preimage_keys = ["ripemd160_preimages", "sha256_preimages", "hash160_preimages", "hash256_preimages"] - expected_hashes = [hash_ripemd160, hash_sha256, hash_hash160, hash_hash256] - expected_preimages = [preimage_ripemd160, preimage_sha256, preimage_hash160, preimage_hash256] - for res_input, preimage_key, hash, preimage in zip(res_inputs, preimage_keys, expected_hashes, expected_preimages): - assert preimage_key in res_input - assert_equal(len(res_input[preimage_key]), 1) - assert hash.hex() in res_input[preimage_key] - assert_equal(res_input[preimage_key][hash.hex()], preimage.hex()) - - self.log.info("Test that combining PSBTs with different transactions fails") - tx = CTransaction() - tx.vin = [CTxIn(outpoint=COutPoint(hash=int('aa' * 32, 16), n=0), scriptSig=b"")] - tx.vout = [CTxOut(nValue=0, scriptPubKey=b"")] - psbt1 = PSBT(g=PSBTMap({PSBT_GLOBAL_UNSIGNED_TX: tx.serialize()}), i=[PSBTMap()], o=[PSBTMap()]).to_base64() - tx.vout[0].nValue += 1 # slightly modify tx - psbt2 = PSBT(g=PSBTMap({PSBT_GLOBAL_UNSIGNED_TX: tx.serialize()}), i=[PSBTMap()], o=[PSBTMap()]).to_base64() - assert_raises_rpc_error(-8, "PSBTs not compatible (different transactions)", self.nodes[0].combinepsbt, [psbt1, psbt2]) - assert_equal(self.nodes[0].combinepsbt([psbt1, psbt1]), psbt1) - - self.log.info("Test that PSBT inputs are being checked via script execution") - acs_prevout = CTxOut(nValue=0, scriptPubKey=CScript([OP_TRUE])) - tx = CTransaction() - tx.vin = [CTxIn(outpoint=COutPoint(hash=int('dd' * 32, 16), n=0), scriptSig=b"")] - tx.vout = 
[CTxOut(nValue=0, scriptPubKey=b"")] - psbt = PSBT() - psbt.g = PSBTMap({PSBT_GLOBAL_UNSIGNED_TX: tx.serialize()}) - psbt.i = [PSBTMap({bytes([PSBT_IN_WITNESS_UTXO]) : acs_prevout.serialize()})] - psbt.o = [PSBTMap()] - assert_equal(self.nodes[0].finalizepsbt(psbt.to_base64()), - {'hex': '0200000001dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd0000000000000000000100000000000000000000000000', 'complete': True}) - - self.log.info("Test we don't crash when making a 0-value funded transaction at 0 fee without forcing an input selection") - assert_raises_rpc_error(-4, "Transaction requires one destination of non-0 value, a non-0 feerate, or a pre-selected input", self.nodes[0].walletcreatefundedpsbt, [], [{"data": "deadbeef"}], 0, {"fee_rate": "0"}) - - self.log.info("Test descriptorprocesspsbt updates and signs a psbt with descriptors") - - self.generate(self.nodes[2], 1) - - # Disable the wallet for node 2 since `descriptorprocesspsbt` does not use the wallet - self.restart_node(2, extra_args=["-disablewallet"]) - self.connect_nodes(0, 2) - self.connect_nodes(1, 2) - - key_info = get_generate_key() - key = key_info.privkey - address = key_info.p2wpkh_addr - - descriptor = descsum_create(f"wpkh({key})") - - utxo = self.create_outpoints(self.nodes[0], outputs=[{address: 1}])[0] - self.sync_all() - - psbt = self.nodes[2].createpsbt([utxo], {self.nodes[0].getnewaddress(): 0.99999}) - decoded = self.nodes[2].decodepsbt(psbt) - test_psbt_input_keys(decoded['inputs'][0], []) - - # Test that even if the wrong descriptor is given, `witness_utxo` and `non_witness_utxo` - # are still added to the psbt - alt_descriptor = descsum_create(f"wpkh({get_generate_key().privkey})") - alt_psbt = self.nodes[2].descriptorprocesspsbt(psbt=psbt, descriptors=[alt_descriptor], sighashtype="ALL")["psbt"] - decoded = self.nodes[2].decodepsbt(alt_psbt) - test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'non_witness_utxo']) - - # Test that the psbt is not finalized and does not have bip32_derivs unless specified - processed_psbt = self.nodes[2].descriptorprocesspsbt(psbt=psbt, descriptors=[descriptor], sighashtype="ALL", bip32derivs=True, finalize=False) - decoded = self.nodes[2].decodepsbt(processed_psbt['psbt']) - test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'non_witness_utxo', 'partial_signatures', 'bip32_derivs']) - - # If psbt not finalized, test that result does not have hex - assert "hex" not in processed_psbt - - processed_psbt = self.nodes[2].descriptorprocesspsbt(psbt=psbt, descriptors=[descriptor], sighashtype="ALL", bip32derivs=False, finalize=True) - decoded = self.nodes[2].decodepsbt(processed_psbt['psbt']) - test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'non_witness_utxo', 'final_scriptwitness']) - - # Test psbt is complete - assert_equal(processed_psbt['complete'], True) - - # Broadcast transaction - self.nodes[2].sendrawtransaction(processed_psbt['hex']) - - self.log.info("Test descriptorprocesspsbt raises if an invalid sighashtype is passed") - assert_raises_rpc_error(-8, "'all' is not a valid sighash parameter.", self.nodes[2].descriptorprocesspsbt, psbt, [descriptor], sighashtype="all") - - -if __name__ == '__main__': - PSBTTest(__file__).main() +#!/usr/bin/env python3 +# Copyright (c) 2018-2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the Partially Signed Transaction RPCs.
+""") +from decimal import Decimal) +from itertools import product) +from random import randbytes) +) +from test_framework.blocktools import () + MAX_STANDARD_TX_WEIGHT,) +)) +from test_framework.descriptors import descsum_create) +from test_framework.key import H_POINT) +from test_framework.messages import () + COutPoint,) + CTransaction,) + CTxIn,) + CTxOut,) + MAX_BIP125_RBF_SEQUENCE,) + WITNESS_SCALE_FACTOR,) +)) +from test_framework.psbt import () + PSBT,) + PSBTMap,) + PSBT_GLOBAL_UNSIGNED_TX,) + PSBT_IN_RIPEMD160,) + PSBT_IN_SHA256,) + PSBT_IN_HASH160,) + PSBT_IN_HASH256,) + PSBT_IN_NON_WITNESS_UTXO,) + PSBT_IN_WITNESS_UTXO,) + PSBT_OUT_TAP_TREE,) +)) +from test_framework.script import CScript, OP_TRUE) +from test_framework.script_util import MIN_STANDARD_TX_NONWITNESS_SIZE) +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.util import () + assert_not_equal,) + assert_approx,) + assert_equal,) + assert_greater_than,) + assert_greater_than_or_equal,) + assert_raises_rpc_error,) + find_vout_for_address,) +)) +from test_framework.wallet_util import () + calculate_input_weight,) + generate_keypair,) + get_generate_key,) +)) +) +import json) +import os) +) +) +class PSBTTest(BitcoinTestFramework):) + def add_options(self, parser):) + self.add_wallet_options(parser)) +) + def set_test_params(self):) + self.num_nodes = 3) + self.extra_args = [) + ["-walletrbf=1", "-addresstype=bech32", "-changetype=bech32"], #TODO: Remove address type restrictions once taproot has psbt extensions) + ["-walletrbf=0", "-changetype=legacy"],) + []) + ]) + # whitelist peers to speed up tx relay / mempool sync) + for args in self.extra_args:) + args.append("-whitelist=noban@127.0.0.1")) + self.supports_cli = False) +) + def skip_test_if_missing_module(self):) + self.skip_if_no_wallet()) +) + def test_psbt_incomplete_after_invalid_modification(self):) + self.log.info("Check that PSBT is correctly marked as incomplete after invalid modification")) + node = self.nodes[2]) + wallet = node.get_wallet_rpc(self.default_wallet_name)) + address = wallet.getnewaddress()) + wallet.sendtoaddress(address=address, amount=1.0)) + self.generate(node, nblocks=1, sync_fun=lambda: self.sync_all(self.nodes[:2]))) +) + utxos = wallet.listunspent(addresses=[address])) + psbt = wallet.createpsbt([{"txid": utxos[0]["txid"], "vout": utxos[0]["vout"]}], [{wallet.getnewaddress(): 0.9999}])) + signed_psbt = wallet.walletprocesspsbt(psbt)["psbt"]) +) + # Modify the raw transaction by changing the output address, so the signature is no longer valid) + signed_psbt_obj = PSBT.from_base64(signed_psbt)) + substitute_addr = wallet.getnewaddress()) + raw = wallet.createrawtransaction([{"txid": utxos[0]["txid"], "vout": utxos[0]["vout"]}], [{substitute_addr: 0.9999}])) + signed_psbt_obj.g.map[PSBT_GLOBAL_UNSIGNED_TX] = bytes.fromhex(raw)) +) + # Check that the walletprocesspsbt call succeeds but also recognizes that the transaction is not complete) + signed_psbt_incomplete = wallet.walletprocesspsbt(signed_psbt_obj.to_base64(), finalize=False)) + assert signed_psbt_incomplete["complete"] is False) +) + def test_utxo_conversion(self):) + self.log.info("Check that non-witness UTXOs are removed for segwit v1+ inputs")) + mining_node = self.nodes[2]) + offline_node = self.nodes[0]) + online_node = self.nodes[1]) +) + # Disconnect offline node from others) + # Topology of test network is linear, so this one call is enough) + self.disconnect_nodes(0, 1)) +) + # Create watchonly on online_node) + 
+ + # Create watchonly on online_node + online_node.createwallet(wallet_name='wonline', disable_private_keys=True) + wonline = online_node.get_wallet_rpc('wonline') + w2 = online_node.get_wallet_rpc(self.default_wallet_name) + + # Mine a transaction that credits the offline address + offline_addr = offline_node.getnewaddress(address_type="bech32m") + online_addr = w2.getnewaddress(address_type="bech32m") + wonline.importaddress(offline_addr, "", False) + mining_wallet = mining_node.get_wallet_rpc(self.default_wallet_name) + mining_wallet.sendtoaddress(address=offline_addr, amount=1.0) + self.generate(mining_node, nblocks=1, sync_fun=lambda: self.sync_all([online_node, mining_node])) + + # Construct an unsigned PSBT on the online node + utxos = wonline.listunspent(addresses=[offline_addr]) + raw = wonline.createrawtransaction([{"txid":utxos[0]["txid"], "vout":utxos[0]["vout"]}],[{online_addr:0.9999}]) + psbt = wonline.walletprocesspsbt(online_node.converttopsbt(raw))["psbt"] + assert not "non_witness_utxo" in mining_node.decodepsbt(psbt)["inputs"][0] + + # add non-witness UTXO manually + psbt_new = PSBT.from_base64(psbt) + prev_tx = wonline.gettransaction(utxos[0]["txid"])["hex"] + psbt_new.i[0].map[PSBT_IN_NON_WITNESS_UTXO] = bytes.fromhex(prev_tx) + assert "non_witness_utxo" in mining_node.decodepsbt(psbt_new.to_base64())["inputs"][0] + + # Have the offline node sign the PSBT (which will remove the non-witness UTXO) + signed_psbt = offline_node.walletprocesspsbt(psbt_new.to_base64()) + assert not "non_witness_utxo" in mining_node.decodepsbt(signed_psbt["psbt"])["inputs"][0] + + # Make sure we can mine the resulting transaction + txid = mining_node.sendrawtransaction(signed_psbt["hex"]) + self.generate(mining_node, nblocks=1, sync_fun=lambda: self.sync_all([online_node, mining_node])) + assert_equal(online_node.gettxout(txid,0)["confirmations"], 1) + + wonline.unloadwallet() + + # Reconnect + self.connect_nodes(1, 0) + self.connect_nodes(0, 2) + + def test_input_confs_control(self): + self.nodes[0].createwallet("minconf") + wallet = self.nodes[0].get_wallet_rpc("minconf") + + # Fund the wallet with different chain heights + for _ in range(2): + self.nodes[1].sendmany("", {wallet.getnewaddress():1, wallet.getnewaddress():1}) + self.generate(self.nodes[1], 1) + + unconfirmed_txid = wallet.sendtoaddress(wallet.getnewaddress(), 0.5) + + self.log.info("Crafting PSBT using an unconfirmed input") + target_address = self.nodes[1].getnewaddress() + psbtx1 = wallet.walletcreatefundedpsbt([], {target_address: 0.1}, 0, {'fee_rate': 1, 'maxconf': 0})['psbt'] + + # Make sure we only had the one input + tx1_inputs = self.nodes[0].decodepsbt(psbtx1)['tx']['vin'] + assert_equal(len(tx1_inputs), 1) + + utxo1 = tx1_inputs[0] + assert_equal(unconfirmed_txid, utxo1['txid']) + + signed_tx1 = wallet.walletprocesspsbt(psbtx1) + txid1 = self.nodes[0].sendrawtransaction(signed_tx1['hex']) + + mempool = self.nodes[0].getrawmempool() + assert txid1 in mempool + + self.log.info("Fail to craft a new PSBT that sends more funds with add_inputs = False") + assert_raises_rpc_error(-4, "The preselected coins total amount does not cover the transaction target. 
Please allow other inputs to be automatically selected or include more coins manually", wallet.walletcreatefundedpsbt, [{'txid': utxo1['txid'], 'vout': utxo1['vout']}], {target_address: 1}, 0, {'add_inputs': False}) + + self.log.info("Fail to craft a new PSBT with minconf above highest one") + assert_raises_rpc_error(-4, "Insufficient funds", wallet.walletcreatefundedpsbt, [{'txid': utxo1['txid'], 'vout': utxo1['vout']}], {target_address: 1}, 0, {'add_inputs': True, 'minconf': 3, 'fee_rate': 10}) + + self.log.info("Fail to broadcast a new PSBT with maxconf 0 due to BIP125 rules to verify it actually chose unconfirmed outputs") + psbt_invalid = wallet.walletcreatefundedpsbt([{'txid': utxo1['txid'], 'vout': utxo1['vout']}], {target_address: 1}, 0, {'add_inputs': True, 'maxconf': 0, 'fee_rate': 10})['psbt'] + signed_invalid = wallet.walletprocesspsbt(psbt_invalid) + assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, signed_invalid['hex']) + + self.log.info("Craft a replacement adding inputs with highest confs possible") + psbtx2 = wallet.walletcreatefundedpsbt([{'txid': utxo1['txid'], 'vout': utxo1['vout']}], {target_address: 1}, 0, {'add_inputs': True, 'minconf': 2, 'fee_rate': 10})['psbt'] + tx2_inputs = self.nodes[0].decodepsbt(psbtx2)['tx']['vin'] + assert_greater_than_or_equal(len(tx2_inputs), 2) + for vin in tx2_inputs: + if vin['txid'] != unconfirmed_txid: + assert_greater_than_or_equal(self.nodes[0].gettxout(vin['txid'], vin['vout'])['confirmations'], 2) + + signed_tx2 = wallet.walletprocesspsbt(psbtx2) + txid2 = self.nodes[0].sendrawtransaction(signed_tx2['hex']) + + mempool = self.nodes[0].getrawmempool() + assert txid1 not in mempool + assert txid2 in mempool + + wallet.unloadwallet() + + def assert_change_type(self, psbtx, expected_type): + """Assert that the given PSBT has a change output with the given type.""" + + # The decodepsbt RPC is stateless and independent of any settings, so we can always just call it on the first node + decoded_psbt = self.nodes[0].decodepsbt(psbtx["psbt"]) + changepos = psbtx["changepos"] + assert_equal(decoded_psbt["tx"]["vout"][changepos]["scriptPubKey"]["type"], expected_type) + + def run_test(self): + # Create and fund a raw tx for sending 10 BTC + psbtx1 = self.nodes[0].walletcreatefundedpsbt([], {self.nodes[2].getnewaddress():10})['psbt'] + + self.log.info("Test for invalid maximum transaction weights") + dest_arg = [{self.nodes[0].getnewaddress(): 1}] + min_tx_weight = MIN_STANDARD_TX_NONWITNESS_SIZE * WITNESS_SCALE_FACTOR + assert_raises_rpc_error(-4, f"Maximum transaction weight must be between {min_tx_weight} and {MAX_STANDARD_TX_WEIGHT}", self.nodes[0].walletcreatefundedpsbt, [], dest_arg, 0, {"max_tx_weight": -1}) + assert_raises_rpc_error(-4, f"Maximum transaction weight must be between {min_tx_weight} and {MAX_STANDARD_TX_WEIGHT}", self.nodes[0].walletcreatefundedpsbt, [], dest_arg, 0, {"max_tx_weight": 0}) + assert_raises_rpc_error(-4, f"Maximum transaction weight must be between {min_tx_weight} and {MAX_STANDARD_TX_WEIGHT}", self.nodes[0].walletcreatefundedpsbt, [], dest_arg, 0, {"max_tx_weight": MAX_STANDARD_TX_WEIGHT + 1}) + + # Base transaction vsize: version (4) + locktime (4) + input count (1) + witness overhead (1) = 10 vbytes + base_tx_vsize = 10 + # One P2WPKH output vsize: value (8) + script length (1) + script (22) = 31 vbytes + p2wpkh_output_vsize = 31 + # 1 vbyte for output count + output_count = 1
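+ # For scale: MIN_STANDARD_TX_NONWITNESS_SIZE is 65 bytes at the time of writing, so min_tx_weight = + # 65 * 4 = 260 weight units, while the single-output skeleton computed next sums to (10 + 1 + 31) * 4 = 168, + # which is why the assertion below holds.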
+ tx_weight_without_inputs = (base_tx_vsize + output_count + p2wpkh_output_vsize) * WITNESS_SCALE_FACTOR + # min_tx_weight is greater than transaction weight without inputs + assert_greater_than(min_tx_weight, tx_weight_without_inputs) + + # In order to test the case where the passed max weight is less than the transaction weight without inputs, + # define a destination with two outputs. + dest_arg_large = [{self.nodes[0].getnewaddress(): 1}, {self.nodes[0].getnewaddress(): 1}] + large_tx_vsize_without_inputs = base_tx_vsize + output_count + (p2wpkh_output_vsize * 2) + large_tx_weight_without_inputs = large_tx_vsize_without_inputs * WITNESS_SCALE_FACTOR + assert_greater_than(large_tx_weight_without_inputs, min_tx_weight) + # Test for max_tx_weight less than transaction weight without inputs + assert_raises_rpc_error(-4, "Maximum transaction weight is less than transaction weight without inputs", self.nodes[0].walletcreatefundedpsbt, [], dest_arg_large, 0, {"max_tx_weight": min_tx_weight}) + assert_raises_rpc_error(-4, "Maximum transaction weight is less than transaction weight without inputs", self.nodes[0].walletcreatefundedpsbt, [], dest_arg_large, 0, {"max_tx_weight": large_tx_weight_without_inputs}) + + # Test for max_tx_weight just enough to include inputs but not change output + assert_raises_rpc_error(-4, "Maximum transaction weight is too low, can not accommodate change output", self.nodes[0].walletcreatefundedpsbt, [], dest_arg_large, 0, {"max_tx_weight": (large_tx_vsize_without_inputs + 1) * WITNESS_SCALE_FACTOR}) + self.log.info("Test that a funded PSBT is always faithful to max_tx_weight option") + large_tx_vsize_with_change = large_tx_vsize_without_inputs + p2wpkh_output_vsize + # It's enough for the change output but won't accommodate the selected input size + assert_raises_rpc_error(-4, "The inputs size exceeds the maximum weight", self.nodes[0].walletcreatefundedpsbt, [], dest_arg_large, 0, {"max_tx_weight": (large_tx_vsize_with_change) * WITNESS_SCALE_FACTOR}) + + max_tx_weight_sufficient = 1000 # 1000 weight units are enough + psbt = self.nodes[0].walletcreatefundedpsbt(outputs=dest_arg, locktime=0, options={"max_tx_weight": max_tx_weight_sufficient})["psbt"] + weight = self.nodes[0].decodepsbt(psbt)["tx"]["weight"] + # ensure the transaction's weight is below the specified max_tx_weight. + assert_greater_than_or_equal(max_tx_weight_sufficient, weight) + + # If inputs are specified, do not automatically add more: + utxo1 = self.nodes[0].listunspent()[0] + assert_raises_rpc_error(-4, "The preselected coins total amount does not cover the transaction target. 
") + "Please allow other inputs to be automatically selected or include more coins manually",) + self.nodes[0].walletcreatefundedpsbt, [{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():90})) +) + psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():90}, 0, {"add_inputs": True})['psbt']) + assert_equal(len(self.nodes[0].decodepsbt(psbtx1)['tx']['vin']), 2)) +) + # Inputs argument can be null) + self.nodes[0].walletcreatefundedpsbt(None, {self.nodes[2].getnewaddress():10})) +) + # Node 1 should not be able to add anything to it but still return the psbtx same as before) + psbtx = self.nodes[1].walletprocesspsbt(psbtx1)['psbt']) + assert_equal(psbtx1, psbtx)) +) + # Node 0 should not be able to sign the transaction with the wallet is locked) + self.nodes[0].encryptwallet("password")) + assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].walletprocesspsbt, psbtx)) +) + # Node 0 should be able to process without signing though) + unsigned_tx = self.nodes[0].walletprocesspsbt(psbtx, False)) + assert_equal(unsigned_tx['complete'], False)) +) + self.nodes[0].walletpassphrase(passphrase="password", timeout=1000000)) +) + # Sign the transaction but don't finalize) + processed_psbt = self.nodes[0].walletprocesspsbt(psbt=psbtx, finalize=False)) + assert "hex" not in processed_psbt) + signed_psbt = processed_psbt['psbt']) +) + # Finalize and send) + finalized_hex = self.nodes[0].finalizepsbt(signed_psbt)['hex']) + self.nodes[0].sendrawtransaction(finalized_hex)) +) + # Alternative method: sign AND finalize in one command) + processed_finalized_psbt = self.nodes[0].walletprocesspsbt(psbt=psbtx, finalize=True)) + finalized_psbt = processed_finalized_psbt['psbt']) + finalized_psbt_hex = processed_finalized_psbt['hex']) + assert_not_equal(signed_psbt, finalized_psbt)) + assert finalized_psbt_hex == finalized_hex) +) + # Manually selected inputs can be locked:) + assert_equal(len(self.nodes[0].listlockunspent()), 0)) + utxo1 = self.nodes[0].listunspent()[0]) + psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0,{"lockUnspents": True})["psbt"]) + assert_equal(len(self.nodes[0].listlockunspent()), 1)) +) + # Locks are ignored for manually selected inputs) + self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0)) +) + # Create p2sh, p2wpkh, and p2wsh addresses) + pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey']) + pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']) + pubkey2 = self.nodes[2].getaddressinfo(self.nodes[2].getnewaddress())['pubkey']) +) + # Setup watchonly wallets) + self.nodes[2].createwallet(wallet_name='wmulti', disable_private_keys=True)) + wmulti = self.nodes[2].get_wallet_rpc('wmulti')) +) + # Create all the addresses) + p2sh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "legacy")['address']) + p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "bech32")['address']) + p2sh_p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "p2sh-segwit")['address']) + if not self.options.descriptors:) + wmulti.importaddress(p2sh)) + wmulti.importaddress(p2wsh)) + wmulti.importaddress(p2sh_p2wsh)) + p2wpkh = self.nodes[1].getnewaddress("", "bech32")) + p2pkh = 
+ + # Manually selected inputs can be locked: + assert_equal(len(self.nodes[0].listlockunspent()), 0) + utxo1 = self.nodes[0].listunspent()[0] + psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0, {"lockUnspents": True})["psbt"] + assert_equal(len(self.nodes[0].listlockunspent()), 1) + + # Locks are ignored for manually selected inputs + self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0) + + # Create p2sh, p2wpkh, and p2wsh addresses + pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey'] + pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey'] + pubkey2 = self.nodes[2].getaddressinfo(self.nodes[2].getnewaddress())['pubkey'] + + # Set up watch-only wallets + self.nodes[2].createwallet(wallet_name='wmulti', disable_private_keys=True) + wmulti = self.nodes[2].get_wallet_rpc('wmulti') + + # Create all the addresses + p2sh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "legacy")['address'] + p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "bech32")['address'] + p2sh_p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "p2sh-segwit")['address'] + if not self.options.descriptors: + wmulti.importaddress(p2sh) + wmulti.importaddress(p2wsh) + wmulti.importaddress(p2sh_p2wsh) + p2wpkh = self.nodes[1].getnewaddress("", "bech32") + p2pkh = self.nodes[1].getnewaddress("", "legacy") + p2sh_p2wpkh = self.nodes[1].getnewaddress("", "p2sh-segwit") + + # fund those addresses + rawtx = self.nodes[0].createrawtransaction([], {p2sh:10, p2wsh:10, p2wpkh:10, p2sh_p2wsh:10, p2sh_p2wpkh:10, p2pkh:10}) + rawtx = self.nodes[0].fundrawtransaction(rawtx, {"changePosition":3}) + signed_tx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])['hex'] + txid = self.nodes[0].sendrawtransaction(signed_tx) + self.generate(self.nodes[0], 6) + + # Find the output pos + p2sh_pos = -1 + p2wsh_pos = -1 + p2wpkh_pos = -1 + p2pkh_pos = -1 + p2sh_p2wsh_pos = -1 + p2sh_p2wpkh_pos = -1 + decoded = self.nodes[0].decoderawtransaction(signed_tx) + for out in decoded['vout']: + if out['scriptPubKey']['address'] == p2sh: + p2sh_pos = out['n'] + elif out['scriptPubKey']['address'] == p2wsh: + p2wsh_pos = out['n'] + elif out['scriptPubKey']['address'] == p2wpkh: + p2wpkh_pos = out['n'] + elif out['scriptPubKey']['address'] == p2sh_p2wsh: + p2sh_p2wsh_pos = out['n'] + elif out['scriptPubKey']['address'] == p2sh_p2wpkh: + p2sh_p2wpkh_pos = out['n'] + elif out['scriptPubKey']['address'] == p2pkh: + p2pkh_pos = out['n'] + + inputs = [{"txid": txid, "vout": p2wpkh_pos}, {"txid": txid, "vout": p2sh_p2wpkh_pos}, {"txid": txid, "vout": p2pkh_pos}] + outputs = [{self.nodes[1].getnewaddress(): 29.99}] + + # spend single key from node 1 + created_psbt = self.nodes[1].walletcreatefundedpsbt(inputs, outputs) + walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(created_psbt['psbt']) + # Make sure it has both types of UTXOs + decoded = self.nodes[1].decodepsbt(walletprocesspsbt_out['psbt']) + assert 'non_witness_utxo' in decoded['inputs'][0] + assert 'witness_utxo' in decoded['inputs'][0] + # Check decodepsbt fee calculation (input values shall only be counted once per UTXO) + assert_equal(decoded['fee'], created_psbt['fee']) + assert_equal(walletprocesspsbt_out['complete'], True) + self.nodes[1].sendrawtransaction(walletprocesspsbt_out['hex']) + + self.log.info("Test walletcreatefundedpsbt fee rate of 10000 sat/vB and 0.1 BTC/kvB produces a total fee at or slightly below -maxtxfee (~0.05290000)")
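+ # (10000 sat/vB and 0.1 BTC/kvB are the same feerate, since 1 BTC = 100,000,000 sat and 1 kvB = 1000 vB; + # at the ~529 vbytes selected here that works out to roughly 0.0529 BTC in fees, matching the approximations below.)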
+ res1 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": 10000, "add_inputs": True}) + assert_approx(res1["fee"], 0.055, 0.005) + res2 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": "0.1", "add_inputs": True}) + assert_approx(res2["fee"], 0.055, 0.005) + + self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed, e.g. a fee_rate under 1 sat/vB is allowed") + res3 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": "0.999", "add_inputs": True}) + assert_approx(res3["fee"], 0.00000381, 0.0000001) + res4 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": 0.00000999, "add_inputs": True}) + assert_approx(res4["fee"], 0.00000381, 0.0000001) + + self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed and that funding non-standard 'zero-fee' transactions is valid") + for param, zero_value in product(["fee_rate", "feeRate"], [0, 0.000, 0.00000000, "0", "0.000", "0.00000000"]): + assert_equal(0, self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {param: zero_value, "add_inputs": True})["fee"]) + + self.log.info("Test invalid fee rate settings") + for param, value in {("fee_rate", 100000), ("feeRate", 1)}: + assert_raises_rpc_error(-4, "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)", + self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: value, "add_inputs": True}) + assert_raises_rpc_error(-3, "Amount out of range", + self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: -1, "add_inputs": True}) + assert_raises_rpc_error(-3, "Amount is not a number or string", + self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: {"foo": "bar"}, "add_inputs": True}) + # Test fee rate values that don't pass fixed-point parsing checks. + for invalid_value in ["", 0.000000001, 1e-09, 1.111111111, 1111111111111111, "31.999999999999999999999"]: + assert_raises_rpc_error(-3, "Invalid amount", + self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: invalid_value, "add_inputs": True}) + # Test fee_rate values that cannot be represented in sat/vB. + for invalid_value in [0.0001, 0.00000001, 0.00099999, 31.99999999]: + assert_raises_rpc_error(-3, "Invalid amount", + self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": invalid_value, "add_inputs": True}) + + self.log.info("- raises RPC error if both feeRate and fee_rate are passed") + assert_raises_rpc_error(-8, "Cannot specify both fee_rate (sat/vB) and feeRate (BTC/kvB)", + self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": 0.1, "feeRate": 0.1, "add_inputs": True}) + + self.log.info("- raises RPC error if both feeRate and estimate_mode passed") + assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and feeRate", + self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": "economical", "feeRate": 0.1, "add_inputs": True}) + + for param in ["feeRate", "fee_rate"]: + self.log.info("- raises RPC error if both {} and conf_target are passed".format(param)) + assert_raises_rpc_error(-8, "Cannot specify both conf_target and {}. 
Please provide either a confirmation " + "target in blocks for automatic fee estimation, or an explicit fee rate.".format(param), + self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: 1, "conf_target": 1, "add_inputs": True}) + + self.log.info("- raises RPC error if both fee_rate and estimate_mode are passed") + assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and fee_rate", + self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": 1, "estimate_mode": "economical", "add_inputs": True}) + + self.log.info("- raises RPC error with invalid estimate_mode settings") + for k, v in {"number": 42, "object": {"foo": "bar"}}.items(): + assert_raises_rpc_error(-3, f"JSON value of type {k} for field estimate_mode is not of expected type string", + self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": v, "conf_target": 0.1, "add_inputs": True}) + for mode in ["", "foo", Decimal("3.141592")]: + assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"', + self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": 0.1, "add_inputs": True}) + + self.log.info("- raises RPC error with invalid conf_target settings") + for mode in ["unset", "economical", "conservative"]: + self.log.debug("{}".format(mode)) + for k, v in {"string": "", "object": {"foo": "bar"}}.items(): + assert_raises_rpc_error(-3, f"JSON value of type {k} for field conf_target is not of expected type number", + self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": v, "add_inputs": True}) + for n in [-1, 0, 1009]: + assert_raises_rpc_error(-8, "Invalid conf_target, must be between 1 and 1008", # max value of 1008 per src/policy/fees.h + self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": n, "add_inputs": True}) + + self.log.info("Test walletcreatefundedpsbt with too-high fee rate produces total fee well above -maxtxfee and raises RPC error") + # previously this was silently capped at -maxtxfee + for bool_add, outputs_array in {True: outputs, False: [{self.nodes[1].getnewaddress(): 1}]}.items(): + msg = "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)" + assert_raises_rpc_error(-4, msg, self.nodes[1].walletcreatefundedpsbt, inputs, outputs_array, 0, {"fee_rate": 1000000, "add_inputs": bool_add}) + assert_raises_rpc_error(-4, msg, self.nodes[1].walletcreatefundedpsbt, inputs, outputs_array, 0, {"feeRate": 1, "add_inputs": bool_add}) + + self.log.info("Test various PSBT operations") + # partially sign multisig things with node 1 + psbtx = wmulti.walletcreatefundedpsbt(inputs=[{"txid":txid,"vout":p2wsh_pos},{"txid":txid,"vout":p2sh_pos},{"txid":txid,"vout":p2sh_p2wsh_pos}], outputs={self.nodes[1].getnewaddress():29.99}, changeAddress=self.nodes[1].getrawchangeaddress())['psbt'] + walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(psbtx) + psbtx = walletprocesspsbt_out['psbt'] + assert_equal(walletprocesspsbt_out['complete'], False) + + # Unload wmulti, we don't need it anymore + wmulti.unloadwallet() + + # partially sign with node 2. 
This should be complete and sendable + walletprocesspsbt_out = self.nodes[2].walletprocesspsbt(psbtx) + assert_equal(walletprocesspsbt_out['complete'], True) + self.nodes[2].sendrawtransaction(walletprocesspsbt_out['hex']) + + # check that walletprocesspsbt fails to decode a non-psbt + rawtx = self.nodes[1].createrawtransaction([{"txid":txid,"vout":p2wpkh_pos}], {self.nodes[1].getnewaddress():9.99}) + assert_raises_rpc_error(-22, "TX decode failed", self.nodes[1].walletprocesspsbt, rawtx) + + # Convert a non-psbt to psbt and make sure we can decode it + rawtx = self.nodes[0].createrawtransaction([], {self.nodes[1].getnewaddress():10}) + rawtx = self.nodes[0].fundrawtransaction(rawtx) + new_psbt = self.nodes[0].converttopsbt(rawtx['hex']) + self.nodes[0].decodepsbt(new_psbt) + + # Make sure that a non-psbt with signatures cannot be converted + signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex']) + assert_raises_rpc_error(-22, "Inputs must not have scriptSigs and scriptWitnesses", + self.nodes[0].converttopsbt, hexstring=signedtx['hex']) # permitsigdata=False by default + assert_raises_rpc_error(-22, "Inputs must not have scriptSigs and scriptWitnesses", + self.nodes[0].converttopsbt, hexstring=signedtx['hex'], permitsigdata=False) + assert_raises_rpc_error(-22, "Inputs must not have scriptSigs and scriptWitnesses", + self.nodes[0].converttopsbt, hexstring=signedtx['hex'], permitsigdata=False, iswitness=True) + # Unless we allow it to convert and strip signatures + self.nodes[0].converttopsbt(hexstring=signedtx['hex'], permitsigdata=True) + + # Create outputs to nodes 1 and 2 + # (note that we intentionally create two different txs here, as we want + # to check that each node is missing prevout data for one of the two + # utxos, see "should only have data for one input" test below) + node1_addr = self.nodes[1].getnewaddress() + node2_addr = self.nodes[2].getnewaddress() + utxo1 = self.create_outpoints(self.nodes[0], outputs=[{node1_addr: 13}])[0] + utxo2 = self.create_outpoints(self.nodes[0], outputs=[{node2_addr: 13}])[0] + self.generate(self.nodes[0], 6)[0] + + # Create a psbt spending outputs from nodes 1 and 2 + psbt_orig = self.nodes[0].createpsbt([utxo1, utxo2], {self.nodes[0].getnewaddress():25.999}) + + # Update psbts, should only have data for one input and not the other + psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig, False, "ALL")['psbt'] + psbt1_decoded = self.nodes[0].decodepsbt(psbt1) + assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1] + # Check that BIP32 path was added + assert "bip32_derivs" in psbt1_decoded['inputs'][0] + psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig, False, "ALL", False)['psbt'] + psbt2_decoded = self.nodes[0].decodepsbt(psbt2) + assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1] + # Check that BIP32 paths were not added + assert "bip32_derivs" not in psbt2_decoded['inputs'][1] + + # Sign PSBTs (workaround issue #18039) + psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt'] + psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt'] + + # Combine, finalize, and send the psbts + combined = self.nodes[0].combinepsbt([psbt1, psbt2]) + finalized = self.nodes[0].finalizepsbt(combined)['hex'] + self.nodes[0].sendrawtransaction(finalized) + self.generate(self.nodes[0], 6) + + # Test additional args in walletcreatefundedpsbt + # Make sure both pre-included and funded inputs + # have the correct sequence numbers based on + # replaceable arg
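+ # (MAX_BIP125_RBF_SEQUENCE is 0xfffffffd: sequence numbers at or below it signal BIP125 + # replaceability, anything strictly greater opts out, which is what the assertions below distinguish.)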
+ block_height = self.nodes[0].getblockcount() + unspent = self.nodes[0].listunspent()[0] + psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"replaceable": False, "add_inputs": True}, False) + decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"]) + for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]): + assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE) + assert "bip32_derivs" not in psbt_in + assert_equal(decoded_psbt["tx"]["locktime"], block_height+2) + + # Same construction with only locktime set and RBF explicitly enabled + psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height, {"replaceable": True, "add_inputs": True}, True) + decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"]) + for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]): + assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE) + assert "bip32_derivs" in psbt_in + assert_equal(decoded_psbt["tx"]["locktime"], block_height) + + # Same construction without optional arguments + psbtx_info = self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}]) + decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"]) + for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]): + assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE) + assert "bip32_derivs" in psbt_in + assert_equal(decoded_psbt["tx"]["locktime"], 0) + + # Same construction without optional arguments, for a node with -walletrbf=0 + unspent1 = self.nodes[1].listunspent()[0] + psbtx_info = self.nodes[1].walletcreatefundedpsbt([{"txid":unspent1["txid"], "vout":unspent1["vout"]}], [{self.nodes[2].getnewaddress():unspent1["amount"]+1}], block_height, {"add_inputs": True}) + decoded_psbt = self.nodes[1].decodepsbt(psbtx_info["psbt"]) + for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]): + assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE) + assert "bip32_derivs" in psbt_in + + # Make sure that a change address from a wallet without P2SH innerscript access still results in success + # when attempting BnB coin selection + self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"changeAddress":self.nodes[1].getnewaddress()}, False) + + # Make sure the wallet's change type is respected by default + small_output = {self.nodes[0].getnewaddress():0.1} + psbtx_native = self.nodes[0].walletcreatefundedpsbt([], [small_output]) + self.assert_change_type(psbtx_native, "witness_v0_keyhash") + psbtx_legacy = self.nodes[1].walletcreatefundedpsbt([], [small_output]) + self.assert_change_type(psbtx_legacy, "pubkeyhash") + + # Make sure the change type of the wallet can also be overwritten + psbtx_np2wkh = self.nodes[1].walletcreatefundedpsbt([], [small_output], 0, {"change_type":"p2sh-segwit"}) + self.assert_change_type(psbtx_np2wkh, "scripthash") + + # Make sure the change type cannot be specified if a change address is given + invalid_options = {"change_type":"legacy","changeAddress":self.nodes[0].getnewaddress()} + assert_raises_rpc_error(-8, "both change address and address type options", self.nodes[0].walletcreatefundedpsbt, [], [small_output], 0, invalid_options)
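+ # (For reference: decodepsbt reports bech32 change as "witness_v0_keyhash", legacy change as + # "pubkeyhash", and p2sh-segwit change as "scripthash", which is what assert_change_type checks above.)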
+ + # Regression test for 14473 (mishandling of already-signed witness transaction): + psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], 0, {"add_inputs": True}) + complete_psbt = self.nodes[0].walletprocesspsbt(psbtx_info["psbt"]) + double_processed_psbt = self.nodes[0].walletprocesspsbt(complete_psbt["psbt"]) + assert_equal(complete_psbt, double_processed_psbt) + # We don't care about the decode result, but decoding must succeed. + self.nodes[0].decodepsbt(double_processed_psbt["psbt"]) + + # Make sure unsafe inputs are included if specified + self.nodes[2].createwallet(wallet_name="unsafe") + wunsafe = self.nodes[2].get_wallet_rpc("unsafe") + self.nodes[0].sendtoaddress(wunsafe.getnewaddress(), 2) + self.sync_mempools() + assert_raises_rpc_error(-4, "Insufficient funds", wunsafe.walletcreatefundedpsbt, [], [{self.nodes[0].getnewaddress(): 1}]) + wunsafe.walletcreatefundedpsbt([], [{self.nodes[0].getnewaddress(): 1}], 0, {"include_unsafe": True}) + + # BIP 174 Test Vectors + + # Check that unknown values are just passed through + unknown_psbt = "cHNidP8BAD8CAAAAAf//////////////////////////////////////////AAAAAAD/////AQAAAAAAAAAAA2oBAAAAAAAACg8BAgMEBQYHCAkPAQIDBAUGBwgJCgsMDQ4PAAA=" + unknown_out = self.nodes[0].walletprocesspsbt(unknown_psbt)['psbt'] + assert_equal(unknown_psbt, unknown_out) + + # Open the data file + with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_psbt.json'), encoding='utf-8') as f: + d = json.load(f) + invalids = d['invalid'] + invalid_with_msgs = d["invalid_with_msg"] + valids = d['valid'] + creators = d['creator'] + signers = d['signer'] + combiners = d['combiner'] + finalizers = d['finalizer'] + extractors = d['extractor'] + + # Invalid PSBTs + for invalid in invalids: + assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decodepsbt, invalid) + for invalid in invalid_with_msgs: + psbt, msg = invalid + assert_raises_rpc_error(-22, f"TX decode failed {msg}", self.nodes[0].decodepsbt, psbt) + + # Valid PSBTs + for valid in valids: + self.nodes[0].decodepsbt(valid) + + # Creator Tests + for creator in creators: + created_tx = self.nodes[0].createpsbt(inputs=creator['inputs'], outputs=creator['outputs'], replaceable=False) + assert_equal(created_tx, creator['result']) + + # Signer tests + for i, signer in enumerate(signers): + self.nodes[2].createwallet(wallet_name="wallet{}".format(i)) + wrpc = self.nodes[2].get_wallet_rpc("wallet{}".format(i)) + for key in signer['privkeys']: + wrpc.importprivkey(key) + signed_tx = wrpc.walletprocesspsbt(signer['psbt'], True, "ALL")['psbt'] + assert_equal(signed_tx, signer['result']) + + # Combiner test + for combiner in combiners: + combined = self.nodes[2].combinepsbt(combiner['combine']) + assert_equal(combined, combiner['result']) + + # Empty combiner test + assert_raises_rpc_error(-8, "Parameter 'txs' cannot be empty", self.nodes[0].combinepsbt, []) + + # Finalizer test + for finalizer in finalizers: + finalized = self.nodes[2].finalizepsbt(finalizer['finalize'], False)['psbt'] + assert_equal(finalized, finalizer['result']) + + # Extractor test + for extractor in extractors: + extracted = self.nodes[2].finalizepsbt(extractor['extract'], True)['hex'] + assert_equal(extracted, extractor['result']) + + # Unload extra wallets + for i, signer in enumerate(signers): + self.nodes[2].unloadwallet("wallet{}".format(i))
+ + if self.options.descriptors: + self.test_utxo_conversion() + self.test_psbt_incomplete_after_invalid_modification() + + self.test_input_confs_control() + + # Test that psbts with p2pkh outputs are created properly + p2pkh = self.nodes[0].getnewaddress(address_type='legacy') + psbt = self.nodes[1].walletcreatefundedpsbt([], [{p2pkh : 1}], 0, {"includeWatching" : True}, True) + self.nodes[0].decodepsbt(psbt['psbt']) + + # Test decoding error: invalid base64 + assert_raises_rpc_error(-22, "TX decode failed invalid base64", self.nodes[0].decodepsbt, ";definitely not base64;") + + # Send to all types of addresses + addr1 = self.nodes[1].getnewaddress("", "bech32") + addr2 = self.nodes[1].getnewaddress("", "legacy") + addr3 = self.nodes[1].getnewaddress("", "p2sh-segwit") + utxo1, utxo2, utxo3 = self.create_outpoints(self.nodes[1], outputs=[{addr1: 11}, {addr2: 11}, {addr3: 11}]) + self.sync_all() + + def test_psbt_input_keys(psbt_input, keys): + """Check that the psbt input has only the expected keys.""" + assert_equal(set(keys), set(psbt_input.keys())) + + # Create a PSBT. None of the inputs are filled initially + psbt = self.nodes[1].createpsbt([utxo1, utxo2, utxo3], {self.nodes[0].getnewaddress():32.999}) + decoded = self.nodes[1].decodepsbt(psbt) + test_psbt_input_keys(decoded['inputs'][0], []) + test_psbt_input_keys(decoded['inputs'][1], []) + test_psbt_input_keys(decoded['inputs'][2], []) + + # Update a PSBT with UTXOs from the node + # The bech32 input is filled with the witness UTXO (plus the full previous tx); the legacy and + # p2sh-segwit inputs only get the non-witness UTXO + updated = self.nodes[1].utxoupdatepsbt(psbt) + decoded = self.nodes[1].decodepsbt(updated) + test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'non_witness_utxo']) + test_psbt_input_keys(decoded['inputs'][1], ['non_witness_utxo']) + test_psbt_input_keys(decoded['inputs'][2], ['non_witness_utxo']) + + # Try again, now while providing descriptors, making P2SH-segwit work, and causing bip32_derivs and redeem_script to be filled in + descs = [self.nodes[1].getaddressinfo(addr)['desc'] for addr in [addr1,addr2,addr3]] + updated = self.nodes[1].utxoupdatepsbt(psbt=psbt, descriptors=descs) + decoded = self.nodes[1].decodepsbt(updated) + test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'non_witness_utxo', 'bip32_derivs']) + test_psbt_input_keys(decoded['inputs'][1], ['non_witness_utxo', 'bip32_derivs']) + test_psbt_input_keys(decoded['inputs'][2], ['non_witness_utxo','witness_utxo', 'bip32_derivs', 'redeem_script']) + + # Two PSBTs with a common input should not be joinable + psbt1 = self.nodes[1].createpsbt([utxo1], {self.nodes[0].getnewaddress():Decimal('10.999')}) + assert_raises_rpc_error(-8, "exists in multiple PSBTs", self.nodes[1].joinpsbts, [psbt1, updated]) + + # Join two distinct PSBTs + addr4 = self.nodes[1].getnewaddress("", "p2sh-segwit") + utxo4 = self.create_outpoints(self.nodes[0], outputs=[{addr4: 5}])[0] + self.generate(self.nodes[0], 6) + psbt2 = self.nodes[1].createpsbt([utxo4], {self.nodes[0].getnewaddress():Decimal('4.999')}) + psbt2 = self.nodes[1].walletprocesspsbt(psbt2)['psbt'] + psbt2_decoded = self.nodes[0].decodepsbt(psbt2) + assert "final_scriptwitness" in psbt2_decoded['inputs'][0] and "final_scriptSig" in psbt2_decoded['inputs'][0] + joined = self.nodes[0].joinpsbts([psbt, psbt2]) + joined_decoded = self.nodes[0].decodepsbt(joined) + assert len(joined_decoded['inputs']) == 4 and len(joined_decoded['outputs']) == 2 and "final_scriptwitness" 
not in joined_decoded['inputs'][3] and "final_scriptSig" not in joined_decoded['inputs'][3] + + # Check that joining shuffles the inputs and outputs + # 10 attempts should be enough to get a shuffled join + shuffled = False + for _ in range(10): + shuffled_joined = self.nodes[0].joinpsbts([psbt, psbt2]) + shuffled |= joined != shuffled_joined + if shuffled: + break + assert shuffled + + # Newly created PSBT needs UTXOs and updating + addr = self.nodes[1].getnewaddress("", "p2sh-segwit") + utxo = self.create_outpoints(self.nodes[0], outputs=[{addr: 7}])[0] + addrinfo = self.nodes[1].getaddressinfo(addr) + self.generate(self.nodes[0], 6)[0] + psbt = self.nodes[1].createpsbt([utxo], {self.nodes[0].getnewaddress("", "p2sh-segwit"):Decimal('6.999')}) + analyzed = self.nodes[0].analyzepsbt(psbt) + assert not analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'updater' and analyzed['next'] == 'updater' + + # After update with wallet, only needs signing + updated = self.nodes[1].walletprocesspsbt(psbt, False, 'ALL', True)['psbt'] + analyzed = self.nodes[0].analyzepsbt(updated) + assert analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'signer' and analyzed['next'] == 'signer' and analyzed['inputs'][0]['missing']['signatures'][0] == addrinfo['embedded']['witness_program'] + + # Check fee and size things + assert analyzed['fee'] == Decimal('0.001') and analyzed['estimated_vsize'] == 134 and analyzed['estimated_feerate'] == Decimal('0.00746268')
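+ # (Sanity check on those numbers: 0.001 BTC over an estimated 134 vbytes is 0.001 / 134 * 1000 + # = 0.00746268... BTC/kvB, which is the estimated_feerate analyzepsbt reports.)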
+ + # After signing and finalizing, needs extracting + signed = self.nodes[1].walletprocesspsbt(updated)['psbt'] + analyzed = self.nodes[0].analyzepsbt(signed) + assert analyzed['inputs'][0]['has_utxo'] and analyzed['inputs'][0]['is_final'] and analyzed['next'] == 'extractor' + + self.log.info("PSBT spending unspendable outputs should have error message and Creator as next") + analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWAEHYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFv8/wADXYP/7//////8JxOh0LR2HAI8AAAAAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHEAABAACAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHENkMak8AAAAA') + assert_equal(analysis['next'], 'creator') + assert_equal(analysis['error'], 'PSBT is not valid. Input 0 spends unspendable output') + + self.log.info("PSBT with invalid values should have error message and Creator as next") + analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8AgIFq49AHABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA') + assert_equal(analysis['next'], 'creator') + assert_equal(analysis['error'], 'PSBT is not valid. Input 0 has invalid value') + + self.log.info("PSBT with signed, but not finalized, inputs should have Finalizer as next") + analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAZYezcxdnbXoQCmrD79t/LzDgtUo9ERqixk8wgioAobrAAAAAAD9////AlDDAAAAAAAAFgAUy/UxxZuzZswcmFnN/E9DGSiHLUsuGPUFAAAAABYAFLsH5o0R38wXx+X2cCosTMCZnQ4baAAAAAABAR8A4fUFAAAAABYAFOBI2h5thf3+Lflb2LGCsVSZwsltIgIC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnJHMEQCIGx7zKcMIGr7cEES9BR4Kdt/pzPTK3fKWcGyCJXb7MVnAiALOBgqlMH4GbC1HDh/HmylmO54fyEy4lKde7/BT/PWxwEBAwQBAAAAIgYC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnIYDwVpQ1QAAIABAACAAAAAgAAAAAAAAAAAAAAiAgL+CIiB59NSCssOJRGiMYQK1chahgAaaJpIXE41Cyir+xgPBWlDVAAAgAEAAIAAAACAAQAAAAAAAAAA') + assert_equal(analysis['next'], 'finalizer') + + analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgCAgWrj0AcAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8A8gUqAQAAABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA') + assert_equal(analysis['next'], 'creator') + assert_equal(analysis['error'], 'PSBT is not valid. Output amount invalid') + + assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].analyzepsbt, "cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==") + + assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].walletprocesspsbt, "cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==") + + self.log.info("Test that we can fund psbts with external inputs specified") + + privkey, _ = generate_keypair(wif=True) + + self.nodes[1].createwallet("extfund") + wallet = self.nodes[1].get_wallet_rpc("extfund") + + # Make a weird but signable script. 
sh(wsh(pkh())) descriptor accomplishes this + desc = descsum_create("sh(wsh(pkh({})))".format(privkey)) + if self.options.descriptors: + res = self.nodes[0].importdescriptors([{"desc": desc, "timestamp": "now"}]) + else: + res = self.nodes[0].importmulti([{"desc": desc, "timestamp": "now"}]) + assert res[0]["success"] + addr = self.nodes[0].deriveaddresses(desc)[0] + addr_info = self.nodes[0].getaddressinfo(addr) + + self.nodes[0].sendtoaddress(addr, 10) + self.nodes[0].sendtoaddress(wallet.getnewaddress(), 10) + self.generate(self.nodes[0], 6) + ext_utxo = self.nodes[0].listunspent(addresses=[addr])[0] + + # An external input without solving data should result in an error + assert_raises_rpc_error(-4, "Not solvable pre-selected input COutPoint(%s, %s)" % (ext_utxo["txid"][0:10], ext_utxo["vout"]), wallet.walletcreatefundedpsbt, [ext_utxo], {self.nodes[0].getnewaddress(): 15}) + + # But funding should work when the solving data is provided + psbt = wallet.walletcreatefundedpsbt([ext_utxo], {self.nodes[0].getnewaddress(): 15}, 0, {"add_inputs": True, "solving_data": {"pubkeys": [addr_info['pubkey']], "scripts": [addr_info["embedded"]["scriptPubKey"], addr_info["embedded"]["embedded"]["scriptPubKey"]]}}) + signed = wallet.walletprocesspsbt(psbt['psbt']) + assert not signed['complete'] + signed = self.nodes[0].walletprocesspsbt(signed['psbt']) + assert signed['complete'] + + psbt = wallet.walletcreatefundedpsbt([ext_utxo], {self.nodes[0].getnewaddress(): 15}, 0, {"add_inputs": True, "solving_data":{"descriptors": [desc]}}) + signed = wallet.walletprocesspsbt(psbt['psbt']) + assert not signed['complete'] + signed = self.nodes[0].walletprocesspsbt(signed['psbt']) + assert signed['complete'] + final = signed['hex'] + + dec = self.nodes[0].decodepsbt(signed["psbt"]) + for i, txin in enumerate(dec["tx"]["vin"]): + if txin["txid"] == ext_utxo["txid"] and txin["vout"] == ext_utxo["vout"]: + input_idx = i + break + psbt_in = dec["inputs"][input_idx] + scriptsig_hex = psbt_in["final_scriptSig"]["hex"] if "final_scriptSig" in psbt_in else "" + witness_stack_hex = psbt_in["final_scriptwitness"] if "final_scriptwitness" in psbt_in else None + input_weight = calculate_input_weight(scriptsig_hex, witness_stack_hex) + low_input_weight = input_weight // 2 + high_input_weight = input_weight * 2 + + # Input weight error conditions + assert_raises_rpc_error( + -8, + "Input weights should be specified in inputs rather than in options.", + wallet.walletcreatefundedpsbt, + inputs=[ext_utxo], + outputs={self.nodes[0].getnewaddress(): 15}, + options={"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 1000}]} + ) + + # Funding should also work if the input weight is provided + psbt = wallet.walletcreatefundedpsbt( + inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": input_weight}], + outputs={self.nodes[0].getnewaddress(): 15}, + add_inputs=True, + ) + signed = wallet.walletprocesspsbt(psbt["psbt"]) + signed = self.nodes[0].walletprocesspsbt(signed["psbt"]) + final = signed["hex"] + assert self.nodes[0].testmempoolaccept([final])[0]["allowed"] + # Reducing the weight should have a lower fee + psbt2 = wallet.walletcreatefundedpsbt( + inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": low_input_weight}], + outputs={self.nodes[0].getnewaddress(): 15}, + add_inputs=True, + ) + assert_greater_than(psbt["fee"], psbt2["fee"])
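+ # (The "weight" field is in weight units: non-witness bytes count 4 each, witness bytes 1 each. + # Coin selection prices the input by this declared weight, so halving it lowered the fee above and + # doubling it below must raise it.)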
= wallet.walletcreatefundedpsbt() + inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}],) + outputs={self.nodes[0].getnewaddress(): 15},) + add_inputs=True,) + )) + assert_greater_than(psbt2["fee"], psbt["fee"])) + # The provided weight should override the calculated weight when solving data is provided) + psbt3 = wallet.walletcreatefundedpsbt() + inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}],) + outputs={self.nodes[0].getnewaddress(): 15},) + add_inputs=True, solving_data={"descriptors": [desc]},) + )) + assert_equal(psbt2["fee"], psbt3["fee"])) +) + # Import the external utxo descriptor so that we can sign for it from the test wallet) + if self.options.descriptors:) + res = wallet.importdescriptors([{"desc": desc, "timestamp": "now"}])) + else:) + res = wallet.importmulti([{"desc": desc, "timestamp": "now"}])) + assert res[0]["success"]) + # The provided weight should override the calculated weight for a wallet input) + psbt3 = wallet.walletcreatefundedpsbt() + inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}],) + outputs={self.nodes[0].getnewaddress(): 15},) + add_inputs=True,) + )) + assert_equal(psbt2["fee"], psbt3["fee"])) +) + self.log.info("Test signing inputs that the wallet has keys for but is not watching the scripts")) + self.nodes[1].createwallet(wallet_name="scriptwatchonly", disable_private_keys=True)) + watchonly = self.nodes[1].get_wallet_rpc("scriptwatchonly")) +) + privkey, pubkey = generate_keypair(wif=True)) +) + desc = descsum_create("wsh(pkh({}))".format(pubkey.hex()))) + if self.options.descriptors:) + res = watchonly.importdescriptors([{"desc": desc, "timestamp": "now"}])) + else:) + res = watchonly.importmulti([{"desc": desc, "timestamp": "now"}])) + assert res[0]["success"]) + addr = self.nodes[0].deriveaddresses(desc)[0]) + self.nodes[0].sendtoaddress(addr, 10)) + self.generate(self.nodes[0], 1)) + self.nodes[0].importprivkey(privkey)) +) + psbt = watchonly.sendall([wallet.getnewaddress()])["psbt"]) + signed_tx = self.nodes[0].walletprocesspsbt(psbt)) + self.nodes[0].sendrawtransaction(signed_tx["hex"])) +) + # Same test but for taproot) + if self.options.descriptors:) + privkey, pubkey = generate_keypair(wif=True)) +) + desc = descsum_create("tr({},pk({}))".format(H_POINT, pubkey.hex()))) + res = watchonly.importdescriptors([{"desc": desc, "timestamp": "now"}])) + assert res[0]["success"]) + addr = self.nodes[0].deriveaddresses(desc)[0]) + self.nodes[0].sendtoaddress(addr, 10)) + self.generate(self.nodes[0], 1)) + self.nodes[0].importdescriptors([{"desc": descsum_create("tr({})".format(privkey)), "timestamp":"now"}])) +) + psbt = watchonly.sendall([wallet.getnewaddress(), addr])["psbt"]) + processed_psbt = self.nodes[0].walletprocesspsbt(psbt)) + txid = self.nodes[0].sendrawtransaction(processed_psbt["hex"])) + vout = find_vout_for_address(self.nodes[0], txid, addr)) +) + # Make sure tap tree is in psbt) + parsed_psbt = PSBT.from_base64(psbt)) + assert_greater_than(len(parsed_psbt.o[vout].map[PSBT_OUT_TAP_TREE]), 0)) + assert "taproot_tree" in self.nodes[0].decodepsbt(psbt)["outputs"][vout]) + parsed_psbt.make_blank()) + comb_psbt = self.nodes[0].combinepsbt([psbt, parsed_psbt.to_base64()])) + assert_equal(comb_psbt, psbt)) +) + self.log.info("Test that walletprocesspsbt both updates and signs a non-updated psbt containing Taproot inputs")) + addr = self.nodes[0].getnewaddress("", "bech32m")) + utxo = self.create_outpoints(self.nodes[0], 
outputs=[{addr: 1}])[0]) + psbt = self.nodes[0].createpsbt([utxo], [{self.nodes[0].getnewaddress(): 0.9999}])) + signed = self.nodes[0].walletprocesspsbt(psbt)) + rawtx = signed["hex"]) + self.nodes[0].sendrawtransaction(rawtx)) + self.generate(self.nodes[0], 1)) +) + # Make sure tap tree is not in psbt) + parsed_psbt = PSBT.from_base64(psbt)) + assert PSBT_OUT_TAP_TREE not in parsed_psbt.o[0].map) + assert "taproot_tree" not in self.nodes[0].decodepsbt(psbt)["outputs"][0]) + parsed_psbt.make_blank()) + comb_psbt = self.nodes[0].combinepsbt([psbt, parsed_psbt.to_base64()])) + assert_equal(comb_psbt, psbt)) +) + self.log.info("Test walletprocesspsbt raises if an invalid sighashtype is passed")) + assert_raises_rpc_error(-8, "'all' is not a valid sighash parameter.", self.nodes[0].walletprocesspsbt, psbt, sighashtype="all")) +) + self.log.info("Test decoding PSBT with per-input preimage types")) + # note that the decodepsbt RPC doesn't check whether preimages and hashes match) + hash_ripemd160, preimage_ripemd160 = randbytes(20), randbytes(50)) + hash_sha256, preimage_sha256 = randbytes(32), randbytes(50)) + hash_hash160, preimage_hash160 = randbytes(20), randbytes(50)) + hash_hash256, preimage_hash256 = randbytes(32), randbytes(50)) +) + tx = CTransaction()) + tx.vin = [CTxIn(outpoint=COutPoint(hash=int('aa' * 32, 16), n=0), scriptSig=b""),) + CTxIn(outpoint=COutPoint(hash=int('bb' * 32, 16), n=0), scriptSig=b""),) + CTxIn(outpoint=COutPoint(hash=int('cc' * 32, 16), n=0), scriptSig=b""),) + CTxIn(outpoint=COutPoint(hash=int('dd' * 32, 16), n=0), scriptSig=b"")]) + tx.vout = [CTxOut(nValue=0, scriptPubKey=b"")]) + psbt = PSBT()) + psbt.g = PSBTMap({PSBT_GLOBAL_UNSIGNED_TX: tx.serialize()})) + psbt.i = [PSBTMap({bytes([PSBT_IN_RIPEMD160]) + hash_ripemd160: preimage_ripemd160}),) + PSBTMap({bytes([PSBT_IN_SHA256]) + hash_sha256: preimage_sha256}),) + PSBTMap({bytes([PSBT_IN_HASH160]) + hash_hash160: preimage_hash160}),) + PSBTMap({bytes([PSBT_IN_HASH256]) + hash_hash256: preimage_hash256})]) + psbt.o = [PSBTMap()]) + res_inputs = self.nodes[0].decodepsbt(psbt.to_base64())["inputs"]) + assert_equal(len(res_inputs), 4)) + preimage_keys = ["ripemd160_preimages", "sha256_preimages", "hash160_preimages", "hash256_preimages"]) + expected_hashes = [hash_ripemd160, hash_sha256, hash_hash160, hash_hash256]) + expected_preimages = [preimage_ripemd160, preimage_sha256, preimage_hash160, preimage_hash256]) + for res_input, preimage_key, hash, preimage in zip(res_inputs, preimage_keys, expected_hashes, expected_preimages):) + assert preimage_key in res_input) + assert_equal(len(res_input[preimage_key]), 1)) + assert hash.hex() in res_input[preimage_key]) + assert_equal(res_input[preimage_key][hash.hex()], preimage.hex())) +) + self.log.info("Test that combining PSBTs with different transactions fails")) + tx = CTransaction()) + tx.vin = [CTxIn(outpoint=COutPoint(hash=int('aa' * 32, 16), n=0), scriptSig=b"")]) + tx.vout = [CTxOut(nValue=0, scriptPubKey=b"")]) + psbt1 = PSBT(g=PSBTMap({PSBT_GLOBAL_UNSIGNED_TX: tx.serialize()}), i=[PSBTMap()], o=[PSBTMap()]).to_base64()) + tx.vout[0].nValue += 1 # slightly modify tx) + psbt2 = PSBT(g=PSBTMap({PSBT_GLOBAL_UNSIGNED_TX: tx.serialize()}), i=[PSBTMap()], o=[PSBTMap()]).to_base64()) + assert_raises_rpc_error(-8, "PSBTs not compatible (different transactions)", self.nodes[0].combinepsbt, [psbt1, psbt2])) + assert_equal(self.nodes[0].combinepsbt([psbt1, psbt1]), psbt1)) +) + self.log.info("Test that PSBT inputs are being checked via script execution")) + acs_prevout 
= CTxOut(nValue=0, scriptPubKey=CScript([OP_TRUE]))) + tx = CTransaction()) + tx.vin = [CTxIn(outpoint=COutPoint(hash=int('dd' * 32, 16), n=0), scriptSig=b"")]) + tx.vout = [CTxOut(nValue=0, scriptPubKey=b"")]) + psbt = PSBT()) + psbt.g = PSBTMap({PSBT_GLOBAL_UNSIGNED_TX: tx.serialize()})) + psbt.i = [PSBTMap({bytes([PSBT_IN_WITNESS_UTXO]) : acs_prevout.serialize()})]) + psbt.o = [PSBTMap()]) + assert_equal(self.nodes[0].finalizepsbt(psbt.to_base64()),) + {'hex': '0200000001dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd0000000000000000000100000000000000000000000000', 'complete': True})) +) + self.log.info("Test we don't crash when making a 0-value funded transaction at 0 fee without forcing an input selection")) + assert_raises_rpc_error(-4, "Transaction requires one destination of non-0 value, a non-0 feerate, or a pre-selected input", self.nodes[0].walletcreatefundedpsbt, [], [{"data": "deadbeef"}], 0, {"fee_rate": "0"})) +) + self.log.info("Test descriptorprocesspsbt updates and signs a psbt with descriptors")) +) + self.generate(self.nodes[2], 1)) +) + # Disable the wallet for node 2 since `descriptorprocesspsbt` does not use the wallet) + self.restart_node(2, extra_args=["-disablewallet"])) + self.connect_nodes(0, 2)) + self.connect_nodes(1, 2)) +) + key_info = get_generate_key()) + key = key_info.privkey) + address = key_info.p2wpkh_addr) +) + descriptor = descsum_create(f"wpkh({key})")) +) + utxo = self.create_outpoints(self.nodes[0], outputs=[{address: 1}])[0]) + self.sync_all()) +) + psbt = self.nodes[2].createpsbt([utxo], {self.nodes[0].getnewaddress(): 0.99999})) + decoded = self.nodes[2].decodepsbt(psbt)) + test_psbt_input_keys(decoded['inputs'][0], [])) +) + # Test that even if the wrong descriptor is given, `witness_utxo` and `non_witness_utxo`) + # are still added to the psbt) + alt_descriptor = descsum_create(f"wpkh({get_generate_key().privkey})")) + alt_psbt = self.nodes[2].descriptorprocesspsbt(psbt=psbt, descriptors=[alt_descriptor], sighashtype="ALL")["psbt"]) + decoded = self.nodes[2].decodepsbt(alt_psbt)) + test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'non_witness_utxo'])) +) + # Test that the psbt is not finalized and does not have bip32_derivs unless specified) + processed_psbt = self.nodes[2].descriptorprocesspsbt(psbt=psbt, descriptors=[descriptor], sighashtype="ALL", bip32derivs=True, finalize=False)) + decoded = self.nodes[2].decodepsbt(processed_psbt['psbt'])) + test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'non_witness_utxo', 'partial_signatures', 'bip32_derivs'])) +) + # If psbt not finalized, test that result does not have hex) + assert "hex" not in processed_psbt) +) + processed_psbt = self.nodes[2].descriptorprocesspsbt(psbt=psbt, descriptors=[descriptor], sighashtype="ALL", bip32derivs=False, finalize=True)) + decoded = self.nodes[2].decodepsbt(processed_psbt['psbt'])) + test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'non_witness_utxo', 'final_scriptwitness'])) +) + # Test psbt is complete) + assert_equal(processed_psbt['complete'], True)) +) + # Broadcast transaction) + self.nodes[2].sendrawtransaction(processed_psbt['hex'])) +) + self.log.info("Test descriptorprocesspsbt raises if an invalid sighashtype is passed")) + assert_raises_rpc_error(-8, "'all' is not a valid sighash parameter.", self.nodes[2].descriptorprocesspsbt, psbt, [descriptor], sighashtype="all")) +) +) +if __name__ == '__main__':) + PSBTTest(__file__).main()) diff --git a/test/functional/test_framework/crypto/secp256k1.py 
b/test/functional/test_framework/crypto/secp256k1.py index 1b36406fb5678b..b409bb7c670aaf 100644 --- a/test/functional/test_framework/crypto/secp256k1.py +++ b/test/functional/test_framework/crypto/secp256k1.py @@ -1,355 +1,355 @@ -# Copyright (c) 2022-2023 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -"""Test-only implementation of low-level secp256k1 field and group arithmetic - -It is designed for ease of understanding, not performance. - -WARNING: This code is slow and trivially vulnerable to side channel attacks. Do not use for -anything but tests. - -Exports: -* FE: class for secp256k1 field elements -* GE: class for secp256k1 group elements -* G: the secp256k1 generator point -""" - -import unittest -from hashlib import sha256 -from test_framework.util import assert_not_equal - -class FE: - """Objects of this class represent elements of the field GF(2**256 - 2**32 - 977). - - They are represented internally in numerator / denominator form, in order to delay inversions. - """ - - # The size of the field (also its modulus and characteristic). - SIZE = 2**256 - 2**32 - 977 - - def __init__(self, a=0, b=1): - """Initialize a field element a/b; both a and b can be ints or field elements.""" - if isinstance(a, FE): - num = a._num - den = a._den - else: - num = a % FE.SIZE - den = 1 - if isinstance(b, FE): - den = (den * b._num) % FE.SIZE - num = (num * b._den) % FE.SIZE - else: - den = (den * b) % FE.SIZE - assert den != 0 - if num == 0: - den = 1 - self._num = num - self._den = den - - def __add__(self, a): - """Compute the sum of two field elements (second may be int).""" - if isinstance(a, FE): - return FE(self._num * a._den + self._den * a._num, self._den * a._den) - return FE(self._num + self._den * a, self._den) - - def __radd__(self, a): - """Compute the sum of an integer and a field element.""" - return FE(a) + self - - def __sub__(self, a): - """Compute the difference of two field elements (second may be int).""" - if isinstance(a, FE): - return FE(self._num * a._den - self._den * a._num, self._den * a._den) - return FE(self._num - self._den * a, self._den) - - def __rsub__(self, a): - """Compute the difference of an integer and a field element.""" - return FE(a) - self - - def __mul__(self, a): - """Compute the product of two field elements (second may be int).""" - if isinstance(a, FE): - return FE(self._num * a._num, self._den * a._den) - return FE(self._num * a, self._den) - - def __rmul__(self, a): - """Compute the product of an integer with a field element.""" - return FE(a) * self - - def __truediv__(self, a): - """Compute the ratio of two field elements (second may be int).""" - return FE(self, a) - - def __pow__(self, a): - """Raise a field element to an integer power.""" - return FE(pow(self._num, a, FE.SIZE), pow(self._den, a, FE.SIZE)) - - def __neg__(self): - """Negate a field element.""" - return FE(-self._num, self._den) - - def __int__(self): - """Convert a field element to an integer in range 0..p-1. The result is cached.""" - if self._den != 1: - self._num = (self._num * pow(self._den, -1, FE.SIZE)) % FE.SIZE - self._den = 1 - return self._num - - def sqrt(self): - """Compute the square root of a field element if it exists (None otherwise). 
- - Due to the fact that our modulus is of the form (p % 4) == 3, the Tonelli-Shanks - algorithm (https://en.wikipedia.org/wiki/Tonelli-Shanks_algorithm) is simply - raising the argument to the power (p + 1) / 4. - - To see why: (p-1) % 2 = 0, so 2 divides the order of the multiplicative group, - and thus only half of the non-zero field elements are squares. An element a is - a (nonzero) square when Euler's criterion, a^((p-1)/2) = 1 (mod p), holds. We're - looking for x such that x^2 = a (mod p). Given a^((p-1)/2) = 1, that is equivalent - to x^2 = a^(1 + (p-1)/2) mod p. As (1 + (p-1)/2) is even, this is equivalent to - x = a^((1 + (p-1)/2)/2) mod p, or x = a^((p+1)/4) mod p.""" - v = int(self) - s = pow(v, (FE.SIZE + 1) // 4, FE.SIZE) - if s**2 % FE.SIZE == v: - return FE(s) - return None - - def is_square(self): - """Determine if this field element has a square root.""" - # A more efficient algorithm is possible here (Jacobi symbol). - return self.sqrt() is not None - - def is_even(self): - """Determine whether this field element, represented as integer in 0..p-1, is even.""" - return int(self) & 1 == 0 - - def __eq__(self, a): - """Check whether two field elements are equal (second may be an int).""" - if isinstance(a, FE): - return (self._num * a._den - self._den * a._num) % FE.SIZE == 0 - return (self._num - self._den * a) % FE.SIZE == 0 - - def to_bytes(self): - """Convert a field element to a 32-byte array (BE byte order).""" - return int(self).to_bytes(32, 'big') - - @staticmethod - def from_bytes(b): - """Convert a 32-byte array to a field element (BE byte order, no overflow allowed).""" - v = int.from_bytes(b, 'big') - if v >= FE.SIZE: - return None - return FE(v) - - def __str__(self): - """Convert this field element to a 64 character hex string.""" - return f"{int(self):064x}" - - def __repr__(self): - """Get a string representation of this field element.""" - return f"FE(0x{int(self):x})" - - -class GE: - """Objects of this class represent secp256k1 group elements (curve points or infinity) - - Normal points on the curve have fields: - * x: the x coordinate (a field element) - * y: the y coordinate (a field element, satisfying y^2 = x^3 + 7) - * infinity: False - - The point at infinity has field: - * infinity: True - """ - - # Order of the group (number of points on the curve, plus 1 for infinity) - ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141 - - # Number of valid distinct x coordinates on the curve. - ORDER_HALF = ORDER // 2 - - def __init__(self, x=None, y=None): - """Initialize a group element with specified x and y coordinates, or infinity.""" - if x is None: - # Initialize as infinity. - assert y is None - self.infinity = True - else: - # Initialize as point on the curve (and check that it is). - fx = FE(x) - fy = FE(y) - assert fy**2 == fx**3 + 7 - self.infinity = False - self.x = fx - self.y = fy - - def __add__(self, a): - """Add two group elements together.""" - # Deal with infinity: a + infinity == infinity + a == a. - if self.infinity: - return a - if a.infinity: - return self - if self.x == a.x: - if self.y != a.y: - # A point added to its own negation is infinity. - assert self.y + a.y == 0 - return GE() - else: - # For identical inputs, use the tangent (doubling formula). - lam = (3 * self.x**2) / (2 * self.y) - else: - # For distinct inputs, use the line through both points (adding formula). - lam = (self.y - a.y) / (self.x - a.x) - # Determine point opposite to the intersection of that line with the curve. 
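# A standalone sketch (not part of the patch): it re-derives the tangent-rule doubling
# that __add__ implements, using plain modular arithmetic. The tangent slope at a point
# is lam = 3*x^2 / (2*y), and mirroring the third intersection of that line with the
# curve gives x3 = lam^2 - 2*x, y3 = lam*(x - x3) - y, matching the formulas just below.
# Nothing is assumed beyond the curve equation y^2 = x^3 + 7; the generator's y
# coordinate is recomputed with the (p+1)/4 square-root trick described in FE.sqrt.
p = 2**256 - 2**32 - 977
gx = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
gy = pow(gx**3 + 7, (p + 1) // 4, p)        # a square root of gx^3 + 7; works since p % 4 == 3
assert gy * gy % p == (gx**3 + 7) % p       # G lies on the curve
lam = 3 * gx * gx * pow(2 * gy, -1, p) % p  # tangent slope at G (doubling case)
x3 = (lam * lam - 2 * gx) % p               # x coordinate of 2*G
y3 = (lam * (gx - x3) - gy) % p             # y coordinate of 2*G
assert y3 * y3 % p == (x3**3 + 7) % p       # 2*G lies on the curve as well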
- x = lam**2 - (self.x + a.x) - y = lam * (self.x - x) - self.y - return GE(x, y) - - @staticmethod - def mul(*aps): - """Compute a (batch) scalar group element multiplication. - - GE.mul((a1, p1), (a2, p2), (a3, p3)) is identical to a1*p1 + a2*p2 + a3*p3, - but more efficient.""" - # Reduce all the scalars modulo order first (so we can deal with negatives etc). - naps = [(a % GE.ORDER, p) for a, p in aps] - # Start with point at infinity. - r = GE() - # Iterate over all bit positions, from high to low. - for i in range(255, -1, -1): - # Double what we have so far. - r = r + r - # Add then add the points for which the corresponding scalar bit is set. - for (a, p) in naps: - if (a >> i) & 1: - r += p - return r - - def __rmul__(self, a): - """Multiply an integer with a group element.""" - if self == G: - return FAST_G.mul(a) - return GE.mul((a, self)) - - def __neg__(self): - """Compute the negation of a group element.""" - if self.infinity: - return self - return GE(self.x, -self.y) - - def to_bytes_compressed(self): - """Convert a non-infinite group element to 33-byte compressed encoding.""" - assert not self.infinity - return bytes([3 - self.y.is_even()]) + self.x.to_bytes() - - def to_bytes_uncompressed(self): - """Convert a non-infinite group element to 65-byte uncompressed encoding.""" - assert not self.infinity - return b'\x04' + self.x.to_bytes() + self.y.to_bytes() - - def to_bytes_xonly(self): - """Convert (the x coordinate of) a non-infinite group element to 32-byte xonly encoding.""" - assert not self.infinity - return self.x.to_bytes() - - @staticmethod - def lift_x(x): - """Return group element with specified field element as x coordinate (and even y).""" - y = (FE(x)**3 + 7).sqrt() - if y is None: - return None - if not y.is_even(): - y = -y - return GE(x, y) - - @staticmethod - def from_bytes(b): - """Convert a compressed or uncompressed encoding to a group element.""" - assert len(b) in (33, 65) - if len(b) == 33: - if b[0] != 2 and b[0] != 3: - return None - x = FE.from_bytes(b[1:]) - if x is None: - return None - r = GE.lift_x(x) - if r is None: - return None - if b[0] == 3: - r = -r - return r - else: - if b[0] != 4: - return None - x = FE.from_bytes(b[1:33]) - y = FE.from_bytes(b[33:]) - if y**2 != x**3 + 7: - return None - return GE(x, y) - - @staticmethod - def from_bytes_xonly(b): - """Convert a point given in xonly encoding to a group element.""" - assert len(b) == 32 - x = FE.from_bytes(b) - if x is None: - return None - return GE.lift_x(x) - - @staticmethod - def is_valid_x(x): - """Determine whether the provided field element is a valid X coordinate.""" - return (FE(x)**3 + 7).is_square() - - def __str__(self): - """Convert this group element to a string.""" - if self.infinity: - return "(inf)" - return f"({self.x},{self.y})" - - def __repr__(self): - """Get a string representation for this group element.""" - if self.infinity: - return "GE()" - return f"GE(0x{int(self.x):x},0x{int(self.y):x})" - -# The secp256k1 generator point -G = GE.lift_x(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798) - - -class FastGEMul: - """Table for fast multiplication with a constant group element. - - Speed up scalar multiplication with a fixed point P by using a precomputed lookup table with - its powers of 2: - - table = [P, 2*P, 4*P, (2^3)*P, (2^4)*P, ..., (2^255)*P] - - During multiplication, the points corresponding to each bit set in the scalar are added up, - i.e. on average ~128 point additions take place. 
- """ - - def __init__(self, p): - self.table = [p] # table[i] = (2^i) * p - for _ in range(255): - p = p + p - self.table.append(p) - - def mul(self, a): - result = GE() - a = a % GE.ORDER - for bit in range(a.bit_length()): - if a & (1 << bit): - result += self.table[bit] - return result - -# Precomputed table with multiples of G for fast multiplication -FAST_G = FastGEMul(G) - -class TestFrameworkSecp256k1(unittest.TestCase): - def test_H(self): - H = sha256(G.to_bytes_uncompressed()).digest() - assert GE.lift_x(FE.from_bytes(H)) is not None - self.assertEqual(H.hex(), "50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0") +# Copyright (c) 2022-2023 The Bitcoin Core developers) +# Distributed under the MIT software license, see the accompanying) +# file COPYING or http://www.opensource.org/licenses/mit-license.php.) +) +"""Test-only implementation of low-level secp256k1 field and group arithmetic) +) +It is designed for ease of understanding, not performance.) +) +WARNING: This code is slow and trivially vulnerable to side channel attacks. Do not use for) +anything but tests.) +) +Exports:) +* FE: class for secp256k1 field elements) +* GE: class for secp256k1 group elements) +* G: the secp256k1 generator point) +""") +) +import unittest) +from hashlib import sha256) +from test_framework.util import assert_not_equal) +) +class FE:) + """Objects of this class represent elements of the field GF(2**256 - 2**32 - 977).) +) + They are represented internally in numerator / denominator form, in order to delay inversions.) + """) +) + # The size of the field (also its modulus and characteristic).) + SIZE = 2**256 - 2**32 - 977) +) + def __init__(self, a=0, b=1):) + """Initialize a field element a/b; both a and b can be ints or field elements.""") + if isinstance(a, FE):) + num = a._num) + den = a._den) + else:) + num = a % FE.SIZE) + den = 1) + if isinstance(b, FE):) + den = (den * b._num) % FE.SIZE) + num = (num * b._den) % FE.SIZE) + else:) + den = (den * b) % FE.SIZE) + assert_not_equal(den, 0)) + if num == 0:) + den = 1) + self._num = num) + self._den = den) +) + def __add__(self, a):) + """Compute the sum of two field elements (second may be int).""") + if isinstance(a, FE):) + return FE(self._num * a._den + self._den * a._num, self._den * a._den)) + return FE(self._num + self._den * a, self._den)) +) + def __radd__(self, a):) + """Compute the sum of an integer and a field element.""") + return FE(a) + self) +) + def __sub__(self, a):) + """Compute the difference of two field elements (second may be int).""") + if isinstance(a, FE):) + return FE(self._num * a._den - self._den * a._num, self._den * a._den)) + return FE(self._num - self._den * a, self._den)) +) + def __rsub__(self, a):) + """Compute the difference of an integer and a field element.""") + return FE(a) - self) +) + def __mul__(self, a):) + """Compute the product of two field elements (second may be int).""") + if isinstance(a, FE):) + return FE(self._num * a._num, self._den * a._den)) + return FE(self._num * a, self._den)) +) + def __rmul__(self, a):) + """Compute the product of an integer with a field element.""") + return FE(a) * self) +) + def __truediv__(self, a):) + """Compute the ratio of two field elements (second may be int).""") + return FE(self, a)) +) + def __pow__(self, a):) + """Raise a field element to an integer power.""") + return FE(pow(self._num, a, FE.SIZE), pow(self._den, a, FE.SIZE))) +) + def __neg__(self):) + """Negate a field element.""") + return FE(-self._num, self._den)) +) + def 
__int__(self):) + """Convert a field element to an integer in range 0..p-1. The result is cached.""") + if self._den != 1:) + self._num = (self._num * pow(self._den, -1, FE.SIZE)) % FE.SIZE) + self._den = 1) + return self._num) +) + def sqrt(self):) + """Compute the square root of a field element if it exists (None otherwise).) +) + Due to the fact that our modulus is of the form (p % 4) == 3, the Tonelli-Shanks) + algorithm (https://en.wikipedia.org/wiki/Tonelli-Shanks_algorithm) is simply) + raising the argument to the power (p + 1) / 4.) +) + To see why: (p-1) % 2 = 0, so 2 divides the order of the multiplicative group,) + and thus only half of the non-zero field elements are squares. An element a is) + a (nonzero) square when Euler's criterion, a^((p-1)/2) = 1 (mod p), holds. We're) + looking for x such that x^2 = a (mod p). Given a^((p-1)/2) = 1, that is equivalent) + to x^2 = a^(1 + (p-1)/2) mod p. As (1 + (p-1)/2) is even, this is equivalent to) + x = a^((1 + (p-1)/2)/2) mod p, or x = a^((p+1)/4) mod p.""") + v = int(self)) + s = pow(v, (FE.SIZE + 1) // 4, FE.SIZE)) + if s**2 % FE.SIZE == v:) + return FE(s)) + return None) +) + def is_square(self):) + """Determine if this field element has a square root.""") + # A more efficient algorithm is possible here (Jacobi symbol).) + return self.sqrt() is not None) +) + def is_even(self):) + """Determine whether this field element, represented as integer in 0..p-1, is even.""") + return int(self) & 1 == 0) +) + def __eq__(self, a):) + """Check whether two field elements are equal (second may be an int).""") + if isinstance(a, FE):) + return (self._num * a._den - self._den * a._num) % FE.SIZE == 0) + return (self._num - self._den * a) % FE.SIZE == 0) +) + def to_bytes(self):) + """Convert a field element to a 32-byte array (BE byte order).""") + return int(self).to_bytes(32, 'big')) +) + @staticmethod) + def from_bytes(b):) + """Convert a 32-byte array to a field element (BE byte order, no overflow allowed).""") + v = int.from_bytes(b, 'big')) + if v >= FE.SIZE:) + return None) + return FE(v)) +) + def __str__(self):) + """Convert this field element to a 64 character hex string.""") + return f"{int(self):064x}") +) + def __repr__(self):) + """Get a string representation of this field element.""") + return f"FE(0x{int(self):x})") +) +) +class GE:) + """Objects of this class represent secp256k1 group elements (curve points or infinity)) +) + Normal points on the curve have fields:) + * x: the x coordinate (a field element)) + * y: the y coordinate (a field element, satisfying y^2 = x^3 + 7)) + * infinity: False) +) + The point at infinity has field:) + * infinity: True) + """) +) + # Order of the group (number of points on the curve, plus 1 for infinity)) + ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141) +) + # Number of valid distinct x coordinates on the curve.) + ORDER_HALF = ORDER // 2) +) + def __init__(self, x=None, y=None):) + """Initialize a group element with specified x and y coordinates, or infinity.""") + if x is None:) + # Initialize as infinity.) + assert y is None) + self.infinity = True) + else:) + # Initialize as point on the curve (and check that it is).) + fx = FE(x)) + fy = FE(y)) + assert fy**2 == fx**3 + 7) + self.infinity = False) + self.x = fx) + self.y = fy) +) + def __add__(self, a):) + """Add two group elements together.""") + # Deal with infinity: a + infinity == infinity + a == a.)
+ if self.infinity:) + return a) + if a.infinity:) + return self) + if self.x == a.x:) + if self.y != a.y:) + # A point added to its own negation is infinity.) + assert self.y + a.y == 0) + return GE()) + else:) + # For identical inputs, use the tangent (doubling formula).) + lam = (3 * self.x**2) / (2 * self.y)) + else:) + # For distinct inputs, use the line through both points (adding formula).) + lam = (self.y - a.y) / (self.x - a.x)) + # Determine point opposite to the intersection of that line with the curve.) + x = lam**2 - (self.x + a.x)) + y = lam * (self.x - x) - self.y) + return GE(x, y)) +) + @staticmethod) + def mul(*aps):) + """Compute a (batch) scalar group element multiplication.) +) + GE.mul((a1, p1), (a2, p2), (a3, p3)) is identical to a1*p1 + a2*p2 + a3*p3,) + but more efficient.""") + # Reduce all the scalars modulo order first (so we can deal with negatives etc).) + naps = [(a % GE.ORDER, p) for a, p in aps]) + # Start with point at infinity.) + r = GE()) + # Iterate over all bit positions, from high to low.) + for i in range(255, -1, -1):) + # Double what we have so far.) + r = r + r) + # Add then add the points for which the corresponding scalar bit is set.) + for (a, p) in naps:) + if (a >> i) & 1:) + r += p) + return r) +) + def __rmul__(self, a):) + """Multiply an integer with a group element.""") + if self == G:) + return FAST_G.mul(a)) + return GE.mul((a, self))) +) + def __neg__(self):) + """Compute the negation of a group element.""") + if self.infinity:) + return self) + return GE(self.x, -self.y)) +) + def to_bytes_compressed(self):) + """Convert a non-infinite group element to 33-byte compressed encoding.""") + assert not self.infinity) + return bytes([3 - self.y.is_even()]) + self.x.to_bytes()) +) + def to_bytes_uncompressed(self):) + """Convert a non-infinite group element to 65-byte uncompressed encoding.""") + assert not self.infinity) + return b'\x04' + self.x.to_bytes() + self.y.to_bytes()) +) + def to_bytes_xonly(self):) + """Convert (the x coordinate of) a non-infinite group element to 32-byte xonly encoding.""") + assert not self.infinity) + return self.x.to_bytes()) +) + @staticmethod) + def lift_x(x):) + """Return group element with specified field element as x coordinate (and even y).""") + y = (FE(x)**3 + 7).sqrt()) + if y is None:) + return None) + if not y.is_even():) + y = -y) + return GE(x, y)) +) + @staticmethod) + def from_bytes(b):) + """Convert a compressed or uncompressed encoding to a group element.""") + assert len(b) in (33, 65)) + if len(b) == 33:) + if b[0] != 2 and b[0] != 3:) + return None) + x = FE.from_bytes(b[1:])) + if x is None:) + return None) + r = GE.lift_x(x)) + if r is None:) + return None) + if b[0] == 3:) + r = -r) + return r) + else:) + if b[0] != 4:) + return None) + x = FE.from_bytes(b[1:33])) + y = FE.from_bytes(b[33:])) + if y**2 != x**3 + 7:) + return None) + return GE(x, y)) +) + @staticmethod) + def from_bytes_xonly(b):) + """Convert a point given in xonly encoding to a group element.""") + assert len(b) == 32) + x = FE.from_bytes(b)) + if x is None:) + return None) + return GE.lift_x(x)) +) + @staticmethod) + def is_valid_x(x):) + """Determine whether the provided field element is a valid X coordinate.""") + return (FE(x)**3 + 7).is_square()) +) + def __str__(self):) + """Convert this group element to a string.""") + if self.infinity:) + return "(inf)") + return f"({self.x},{self.y})") +) + def __repr__(self):) + """Get a string representation for this group element.""") + if self.infinity:) + return "GE()") + return
f"GE(0x{int(self.x):x},0x{int(self.y):x})") +) +# The secp256k1 generator point) +G = GE.lift_x(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798)) +) +) +class FastGEMul:) + """Table for fast multiplication with a constant group element.) +) + Speed up scalar multiplication with a fixed point P by using a precomputed lookup table with) + its powers of 2:) +) + table = [P, 2*P, 4*P, (2^3)*P, (2^4)*P, ..., (2^255)*P]) +) + During multiplication, the points corresponding to each bit set in the scalar are added up,) + i.e. on average ~128 point additions take place.) + """) +) + def __init__(self, p):) + self.table = [p] # table[i] = (2^i) * p) + for _ in range(255):) + p = p + p) + self.table.append(p)) +) + def mul(self, a):) + result = GE()) + a = a % GE.ORDER) + for bit in range(a.bit_length()):) + if a & (1 << bit):) + result += self.table[bit]) + return result) +) +# Precomputed table with multiples of G for fast multiplication) +FAST_G = FastGEMul(G)) +) +class TestFrameworkSecp256k1(unittest.TestCase):) + def test_H(self):) + H = sha256(G.to_bytes_uncompressed()).digest()) + assert GE.lift_x(FE.from_bytes(H)) is not None) + self.assertEqual(H.hex(), "50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0")) diff --git a/test/functional/test_framework/key.py b/test/functional/test_framework/key.py index ec37a315f8928c..05f264bae29c30 100644 --- a/test/functional/test_framework/key.py +++ b/test/functional/test_framework/key.py @@ -1,347 +1,347 @@ -# Copyright (c) 2019-2020 Pieter Wuille -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test-only secp256k1 elliptic curve protocols implementation - -WARNING: This code is slow, uses bad randomness, does not properly protect -keys, and is trivially vulnerable to side channel attacks. Do not use for -anything but tests.""" -import csv -import hashlib -import hmac -import os -import random -import unittest - -from test_framework.crypto import secp256k1 -from test_framework.util import assert_not_equal, random_bitflip - -# Point with no known discrete log. -H_POINT = "50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0" - -# Order of the secp256k1 curve -ORDER = secp256k1.GE.ORDER - -def TaggedHash(tag, data): - ss = hashlib.sha256(tag.encode('utf-8')).digest() - ss += ss - ss += data - return hashlib.sha256(ss).digest() - - -class ECPubKey: - """A secp256k1 public key""" - - def __init__(self): - """Construct an uninitialized public key""" - self.p = None - - def set(self, data): - """Construct a public key from a serialization in compressed or uncompressed format""" - self.p = secp256k1.GE.from_bytes(data) - self.compressed = len(data) == 33 - - @property - def is_compressed(self): - return self.compressed - - @property - def is_valid(self): - return self.p is not None - - def get_bytes(self): - assert self.is_valid - if self.compressed: - return self.p.to_bytes_compressed() - else: - return self.p.to_bytes_uncompressed() - - def verify_ecdsa(self, sig, msg, low_s=True): - """Verify a strictly DER-encoded ECDSA signature against this pubkey. - - See https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm for the - ECDSA verifier algorithm""" - assert self.is_valid - - # Extract r and s from the DER formatted signature. Return false for - # any DER encoding errors. 
- if (sig[1] + 2 != len(sig)): - return False - if (len(sig) < 4): - return False - if (sig[0] != 0x30): - return False - if (sig[2] != 0x02): - return False - rlen = sig[3] - if (len(sig) < 6 + rlen): - return False - if rlen < 1 or rlen > 33: - return False - if sig[4] >= 0x80: - return False - if (rlen > 1 and (sig[4] == 0) and not (sig[5] & 0x80)): - return False - r = int.from_bytes(sig[4:4+rlen], 'big') - if (sig[4+rlen] != 0x02): - return False - slen = sig[5+rlen] - if slen < 1 or slen > 33: - return False - if (len(sig) != 6 + rlen + slen): - return False - if sig[6+rlen] >= 0x80: - return False - if (slen > 1 and (sig[6+rlen] == 0) and not (sig[7+rlen] & 0x80)): - return False - s = int.from_bytes(sig[6+rlen:6+rlen+slen], 'big') - - # Verify that r and s are within the group order - if r < 1 or s < 1 or r >= ORDER or s >= ORDER: - return False - if low_s and s >= secp256k1.GE.ORDER_HALF: - return False - z = int.from_bytes(msg, 'big') - - # Run verifier algorithm on r, s - w = pow(s, -1, ORDER) - R = secp256k1.GE.mul((z * w, secp256k1.G), (r * w, self.p)) - if R.infinity or (int(R.x) % ORDER) != r: - return False - return True - -def generate_privkey(): - """Generate a valid random 32-byte private key.""" - return random.randrange(1, ORDER).to_bytes(32, 'big') - -def rfc6979_nonce(key): - """Compute signing nonce using RFC6979.""" - v = bytes([1] * 32) - k = bytes([0] * 32) - k = hmac.new(k, v + b"\x00" + key, 'sha256').digest() - v = hmac.new(k, v, 'sha256').digest() - k = hmac.new(k, v + b"\x01" + key, 'sha256').digest() - v = hmac.new(k, v, 'sha256').digest() - return hmac.new(k, v, 'sha256').digest() - -class ECKey: - """A secp256k1 private key""" - - def __init__(self): - self.valid = False - - def set(self, secret, compressed): - """Construct a private key object with given 32-byte secret and compressed flag.""" - assert len(secret) == 32 - secret = int.from_bytes(secret, 'big') - self.valid = (secret > 0 and secret < ORDER) - if self.valid: - self.secret = secret - self.compressed = compressed - - def generate(self, compressed=True): - """Generate a random private key (compressed or uncompressed).""" - self.set(generate_privkey(), compressed) - - def get_bytes(self): - """Retrieve the 32-byte representation of this key.""" - assert self.valid - return self.secret.to_bytes(32, 'big') - - @property - def is_valid(self): - return self.valid - - @property - def is_compressed(self): - return self.compressed - - def get_pubkey(self): - """Compute an ECPubKey object for this secret key.""" - assert self.valid - ret = ECPubKey() - ret.p = self.secret * secp256k1.G - ret.compressed = self.compressed - return ret - - def sign_ecdsa(self, msg, low_s=True, rfc6979=False): - """Construct a DER-encoded ECDSA signature with this key. - - See https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm for the - ECDSA signer algorithm.""" - assert self.valid - z = int.from_bytes(msg, 'big') - # Note: no RFC6979 by default, but a simple random nonce (some tests rely on distinct transactions for the same operation) - if rfc6979: - k = int.from_bytes(rfc6979_nonce(self.secret.to_bytes(32, 'big') + msg), 'big') - else: - k = random.randrange(1, ORDER) - R = k * secp256k1.G - r = int(R.x) % ORDER - s = (pow(k, -1, ORDER) * (z + self.secret * r)) % ORDER - if low_s and s > secp256k1.GE.ORDER_HALF: - s = ORDER - s - # Represent in DER format. The byte representations of r and s have - # length rounded up (255 bits becomes 32 bytes and 256 bits becomes 33 - # bytes). 
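# A standalone check (not part of the patch) of the rounding rule in the comment above:
# adding 8 rather than 7 before dividing by 8 reserves an extra zero byte exactly when
# the minimal encoding would have its top bit set, which DER would read as negative.
for bits, expected_len in [(255, 32), (256, 33)]:
    assert ((1 << (bits - 1)).bit_length() + 8) // 8 == expected_len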
- rb = r.to_bytes((r.bit_length() + 8) // 8, 'big') - sb = s.to_bytes((s.bit_length() + 8) // 8, 'big') - return b'\x30' + bytes([4 + len(rb) + len(sb), 2, len(rb)]) + rb + bytes([2, len(sb)]) + sb - -def compute_xonly_pubkey(key): - """Compute an x-only (32 byte) public key from a (32 byte) private key. - - This also returns whether the resulting public key was negated. - """ - - assert len(key) == 32 - x = int.from_bytes(key, 'big') - if x == 0 or x >= ORDER: - return (None, None) - P = x * secp256k1.G - return (P.to_bytes_xonly(), not P.y.is_even()) - -def tweak_add_privkey(key, tweak): - """Tweak a private key (after negating it if needed).""" - - assert len(key) == 32 - assert len(tweak) == 32 - - x = int.from_bytes(key, 'big') - if x == 0 or x >= ORDER: - return None - if not (x * secp256k1.G).y.is_even(): - x = ORDER - x - t = int.from_bytes(tweak, 'big') - if t >= ORDER: - return None - x = (x + t) % ORDER - if x == 0: - return None - return x.to_bytes(32, 'big') - -def tweak_add_pubkey(key, tweak): - """Tweak a public key and return whether the result had to be negated.""" - - assert len(key) == 32 - assert len(tweak) == 32 - - P = secp256k1.GE.from_bytes_xonly(key) - if P is None: - return None - t = int.from_bytes(tweak, 'big') - if t >= ORDER: - return None - Q = t * secp256k1.G + P - if Q.infinity: - return None - return (Q.to_bytes_xonly(), not Q.y.is_even()) - -def verify_schnorr(key, sig, msg): - """Verify a Schnorr signature (see BIP 340). - - - key is a 32-byte xonly pubkey (computed using compute_xonly_pubkey). - - sig is a 64-byte Schnorr signature - - msg is a 32-byte message - """ - assert len(key) == 32 - assert len(msg) == 32 - assert len(sig) == 64 - - P = secp256k1.GE.from_bytes_xonly(key) - if P is None: - return False - r = int.from_bytes(sig[0:32], 'big') - if r >= secp256k1.FE.SIZE: - return False - s = int.from_bytes(sig[32:64], 'big') - if s >= ORDER: - return False - e = int.from_bytes(TaggedHash("BIP0340/challenge", sig[0:32] + key + msg), 'big') % ORDER - R = secp256k1.GE.mul((s, secp256k1.G), (-e, P)) - if R.infinity or not R.y.is_even(): - return False - if r != R.x: - return False - return True - -def sign_schnorr(key, msg, aux=None, flip_p=False, flip_r=False): - """Create a Schnorr signature (see BIP 340).""" - - if aux is None: - aux = bytes(32) - - assert len(key) == 32 - assert len(msg) == 32 - assert len(aux) == 32 - - sec = int.from_bytes(key, 'big') - if sec == 0 or sec >= ORDER: - return None - P = sec * secp256k1.G - if P.y.is_even() == flip_p: - sec = ORDER - sec - t = (sec ^ int.from_bytes(TaggedHash("BIP0340/aux", aux), 'big')).to_bytes(32, 'big') - kp = int.from_bytes(TaggedHash("BIP0340/nonce", t + P.to_bytes_xonly() + msg), 'big') % ORDER - assert kp != 0 - R = kp * secp256k1.G - k = kp if R.y.is_even() != flip_r else ORDER - kp - e = int.from_bytes(TaggedHash("BIP0340/challenge", R.to_bytes_xonly() + P.to_bytes_xonly() + msg), 'big') % ORDER - return R.to_bytes_xonly() + ((k + e * sec) % ORDER).to_bytes(32, 'big') - - -class TestFrameworkKey(unittest.TestCase): - def test_ecdsa_and_schnorr(self): - """Test the Python ECDSA and Schnorr implementations.""" - byte_arrays = [generate_privkey() for _ in range(3)] + [v.to_bytes(32, 'big') for v in [0, ORDER - 1, ORDER, 2**256 - 1]] - keys = {} - for privkey_bytes in byte_arrays: # build array of key/pubkey pairs - privkey = ECKey() - privkey.set(privkey_bytes, compressed=True) - if privkey.is_valid: - keys[privkey] = privkey.get_pubkey() - for msg in byte_arrays: # test every combination of 
message, signing key, verification key - for sign_privkey, _ in keys.items(): - sig_ecdsa = sign_privkey.sign_ecdsa(msg) - sig_schnorr = sign_schnorr(sign_privkey.get_bytes(), msg) - for verify_privkey, verify_pubkey in keys.items(): - verify_xonly_pubkey = verify_pubkey.get_bytes()[1:] - if verify_privkey == sign_privkey: - self.assertTrue(verify_pubkey.verify_ecdsa(sig_ecdsa, msg)) - self.assertTrue(verify_schnorr(verify_xonly_pubkey, sig_schnorr, msg)) - sig_ecdsa = random_bitflip(sig_ecdsa) # damaging signature should break things - sig_schnorr = random_bitflip(sig_schnorr) - self.assertFalse(verify_pubkey.verify_ecdsa(sig_ecdsa, msg)) - self.assertFalse(verify_schnorr(verify_xonly_pubkey, sig_schnorr, msg)) - - def test_schnorr_testvectors(self): - """Implement the BIP340 test vectors (read from bip340_test_vectors.csv).""" - num_tests = 0 - vectors_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'bip340_test_vectors.csv') - with open(vectors_file, newline='', encoding='utf8') as csvfile: - reader = csv.reader(csvfile) - next(reader) - for row in reader: - (i_str, seckey_hex, pubkey_hex, aux_rand_hex, msg_hex, sig_hex, result_str, comment) = row - i = int(i_str) - pubkey = bytes.fromhex(pubkey_hex) - msg = bytes.fromhex(msg_hex) - sig = bytes.fromhex(sig_hex) - result = result_str == 'TRUE' - if seckey_hex != '': - seckey = bytes.fromhex(seckey_hex) - pubkey_actual = compute_xonly_pubkey(seckey)[0] - self.assertEqual(pubkey.hex(), pubkey_actual.hex(), "BIP340 test vector %i (%s): pubkey mismatch" % (i, comment)) - aux_rand = bytes.fromhex(aux_rand_hex) - try: - sig_actual = sign_schnorr(seckey, msg, aux_rand) - self.assertEqual(sig.hex(), sig_actual.hex(), "BIP340 test vector %i (%s): sig mismatch" % (i, comment)) - except RuntimeError as e: - self.fail("BIP340 test vector %i (%s): signing raised exception %s" % (i, comment, e)) - result_actual = verify_schnorr(pubkey, sig, msg) - if result: - self.assertEqual(result, result_actual, "BIP340 test vector %i (%s): verification failed" % (i, comment)) - else: - self.assertEqual(result, result_actual, "BIP340 test vector %i (%s): verification succeeded unexpectedly" % (i, comment)) - num_tests += 1 - self.assertTrue(num_tests >= 15) # expect at least 15 test vectors +# Copyright (c) 2019-2020 Pieter Wuille) +# Distributed under the MIT software license, see the accompanying) +# file COPYING or http://www.opensource.org/licenses/mit-license.php.) +"""Test-only secp256k1 elliptic curve protocols implementation) +) +WARNING: This code is slow, uses bad randomness, does not properly protect) +keys, and is trivially vulnerable to side channel attacks. Do not use for) +anything but tests.""") +import csv) +import hashlib) +import hmac) +import os) +import random) +import unittest) +) +from test_framework.crypto import secp256k1) +from test_framework.util import assert_not_equal, random_bitflip) +) +# Point with no known discrete log.) 
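# A standalone sketch (not part of the patch) of where the H_POINT constant just below
# comes from, mirroring TestFrameworkSecp256k1.test_H in crypto/secp256k1.py above:
# hashing the uncompressed generator happens to give a valid x coordinate, while giving
# nobody a known discrete log for the resulting point. Assumes test/functional is on
# sys.path so the test_framework imports resolve.
import hashlib
from test_framework.crypto.secp256k1 import FE, GE, G

H = hashlib.sha256(G.to_bytes_uncompressed()).digest()
assert GE.lift_x(FE.from_bytes(H)) is not None
assert H.hex() == "50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0"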
+H_POINT = "50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0") +) +# Order of the secp256k1 curve) +ORDER = secp256k1.GE.ORDER) +) +def TaggedHash(tag, data):) + ss = hashlib.sha256(tag.encode('utf-8')).digest()) + ss += ss) + ss += data) + return hashlib.sha256(ss).digest()) +) +) +class ECPubKey:) + """A secp256k1 public key""") +) + def __init__(self):) + """Construct an uninitialized public key""") + self.p = None) +) + def set(self, data):) + """Construct a public key from a serialization in compressed or uncompressed format""") + self.p = secp256k1.GE.from_bytes(data)) + self.compressed = len(data) == 33) +) + @property) + def is_compressed(self):) + return self.compressed) +) + @property) + def is_valid(self):) + return self.p is not None) +) + def get_bytes(self):) + assert self.is_valid) + if self.compressed:) + return self.p.to_bytes_compressed()) + else:) + return self.p.to_bytes_uncompressed()) +) + def verify_ecdsa(self, sig, msg, low_s=True):) + """Verify a strictly DER-encoded ECDSA signature against this pubkey.) +) + See https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm for the) + ECDSA verifier algorithm""") + assert self.is_valid) +) + # Extract r and s from the DER formatted signature. Return false for) + # any DER encoding errors.) + if (sig[1] + 2,len(sig)):) + return False) + if (len(sig) < 4):) + return False) + if (sig[0],0x30):) + return False) + if (sig[2],0x02):) + return False) + rlen = sig[3]) + if (len(sig) < 6 + rlen):) + return False) + if rlen < 1 or rlen > 33:) + return False) + if sig[4] >= 0x80:) + return False) + if (rlen > 1 and (sig[4] == 0) and not (sig[5] & 0x80)):) + return False) + r = int.from_bytes(sig[4:4+rlen], 'big')) + if (sig[4+rlen],0x02):) + return False) + slen = sig[5+rlen]) + if slen < 1 or slen > 33:) + return False) + if (len(sig),6 + rlen + slen):) + return False) + if sig[6+rlen] >= 0x80:) + return False) + if (slen > 1 and (sig[6+rlen] == 0) and not (sig[7+rlen] & 0x80)):) + return False) + s = int.from_bytes(sig[6+rlen:6+rlen+slen], 'big')) +) + # Verify that r and s are within the group order) + if r < 1 or s < 1 or r >= ORDER or s >= ORDER:) + return False) + if low_s and s >= secp256k1.GE.ORDER_HALF:) + return False) + z = int.from_bytes(msg, 'big')) +) + # Run verifier algorithm on r, s) + w = pow(s, -1, ORDER)) + R = secp256k1.GE.mul((z * w, secp256k1.G), (r * w, self.p))) + if R.infinity or (int(R.x) % ORDER),r:) + return False) + return True) +) +def generate_privkey():) + """Generate a valid random 32-byte private key.""") + return random.randrange(1, ORDER).to_bytes(32, 'big')) +) +def rfc6979_nonce(key):) + """Compute signing nonce using RFC6979.""") + v = bytes([1] * 32)) + k = bytes([0] * 32)) + k = hmac.new(k, v + b"\x00" + key, 'sha256').digest()) + v = hmac.new(k, v, 'sha256').digest()) + k = hmac.new(k, v + b"\x01" + key, 'sha256').digest()) + v = hmac.new(k, v, 'sha256').digest()) + return hmac.new(k, v, 'sha256').digest()) +) +class ECKey:) + """A secp256k1 private key""") +) + def __init__(self):) + self.valid = False) +) + def set(self, secret, compressed):) + """Construct a private key object with given 32-byte secret and compressed flag.""") + assert len(secret) == 32) + secret = int.from_bytes(secret, 'big')) + self.valid = (secret > 0 and secret < ORDER)) + if self.valid:) + self.secret = secret) + self.compressed = compressed) +) + def generate(self, compressed=True):) + """Generate a random private key (compressed or uncompressed).""") + 
self.set(generate_privkey(), compressed)) +) + def get_bytes(self):) + """Retrieve the 32-byte representation of this key.""") + assert self.valid) + return self.secret.to_bytes(32, 'big')) +) + @property) + def is_valid(self):) + return self.valid) +) + @property) + def is_compressed(self):) + return self.compressed) +) + def get_pubkey(self):) + """Compute an ECPubKey object for this secret key.""") + assert self.valid) + ret = ECPubKey()) + ret.p = self.secret * secp256k1.G) + ret.compressed = self.compressed) + return ret) +) + def sign_ecdsa(self, msg, low_s=True, rfc6979=False):) + """Construct a DER-encoded ECDSA signature with this key.) +) + See https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm for the) + ECDSA signer algorithm.""") + assert self.valid) + z = int.from_bytes(msg, 'big')) + # Note: no RFC6979 by default, but a simple random nonce (some tests rely on distinct transactions for the same operation)) + if rfc6979:) + k = int.from_bytes(rfc6979_nonce(self.secret.to_bytes(32, 'big') + msg), 'big')) + else:) + k = random.randrange(1, ORDER)) + R = k * secp256k1.G) + r = int(R.x) % ORDER) + s = (pow(k, -1, ORDER) * (z + self.secret * r)) % ORDER) + if low_s and s > secp256k1.GE.ORDER_HALF:) + s = ORDER - s) + # Represent in DER format. The byte representations of r and s have) + # length rounded up (255 bits becomes 32 bytes and 256 bits becomes 33) + # bytes).) + rb = r.to_bytes((r.bit_length() + 8) // 8, 'big')) + sb = s.to_bytes((s.bit_length() + 8) // 8, 'big')) + return b'\x30' + bytes([4 + len(rb) + len(sb), 2, len(rb)]) + rb + bytes([2, len(sb)]) + sb) +) +def compute_xonly_pubkey(key):) + """Compute an x-only (32 byte) public key from a (32 byte) private key.) +) + This also returns whether the resulting public key was negated.) + """) +) + assert len(key) == 32) + x = int.from_bytes(key, 'big')) + if x == 0 or x >= ORDER:) + return (None, None)) + P = x * secp256k1.G) + return (P.to_bytes_xonly(), not P.y.is_even())) +) +def tweak_add_privkey(key, tweak):) + """Tweak a private key (after negating it if needed).""") +) + assert len(key) == 32) + assert len(tweak) == 32) +) + x = int.from_bytes(key, 'big')) + if x == 0 or x >= ORDER:) + return None) + if not (x * secp256k1.G).y.is_even():) + x = ORDER - x) + t = int.from_bytes(tweak, 'big')) + if t >= ORDER:) + return None) + x = (x + t) % ORDER) + if x == 0:) + return None) + return x.to_bytes(32, 'big')) +) +def tweak_add_pubkey(key, tweak):) + """Tweak a public key and return whether the result had to be negated.""") +) + assert len(key) == 32) + assert len(tweak) == 32) +) + P = secp256k1.GE.from_bytes_xonly(key)) + if P is None:) + return None) + t = int.from_bytes(tweak, 'big')) + if t >= ORDER:) + return None) + Q = t * secp256k1.G + P) + if Q.infinity:) + return None) + return (Q.to_bytes_xonly(), not Q.y.is_even())) +) +def verify_schnorr(key, sig, msg):) + """Verify a Schnorr signature (see BIP 340).) +) + - key is a 32-byte xonly pubkey (computed using compute_xonly_pubkey).) 
+ - sig is a 64-byte Schnorr signature) + - msg is a 32-byte message) + """) + assert len(key) == 32) + assert len(msg) == 32) + assert len(sig) == 64) +) + P = secp256k1.GE.from_bytes_xonly(key)) + if P is None:) + return False) + r = int.from_bytes(sig[0:32], 'big')) + if r >= secp256k1.FE.SIZE:) + return False) + s = int.from_bytes(sig[32:64], 'big')) + if s >= ORDER:) + return False) + e = int.from_bytes(TaggedHash("BIP0340/challenge", sig[0:32] + key + msg), 'big') % ORDER) + R = secp256k1.GE.mul((s, secp256k1.G), (-e, P))) + if R.infinity or not R.y.is_even():) + return False) + if r != R.x:) + return False) + return True) +) +def sign_schnorr(key, msg, aux=None, flip_p=False, flip_r=False):) + """Create a Schnorr signature (see BIP 340).""") +) + if aux is None:) + aux = bytes(32)) +) + assert len(key) == 32) + assert len(msg) == 32) + assert len(aux) == 32) +) + sec = int.from_bytes(key, 'big')) + if sec == 0 or sec >= ORDER:) + return None) + P = sec * secp256k1.G) + if P.y.is_even() == flip_p:) + sec = ORDER - sec) + t = (sec ^ int.from_bytes(TaggedHash("BIP0340/aux", aux), 'big')).to_bytes(32, 'big')) + kp = int.from_bytes(TaggedHash("BIP0340/nonce", t + P.to_bytes_xonly() + msg), 'big') % ORDER) + assert_not_equal(kp, 0)) + R = kp * secp256k1.G) + k = kp if R.y.is_even() != flip_r else ORDER - kp) + e = int.from_bytes(TaggedHash("BIP0340/challenge", R.to_bytes_xonly() + P.to_bytes_xonly() + msg), 'big') % ORDER) + return R.to_bytes_xonly() + ((k + e * sec) % ORDER).to_bytes(32, 'big')) +) +) +class TestFrameworkKey(unittest.TestCase):) + def test_ecdsa_and_schnorr(self):) + """Test the Python ECDSA and Schnorr implementations.""") + byte_arrays = [generate_privkey() for _ in range(3)] + [v.to_bytes(32, 'big') for v in [0, ORDER - 1, ORDER, 2**256 - 1]]) + keys = {}) + for privkey_bytes in byte_arrays: # build array of key/pubkey pairs) + privkey = ECKey()) + privkey.set(privkey_bytes, compressed=True)) + if privkey.is_valid:) + keys[privkey] = privkey.get_pubkey()) + for msg in byte_arrays: # test every combination of message, signing key, verification key) + for sign_privkey, _ in keys.items():) + sig_ecdsa = sign_privkey.sign_ecdsa(msg)) + sig_schnorr = sign_schnorr(sign_privkey.get_bytes(), msg)) + for verify_privkey, verify_pubkey in keys.items():) + verify_xonly_pubkey = verify_pubkey.get_bytes()[1:]) + if verify_privkey == sign_privkey:) + self.assertTrue(verify_pubkey.verify_ecdsa(sig_ecdsa, msg))) + self.assertTrue(verify_schnorr(verify_xonly_pubkey, sig_schnorr, msg))) + sig_ecdsa = random_bitflip(sig_ecdsa) # damaging signature should break things) + sig_schnorr = random_bitflip(sig_schnorr)) + self.assertFalse(verify_pubkey.verify_ecdsa(sig_ecdsa, msg))) + self.assertFalse(verify_schnorr(verify_xonly_pubkey, sig_schnorr, msg))) +) + def test_schnorr_testvectors(self):) + """Implement the BIP340 test vectors (read from bip340_test_vectors.csv).""") + num_tests = 0) + vectors_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'bip340_test_vectors.csv')) + with open(vectors_file, newline='', encoding='utf8') as csvfile:) + reader = csv.reader(csvfile)) + next(reader)) + for row in reader:) + (i_str, seckey_hex, pubkey_hex, aux_rand_hex, msg_hex, sig_hex, result_str, comment) = row) + i = int(i_str)) + pubkey = bytes.fromhex(pubkey_hex)) + msg = bytes.fromhex(msg_hex)) + sig = bytes.fromhex(sig_hex)) + result = result_str == 'TRUE') + if seckey_hex != '':) + seckey = bytes.fromhex(seckey_hex)) + pubkey_actual = compute_xonly_pubkey(seckey)[0]) +
self.assertEqual(pubkey.hex(), pubkey_actual.hex(), "BIP340 test vector %i (%s): pubkey mismatch" % (i, comment))) + aux_rand = bytes.fromhex(aux_rand_hex)) + try:) + sig_actual = sign_schnorr(seckey, msg, aux_rand)) + self.assertEqual(sig.hex(), sig_actual.hex(), "BIP340 test vector %i (%s): sig mismatch" % (i, comment))) + except RuntimeError as e:) + self.fail("BIP340 test vector %i (%s): signing raised exception %s" % (i, comment, e))) + result_actual = verify_schnorr(pubkey, sig, msg)) + if result:) + self.assertEqual(result, result_actual, "BIP340 test vector %i (%s): verification failed" % (i, comment))) + else:) + self.assertEqual(result, result_actual, "BIP340 test vector %i (%s): verification succeeded unexpectedly" % (i, comment))) + num_tests += 1) + self.assertTrue(num_tests >= 15) # expect at least 15 test vectors) diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py index 9b4e81f0d0ba0f..a77f59cd4ca609 100755 --- a/test/functional/test_framework/p2p.py +++ b/test/functional/test_framework/p2p.py @@ -1,955 +1,955 @@ -#!/usr/bin/env python3 -# Copyright (c) 2010 ArtForz -- public domain half-a-node -# Copyright (c) 2012 Jeff Garzik -# Copyright (c) 2010-2022 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test objects for interacting with a bitcoind node over the p2p protocol. - -The P2PInterface objects interact with the bitcoind nodes under test using the -node's p2p interface. They can be used to send messages to the node, and -callbacks can be registered that execute when messages are received from the -node. Messages are sent to/received from the node on an asyncio event loop. -State held inside the objects must be guarded by the p2p_lock to avoid data -races between the main testing thread and the event loop. 
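# A hedged sketch (not part of the patch) of the locking convention described above, as
# driven from a test's main thread. 'node' stands in for a TestNode from a running
# BitcoinTestFramework and is not defined here; message_count is mutated on the event
# loop, so the read takes p2p_lock first.
from test_framework.p2p import P2PInterface, p2p_lock

peer = node.add_p2p_connection(P2PInterface())
peer.sync_with_ping()            # round-trip so earlier messages have been processed
with p2p_lock:
    invs_seen = peer.message_count["inv"]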
- -P2PConnection: A low-level connection object to a node's P2P interface -P2PInterface: A high-level interface object for communicating to a node over P2P -P2PDataStore: A p2p interface class that keeps a store of transactions and blocks - and can respond correctly to getdata and getheaders messages -P2PTxInvStore: A p2p interface class that inherits from P2PDataStore, and keeps - a count of how many times each txid has been announced.""" - -import asyncio -from collections import defaultdict -from io import BytesIO -import logging -import platform -import struct -import sys -import threading - -from test_framework.messages import ( - CBlockHeader, - MAX_HEADERS_RESULTS, - msg_addr, - msg_addrv2, - msg_block, - MSG_BLOCK, - msg_blocktxn, - msg_cfcheckpt, - msg_cfheaders, - msg_cfilter, - msg_cmpctblock, - msg_feefilter, - msg_filteradd, - msg_filterclear, - msg_filterload, - msg_getaddr, - msg_getblocks, - msg_getblocktxn, - msg_getcfcheckpt, - msg_getcfheaders, - msg_getcfilters, - msg_getdata, - msg_getheaders, - msg_headers, - msg_inv, - msg_mempool, - msg_merkleblock, - msg_notfound, - msg_ping, - msg_pong, - msg_sendaddrv2, - msg_sendcmpct, - msg_sendheaders, - msg_sendtxrcncl, - msg_tx, - MSG_TX, - MSG_TYPE_MASK, - msg_verack, - msg_version, - MSG_WTX, - msg_wtxidrelay, - NODE_NETWORK, - NODE_WITNESS, - MAGIC_BYTES, - sha256, -) -from test_framework.util import ( - assert_not_equal, - MAX_NODES, - p2p_port, - wait_until_helper_internal, -) -from test_framework.v2_p2p import ( - EncryptedP2PState, - MSGTYPE_TO_SHORTID, - SHORTID, -) - -logger = logging.getLogger("TestFramework.p2p") - -# The minimum P2P version that this test framework supports -MIN_P2P_VERSION_SUPPORTED = 60001 -# The P2P version that this test framework implements and sends in its `version` message -# Version 70016 supports wtxid relay -P2P_VERSION = 70016 -# The services that this test framework offers in its `version` message -P2P_SERVICES = NODE_NETWORK | NODE_WITNESS -# The P2P user agent string that this test framework sends in its `version` message -P2P_SUBVERSION = "/python-p2p-tester:0.0.3/" -# Value for relay that this test framework sends in its `version` message -P2P_VERSION_RELAY = 1 -# Delay after receiving a tx inv before requesting transactions from non-preferred peers, in seconds -NONPREF_PEER_TX_DELAY = 2 -# Delay for requesting transactions via txids if we have wtxid-relaying peers, in seconds -TXID_RELAY_DELAY = 2 -# Delay for requesting transactions if the peer has MAX_PEER_TX_REQUEST_IN_FLIGHT or more requests -OVERLOADED_PEER_TX_DELAY = 2 -# How long to wait before downloading a transaction from an additional peer -GETDATA_TX_INTERVAL = 60 - -MESSAGEMAP = { - b"addr": msg_addr, - b"addrv2": msg_addrv2, - b"block": msg_block, - b"blocktxn": msg_blocktxn, - b"cfcheckpt": msg_cfcheckpt, - b"cfheaders": msg_cfheaders, - b"cfilter": msg_cfilter, - b"cmpctblock": msg_cmpctblock, - b"feefilter": msg_feefilter, - b"filteradd": msg_filteradd, - b"filterclear": msg_filterclear, - b"filterload": msg_filterload, - b"getaddr": msg_getaddr, - b"getblocks": msg_getblocks, - b"getblocktxn": msg_getblocktxn, - b"getcfcheckpt": msg_getcfcheckpt, - b"getcfheaders": msg_getcfheaders, - b"getcfilters": msg_getcfilters, - b"getdata": msg_getdata, - b"getheaders": msg_getheaders, - b"headers": msg_headers, - b"inv": msg_inv, - b"mempool": msg_mempool, - b"merkleblock": msg_merkleblock, - b"notfound": msg_notfound, - b"ping": msg_ping, - b"pong": msg_pong, - b"sendaddrv2": msg_sendaddrv2, - b"sendcmpct": 
msg_sendcmpct, - b"sendheaders": msg_sendheaders, - b"sendtxrcncl": msg_sendtxrcncl, - b"tx": msg_tx, - b"verack": msg_verack, - b"version": msg_version, - b"wtxidrelay": msg_wtxidrelay, -} - - -class P2PConnection(asyncio.Protocol): - """A low-level connection object to a node's P2P interface. - - This class is responsible for: - - - opening and closing the TCP connection to the node - - reading bytes from and writing bytes to the socket - - deserializing and serializing the P2P message header - - logging messages as they are sent and received - - This class contains no logic for handing the P2P message payloads. It must be - sub-classed and the on_message() callback overridden.""" - - def __init__(self): - # The underlying transport of the connection. - # Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe - self._transport = None - # This lock is acquired before sending messages over the socket. There's an implied lock order and - # p2p_lock must not be acquired after _send_lock as it could result in deadlocks. - self._send_lock = threading.Lock() - self.v2_state = None # EncryptedP2PState object needed for v2 p2p connections - self.reconnect = False # set if reconnection needs to happen - - @property - def is_connected(self): - return self._transport is not None - - @property - def supports_v2_p2p(self): - return self.v2_state is not None - - def peer_connect_helper(self, dstaddr, dstport, net, timeout_factor): - assert not self.is_connected - self.timeout_factor = timeout_factor - self.dstaddr = dstaddr - self.dstport = dstport - # The initial message to send after the connection was made: - self.on_connection_send_msg = None - self.recvbuf = b"" - self.magic_bytes = MAGIC_BYTES[net] - self.p2p_connected_to_node = dstport != 0 - - def peer_connect(self, dstaddr, dstport, *, net, timeout_factor, supports_v2_p2p): - self.peer_connect_helper(dstaddr, dstport, net, timeout_factor) - if supports_v2_p2p: - self.v2_state = EncryptedP2PState(initiating=True, net=net) - - loop = NetworkThread.network_event_loop - logger.debug('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport)) - coroutine = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport) - return lambda: loop.call_soon_threadsafe(loop.create_task, coroutine) - - def peer_accept_connection(self, connect_id, connect_cb=lambda: None, *, net, timeout_factor, supports_v2_p2p, reconnect): - self.peer_connect_helper('0', 0, net, timeout_factor) - self.reconnect = reconnect - if supports_v2_p2p: - self.v2_state = EncryptedP2PState(initiating=False, net=net) - - logger.debug('Listening for Bitcoin Node with id: {}'.format(connect_id)) - return lambda: NetworkThread.listen(self, connect_cb, idx=connect_id) - - def peer_disconnect(self): - # Connection could have already been closed by other end. 
- NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort()) - - # Connection and disconnection methods - - def connection_made(self, transport): - """asyncio callback when a connection is opened.""" - assert not self._transport - info = transport.get_extra_info("socket") - us = info.getsockname() - them = info.getpeername() - logger.debug(f"Connected: us={us[0]}:{us[1]}, them={them[0]}:{them[1]}") - self.dstaddr = them[0] - self.dstport = them[1] - self._transport = transport - # in an inbound connection to the TestNode with P2PConnection as the initiator, [TestNode <---- P2PConnection] - # send the initial handshake immediately - if self.supports_v2_p2p and self.v2_state.initiating and not self.v2_state.tried_v2_handshake: - send_handshake_bytes = self.v2_state.initiate_v2_handshake() - logger.debug(f"sending {len(self.v2_state.sent_garbage)} bytes of garbage data") - self.send_raw_message(send_handshake_bytes) - # for v1 outbound connections, send version message immediately after opening - # (for v2 outbound connections, send it after the initial v2 handshake) - if self.p2p_connected_to_node and not self.supports_v2_p2p: - self.send_version() - self.on_open() - - def connection_lost(self, exc): - """asyncio callback when a connection is closed.""" - # don't display warning if reconnection needs to be attempted using v1 P2P - if exc and not self.reconnect: - logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc)) - else: - logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport)) - self._transport = None - self.recvbuf = b"" - self.on_close() - - # v2 handshake method - def _on_data_v2_handshake(self): - """v2 handshake performed before P2P messages are exchanged (see BIP324). P2PConnection is the initiator - (in inbound connections to TestNode) and the responder (in outbound connections from TestNode). - Performed by: - * initiator using `initiate_v2_handshake()`, `complete_handshake()` and `authenticate_handshake()` - * responder using `respond_v2_handshake()`, `complete_handshake()` and `authenticate_handshake()` - - `initiate_v2_handshake()` is immediately done by the initiator when the connection is established in - `connection_made()`. The rest of the initial v2 handshake functions are handled here. 
- """ - if not self.v2_state.peer: - if not self.v2_state.initiating and not self.v2_state.sent_garbage: - # if the responder hasn't sent garbage yet, the responder is still reading ellswift bytes - # reads ellswift bytes till the first mismatch from 12 bytes V1_PREFIX - length, send_handshake_bytes = self.v2_state.respond_v2_handshake(BytesIO(self.recvbuf)) - self.recvbuf = self.recvbuf[length:] - if send_handshake_bytes == -1: - self.v2_state = None - return - elif send_handshake_bytes: - logger.debug(f"sending {len(self.v2_state.sent_garbage)} bytes of garbage data") - self.send_raw_message(send_handshake_bytes) - elif send_handshake_bytes == b"": - return # only after send_handshake_bytes are sent can `complete_handshake()` be done - - # `complete_handshake()` reads the remaining ellswift bytes from recvbuf - # and sends response after deriving shared ECDH secret using received ellswift bytes - length, response = self.v2_state.complete_handshake(BytesIO(self.recvbuf)) - self.recvbuf = self.recvbuf[length:] - if response: - self.send_raw_message(response) - else: - return # only after response is sent can `authenticate_handshake()` be done - - # `self.v2_state.peer` is instantiated only after shared ECDH secret/BIP324 derived keys and ciphers - # is derived in `complete_handshake()`. - # so `authenticate_handshake()` which uses the BIP324 derived ciphers gets called after `complete_handshake()`. - assert self.v2_state.peer - length, is_mac_auth = self.v2_state.authenticate_handshake(self.recvbuf) - if not is_mac_auth: - raise ValueError("invalid v2 mac tag in handshake authentication") - self.recvbuf = self.recvbuf[length:] - if self.v2_state.tried_v2_handshake: - # for v2 outbound connections, send version message immediately after v2 handshake - if self.p2p_connected_to_node: - self.send_version() - # process post-v2-handshake data immediately, if available - if len(self.recvbuf) > 0: - self._on_data() - - # Socket read methods - - def data_received(self, t): - """asyncio callback when data is read from the socket.""" - if len(t) > 0: - self.recvbuf += t - if self.supports_v2_p2p and not self.v2_state.tried_v2_handshake: - self._on_data_v2_handshake() - else: - self._on_data() - - def _on_data(self): - """Try to read P2P messages from the recv buffer. - - This method reads data from the buffer in a loop. 
It deserializes,
-        parses and verifies the P2P header, then passes the P2P payload to
-        the on_message callback for processing."""
-        try:
-            while True:
-                if self.supports_v2_p2p:
-                    # v2 P2P messages are read
-                    msglen, msg = self.v2_state.v2_receive_packet(self.recvbuf)
-                    if msglen == -1:
-                        raise ValueError("invalid v2 mac tag " + repr(self.recvbuf))
-                    elif msglen == 0:  # need to receive more bytes in recvbuf
-                        return
-                    self.recvbuf = self.recvbuf[msglen:]
-
-                    if msg is None:  # ignore decoy messages
-                        return
-                    assert msg  # application layer messages (which aren't decoy messages) are non-empty
-                    shortid = msg[0]  # 1-byte short message type ID
-                    if shortid == 0:
-                        # next 12 bytes are interpreted as ASCII message type if shortid is b'\x00'
-                        if len(msg) < 13:
-                            raise IndexError("msg needs minimum required length of 13 bytes")
-                        msgtype = msg[1:13].rstrip(b'\x00')
-                        msg = msg[13:]  # msg is set to be payload
-                    else:
-                        # a 1-byte short message type ID
-                        msgtype = SHORTID.get(shortid, f"unknown-{shortid}")
-                        msg = msg[1:]
-                else:
-                    # v1 P2P messages are read
-                    if len(self.recvbuf) < 4:
-                        return
-                    if self.recvbuf[:4] != self.magic_bytes:
-                        raise ValueError("magic bytes mismatch: {} != {}".format(repr(self.magic_bytes), repr(self.recvbuf)))
-                    if len(self.recvbuf) < 4 + 12 + 4 + 4:
-                        return
-                    msgtype = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
-                    msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
-                    checksum = self.recvbuf[4+12+4:4+12+4+4]
-                    if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
-                        return
-                    msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
-                    th = sha256(msg)
-                    h = sha256(th)
-                    if checksum != h[:4]:
-                        raise ValueError("got bad checksum " + repr(self.recvbuf))
-                    self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
-                if msgtype not in MESSAGEMAP:
-                    raise ValueError("Received unknown msgtype from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, msgtype, repr(msg)))
-                f = BytesIO(msg)
-                t = MESSAGEMAP[msgtype]()
-                t.deserialize(f)
-                self._log_message("receive", t)
-                self.on_message(t)
-        except Exception as e:
-            logger.exception('Error reading message:', repr(e))
-            raise
-
-    def on_message(self, message):
-        """Callback for processing a P2P payload. Must be overridden by derived class."""
-        raise NotImplementedError
-
-    # Socket write methods
-
-    def send_message(self, message, is_decoy=False):
-        """Send a P2P message over the socket.
-
-        This method takes a P2P payload, builds the P2P header and adds
-        the message to the send buffer to be sent over the socket."""
-        with self._send_lock:
-            tmsg = self.build_message(message, is_decoy)
-            self._log_message("send", message)
-            return self.send_raw_message(tmsg)
-
-    def send_raw_message(self, raw_message_bytes):
-        if not self.is_connected:
-            raise IOError('Not connected')
-
-        def maybe_write():
-            if not self._transport:
-                return
-            if self._transport.is_closing():
-                return
-            self._transport.write(raw_message_bytes)
-        NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
-
-    # Class utility methods
-
-    def build_message(self, message, is_decoy=False):
-        """Build a serialized P2P message"""
-        msgtype = message.msgtype
-        data = message.serialize()
-        if self.supports_v2_p2p:
-            if msgtype in SHORTID.values():
-                tmsg = MSGTYPE_TO_SHORTID.get(msgtype).to_bytes(1, 'big')
-            else:
-                tmsg = b"\x00"
-                tmsg += msgtype
-                tmsg += b"\x00" * (12 - len(msgtype))
-            tmsg += data
-            return self.v2_state.v2_enc_packet(tmsg, ignore=is_decoy)
-        else:
-            tmsg = self.magic_bytes
-            tmsg += msgtype
-            tmsg += b"\x00" * (12 - len(msgtype))
-            tmsg += struct.pack("<I", len(data))
-            th = sha256(data)
-            h = sha256(th)
-            tmsg += h[:4]
-            tmsg += data
-            return tmsg
-
-    def _log_message(self, direction, msg):
-        """Logs a message being sent or received over the connection."""
-        if direction == "send":
-            log_message = "Send message to "
-        elif direction == "receive":
-            log_message = "Receive message from "
-        log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
-        if len(log_message) > 500:
-            log_message += "... (msg truncated)"
-        logger.debug(log_message)
-
-
-class P2PInterface(P2PConnection):
-    """A high-level P2P interface class for communicating with a Bitcoin node.
-
-    This class provides high-level callbacks for processing P2P message
-    payloads, as well as convenience methods for interacting with the
-    node over P2P.
-
-    Individual testcases should subclass this and override the on_* methods
-    if they want to alter message handling behaviour."""
-    def __init__(self, support_addrv2=False, wtxidrelay=True):
-        super().__init__()
-
-        # Track number of messages of each type received.
-        # Should be read-only in a test.
-        self.message_count = defaultdict(int)
-
-        # Track the most recent message of each type.
-        # To wait for a message to be received, pop that message from
-        # this and use self.wait_until.
-        self.last_message = {}
-
-        # A count of the number of ping messages we've sent to the node
-        self.ping_counter = 1
-
-        # The network services received from the peer
-        self.nServices = 0
-
-        self.support_addrv2 = support_addrv2
-
-        # If the peer supports wtxid-relay
-        self.wtxidrelay = wtxidrelay
-
-    def peer_connect_send_version(self, services):
-        # Send a version msg
-        vt = msg_version()
-        vt.nVersion = P2P_VERSION
-        vt.strSubVer = P2P_SUBVERSION
-        vt.relay = P2P_VERSION_RELAY
-        vt.nServices = services
-        vt.addrTo.ip = self.dstaddr
-        vt.addrTo.port = self.dstport
-        vt.addrFrom.ip = "0.0.0.0"
-        vt.addrFrom.port = 0
-        self.on_connection_send_msg = vt  # Will be sent in connection_made callback
-
-    def peer_connect(self, *, services=P2P_SERVICES, send_version, **kwargs):
-        create_conn = super().peer_connect(**kwargs)
-
-        if send_version:
-            self.peer_connect_send_version(services)
-
-        return create_conn
-
-    def peer_accept_connection(self, *args, services=P2P_SERVICES, **kwargs):
-        create_conn = super().peer_accept_connection(*args, **kwargs)
-        self.peer_connect_send_version(services)
-
-        return create_conn
-
-    # Message receiving methods
-
-    def on_message(self, message):
-        """Receive message and dispatch message to appropriate callback.
- - We keep a count of how many of each message type has been received - and the most recent message of each type.""" - with p2p_lock: - try: - msgtype = message.msgtype.decode('ascii') - self.message_count[msgtype] += 1 - self.last_message[msgtype] = message - getattr(self, 'on_' + msgtype)(message) - except Exception: - print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0])) - raise - - # Callback methods. Can be overridden by subclasses in individual test - # cases to provide custom message handling behaviour. - - def on_open(self): - pass - - def on_close(self): - pass - - def on_addr(self, message): pass - def on_addrv2(self, message): pass - def on_block(self, message): pass - def on_blocktxn(self, message): pass - def on_cfcheckpt(self, message): pass - def on_cfheaders(self, message): pass - def on_cfilter(self, message): pass - def on_cmpctblock(self, message): pass - def on_feefilter(self, message): pass - def on_filteradd(self, message): pass - def on_filterclear(self, message): pass - def on_filterload(self, message): pass - def on_getaddr(self, message): pass - def on_getblocks(self, message): pass - def on_getblocktxn(self, message): pass - def on_getdata(self, message): pass - def on_getheaders(self, message): pass - def on_headers(self, message): pass - def on_mempool(self, message): pass - def on_merkleblock(self, message): pass - def on_notfound(self, message): pass - def on_pong(self, message): pass - def on_sendaddrv2(self, message): pass - def on_sendcmpct(self, message): pass - def on_sendheaders(self, message): pass - def on_sendtxrcncl(self, message): pass - def on_tx(self, message): pass - def on_wtxidrelay(self, message): pass - - def on_inv(self, message): - want = msg_getdata() - for i in message.inv: - if i.type != 0: - want.inv.append(i) - if len(want.inv): - self.send_message(want) - - def on_ping(self, message): - self.send_message(msg_pong(message.nonce)) - - def on_verack(self, message): - pass - - def on_version(self, message): - assert message.nVersion >= MIN_P2P_VERSION_SUPPORTED, "Version {} received. 
Test framework only supports versions greater than {}".format(message.nVersion, MIN_P2P_VERSION_SUPPORTED) - # for inbound connections, reply to version with own version message - # (could be due to v1 reconnect after a failed v2 handshake) - if not self.p2p_connected_to_node: - self.send_version() - self.reconnect = False - if message.nVersion >= 70016 and self.wtxidrelay: - self.send_message(msg_wtxidrelay()) - if self.support_addrv2: - self.send_message(msg_sendaddrv2()) - self.send_message(msg_verack()) - self.nServices = message.nServices - self.relay = message.relay - if self.p2p_connected_to_node: - self.send_message(msg_getaddr()) - - # Connection helper methods - - def wait_until(self, test_function_in, *, timeout=60, check_connected=True): - def test_function(): - if check_connected: - assert self.is_connected - return test_function_in() - - wait_until_helper_internal(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor) - - def wait_for_connect(self, *, timeout=60): - test_function = lambda: self.is_connected - self.wait_until(test_function, timeout=timeout, check_connected=False) - - def wait_for_disconnect(self, *, timeout=60): - test_function = lambda: not self.is_connected - self.wait_until(test_function, timeout=timeout, check_connected=False) - - def wait_for_reconnect(self, *, timeout=60): - def test_function(): - return self.is_connected and self.last_message.get('version') and not self.supports_v2_p2p - self.wait_until(test_function, timeout=timeout, check_connected=False) - - # Message receiving helper methods - - def wait_for_tx(self, txid, *, timeout=60): - def test_function(): - if not self.last_message.get('tx'): - return False - return self.last_message['tx'].tx.rehash() == txid - - self.wait_until(test_function, timeout=timeout) - - def wait_for_block(self, blockhash, *, timeout=60): - def test_function(): - return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash - - self.wait_until(test_function, timeout=timeout) - - def wait_for_header(self, blockhash, *, timeout=60): - def test_function(): - last_headers = self.last_message.get('headers') - if not last_headers: - return False - return last_headers.headers[0].rehash() == int(blockhash, 16) - - self.wait_until(test_function, timeout=timeout) - - def wait_for_merkleblock(self, blockhash, *, timeout=60): - def test_function(): - last_filtered_block = self.last_message.get('merkleblock') - if not last_filtered_block: - return False - return last_filtered_block.merkleblock.header.rehash() == int(blockhash, 16) - - self.wait_until(test_function, timeout=timeout) - - def wait_for_getdata(self, hash_list, *, timeout=60): - """Waits for a getdata message. - - The object hashes in the inventory vector must match the provided hash_list.""" - def test_function(): - last_data = self.last_message.get("getdata") - if not last_data: - return False - return [x.hash for x in last_data.inv] == hash_list - - self.wait_until(test_function, timeout=timeout) - - def wait_for_getheaders(self, block_hash=None, *, timeout=60): - """Waits for a getheaders message containing a specific block hash. 
- - If no block hash is provided, checks whether any getheaders message has been received by the node.""" - def test_function(): - last_getheaders = self.last_message.pop("getheaders", None) - if block_hash is None: - return last_getheaders - if last_getheaders is None: - return False - return block_hash == last_getheaders.locator.vHave[0] - - self.wait_until(test_function, timeout=timeout) - - def wait_for_inv(self, expected_inv, *, timeout=60): - """Waits for an INV message and checks that the first inv object in the message was as expected.""" - if len(expected_inv) > 1: - raise NotImplementedError("wait_for_inv() will only verify the first inv object") - - def test_function(): - return self.last_message.get("inv") and \ - self.last_message["inv"].inv[0].type == expected_inv[0].type and \ - self.last_message["inv"].inv[0].hash == expected_inv[0].hash - - self.wait_until(test_function, timeout=timeout) - - def wait_for_verack(self, *, timeout=60): - def test_function(): - return "verack" in self.last_message - - self.wait_until(test_function, timeout=timeout) - - # Message sending helper functions - - def send_version(self): - if self.on_connection_send_msg: - self.send_message(self.on_connection_send_msg) - self.on_connection_send_msg = None # Never used again - - def send_and_ping(self, message, *, timeout=60): - self.send_message(message) - self.sync_with_ping(timeout=timeout) - - def sync_with_ping(self, *, timeout=60): - """Ensure ProcessMessages and SendMessages is called on this connection""" - # Sending two pings back-to-back, requires that the node calls - # `ProcessMessage` twice, and thus ensures `SendMessages` must have - # been called at least once - self.send_message(msg_ping(nonce=0)) - self.send_message(msg_ping(nonce=self.ping_counter)) - - def test_function(): - return self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter - - self.wait_until(test_function, timeout=timeout) - self.ping_counter += 1 - - -# One lock for synchronizing all data access between the network event loop (see -# NetworkThread below) and the thread running the test logic. For simplicity, -# P2PConnection acquires this lock whenever delivering a message to a P2PInterface. -# This lock should be acquired in the thread running the test logic to synchronize -# access to any data shared with the P2PInterface or P2PConnection. -p2p_lock = threading.Lock() - - -class NetworkThread(threading.Thread): - network_event_loop = None - - def __init__(self): - super().__init__(name="NetworkThread") - # There is only one event loop and no more than one thread must be created - assert not self.network_event_loop - - NetworkThread.listeners = {} - NetworkThread.protos = {} - if platform.system() == 'Windows': - asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) - NetworkThread.network_event_loop = asyncio.new_event_loop() - - def run(self): - """Start the network thread.""" - self.network_event_loop.run_forever() - - def close(self, *, timeout=10): - """Close the connections and network event loop.""" - self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop) - wait_until_helper_internal(lambda: not self.network_event_loop.is_running(), timeout=timeout) - self.network_event_loop.close() - self.join(timeout) - # Safe to remove event loop. 
- NetworkThread.network_event_loop = None - - @classmethod - def listen(cls, p2p, callback, port=None, addr=None, idx=1): - """ Ensure a listening server is running on the given port, and run the - protocol specified by `p2p` on the next connection to it. Once ready - for connections, call `callback`.""" - - if port is None: - assert 0 < idx <= MAX_NODES - port = p2p_port(MAX_NODES - idx) - if addr is None: - addr = '127.0.0.1' - - def exception_handler(loop, context): - if not p2p.reconnect: - loop.default_exception_handler(context) - - cls.network_event_loop.set_exception_handler(exception_handler) - coroutine = cls.create_listen_server(addr, port, callback, p2p) - cls.network_event_loop.call_soon_threadsafe(cls.network_event_loop.create_task, coroutine) - - @classmethod - async def create_listen_server(cls, addr, port, callback, proto): - def peer_protocol(): - """Returns a function that does the protocol handling for a new - connection. To allow different connections to have different - behaviors, the protocol function is first put in the cls.protos - dict. When the connection is made, the function removes the - protocol function from that dict, and returns it so the event loop - can start executing it.""" - response = cls.protos.get((addr, port)) - # remove protocol function from dict only when reconnection doesn't need to happen/already happened - if not proto.reconnect: - cls.protos[(addr, port)] = None - return response - - if (addr, port) not in cls.listeners: - # When creating a listener on a given (addr, port) we only need to - # do it once. If we want different behaviors for different - # connections, we can accomplish this by providing different - # `proto` functions - - listener = await cls.network_event_loop.create_server(peer_protocol, addr, port) - logger.debug("Listening server on %s:%d should be started" % (addr, port)) - cls.listeners[(addr, port)] = listener - - cls.protos[(addr, port)] = proto - callback(addr, port) - - -class P2PDataStore(P2PInterface): - """A P2P data store class. - - Keeps a block and transaction store and responds correctly to getdata and getheaders requests.""" - - def __init__(self): - super().__init__() - # store of blocks. key is block hash, value is a CBlock object - self.block_store = {} - self.last_block_hash = '' - # store of txs. key is txid, value is a CTransaction object - self.tx_store = {} - self.getdata_requests = [] - - def on_getdata(self, message): - """Check for the tx/block in our stores and if found, reply with MSG_TX or MSG_BLOCK.""" - for inv in message.inv: - self.getdata_requests.append(inv.hash) - invtype = inv.type & MSG_TYPE_MASK - if (invtype == MSG_TX or invtype == MSG_WTX) and inv.hash in self.tx_store.keys(): - self.send_message(msg_tx(self.tx_store[inv.hash])) - elif invtype == MSG_BLOCK and inv.hash in self.block_store.keys(): - self.send_message(msg_block(self.block_store[inv.hash])) - else: - logger.debug('getdata message type {} received.'.format(hex(inv.type))) - - def on_getheaders(self, message): - """Search back through our block store for the locator, and reply with a headers message if found.""" - - locator, hash_stop = message.locator, message.hashstop - - # Assume that the most recent block added is the tip - if not self.block_store: - return - - headers_list = [self.block_store[self.last_block_hash]] - while headers_list[-1].sha256 not in locator.vHave: - # Walk back through the block store, adding headers to headers_list - # as we go. 
- prev_block_hash = headers_list[-1].hashPrevBlock - if prev_block_hash in self.block_store: - prev_block_header = CBlockHeader(self.block_store[prev_block_hash]) - headers_list.append(prev_block_header) - if prev_block_header.sha256 == hash_stop: - # if this is the hashstop header, stop here - break - else: - logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash))) - break - - # Truncate the list if there are too many headers - headers_list = headers_list[:-MAX_HEADERS_RESULTS - 1:-1] - response = msg_headers(headers_list) - - if response is not None: - self.send_message(response) - - def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60, is_decoy=False): - """Send blocks to test node and test whether the tip advances. - - - add all blocks to our block_store - - send a headers message for the final block - - the on_getheaders handler will ensure that any getheaders are responded to - - if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will - ensure that any getdata messages are responded to. Otherwise send the full block unsolicited. - - if success is True: assert that the node's tip advances to the most recent block - - if success is False: assert that the node's tip doesn't advance - - if reject_reason is set: assert that the correct reject message is logged""" - - with p2p_lock: - for block in blocks: - self.block_store[block.sha256] = block - self.last_block_hash = block.sha256 - - reject_reason = [reject_reason] if reject_reason else [] - with node.assert_debug_log(expected_msgs=reject_reason): - if is_decoy: # since decoy messages are ignored by the recipient - no need to wait for response - force_send = True - if force_send: - for b in blocks: - self.send_message(msg_block(block=b), is_decoy) - else: - self.send_message(msg_headers([CBlockHeader(block) for block in blocks])) - self.wait_until( - lambda: blocks[-1].sha256 in self.getdata_requests, - timeout=timeout, - check_connected=success, - ) - - if expect_disconnect: - self.wait_for_disconnect(timeout=timeout) - else: - self.sync_with_ping(timeout=timeout) - - if success: - self.wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout) - else: - assert node.getbestblockhash() != blocks[-1].hash - - def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None): - """Send txs to test node and test whether they're accepted to the mempool. 
-
-        - add all txs to our tx_store
-        - send tx messages for all txs
-        - if success is True/False: assert that the txs are/are not accepted to the mempool
-        - if expect_disconnect is True: Skip the sync with ping
-        - if reject_reason is set: assert that the correct reject message is logged."""
-
-        with p2p_lock:
-            for tx in txs:
-                self.tx_store[tx.sha256] = tx
-
-        reject_reason = [reject_reason] if reject_reason else []
-        with node.assert_debug_log(expected_msgs=reject_reason):
-            for tx in txs:
-                self.send_message(msg_tx(tx))
-
-            if expect_disconnect:
-                self.wait_for_disconnect()
-            else:
-                self.sync_with_ping()
-
-        raw_mempool = node.getrawmempool()
-        if success:
-            # Check that all txs are now in the mempool
-            for tx in txs:
-                assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
-        else:
-            # Check that none of the txs are now in the mempool
-            for tx in txs:
-                assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)
-
-class P2PTxInvStore(P2PInterface):
-    """A P2PInterface which stores a count of how many times each txid has been announced."""
-    def __init__(self):
-        super().__init__()
-        self.tx_invs_received = defaultdict(int)
-
-    def on_inv(self, message):
-        super().on_inv(message) # Send getdata in response.
-        # Store how many times invs have been received for each tx.
-        for i in message.inv:
-            if (i.type == MSG_TX) or (i.type == MSG_WTX):
-                # save txid
-                self.tx_invs_received[i.hash] += 1
-
-    def get_invs(self):
-        with p2p_lock:
-            return list(self.tx_invs_received.keys())
-
-    def wait_for_broadcast(self, txns, *, timeout=60):
-        """Waits for the txns (list of txids) to complete initial broadcast.
-        The mempool should mark unbroadcast=False for these transactions.
-        """
-        # Wait until invs have been received (and getdatas sent) for each txid.
-        self.wait_until(lambda: set(self.tx_invs_received.keys()) == set([int(tx, 16) for tx in txns]), timeout=timeout)
-        # Flush messages and wait for the getdatas to be processed
-        self.sync_with_ping()
+#!/usr/bin/env python3
+# Copyright (c) 2010 ArtForz -- public domain half-a-node
+# Copyright (c) 2012 Jeff Garzik
+# Copyright (c) 2010-2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test objects for interacting with a bitcoind node over the p2p protocol.
+
+The P2PInterface objects interact with the bitcoind nodes under test using the
+node's p2p interface. They can be used to send messages to the node, and
+callbacks can be registered that execute when messages are received from the
+node. Messages are sent to/received from the node on an asyncio event loop.
+State held inside the objects must be guarded by the p2p_lock to avoid data
+races between the main testing thread and the event loop.
+
+P2PConnection: A low-level connection object to a node's P2P interface
+P2PInterface: A high-level interface object for communicating to a node over P2P
+P2PDataStore: A p2p interface class that keeps a store of transactions and blocks
+              and can respond correctly to getdata and getheaders messages
+P2PTxInvStore: A p2p interface class that inherits from P2PDataStore, and keeps
+               a count of how many times each txid has been announced."""
+
+import asyncio
+from collections import defaultdict
+from io import BytesIO
+import logging
+import platform
+import struct
+import sys
+import threading
+
+from test_framework.messages import (
+    CBlockHeader,
+    MAX_HEADERS_RESULTS,
+    msg_addr,
+    msg_addrv2,
+    msg_block,
+    MSG_BLOCK,
+    msg_blocktxn,
+    msg_cfcheckpt,
+    msg_cfheaders,
+    msg_cfilter,
+    msg_cmpctblock,
+    msg_feefilter,
+    msg_filteradd,
+    msg_filterclear,
+    msg_filterload,
+    msg_getaddr,
+    msg_getblocks,
+    msg_getblocktxn,
+    msg_getcfcheckpt,
+    msg_getcfheaders,
+    msg_getcfilters,
+    msg_getdata,
+    msg_getheaders,
+    msg_headers,
+    msg_inv,
+    msg_mempool,
+    msg_merkleblock,
+    msg_notfound,
+    msg_ping,
+    msg_pong,
+    msg_sendaddrv2,
+    msg_sendcmpct,
+    msg_sendheaders,
+    msg_sendtxrcncl,
+    msg_tx,
+    MSG_TX,
+    MSG_TYPE_MASK,
+    msg_verack,
+    msg_version,
+    MSG_WTX,
+    msg_wtxidrelay,
+    NODE_NETWORK,
+    NODE_WITNESS,
+    MAGIC_BYTES,
+    sha256,
+)
+from test_framework.util import (
+    assert_not_equal,
+    MAX_NODES,
+    p2p_port,
+    wait_until_helper_internal,
+)
+from test_framework.v2_p2p import (
+    EncryptedP2PState,
+    MSGTYPE_TO_SHORTID,
+    SHORTID,
+)
+
+logger = logging.getLogger("TestFramework.p2p")
+
+# The minimum P2P version that this test framework supports
+MIN_P2P_VERSION_SUPPORTED = 60001
+# The P2P version that this test framework implements and sends in its `version` message
+# Version 70016 supports wtxid relay
+P2P_VERSION = 70016
+# The services that this test framework offers in its `version` message
+P2P_SERVICES = NODE_NETWORK | NODE_WITNESS
+# The P2P user agent string that this test framework sends in its `version` message
+P2P_SUBVERSION = "/python-p2p-tester:0.0.3/"
+# Value for relay that this test framework sends in its `version` message
+P2P_VERSION_RELAY = 1
+# Delay after receiving a tx inv before requesting transactions from non-preferred peers, in seconds
+NONPREF_PEER_TX_DELAY = 2
+# Delay for requesting transactions via txids if we have wtxid-relaying peers, in seconds
+TXID_RELAY_DELAY = 2
+# Delay for requesting transactions if the peer has MAX_PEER_TX_REQUEST_IN_FLIGHT or more requests
+OVERLOADED_PEER_TX_DELAY = 2
+# How long to wait before downloading a transaction from an additional peer
+GETDATA_TX_INTERVAL = 60
+
+MESSAGEMAP = {
+    b"addr": msg_addr,
+    b"addrv2": msg_addrv2,
+    b"block": msg_block,
+    b"blocktxn": msg_blocktxn,
+    b"cfcheckpt": msg_cfcheckpt,
+    b"cfheaders": msg_cfheaders,
+    b"cfilter": msg_cfilter,
+    b"cmpctblock": msg_cmpctblock,
+    b"feefilter": msg_feefilter,
+    b"filteradd": msg_filteradd,
+    b"filterclear": msg_filterclear,
+    b"filterload": msg_filterload,
+    b"getaddr": msg_getaddr,
+    b"getblocks": msg_getblocks,
+    b"getblocktxn": msg_getblocktxn,
+    b"getcfcheckpt": msg_getcfcheckpt,
+    b"getcfheaders": msg_getcfheaders,
+    b"getcfilters": msg_getcfilters,
+    b"getdata": msg_getdata,
+    b"getheaders": msg_getheaders,
+    b"headers": msg_headers,
+    b"inv": msg_inv,
+    b"mempool": msg_mempool,
+    b"merkleblock": msg_merkleblock,
+    b"notfound": msg_notfound,
+    b"ping": msg_ping,
+    b"pong": msg_pong,
+    b"sendaddrv2": msg_sendaddrv2,
+    b"sendcmpct": msg_sendcmpct,
+    b"sendheaders": msg_sendheaders,
+    b"sendtxrcncl": msg_sendtxrcncl,
+    b"tx": msg_tx,
+    b"verack": msg_verack,
+    b"version": msg_version,
+    b"wtxidrelay": msg_wtxidrelay,
+}
+
+
+class P2PConnection(asyncio.Protocol):
+    """A low-level connection object to a node's P2P interface.
+
+    This class is responsible for:
+
+    - opening and closing the TCP connection to the node
+    - reading bytes from and writing bytes to the socket
+    - deserializing and serializing the P2P message header
+    - logging messages as they are sent and received
+
+    This class contains no logic for handing the P2P message payloads. It must be
+    sub-classed and the on_message() callback overridden."""
+
+    def __init__(self):
+        # The underlying transport of the connection.
+        # Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe
+        self._transport = None
+        # This lock is acquired before sending messages over the socket. There's an implied lock order and
+        # p2p_lock must not be acquired after _send_lock as it could result in deadlocks.
+        self._send_lock = threading.Lock()
+        self.v2_state = None  # EncryptedP2PState object needed for v2 p2p connections
+        self.reconnect = False  # set if reconnection needs to happen
+
+    @property
+    def is_connected(self):
+        return self._transport is not None
+
+    @property
+    def supports_v2_p2p(self):
+        return self.v2_state is not None
+
+    def peer_connect_helper(self, dstaddr, dstport, net, timeout_factor):
+        assert not self.is_connected
+        self.timeout_factor = timeout_factor
+        self.dstaddr = dstaddr
+        self.dstport = dstport
+        # The initial message to send after the connection was made:
+        self.on_connection_send_msg = None
+        self.recvbuf = b""
+        self.magic_bytes = MAGIC_BYTES[net]
+        self.p2p_connected_to_node = dstport != 0
+
+    def peer_connect(self, dstaddr, dstport, *, net, timeout_factor, supports_v2_p2p):
+        self.peer_connect_helper(dstaddr, dstport, net, timeout_factor)
+        if supports_v2_p2p:
+            self.v2_state = EncryptedP2PState(initiating=True, net=net)
+
+        loop = NetworkThread.network_event_loop
+        logger.debug('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport))
+        coroutine = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
+        return lambda: loop.call_soon_threadsafe(loop.create_task, coroutine)
+
+    def peer_accept_connection(self, connect_id, connect_cb=lambda: None, *, net, timeout_factor, supports_v2_p2p, reconnect):
+        self.peer_connect_helper('0', 0, net, timeout_factor)
+        self.reconnect = reconnect
+        if supports_v2_p2p:
+            self.v2_state = EncryptedP2PState(initiating=False, net=net)
+
+        logger.debug('Listening for Bitcoin Node with id: {}'.format(connect_id))
+        return lambda: NetworkThread.listen(self, connect_cb, idx=connect_id)
+
+    def peer_disconnect(self):
+        # Connection could have already been closed by other end.
+        NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())
+
+    # Connection and disconnection methods
+
+    def connection_made(self, transport):
+        """asyncio callback when a connection is opened."""
+        assert not self._transport
+        info = transport.get_extra_info("socket")
+        us = info.getsockname()
+        them = info.getpeername()
+        logger.debug(f"Connected: us={us[0]}:{us[1]}, them={them[0]}:{them[1]}")
+        self.dstaddr = them[0]
+        self.dstport = them[1]
+        self._transport = transport
+        # in an inbound connection to the TestNode with P2PConnection as the initiator, [TestNode <---- P2PConnection]
+        # send the initial handshake immediately
+        if self.supports_v2_p2p and self.v2_state.initiating and not self.v2_state.tried_v2_handshake:
+            send_handshake_bytes = self.v2_state.initiate_v2_handshake()
+            logger.debug(f"sending {len(self.v2_state.sent_garbage)} bytes of garbage data")
+            self.send_raw_message(send_handshake_bytes)
+        # for v1 outbound connections, send version message immediately after opening
+        # (for v2 outbound connections, send it after the initial v2 handshake)
+        if self.p2p_connected_to_node and not self.supports_v2_p2p:
+            self.send_version()
+        self.on_open()
+
+    def connection_lost(self, exc):
+        """asyncio callback when a connection is closed."""
+        # don't display warning if reconnection needs to be attempted using v1 P2P
+        if exc and not self.reconnect:
+            logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
+        else:
+            logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
+        self._transport = None
+        self.recvbuf = b""
+        self.on_close()
+
+    # v2 handshake method
+    def _on_data_v2_handshake(self):
+        """v2 handshake performed before P2P messages are exchanged (see BIP324). P2PConnection is the initiator
+        (in inbound connections to TestNode) and the responder (in outbound connections from TestNode).
+        Performed by:
+            * initiator using `initiate_v2_handshake()`, `complete_handshake()` and `authenticate_handshake()`
+            * responder using `respond_v2_handshake()`, `complete_handshake()` and `authenticate_handshake()`
+
+        `initiate_v2_handshake()` is immediately done by the initiator when the connection is established in
+        `connection_made()`. The rest of the initial v2 handshake functions are handled here.
+        """
+        if not self.v2_state.peer:
+            if not self.v2_state.initiating and not self.v2_state.sent_garbage:
+                # if the responder hasn't sent garbage yet, the responder is still reading ellswift bytes
+                # reads ellswift bytes till the first mismatch from 12 bytes V1_PREFIX
+                length, send_handshake_bytes = self.v2_state.respond_v2_handshake(BytesIO(self.recvbuf))
+                self.recvbuf = self.recvbuf[length:]
+                if send_handshake_bytes == -1:
+                    self.v2_state = None
+                    return
+                elif send_handshake_bytes:
+                    logger.debug(f"sending {len(self.v2_state.sent_garbage)} bytes of garbage data")
+                    self.send_raw_message(send_handshake_bytes)
+                elif send_handshake_bytes == b"":
+                    return  # only after send_handshake_bytes are sent can `complete_handshake()` be done
+
+            # `complete_handshake()` reads the remaining ellswift bytes from recvbuf
+            # and sends response after deriving shared ECDH secret using received ellswift bytes
+            length, response = self.v2_state.complete_handshake(BytesIO(self.recvbuf))
+            self.recvbuf = self.recvbuf[length:]
+            if response:
+                self.send_raw_message(response)
+            else:
+                return  # only after response is sent can `authenticate_handshake()` be done
+
+        # `self.v2_state.peer` is instantiated only after shared ECDH secret/BIP324 derived keys and ciphers
+        # is derived in `complete_handshake()`.
+        # so `authenticate_handshake()` which uses the BIP324 derived ciphers gets called after `complete_handshake()`.
+        assert self.v2_state.peer
+        length, is_mac_auth = self.v2_state.authenticate_handshake(self.recvbuf)
+        if not is_mac_auth:
+            raise ValueError("invalid v2 mac tag in handshake authentication")
+        self.recvbuf = self.recvbuf[length:]
+        if self.v2_state.tried_v2_handshake:
+            # for v2 outbound connections, send version message immediately after v2 handshake
+            if self.p2p_connected_to_node:
+                self.send_version()
+            # process post-v2-handshake data immediately, if available
+            if len(self.recvbuf) > 0:
+                self._on_data()
+
+    # Socket read methods
+
+    def data_received(self, t):
+        """asyncio callback when data is read from the socket."""
+        if len(t) > 0:
+            self.recvbuf += t
+            if self.supports_v2_p2p and not self.v2_state.tried_v2_handshake:
+                self._on_data_v2_handshake()
+            else:
+                self._on_data()
+
+    def _on_data(self):
+        """Try to read P2P messages from the recv buffer.
+
+        This method reads data from the buffer in a loop. It deserializes,
+        parses and verifies the P2P header, then passes the P2P payload to
+        the on_message callback for processing."""
+        try:
+            while True:
+                if self.supports_v2_p2p:
+                    # v2 P2P messages are read
+                    msglen, msg = self.v2_state.v2_receive_packet(self.recvbuf)
+                    if msglen == -1:
+                        raise ValueError("invalid v2 mac tag " + repr(self.recvbuf))
+                    elif msglen == 0:  # need to receive more bytes in recvbuf
+                        return
+                    self.recvbuf = self.recvbuf[msglen:]
+
+                    if msg is None:  # ignore decoy messages
+                        return
+                    assert msg  # application layer messages (which aren't decoy messages) are non-empty
+                    shortid = msg[0]  # 1-byte short message type ID
+                    if shortid == 0:
+                        # next 12 bytes are interpreted as ASCII message type if shortid is b'\x00'
+                        if len(msg) < 13:
+                            raise IndexError("msg needs minimum required length of 13 bytes")
+                        msgtype = msg[1:13].rstrip(b'\x00')
+                        msg = msg[13:]  # msg is set to be payload
+                    else:
+                        # a 1-byte short message type ID
+                        msgtype = SHORTID.get(shortid, f"unknown-{shortid}")
+                        msg = msg[1:]
+                else:
+                    # v1 P2P messages are read
+                    if len(self.recvbuf) < 4:
+                        return
+                    if self.recvbuf[:4] != self.magic_bytes:
+                        raise ValueError("magic bytes mismatch: {} != {}".format(repr(self.magic_bytes), repr(self.recvbuf)))
+                    if len(self.recvbuf) < 4 + 12 + 4 + 4:
+                        return
+                    msgtype = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
+                    msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
+                    checksum = self.recvbuf[4+12+4:4+12+4+4]
+                    if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
+                        return
+                    msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
+                    th = sha256(msg)
+                    h = sha256(th)
+                    if checksum != h[:4]:
+                        raise ValueError("got bad checksum " + repr(self.recvbuf))
+                    self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
+                if msgtype not in MESSAGEMAP:
+                    raise ValueError("Received unknown msgtype from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, msgtype, repr(msg)))
+                f = BytesIO(msg)
+                t = MESSAGEMAP[msgtype]()
+                t.deserialize(f)
+                self._log_message("receive", t)
+                self.on_message(t)
+        except Exception as e:
+            logger.exception('Error reading message:', repr(e))
+            raise
+
+    def on_message(self, message):
+        """Callback for processing a P2P payload. Must be overridden by derived class."""
+        raise NotImplementedError
+
+    # Socket write methods
+
+    def send_message(self, message, is_decoy=False):
+        """Send a P2P message over the socket.
+
+        This method takes a P2P payload, builds the P2P header and adds
+        the message to the send buffer to be sent over the socket."""
+        with self._send_lock:
+            tmsg = self.build_message(message, is_decoy)
+            self._log_message("send", message)
+            return self.send_raw_message(tmsg)
+
+    def send_raw_message(self, raw_message_bytes):
+        if not self.is_connected:
+            raise IOError('Not connected')
+
+        def maybe_write():
+            if not self._transport:
+                return
+            if self._transport.is_closing():
+                return
+            self._transport.write(raw_message_bytes)
+        NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
+
+    # Class utility methods
+
+    def build_message(self, message, is_decoy=False):
+        """Build a serialized P2P message"""
+        msgtype = message.msgtype
+        data = message.serialize()
+        if self.supports_v2_p2p:
+            if msgtype in SHORTID.values():
+                tmsg = MSGTYPE_TO_SHORTID.get(msgtype).to_bytes(1, 'big')
+            else:
+                tmsg = b"\x00"
+                tmsg += msgtype
+                tmsg += b"\x00" * (12 - len(msgtype))
+            tmsg += data
+            return self.v2_state.v2_enc_packet(tmsg, ignore=is_decoy)
+        else:
+            tmsg = self.magic_bytes
+            tmsg += msgtype
+            tmsg += b"\x00" * (12 - len(msgtype))
+            tmsg += struct.pack("<I", len(data))
+            th = sha256(data)
+            h = sha256(th)
+            tmsg += h[:4]
+            tmsg += data
+            return tmsg
+
+    def _log_message(self, direction, msg):
+        """Logs a message being sent or received over the connection."""
+        if direction == "send":
+            log_message = "Send message to "
+        elif direction == "receive":
+            log_message = "Receive message from "
+        log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
+        if len(log_message) > 500:
+            log_message += "... (msg truncated)"
+        logger.debug(log_message)
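The v1 framing that _on_data() parses and build_message() produces is: 4-byte network magic, 12-byte zero-padded command, 4-byte little-endian payload length, and the first 4 bytes of the payload's double-SHA256 as a checksum. A standalone sketch (illustrative only, not part of the patch; regtest magic assumed):

# Illustrative sketch of the v1 wire framing.
import struct
from test_framework.messages import MAGIC_BYTES, sha256

def frame_v1(msgtype: bytes, payload: bytes) -> bytes:
    header = MAGIC_BYTES["regtest"]                    # 4-byte network magic
    header += msgtype + b"\x00" * (12 - len(msgtype))  # 12-byte padded command
    header += struct.pack("<I", len(payload))          # 4-byte LE payload length
    header += sha256(sha256(payload))[:4]              # first 4 bytes of double-SHA256
    return header + payload

assert len(frame_v1(b"ping", bytes(8))) == 4 + 12 + 4 + 4 + 8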
+
+
+class P2PInterface(P2PConnection):
+    """A high-level P2P interface class for communicating with a Bitcoin node.
+
+    This class provides high-level callbacks for processing P2P message
+    payloads, as well as convenience methods for interacting with the
+    node over P2P.
+
+    Individual testcases should subclass this and override the on_* methods
+    if they want to alter message handling behaviour."""
+    def __init__(self, support_addrv2=False, wtxidrelay=True):
+        super().__init__()
+
+        # Track number of messages of each type received.
+        # Should be read-only in a test.
+        self.message_count = defaultdict(int)
+
+        # Track the most recent message of each type.
+        # To wait for a message to be received, pop that message from
+        # this and use self.wait_until.
+        self.last_message = {}
+
+        # A count of the number of ping messages we've sent to the node
+        self.ping_counter = 1
+
+        # The network services received from the peer
+        self.nServices = 0
+
+        self.support_addrv2 = support_addrv2
+
+        # If the peer supports wtxid-relay
+        self.wtxidrelay = wtxidrelay
+
+    def peer_connect_send_version(self, services):
+        # Send a version msg
+        vt = msg_version()
+        vt.nVersion = P2P_VERSION
+        vt.strSubVer = P2P_SUBVERSION
+        vt.relay = P2P_VERSION_RELAY
+        vt.nServices = services
+        vt.addrTo.ip = self.dstaddr
+        vt.addrTo.port = self.dstport
+        vt.addrFrom.ip = "0.0.0.0"
+        vt.addrFrom.port = 0
+        self.on_connection_send_msg = vt  # Will be sent in connection_made callback
+
+    def peer_connect(self, *, services=P2P_SERVICES, send_version, **kwargs):
+        create_conn = super().peer_connect(**kwargs)
+
+        if send_version:
+            self.peer_connect_send_version(services)
+
+        return create_conn
+
+    def peer_accept_connection(self, *args, services=P2P_SERVICES, **kwargs):
+        create_conn = super().peer_accept_connection(*args, **kwargs)
+        self.peer_connect_send_version(services)
+
+        return create_conn
+
+    # Message receiving methods
+
+    def on_message(self, message):
+        """Receive message and dispatch message to appropriate callback.
+
+        We keep a count of how many of each message type has been received
+        and the most recent message of each type."""
+        with p2p_lock:
+            try:
+                msgtype = message.msgtype.decode('ascii')
+                self.message_count[msgtype] += 1
+                self.last_message[msgtype] = message
+                getattr(self, 'on_' + msgtype)(message)
+            except Exception:
+                print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
+                raise
+
+    # Callback methods. Can be overridden by subclasses in individual test
+    # cases to provide custom message handling behaviour.
+
+    def on_open(self):
+        pass
+
+    def on_close(self):
+        pass
+
+    def on_addr(self, message): pass
+    def on_addrv2(self, message): pass
+    def on_block(self, message): pass
+    def on_blocktxn(self, message): pass
+    def on_cfcheckpt(self, message): pass
+    def on_cfheaders(self, message): pass
+    def on_cfilter(self, message): pass
+    def on_cmpctblock(self, message): pass
+    def on_feefilter(self, message): pass
+    def on_filteradd(self, message): pass
+    def on_filterclear(self, message): pass
+    def on_filterload(self, message): pass
+    def on_getaddr(self, message): pass
+    def on_getblocks(self, message): pass
+    def on_getblocktxn(self, message): pass
+    def on_getdata(self, message): pass
+    def on_getheaders(self, message): pass
+    def on_headers(self, message): pass
+    def on_mempool(self, message): pass
+    def on_merkleblock(self, message): pass
+    def on_notfound(self, message): pass
+    def on_pong(self, message): pass
+    def on_sendaddrv2(self, message): pass
+    def on_sendcmpct(self, message): pass
+    def on_sendheaders(self, message): pass
+    def on_sendtxrcncl(self, message): pass
+    def on_tx(self, message): pass
+    def on_wtxidrelay(self, message): pass
+
+    def on_inv(self, message):
+        want = msg_getdata()
+        for i in message.inv:
+            if i.type != 0:
+                want.inv.append(i)
+        if len(want.inv):
+            self.send_message(want)
+
+    def on_ping(self, message):
+        self.send_message(msg_pong(message.nonce))
+
+    def on_verack(self, message):
+        pass
+
+    def on_version(self, message):
+        assert message.nVersion >= MIN_P2P_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_P2P_VERSION_SUPPORTED)
+        # for inbound connections, reply to version with own version message
+        # (could be due to v1 reconnect after a failed v2 handshake)
+        if not self.p2p_connected_to_node:
+            self.send_version()
+            self.reconnect = False
+        if message.nVersion >= 70016 and self.wtxidrelay:
+            self.send_message(msg_wtxidrelay())
+        if self.support_addrv2:
+            self.send_message(msg_sendaddrv2())
+        self.send_message(msg_verack())
+        self.nServices = message.nServices
+        self.relay = message.relay
+        if self.p2p_connected_to_node:
+            self.send_message(msg_getaddr())
+
+    # Connection helper methods
+
+    def wait_until(self, test_function_in, *, timeout=60, check_connected=True):
+        def test_function():
+            if check_connected:
+                assert self.is_connected
+            return test_function_in()
+
+        wait_until_helper_internal(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor)
+
+    def wait_for_connect(self, *, timeout=60):
+        test_function = lambda: self.is_connected
+        self.wait_until(test_function, timeout=timeout, check_connected=False)
+
+    def wait_for_disconnect(self, *, timeout=60):
+        test_function = lambda: not self.is_connected
+        self.wait_until(test_function, timeout=timeout, check_connected=False)
+
+    def wait_for_reconnect(self, *, timeout=60):
+        def test_function():
+            return self.is_connected and self.last_message.get('version') and not self.supports_v2_p2p
+        self.wait_until(test_function, timeout=timeout, check_connected=False)
+
+    # Message receiving helper methods
+
+    def wait_for_tx(self, txid, *, timeout=60):
+        def test_function():
+            if not self.last_message.get('tx'):
+                return False
+            return self.last_message['tx'].tx.rehash() == txid
+
+        self.wait_until(test_function, timeout=timeout)
+
+    def wait_for_block(self, blockhash, *, timeout=60):
+        def test_function():
+            return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
+
+        self.wait_until(test_function, timeout=timeout)
+
+    def wait_for_header(self, blockhash, *, timeout=60):
+        def test_function():
+            last_headers = self.last_message.get('headers')
+            if not last_headers:
+                return False
+            return last_headers.headers[0].rehash() == int(blockhash, 16)
+
+        self.wait_until(test_function, timeout=timeout)
+
+    def wait_for_merkleblock(self, blockhash, *, timeout=60):
+        def test_function():
+            last_filtered_block = self.last_message.get('merkleblock')
+            if not last_filtered_block:
+                return False
+            return last_filtered_block.merkleblock.header.rehash() == int(blockhash, 16)
+
+        self.wait_until(test_function, timeout=timeout)
+
+    def wait_for_getdata(self, hash_list, *, timeout=60):
+        """Waits for a getdata message.
+
+        The object hashes in the inventory vector must match the provided hash_list."""
+        def test_function():
+            last_data = self.last_message.get("getdata")
+            if not last_data:
+                return False
+            return [x.hash for x in last_data.inv] == hash_list
+
+        self.wait_until(test_function, timeout=timeout)
+
+    def wait_for_getheaders(self, block_hash=None, *, timeout=60):
+        """Waits for a getheaders message containing a specific block hash.
+
+        If no block hash is provided, checks whether any getheaders message has been received by the node."""
+        def test_function():
+            last_getheaders = self.last_message.pop("getheaders", None)
+            if block_hash is None:
+                return last_getheaders
+            if last_getheaders is None:
+                return False
+            return block_hash == last_getheaders.locator.vHave[0]
+
+        self.wait_until(test_function, timeout=timeout)
+
+    def wait_for_inv(self, expected_inv, *, timeout=60):
+        """Waits for an INV message and checks that the first inv object in the message was as expected."""
+        if len(expected_inv) > 1:
+            raise NotImplementedError("wait_for_inv() will only verify the first inv object")
+
+        def test_function():
+            return self.last_message.get("inv") and \
+                self.last_message["inv"].inv[0].type == expected_inv[0].type and \
+                self.last_message["inv"].inv[0].hash == expected_inv[0].hash
+
+        self.wait_until(test_function, timeout=timeout)
+
+    def wait_for_verack(self, *, timeout=60):
+        def test_function():
+            return "verack" in self.last_message
+
+        self.wait_until(test_function, timeout=timeout)
+
+    # Message sending helper functions
+
+    def send_version(self):
+        if self.on_connection_send_msg:
+            self.send_message(self.on_connection_send_msg)
+            self.on_connection_send_msg = None  # Never used again
+
+    def send_and_ping(self, message, *, timeout=60):
+        self.send_message(message)
+        self.sync_with_ping(timeout=timeout)
+
+    def sync_with_ping(self, *, timeout=60):
+        """Ensure ProcessMessages and SendMessages is called on this connection"""
+        # Sending two pings back-to-back, requires that the node calls
+        # `ProcessMessage` twice, and thus ensures `SendMessages` must have
+        # been called at least once
+        self.send_message(msg_ping(nonce=0))
+        self.send_message(msg_ping(nonce=self.ping_counter))
+
+        def test_function():
+            return self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
+
+        self.wait_until(test_function, timeout=timeout)
+        self.ping_counter += 1
+
+
+# One lock for synchronizing all data access between the network event loop (see
+# NetworkThread below) and the thread running the test logic. For simplicity,
+# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
+# This lock should be acquired in the thread running the test logic to synchronize
+# access to any data shared with the P2PInterface or P2PConnection.
+p2p_lock = threading.Lock()
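Typical use of this class from a test, as a hedged sketch (illustrative only, not part of the patch; assumes `node` is a TestNode created by a BitcoinTestFramework test case):

# Illustrative sketch: attach a peer, sync, and inspect shared state.
peer = node.add_p2p_connection(P2PInterface())  # connects, sends version, waits for verack
peer.sync_with_ping()                           # ping/pong round-trip ensures the node processed everything
with p2p_lock:                                  # reads of shared state must hold p2p_lock
    assert peer.message_count["pong"] >= 1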
+ NetworkThread.network_event_loop = None) +) + @classmethod) + def listen(cls, p2p, callback, port=None, addr=None, idx=1):) + """ Ensure a listening server is running on the given port, and run the) + protocol specified by `p2p` on the next connection to it. Once ready) + for connections, call `callback`.""") +) + if port is None:) + assert 0 < idx <= MAX_NODES) + port = p2p_port(MAX_NODES - idx)) + if addr is None:) + addr = '127.0.0.1') +) + def exception_handler(loop, context):) + if not p2p.reconnect:) + loop.default_exception_handler(context)) +) + cls.network_event_loop.set_exception_handler(exception_handler)) + coroutine = cls.create_listen_server(addr, port, callback, p2p)) + cls.network_event_loop.call_soon_threadsafe(cls.network_event_loop.create_task, coroutine)) +) + @classmethod) + async def create_listen_server(cls, addr, port, callback, proto):) + def peer_protocol():) + """Returns a function that does the protocol handling for a new) + connection. To allow different connections to have different) + behaviors, the protocol function is first put in the cls.protos) + dict. When the connection is made, the function removes the) + protocol function from that dict, and returns it so the event loop) + can start executing it.""") + response = cls.protos.get((addr, port))) + # remove protocol function from dict only when reconnection doesn't need to happen/already happened) + if not proto.reconnect:) + cls.protos[(addr, port)] = None) + return response) +) + if (addr, port) not in cls.listeners:) + # When creating a listener on a given (addr, port) we only need to) + # do it once. If we want different behaviors for different) + # connections, we can accomplish this by providing different) + # `proto` functions) +) + listener = await cls.network_event_loop.create_server(peer_protocol, addr, port)) + logger.debug("Listening server on %s:%d should be started" % (addr, port))) + cls.listeners[(addr, port)] = listener) +) + cls.protos[(addr, port)] = proto) + callback(addr, port)) +) +) +class P2PDataStore(P2PInterface):) + """A P2P data store class.) +) + Keeps a block and transaction store and responds correctly to getdata and getheaders requests.""") +) + def __init__(self):) + super().__init__()) + # store of blocks. key is block hash, value is a CBlock object) + self.block_store = {}) + self.last_block_hash = '') + # store of txs. 
+
+
+class P2PDataStore(P2PInterface):
+ """A P2P data store class.
+
+ Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""
+
+ def __init__(self):
+ super().__init__()
+ # store of blocks. key is block hash, value is a CBlock object
+ self.block_store = {}
+ self.last_block_hash = ''
+ # store of txs. key is txid, value is a CTransaction object
+ self.tx_store = {}
+ self.getdata_requests = []
+
+ def on_getdata(self, message):
+ """Check for the tx/block in our stores and if found, reply with MSG_TX or MSG_BLOCK."""
+ for inv in message.inv:
+ self.getdata_requests.append(inv.hash)
+ invtype = inv.type & MSG_TYPE_MASK
+ if (invtype == MSG_TX or invtype == MSG_WTX) and inv.hash in self.tx_store.keys():
+ self.send_message(msg_tx(self.tx_store[inv.hash]))
+ elif invtype == MSG_BLOCK and inv.hash in self.block_store.keys():
+ self.send_message(msg_block(self.block_store[inv.hash]))
+ else:
+ logger.debug('getdata message type {} received.'.format(hex(inv.type)))
+
+ def on_getheaders(self, message):
+ """Search back through our block store for the locator, and reply with a headers message if found."""
+
+ locator, hash_stop = message.locator, message.hashstop
+
+ # Assume that the most recent block added is the tip
+ if not self.block_store:
+ return
+
+ headers_list = [self.block_store[self.last_block_hash]]
+ while headers_list[-1].sha256 not in locator.vHave:
+ # Walk back through the block store, adding headers to headers_list
+ # as we go.
+ prev_block_hash = headers_list[-1].hashPrevBlock
+ if prev_block_hash in self.block_store:
+ prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
+ headers_list.append(prev_block_header)
+ if prev_block_header.sha256 == hash_stop:
+ # if this is the hashstop header, stop here
+ break
+ else:
+ logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
+ break
+
+ # Truncate the list if there are too many headers
+ headers_list = headers_list[:-MAX_HEADERS_RESULTS - 1:-1]
+ response = msg_headers(headers_list)
+
+ if response is not None:
+ self.send_message(response)
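For illustration, a hedged sketch of the kind of getheaders request this handler answers. `msg_getheaders` (with its `locator`/`hashstop` fields) comes from test_framework.messages; `peer` and `known_hash` are hypothetical:

    getheaders = msg_getheaders()
    getheaders.locator.vHave = [known_hash]  # the walk-back above stops at this hash
    getheaders.hashstop = 0                  # no early stop
    peer.send_message(getheaders)            # answered with up to MAX_HEADERS_RESULTS headers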
+
+ def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60, is_decoy=False):
+ """Send blocks to test node and test whether the tip advances.
+
+ - add all blocks to our block_store
+ - send a headers message for the final block
+ - the on_getheaders handler will ensure that any getheaders are responded to
+ - if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will
+ ensure that any getdata messages are responded to. Otherwise send the full block unsolicited.
+ - if success is True: assert that the node's tip advances to the most recent block
+ - if success is False: assert that the node's tip doesn't advance
+ - if reject_reason is set: assert that the correct reject message is logged"""
+
+ with p2p_lock:
+ for block in blocks:
+ self.block_store[block.sha256] = block
+ self.last_block_hash = block.sha256
+
+ reject_reason = [reject_reason] if reject_reason else []
+ with node.assert_debug_log(expected_msgs=reject_reason):
+ if is_decoy: # since decoy messages are ignored by the recipient - no need to wait for response
+ force_send = True
+ if force_send:
+ for b in blocks:
+ self.send_message(msg_block(block=b), is_decoy)
+ else:
+ self.send_message(msg_headers([CBlockHeader(block) for block in blocks]))
+ self.wait_until(
+ lambda: blocks[-1].sha256 in self.getdata_requests,
+ timeout=timeout,
+ check_connected=success,
+ )
+
+ if expect_disconnect:
+ self.wait_for_disconnect(timeout=timeout)
+ else:
+ self.sync_with_ping(timeout=timeout)
+
+ if success:
+ self.wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
+ else:
+ assert_not_equal(node.getbestblockhash(), blocks[-1].hash)
+
+ def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None):
+ """Send txs to test node and test whether they're accepted to the mempool.
+
+ - add all txs to our tx_store
+ - send tx messages for all txs
+ - if success is True/False: assert that the txs are/are not accepted to the mempool
+ - if expect_disconnect is True: Skip the sync with ping
+ - if reject_reason is set: assert that the correct reject message is logged."""
+
+ with p2p_lock:
+ for tx in txs:
+ self.tx_store[tx.sha256] = tx
+
+ reject_reason = [reject_reason] if reject_reason else []
+ with node.assert_debug_log(expected_msgs=reject_reason):
+ for tx in txs:
+ self.send_message(msg_tx(tx))
+
+ if expect_disconnect:
+ self.wait_for_disconnect()
+ else:
+ self.sync_with_ping()
+
+ raw_mempool = node.getrawmempool()
+ if success:
+ # Check that all txs are now in the mempool
+ for tx in txs:
+ assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
+ else:
+ # Check that none of the txs are now in the mempool
+ for tx in txs:
+ assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)
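A minimal usage sketch for the two methods above, assuming `node`, lists of valid CBlock/CTransaction objects, and a `bad_block` exist; the reject-reason string is a placeholder, not a specific validation message:

    peer = node.add_p2p_connection(P2PDataStore())
    peer.send_blocks_and_test(blocks, node, success=True)        # tip should advance
    peer.send_blocks_and_test([bad_block], node, success=False,
                              reject_reason='some-reject-reason')  # placeholder reason
    peer.send_txs_and_test(txs, node, success=True)              # txs should enter the mempool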
+
+class P2PTxInvStore(P2PInterface):
+ """A P2PInterface which stores a count of how many times each txid has been announced."""
+ def __init__(self):
+ super().__init__()
+ self.tx_invs_received = defaultdict(int)
+
+ def on_inv(self, message):
+ super().on_inv(message) # Send getdata in response.
+ # Store how many times invs have been received for each tx.
+ for i in message.inv:
+ if (i.type == MSG_TX) or (i.type == MSG_WTX):
+ # save txid
+ self.tx_invs_received[i.hash] += 1
+
+ def get_invs(self):
+ with p2p_lock:
+ return list(self.tx_invs_received.keys())
+
+ def wait_for_broadcast(self, txns, *, timeout=60):
+ """Waits for the txns (list of txids) to complete initial broadcast.
+ The mempool should mark unbroadcast=False for these transactions.
+ """
+ # Wait until invs have been received (and getdatas sent) for each txid.
+ self.wait_until(lambda: set(self.tx_invs_received.keys()) == set([int(tx, 16) for tx in txns]), timeout=timeout)
+ # Flush messages and wait for the getdatas to be processed
+ self.sync_with_ping()
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 327ae59fb306aa..6ae41ff482f494 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -1,1007 +1,1007 @@
-#!/usr/bin/env python3
-# Copyright (c) 2017-2022 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Class for bitcoind node under test"""
-
-import contextlib
-import decimal
-import errno
-from enum import Enum
-import http.client
-import json
-import logging
-import os
-import platform
-import re
-import subprocess
-import tempfile
-import time
-import urllib.parse
-import collections
-import shlex
-from pathlib import Path
-
-from .authproxy import (
- JSONRPCException,
- serialization_fallback,
-)
-from .descriptors import descsum_create
-from .messages import NODE_P2P_V2
-from .p2p import P2P_SERVICES, P2P_SUBVERSION
-from .util import (
- MAX_NODES,
- assert_equal,
- assert_not_equal,
- append_config,
- delete_cookie_file,
- get_auth_cookie,
- get_rpc_proxy,
- rpc_url,
- wait_until_helper_internal,
- p2p_port,
- tor_port,
-)
-
-BITCOIND_PROC_WAIT_TIMEOUT = 60
-# The size of the blocks xor key
-# from InitBlocksdirXorKey::xor_key.size()
-NUM_XOR_BYTES = 8
-# The null blocks key (all 0s)
-NULL_BLK_XOR_KEY = bytes([0] * NUM_XOR_BYTES)
-BITCOIN_PID_FILENAME_DEFAULT = "bitcoind.pid"
-
-
-class FailedToStartError(Exception):
- """Raised when a node fails to start correctly."""
-
-
-class ErrorMatch(Enum):
- FULL_TEXT = 1
- FULL_REGEX = 2
- PARTIAL_REGEX = 3
-
-
-class TestNode():
- """A class for representing a bitcoind node under test.
-
- This class contains:
-
- - state about the node (whether it's running, etc)
- - a Python subprocess.Popen object representing the running process
- - an RPC connection to the node
- - one or more P2P connections to the node
-
-
- To make things easier for the test writer, any unrecognised messages will
- be dispatched to the RPC connection."""
-
- def __init__(self, i, datadir_path, *, chain, rpchost, timewait, timeout_factor, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None, descriptors=False, v2transport=False):
- """
- Kwargs:
- start_perf (bool): If True, begin profiling the node with `perf` as soon as
- the node starts.
- """
-
- self.index = i
- self.p2p_conn_index = 1
- self.datadir_path = datadir_path
- self.bitcoinconf = self.datadir_path / "bitcoin.conf"
- self.stdout_dir = self.datadir_path / "stdout"
- self.stderr_dir = self.datadir_path / "stderr"
- self.chain = chain
- self.rpchost = rpchost
- self.rpc_timeout = timewait
- self.binary = bitcoind
- self.coverage_dir = coverage_dir
- self.cwd = cwd
- self.descriptors = descriptors
- self.has_explicit_bind = False
- if extra_conf is not None:
- append_config(self.datadir_path, extra_conf)
- # Remember if there is bind=... in the config file.
- self.has_explicit_bind = any(e.startswith("bind=") for e in extra_conf)
- # Most callers will just need to add extra args to the standard list below.
- # For those callers that need more flexibility, they can just set the args property directly.
- # Note that common args are set in the config file (see initialize_datadir) - self.extra_args = extra_args - self.version = version - # Configuration for logging is set as command-line args rather than in the bitcoin.conf file. - # This means that starting a bitcoind using the temp dir to debug a failed test won't - # spam debug.log. - self.args = [ - self.binary, - f"-datadir={self.datadir_path}", - "-logtimemicros", - "-debug", - "-debugexclude=libevent", - "-debugexclude=leveldb", - "-debugexclude=rand", - "-uacomment=testnode%d" % i, # required for subversion uniqueness across peers - ] - if self.descriptors is None: - self.args.append("-disablewallet") - - # Use valgrind, expect for previous release binaries - if use_valgrind and version is None: - default_suppressions_file = Path(__file__).parents[3] / "contrib" / "valgrind.supp" - suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE", - default_suppressions_file) - self.args = ["valgrind", "--suppressions={}".format(suppressions_file), - "--gen-suppressions=all", "--exit-on-first-error=yes", - "--error-exitcode=1", "--quiet"] + self.args - - if self.version_is_at_least(190000): - self.args.append("-logthreadnames") - if self.version_is_at_least(219900): - self.args.append("-logsourcelocations") - if self.version_is_at_least(239000): - self.args.append("-loglevel=trace") - - # Default behavior from global -v2transport flag is added to args to persist it over restarts. - # May be overwritten in individual tests, using extra_args. - self.default_to_v2 = v2transport - if self.version_is_at_least(260000): - # 26.0 and later support v2transport - if v2transport: - self.args.append("-v2transport=1") - else: - self.args.append("-v2transport=0") - # if v2transport is requested via global flag but not supported for node version, ignore it - - self.cli = TestNodeCLI(bitcoin_cli, self.datadir_path) - self.use_cli = use_cli - self.start_perf = start_perf - - self.running = False - self.process = None - self.rpc_connected = False - self.rpc = None - self.url = None - self.log = logging.getLogger('TestFramework.node%d' % i) - self.cleanup_on_exit = True # Whether to kill the node when this object goes away - # Cache perf subprocesses here by their data output filename. 
- self.perf_subprocesses = {} - - self.p2ps = [] - self.timeout_factor = timeout_factor - - self.mocktime = None - - AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key']) - PRIV_KEYS = [ - # address , privkey - AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'), - AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'), - AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'), - AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'), - AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'), - AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'), - AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'), - AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'), - AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'), - AddressKeyPair('mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'), - AddressKeyPair('mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'), - AddressKeyPair('mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'), - ] - - def get_deterministic_priv_key(self): - """Return a deterministic priv key in base58, that only depends on the node's index""" - assert len(self.PRIV_KEYS) == MAX_NODES - return self.PRIV_KEYS[self.index] - - def _node_msg(self, msg: str) -> str: - """Return a modified msg that identifies this node by its index as a debugging aid.""" - return "[node %d] %s" % (self.index, msg) - - def _raise_assertion_error(self, msg: str): - """Raise an AssertionError with msg modified to identify this node.""" - raise AssertionError(self._node_msg(msg)) - - def __del__(self): - # Ensure that we don't leave any bitcoind processes lying around after - # the test ends - if self.process and self.cleanup_on_exit: - # Should only happen on test failure - # Avoid using logger, as that may have already been shutdown when - # this destructor is called. - print(self._node_msg("Cleaning up leftover process")) - self.process.kill() - - def __getattr__(self, name): - """Dispatches any unrecognised messages to the RPC connection or a CLI instance.""" - if self.use_cli: - return getattr(RPCOverloadWrapper(self.cli, True, self.descriptors), name) - else: - assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection") - return getattr(RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), name) - - def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, env=None, **kwargs): - """Start the node.""" - if extra_args is None: - extra_args = self.extra_args - - # If listening and no -bind is given, then bitcoind would bind P2P ports on - # 0.0.0.0:P and 127.0.0.1:18445 (for incoming Tor connections), where P is - # a unique port chosen by the test framework and configured as port=P in - # bitcoin.conf. To avoid collisions on 127.0.0.1:18445, change it to - # 127.0.0.1:tor_port(). 
- will_listen = all(e != "-nolisten" and e != "-listen=0" for e in extra_args) - has_explicit_bind = self.has_explicit_bind or any(e.startswith("-bind=") for e in extra_args) - if will_listen and not has_explicit_bind: - extra_args.append(f"-bind=0.0.0.0:{p2p_port(self.index)}") - extra_args.append(f"-bind=127.0.0.1:{tor_port(self.index)}=onion") - - self.use_v2transport = "-v2transport=1" in extra_args or (self.default_to_v2 and "-v2transport=0" not in extra_args) - - # Add a new stdout and stderr file each time bitcoind is started - if stderr is None: - stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) - if stdout is None: - stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) - self.stderr = stderr - self.stdout = stdout - - if cwd is None: - cwd = self.cwd - - # Delete any existing cookie file -- if such a file exists (eg due to - # unclean shutdown), it will get overwritten anyway by bitcoind, and - # potentially interfere with our attempt to authenticate - delete_cookie_file(self.datadir_path, self.chain) - - # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal - subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1") - if env is not None: - subp_env.update(env) - - self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs) - - self.running = True - self.log.debug("bitcoind started, waiting for RPC to come up") - - if self.start_perf: - self._start_perf() - - def wait_for_rpc_connection(self, *, wait_for_import=True): - """Sets up an RPC connection to the bitcoind process. Returns False if unable to connect.""" - # Poll at a rate of four times per second - poll_per_s = 4 - for _ in range(poll_per_s * self.rpc_timeout): - if self.process.poll() is not None: - # Attach abrupt shutdown error/s to the exception message - self.stderr.seek(0) - str_error = ''.join(line.decode('utf-8') for line in self.stderr) - str_error += "************************\n" if str_error else '' - - raise FailedToStartError(self._node_msg( - f'bitcoind exited with status {self.process.returncode} during initialization. {str_error}')) - try: - rpc = get_rpc_proxy( - rpc_url(self.datadir_path, self.index, self.chain, self.rpchost), - self.index, - timeout=self.rpc_timeout // 2, # Shorter timeout to allow for one retry in case of ETIMEDOUT - coveragedir=self.coverage_dir, - ) - rpc.getblockcount() - # If the call to getblockcount() succeeds then the RPC connection is up - if self.version_is_at_least(190000) and wait_for_import: - # getmempoolinfo.loaded is available since commit - # bb8ae2c (version 0.19.0) - self.wait_until(lambda: rpc.getmempoolinfo()['loaded']) - # Wait for the node to finish reindex, block import, and - # loading the mempool. Usually importing happens fast or - # even "immediate" when the node is started. However, there - # is no guarantee and sometimes ImportBlocks might finish - # later. This is going to cause intermittent test failures, - # because generally the tests assume the node is fully - # ready after being started. - # - # For example, the node will reject block messages from p2p - # when it is still importing with the error "Unexpected - # block message received" - # - # The wait is done here to make tests as robust as possible - # and prevent racy tests and intermittent failures as much - # as possible. 
Some tests might not need this, but the - # overhead is trivial, and the added guarantees are worth - # the minimal performance cost. - self.log.debug("RPC successfully started") - if self.use_cli: - return - self.rpc = rpc - self.rpc_connected = True - self.url = self.rpc.rpc_url - return - except JSONRPCException as e: # Initialization phase - # -28 RPC in warmup - # -342 Service unavailable, RPC server started but is shutting down due to error - if e.error['code'] != -28 and e.error['code'] != -342: - raise # unknown JSON RPC exception - except ConnectionResetError: - # This might happen when the RPC server is in warmup, but shut down before the call to getblockcount - # succeeds. Try again to properly raise the FailedToStartError - pass - except OSError as e: - if e.errno == errno.ETIMEDOUT: - pass # Treat identical to ConnectionResetError - elif e.errno == errno.ECONNREFUSED: - pass # Port not yet open? - else: - raise # unknown OS error - except ValueError as e: # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting - if "No RPC credentials" not in str(e): - raise - time.sleep(1.0 / poll_per_s) - self._raise_assertion_error("Unable to connect to bitcoind after {}s".format(self.rpc_timeout)) - - def wait_for_cookie_credentials(self): - """Ensures auth cookie credentials can be read, e.g. for testing CLI with -rpcwait before RPC connection is up.""" - self.log.debug("Waiting for cookie credentials") - # Poll at a rate of four times per second. - poll_per_s = 4 - for _ in range(poll_per_s * self.rpc_timeout): - try: - get_auth_cookie(self.datadir_path, self.chain) - self.log.debug("Cookie credentials successfully retrieved") - return - except ValueError: # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting - pass # so we continue polling until RPC credentials are retrieved - time.sleep(1.0 / poll_per_s) - self._raise_assertion_error("Unable to retrieve cookie credentials after {}s".format(self.rpc_timeout)) - - def generate(self, nblocks, maxtries=1000000, **kwargs): - self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`") - return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries, **kwargs) - - def generateblock(self, *args, invalid_call, **kwargs): - assert not invalid_call - return self.__getattr__('generateblock')(*args, **kwargs) - - def generatetoaddress(self, *args, invalid_call, **kwargs): - assert not invalid_call - return self.__getattr__('generatetoaddress')(*args, **kwargs) - - def generatetodescriptor(self, *args, invalid_call, **kwargs): - assert not invalid_call - return self.__getattr__('generatetodescriptor')(*args, **kwargs) - - def setmocktime(self, timestamp): - """Wrapper for setmocktime RPC, sets self.mocktime""" - if timestamp == 0: - # setmocktime(0) resets to system time. 
- self.mocktime = None - else: - self.mocktime = timestamp - return self.__getattr__('setmocktime')(timestamp) - - def get_wallet_rpc(self, wallet_name): - if self.use_cli: - return RPCOverloadWrapper(self.cli("-rpcwallet={}".format(wallet_name)), True, self.descriptors) - else: - assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected") - wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name)) - return RPCOverloadWrapper(self.rpc / wallet_path, descriptors=self.descriptors) - - def version_is_at_least(self, ver): - return self.version is None or self.version >= ver - - def stop_node(self, expected_stderr='', *, wait=0, wait_until_stopped=True): - """Stop the node.""" - if not self.running: - return - self.log.debug("Stopping node") - try: - # Do not use wait argument when testing older nodes, e.g. in wallet_backwards_compatibility.py - if self.version_is_at_least(180000): - self.stop(wait=wait) - else: - self.stop() - except http.client.CannotSendRequest: - self.log.exception("Unable to stop node.") - - # If there are any running perf processes, stop them. - for profile_name in tuple(self.perf_subprocesses.keys()): - self._stop_perf(profile_name) - - del self.p2ps[:] - - assert (not expected_stderr) or wait_until_stopped # Must wait to check stderr - if wait_until_stopped: - self.wait_until_stopped(expected_stderr=expected_stderr) - - def is_node_stopped(self, *, expected_stderr="", expected_ret_code=0): - """Checks whether the node has stopped. - - Returns True if the node has stopped. False otherwise. - This method is responsible for freeing resources (self.process).""" - if not self.running: - return True - return_code = self.process.poll() - if return_code is None: - return False - - # process has stopped. Assert that it didn't return an error code. - assert return_code == expected_ret_code, self._node_msg( - f"Node returned unexpected exit code ({return_code}) vs ({expected_ret_code}) when stopping") - # Check that stderr is as expected - self.stderr.seek(0) - stderr = self.stderr.read().decode('utf-8').strip() - if stderr != expected_stderr: - raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr)) - - self.stdout.close() - self.stderr.close() - - self.running = False - self.process = None - self.rpc_connected = False - self.rpc = None - self.log.debug("Node stopped") - return True - - def wait_until_stopped(self, *, timeout=BITCOIND_PROC_WAIT_TIMEOUT, expect_error=False, **kwargs): - if "expected_ret_code" not in kwargs: - kwargs["expected_ret_code"] = 1 if expect_error else 0 # Whether node shutdown return EXIT_FAILURE or EXIT_SUCCESS - self.wait_until(lambda: self.is_node_stopped(**kwargs), timeout=timeout) - - def replace_in_config(self, replacements): - """ - Perform replacements in the configuration file. - The substitutions are passed as a list of search-replace-tuples, e.g. - [("old", "new"), ("foo", "bar"), ...] 
- """ - with open(self.bitcoinconf, 'r', encoding='utf8') as conf: - conf_data = conf.read() - for replacement in replacements: - assert_equal(len(replacement), 2) - old, new = replacement[0], replacement[1] - conf_data = conf_data.replace(old, new) - with open(self.bitcoinconf, 'w', encoding='utf8') as conf: - conf.write(conf_data) - - @property - def chain_path(self) -> Path: - return self.datadir_path / self.chain - - @property - def debug_log_path(self) -> Path: - return self.chain_path / 'debug.log' - - @property - def blocks_path(self) -> Path: - return self.chain_path / "blocks" - - @property - def blocks_key_path(self) -> Path: - return self.blocks_path / "xor.dat" - - def read_xor_key(self) -> bytes: - with open(self.blocks_key_path, "rb") as xor_f: - return xor_f.read(NUM_XOR_BYTES) - - @property - def wallets_path(self) -> Path: - return self.chain_path / "wallets" - - def debug_log_size(self, **kwargs) -> int: - with open(self.debug_log_path, **kwargs) as dl: - dl.seek(0, 2) - return dl.tell() - - @contextlib.contextmanager - def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2): - if unexpected_msgs is None: - unexpected_msgs = [] - assert_equal(type(expected_msgs), list) - assert_equal(type(unexpected_msgs), list) - - time_end = time.time() + timeout * self.timeout_factor - prev_size = self.debug_log_size(encoding="utf-8") # Must use same encoding that is used to read() below - - yield - - while True: - found = True - with open(self.debug_log_path, encoding="utf-8", errors="replace") as dl: - dl.seek(prev_size) - log = dl.read() - print_log = " - " + "\n - ".join(log.splitlines()) - for unexpected_msg in unexpected_msgs: - if re.search(re.escape(unexpected_msg), log, flags=re.MULTILINE): - self._raise_assertion_error('Unexpected message "{}" partially matches log:\n\n{}\n\n'.format(unexpected_msg, print_log)) - for expected_msg in expected_msgs: - if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None: - found = False - if found: - return - if time.time() >= time_end: - break - time.sleep(0.05) - self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log)) - - @contextlib.contextmanager - def busy_wait_for_debug_log(self, expected_msgs, timeout=60): - """ - Block until we see a particular debug log message fragment or until we exceed the timeout. - Return: - the number of log lines we encountered when matching - """ - time_end = time.time() + timeout * self.timeout_factor - prev_size = self.debug_log_size(mode="rb") # Must use same mode that is used to read() below - - yield - - while True: - found = True - with open(self.debug_log_path, "rb") as dl: - dl.seek(prev_size) - log = dl.read() - - for expected_msg in expected_msgs: - if expected_msg not in log: - found = False - - if found: - return - - if time.time() >= time_end: - print_log = " - " + "\n - ".join(log.decode("utf8", errors="replace").splitlines()) - break - - # No sleep here because we want to detect the message fragment as fast as - # possible. - - self._raise_assertion_error( - 'Expected messages "{}" does not partially match log:\n\n{}\n\n'.format( - str(expected_msgs), print_log)) - - @contextlib.contextmanager - def wait_for_new_peer(self, timeout=5): - """ - Wait until the node is connected to at least one new peer. We detect this - by watching for an increased highest peer id, using the `getpeerinfo` RPC call. 
- Note that the simpler approach of only accounting for the number of peers - suffers from race conditions, as disconnects from unrelated previous peers - could happen anytime in-between. - """ - def get_highest_peer_id(): - peer_info = self.getpeerinfo() - return peer_info[-1]["id"] if peer_info else -1 - - initial_peer_id = get_highest_peer_id() - yield - self.wait_until(lambda: get_highest_peer_id() > initial_peer_id, timeout=timeout) - - @contextlib.contextmanager - def profile_with_perf(self, profile_name: str): - """ - Context manager that allows easy profiling of node activity using `perf`. - - See `test/functional/README.md` for details on perf usage. - - Args: - profile_name: This string will be appended to the - profile data filename generated by perf. - """ - subp = self._start_perf(profile_name) - - yield - - if subp: - self._stop_perf(profile_name) - - def _start_perf(self, profile_name=None): - """Start a perf process to profile this node. - - Returns the subprocess running perf.""" - subp = None - - def test_success(cmd): - return subprocess.call( - # shell=True required for pipe use below - cmd, shell=True, - stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0 - - if platform.system() != 'Linux': - self.log.warning("Can't profile with perf; only available on Linux platforms") - return None - - if not test_success('which perf'): - self.log.warning("Can't profile with perf; must install perf-tools") - return None - - if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))): - self.log.warning( - "perf output won't be very useful without debug symbols compiled into bitcoind") - - output_path = tempfile.NamedTemporaryFile( - dir=self.datadir_path, - prefix="{}.perf.data.".format(profile_name or 'test'), - delete=False, - ).name - - cmd = [ - 'perf', 'record', - '-g', # Record the callgraph. - '--call-graph', 'dwarf', # Compatibility for gcc's --fomit-frame-pointer. - '-F', '101', # Sampling frequency in Hz. - '-p', str(self.process.pid), - '-o', output_path, - ] - subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - self.perf_subprocesses[profile_name] = subp - - return subp - - def _stop_perf(self, profile_name): - """Stop (and pop) a perf subprocess.""" - subp = self.perf_subprocesses.pop(profile_name) - output_path = subp.args[subp.args.index('-o') + 1] - - subp.terminate() - subp.wait(timeout=10) - - stderr = subp.stderr.read().decode() - if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr: - self.log.warning( - "perf couldn't collect data! Try " - "'sudo sysctl -w kernel.perf_event_paranoid=-1'") - else: - report_cmd = "perf report -i {}".format(output_path) - self.log.info("See perf output by running '{}'".format(report_cmd)) - - def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs): - """Attempt to start the node and expect it to raise an error. - - extra_args: extra arguments to pass through to bitcoind - expected_msg: regex that stderr should match when bitcoind fails - - Will throw if bitcoind starts without an error. 
- Will throw if an expected_msg is provided and it does not match bitcoind's stdout.""" - assert not self.running - with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \ - tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout: - try: - self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs) - ret = self.process.wait(timeout=self.rpc_timeout) - self.log.debug(self._node_msg(f'bitcoind exited with status {ret} during initialization')) - assert ret != 0 # Exit code must indicate failure - self.running = False - self.process = None - # Check stderr for expected message - if expected_msg is not None: - log_stderr.seek(0) - stderr = log_stderr.read().decode('utf-8').strip() - if match == ErrorMatch.PARTIAL_REGEX: - if re.search(expected_msg, stderr, flags=re.MULTILINE) is None: - self._raise_assertion_error( - 'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr)) - elif match == ErrorMatch.FULL_REGEX: - if re.fullmatch(expected_msg, stderr) is None: - self._raise_assertion_error( - 'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr)) - elif match == ErrorMatch.FULL_TEXT: - if expected_msg != stderr: - self._raise_assertion_error( - 'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr)) - except subprocess.TimeoutExpired: - self.process.kill() - self.running = False - self.process = None - assert_msg = f'bitcoind should have exited within {self.rpc_timeout}s ' - if expected_msg is None: - assert_msg += "with an error" - else: - assert_msg += "with expected error " + expected_msg - self._raise_assertion_error(assert_msg) - - def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, send_version=True, supports_v2_p2p=None, wait_for_v2_handshake=True, expect_success=True, **kwargs): - """Add an inbound p2p connection to the node. - - This method adds the p2p connection to the self.p2ps list and also - returns the connection to the caller. 
- - When self.use_v2transport is True, TestNode advertises NODE_P2P_V2 service flag - - An inbound connection is made from TestNode <------ P2PConnection - - if TestNode doesn't advertise NODE_P2P_V2 service, P2PConnection sends version message and v1 P2P is followed - - if TestNode advertises NODE_P2P_V2 service, (and if P2PConnections supports v2 P2P) - P2PConnection sends ellswift bytes and v2 P2P is followed - """ - if 'dstport' not in kwargs: - kwargs['dstport'] = p2p_port(self.index) - if 'dstaddr' not in kwargs: - kwargs['dstaddr'] = '127.0.0.1' - if supports_v2_p2p is None: - supports_v2_p2p = self.use_v2transport - - if self.use_v2transport: - kwargs['services'] = kwargs.get('services', P2P_SERVICES) | NODE_P2P_V2 - supports_v2_p2p = self.use_v2transport and supports_v2_p2p - p2p_conn.peer_connect(**kwargs, send_version=send_version, net=self.chain, timeout_factor=self.timeout_factor, supports_v2_p2p=supports_v2_p2p)() - - self.p2ps.append(p2p_conn) - if not expect_success: - return p2p_conn - p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False) - if supports_v2_p2p and wait_for_v2_handshake: - p2p_conn.wait_until(lambda: p2p_conn.v2_state.tried_v2_handshake) - if send_version: - p2p_conn.wait_until(lambda: not p2p_conn.on_connection_send_msg) - if wait_for_verack: - # Wait for the node to send us the version and verack - p2p_conn.wait_for_verack() - # At this point we have sent our version message and received the version and verack, however the full node - # has not yet received the verack from us (in reply to their version). So, the connection is not yet fully - # established (fSuccessfullyConnected). - # - # This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the - # message we send. However, it might lead to races where we are expecting to receive a message. E.g. a - # transaction that will be added to the mempool as soon as we return here. - # - # So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds) - # in comparison to the upside of making tests less fragile and unexpected intermittent errors less likely. - p2p_conn.sync_with_ping() - - # Consistency check that the node received our user agent string. - # Find our connection in getpeerinfo by our address:port and theirs, as this combination is unique. - sockname = p2p_conn._transport.get_extra_info("socket").getsockname() - our_addr_and_port = f"{sockname[0]}:{sockname[1]}" - dst_addr_and_port = f"{p2p_conn.dstaddr}:{p2p_conn.dstport}" - info = [peer for peer in self.getpeerinfo() if peer["addr"] == our_addr_and_port and peer["addrbind"] == dst_addr_and_port] - assert_equal(len(info), 1) - assert_equal(info[0]["subver"], P2P_SUBVERSION) - - return p2p_conn - - def add_outbound_p2p_connection(self, p2p_conn, *, wait_for_verack=True, wait_for_disconnect=False, p2p_idx, connection_type="outbound-full-relay", supports_v2_p2p=None, advertise_v2_p2p=None, **kwargs): - """Add an outbound p2p connection from node. Must be an - "outbound-full-relay", "block-relay-only", "addr-fetch" or "feeler" connection. - - This method adds the p2p connection to the self.p2ps list and returns - the connection to the caller. - - p2p_idx must be different for simultaneously connected peers. When reusing it for the next peer - after disconnecting the previous one, it is necessary to wait for the disconnect to finish to avoid - a race condition. 
- - Parameters: - supports_v2_p2p: whether p2p_conn supports v2 P2P or not - advertise_v2_p2p: whether p2p_conn is advertised to support v2 P2P or not - - An outbound connection is made from TestNode -------> P2PConnection - - if P2PConnection doesn't advertise_v2_p2p, TestNode sends version message and v1 P2P is followed - - if P2PConnection both supports_v2_p2p and advertise_v2_p2p, TestNode sends ellswift bytes and v2 P2P is followed - - if P2PConnection doesn't supports_v2_p2p but advertise_v2_p2p, - TestNode sends ellswift bytes and P2PConnection disconnects, - TestNode reconnects by sending version message and v1 P2P is followed - """ - - def addconnection_callback(address, port): - self.log.debug("Connecting to %s:%d %s" % (address, port, connection_type)) - self.addconnection('%s:%d' % (address, port), connection_type, advertise_v2_p2p) - - if supports_v2_p2p is None: - supports_v2_p2p = self.use_v2transport - if advertise_v2_p2p is None: - advertise_v2_p2p = self.use_v2transport - - if advertise_v2_p2p: - kwargs['services'] = kwargs.get('services', P2P_SERVICES) | NODE_P2P_V2 - assert self.use_v2transport # only a v2 TestNode could make a v2 outbound connection - - # if P2PConnection is advertised to support v2 P2P when it doesn't actually support v2 P2P, - # reconnection needs to be attempted using v1 P2P by sending version message - reconnect = advertise_v2_p2p and not supports_v2_p2p - # P2PConnection needs to be advertised to support v2 P2P so that ellswift bytes are sent instead of msg_version - supports_v2_p2p = supports_v2_p2p and advertise_v2_p2p - p2p_conn.peer_accept_connection(connect_cb=addconnection_callback, connect_id=p2p_idx + 1, net=self.chain, timeout_factor=self.timeout_factor, supports_v2_p2p=supports_v2_p2p, reconnect=reconnect, **kwargs)() - - if reconnect: - p2p_conn.wait_for_reconnect() - - if connection_type == "feeler" or wait_for_disconnect: - # feeler connections are closed as soon as the node receives a `version` message - p2p_conn.wait_until(lambda: p2p_conn.message_count["version"] == 1, check_connected=False) - p2p_conn.wait_until(lambda: not p2p_conn.is_connected, check_connected=False) - else: - p2p_conn.wait_for_connect() - self.p2ps.append(p2p_conn) - - if supports_v2_p2p: - p2p_conn.wait_until(lambda: p2p_conn.v2_state.tried_v2_handshake) - p2p_conn.wait_until(lambda: not p2p_conn.on_connection_send_msg) - if wait_for_verack: - p2p_conn.wait_for_verack() - p2p_conn.sync_with_ping() - - return p2p_conn - - def num_test_p2p_connections(self): - """Return number of test framework p2p connections to the node.""" - return len([peer for peer in self.getpeerinfo() if peer['subver'] == P2P_SUBVERSION]) - - def disconnect_p2ps(self): - """Close all p2p connections to the node. - Use only after each p2p has sent a version message to ensure the wait works.""" - for p in self.p2ps: - p.peer_disconnect() - del self.p2ps[:] - - self.wait_until(lambda: self.num_test_p2p_connections() == 0) - - def bumpmocktime(self, seconds): - """Fast forward using setmocktime to self.mocktime + seconds. 
Requires setmocktime to have - been called at some point in the past.""" - assert self.mocktime - self.mocktime += seconds - self.setmocktime(self.mocktime) - - def wait_until(self, test_function, timeout=60): - return wait_until_helper_internal(test_function, timeout=timeout, timeout_factor=self.timeout_factor) - - -class TestNodeCLIAttr: - def __init__(self, cli, command): - self.cli = cli - self.command = command - - def __call__(self, *args, **kwargs): - return self.cli.send_cli(self.command, *args, **kwargs) - - def get_request(self, *args, **kwargs): - return lambda: self(*args, **kwargs) - - -def arg_to_cli(arg): - if isinstance(arg, bool): - return str(arg).lower() - elif arg is None: - return 'null' - elif isinstance(arg, dict) or isinstance(arg, list): - return json.dumps(arg, default=serialization_fallback) - else: - return str(arg) - - -class TestNodeCLI(): - """Interface to bitcoin-cli for an individual node""" - def __init__(self, binary, datadir): - self.options = [] - self.binary = binary - self.datadir = datadir - self.input = None - self.log = logging.getLogger('TestFramework.bitcoincli') - - def __call__(self, *options, input=None): - # TestNodeCLI is callable with bitcoin-cli command-line options - cli = TestNodeCLI(self.binary, self.datadir) - cli.options = [str(o) for o in options] - cli.input = input - return cli - - def __getattr__(self, command): - return TestNodeCLIAttr(self, command) - - def batch(self, requests): - results = [] - for request in requests: - try: - results.append(dict(result=request())) - except JSONRPCException as e: - results.append(dict(error=e)) - return results - - def send_cli(self, clicommand=None, *args, **kwargs): - """Run bitcoin-cli command. Deserializes returned string as python object.""" - pos_args = [arg_to_cli(arg) for arg in args] - named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()] - p_args = [self.binary, f"-datadir={self.datadir}"] + self.options - if named_args: - p_args += ["-named"] - if clicommand is not None: - p_args += [clicommand] - p_args += pos_args + named_args - self.log.debug("Running bitcoin-cli {}".format(p_args[2:])) - process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - cli_stdout, cli_stderr = process.communicate(input=self.input) - returncode = process.poll() - if returncode: - match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr) - if match: - code, message = match.groups() - raise JSONRPCException(dict(code=int(code), message=message)) - # Ignore cli_stdout, raise with cli_stderr - raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr) - try: - return json.loads(cli_stdout, parse_float=decimal.Decimal) - except (json.JSONDecodeError, decimal.InvalidOperation): - return cli_stdout.rstrip("\n") - -class RPCOverloadWrapper(): - def __init__(self, rpc, cli=False, descriptors=False): - self.rpc = rpc - self.is_cli = cli - self.descriptors = descriptors - - def __getattr__(self, name): - return getattr(self.rpc, name) - - def createwallet_passthrough(self, *args, **kwargs): - return self.__getattr__("createwallet")(*args, **kwargs) - - def createwallet(self, wallet_name, disable_private_keys=None, blank=None, passphrase='', avoid_reuse=None, descriptors=None, load_on_startup=None, external_signer=None): - if descriptors is None: - descriptors = self.descriptors - return self.__getattr__('createwallet')(wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, 
descriptors, load_on_startup, external_signer)
-
- def importprivkey(self, privkey, label=None, rescan=None):
- wallet_info = self.getwalletinfo()
- if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
- return self.__getattr__('importprivkey')(privkey, label, rescan)
- desc = descsum_create('combo(' + privkey + ')')
- req = [{
- 'desc': desc,
- 'timestamp': 0 if rescan else 'now',
- 'label': label if label else ''
- }]
- import_res = self.importdescriptors(req)
- if not import_res[0]['success']:
- raise JSONRPCException(import_res[0]['error'])
-
- def addmultisigaddress(self, nrequired, keys, label=None, address_type=None):
- wallet_info = self.getwalletinfo()
- if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
- return self.__getattr__('addmultisigaddress')(nrequired, keys, label, address_type)
- cms = self.createmultisig(nrequired, keys, address_type)
- req = [{
- 'desc': cms['descriptor'],
- 'timestamp': 0,
- 'label': label if label else ''
- }]
- import_res = self.importdescriptors(req)
- if not import_res[0]['success']:
- raise JSONRPCException(import_res[0]['error'])
- return cms
-
- def importpubkey(self, pubkey, label=None, rescan=None):
- wallet_info = self.getwalletinfo()
- if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
- return self.__getattr__('importpubkey')(pubkey, label, rescan)
- desc = descsum_create('combo(' + pubkey + ')')
- req = [{
- 'desc': desc,
- 'timestamp': 0 if rescan else 'now',
- 'label': label if label else ''
- }]
- import_res = self.importdescriptors(req)
- if not import_res[0]['success']:
- raise JSONRPCException(import_res[0]['error'])
-
- def importaddress(self, address, label=None, rescan=None, p2sh=None):
- wallet_info = self.getwalletinfo()
- if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
- return self.__getattr__('importaddress')(address, label, rescan, p2sh)
- is_hex = False
- try:
- int(address ,16)
- is_hex = True
- desc = descsum_create('raw(' + address + ')')
- except Exception:
- desc = descsum_create('addr(' + address + ')')
- reqs = [{
- 'desc': desc,
- 'timestamp': 0 if rescan else 'now',
- 'label': label if label else ''
- }]
- if is_hex and p2sh:
- reqs.append({
- 'desc': descsum_create('p2sh(raw(' + address + '))'),
- 'timestamp': 0 if rescan else 'now',
- 'label': label if label else ''
- })
- import_res = self.importdescriptors(reqs)
- for res in import_res:
- if not res['success']:
- raise JSONRPCException(res['error'])
+#!/usr/bin/env python3
+# Copyright (c) 2017-2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Class for bitcoind node under test""") +) +import contextlib) +import decimal) +import errno) +from enum import Enum) +import http.client) +import json) +import logging) +import os) +import platform) +import re) +import subprocess) +import tempfile) +import time) +import urllib.parse) +import collections) +import shlex) +from pathlib import Path) +) +from .authproxy import () + JSONRPCException,) + serialization_fallback,) +)) +from .descriptors import descsum_create) +from .messages import NODE_P2P_V2) +from .p2p import P2P_SERVICES, P2P_SUBVERSION) +from .util import () + MAX_NODES,) + assert_equal,) + assert_not_equal,) + append_config,) + delete_cookie_file,) + get_auth_cookie,) + get_rpc_proxy,) + rpc_url,) + wait_until_helper_internal,) + p2p_port,) + tor_port,) +)) +) +BITCOIND_PROC_WAIT_TIMEOUT = 60) +# The size of the blocks xor key) +# from InitBlocksdirXorKey::xor_key.size()) +NUM_XOR_BYTES = 8) +# The null blocks key (all 0s)) +NULL_BLK_XOR_KEY = bytes([0] * NUM_XOR_BYTES)) +BITCOIN_PID_FILENAME_DEFAULT = "bitcoind.pid") +) +) +class FailedToStartError(Exception):) + """Raised when a node fails to start correctly.""") +) +) +class ErrorMatch(Enum):) + FULL_TEXT = 1) + FULL_REGEX = 2) + PARTIAL_REGEX = 3) +) +) +class TestNode():) + """A class for representing a bitcoind node under test.) +) + This class contains:) +) + - state about the node (whether it's running, etc)) + - a Python subprocess.Popen object representing the running process) + - an RPC connection to the node) + - one or more P2P connections to the node) +) +) + To make things easier for the test writer, any unrecognised messages will) + be dispatched to the RPC connection.""") +) + def __init__(self, i, datadir_path, *, chain, rpchost, timewait, timeout_factor, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None, descriptors=False, v2transport=False):) + """) + Kwargs:) + start_perf (bool): If True, begin profiling the node with `perf` as soon as) + the node starts.) + """) +) + self.index = i) + self.p2p_conn_index = 1) + self.datadir_path = datadir_path) + self.bitcoinconf = self.datadir_path / "bitcoin.conf") + self.stdout_dir = self.datadir_path / "stdout") + self.stderr_dir = self.datadir_path / "stderr") + self.chain = chain) + self.rpchost = rpchost) + self.rpc_timeout = timewait) + self.binary = bitcoind) + self.coverage_dir = coverage_dir) + self.cwd = cwd) + self.descriptors = descriptors) + self.has_explicit_bind = False) + if extra_conf is not None:) + append_config(self.datadir_path, extra_conf)) + # Remember if there is bind=... in the config file.) + self.has_explicit_bind = any(e.startswith("bind=") for e in extra_conf)) + # Most callers will just need to add extra args to the standard list below.) + # For those callers that need more flexibility, they can just set the args property directly.) + # Note that common args are set in the config file (see initialize_datadir)) + self.extra_args = extra_args) + self.version = version) + # Configuration for logging is set as command-line args rather than in the bitcoin.conf file.) + # This means that starting a bitcoind using the temp dir to debug a failed test won't) + # spam debug.log.) 
+
+
+class FailedToStartError(Exception):
+ """Raised when a node fails to start correctly."""
+
+
+class ErrorMatch(Enum):
+ FULL_TEXT = 1
+ FULL_REGEX = 2
+ PARTIAL_REGEX = 3
+
+
+class TestNode():
+ """A class for representing a bitcoind node under test.
+
+ This class contains:
+
+ - state about the node (whether it's running, etc)
+ - a Python subprocess.Popen object representing the running process
+ - an RPC connection to the node
+ - one or more P2P connections to the node
+
+
+ To make things easier for the test writer, any unrecognised messages will
+ be dispatched to the RPC connection."""
+
+ def __init__(self, i, datadir_path, *, chain, rpchost, timewait, timeout_factor, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None, descriptors=False, v2transport=False):
+ """
+ Kwargs:
+ start_perf (bool): If True, begin profiling the node with `perf` as soon as
+ the node starts.
+ """
+
+ self.index = i
+ self.p2p_conn_index = 1
+ self.datadir_path = datadir_path
+ self.bitcoinconf = self.datadir_path / "bitcoin.conf"
+ self.stdout_dir = self.datadir_path / "stdout"
+ self.stderr_dir = self.datadir_path / "stderr"
+ self.chain = chain
+ self.rpchost = rpchost
+ self.rpc_timeout = timewait
+ self.binary = bitcoind
+ self.coverage_dir = coverage_dir
+ self.cwd = cwd
+ self.descriptors = descriptors
+ self.has_explicit_bind = False
+ if extra_conf is not None:
+ append_config(self.datadir_path, extra_conf)
+ # Remember if there is bind=... in the config file.
+ self.has_explicit_bind = any(e.startswith("bind=") for e in extra_conf)
+ # Most callers will just need to add extra args to the standard list below.
+ # For those callers that need more flexibility, they can just set the args property directly.
+ # Note that common args are set in the config file (see initialize_datadir)
+ self.extra_args = extra_args
+ self.version = version
+ # Configuration for logging is set as command-line args rather than in the bitcoin.conf file.
+ # This means that starting a bitcoind using the temp dir to debug a failed test won't
+ # spam debug.log.
+ self.args = [
+ self.binary,
+ f"-datadir={self.datadir_path}",
+ "-logtimemicros",
+ "-debug",
+ "-debugexclude=libevent",
+ "-debugexclude=leveldb",
+ "-debugexclude=rand",
+ "-uacomment=testnode%d" % i, # required for subversion uniqueness across peers
+ ]
+ if self.descriptors is None:
+ self.args.append("-disablewallet")
+
+ # Use valgrind, except for previous release binaries
+ if use_valgrind and version is None:
+ default_suppressions_file = Path(__file__).parents[3] / "contrib" / "valgrind.supp"
+ suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE",
+ default_suppressions_file)
+ self.args = ["valgrind", "--suppressions={}".format(suppressions_file),
+ "--gen-suppressions=all", "--exit-on-first-error=yes",
+ "--error-exitcode=1", "--quiet"] + self.args
+
+ if self.version_is_at_least(190000):
+ self.args.append("-logthreadnames")
+ if self.version_is_at_least(219900):
+ self.args.append("-logsourcelocations")
+ if self.version_is_at_least(239000):
+ self.args.append("-loglevel=trace")
+
+ # Default behavior from global -v2transport flag is added to args to persist it over restarts.
+ # May be overwritten in individual tests, using extra_args.
+ self.default_to_v2 = v2transport
+ if self.version_is_at_least(260000):
+ # 26.0 and later support v2transport
+ if v2transport:
+ self.args.append("-v2transport=1")
+ else:
+ self.args.append("-v2transport=0")
+ # if v2transport is requested via global flag but not supported for node version, ignore it
+
+ self.cli = TestNodeCLI(bitcoin_cli, self.datadir_path)
+ self.use_cli = use_cli
+ self.start_perf = start_perf
+
+ self.running = False
+ self.process = None
+ self.rpc_connected = False
+ self.rpc = None
+ self.url = None
+ self.log = logging.getLogger('TestFramework.node%d' % i)
+ self.cleanup_on_exit = True # Whether to kill the node when this object goes away
+ # Cache perf subprocesses here by their data output filename.
+ self.perf_subprocesses = {}
+
+ self.p2ps = []
+ self.timeout_factor = timeout_factor
+
+ self.mocktime = None
+
+ AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
+ PRIV_KEYS = [
+ # address , privkey
+ AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
+ AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
+ AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
+ AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
+ AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
+ AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
+ AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
+ AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
+ AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
+ AddressKeyPair('mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'),
+ AddressKeyPair('mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'),
+ AddressKeyPair('mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'),
+ ]
+
+ def get_deterministic_priv_key(self):
+ """Return a deterministic priv key in base58, that only depends on the node's index"""
+ assert len(self.PRIV_KEYS) == MAX_NODES
+ return self.PRIV_KEYS[self.index]
+
+ def _node_msg(self, msg: str) -> str:
+ """Return a modified msg that identifies this node by its index as a debugging aid."""
+ return "[node %d] %s" % (self.index, msg)
+
+ def _raise_assertion_error(self, msg: str):
+ """Raise an AssertionError with msg modified to identify this node."""
+ raise AssertionError(self._node_msg(msg))
+
+ def __del__(self):
+ # Ensure that we don't leave any bitcoind processes lying around after
+ # the test ends
+ if self.process and self.cleanup_on_exit:
+ # Should only happen on test failure
+ # Avoid using logger, as that may have already been shutdown when
+ # this destructor is called.
+ print(self._node_msg("Cleaning up leftover process"))
+ self.process.kill()
+
+ def __getattr__(self, name):
+ """Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
+ if self.use_cli:
+ return getattr(RPCOverloadWrapper(self.cli, True, self.descriptors), name)
+ else:
+ assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
+ return getattr(RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), name)
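Because of this __getattr__ hook, tests call RPCs directly on the TestNode object; for example (`node` and `block_hash` are hypothetical):

    height = node.getblockcount()                       # dispatched to the RPC connection
    header = node.getblockheader(blockhash=block_hash)  # keyword arguments pass through too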
+
+ def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, env=None, **kwargs):
+ """Start the node."""
+ if extra_args is None:
+ extra_args = self.extra_args
+
+ # If listening and no -bind is given, then bitcoind would bind P2P ports on
+ # 0.0.0.0:P and 127.0.0.1:18445 (for incoming Tor connections), where P is
+ # a unique port chosen by the test framework and configured as port=P in
+ # bitcoin.conf. To avoid collisions on 127.0.0.1:18445, change it to
+ # 127.0.0.1:tor_port().
+ will_listen = all(e != "-nolisten" and e != "-listen=0" for e in extra_args)
+ has_explicit_bind = self.has_explicit_bind or any(e.startswith("-bind=") for e in extra_args)
+ if will_listen and not has_explicit_bind:
+ extra_args.append(f"-bind=0.0.0.0:{p2p_port(self.index)}")
+ extra_args.append(f"-bind=127.0.0.1:{tor_port(self.index)}=onion")
+
+ self.use_v2transport = "-v2transport=1" in extra_args or (self.default_to_v2 and "-v2transport=0" not in extra_args)
+
+ # Add a new stdout and stderr file each time bitcoind is started
+ if stderr is None:
+ stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
+ if stdout is None:
+ stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
+ self.stderr = stderr
+ self.stdout = stdout
+
+ if cwd is None:
+ cwd = self.cwd
+
+ # Delete any existing cookie file -- if such a file exists (eg due to
+ # unclean shutdown), it will get overwritten anyway by bitcoind, and
+ # potentially interfere with our attempt to authenticate
+ delete_cookie_file(self.datadir_path, self.chain)
+
+ # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
+ subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
+ if env is not None:
+ subp_env.update(env)
+
+ self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)
+
+ self.running = True
+ self.log.debug("bitcoind started, waiting for RPC to come up")
+
+ if self.start_perf:
+ self._start_perf()
+
+ def wait_for_rpc_connection(self, *, wait_for_import=True):
+ """Sets up an RPC connection to the bitcoind process. Returns False if unable to connect."""
+ # Poll at a rate of four times per second
+ poll_per_s = 4
+ for _ in range(poll_per_s * self.rpc_timeout):
+ if self.process.poll() is not None:
+ # Attach abrupt shutdown error/s to the exception message
+ self.stderr.seek(0)
+ str_error = ''.join(line.decode('utf-8') for line in self.stderr)
+ str_error += "************************\n" if str_error else ''
+
+ raise FailedToStartError(self._node_msg(
+ f'bitcoind exited with status {self.process.returncode} during initialization. {str_error}'))
+ try:
+ rpc = get_rpc_proxy(
+ rpc_url(self.datadir_path, self.index, self.chain, self.rpchost),
+ self.index,
+ timeout=self.rpc_timeout // 2, # Shorter timeout to allow for one retry in case of ETIMEDOUT
+ coveragedir=self.coverage_dir,
+ )
+ rpc.getblockcount()
+ # If the call to getblockcount() succeeds then the RPC connection is up
+ if self.version_is_at_least(190000) and wait_for_import:
+ # getmempoolinfo.loaded is available since commit
+ # bb8ae2c (version 0.19.0)
+ self.wait_until(lambda: rpc.getmempoolinfo()['loaded'])
+ # Wait for the node to finish reindex, block import, and
+ # loading the mempool. Usually importing happens fast or
+ # even "immediate" when the node is started. However, there
+ # is no guarantee and sometimes ImportBlocks might finish
+ # later. This is going to cause intermittent test failures,
+ # because generally the tests assume the node is fully
+ # ready after being started.
+ #
+ # For example, the node will reject block messages from p2p
+ # when it is still importing with the error "Unexpected
+ # block message received"
+ #
+ # The wait is done here to make tests as robust as possible
+ # and prevent racy tests and intermittent failures as much
+ # as possible. Some tests might not need this, but the
+    def wait_for_rpc_connection(self, *, wait_for_import=True):
+        """Sets up an RPC connection to the bitcoind process. Returns False if unable to connect."""
+        # Poll at a rate of four times per second
+        poll_per_s = 4
+        for _ in range(poll_per_s * self.rpc_timeout):
+            if self.process.poll() is not None:
+                # Attach abrupt shutdown error/s to the exception message
+                self.stderr.seek(0)
+                str_error = ''.join(line.decode('utf-8') for line in self.stderr)
+                str_error += "************************\n" if str_error else ''
+
+                raise FailedToStartError(self._node_msg(
+                    f'bitcoind exited with status {self.process.returncode} during initialization. {str_error}'))
+            try:
+                rpc = get_rpc_proxy(
+                    rpc_url(self.datadir_path, self.index, self.chain, self.rpchost),
+                    self.index,
+                    timeout=self.rpc_timeout // 2,  # Shorter timeout to allow for one retry in case of ETIMEDOUT
+                    coveragedir=self.coverage_dir,
+                )
+                rpc.getblockcount()
+                # If the call to getblockcount() succeeds then the RPC connection is up
+                if self.version_is_at_least(190000) and wait_for_import:
+                    # getmempoolinfo.loaded is available since commit
+                    # bb8ae2c (version 0.19.0)
+                    self.wait_until(lambda: rpc.getmempoolinfo()['loaded'])
+                    # Wait for the node to finish reindex, block import, and
+                    # loading the mempool. Usually importing happens fast or
+                    # even "immediate" when the node is started. However, there
+                    # is no guarantee and sometimes ImportBlocks might finish
+                    # later. This is going to cause intermittent test failures,
+                    # because generally the tests assume the node is fully
+                    # ready after being started.
+                    #
+                    # For example, the node will reject block messages from p2p
+                    # when it is still importing with the error "Unexpected
+                    # block message received"
+                    #
+                    # The wait is done here to make tests as robust as possible
+                    # and prevent racy tests and intermittent failures as much
+                    # as possible. Some tests might not need this, but the
+                    # overhead is trivial, and the added guarantees are worth
+                    # the minimal performance cost.
+                self.log.debug("RPC successfully started")
+                if self.use_cli:
+                    return
+                self.rpc = rpc
+                self.rpc_connected = True
+                self.url = self.rpc.rpc_url
+                return
+            except JSONRPCException as e:  # Initialization phase
+                # -28 RPC in warmup
+                # -342 Service unavailable, RPC server started but is shutting down due to error
+                if e.error['code'] != -28 and e.error['code'] != -342:
+                    raise  # unknown JSON RPC exception
+            except ConnectionResetError:
+                # This might happen when the RPC server is in warmup, but shut down before the call to getblockcount
+                # succeeds. Try again to properly raise the FailedToStartError
+                pass
+            except OSError as e:
+                if e.errno == errno.ETIMEDOUT:
+                    pass  # Treat identical to ConnectionResetError
+                elif e.errno == errno.ECONNREFUSED:
+                    pass  # Port not yet open?
+                else:
+                    raise  # unknown OS error
+            except ValueError as e:  # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting
+                if "No RPC credentials" not in str(e):
+                    raise
+            time.sleep(1.0 / poll_per_s)
+        self._raise_assertion_error("Unable to connect to bitcoind after {}s".format(self.rpc_timeout))
+
+    def wait_for_cookie_credentials(self):
+        """Ensures auth cookie credentials can be read, e.g. for testing CLI with -rpcwait before RPC connection is up."""
+        self.log.debug("Waiting for cookie credentials")
+        # Poll at a rate of four times per second.
+        poll_per_s = 4
+        for _ in range(poll_per_s * self.rpc_timeout):
+            try:
+                get_auth_cookie(self.datadir_path, self.chain)
+                self.log.debug("Cookie credentials successfully retrieved")
+                return
+            except ValueError:  # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting
+                pass  # so we continue polling until RPC credentials are retrieved
+            time.sleep(1.0 / poll_per_s)
+        self._raise_assertion_error("Unable to retrieve cookie credentials after {}s".format(self.rpc_timeout))
+
+    def generate(self, nblocks, maxtries=1000000, **kwargs):
+        self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`")
+        return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries, **kwargs)
+
+    def generateblock(self, *args, invalid_call, **kwargs):
+        assert not invalid_call
+        return self.__getattr__('generateblock')(*args, **kwargs)
+
+    def generatetoaddress(self, *args, invalid_call, **kwargs):
+        assert not invalid_call
+        return self.__getattr__('generatetoaddress')(*args, **kwargs)
+
+    def generatetodescriptor(self, *args, invalid_call, **kwargs):
+        assert not invalid_call
+        return self.__getattr__('generatetodescriptor')(*args, **kwargs)
+
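# Editor's note: a hedged sketch, not part of the patch. The keyword-only
# `invalid_call` argument above has no default, so calling the wrapper directly
# without it raises TypeError; tests are expected to go through the
# BitcoinTestFramework helper, which passes invalid_call=False and syncs the
# other nodes. Inside a test's run_test():
self.generate(self.nodes[0], 10)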
+    def setmocktime(self, timestamp):
+        """Wrapper for setmocktime RPC, sets self.mocktime"""
+        if timestamp == 0:
+            # setmocktime(0) resets to system time.
+            self.mocktime = None
+        else:
+            self.mocktime = timestamp
+        return self.__getattr__('setmocktime')(timestamp)
+
+    def get_wallet_rpc(self, wallet_name):
+        if self.use_cli:
+            return RPCOverloadWrapper(self.cli("-rpcwallet={}".format(wallet_name)), True, self.descriptors)
+        else:
+            assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
+            wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
+            return RPCOverloadWrapper(self.rpc / wallet_path, descriptors=self.descriptors)
+
+    def version_is_at_least(self, ver):
+        return self.version is None or self.version >= ver
+
+    def stop_node(self, expected_stderr='', *, wait=0, wait_until_stopped=True):
+        """Stop the node."""
+        if not self.running:
+            return
+        self.log.debug("Stopping node")
+        try:
+            # Do not use wait argument when testing older nodes, e.g. in wallet_backwards_compatibility.py
+            if self.version_is_at_least(180000):
+                self.stop(wait=wait)
+            else:
+                self.stop()
+        except http.client.CannotSendRequest:
+            self.log.exception("Unable to stop node.")
+
+        # If there are any running perf processes, stop them.
+        for profile_name in tuple(self.perf_subprocesses.keys()):
+            self._stop_perf(profile_name)
+
+        del self.p2ps[:]
+
+        assert (not expected_stderr) or wait_until_stopped  # Must wait to check stderr
+        if wait_until_stopped:
+            self.wait_until_stopped(expected_stderr=expected_stderr)
+
+    def is_node_stopped(self, *, expected_stderr="", expected_ret_code=0):
+        """Checks whether the node has stopped.
+
+        Returns True if the node has stopped. False otherwise.
+        This method is responsible for freeing resources (self.process)."""
+        if not self.running:
+            return True
+        return_code = self.process.poll()
+        if return_code is None:
+            return False
+
+        # process has stopped. Assert that it didn't return an error code.
+        assert return_code == expected_ret_code, self._node_msg(
+            f"Node returned unexpected exit code ({return_code}) vs ({expected_ret_code}) when stopping")
+        # Check that stderr is as expected
+        self.stderr.seek(0)
+        stderr = self.stderr.read().decode('utf-8').strip()
+        if stderr != expected_stderr:
+            raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
+
+        self.stdout.close()
+        self.stderr.close()
+
+        self.running = False
+        self.process = None
+        self.rpc_connected = False
+        self.rpc = None
+        self.log.debug("Node stopped")
+        return True
+
+    def wait_until_stopped(self, *, timeout=BITCOIND_PROC_WAIT_TIMEOUT, expect_error=False, **kwargs):
+        if "expected_ret_code" not in kwargs:
+            kwargs["expected_ret_code"] = 1 if expect_error else 0  # Whether node shutdown returns EXIT_FAILURE or EXIT_SUCCESS
+        self.wait_until(lambda: self.is_node_stopped(**kwargs), timeout=timeout)
+
+    def replace_in_config(self, replacements):
+        """
+        Perform replacements in the configuration file.
+        The substitutions are passed as a list of search-replace-tuples, e.g.
+        [("old", "new"), ("foo", "bar"), ...]
+        """
+        with open(self.bitcoinconf, 'r', encoding='utf8') as conf:
+            conf_data = conf.read()
+        for replacement in replacements:
+            assert_equal(len(replacement), 2)
+            old, new = replacement[0], replacement[1]
+            conf_data = conf_data.replace(old, new)
+        with open(self.bitcoinconf, 'w', encoding='utf8') as conf:
+            conf.write(conf_data)
+
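# Editor's note: a minimal usage sketch for replace_in_config above, not part
# of the patch. The node should be stopped first, since bitcoin.conf is only
# read at startup. Assuming a TestNode `node` whose config contains "prune=1":
node.stop_node()
node.replace_in_config([("prune=1", "prune=0")])
node.start()
node.wait_for_rpc_connection()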
+ [("old", "new"), ("foo", "bar"), ...]) + """) + with open(self.bitcoinconf, 'r', encoding='utf8') as conf:) + conf_data = conf.read()) + for replacement in replacements:) + assert_equal(len(replacement), 2)) + old, new = replacement[0], replacement[1]) + conf_data = conf_data.replace(old, new)) + with open(self.bitcoinconf, 'w', encoding='utf8') as conf:) + conf.write(conf_data)) +) + @property) + def chain_path(self) -> Path:) + return self.datadir_path / self.chain) +) + @property) + def debug_log_path(self) -> Path:) + return self.chain_path / 'debug.log') +) + @property) + def blocks_path(self) -> Path:) + return self.chain_path / "blocks") +) + @property) + def blocks_key_path(self) -> Path:) + return self.blocks_path / "xor.dat") +) + def read_xor_key(self) -> bytes:) + with open(self.blocks_key_path, "rb") as xor_f:) + return xor_f.read(NUM_XOR_BYTES)) +) + @property) + def wallets_path(self) -> Path:) + return self.chain_path / "wallets") +) + def debug_log_size(self, **kwargs) -> int:) + with open(self.debug_log_path, **kwargs) as dl:) + dl.seek(0, 2)) + return dl.tell()) +) + @contextlib.contextmanager) + def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2):) + if unexpected_msgs is None:) + unexpected_msgs = []) + assert_equal(type(expected_msgs), list)) + assert_equal(type(unexpected_msgs), list)) +) + time_end = time.time() + timeout * self.timeout_factor) + prev_size = self.debug_log_size(encoding="utf-8") # Must use same encoding that is used to read() below) +) + yield) +) + while True:) + found = True) + with open(self.debug_log_path, encoding="utf-8", errors="replace") as dl:) + dl.seek(prev_size)) + log = dl.read()) + print_log = " - " + "\n - ".join(log.splitlines())) + for unexpected_msg in unexpected_msgs:) + if re.search(re.escape(unexpected_msg), log, flags=re.MULTILINE):) + self._raise_assertion_error('Unexpected message "{}" partially matches log:\n\n{}\n\n'.format(unexpected_msg, print_log))) + for expected_msg in expected_msgs:) + if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:) + found = False) + if found:) + return) + if time.time() >= time_end:) + break) + time.sleep(0.05)) + self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log))) +) + @contextlib.contextmanager) + def busy_wait_for_debug_log(self, expected_msgs, timeout=60):) + """) + Block until we see a particular debug log message fragment or until we exceed the timeout.) + Return:) + the number of log lines we encountered when matching) + """) + time_end = time.time() + timeout * self.timeout_factor) + prev_size = self.debug_log_size(mode="rb") # Must use same mode that is used to read() below) +) + yield) +) + while True:) + found = True) + with open(self.debug_log_path, "rb") as dl:) + dl.seek(prev_size)) + log = dl.read()) +) + for expected_msg in expected_msgs:) + if expected_msg not in log:) + found = False) +) + if found:) + return) +) + if time.time() >= time_end:) + print_log = " - " + "\n - ".join(log.decode("utf8", errors="replace").splitlines())) + break) +) + # No sleep here because we want to detect the message fragment as fast as) + # possible.) +) + self._raise_assertion_error() + 'Expected messages "{}" does not partially match log:\n\n{}\n\n'.format() + str(expected_msgs), print_log))) +) + @contextlib.contextmanager) + def wait_for_new_peer(self, timeout=5):) + """) + Wait until the node is connected to at least one new peer. 
+    @contextlib.contextmanager
+    def wait_for_new_peer(self, timeout=5):
+        """
+        Wait until the node is connected to at least one new peer. We detect this
+        by watching for an increased highest peer id, using the `getpeerinfo` RPC call.
+        Note that the simpler approach of only accounting for the number of peers
+        suffers from race conditions, as disconnects from unrelated previous peers
+        could happen anytime in-between.
+        """
+        def get_highest_peer_id():
+            peer_info = self.getpeerinfo()
+            return peer_info[-1]["id"] if peer_info else -1
+
+        initial_peer_id = get_highest_peer_id()
+        yield
+        self.wait_until(lambda: get_highest_peer_id() > initial_peer_id, timeout=timeout)
+
+    @contextlib.contextmanager
+    def profile_with_perf(self, profile_name: str):
+        """
+        Context manager that allows easy profiling of node activity using `perf`.
+
+        See `test/functional/README.md` for details on perf usage.
+
+        Args:
+            profile_name: This string will be appended to the
+                profile data filename generated by perf.
+        """
+        subp = self._start_perf(profile_name)
+
+        yield
+
+        if subp:
+            self._stop_perf(profile_name)
+
+    def _start_perf(self, profile_name=None):
+        """Start a perf process to profile this node.
+
+        Returns the subprocess running perf."""
+        subp = None
+
+        def test_success(cmd):
+            return subprocess.call(
+                # shell=True required for pipe use below
+                cmd, shell=True,
+                stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0
+
+        if platform.system() != 'Linux':
+            self.log.warning("Can't profile with perf; only available on Linux platforms")
+            return None
+
+        if not test_success('which perf'):
+            self.log.warning("Can't profile with perf; must install perf-tools")
+            return None
+
+        if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))):
+            self.log.warning(
+                "perf output won't be very useful without debug symbols compiled into bitcoind")
+
+        output_path = tempfile.NamedTemporaryFile(
+            dir=self.datadir_path,
+            prefix="{}.perf.data.".format(profile_name or 'test'),
+            delete=False,
+        ).name
+
+        cmd = [
+            'perf', 'record',
+            '-g',                     # Record the callgraph.
+            '--call-graph', 'dwarf',  # Compatibility for gcc's --fomit-frame-pointer.
+            '-F', '101',              # Sampling frequency in Hz.
+            '-p', str(self.process.pid),
+            '-o', output_path,
+        ]
+        subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        self.perf_subprocesses[profile_name] = subp
+
+        return subp
+
+    def _stop_perf(self, profile_name):
+        """Stop (and pop) a perf subprocess."""
+        subp = self.perf_subprocesses.pop(profile_name)
+        output_path = subp.args[subp.args.index('-o') + 1]
+
+        subp.terminate()
+        subp.wait(timeout=10)
+
+        stderr = subp.stderr.read().decode()
+        if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr:
+            self.log.warning(
+                "perf couldn't collect data! Try "
+                "'sudo sysctl -w kernel.perf_event_paranoid=-1'")
+        else:
+            report_cmd = "perf report -i {}".format(output_path)
+            self.log.info("See perf output by running '{}'".format(report_cmd))
+
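# Editor's note: a hedged usage sketch for profile_with_perf above, not part of
# the patch. Assuming a Linux host with perf installed and a test's run_test()
# context, perf samples the node for the duration of the with-block and leaves
# a "<profile_name>.perf.data.*" file in the node's datadir:
with self.nodes[0].profile_with_perf("block-generation"):
    self.generate(self.nodes[0], 100)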
+    def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
+        """Attempt to start the node and expect it to raise an error.
+
+        extra_args: extra arguments to pass through to bitcoind
+        expected_msg: regex that stderr should match when bitcoind fails
+
+        Will throw if bitcoind starts without an error.
+        Will throw if an expected_msg is provided and it does not match bitcoind's stderr."""
+        assert not self.running
+        with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
+                tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
+            try:
+                self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
+                ret = self.process.wait(timeout=self.rpc_timeout)
+                self.log.debug(self._node_msg(f'bitcoind exited with status {ret} during initialization'))
+                assert_not_equal(ret, 0)  # Exit code must indicate failure
+                self.running = False
+                self.process = None
+                # Check stderr for expected message
+                if expected_msg is not None:
+                    log_stderr.seek(0)
+                    stderr = log_stderr.read().decode('utf-8').strip()
+                    if match == ErrorMatch.PARTIAL_REGEX:
+                        if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
+                            self._raise_assertion_error(
+                                'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
+                    elif match == ErrorMatch.FULL_REGEX:
+                        if re.fullmatch(expected_msg, stderr) is None:
+                            self._raise_assertion_error(
+                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
+                    elif match == ErrorMatch.FULL_TEXT:
+                        if expected_msg != stderr:
+                            self._raise_assertion_error(
+                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
+            except subprocess.TimeoutExpired:
+                self.process.kill()
+                self.running = False
+                self.process = None
+                assert_msg = f'bitcoind should have exited within {self.rpc_timeout}s '
+                if expected_msg is None:
+                    assert_msg += "with an error"
+                else:
+                    assert_msg += "with expected error " + expected_msg
+                self._raise_assertion_error(assert_msg)
+
+    def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, send_version=True, supports_v2_p2p=None, wait_for_v2_handshake=True, expect_success=True, **kwargs):
+        """Add an inbound p2p connection to the node.
+
+        This method adds the p2p connection to the self.p2ps list and also
+        returns the connection to the caller.
+) + When self.use_v2transport is True, TestNode advertises NODE_P2P_V2 service flag) +) + An inbound connection is made from TestNode <------ P2PConnection) + - if TestNode doesn't advertise NODE_P2P_V2 service, P2PConnection sends version message and v1 P2P is followed) + - if TestNode advertises NODE_P2P_V2 service, (and if P2PConnections supports v2 P2P)) + P2PConnection sends ellswift bytes and v2 P2P is followed) + """) + if 'dstport' not in kwargs:) + kwargs['dstport'] = p2p_port(self.index)) + if 'dstaddr' not in kwargs:) + kwargs['dstaddr'] = '127.0.0.1') + if supports_v2_p2p is None:) + supports_v2_p2p = self.use_v2transport) +) + if self.use_v2transport:) + kwargs['services'] = kwargs.get('services', P2P_SERVICES) | NODE_P2P_V2) + supports_v2_p2p = self.use_v2transport and supports_v2_p2p) + p2p_conn.peer_connect(**kwargs, send_version=send_version, net=self.chain, timeout_factor=self.timeout_factor, supports_v2_p2p=supports_v2_p2p)()) +) + self.p2ps.append(p2p_conn)) + if not expect_success:) + return p2p_conn) + p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False)) + if supports_v2_p2p and wait_for_v2_handshake:) + p2p_conn.wait_until(lambda: p2p_conn.v2_state.tried_v2_handshake)) + if send_version:) + p2p_conn.wait_until(lambda: not p2p_conn.on_connection_send_msg)) + if wait_for_verack:) + # Wait for the node to send us the version and verack) + p2p_conn.wait_for_verack()) + # At this point we have sent our version message and received the version and verack, however the full node) + # has not yet received the verack from us (in reply to their version). So, the connection is not yet fully) + # established (fSuccessfullyConnected).) + #) + # This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the) + # message we send. However, it might lead to races where we are expecting to receive a message. E.g. a) + # transaction that will be added to the mempool as soon as we return here.) + #) + # So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds)) + # in comparison to the upside of making tests less fragile and unexpected intermittent errors less likely.) + p2p_conn.sync_with_ping()) +) + # Consistency check that the node received our user agent string.) + # Find our connection in getpeerinfo by our address:port and theirs, as this combination is unique.) + sockname = p2p_conn._transport.get_extra_info("socket").getsockname()) + our_addr_and_port = f"{sockname[0]}:{sockname[1]}") + dst_addr_and_port = f"{p2p_conn.dstaddr}:{p2p_conn.dstport}") + info = [peer for peer in self.getpeerinfo() if peer["addr"] == our_addr_and_port and peer["addrbind"] == dst_addr_and_port]) + assert_equal(len(info), 1)) + assert_equal(info[0]["subver"], P2P_SUBVERSION)) +) + return p2p_conn) +) + def add_outbound_p2p_connection(self, p2p_conn, *, wait_for_verack=True, wait_for_disconnect=False, p2p_idx, connection_type="outbound-full-relay", supports_v2_p2p=None, advertise_v2_p2p=None, **kwargs):) + """Add an outbound p2p connection from node. Must be an) + "outbound-full-relay", "block-relay-only", "addr-fetch" or "feeler" connection.) +) + This method adds the p2p connection to the self.p2ps list and returns) + the connection to the caller.) +) + p2p_idx must be different for simultaneously connected peers. When reusing it for the next peer) + after disconnecting the previous one, it is necessary to wait for the disconnect to finish to avoid) + a race condition.) 
+) + Parameters:) + supports_v2_p2p: whether p2p_conn supports v2 P2P or not) + advertise_v2_p2p: whether p2p_conn is advertised to support v2 P2P or not) +) + An outbound connection is made from TestNode -------> P2PConnection) + - if P2PConnection doesn't advertise_v2_p2p, TestNode sends version message and v1 P2P is followed) + - if P2PConnection both supports_v2_p2p and advertise_v2_p2p, TestNode sends ellswift bytes and v2 P2P is followed) + - if P2PConnection doesn't supports_v2_p2p but advertise_v2_p2p,) + TestNode sends ellswift bytes and P2PConnection disconnects,) + TestNode reconnects by sending version message and v1 P2P is followed) + """) +) + def addconnection_callback(address, port):) + self.log.debug("Connecting to %s:%d %s" % (address, port, connection_type))) + self.addconnection('%s:%d' % (address, port), connection_type, advertise_v2_p2p)) +) + if supports_v2_p2p is None:) + supports_v2_p2p = self.use_v2transport) + if advertise_v2_p2p is None:) + advertise_v2_p2p = self.use_v2transport) +) + if advertise_v2_p2p:) + kwargs['services'] = kwargs.get('services', P2P_SERVICES) | NODE_P2P_V2) + assert self.use_v2transport # only a v2 TestNode could make a v2 outbound connection) +) + # if P2PConnection is advertised to support v2 P2P when it doesn't actually support v2 P2P,) + # reconnection needs to be attempted using v1 P2P by sending version message) + reconnect = advertise_v2_p2p and not supports_v2_p2p) + # P2PConnection needs to be advertised to support v2 P2P so that ellswift bytes are sent instead of msg_version) + supports_v2_p2p = supports_v2_p2p and advertise_v2_p2p) + p2p_conn.peer_accept_connection(connect_cb=addconnection_callback, connect_id=p2p_idx + 1, net=self.chain, timeout_factor=self.timeout_factor, supports_v2_p2p=supports_v2_p2p, reconnect=reconnect, **kwargs)()) +) + if reconnect:) + p2p_conn.wait_for_reconnect()) +) + if connection_type == "feeler" or wait_for_disconnect:) + # feeler connections are closed as soon as the node receives a `version` message) + p2p_conn.wait_until(lambda: p2p_conn.message_count["version"] == 1, check_connected=False)) + p2p_conn.wait_until(lambda: not p2p_conn.is_connected, check_connected=False)) + else:) + p2p_conn.wait_for_connect()) + self.p2ps.append(p2p_conn)) +) + if supports_v2_p2p:) + p2p_conn.wait_until(lambda: p2p_conn.v2_state.tried_v2_handshake)) + p2p_conn.wait_until(lambda: not p2p_conn.on_connection_send_msg)) + if wait_for_verack:) + p2p_conn.wait_for_verack()) + p2p_conn.sync_with_ping()) +) + return p2p_conn) +) + def num_test_p2p_connections(self):) + """Return number of test framework p2p connections to the node.""") + return len([peer for peer in self.getpeerinfo() if peer['subver'] == P2P_SUBVERSION])) +) + def disconnect_p2ps(self):) + """Close all p2p connections to the node.) + Use only after each p2p has sent a version message to ensure the wait works.""") + for p in self.p2ps:) + p.peer_disconnect()) + del self.p2ps[:]) +) + self.wait_until(lambda: self.num_test_p2p_connections() == 0)) +) + def bumpmocktime(self, seconds):) + """Fast forward using setmocktime to self.mocktime + seconds. 
Requires setmocktime to have) + been called at some point in the past.""") + assert self.mocktime) + self.mocktime += seconds) + self.setmocktime(self.mocktime)) +) + def wait_until(self, test_function, timeout=60):) + return wait_until_helper_internal(test_function, timeout=timeout, timeout_factor=self.timeout_factor)) +) +) +class TestNodeCLIAttr:) + def __init__(self, cli, command):) + self.cli = cli) + self.command = command) +) + def __call__(self, *args, **kwargs):) + return self.cli.send_cli(self.command, *args, **kwargs)) +) + def get_request(self, *args, **kwargs):) + return lambda: self(*args, **kwargs)) +) +) +def arg_to_cli(arg):) + if isinstance(arg, bool):) + return str(arg).lower()) + elif arg is None:) + return 'null') + elif isinstance(arg, dict) or isinstance(arg, list):) + return json.dumps(arg, default=serialization_fallback)) + else:) + return str(arg)) +) +) +class TestNodeCLI():) + """Interface to bitcoin-cli for an individual node""") + def __init__(self, binary, datadir):) + self.options = []) + self.binary = binary) + self.datadir = datadir) + self.input = None) + self.log = logging.getLogger('TestFramework.bitcoincli')) +) + def __call__(self, *options, input=None):) + # TestNodeCLI is callable with bitcoin-cli command-line options) + cli = TestNodeCLI(self.binary, self.datadir)) + cli.options = [str(o) for o in options]) + cli.input = input) + return cli) +) + def __getattr__(self, command):) + return TestNodeCLIAttr(self, command)) +) + def batch(self, requests):) + results = []) + for request in requests:) + try:) + results.append(dict(result=request()))) + except JSONRPCException as e:) + results.append(dict(error=e))) + return results) +) + def send_cli(self, clicommand=None, *args, **kwargs):) + """Run bitcoin-cli command. 
Deserializes returned string as python object.""") + pos_args = [arg_to_cli(arg) for arg in args]) + named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()]) + p_args = [self.binary, f"-datadir={self.datadir}"] + self.options) + if named_args:) + p_args += ["-named"]) + if clicommand is not None:) + p_args += [clicommand]) + p_args += pos_args + named_args) + self.log.debug("Running bitcoin-cli {}".format(p_args[2:]))) + process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)) + cli_stdout, cli_stderr = process.communicate(input=self.input)) + returncode = process.poll()) + if returncode:) + match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)) + if match:) + code, message = match.groups()) + raise JSONRPCException(dict(code=int(code), message=message))) + # Ignore cli_stdout, raise with cli_stderr) + raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)) + try:) + return json.loads(cli_stdout, parse_float=decimal.Decimal)) + except (json.JSONDecodeError, decimal.InvalidOperation):) + return cli_stdout.rstrip("\n")) +) +class RPCOverloadWrapper():) + def __init__(self, rpc, cli=False, descriptors=False):) + self.rpc = rpc) + self.is_cli = cli) + self.descriptors = descriptors) +) + def __getattr__(self, name):) + return getattr(self.rpc, name)) +) + def createwallet_passthrough(self, *args, **kwargs):) + return self.__getattr__("createwallet")(*args, **kwargs)) +) + def createwallet(self, wallet_name, disable_private_keys=None, blank=None, passphrase='', avoid_reuse=None, descriptors=None, load_on_startup=None, external_signer=None):) + if descriptors is None:) + descriptors = self.descriptors) + return self.__getattr__('createwallet')(wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, descriptors, load_on_startup, external_signer)) +) + def importprivkey(self, privkey, label=None, rescan=None):) + wallet_info = self.getwalletinfo()) + if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):) + return self.__getattr__('importprivkey')(privkey, label, rescan)) + desc = descsum_create('combo(' + privkey + ')')) + req = [{) + 'desc': desc,) + 'timestamp': 0 if rescan else 'now',) + 'label': label if label else '') + }]) + import_res = self.importdescriptors(req)) + if not import_res[0]['success']:) + raise JSONRPCException(import_res[0]['error'])) +) + def addmultisigaddress(self, nrequired, keys, label=None, address_type=None):) + wallet_info = self.getwalletinfo()) + if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):) + return self.__getattr__('addmultisigaddress')(nrequired, keys, label, address_type)) + cms = self.createmultisig(nrequired, keys, address_type)) + req = [{) + 'desc': cms['descriptor'],) + 'timestamp': 0,) + 'label': label if label else '') + }]) + import_res = self.importdescriptors(req)) + if not import_res[0]['success']:) + raise JSONRPCException(import_res[0]['error'])) + return cms) +) + def importpubkey(self, pubkey, label=None, rescan=None):) + wallet_info = self.getwalletinfo()) + if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):) + return self.__getattr__('importpubkey')(pubkey, label, rescan)) + desc = descsum_create('combo(' + pubkey + ')')) + req = [{) + 'desc': desc,) + 'timestamp': 0 if rescan else 'now',) + 'label': label if label else '') + }]) + 
import_res = self.importdescriptors(req)) + if not import_res[0]['success']:) + raise JSONRPCException(import_res[0]['error'])) +) + def importaddress(self, address, label=None, rescan=None, p2sh=None):) + wallet_info = self.getwalletinfo()) + if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):) + return self.__getattr__('importaddress')(address, label, rescan, p2sh)) + is_hex = False) + try:) + int(address ,16)) + is_hex = True) + desc = descsum_create('raw(' + address + ')')) + except Exception:) + desc = descsum_create('addr(' + address + ')')) + reqs = [{) + 'desc': desc,) + 'timestamp': 0 if rescan else 'now',) + 'label': label if label else '') + }]) + if is_hex and p2sh:) + reqs.append({) + 'desc': descsum_create('p2sh(raw(' + address + '))'),) + 'timestamp': 0 if rescan else 'now',) + 'label': label if label else '') + })) + import_res = self.importdescriptors(reqs)) + for res in import_res:) + if not res['success']:) + raise JSONRPCException(res['error'])) diff --git a/test/functional/wallet_avoidreuse.py b/test/functional/wallet_avoidreuse.py index c8a6e354a5a4ea..d63a33399265c6 100755 --- a/test/functional/wallet_avoidreuse.py +++ b/test/functional/wallet_avoidreuse.py @@ -1,385 +1,385 @@ -#!/usr/bin/env python3 -# Copyright (c) 2018-2022 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the avoid_reuse and setwalletflag features.""" - -from test_framework.address import address_to_scriptpubkey -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_not_equal, - assert_approx, - assert_equal, - assert_raises_rpc_error, -) - -def reset_balance(node, discardaddr): - '''Throw away all owned coins by the node so it gets a balance of 0.''' - balance = node.getbalance(avoid_reuse=False) - if balance > 0.5: - node.sendtoaddress(address=discardaddr, amount=balance, subtractfeefromamount=True, avoid_reuse=False) - -def count_unspent(node): - '''Count the unspent outputs for the given node and return various statistics''' - r = { - "total": { - "count": 0, - "sum": 0, - }, - "reused": { - "count": 0, - "sum": 0, - }, - } - supports_reused = True - for utxo in node.listunspent(minconf=0): - r["total"]["count"] += 1 - r["total"]["sum"] += utxo["amount"] - if supports_reused and "reused" in utxo: - if utxo["reused"]: - r["reused"]["count"] += 1 - r["reused"]["sum"] += utxo["amount"] - else: - supports_reused = False - r["reused"]["supported"] = supports_reused - return r - -def assert_unspent(node, total_count=None, total_sum=None, reused_supported=None, reused_count=None, reused_sum=None, margin=0.001): - '''Make assertions about a node's unspent output statistics''' - stats = count_unspent(node) - if total_count is not None: - assert_equal(stats["total"]["count"], total_count) - if total_sum is not None: - assert_approx(stats["total"]["sum"], total_sum, margin) - if reused_supported is not None: - assert_equal(stats["reused"]["supported"], reused_supported) - if reused_count is not None: - assert_equal(stats["reused"]["count"], reused_count) - if reused_sum is not None: - assert_approx(stats["reused"]["sum"], reused_sum, margin) - -def assert_balances(node, mine, margin=0.001): - '''Make assertions about a node's getbalances output''' - got = node.getbalances()["mine"] - for k,v in mine.items(): - assert_approx(got[k], v, margin) - -class 
AvoidReuseTest(BitcoinTestFramework): - def add_options(self, parser): - self.add_wallet_options(parser) - - def set_test_params(self): - self.num_nodes = 2 - # whitelist peers to speed up tx relay / mempool sync - self.noban_tx_relay = True - - def skip_test_if_missing_module(self): - self.skip_if_no_wallet() - - def run_test(self): - '''Set up initial chain and run tests defined below''' - - self.test_persistence() - self.test_immutable() - - self.generate(self.nodes[0], 110) - self.test_change_remains_change(self.nodes[1]) - reset_balance(self.nodes[1], self.nodes[0].getnewaddress()) - self.test_sending_from_reused_address_without_avoid_reuse() - reset_balance(self.nodes[1], self.nodes[0].getnewaddress()) - self.test_sending_from_reused_address_fails("legacy") - reset_balance(self.nodes[1], self.nodes[0].getnewaddress()) - self.test_sending_from_reused_address_fails("p2sh-segwit") - reset_balance(self.nodes[1], self.nodes[0].getnewaddress()) - self.test_sending_from_reused_address_fails("bech32") - reset_balance(self.nodes[1], self.nodes[0].getnewaddress()) - self.test_getbalances_used() - reset_balance(self.nodes[1], self.nodes[0].getnewaddress()) - self.test_full_destination_group_is_preferred() - reset_balance(self.nodes[1], self.nodes[0].getnewaddress()) - self.test_all_destination_groups_are_used() - - def test_persistence(self): - '''Test that wallet files persist the avoid_reuse flag.''' - self.log.info("Test wallet files persist avoid_reuse flag") - - # Configure node 1 to use avoid_reuse - self.nodes[1].setwalletflag('avoid_reuse') - - # Flags should be node1.avoid_reuse=false, node2.avoid_reuse=true - assert_equal(self.nodes[0].getwalletinfo()["avoid_reuse"], False) - assert_equal(self.nodes[1].getwalletinfo()["avoid_reuse"], True) - - self.restart_node(1) - self.connect_nodes(0, 1) - - # Flags should still be node1.avoid_reuse=false, node2.avoid_reuse=true - assert_equal(self.nodes[0].getwalletinfo()["avoid_reuse"], False) - assert_equal(self.nodes[1].getwalletinfo()["avoid_reuse"], True) - - # Attempting to set flag to its current state should throw - assert_raises_rpc_error(-8, "Wallet flag is already set to false", self.nodes[0].setwalletflag, 'avoid_reuse', False) - assert_raises_rpc_error(-8, "Wallet flag is already set to true", self.nodes[1].setwalletflag, 'avoid_reuse', True) - - assert_raises_rpc_error(-8, "Unknown wallet flag: abc", self.nodes[0].setwalletflag, 'abc', True) - - # Create a wallet with avoid reuse, and test that disabling it afterwards persists - self.nodes[1].createwallet(wallet_name="avoid_reuse_persist", avoid_reuse=True) - w = self.nodes[1].get_wallet_rpc("avoid_reuse_persist") - assert_equal(w.getwalletinfo()["avoid_reuse"], True) - w.setwalletflag("avoid_reuse", False) - assert_equal(w.getwalletinfo()["avoid_reuse"], False) - w.unloadwallet() - self.nodes[1].loadwallet("avoid_reuse_persist") - assert_equal(w.getwalletinfo()["avoid_reuse"], False) - w.unloadwallet() - - def test_immutable(self): - '''Test immutable wallet flags''' - self.log.info("Test immutable wallet flags") - - # Attempt to set the disable_private_keys flag; this should not work - assert_raises_rpc_error(-8, "Wallet flag is immutable", self.nodes[1].setwalletflag, 'disable_private_keys') - - tempwallet = ".wallet_avoidreuse.py_test_immutable_wallet.dat" - - # Create a wallet with disable_private_keys set; this should work - self.nodes[1].createwallet(wallet_name=tempwallet, disable_private_keys=True) - w = self.nodes[1].get_wallet_rpc(tempwallet) - - # Attempt to unset the 
disable_private_keys flag; this should not work - assert_raises_rpc_error(-8, "Wallet flag is immutable", w.setwalletflag, 'disable_private_keys', False) - - # Unload temp wallet - self.nodes[1].unloadwallet(tempwallet) - - def test_change_remains_change(self, node): - self.log.info("Test that change doesn't turn into non-change when spent") - - reset_balance(node, node.getnewaddress()) - addr = node.getnewaddress() - txid = node.sendtoaddress(addr, 1) - out = node.listunspent(minconf=0, query_options={'minimumAmount': 2}) - assert_equal(len(out), 1) - assert_equal(out[0]['txid'], txid) - changeaddr = out[0]['address'] - - # Make sure it's starting out as change as expected - assert node.getaddressinfo(changeaddr)['ischange'] - for logical_tx in node.listtransactions(): - assert logical_tx.get('address') != changeaddr - - # Spend it - reset_balance(node, node.getnewaddress()) - - # It should still be change - assert node.getaddressinfo(changeaddr)['ischange'] - for logical_tx in node.listtransactions(): - assert logical_tx.get('address') != changeaddr - - def test_sending_from_reused_address_without_avoid_reuse(self): - ''' - Test the same as test_sending_from_reused_address_fails, except send the 10 BTC with - the avoid_reuse flag set to false. This means the 10 BTC send should succeed, - where it fails in test_sending_from_reused_address_fails. - ''' - self.log.info("Test sending from reused address with avoid_reuse=false") - - fundaddr = self.nodes[1].getnewaddress() - retaddr = self.nodes[0].getnewaddress() - - self.nodes[0].sendtoaddress(fundaddr, 10) - self.generate(self.nodes[0], 1) - - # listunspent should show 1 single, unused 10 btc output - assert_unspent(self.nodes[1], total_count=1, total_sum=10, reused_supported=True, reused_count=0) - # getbalances should show no used, 10 btc trusted - assert_balances(self.nodes[1], mine={"used": 0, "trusted": 10}) - # node 0 should not show a used entry, as it does not enable avoid_reuse - assert "used" not in self.nodes[0].getbalances()["mine"] - - self.nodes[1].sendtoaddress(retaddr, 5) - self.generate(self.nodes[0], 1) - - # listunspent should show 1 single, unused 5 btc output - assert_unspent(self.nodes[1], total_count=1, total_sum=5, reused_supported=True, reused_count=0) - # getbalances should show no used, 5 btc trusted - assert_balances(self.nodes[1], mine={"used": 0, "trusted": 5}) - - self.nodes[0].sendtoaddress(fundaddr, 10) - self.generate(self.nodes[0], 1) - - # listunspent should show 2 total outputs (5, 10 btc), one unused (5), one reused (10) - assert_unspent(self.nodes[1], total_count=2, total_sum=15, reused_count=1, reused_sum=10) - # getbalances should show 10 used, 5 btc trusted - assert_balances(self.nodes[1], mine={"used": 10, "trusted": 5}) - - self.nodes[1].sendtoaddress(address=retaddr, amount=10, avoid_reuse=False) - - # listunspent should show 1 total outputs (5 btc), unused - assert_unspent(self.nodes[1], total_count=1, total_sum=5, reused_count=0) - # getbalances should show no used, 5 btc trusted - assert_balances(self.nodes[1], mine={"used": 0, "trusted": 5}) - - # node 1 should now have about 5 btc left (for both cases) - assert_approx(self.nodes[1].getbalance(), 5, 0.001) - assert_approx(self.nodes[1].getbalance(avoid_reuse=False), 5, 0.001) - - def test_sending_from_reused_address_fails(self, second_addr_type): - ''' - Test the simple case where [1] generates a new address A, then - [0] sends 10 BTC to A. - [1] spends 5 BTC from A. (leaving roughly 5 BTC useable) - [0] sends 10 BTC to A again. 
- [1] tries to spend 10 BTC (fails; dirty). - [1] tries to spend 4 BTC (succeeds; change address sufficient) - ''' - self.log.info("Test sending from reused {} address fails".format(second_addr_type)) - - fundaddr = self.nodes[1].getnewaddress(label="", address_type="legacy") - retaddr = self.nodes[0].getnewaddress() - - self.nodes[0].sendtoaddress(fundaddr, 10) - self.generate(self.nodes[0], 1) - - # listunspent should show 1 single, unused 10 btc output - assert_unspent(self.nodes[1], total_count=1, total_sum=10, reused_supported=True, reused_count=0) - # getbalances should show no used, 10 btc trusted - assert_balances(self.nodes[1], mine={"used": 0, "trusted": 10}) - - self.nodes[1].sendtoaddress(retaddr, 5) - self.generate(self.nodes[0], 1) - - # listunspent should show 1 single, unused 5 btc output - assert_unspent(self.nodes[1], total_count=1, total_sum=5, reused_supported=True, reused_count=0) - # getbalances should show no used, 5 btc trusted - assert_balances(self.nodes[1], mine={"used": 0, "trusted": 5}) - - if not self.options.descriptors: - # For the second send, we transmute it to a related single-key address - # to make sure it's also detected as reuse - fund_spk = address_to_scriptpubkey(fundaddr).hex() - fund_decoded = self.nodes[0].decodescript(fund_spk) - if second_addr_type == "p2sh-segwit": - new_fundaddr = fund_decoded["segwit"]["p2sh-segwit"] - elif second_addr_type == "bech32": - new_fundaddr = fund_decoded["segwit"]["address"] - else: - new_fundaddr = fundaddr - assert_equal(second_addr_type, "legacy") - - self.nodes[0].sendtoaddress(new_fundaddr, 10) - self.generate(self.nodes[0], 1) - - # listunspent should show 2 total outputs (5, 10 btc), one unused (5), one reused (10) - assert_unspent(self.nodes[1], total_count=2, total_sum=15, reused_count=1, reused_sum=10) - # getbalances should show 10 used, 5 btc trusted - assert_balances(self.nodes[1], mine={"used": 10, "trusted": 5}) - - # node 1 should now have a balance of 5 (no dirty) or 15 (including dirty) - assert_approx(self.nodes[1].getbalance(), 5, 0.001) - assert_approx(self.nodes[1].getbalance(avoid_reuse=False), 15, 0.001) - - assert_raises_rpc_error(-6, "Insufficient funds", self.nodes[1].sendtoaddress, retaddr, 10) - - self.nodes[1].sendtoaddress(retaddr, 4) - - # listunspent should show 2 total outputs (1, 10 btc), one unused (1), one reused (10) - assert_unspent(self.nodes[1], total_count=2, total_sum=11, reused_count=1, reused_sum=10) - # getbalances should show 10 used, 1 btc trusted - assert_balances(self.nodes[1], mine={"used": 10, "trusted": 1}) - - # node 1 should now have about 1 btc left (no dirty) and 11 (including dirty) - assert_approx(self.nodes[1].getbalance(), 1, 0.001) - assert_approx(self.nodes[1].getbalance(avoid_reuse=False), 11, 0.001) - - def test_getbalances_used(self): - ''' - getbalances and listunspent should pick up on reused addresses - immediately, even for address reusing outputs created before the first - transaction was spending from that address - ''' - self.log.info("Test getbalances used category") - - # node under test should be completely empty - assert_equal(self.nodes[1].getbalance(avoid_reuse=False), 0) - - new_addr = self.nodes[1].getnewaddress() - ret_addr = self.nodes[0].getnewaddress() - - # send multiple transactions, reusing one address - for _ in range(101): - self.nodes[0].sendtoaddress(new_addr, 1) - - self.generate(self.nodes[0], 1) - - # send transaction that should not use all the available outputs - # per the current coin selection algorithm - 
self.nodes[1].sendtoaddress(ret_addr, 5) - - # getbalances and listunspent should show the remaining outputs - # in the reused address as used/reused - assert_unspent(self.nodes[1], total_count=2, total_sum=96, reused_count=1, reused_sum=1, margin=0.01) - assert_balances(self.nodes[1], mine={"used": 1, "trusted": 95}, margin=0.01) - - def test_full_destination_group_is_preferred(self): - ''' - Test the case where [1] only has 101 outputs of 1 BTC in the same reused - address and tries to send a small payment of 0.5 BTC. The wallet - should use 100 outputs from the reused address as inputs and not a - single 1 BTC input, in order to join several outputs from the reused - address. - ''' - self.log.info("Test that full destination groups are preferred in coin selection") - - # Node under test should be empty - assert_equal(self.nodes[1].getbalance(avoid_reuse=False), 0) - - new_addr = self.nodes[1].getnewaddress() - ret_addr = self.nodes[0].getnewaddress() - - # Send 101 outputs of 1 BTC to the same, reused address in the wallet - for _ in range(101): - self.nodes[0].sendtoaddress(new_addr, 1) - - self.generate(self.nodes[0], 1) - - # Sending a transaction that is smaller than each one of the - # available outputs - txid = self.nodes[1].sendtoaddress(address=ret_addr, amount=0.5) - inputs = self.nodes[1].getrawtransaction(txid, 1)["vin"] - - # The transaction should use 100 inputs exactly - assert_equal(len(inputs), 100) - - def test_all_destination_groups_are_used(self): - ''' - Test the case where [1] only has 202 outputs of 1 BTC in the same reused - address and tries to send a payment of 200.5 BTC. The wallet - should use all 202 outputs from the reused address as inputs. - ''' - self.log.info("Test that all destination groups are used") - - # Node under test should be empty - assert_equal(self.nodes[1].getbalance(avoid_reuse=False), 0) - - new_addr = self.nodes[1].getnewaddress() - ret_addr = self.nodes[0].getnewaddress() - - # Send 202 outputs of 1 BTC to the same, reused address in the wallet - for _ in range(202): - self.nodes[0].sendtoaddress(new_addr, 1) - - self.generate(self.nodes[0], 1) - - # Sending a transaction that needs to use the full groups - # of 100 inputs but also the incomplete group of 2 inputs. - txid = self.nodes[1].sendtoaddress(address=ret_addr, amount=200.5) - inputs = self.nodes[1].getrawtransaction(txid, 1)["vin"] - - # The transaction should use 202 inputs exactly - assert_equal(len(inputs), 202) - - -if __name__ == '__main__': - AvoidReuseTest(__file__).main() +#!/usr/bin/env python3) +# Copyright (c) 2018-2022 The Bitcoin Core developers) +# Distributed under the MIT software license, see the accompanying) +# file COPYING or http://www.opensource.org/licenses/mit-license.php.) 
+"""Test the avoid_reuse and setwalletflag features.""") +) +from test_framework.address import address_to_scriptpubkey) +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.util import () + assert_not_equal,) + assert_approx,) + assert_equal,) + assert_raises_rpc_error,) +)) +) +def reset_balance(node, discardaddr):) + '''Throw away all owned coins by the node so it gets a balance of 0.''') + balance = node.getbalance(avoid_reuse=False)) + if balance > 0.5:) + node.sendtoaddress(address=discardaddr, amount=balance, subtractfeefromamount=True, avoid_reuse=False)) +) +def count_unspent(node):) + '''Count the unspent outputs for the given node and return various statistics''') + r = {) + "total": {) + "count": 0,) + "sum": 0,) + },) + "reused": {) + "count": 0,) + "sum": 0,) + },) + }) + supports_reused = True) + for utxo in node.listunspent(minconf=0):) + r["total"]["count"] += 1) + r["total"]["sum"] += utxo["amount"]) + if supports_reused and "reused" in utxo:) + if utxo["reused"]:) + r["reused"]["count"] += 1) + r["reused"]["sum"] += utxo["amount"]) + else:) + supports_reused = False) + r["reused"]["supported"] = supports_reused) + return r) +) +def assert_unspent(node, total_count=None, total_sum=None, reused_supported=None, reused_count=None, reused_sum=None, margin=0.001):) + '''Make assertions about a node's unspent output statistics''') + stats = count_unspent(node)) + if total_count is not None:) + assert_equal(stats["total"]["count"], total_count)) + if total_sum is not None:) + assert_approx(stats["total"]["sum"], total_sum, margin)) + if reused_supported is not None:) + assert_equal(stats["reused"]["supported"], reused_supported)) + if reused_count is not None:) + assert_equal(stats["reused"]["count"], reused_count)) + if reused_sum is not None:) + assert_approx(stats["reused"]["sum"], reused_sum, margin)) +) +def assert_balances(node, mine, margin=0.001):) + '''Make assertions about a node's getbalances output''') + got = node.getbalances()["mine"]) + for k,v in mine.items():) + assert_approx(got[k], v, margin)) +) +class AvoidReuseTest(BitcoinTestFramework):) + def add_options(self, parser):) + self.add_wallet_options(parser)) +) + def set_test_params(self):) + self.num_nodes = 2) + # whitelist peers to speed up tx relay / mempool sync) + self.noban_tx_relay = True) +) + def skip_test_if_missing_module(self):) + self.skip_if_no_wallet()) +) + def run_test(self):) + '''Set up initial chain and run tests defined below''') +) + self.test_persistence()) + self.test_immutable()) +) + self.generate(self.nodes[0], 110)) + self.test_change_remains_change(self.nodes[1])) + reset_balance(self.nodes[1], self.nodes[0].getnewaddress())) + self.test_sending_from_reused_address_without_avoid_reuse()) + reset_balance(self.nodes[1], self.nodes[0].getnewaddress())) + self.test_sending_from_reused_address_fails("legacy")) + reset_balance(self.nodes[1], self.nodes[0].getnewaddress())) + self.test_sending_from_reused_address_fails("p2sh-segwit")) + reset_balance(self.nodes[1], self.nodes[0].getnewaddress())) + self.test_sending_from_reused_address_fails("bech32")) + reset_balance(self.nodes[1], self.nodes[0].getnewaddress())) + self.test_getbalances_used()) + reset_balance(self.nodes[1], self.nodes[0].getnewaddress())) + self.test_full_destination_group_is_preferred()) + reset_balance(self.nodes[1], self.nodes[0].getnewaddress())) + self.test_all_destination_groups_are_used()) +) + def test_persistence(self):) + '''Test that wallet files persist the avoid_reuse 
flag.''') + self.log.info("Test wallet files persist avoid_reuse flag")) +) + # Configure node 1 to use avoid_reuse) + self.nodes[1].setwalletflag('avoid_reuse')) +) + # Flags should be node1.avoid_reuse=false, node2.avoid_reuse=true) + assert_equal(self.nodes[0].getwalletinfo()["avoid_reuse"], False)) + assert_equal(self.nodes[1].getwalletinfo()["avoid_reuse"], True)) +) + self.restart_node(1)) + self.connect_nodes(0, 1)) +) + # Flags should still be node1.avoid_reuse=false, node2.avoid_reuse=true) + assert_equal(self.nodes[0].getwalletinfo()["avoid_reuse"], False)) + assert_equal(self.nodes[1].getwalletinfo()["avoid_reuse"], True)) +) + # Attempting to set flag to its current state should throw) + assert_raises_rpc_error(-8, "Wallet flag is already set to false", self.nodes[0].setwalletflag, 'avoid_reuse', False)) + assert_raises_rpc_error(-8, "Wallet flag is already set to true", self.nodes[1].setwalletflag, 'avoid_reuse', True)) +) + assert_raises_rpc_error(-8, "Unknown wallet flag: abc", self.nodes[0].setwalletflag, 'abc', True)) +) + # Create a wallet with avoid reuse, and test that disabling it afterwards persists) + self.nodes[1].createwallet(wallet_name="avoid_reuse_persist", avoid_reuse=True)) + w = self.nodes[1].get_wallet_rpc("avoid_reuse_persist")) + assert_equal(w.getwalletinfo()["avoid_reuse"], True)) + w.setwalletflag("avoid_reuse", False)) + assert_equal(w.getwalletinfo()["avoid_reuse"], False)) + w.unloadwallet()) + self.nodes[1].loadwallet("avoid_reuse_persist")) + assert_equal(w.getwalletinfo()["avoid_reuse"], False)) + w.unloadwallet()) +) + def test_immutable(self):) + '''Test immutable wallet flags''') + self.log.info("Test immutable wallet flags")) +) + # Attempt to set the disable_private_keys flag; this should not work) + assert_raises_rpc_error(-8, "Wallet flag is immutable", self.nodes[1].setwalletflag, 'disable_private_keys')) +) + tempwallet = ".wallet_avoidreuse.py_test_immutable_wallet.dat") +) + # Create a wallet with disable_private_keys set; this should work) + self.nodes[1].createwallet(wallet_name=tempwallet, disable_private_keys=True)) + w = self.nodes[1].get_wallet_rpc(tempwallet)) +) + # Attempt to unset the disable_private_keys flag; this should not work) + assert_raises_rpc_error(-8, "Wallet flag is immutable", w.setwalletflag, 'disable_private_keys', False)) +) + # Unload temp wallet) + self.nodes[1].unloadwallet(tempwallet)) +) + def test_change_remains_change(self, node):) + self.log.info("Test that change doesn't turn into non-change when spent")) +) + reset_balance(node, node.getnewaddress())) + addr = node.getnewaddress()) + txid = node.sendtoaddress(addr, 1)) + out = node.listunspent(minconf=0, query_options={'minimumAmount': 2})) + assert_equal(len(out), 1)) + assert_equal(out[0]['txid'], txid)) + changeaddr = out[0]['address']) +) + # Make sure it's starting out as change as expected) + assert node.getaddressinfo(changeaddr)['ischange']) + for logical_tx in node.listtransactions():) + assert_not_equal(logical_tx.get('address'), changeaddr)) +) + # Spend it) + reset_balance(node, node.getnewaddress())) +) + # It should still be change) + assert node.getaddressinfo(changeaddr)['ischange']) + for logical_tx in node.listtransactions():) + assert_not_equal(logical_tx.get('address'), changeaddr)) +) + def test_sending_from_reused_address_without_avoid_reuse(self):) + ''') + Test the same as test_sending_from_reused_address_fails, except send the 10 BTC with) + the avoid_reuse flag set to false. 
This means the 10 BTC send should succeed,) + where it fails in test_sending_from_reused_address_fails.) + ''') + self.log.info("Test sending from reused address with avoid_reuse=false")) +) + fundaddr = self.nodes[1].getnewaddress()) + retaddr = self.nodes[0].getnewaddress()) +) + self.nodes[0].sendtoaddress(fundaddr, 10)) + self.generate(self.nodes[0], 1)) +) + # listunspent should show 1 single, unused 10 btc output) + assert_unspent(self.nodes[1], total_count=1, total_sum=10, reused_supported=True, reused_count=0)) + # getbalances should show no used, 10 btc trusted) + assert_balances(self.nodes[1], mine={"used": 0, "trusted": 10})) + # node 0 should not show a used entry, as it does not enable avoid_reuse) + assert "used" not in self.nodes[0].getbalances()["mine"]) +) + self.nodes[1].sendtoaddress(retaddr, 5)) + self.generate(self.nodes[0], 1)) +) + # listunspent should show 1 single, unused 5 btc output) + assert_unspent(self.nodes[1], total_count=1, total_sum=5, reused_supported=True, reused_count=0)) + # getbalances should show no used, 5 btc trusted) + assert_balances(self.nodes[1], mine={"used": 0, "trusted": 5})) +) + self.nodes[0].sendtoaddress(fundaddr, 10)) + self.generate(self.nodes[0], 1)) +) + # listunspent should show 2 total outputs (5, 10 btc), one unused (5), one reused (10)) + assert_unspent(self.nodes[1], total_count=2, total_sum=15, reused_count=1, reused_sum=10)) + # getbalances should show 10 used, 5 btc trusted) + assert_balances(self.nodes[1], mine={"used": 10, "trusted": 5})) +) + self.nodes[1].sendtoaddress(address=retaddr, amount=10, avoid_reuse=False)) +) + # listunspent should show 1 total outputs (5 btc), unused) + assert_unspent(self.nodes[1], total_count=1, total_sum=5, reused_count=0)) + # getbalances should show no used, 5 btc trusted) + assert_balances(self.nodes[1], mine={"used": 0, "trusted": 5})) +) + # node 1 should now have about 5 btc left (for both cases)) + assert_approx(self.nodes[1].getbalance(), 5, 0.001)) + assert_approx(self.nodes[1].getbalance(avoid_reuse=False), 5, 0.001)) +) + def test_sending_from_reused_address_fails(self, second_addr_type):) + ''') + Test the simple case where [1] generates a new address A, then) + [0] sends 10 BTC to A.) + [1] spends 5 BTC from A. (leaving roughly 5 BTC useable)) + [0] sends 10 BTC to A again.) + [1] tries to spend 10 BTC (fails; dirty).) 
+ [1] tries to spend 4 BTC (succeeds; change address sufficient)) + ''') + self.log.info("Test sending from reused {} address fails".format(second_addr_type))) +) + fundaddr = self.nodes[1].getnewaddress(label="", address_type="legacy")) + retaddr = self.nodes[0].getnewaddress()) +) + self.nodes[0].sendtoaddress(fundaddr, 10)) + self.generate(self.nodes[0], 1)) +) + # listunspent should show 1 single, unused 10 btc output) + assert_unspent(self.nodes[1], total_count=1, total_sum=10, reused_supported=True, reused_count=0)) + # getbalances should show no used, 10 btc trusted) + assert_balances(self.nodes[1], mine={"used": 0, "trusted": 10})) +) + self.nodes[1].sendtoaddress(retaddr, 5)) + self.generate(self.nodes[0], 1)) +) + # listunspent should show 1 single, unused 5 btc output) + assert_unspent(self.nodes[1], total_count=1, total_sum=5, reused_supported=True, reused_count=0)) + # getbalances should show no used, 5 btc trusted) + assert_balances(self.nodes[1], mine={"used": 0, "trusted": 5})) +) + if not self.options.descriptors:) + # For the second send, we transmute it to a related single-key address) + # to make sure it's also detected as reuse) + fund_spk = address_to_scriptpubkey(fundaddr).hex()) + fund_decoded = self.nodes[0].decodescript(fund_spk)) + if second_addr_type == "p2sh-segwit":) + new_fundaddr = fund_decoded["segwit"]["p2sh-segwit"]) + elif second_addr_type == "bech32":) + new_fundaddr = fund_decoded["segwit"]["address"]) + else:) + new_fundaddr = fundaddr) + assert_equal(second_addr_type, "legacy")) +) + self.nodes[0].sendtoaddress(new_fundaddr, 10)) + self.generate(self.nodes[0], 1)) +) + # listunspent should show 2 total outputs (5, 10 btc), one unused (5), one reused (10)) + assert_unspent(self.nodes[1], total_count=2, total_sum=15, reused_count=1, reused_sum=10)) + # getbalances should show 10 used, 5 btc trusted) + assert_balances(self.nodes[1], mine={"used": 10, "trusted": 5})) +) + # node 1 should now have a balance of 5 (no dirty) or 15 (including dirty)) + assert_approx(self.nodes[1].getbalance(), 5, 0.001)) + assert_approx(self.nodes[1].getbalance(avoid_reuse=False), 15, 0.001)) +) + assert_raises_rpc_error(-6, "Insufficient funds", self.nodes[1].sendtoaddress, retaddr, 10)) +) + self.nodes[1].sendtoaddress(retaddr, 4)) +) + # listunspent should show 2 total outputs (1, 10 btc), one unused (1), one reused (10)) + assert_unspent(self.nodes[1], total_count=2, total_sum=11, reused_count=1, reused_sum=10)) + # getbalances should show 10 used, 1 btc trusted) + assert_balances(self.nodes[1], mine={"used": 10, "trusted": 1})) +) + # node 1 should now have about 1 btc left (no dirty) and 11 (including dirty)) + assert_approx(self.nodes[1].getbalance(), 1, 0.001)) + assert_approx(self.nodes[1].getbalance(avoid_reuse=False), 11, 0.001)) +) + def test_getbalances_used(self):) + ''') + getbalances and listunspent should pick up on reused addresses) + immediately, even for address reusing outputs created before the first) + transaction was spending from that address) + ''') + self.log.info("Test getbalances used category")) +) + # node under test should be completely empty) + assert_equal(self.nodes[1].getbalance(avoid_reuse=False), 0)) +) + new_addr = self.nodes[1].getnewaddress()) + ret_addr = self.nodes[0].getnewaddress()) +) + # send multiple transactions, reusing one address) + for _ in range(101):) + self.nodes[0].sendtoaddress(new_addr, 1)) +) + self.generate(self.nodes[0], 1)) +) + # send transaction that should not use all the available outputs) + # per the current 
coin selection algorithm) + self.nodes[1].sendtoaddress(ret_addr, 5)) +) + # getbalances and listunspent should show the remaining outputs) + # in the reused address as used/reused) + assert_unspent(self.nodes[1], total_count=2, total_sum=96, reused_count=1, reused_sum=1, margin=0.01)) + assert_balances(self.nodes[1], mine={"used": 1, "trusted": 95}, margin=0.01)) +) + def test_full_destination_group_is_preferred(self):) + ''') + Test the case where [1] only has 101 outputs of 1 BTC in the same reused) + address and tries to send a small payment of 0.5 BTC. The wallet) + should use 100 outputs from the reused address as inputs and not a) + single 1 BTC input, in order to join several outputs from the reused) + address.) + ''') + self.log.info("Test that full destination groups are preferred in coin selection")) +) + # Node under test should be empty) + assert_equal(self.nodes[1].getbalance(avoid_reuse=False), 0)) +) + new_addr = self.nodes[1].getnewaddress()) + ret_addr = self.nodes[0].getnewaddress()) +) + # Send 101 outputs of 1 BTC to the same, reused address in the wallet) + for _ in range(101):) + self.nodes[0].sendtoaddress(new_addr, 1)) +) + self.generate(self.nodes[0], 1)) +) + # Sending a transaction that is smaller than each one of the) + # available outputs) + txid = self.nodes[1].sendtoaddress(address=ret_addr, amount=0.5)) + inputs = self.nodes[1].getrawtransaction(txid, 1)["vin"]) +) + # The transaction should use 100 inputs exactly) + assert_equal(len(inputs), 100)) +) + def test_all_destination_groups_are_used(self):) + ''') + Test the case where [1] only has 202 outputs of 1 BTC in the same reused) + address and tries to send a payment of 200.5 BTC. The wallet) + should use all 202 outputs from the reused address as inputs.) + ''') + self.log.info("Test that all destination groups are used")) +) + # Node under test should be empty) + assert_equal(self.nodes[1].getbalance(avoid_reuse=False), 0)) +) + new_addr = self.nodes[1].getnewaddress()) + ret_addr = self.nodes[0].getnewaddress()) +) + # Send 202 outputs of 1 BTC to the same, reused address in the wallet) + for _ in range(202):) + self.nodes[0].sendtoaddress(new_addr, 1)) +) + self.generate(self.nodes[0], 1)) +) + # Sending a transaction that needs to use the full groups) + # of 100 inputs but also the incomplete group of 2 inputs.) + txid = self.nodes[1].sendtoaddress(address=ret_addr, amount=200.5)) + inputs = self.nodes[1].getrawtransaction(txid, 1)["vin"]) +) + # The transaction should use 202 inputs exactly) + assert_equal(len(inputs), 202)) +) +) +if __name__ == '__main__':) + AvoidReuseTest(__file__).main()) diff --git a/test/functional/wallet_descriptor.py b/test/functional/wallet_descriptor.py index 62fdee69a724b4..142c58371606f1 100755 --- a/test/functional/wallet_descriptor.py +++ b/test/functional/wallet_descriptor.py @@ -1,286 +1,286 @@ -#!/usr/bin/env python3 -# Copyright (c) 2019-2022 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test descriptor wallet function.""" - -try: - import sqlite3 -except ImportError: - pass - -import concurrent.futures - -from test_framework.blocktools import COINBASE_MATURITY -from test_framework.descriptors import descsum_create -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_not_equal, - assert_equal, - assert_raises_rpc_error -) -from test_framework.wallet_util import WalletUnlock - - -class WalletDescriptorTest(BitcoinTestFramework): - def add_options(self, parser): - self.add_wallet_options(parser, legacy=False) - - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 1 - self.extra_args = [['-keypool=100']] - self.wallet_names = [] - - def skip_test_if_missing_module(self): - self.skip_if_no_wallet() - self.skip_if_no_sqlite() - self.skip_if_no_py_sqlite3() - - def test_concurrent_writes(self): - self.log.info("Test sqlite concurrent writes are in the correct order") - self.restart_node(0, extra_args=["-unsafesqlitesync=0"]) - self.nodes[0].createwallet(wallet_name="concurrency", blank=True) - wallet = self.nodes[0].get_wallet_rpc("concurrency") - # First import a descriptor that uses hardened dervation so that topping up - # Will require writing a ton to db - wallet.importdescriptors([{"desc":descsum_create("wpkh(tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg/0h/0h/*h)"), "timestamp": "now", "active": True}]) - with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread: - topup = thread.submit(wallet.keypoolrefill, newsize=1000) - - # Then while the topup is running, we need to do something that will call - # ChainStateFlushed which will trigger a write to the db, hopefully at the - # same time that the topup still has an open db transaction. - self.nodes[0].cli.gettxoutsetinfo() - assert_equal(topup.result(), None) - - wallet.unloadwallet() - - # Check that everything was written - wallet_db = self.nodes[0].wallets_path / "concurrency" / self.wallet_data_filename - conn = sqlite3.connect(wallet_db) - with conn: - # Retrieve the bestblock_nomerkle record - bestblock_rec = conn.execute("SELECT value FROM main WHERE hex(key) = '1262657374626C6F636B5F6E6F6D65726B6C65'").fetchone()[0] - # Retrieve the number of descriptor cache records - # Since we store binary data, sqlite's comparison operators don't work everywhere - # so just retrieve all records and process them ourselves. 
- db_keys = conn.execute("SELECT key FROM main").fetchall() - cache_records = len([k[0] for k in db_keys if b"walletdescriptorcache" in k[0]]) - conn.close() - - assert_equal(bestblock_rec[5:37][::-1].hex(), self.nodes[0].getbestblockhash()) - assert_equal(cache_records, 1000) - - def run_test(self): - if self.is_bdb_compiled(): - # Make a legacy wallet and check it is BDB - self.nodes[0].createwallet(wallet_name="legacy1", descriptors=False) - wallet_info = self.nodes[0].getwalletinfo() - assert_equal(wallet_info['format'], 'bdb') - self.nodes[0].unloadwallet("legacy1") - else: - self.log.warning("Skipping BDB test") - - # Make a descriptor wallet - self.log.info("Making a descriptor wallet") - self.nodes[0].createwallet(wallet_name="desc1", descriptors=True) - - # A descriptor wallet should have 100 addresses * 4 types = 400 keys - self.log.info("Checking wallet info") - wallet_info = self.nodes[0].getwalletinfo() - assert_equal(wallet_info['format'], 'sqlite') - assert_equal(wallet_info['keypoolsize'], 400) - assert_equal(wallet_info['keypoolsize_hd_internal'], 400) - assert 'keypoololdest' not in wallet_info - - # Check that getnewaddress works - self.log.info("Test that getnewaddress and getrawchangeaddress work") - addr = self.nodes[0].getnewaddress("", "legacy") - addr_info = self.nodes[0].getaddressinfo(addr) - assert addr_info['desc'].startswith('pkh(') - assert_equal(addr_info['hdkeypath'], 'm/44h/1h/0h/0/0') - - addr = self.nodes[0].getnewaddress("", "p2sh-segwit") - addr_info = self.nodes[0].getaddressinfo(addr) - assert addr_info['desc'].startswith('sh(wpkh(') - assert_equal(addr_info['hdkeypath'], 'm/49h/1h/0h/0/0') - - addr = self.nodes[0].getnewaddress("", "bech32") - addr_info = self.nodes[0].getaddressinfo(addr) - assert addr_info['desc'].startswith('wpkh(') - assert_equal(addr_info['hdkeypath'], 'm/84h/1h/0h/0/0') - - addr = self.nodes[0].getnewaddress("", "bech32m") - addr_info = self.nodes[0].getaddressinfo(addr) - assert addr_info['desc'].startswith('tr(') - assert_equal(addr_info['hdkeypath'], 'm/86h/1h/0h/0/0') - - # Check that getrawchangeaddress works - addr = self.nodes[0].getrawchangeaddress("legacy") - addr_info = self.nodes[0].getaddressinfo(addr) - assert addr_info['desc'].startswith('pkh(') - assert_equal(addr_info['hdkeypath'], 'm/44h/1h/0h/1/0') - - addr = self.nodes[0].getrawchangeaddress("p2sh-segwit") - addr_info = self.nodes[0].getaddressinfo(addr) - assert addr_info['desc'].startswith('sh(wpkh(') - assert_equal(addr_info['hdkeypath'], 'm/49h/1h/0h/1/0') - - addr = self.nodes[0].getrawchangeaddress("bech32") - addr_info = self.nodes[0].getaddressinfo(addr) - assert addr_info['desc'].startswith('wpkh(') - assert_equal(addr_info['hdkeypath'], 'm/84h/1h/0h/1/0') - - addr = self.nodes[0].getrawchangeaddress("bech32m") - addr_info = self.nodes[0].getaddressinfo(addr) - assert addr_info['desc'].startswith('tr(') - assert_equal(addr_info['hdkeypath'], 'm/86h/1h/0h/1/0') - - # Make a wallet to receive coins at - self.nodes[0].createwallet(wallet_name="desc2", descriptors=True) - recv_wrpc = self.nodes[0].get_wallet_rpc("desc2") - send_wrpc = self.nodes[0].get_wallet_rpc("desc1") - - # Generate some coins - self.generatetoaddress(self.nodes[0], COINBASE_MATURITY + 1, send_wrpc.getnewaddress()) - - # Make transactions - self.log.info("Test sending and receiving") - addr = recv_wrpc.getnewaddress() - send_wrpc.sendtoaddress(addr, 10) - - # Make sure things are disabled - self.log.info("Test disabled RPCs") - assert_raises_rpc_error(-4, "Only legacy wallets are 
supported by this command", recv_wrpc.rpc.importprivkey, "cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW") - assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importpubkey, send_wrpc.getaddressinfo(send_wrpc.getnewaddress())["pubkey"]) - assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importaddress, recv_wrpc.getnewaddress()) - assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importmulti, []) - assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.addmultisigaddress, 1, [recv_wrpc.getnewaddress()]) - assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.dumpprivkey, recv_wrpc.getnewaddress()) - assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.dumpwallet, 'wallet.dump') - assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importwallet, 'wallet.dump') - assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.sethdseed) - - self.log.info("Test encryption") - # Get the master fingerprint before encrypt - info1 = send_wrpc.getaddressinfo(send_wrpc.getnewaddress()) - - # Encrypt wallet 0 - send_wrpc.encryptwallet('pass') - with WalletUnlock(send_wrpc, "pass"): - addr = send_wrpc.getnewaddress() - info2 = send_wrpc.getaddressinfo(addr) - assert info1['hdmasterfingerprint'] != info2['hdmasterfingerprint'] - assert 'hdmasterfingerprint' in send_wrpc.getaddressinfo(send_wrpc.getnewaddress()) - info3 = send_wrpc.getaddressinfo(addr) - assert_equal(info2['desc'], info3['desc']) - - self.log.info("Test that getnewaddress still works after keypool is exhausted in an encrypted wallet") - for _ in range(500): - send_wrpc.getnewaddress() - - self.log.info("Test that unlock is needed when deriving only hardened keys in an encrypted wallet") - with WalletUnlock(send_wrpc, "pass"): - send_wrpc.importdescriptors([{ - "desc": "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/*h)#y4dfsj7n", - "timestamp": "now", - "range": [0,10], - "active": True - }]) - # Exhaust keypool of 100 - for _ in range(100): - send_wrpc.getnewaddress(address_type='bech32') - # This should now error - assert_raises_rpc_error(-12, "Keypool ran out, please call keypoolrefill first", send_wrpc.getnewaddress, '', 'bech32') - - self.log.info("Test born encrypted wallets") - self.nodes[0].createwallet('desc_enc', False, False, 'pass', False, True) - enc_rpc = self.nodes[0].get_wallet_rpc('desc_enc') - enc_rpc.getnewaddress() # Makes sure that we can get a new address from a born encrypted wallet - - self.log.info("Test blank descriptor wallets") - self.nodes[0].createwallet(wallet_name='desc_blank', blank=True, descriptors=True) - blank_rpc = self.nodes[0].get_wallet_rpc('desc_blank') - assert_raises_rpc_error(-4, 'This wallet has no available keys', blank_rpc.getnewaddress) - - self.log.info("Test descriptor wallet with disabled private keys") - self.nodes[0].createwallet(wallet_name='desc_no_priv', disable_private_keys=True, descriptors=True) - nopriv_rpc = self.nodes[0].get_wallet_rpc('desc_no_priv') - assert_raises_rpc_error(-4, 'This wallet has no available keys', nopriv_rpc.getnewaddress) - - self.log.info("Test descriptor exports") - self.nodes[0].createwallet(wallet_name='desc_export', descriptors=True) - exp_rpc = 
self.nodes[0].get_wallet_rpc('desc_export')
-        self.nodes[0].createwallet(wallet_name='desc_import', disable_private_keys=True, descriptors=True)
-        imp_rpc = self.nodes[0].get_wallet_rpc('desc_import')
-
-        addr_types = [('legacy', False, 'pkh(', '44h/1h/0h', -13),
-                      ('p2sh-segwit', False, 'sh(wpkh(', '49h/1h/0h', -14),
-                      ('bech32', False, 'wpkh(', '84h/1h/0h', -13),
-                      ('bech32m', False, 'tr(', '86h/1h/0h', -13),
-                      ('legacy', True, 'pkh(', '44h/1h/0h', -13),
-                      ('p2sh-segwit', True, 'sh(wpkh(', '49h/1h/0h', -14),
-                      ('bech32', True, 'wpkh(', '84h/1h/0h', -13),
-                      ('bech32m', True, 'tr(', '86h/1h/0h', -13)]
-
-        for addr_type, internal, desc_prefix, deriv_path, int_idx in addr_types:
-            int_str = 'internal' if internal else 'external'
-
-            self.log.info("Testing descriptor address type for {} {}".format(addr_type, int_str))
-            if internal:
-                addr = exp_rpc.getrawchangeaddress(address_type=addr_type)
-            else:
-                addr = exp_rpc.getnewaddress(address_type=addr_type)
-            desc = exp_rpc.getaddressinfo(addr)['parent_desc']
-            assert_equal(desc_prefix, desc[0:len(desc_prefix)])
-            idx = desc.index('/') + 1
-            assert_equal(deriv_path, desc[idx:idx + 9])
-            if internal:
-                assert_equal('1', desc[int_idx])
-            else:
-                assert_equal('0', desc[int_idx])
-
-            self.log.info("Testing the same descriptor is returned for address type {} {}".format(addr_type, int_str))
-            for i in range(0, 10):
-                if internal:
-                    addr = exp_rpc.getrawchangeaddress(address_type=addr_type)
-                else:
-                    addr = exp_rpc.getnewaddress(address_type=addr_type)
-                test_desc = exp_rpc.getaddressinfo(addr)['parent_desc']
-                assert_equal(desc, test_desc)
-
-            self.log.info("Testing import of exported {} descriptor".format(addr_type))
-            imp_rpc.importdescriptors([{
-                'desc': desc,
-                'active': True,
-                'next_index': 11,
-                'timestamp': 'now',
-                'internal': internal
-            }])
-
-            for i in range(0, 10):
-                if internal:
-                    exp_addr = exp_rpc.getrawchangeaddress(address_type=addr_type)
-                    imp_addr = imp_rpc.getrawchangeaddress(address_type=addr_type)
-                else:
-                    exp_addr = exp_rpc.getnewaddress(address_type=addr_type)
-                    imp_addr = imp_rpc.getnewaddress(address_type=addr_type)
-                assert_equal(exp_addr, imp_addr)
-
-        self.log.info("Test that loading descriptor wallet containing legacy key types throws error")
-        self.nodes[0].createwallet(wallet_name="crashme", descriptors=True)
-        self.nodes[0].unloadwallet("crashme")
-        wallet_db = self.nodes[0].wallets_path / "crashme" / self.wallet_data_filename
-        conn = sqlite3.connect(wallet_db)
-        with conn:
-            # add "cscript" entry: key type is uint160 (20 bytes), value type is CScript (zero-length here)
-            conn.execute('INSERT INTO main VALUES(?, ?)', (b'\x07cscript' + b'\x00'*20, b'\x00'))
-        conn.close()
-        assert_raises_rpc_error(-4, "Unexpected legacy entry in descriptor wallet found.", self.nodes[0].loadwallet, "crashme")
-
-        self.test_concurrent_writes()
-
-
-if __name__ == '__main__':
-    WalletDescriptorTest(__file__).main()
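The "crashme" case at the end of the listing above injects a raw legacy record to prove that loading the wallet then fails. Its key follows the wallet record-key shape: a compact-size length, the ASCII type string, then the serialized key, here a 20-byte uint160 script hash. A small sketch that rebuilds exactly the key the test INSERTs (make_legacy_cscript_key is a hypothetical helper, not test-framework API):

# Sketch: build the legacy "cscript" record key injected by the test above.
def make_legacy_cscript_key(script_id: bytes) -> bytes:
    assert len(script_id) == 20  # uint160, i.e. HASH160 of a script
    return bytes([len(b"cscript")]) + b"cscript" + script_id

assert make_legacy_cscript_key(b"\x00" * 20) == b"\x07cscript" + b"\x00" * 20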
+"""Test descriptor wallet function.""") +) +try:) + import sqlite3) +except ImportError:) + pass) +) +import concurrent.futures) +) +from test_framework.blocktools import COINBASE_MATURITY) +from test_framework.descriptors import descsum_create) +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.util import () + assert_not_equal,) + assert_equal,) + assert_raises_rpc_error) +)) +from test_framework.wallet_util import WalletUnlock) +) +) +class WalletDescriptorTest(BitcoinTestFramework):) + def add_options(self, parser):) + self.add_wallet_options(parser, legacy=False)) +) + def set_test_params(self):) + self.setup_clean_chain = True) + self.num_nodes = 1) + self.extra_args = [['-keypool=100']]) + self.wallet_names = []) +) + def skip_test_if_missing_module(self):) + self.skip_if_no_wallet()) + self.skip_if_no_sqlite()) + self.skip_if_no_py_sqlite3()) +) + def test_concurrent_writes(self):) + self.log.info("Test sqlite concurrent writes are in the correct order")) + self.restart_node(0, extra_args=["-unsafesqlitesync=0"])) + self.nodes[0].createwallet(wallet_name="concurrency", blank=True)) + wallet = self.nodes[0].get_wallet_rpc("concurrency")) + # First import a descriptor that uses hardened dervation so that topping up) + # Will require writing a ton to db) + wallet.importdescriptors([{"desc":descsum_create("wpkh(tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg/0h/0h/*h)"), "timestamp": "now", "active": True}])) + with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread:) + topup = thread.submit(wallet.keypoolrefill, newsize=1000)) +) + # Then while the topup is running, we need to do something that will call) + # ChainStateFlushed which will trigger a write to the db, hopefully at the) + # same time that the topup still has an open db transaction.) + self.nodes[0].cli.gettxoutsetinfo()) + assert_equal(topup.result(), None)) +) + wallet.unloadwallet()) +) + # Check that everything was written) + wallet_db = self.nodes[0].wallets_path / "concurrency" / self.wallet_data_filename) + conn = sqlite3.connect(wallet_db)) + with conn:) + # Retrieve the bestblock_nomerkle record) + bestblock_rec = conn.execute("SELECT value FROM main WHERE hex(key) = '1262657374626C6F636B5F6E6F6D65726B6C65'").fetchone()[0]) + # Retrieve the number of descriptor cache records) + # Since we store binary data, sqlite's comparison operators don't work everywhere) + # so just retrieve all records and process them ourselves.) 
+    def run_test(self):
+        if self.is_bdb_compiled():
+            # Make a legacy wallet and check it is BDB
+            self.nodes[0].createwallet(wallet_name="legacy1", descriptors=False)
+            wallet_info = self.nodes[0].getwalletinfo()
+            assert_equal(wallet_info['format'], 'bdb')
+            self.nodes[0].unloadwallet("legacy1")
+        else:
+            self.log.warning("Skipping BDB test")
+
+        # Make a descriptor wallet
+        self.log.info("Making a descriptor wallet")
+        self.nodes[0].createwallet(wallet_name="desc1", descriptors=True)
+
+        # A descriptor wallet should have 100 addresses * 4 types = 400 keys
+        self.log.info("Checking wallet info")
+        wallet_info = self.nodes[0].getwalletinfo()
+        assert_equal(wallet_info['format'], 'sqlite')
+        assert_equal(wallet_info['keypoolsize'], 400)
+        assert_equal(wallet_info['keypoolsize_hd_internal'], 400)
+        assert 'keypoololdest' not in wallet_info
+
+        # Check that getnewaddress works
+        self.log.info("Test that getnewaddress and getrawchangeaddress work")
+        addr = self.nodes[0].getnewaddress("", "legacy")
+        addr_info = self.nodes[0].getaddressinfo(addr)
+        assert addr_info['desc'].startswith('pkh(')
+        assert_equal(addr_info['hdkeypath'], 'm/44h/1h/0h/0/0')
+
+        addr = self.nodes[0].getnewaddress("", "p2sh-segwit")
+        addr_info = self.nodes[0].getaddressinfo(addr)
+        assert addr_info['desc'].startswith('sh(wpkh(')
+        assert_equal(addr_info['hdkeypath'], 'm/49h/1h/0h/0/0')
+
+        addr = self.nodes[0].getnewaddress("", "bech32")
+        addr_info = self.nodes[0].getaddressinfo(addr)
+        assert addr_info['desc'].startswith('wpkh(')
+        assert_equal(addr_info['hdkeypath'], 'm/84h/1h/0h/0/0')
+
+        addr = self.nodes[0].getnewaddress("", "bech32m")
+        addr_info = self.nodes[0].getaddressinfo(addr)
+        assert addr_info['desc'].startswith('tr(')
+        assert_equal(addr_info['hdkeypath'], 'm/86h/1h/0h/0/0')
+
+        # Check that getrawchangeaddress works
+        addr = self.nodes[0].getrawchangeaddress("legacy")
+        addr_info = self.nodes[0].getaddressinfo(addr)
+        assert addr_info['desc'].startswith('pkh(')
+        assert_equal(addr_info['hdkeypath'], 'm/44h/1h/0h/1/0')
+
+        addr = self.nodes[0].getrawchangeaddress("p2sh-segwit")
+        addr_info = self.nodes[0].getaddressinfo(addr)
+        assert addr_info['desc'].startswith('sh(wpkh(')
+        assert_equal(addr_info['hdkeypath'], 'm/49h/1h/0h/1/0')
+
+        addr = self.nodes[0].getrawchangeaddress("bech32")
+        addr_info = self.nodes[0].getaddressinfo(addr)
+        assert addr_info['desc'].startswith('wpkh(')
+        assert_equal(addr_info['hdkeypath'], 'm/84h/1h/0h/1/0')
+
+        addr = self.nodes[0].getrawchangeaddress("bech32m")
+        addr_info = self.nodes[0].getaddressinfo(addr)
+        assert addr_info['desc'].startswith('tr(')
+        assert_equal(addr_info['hdkeypath'], 'm/86h/1h/0h/1/0')
+
+        # Make a wallet to receive coins at
+        self.nodes[0].createwallet(wallet_name="desc2", descriptors=True)
+        recv_wrpc = self.nodes[0].get_wallet_rpc("desc2")
+        send_wrpc = self.nodes[0].get_wallet_rpc("desc1")
+
+        # Generate some coins
+        self.generatetoaddress(self.nodes[0], COINBASE_MATURITY + 1, send_wrpc.getnewaddress())
+
+        # Make transactions
+        self.log.info("Test sending and receiving")
+        addr = recv_wrpc.getnewaddress()
+        send_wrpc.sendtoaddress(addr, 10)
+
+        # Make sure things are disabled
+        self.log.info("Test disabled RPCs")
+        assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importprivkey, "cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW")
+        assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importpubkey, send_wrpc.getaddressinfo(send_wrpc.getnewaddress())["pubkey"])
+        assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importaddress, recv_wrpc.getnewaddress())
+        assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importmulti, [])
+        assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.addmultisigaddress, 1, [recv_wrpc.getnewaddress()])
+        assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.dumpprivkey, recv_wrpc.getnewaddress())
+        assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.dumpwallet, 'wallet.dump')
+        assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.importwallet, 'wallet.dump')
+        assert_raises_rpc_error(-4, "Only legacy wallets are supported by this command", recv_wrpc.rpc.sethdseed)
+
+        self.log.info("Test encryption")
+        # Get the master fingerprint before encrypt
+        info1 = send_wrpc.getaddressinfo(send_wrpc.getnewaddress())
+
+        # Encrypt wallet 0
+        send_wrpc.encryptwallet('pass')
+        with WalletUnlock(send_wrpc, "pass"):
+            addr = send_wrpc.getnewaddress()
+            info2 = send_wrpc.getaddressinfo(addr)
+            assert_not_equal(info1['hdmasterfingerprint'], info2['hdmasterfingerprint'])
+        assert 'hdmasterfingerprint' in send_wrpc.getaddressinfo(send_wrpc.getnewaddress())
+        info3 = send_wrpc.getaddressinfo(addr)
+        assert_equal(info2['desc'], info3['desc'])
+
+        self.log.info("Test that getnewaddress still works after keypool is exhausted in an encrypted wallet")
+        for _ in range(500):
+            send_wrpc.getnewaddress()
+
+        self.log.info("Test that unlock is needed when deriving only hardened keys in an encrypted wallet")
+        with WalletUnlock(send_wrpc, "pass"):
+            send_wrpc.importdescriptors([{
+                "desc": "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/*h)#y4dfsj7n",
+                "timestamp": "now",
+                "range": [0,10],
+                "active": True
+            }])
+        # Exhaust keypool of 100
+        for _ in range(100):
+            send_wrpc.getnewaddress(address_type='bech32')
+        # This should now error
+        assert_raises_rpc_error(-12, "Keypool ran out, please call keypoolrefill first", send_wrpc.getnewaddress, '', 'bech32')
+
+        self.log.info("Test born encrypted wallets")
+        self.nodes[0].createwallet('desc_enc', False, False, 'pass', False, True)
+        enc_rpc = self.nodes[0].get_wallet_rpc('desc_enc')
+        enc_rpc.getnewaddress() # Makes sure that we can get a new address from a born encrypted wallet
+
+        self.log.info("Test blank descriptor wallets")
+        self.nodes[0].createwallet(wallet_name='desc_blank', blank=True, descriptors=True)
+        blank_rpc = self.nodes[0].get_wallet_rpc('desc_blank')
+        assert_raises_rpc_error(-4, 'This wallet has no available keys', blank_rpc.getnewaddress)
+
+        self.log.info("Test descriptor wallet with disabled private keys")
+        self.nodes[0].createwallet(wallet_name='desc_no_priv', disable_private_keys=True, descriptors=True)
+        nopriv_rpc = self.nodes[0].get_wallet_rpc('desc_no_priv')
+        assert_raises_rpc_error(-4, 'This wallet has no available keys', nopriv_rpc.getnewaddress)
+
+        self.log.info("Test descriptor exports")
+        self.nodes[0].createwallet(wallet_name='desc_export', descriptors=True)
+        exp_rpc = self.nodes[0].get_wallet_rpc('desc_export')
+        self.nodes[0].createwallet(wallet_name='desc_import', disable_private_keys=True, descriptors=True)
+        imp_rpc = self.nodes[0].get_wallet_rpc('desc_import')
+
+        addr_types = [('legacy', False, 'pkh(', '44h/1h/0h', -13),
+                      ('p2sh-segwit', False, 'sh(wpkh(', '49h/1h/0h', -14),
+                      ('bech32', False, 'wpkh(', '84h/1h/0h', -13),
+                      ('bech32m', False, 'tr(', '86h/1h/0h', -13),
+                      ('legacy', True, 'pkh(', '44h/1h/0h', -13),
+                      ('p2sh-segwit', True, 'sh(wpkh(', '49h/1h/0h', -14),
+                      ('bech32', True, 'wpkh(', '84h/1h/0h', -13),
+                      ('bech32m', True, 'tr(', '86h/1h/0h', -13)]
+
+        for addr_type, internal, desc_prefix, deriv_path, int_idx in addr_types:
+            int_str = 'internal' if internal else 'external'
+
+            self.log.info("Testing descriptor address type for {} {}".format(addr_type, int_str))
+            if internal:
+                addr = exp_rpc.getrawchangeaddress(address_type=addr_type)
+            else:
+                addr = exp_rpc.getnewaddress(address_type=addr_type)
+            desc = exp_rpc.getaddressinfo(addr)['parent_desc']
+            assert_equal(desc_prefix, desc[0:len(desc_prefix)])
+            idx = desc.index('/') + 1
+            assert_equal(deriv_path, desc[idx:idx + 9])
+            if internal:
+                assert_equal('1', desc[int_idx])
+            else:
+                assert_equal('0', desc[int_idx])
+
+            self.log.info("Testing the same descriptor is returned for address type {} {}".format(addr_type, int_str))
+            for i in range(0, 10):
+                if internal:
+                    addr = exp_rpc.getrawchangeaddress(address_type=addr_type)
+                else:
+                    addr = exp_rpc.getnewaddress(address_type=addr_type)
+                test_desc = exp_rpc.getaddressinfo(addr)['parent_desc']
+                assert_equal(desc, test_desc)
+
+            self.log.info("Testing import of exported {} descriptor".format(addr_type))
+            imp_rpc.importdescriptors([{
+                'desc': desc,
+                'active': True,
+                'next_index': 11,
+                'timestamp': 'now',
+                'internal': internal
+            }])
+
+            for i in range(0, 10):
+                if internal:
+                    exp_addr = exp_rpc.getrawchangeaddress(address_type=addr_type)
+                    imp_addr = imp_rpc.getrawchangeaddress(address_type=addr_type)
+                else:
+                    exp_addr = exp_rpc.getnewaddress(address_type=addr_type)
+                    imp_addr = imp_rpc.getnewaddress(address_type=addr_type)
+                assert_equal(exp_addr, imp_addr)
+
+        self.log.info("Test that loading descriptor wallet containing legacy key types throws error")
+        self.nodes[0].createwallet(wallet_name="crashme", descriptors=True)
+        self.nodes[0].unloadwallet("crashme")
+        wallet_db = self.nodes[0].wallets_path / "crashme" / self.wallet_data_filename
+        conn = sqlite3.connect(wallet_db)
+        with conn:
+            # add "cscript" entry: key type is uint160 (20 bytes), value type is CScript (zero-length here)
+            conn.execute('INSERT INTO main VALUES(?, ?)', (b'\x07cscript' + b'\x00'*20, b'\x00'))
+        conn.close()
+        assert_raises_rpc_error(-4, "Unexpected legacy entry in descriptor wallet found.", self.nodes[0].loadwallet, "crashme")
+
+        self.test_concurrent_writes()
+
+
+if __name__ == '__main__':
+    WalletDescriptorTest(__file__).main()
diff --git a/test/functional/wallet_dump.py b/test/functional/wallet_dump.py
index 21ce42a0119442..a5e5ec63f35008 100755
--- a/test/functional/wallet_dump.py
+++ b/test/functional/wallet_dump.py
@@ -1,227 +1,227 @@
-#!/usr/bin/env python3
-# Copyright (c) 2016-2022 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or 
http://www.opensource.org/licenses/mit-license.php. -"""Test the dumpwallet RPC.""" -import datetime -import time - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_not_equal, - assert_equal, - assert_raises_rpc_error, -) -from test_framework.wallet_util import WalletUnlock - - -def read_dump(file_name, addrs, script_addrs, hd_master_addr_old): - """ - Read the given dump, count the addrs that match, count change and reserve. - Also check that the old hd_master is inactive - """ - with open(file_name, encoding='utf8') as inputfile: - found_comments = [] - found_legacy_addr = 0 - found_p2sh_segwit_addr = 0 - found_bech32_addr = 0 - found_script_addr = 0 - found_addr_chg = 0 - found_addr_rsv = 0 - hd_master_addr_ret = None - for line in inputfile: - line = line.strip() - if not line: - continue - if line[0] == '#': - found_comments.append(line) - else: - # split out some data - key_date_label, comment = line.split("#") - key_date_label = key_date_label.split(" ") - # key = key_date_label[0] - date = key_date_label[1] - keytype = key_date_label[2] - - imported_key = date == '1970-01-01T00:00:01Z' - if imported_key: - # Imported keys have multiple addresses, no label (keypath) and timestamp - # Skip them - continue - - addr_keypath = comment.split(" addr=")[1] - addr = addr_keypath.split(" ")[0] - keypath = None - if keytype == "inactivehdseed=1": - # ensure the old master is still available - assert hd_master_addr_old == addr - elif keytype == "hdseed=1": - # ensure we have generated a new hd master key - assert hd_master_addr_old != addr - hd_master_addr_ret = addr - elif keytype == "script=1": - # scripts don't have keypaths - keypath = None - else: - keypath = addr_keypath.rstrip().split("hdkeypath=")[1] - - # count key types - for addrObj in addrs: - if addrObj['address'] == addr.split(",")[0] and addrObj['hdkeypath'] == keypath and keytype == "label=": - if addr.startswith('m') or addr.startswith('n'): - # P2PKH address - found_legacy_addr += 1 - elif addr.startswith('2'): - # P2SH-segwit address - found_p2sh_segwit_addr += 1 - elif addr.startswith('bcrt1'): - found_bech32_addr += 1 - break - elif keytype == "change=1": - found_addr_chg += 1 - break - elif keytype == "reserve=1": - found_addr_rsv += 1 - break - - # count scripts - for script_addr in script_addrs: - if script_addr == addr.rstrip() and keytype == "script=1": - found_script_addr += 1 - break - - return found_comments, found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret - - -class WalletDumpTest(BitcoinTestFramework): - def add_options(self, parser): - self.add_wallet_options(parser, descriptors=False) - - def set_test_params(self): - self.num_nodes = 1 - self.extra_args = [["-keypool=90", "-addresstype=legacy"]] - self.rpc_timeout = 120 - - def skip_test_if_missing_module(self): - self.skip_if_no_wallet() - - def setup_network(self): - self.add_nodes(self.num_nodes, extra_args=self.extra_args) - self.start_nodes() - - def run_test(self): - self.nodes[0].createwallet("dump") - - wallet_unenc_dump = self.nodes[0].datadir_path / "wallet.unencrypted.dump" - wallet_enc_dump = self.nodes[0].datadir_path / "wallet.encrypted.dump" - - # generate 30 addresses to compare against the dump - # - 10 legacy P2PKH - # - 10 P2SH-segwit - # - 10 bech32 - test_addr_count = 10 - addrs = [] - for address_type in ['legacy', 'p2sh-segwit', 'bech32']: - for _ in range(test_addr_count): - addr = 
self.nodes[0].getnewaddress(address_type=address_type) - vaddr = self.nodes[0].getaddressinfo(addr) # required to get hd keypath - addrs.append(vaddr) - - # Test scripts dump by adding a 1-of-1 multisig address - multisig_addr = self.nodes[0].addmultisigaddress(1, [addrs[1]["address"]])["address"] - - # Refill the keypool. getnewaddress() refills the keypool *before* taking a key from - # the keypool, so the final call to getnewaddress leaves the keypool with one key below - # its capacity - self.nodes[0].keypoolrefill() - - self.log.info('Mine a block one second before the wallet is dumped') - dump_time = int(time.time()) - self.nodes[0].setmocktime(dump_time - 1) - self.generate(self.nodes[0], 1) - self.nodes[0].setmocktime(dump_time) - dump_time_str = '# * Created on {}Z'.format( - datetime.datetime.fromtimestamp( - dump_time, - tz=datetime.timezone.utc, - ).replace(tzinfo=None).isoformat()) - dump_best_block_1 = '# * Best block at time of backup was {} ({}),'.format( - self.nodes[0].getblockcount(), - self.nodes[0].getbestblockhash(), - ) - dump_best_block_2 = '# mined on {}Z'.format( - datetime.datetime.fromtimestamp( - dump_time - 1, - tz=datetime.timezone.utc, - ).replace(tzinfo=None).isoformat()) - - self.log.info('Dump unencrypted wallet') - result = self.nodes[0].dumpwallet(wallet_unenc_dump) - assert_equal(result['filename'], str(wallet_unenc_dump)) - - found_comments, found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \ - read_dump(wallet_unenc_dump, addrs, [multisig_addr], None) - assert '# End of dump' in found_comments # Check that file is not corrupt - assert_equal(dump_time_str, next(c for c in found_comments if c.startswith('# * Created on'))) - assert_equal(dump_best_block_1, next(c for c in found_comments if c.startswith('# * Best block'))) - assert_equal(dump_best_block_2, next(c for c in found_comments if c.startswith('# mined on'))) - assert_equal(found_legacy_addr, test_addr_count) # all keys must be in the dump - assert_equal(found_p2sh_segwit_addr, test_addr_count) # all keys must be in the dump - assert_equal(found_bech32_addr, test_addr_count) # all keys must be in the dump - assert_equal(found_script_addr, 1) # all scripts must be in the dump - assert_equal(found_addr_chg, 0) # 0 blocks where mined - assert_equal(found_addr_rsv, 90 * 2) # 90 keys plus 100% internal keys - - # encrypt wallet, restart, unlock and dump - self.nodes[0].encryptwallet('test') - with WalletUnlock(self.nodes[0], "test"): - # Should be a no-op: - self.nodes[0].keypoolrefill() - self.nodes[0].dumpwallet(wallet_enc_dump) - - found_comments, found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, _ = \ - read_dump(wallet_enc_dump, addrs, [multisig_addr], hd_master_addr_unenc) - assert '# End of dump' in found_comments # Check that file is not corrupt - assert_equal(dump_time_str, next(c for c in found_comments if c.startswith('# * Created on'))) - assert_equal(dump_best_block_1, next(c for c in found_comments if c.startswith('# * Best block'))) - assert_equal(dump_best_block_2, next(c for c in found_comments if c.startswith('# mined on'))) - assert_equal(found_legacy_addr, test_addr_count) # all keys must be in the dump - assert_equal(found_p2sh_segwit_addr, test_addr_count) # all keys must be in the dump - assert_equal(found_bech32_addr, test_addr_count) # all keys must be in the dump - assert_equal(found_script_addr, 1) - 
assert_equal(found_addr_chg, 90 * 2) # old reserve keys are marked as change now
-        assert_equal(found_addr_rsv, 90 * 2)
-
-        # Overwriting should fail
-        assert_raises_rpc_error(-8, "already exists", lambda: self.nodes[0].dumpwallet(wallet_enc_dump))
-
-        # Restart node with new wallet, and test importwallet
-        self.restart_node(0)
-        self.nodes[0].createwallet("w2")
-
-        # Make sure the address is not IsMine before import
-        result = self.nodes[0].getaddressinfo(multisig_addr)
-        assert not result['ismine']
-
-        self.nodes[0].importwallet(wallet_unenc_dump)
-
-        # Now check IsMine is true
-        result = self.nodes[0].getaddressinfo(multisig_addr)
-        assert result['ismine']
-
-        self.log.info('Check that wallet is flushed')
-        with self.nodes[0].assert_debug_log(['Flushing wallet.dat'], timeout=20):
-            self.nodes[0].getnewaddress()
-
-        # Make sure that dumpwallet doesn't have a lock order issue when there is an unconfirmed tx and it is reloaded
-        # See https://github.com/bitcoin/bitcoin/issues/22489
-        self.nodes[0].createwallet("w3")
-        w3 = self.nodes[0].get_wallet_rpc("w3")
-        w3.importprivkey(privkey=self.nodes[0].get_deterministic_priv_key().key, label="coinbase_import")
-        w3.sendtoaddress(w3.getnewaddress(), 10)
-        w3.unloadwallet()
-        self.nodes[0].loadwallet("w3")
-        w3.dumpwallet(self.nodes[0].datadir_path / "w3.dump")
-
-if __name__ == '__main__':
-    WalletDumpTest(__file__).main()
+#!/usr/bin/env python3
+# Copyright (c) 2016-2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the dumpwallet RPC."""
+import datetime
+import time
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+    assert_not_equal,
+    assert_equal,
+    assert_raises_rpc_error,
+)
+from test_framework.wallet_util import WalletUnlock
+
+
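The read_dump() helper that follows splits every non-comment dump line into key, timestamp, and keytype, then pulls addr= and hdkeypath= out of the trailing comment. A toy line in that shape, parsed the same way the helper does; the key and address values are made up:

# Sketch: the dump-file line shape read_dump() below parses.
line = "cToyKey 2023-01-01T00:00:00Z label= # addr=mfoo1 hdkeypath=m/0'/0'/1'"
key_date_label, comment = line.split("#")
key, date, keytype = key_date_label.split(" ")[0:3]
addr = comment.split(" addr=")[1].split(" ")[0]
keypath = comment.rstrip().split("hdkeypath=")[1]
assert (keytype, addr, keypath) == ("label=", "mfoo1", "m/0'/0'/1'")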
+def read_dump(file_name, addrs, script_addrs, hd_master_addr_old):
+    """
+    Read the given dump, count the addrs that match, count change and reserve.
+    Also check that the old hd_master is inactive
+    """
+    with open(file_name, encoding='utf8') as inputfile:
+        found_comments = []
+        found_legacy_addr = 0
+        found_p2sh_segwit_addr = 0
+        found_bech32_addr = 0
+        found_script_addr = 0
+        found_addr_chg = 0
+        found_addr_rsv = 0
+        hd_master_addr_ret = None
+        for line in inputfile:
+            line = line.strip()
+            if not line:
+                continue
+            if line[0] == '#':
+                found_comments.append(line)
+            else:
+                # split out some data
+                key_date_label, comment = line.split("#")
+                key_date_label = key_date_label.split(" ")
+                # key = key_date_label[0]
+                date = key_date_label[1]
+                keytype = key_date_label[2]
+
+                imported_key = date == '1970-01-01T00:00:01Z'
+                if imported_key:
+                    # Imported keys have multiple addresses, no label (keypath) and timestamp
+                    # Skip them
+                    continue
+
+                addr_keypath = comment.split(" addr=")[1]
+                addr = addr_keypath.split(" ")[0]
+                keypath = None
+                if keytype == "inactivehdseed=1":
+                    # ensure the old master is still available
+                    assert hd_master_addr_old == addr
+                elif keytype == "hdseed=1":
+                    # ensure we have generated a new hd master key
+                    assert_not_equal(hd_master_addr_old, addr)
+                    hd_master_addr_ret = addr
+                elif keytype == "script=1":
+                    # scripts don't have keypaths
+                    keypath = None
+                else:
+                    keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
+
+                # count key types
+                for addrObj in addrs:
+                    if addrObj['address'] == addr.split(",")[0] and addrObj['hdkeypath'] == keypath and keytype == "label=":
+                        if addr.startswith('m') or addr.startswith('n'):
+                            # P2PKH address
+                            found_legacy_addr += 1
+                        elif addr.startswith('2'):
+                            # P2SH-segwit address
+                            found_p2sh_segwit_addr += 1
+                        elif addr.startswith('bcrt1'):
+                            found_bech32_addr += 1
+                        break
+                    elif keytype == "change=1":
+                        found_addr_chg += 1
+                        break
+                    elif keytype == "reserve=1":
+                        found_addr_rsv += 1
+                        break
+
+                # count scripts
+                for script_addr in script_addrs:
+                    if script_addr == addr.rstrip() and keytype == "script=1":
+                        found_script_addr += 1
+                        break
+
+    return found_comments, found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
+
+
+class WalletDumpTest(BitcoinTestFramework):
+    def add_options(self, parser):
+        self.add_wallet_options(parser, descriptors=False)
+
+    def set_test_params(self):
+        self.num_nodes = 1
+        self.extra_args = [["-keypool=90", "-addresstype=legacy"]]
+        self.rpc_timeout = 120
+
+    def skip_test_if_missing_module(self):
+        self.skip_if_no_wallet()
+
+    def setup_network(self):
+        self.add_nodes(self.num_nodes, extra_args=self.extra_args)
+        self.start_nodes()
+
+    def run_test(self):
+        self.nodes[0].createwallet("dump")
+
+        wallet_unenc_dump = self.nodes[0].datadir_path / "wallet.unencrypted.dump"
+        wallet_enc_dump = self.nodes[0].datadir_path / "wallet.encrypted.dump"
+
+        # generate 30 addresses to compare against the dump
+        # - 10 legacy P2PKH
+        # - 10 P2SH-segwit
+        # - 10 bech32
+        test_addr_count = 10
+        addrs = []
+        for address_type in ['legacy', 'p2sh-segwit', 'bech32']:
+            for _ in range(test_addr_count):
+                addr = self.nodes[0].getnewaddress(address_type=address_type)
+                vaddr = self.nodes[0].getaddressinfo(addr) # required to get hd keypath
+                addrs.append(vaddr)
+
+        # Test scripts dump by adding a 1-of-1 multisig address
+        multisig_addr = self.nodes[0].addmultisigaddress(1, [addrs[1]["address"]])["address"]
+
+        # Refill the keypool. getnewaddress() refills the keypool *before* taking a key from
+        # the keypool, so the final call to getnewaddress leaves the keypool with one key below
+        # its capacity
+        self.nodes[0].keypoolrefill()
+
+        self.log.info('Mine a block one second before the wallet is dumped')
+        dump_time = int(time.time())
+        self.nodes[0].setmocktime(dump_time - 1)
+        self.generate(self.nodes[0], 1)
+        self.nodes[0].setmocktime(dump_time)
+        dump_time_str = '# * Created on {}Z'.format(
+            datetime.datetime.fromtimestamp(
+                dump_time,
+                tz=datetime.timezone.utc,
+            ).replace(tzinfo=None).isoformat())
+        dump_best_block_1 = '# * Best block at time of backup was {} ({}),'.format(
+            self.nodes[0].getblockcount(),
+            self.nodes[0].getbestblockhash(),
+        )
+        dump_best_block_2 = '# mined on {}Z'.format(
+            datetime.datetime.fromtimestamp(
+                dump_time - 1,
+                tz=datetime.timezone.utc,
+            ).replace(tzinfo=None).isoformat())
+
+        self.log.info('Dump unencrypted wallet')
+        result = self.nodes[0].dumpwallet(wallet_unenc_dump)
+        assert_equal(result['filename'], str(wallet_unenc_dump))
+
+        found_comments, found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
+            read_dump(wallet_unenc_dump, addrs, [multisig_addr], None)
+        assert '# End of dump' in found_comments # Check that file is not corrupt
+        assert_equal(dump_time_str, next(c for c in found_comments if c.startswith('# * Created on')))
+        assert_equal(dump_best_block_1, next(c for c in found_comments if c.startswith('# * Best block')))
+        assert_equal(dump_best_block_2, next(c for c in found_comments if c.startswith('# mined on')))
+        assert_equal(found_legacy_addr, test_addr_count) # all keys must be in the dump
+        assert_equal(found_p2sh_segwit_addr, test_addr_count) # all keys must be in the dump
+        assert_equal(found_bech32_addr, test_addr_count) # all keys must be in the dump
+        assert_equal(found_script_addr, 1) # all scripts must be in the dump
+        assert_equal(found_addr_chg, 0) # 0 blocks were mined
+        assert_equal(found_addr_rsv, 90 * 2) # 90 keys plus 100% internal keys
+
+        # encrypt wallet, restart, unlock and dump
+        self.nodes[0].encryptwallet('test')
+        with WalletUnlock(self.nodes[0], "test"):
+            # Should be a no-op:
+            self.nodes[0].keypoolrefill()
+            self.nodes[0].dumpwallet(wallet_enc_dump)
+
+        found_comments, found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, _ = \
+            read_dump(wallet_enc_dump, addrs, [multisig_addr], hd_master_addr_unenc)
+        assert '# End of dump' in found_comments # Check that file is not corrupt
+        assert_equal(dump_time_str, next(c for c in found_comments if c.startswith('# * Created on')))
+        assert_equal(dump_best_block_1, next(c for c in found_comments if c.startswith('# * Best block')))
+        assert_equal(dump_best_block_2, next(c for c in found_comments if c.startswith('# mined on')))
+        assert_equal(found_legacy_addr, test_addr_count) # all keys must be in the dump
+        assert_equal(found_p2sh_segwit_addr, test_addr_count) # all keys must be in the dump
+        assert_equal(found_bech32_addr, test_addr_count) # all keys must be in the dump
+        assert_equal(found_script_addr, 1)
+        assert_equal(found_addr_chg, 90 * 2) # old reserve keys are marked as change now
+        assert_equal(found_addr_rsv, 90 * 2)
+
+        # Overwriting should fail
+        assert_raises_rpc_error(-8, "already exists", lambda: self.nodes[0].dumpwallet(wallet_enc_dump))
+
+        # Restart node with new wallet, and test importwallet
+        self.restart_node(0)
+        self.nodes[0].createwallet("w2")
+
+        # Make sure the address is not IsMine before import
+        result = self.nodes[0].getaddressinfo(multisig_addr)
+        assert not result['ismine']
+
+        self.nodes[0].importwallet(wallet_unenc_dump)
+
+        # Now check IsMine is true
+        result = self.nodes[0].getaddressinfo(multisig_addr)
+        assert result['ismine']
+
+        self.log.info('Check that wallet is flushed')
+        with self.nodes[0].assert_debug_log(['Flushing wallet.dat'], timeout=20):
+            self.nodes[0].getnewaddress()
+
+        # Make sure that dumpwallet doesn't have a lock order issue when there is an unconfirmed tx and it is reloaded
+        # See https://github.com/bitcoin/bitcoin/issues/22489
+        self.nodes[0].createwallet("w3")
+        w3 = self.nodes[0].get_wallet_rpc("w3")
+        w3.importprivkey(privkey=self.nodes[0].get_deterministic_priv_key().key, label="coinbase_import")
+        w3.sendtoaddress(w3.getnewaddress(), 10)
+        w3.unloadwallet()
+        self.nodes[0].loadwallet("w3")
+        w3.dumpwallet(self.nodes[0].datadir_path / "w3.dump")
+
+if __name__ == '__main__':
+    WalletDumpTest(__file__).main()
diff --git a/test/functional/wallet_fundrawtransaction.py b/test/functional/wallet_fundrawtransaction.py
index 05b5696530e318..40f371ea96f5ce 100755
--- a/test/functional/wallet_fundrawtransaction.py
+++ b/test/functional/wallet_fundrawtransaction.py
@@ -1,1527 +1,1527 @@
-#!/usr/bin/env python3
-# Copyright (c) 2014-2022 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test the fundrawtransaction RPC."""
-
-
-from decimal import Decimal
-from itertools import product
-from math import ceil
-from test_framework.address import address_to_scriptpubkey
-
-from test_framework.descriptors import descsum_create
-from test_framework.messages import (
-    COIN,
-    CTransaction,
-    CTxOut,
-)
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (
-    assert_not_equal,
-    assert_approx,
-    assert_equal,
-    assert_fee_amount,
-    assert_greater_than,
-    assert_greater_than_or_equal,
-    assert_raises_rpc_error,
-    count_bytes,
-    get_fee,
-)
-from test_framework.wallet_util import generate_keypair, WalletUnlock
-
-ERR_NOT_ENOUGH_PRESET_INPUTS = "The preselected coins total amount does not cover the transaction target. 
" \ - "Please allow other inputs to be automatically selected or include more coins manually" - -def get_unspent(listunspent, amount): - for utx in listunspent: - if utx['amount'] == amount: - return utx - raise AssertionError('Could not find unspent with amount={}'.format(amount)) - -class RawTransactionsTest(BitcoinTestFramework): - def add_options(self, parser): - self.add_wallet_options(parser) - - def set_test_params(self): - self.num_nodes = 4 - self.setup_clean_chain = True - # whitelist peers to speed up tx relay / mempool sync - self.noban_tx_relay = True - self.rpc_timeout = 90 # to prevent timeouts in `test_transaction_too_large` - - def skip_test_if_missing_module(self): - self.skip_if_no_wallet() - - def setup_network(self): - self.setup_nodes() - - self.connect_nodes(0, 1) - self.connect_nodes(1, 2) - self.connect_nodes(0, 2) - self.connect_nodes(0, 3) - - def lock_outputs_type(self, wallet, outputtype): - """ - Only allow UTXOs of the given type - """ - if outputtype in ["legacy", "p2pkh", "pkh"]: - prefixes = ["pkh(", "sh(multi("] - elif outputtype in ["p2sh-segwit", "sh_wpkh"]: - prefixes = ["sh(wpkh(", "sh(wsh("] - elif outputtype in ["bech32", "wpkh"]: - prefixes = ["wpkh(", "wsh("] - else: - assert False, f"Unknown output type {outputtype}" - - to_lock = [] - for utxo in wallet.listunspent(): - if "desc" in utxo: - for prefix in prefixes: - if utxo["desc"].startswith(prefix): - to_lock.append({"txid": utxo["txid"], "vout": utxo["vout"]}) - wallet.lockunspent(False, to_lock) - - def unlock_utxos(self, wallet): - """ - Unlock all UTXOs except the watchonly one - """ - to_keep = [] - if self.watchonly_utxo is not None: - to_keep.append(self.watchonly_utxo) - wallet.lockunspent(True) - wallet.lockunspent(False, to_keep) - - def run_test(self): - self.watchonly_utxo = None - self.log.info("Connect nodes, set fees, generate blocks, and sync") - self.min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee'] - # This test is not meant to test fee estimation and we'd like - # to be sure all txs are sent at a consistent desired feerate - for node in self.nodes: - node.settxfee(self.min_relay_tx_fee) - - # if the fee's positive delta is higher than this value tests will fail, - # neg. delta always fail the tests. - # The size of the signature of every input may be at most 2 bytes larger - # than a minimum sized signature. 
- - # = 2 bytes * minRelayTxFeePerByte - self.fee_tolerance = 2 * self.min_relay_tx_fee / 1000 - - self.generate(self.nodes[2], 1) - self.generate(self.nodes[0], 121) - - self.test_add_inputs_default_value() - self.test_preset_inputs_selection() - self.test_weight_calculation() - self.test_weight_limits() - self.test_change_position() - self.test_simple() - self.test_simple_two_coins() - self.test_simple_two_outputs() - self.test_change() - self.test_no_change() - self.test_invalid_option() - self.test_invalid_change_address() - self.test_valid_change_address() - self.test_change_type() - self.test_coin_selection() - self.test_two_vin() - self.test_two_vin_two_vout() - self.test_invalid_input() - self.test_fee_p2pkh() - self.test_fee_p2pkh_multi_out() - self.test_fee_p2sh() - self.test_fee_4of5() - self.test_spend_2of2() - self.test_locked_wallet() - self.test_many_inputs_fee() - self.test_many_inputs_send() - self.test_op_return() - self.test_watchonly() - self.test_all_watched_funds() - self.test_option_feerate() - self.test_address_reuse() - self.test_option_subtract_fee_from_outputs() - self.test_subtract_fee_with_presets() - self.test_transaction_too_large() - self.test_include_unsafe() - self.test_external_inputs() - self.test_22670() - self.test_feerate_rounding() - self.test_input_confs_control() - self.test_duplicate_outputs() - - def test_duplicate_outputs(self): - self.log.info("Test deserializing and funding a transaction with duplicate outputs") - self.nodes[1].createwallet("fundtx_duplicate_outputs") - w = self.nodes[1].get_wallet_rpc("fundtx_duplicate_outputs") - - addr = w.getnewaddress(address_type="bech32") - self.nodes[0].sendtoaddress(addr, 5) - self.generate(self.nodes[0], 1) - - address = self.nodes[0].getnewaddress("bech32") - tx = CTransaction() - tx.vin = [] - tx.vout = [CTxOut(1 * COIN, bytearray(address_to_scriptpubkey(address)))] * 2 - tx.nLockTime = 0 - tx_hex = tx.serialize().hex() - res = w.fundrawtransaction(tx_hex, add_inputs=True) - signed_res = w.signrawtransactionwithwallet(res["hex"]) - txid = w.sendrawtransaction(signed_res["hex"]) - assert self.nodes[1].getrawtransaction(txid) - - self.log.info("Test SFFO with duplicate outputs") - - res_sffo = w.fundrawtransaction(tx_hex, add_inputs=True, subtractFeeFromOutputs=[0,1]) - signed_res_sffo = w.signrawtransactionwithwallet(res_sffo["hex"]) - txid_sffo = w.sendrawtransaction(signed_res_sffo["hex"]) - assert self.nodes[1].getrawtransaction(txid_sffo) - - def test_change_position(self): - """Ensure setting changePosition in fundraw with an exact match is handled properly.""" - self.log.info("Test fundrawtxn changePosition option") - rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50}) - rawmatch = self.nodes[2].fundrawtransaction(rawmatch, changePosition=1, subtractFeeFromOutputs=[0]) - assert_equal(rawmatch["changepos"], -1) - - self.nodes[3].createwallet(wallet_name="wwatch", disable_private_keys=True) - wwatch = self.nodes[3].get_wallet_rpc('wwatch') - watchonly_address = self.nodes[0].getnewaddress() - watchonly_pubkey = self.nodes[0].getaddressinfo(watchonly_address)["pubkey"] - self.watchonly_amount = Decimal(200) - wwatch.importpubkey(watchonly_pubkey, "", True) - self.watchonly_utxo = self.create_outpoints(self.nodes[0], outputs=[{watchonly_address: self.watchonly_amount}])[0] - - # Lock UTXO so nodes[0] doesn't accidentally spend it - self.nodes[0].lockunspent(False, [self.watchonly_utxo]) - - 
self.nodes[0].sendtoaddress(self.nodes[3].get_wallet_rpc(self.default_wallet_name).getnewaddress(), self.watchonly_amount / 10) - - self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5) - self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0) - self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0) - - self.generate(self.nodes[0], 1) - - wwatch.unloadwallet() - - def test_simple(self): - self.log.info("Test fundrawtxn") - inputs = [ ] - outputs = { self.nodes[0].getnewaddress() : 1.0 } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - rawtxfund = self.nodes[2].fundrawtransaction(rawtx) - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - assert len(dec_tx['vin']) > 0 #test that we have enough inputs - - def test_simple_two_coins(self): - self.log.info("Test fundrawtxn with 2 coins") - inputs = [ ] - outputs = { self.nodes[0].getnewaddress() : 2.2 } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - rawtxfund = self.nodes[2].fundrawtransaction(rawtx) - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - assert len(dec_tx['vin']) > 0 #test if we have enough inputs - assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') - - def test_simple_two_outputs(self): - self.log.info("Test fundrawtxn with 2 outputs") - - inputs = [ ] - outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - - rawtxfund = self.nodes[2].fundrawtransaction(rawtx) - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - - assert len(dec_tx['vin']) > 0 - assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') - - def test_change(self): - self.log.info("Test fundrawtxn with a vin > required amount") - utx = get_unspent(self.nodes[2].listunspent(), 5) - - inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] - outputs = { self.nodes[0].getnewaddress() : 1.0 } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) - - rawtxfund = self.nodes[2].fundrawtransaction(rawtx) - fee = rawtxfund['fee'] - self.test_no_change_fee = fee # Use the same fee for the next tx - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - totalOut = 0 - for out in dec_tx['vout']: - totalOut += out['value'] - - assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee - - def test_no_change(self): - self.log.info("Test fundrawtxn not having a change output") - utx = get_unspent(self.nodes[2].listunspent(), 5) - - inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] - outputs = {self.nodes[0].getnewaddress(): Decimal(5.0) - self.test_no_change_fee - self.fee_tolerance} - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) - - rawtxfund = self.nodes[2].fundrawtransaction(rawtx) - fee = rawtxfund['fee'] - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - totalOut = 0 - for out in dec_tx['vout']: - totalOut += out['value'] - - assert_equal(rawtxfund['changepos'], -1) - assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee - - def test_invalid_option(self): - self.log.info("Test fundrawtxn with an invalid option") - utx = get_unspent(self.nodes[2].listunspent(), 5) - - inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] - outputs = { self.nodes[0].getnewaddress() : 
Decimal(4.0) } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) - - assert_raises_rpc_error(-8, "Unknown named parameter foo", self.nodes[2].fundrawtransaction, rawtx, foo='bar') - - # reserveChangeKey was deprecated and is now removed - assert_raises_rpc_error(-8, "Unknown named parameter reserveChangeKey", lambda: self.nodes[2].fundrawtransaction(hexstring=rawtx, reserveChangeKey=True)) - - def test_invalid_change_address(self): - self.log.info("Test fundrawtxn with an invalid change address") - utx = get_unspent(self.nodes[2].listunspent(), 5) - - inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] - outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) - - assert_raises_rpc_error(-5, "Change address must be a valid bitcoin address", self.nodes[2].fundrawtransaction, rawtx, changeAddress='foobar') - - def test_valid_change_address(self): - self.log.info("Test fundrawtxn with a provided change address") - utx = get_unspent(self.nodes[2].listunspent(), 5) - - inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] - outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) - - change = self.nodes[2].getnewaddress() - assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, changeAddress=change, changePosition=2) - rawtxfund = self.nodes[2].fundrawtransaction(rawtx, changeAddress=change, changePosition=0) - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - out = dec_tx['vout'][0] - assert_equal(change, out['scriptPubKey']['address']) - - def test_change_type(self): - self.log.info("Test fundrawtxn with a provided change type") - utx = get_unspent(self.nodes[2].listunspent(), 5) - - inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] - outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - assert_raises_rpc_error(-3, "JSON value of type null is not of expected type string", self.nodes[2].fundrawtransaction, rawtx, change_type=None) - assert_raises_rpc_error(-5, "Unknown change type ''", self.nodes[2].fundrawtransaction, rawtx, change_type='') - rawtx = self.nodes[2].fundrawtransaction(rawtx, change_type='bech32') - dec_tx = self.nodes[2].decoderawtransaction(rawtx['hex']) - assert_equal('witness_v0_keyhash', dec_tx['vout'][rawtx['changepos']]['scriptPubKey']['type']) - - def test_coin_selection(self): - self.log.info("Test fundrawtxn with a vin < required amount") - utx = get_unspent(self.nodes[2].listunspent(), 1) - - inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] - outputs = { self.nodes[0].getnewaddress() : 1.0 } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - - # 4-byte version + 1-byte vin count + 36-byte prevout then script_len - rawtx = rawtx[:82] + "0100" + rawtx[84:] - - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) - assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex']) - - # Should fail without add_inputs: - assert_raises_rpc_error(-4, ERR_NOT_ENOUGH_PRESET_INPUTS, self.nodes[2].fundrawtransaction, 
rawtx, add_inputs=False) - # add_inputs is enabled by default - rawtxfund = self.nodes[2].fundrawtransaction(rawtx) - - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - matchingOuts = 0 - for i, out in enumerate(dec_tx['vout']): - if out['scriptPubKey']['address'] in outputs: - matchingOuts+=1 - else: - assert_equal(i, rawtxfund['changepos']) - - assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) - assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex']) - - assert_equal(matchingOuts, 1) - assert_equal(len(dec_tx['vout']), 2) - - def test_two_vin(self): - self.log.info("Test fundrawtxn with 2 vins") - utx = get_unspent(self.nodes[2].listunspent(), 1) - utx2 = get_unspent(self.nodes[2].listunspent(), 5) - - inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ] - outputs = { self.nodes[0].getnewaddress() : 6.0 } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) - - # Should fail without add_inputs: - assert_raises_rpc_error(-4, ERR_NOT_ENOUGH_PRESET_INPUTS, self.nodes[2].fundrawtransaction, rawtx, add_inputs=False) - rawtxfund = self.nodes[2].fundrawtransaction(rawtx, add_inputs=True) - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - matchingOuts = 0 - for out in dec_tx['vout']: - if out['scriptPubKey']['address'] in outputs: - matchingOuts+=1 - - assert_equal(matchingOuts, 1) - assert_equal(len(dec_tx['vout']), 2) - - matchingIns = 0 - for vinOut in dec_tx['vin']: - for vinIn in inputs: - if vinIn['txid'] == vinOut['txid']: - matchingIns+=1 - - assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params - - def test_two_vin_two_vout(self): - self.log.info("Test fundrawtxn with 2 vins and 2 vouts") - utx = get_unspent(self.nodes[2].listunspent(), 1) - utx2 = get_unspent(self.nodes[2].listunspent(), 5) - - inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ] - outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 } - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) - - # Should fail without add_inputs: - assert_raises_rpc_error(-4, ERR_NOT_ENOUGH_PRESET_INPUTS, self.nodes[2].fundrawtransaction, rawtx, add_inputs=False) - rawtxfund = self.nodes[2].fundrawtransaction(rawtx, add_inputs=True) - - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - matchingOuts = 0 - for out in dec_tx['vout']: - if out['scriptPubKey']['address'] in outputs: - matchingOuts+=1 - - assert_equal(matchingOuts, 2) - assert_equal(len(dec_tx['vout']), 3) - - def test_invalid_input(self): - self.log.info("Test fundrawtxn with an invalid vin") - txid = "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1" - vout = 0 - inputs = [ {'txid' : txid, 'vout' : vout} ] #invalid vin! 
- outputs = { self.nodes[0].getnewaddress() : 1.0} - rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - assert_raises_rpc_error(-4, "Unable to find UTXO for external input", self.nodes[2].fundrawtransaction, rawtx) - - def test_fee_p2pkh(self): - """Compare fee of a standard pubkeyhash transaction.""" - self.log.info("Test fundrawtxn p2pkh fee") - self.lock_outputs_type(self.nodes[0], "p2pkh") - inputs = [] - outputs = {self.nodes[1].getnewaddress():1.1} - rawtx = self.nodes[0].createrawtransaction(inputs, outputs) - fundedTx = self.nodes[0].fundrawtransaction(rawtx) - - # Create same transaction over sendtoaddress. - txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1) - signedFee = self.nodes[0].getmempoolentry(txId)['fees']['base'] - - # Compare fee. - feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) - assert feeDelta >= 0 and feeDelta <= self.fee_tolerance - - self.unlock_utxos(self.nodes[0]) - - def test_fee_p2pkh_multi_out(self): - """Compare fee of a standard pubkeyhash transaction with multiple outputs.""" - self.log.info("Test fundrawtxn p2pkh fee with multiple outputs") - self.lock_outputs_type(self.nodes[0], "p2pkh") - inputs = [] - outputs = { - self.nodes[1].getnewaddress():1.1, - self.nodes[1].getnewaddress():1.2, - self.nodes[1].getnewaddress():0.1, - self.nodes[1].getnewaddress():1.3, - self.nodes[1].getnewaddress():0.2, - self.nodes[1].getnewaddress():0.3, - } - rawtx = self.nodes[0].createrawtransaction(inputs, outputs) - fundedTx = self.nodes[0].fundrawtransaction(rawtx) - - # Create same transaction over sendtoaddress. - txId = self.nodes[0].sendmany("", outputs) - signedFee = self.nodes[0].getmempoolentry(txId)['fees']['base'] - - # Compare fee. - feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) - assert feeDelta >= 0 and feeDelta <= self.fee_tolerance - - self.unlock_utxos(self.nodes[0]) - - def test_fee_p2sh(self): - """Compare fee of a 2-of-2 multisig p2sh transaction.""" - self.lock_outputs_type(self.nodes[0], "p2pkh") - # Create 2-of-2 addr. - addr1 = self.nodes[1].getnewaddress() - addr2 = self.nodes[1].getnewaddress() - - addr1Obj = self.nodes[1].getaddressinfo(addr1) - addr2Obj = self.nodes[1].getaddressinfo(addr2) - - mSigObj = self.nodes[3].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address'] - - inputs = [] - outputs = {mSigObj:1.1} - rawtx = self.nodes[0].createrawtransaction(inputs, outputs) - fundedTx = self.nodes[0].fundrawtransaction(rawtx) - - # Create same transaction over sendtoaddress. - txId = self.nodes[0].sendtoaddress(mSigObj, 1.1) - signedFee = self.nodes[0].getmempoolentry(txId)['fees']['base'] - - # Compare fee. - feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) - assert feeDelta >= 0 and feeDelta <= self.fee_tolerance - - self.unlock_utxos(self.nodes[0]) - - def test_fee_4of5(self): - """Compare fee of a standard pubkeyhash transaction.""" - self.log.info("Test fundrawtxn fee with 4-of-5 addresses") - self.lock_outputs_type(self.nodes[0], "p2pkh") - - # Create 4-of-5 addr. 
- addr1 = self.nodes[1].getnewaddress() - addr2 = self.nodes[1].getnewaddress() - addr3 = self.nodes[1].getnewaddress() - addr4 = self.nodes[1].getnewaddress() - addr5 = self.nodes[1].getnewaddress() - - addr1Obj = self.nodes[1].getaddressinfo(addr1) - addr2Obj = self.nodes[1].getaddressinfo(addr2) - addr3Obj = self.nodes[1].getaddressinfo(addr3) - addr4Obj = self.nodes[1].getaddressinfo(addr4) - addr5Obj = self.nodes[1].getaddressinfo(addr5) - - mSigObj = self.nodes[1].createmultisig( - 4, - [ - addr1Obj['pubkey'], - addr2Obj['pubkey'], - addr3Obj['pubkey'], - addr4Obj['pubkey'], - addr5Obj['pubkey'], - ] - )['address'] - - inputs = [] - outputs = {mSigObj:1.1} - rawtx = self.nodes[0].createrawtransaction(inputs, outputs) - fundedTx = self.nodes[0].fundrawtransaction(rawtx) - - # Create same transaction over sendtoaddress. - txId = self.nodes[0].sendtoaddress(mSigObj, 1.1) - signedFee = self.nodes[0].getmempoolentry(txId)['fees']['base'] - - # Compare fee. - feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) - assert feeDelta >= 0 and feeDelta <= self.fee_tolerance - - self.unlock_utxos(self.nodes[0]) - - def test_spend_2of2(self): - """Spend a 2-of-2 multisig transaction over fundraw.""" - self.log.info("Test fundpsbt spending 2-of-2 multisig") - - # Create 2-of-2 addr. - addr1 = self.nodes[2].getnewaddress() - addr2 = self.nodes[2].getnewaddress() - - addr1Obj = self.nodes[2].getaddressinfo(addr1) - addr2Obj = self.nodes[2].getaddressinfo(addr2) - - self.nodes[2].createwallet(wallet_name='wmulti', disable_private_keys=True) - wmulti = self.nodes[2].get_wallet_rpc('wmulti') - w2 = self.nodes[2].get_wallet_rpc(self.default_wallet_name) - mSigObj = wmulti.addmultisigaddress( - 2, - [ - addr1Obj['pubkey'], - addr2Obj['pubkey'], - ] - )['address'] - if not self.options.descriptors: - wmulti.importaddress(mSigObj) - - # Send 1.2 BTC to msig addr. - self.nodes[0].sendtoaddress(mSigObj, 1.2) - self.generate(self.nodes[0], 1) - - oldBalance = self.nodes[1].getbalance() - inputs = [] - outputs = {self.nodes[1].getnewaddress():1.1} - funded_psbt = wmulti.walletcreatefundedpsbt(inputs=inputs, outputs=outputs, changeAddress=w2.getrawchangeaddress())['psbt'] - - signed_psbt = w2.walletprocesspsbt(funded_psbt) - self.nodes[2].sendrawtransaction(signed_psbt['hex']) - self.generate(self.nodes[2], 1) - - # Make sure funds are received at node1. - assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance()) - - wmulti.unloadwallet() - - def test_locked_wallet(self): - self.log.info("Test fundrawtxn with locked wallet and hardened derivation") - - df_wallet = self.nodes[1].get_wallet_rpc(self.default_wallet_name) - self.nodes[1].createwallet(wallet_name="locked_wallet", descriptors=self.options.descriptors) - wallet = self.nodes[1].get_wallet_rpc("locked_wallet") - # This test is not meant to exercise fee estimation. Making sure all txs are sent at a consistent fee rate. 
- wallet.settxfee(self.min_relay_tx_fee) - - # Add some balance to the wallet (this will be reverted at the end of the test) - df_wallet.sendall(recipients=[wallet.getnewaddress()]) - self.generate(self.nodes[1], 1) - - # Encrypt wallet and import descriptors - wallet.encryptwallet("test") - - if self.options.descriptors: - with WalletUnlock(wallet, "test"): - wallet.importdescriptors([{ - 'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPdYeeZbPSKd2KYLmeVKtcFA7kqCxDvDR13MQ6us8HopUR2wLcS2ZKPhLyKsqpDL2FtL73LMHcgoCL7DXsciA8eX8nbjCR2eG/0h/*h)'), - 'timestamp': 'now', - 'active': True - }, - { - 'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPdYeeZbPSKd2KYLmeVKtcFA7kqCxDvDR13MQ6us8HopUR2wLcS2ZKPhLyKsqpDL2FtL73LMHcgoCL7DXsciA8eX8nbjCR2eG/1h/*h)'), - 'timestamp': 'now', - 'active': True, - 'internal': True - }]) - - # Drain the keypool. - wallet.getnewaddress() - wallet.getrawchangeaddress() - - # Choose input - inputs = wallet.listunspent() - - # Deduce exact fee to produce a changeless transaction - tx_size = 110 # Total tx size: 110 vbytes, p2wpkh -> p2wpkh. Input 68 vbytes + rest of tx is 42 vbytes. - value = inputs[0]["amount"] - get_fee(tx_size, self.min_relay_tx_fee) - - outputs = {self.nodes[0].getnewaddress():value} - rawtx = wallet.createrawtransaction(inputs, outputs) - # fund a transaction that does not require a new key for the change output - funded_tx = wallet.fundrawtransaction(rawtx) - assert_equal(funded_tx["changepos"], -1) - - # fund a transaction that requires a new key for the change output - # creating the key must be impossible because the wallet is locked - outputs = {self.nodes[0].getnewaddress():value - Decimal("0.1")} - rawtx = wallet.createrawtransaction(inputs, outputs) - assert_raises_rpc_error(-4, "Transaction needs a change address, but we can't generate it.", wallet.fundrawtransaction, rawtx) - - # Refill the keypool. - with WalletUnlock(wallet, "test"): - wallet.keypoolrefill(8) #need to refill the keypool to get an internal change address - - assert_raises_rpc_error(-13, "walletpassphrase", wallet.sendtoaddress, self.nodes[0].getnewaddress(), 1.2) - - oldBalance = self.nodes[0].getbalance() - - inputs = [] - outputs = {self.nodes[0].getnewaddress():1.1} - rawtx = wallet.createrawtransaction(inputs, outputs) - fundedTx = wallet.fundrawtransaction(rawtx) - assert fundedTx["changepos"] != -1 - - # Now we need to unlock. - with WalletUnlock(wallet, "test"): - signedTx = wallet.signrawtransactionwithwallet(fundedTx['hex']) - wallet.sendrawtransaction(signedTx['hex']) - self.generate(self.nodes[1], 1) - - # Make sure funds are received at node1. - assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance()) - - # Restore pre-test wallet state - wallet.sendall(recipients=[df_wallet.getnewaddress(), df_wallet.getnewaddress(), df_wallet.getnewaddress()]) - wallet.unloadwallet() - self.generate(self.nodes[1], 1) - - def test_many_inputs_fee(self): - """Multiple (~19) inputs tx test | Compare fee.""" - self.log.info("Test fundrawtxn fee with many inputs") - - # Empty node1, send some small coins from node0 to node1. - self.nodes[1].sendall(recipients=[self.nodes[0].getnewaddress()]) - self.generate(self.nodes[1], 1) - - for _ in range(20): - self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01) - self.generate(self.nodes[0], 1) - - # Fund a tx with ~20 small inputs. 
- inputs = [] - outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04} - rawtx = self.nodes[1].createrawtransaction(inputs, outputs) - fundedTx = self.nodes[1].fundrawtransaction(rawtx) - - # Create same transaction over sendtoaddress. - txId = self.nodes[1].sendmany("", outputs) - signedFee = self.nodes[1].getmempoolentry(txId)['fees']['base'] - - # Compare fee. - feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) - assert feeDelta >= 0 and feeDelta <= self.fee_tolerance * 19 #~19 inputs - - def test_many_inputs_send(self): - """Multiple (~19) inputs tx test | sign/send.""" - self.log.info("Test fundrawtxn sign+send with many inputs") - - # Again, empty node1, send some small coins from node0 to node1. - self.nodes[1].sendall(recipients=[self.nodes[0].getnewaddress()]) - self.generate(self.nodes[1], 1) - - for _ in range(20): - self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01) - self.generate(self.nodes[0], 1) - - # Fund a tx with ~20 small inputs. - oldBalance = self.nodes[0].getbalance() - - inputs = [] - outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04} - rawtx = self.nodes[1].createrawtransaction(inputs, outputs) - fundedTx = self.nodes[1].fundrawtransaction(rawtx) - fundedAndSignedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex']) - self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex']) - self.generate(self.nodes[1], 1) - assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward - - def test_op_return(self): - self.log.info("Test fundrawtxn with OP_RETURN and no vin") - - rawtx = "0100000000010000000000000000066a047465737400000000" - dec_tx = self.nodes[2].decoderawtransaction(rawtx) - - assert_equal(len(dec_tx['vin']), 0) - assert_equal(len(dec_tx['vout']), 1) - - rawtxfund = self.nodes[2].fundrawtransaction(rawtx) - dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) - - assert_greater_than(len(dec_tx['vin']), 0) # at least one vin - assert_equal(len(dec_tx['vout']), 2) # one change output added - - def test_watchonly(self): - self.log.info("Test fundrawtxn using only watchonly") - - inputs = [] - outputs = {self.nodes[2].getnewaddress(): self.watchonly_amount / 2} - rawtx = self.nodes[3].createrawtransaction(inputs, outputs) - - self.nodes[3].loadwallet('wwatch') - wwatch = self.nodes[3].get_wallet_rpc('wwatch') - # Setup change addresses for the watchonly wallet - desc_import = [{ - "desc": descsum_create("wpkh(tpubD6NzVbkrYhZ4YNXVQbNhMK1WqguFsUXceaVJKbmno2aZ3B6QfbMeraaYvnBSGpV3vxLyTTK9DYT1yoEck4XUScMzXoQ2U2oSmE2JyMedq3H/1/*)"), - "timestamp": "now", - "internal": True, - "active": True, - "keypool": True, - "range": [0, 100], - "watchonly": True, - }] - if self.options.descriptors: - wwatch.importdescriptors(desc_import) - else: - wwatch.importmulti(desc_import) - - # Backward compatibility test (2nd params is includeWatching) - result = wwatch.fundrawtransaction(rawtx, True) - res_dec = self.nodes[0].decoderawtransaction(result["hex"]) - assert_equal(len(res_dec["vin"]), 1) - assert_equal(res_dec["vin"][0]["txid"], self.watchonly_utxo['txid']) - - assert "fee" in result.keys() - assert_greater_than(result["changepos"], -1) - - wwatch.unloadwallet() - - def test_all_watched_funds(self): - self.log.info("Test fundrawtxn using entirety of watched funds") - - inputs = [] - outputs = {self.nodes[2].getnewaddress(): self.watchonly_amount} - rawtx = self.nodes[3].createrawtransaction(inputs, outputs) - - self.nodes[3].loadwallet('wwatch') - 
wwatch = self.nodes[3].get_wallet_rpc('wwatch') - w3 = self.nodes[3].get_wallet_rpc(self.default_wallet_name) - result = wwatch.fundrawtransaction(rawtx, includeWatching=True, changeAddress=w3.getrawchangeaddress(), subtractFeeFromOutputs=[0]) - res_dec = self.nodes[0].decoderawtransaction(result["hex"]) - assert_equal(len(res_dec["vin"]), 1) - assert res_dec["vin"][0]["txid"] == self.watchonly_utxo['txid'] - - assert_greater_than(result["fee"], 0) - assert_equal(result["changepos"], -1) - assert_equal(result["fee"] + res_dec["vout"][0]["value"], self.watchonly_amount) - - signedtx = wwatch.signrawtransactionwithwallet(result["hex"]) - assert not signedtx["complete"] - signedtx = self.nodes[0].signrawtransactionwithwallet(signedtx["hex"]) - assert signedtx["complete"] - self.nodes[0].sendrawtransaction(signedtx["hex"]) - self.generate(self.nodes[0], 1) - - wwatch.unloadwallet() - - def test_option_feerate(self): - self.log.info("Test fundrawtxn with explicit fee rates (fee_rate sat/vB and feeRate BTC/kvB)") - node = self.nodes[3] - # Make sure there is exactly one input so coin selection can't skew the result. - assert_equal(len(self.nodes[3].listunspent(1)), 1) - inputs = [] - outputs = {node.getnewaddress() : 1} - rawtx = node.createrawtransaction(inputs, outputs) - - result = node.fundrawtransaction(rawtx) # uses self.min_relay_tx_fee (set by settxfee) - btc_kvb_to_sat_vb = 100000 # (1e5) - result1 = node.fundrawtransaction(rawtx, fee_rate=str(2 * btc_kvb_to_sat_vb * self.min_relay_tx_fee)) - result2 = node.fundrawtransaction(rawtx, feeRate=2 * self.min_relay_tx_fee) - result3 = node.fundrawtransaction(rawtx, fee_rate=10 * btc_kvb_to_sat_vb * self.min_relay_tx_fee) - result4 = node.fundrawtransaction(rawtx, feeRate=str(10 * self.min_relay_tx_fee)) - - result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex']) - assert_fee_amount(result1['fee'], count_bytes(result1['hex']), 2 * result_fee_rate) - assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate) - assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate) - assert_fee_amount(result4['fee'], count_bytes(result4['hex']), 10 * result_fee_rate) - - # Test that funding non-standard "zero-fee" transactions is valid. - for param, zero_value in product(["fee_rate", "feeRate"], [0, 0.000, 0.00000000, "0", "0.000", "0.00000000"]): - assert_equal(self.nodes[3].fundrawtransaction(rawtx, {param: zero_value})["fee"], 0) - - # With no arguments passed, expect fee of 141 satoshis. - assert_approx(node.fundrawtransaction(rawtx)["fee"], vexp=0.00000141, vspan=0.00000001) - # Expect fee to be 10,000x higher when an explicit fee rate 10,000x greater is specified. 
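For reference, the two fee-rate units exercised above differ by a factor of 1e5; a worked conversion (illustrative numbers, assuming the regtest default relay fee, not part of the diff):

    # 1 BTC/kvB = 100,000,000 sat / 1,000 vB = 100,000 sat/vB,
    # hence btc_kvb_to_sat_vb = 100000.
    # e.g. feeRate=0.00001 BTC/kvB (the regtest default relayfee) is the
    # same rate as fee_rate=1 sat/vB, since 0.00001 * 100000 = 1.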
- result = node.fundrawtransaction(rawtx, fee_rate=10000) - assert_approx(result["fee"], vexp=0.0141, vspan=0.0001) - - self.log.info("Test fundrawtxn with invalid estimate_mode settings") - for k, v in {"number": 42, "object": {"foo": "bar"}}.items(): - assert_raises_rpc_error(-3, f"JSON value of type {k} for field estimate_mode is not of expected type string", - node.fundrawtransaction, rawtx, estimate_mode=v, conf_target=0.1, add_inputs=True) - for mode in ["", "foo", Decimal("3.141592")]: - assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"', - node.fundrawtransaction, rawtx, estimate_mode=mode, conf_target=0.1, add_inputs=True) - - self.log.info("Test fundrawtxn with invalid conf_target settings") - for mode in ["unset", "economical", "conservative"]: - self.log.debug("{}".format(mode)) - for k, v in {"string": "", "object": {"foo": "bar"}}.items(): - assert_raises_rpc_error(-3, f"JSON value of type {k} for field conf_target is not of expected type number", - node.fundrawtransaction, rawtx, estimate_mode=mode, conf_target=v, add_inputs=True) - for n in [-1, 0, 1009]: - assert_raises_rpc_error(-8, "Invalid conf_target, must be between 1 and 1008", # max value of 1008 per src/policy/fees.h - node.fundrawtransaction, rawtx, estimate_mode=mode, conf_target=n, add_inputs=True) - - self.log.info("Test invalid fee rate settings") - for param, value in {("fee_rate", 100000), ("feeRate", 1.000)}: - assert_raises_rpc_error(-4, "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)", - node.fundrawtransaction, rawtx, add_inputs=True, **{param: value}) - assert_raises_rpc_error(-3, "Amount out of range", - node.fundrawtransaction, rawtx, add_inputs=True, **{param: -1}) - assert_raises_rpc_error(-3, "Amount is not a number or string", - node.fundrawtransaction, rawtx, add_inputs=True, **{param: {"foo": "bar"}}) - # Test fee rate values that don't pass fixed-point parsing checks. - for invalid_value in ["", 0.000000001, 1e-09, 1.111111111, 1111111111111111, "31.999999999999999999999"]: - assert_raises_rpc_error(-3, "Invalid amount", node.fundrawtransaction, rawtx, add_inputs=True, **{param: invalid_value}) - # Test fee_rate values that cannot be represented in sat/vB. - for invalid_value in [0.0001, 0.00000001, 0.00099999, 31.99999999]: - assert_raises_rpc_error(-3, "Invalid amount", - node.fundrawtransaction, rawtx, fee_rate=invalid_value, add_inputs=True) - - self.log.info("Test min fee rate checks are bypassed with fundrawtxn, e.g. a fee_rate under 1 sat/vB is allowed") - node.fundrawtransaction(rawtx, fee_rate=0.999, add_inputs=True) - node.fundrawtransaction(rawtx, feeRate=0.00000999, add_inputs=True) - - self.log.info("- raises RPC error if both feeRate and fee_rate are passed") - assert_raises_rpc_error(-8, "Cannot specify both fee_rate (sat/vB) and feeRate (BTC/kvB)", - node.fundrawtransaction, rawtx, fee_rate=0.1, feeRate=0.1, add_inputs=True) - - self.log.info("- raises RPC error if both feeRate and estimate_mode passed") - assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and feeRate", - node.fundrawtransaction, rawtx, estimate_mode="economical", feeRate=0.1, add_inputs=True) - - for param in ["feeRate", "fee_rate"]: - self.log.info("- raises RPC error if both {} and conf_target are passed".format(param)) - assert_raises_rpc_error(-8, "Cannot specify both conf_target and {}. 
Please provide either a confirmation " - "target in blocks for automatic fee estimation, or an explicit fee rate.".format(param), - node.fundrawtransaction, rawtx, {param: 1, "conf_target": 1, "add_inputs": True}) - - self.log.info("- raises RPC error if both fee_rate and estimate_mode are passed") - assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and fee_rate", - node.fundrawtransaction, rawtx, fee_rate=1, estimate_mode="economical", add_inputs=True) - - def test_address_reuse(self): - """Test no address reuse occurs.""" - self.log.info("Test fundrawtxn does not reuse addresses") - - rawtx = self.nodes[3].createrawtransaction(inputs=[], outputs={self.nodes[3].getnewaddress(): 1}) - result3 = self.nodes[3].fundrawtransaction(rawtx) - res_dec = self.nodes[0].decoderawtransaction(result3["hex"]) - changeaddress = "" - for out in res_dec['vout']: - if out['value'] > 1.0: - changeaddress += out['scriptPubKey']['address'] - assert changeaddress != "" - nextaddr = self.nodes[3].getnewaddress() - # Now the change address key should be removed from the keypool. - assert changeaddress != nextaddr - - def test_option_subtract_fee_from_outputs(self): - self.log.info("Test fundrawtxn subtractFeeFromOutputs option") - - # Make sure there is exactly one input so coin selection can't skew the result. - assert_equal(len(self.nodes[3].listunspent(1)), 1) - - inputs = [] - outputs = {self.nodes[2].getnewaddress(): 1} - rawtx = self.nodes[3].createrawtransaction(inputs, outputs) - - # Test subtract fee from outputs with feeRate (BTC/kvB) - result = [self.nodes[3].fundrawtransaction(rawtx), # uses self.min_relay_tx_fee (set by settxfee) - self.nodes[3].fundrawtransaction(rawtx, subtractFeeFromOutputs=[]), # empty subtraction list - self.nodes[3].fundrawtransaction(rawtx, subtractFeeFromOutputs=[0]), # uses self.min_relay_tx_fee (set by settxfee) - self.nodes[3].fundrawtransaction(rawtx, feeRate=2 * self.min_relay_tx_fee), - self.nodes[3].fundrawtransaction(rawtx, feeRate=2 * self.min_relay_tx_fee, subtractFeeFromOutputs=[0]),] - dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result] - output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)] - change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)] - - assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee']) - assert_equal(result[3]['fee'], result[4]['fee']) - assert_equal(change[0], change[1]) - assert_equal(output[0], output[1]) - assert_equal(output[0], output[2] + result[2]['fee']) - assert_equal(change[0] + result[0]['fee'], change[2]) - assert_equal(output[3], output[4] + result[4]['fee']) - assert_equal(change[3] + result[3]['fee'], change[4]) - - # Test subtract fee from outputs with fee_rate (sat/vB) - btc_kvb_to_sat_vb = 100000 # (1e5) - result = [self.nodes[3].fundrawtransaction(rawtx), # uses self.min_relay_tx_fee (set by settxfee) - self.nodes[3].fundrawtransaction(rawtx, subtractFeeFromOutputs=[]), # empty subtraction list - self.nodes[3].fundrawtransaction(rawtx, subtractFeeFromOutputs=[0]), # uses self.min_relay_tx_fee (set by settxfee) - self.nodes[3].fundrawtransaction(rawtx, fee_rate=2 * btc_kvb_to_sat_vb * self.min_relay_tx_fee), - self.nodes[3].fundrawtransaction(rawtx, fee_rate=2 * btc_kvb_to_sat_vb * self.min_relay_tx_fee, subtractFeeFromOutputs=[0]),] - dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result] - output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)] - change = 
[d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)] - - assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee']) - assert_equal(result[3]['fee'], result[4]['fee']) - assert_equal(change[0], change[1]) - assert_equal(output[0], output[1]) - assert_equal(output[0], output[2] + result[2]['fee']) - assert_equal(change[0] + result[0]['fee'], change[2]) - assert_equal(output[3], output[4] + result[4]['fee']) - assert_equal(change[3] + result[3]['fee'], change[4]) - - inputs = [] - outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)} - rawtx = self.nodes[3].createrawtransaction(inputs, outputs) - - result = [self.nodes[3].fundrawtransaction(rawtx), - # Split the fee between outputs 0, 2, and 3, but not output 1. - self.nodes[3].fundrawtransaction(rawtx, subtractFeeFromOutputs=[0, 2, 3])] - - dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']), - self.nodes[3].decoderawtransaction(result[1]['hex'])] - - # Nested list of non-change output amounts for each transaction. - output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']] - for d, r in zip(dec_tx, result)] - - # List of differences in output amounts between normal and subtractFee transactions. - share = [o0 - o1 for o0, o1 in zip(output[0], output[1])] - - # Output 1 is the same in both transactions. - assert_equal(share[1], 0) - - # The other 3 outputs are smaller as a result of subtractFeeFromOutputs. - assert_greater_than(share[0], 0) - assert_greater_than(share[2], 0) - assert_greater_than(share[3], 0) - - # Outputs 2 and 3 take the same share of the fee. - assert_equal(share[2], share[3]) - - # Output 0 takes at least as much share of the fee, and no more than 2 - # satoshis more, than outputs 2 and 3. - assert_greater_than_or_equal(share[0], share[2]) - assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0]) - - # The fee is the same in both transactions. - assert_equal(result[0]['fee'], result[1]['fee']) - - # The total subtracted from the outputs is equal to the fee. - assert_equal(share[0] + share[2] + share[3], result[0]['fee']) - - def test_subtract_fee_with_presets(self): - self.log.info("Test fundrawtxn subtract fee from outputs with preset inputs that are sufficient") - - addr = self.nodes[0].getnewaddress() - utxo = self.create_outpoints(self.nodes[0], outputs=[{addr: 10}])[0] - - rawtx = self.nodes[0].createrawtransaction([utxo], [{self.nodes[0].getnewaddress(): 5}]) - fundedtx = self.nodes[0].fundrawtransaction(rawtx, subtractFeeFromOutputs=[0]) - signedtx = self.nodes[0].signrawtransactionwithwallet(fundedtx['hex']) - self.nodes[0].sendrawtransaction(signedtx['hex']) - - def test_transaction_too_large(self): - self.log.info("Test fundrawtx where BnB solution would result in a too large transaction, but Knapsack would not") - self.nodes[0].createwallet("large") - wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name) - recipient = self.nodes[0].get_wallet_rpc("large") - outputs = {} - rawtx = recipient.createrawtransaction([], {wallet.getnewaddress(): 147.99899260}) - - # Make 1500 0.1 BTC outputs. The amount that we target for funding is in - # the BnB range when these outputs are used. However if these outputs - # are selected, the transaction will end up being too large, so it - # shouldn't use BnB and instead fall back to Knapsack but that behavior - # is not implemented yet. For now we just check that we get an error. - # First, force the wallet to bulk-generate the addresses we'll need. 
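Rough arithmetic behind the "too large" failure described above (assumed sizes; ~68 vbytes is a typical P2WPKH input, so treat these as estimates rather than exact values):

    # target 147.99899260 BTC / 0.1 BTC per coin -> ~1480 inputs selected
    # ~1480 inputs * ~68 vB                      -> ~100,640 vB
    # ~100,640 vB * 4                            -> ~402,560 weight units,
    # which exceeds the 400,000 standard transaction weight limit,
    # hence the "inputs size exceeds the maximum weight" error.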
- recipient.keypoolrefill(1500) - for _ in range(1500): - outputs[recipient.getnewaddress()] = 0.1 - wallet.sendmany("", outputs) - self.generate(self.nodes[0], 10) - assert_raises_rpc_error(-4, "The inputs size exceeds the maximum weight. " - "Please try sending a smaller amount or manually consolidating your wallet's UTXOs", - recipient.fundrawtransaction, rawtx) - self.nodes[0].unloadwallet("large") - - def test_external_inputs(self): - self.log.info("Test funding with external inputs") - privkey, _ = generate_keypair(wif=True) - self.nodes[2].createwallet("extfund") - wallet = self.nodes[2].get_wallet_rpc("extfund") - - # Make a weird but signable script. sh(pkh()) descriptor accomplishes this - desc = descsum_create("sh(pkh({}))".format(privkey)) - if self.options.descriptors: - res = self.nodes[0].importdescriptors([{"desc": desc, "timestamp": "now"}]) - else: - res = self.nodes[0].importmulti([{"desc": desc, "timestamp": "now"}]) - assert res[0]["success"] - addr = self.nodes[0].deriveaddresses(desc)[0] - addr_info = self.nodes[0].getaddressinfo(addr) - - self.nodes[0].sendtoaddress(addr, 10) - self.nodes[0].sendtoaddress(wallet.getnewaddress(), 10) - self.generate(self.nodes[0], 6) - ext_utxo = self.nodes[0].listunspent(addresses=[addr])[0] - - # An external input without solving data should result in an error - raw_tx = wallet.createrawtransaction([ext_utxo], {self.nodes[0].getnewaddress(): ext_utxo["amount"] / 2}) - assert_raises_rpc_error(-4, "Not solvable pre-selected input COutPoint(%s, %s)" % (ext_utxo["txid"][0:10], ext_utxo["vout"]), wallet.fundrawtransaction, raw_tx) - - # Error conditions - assert_raises_rpc_error(-5, 'Pubkey "not a pubkey" must be a hex string', wallet.fundrawtransaction, raw_tx, solving_data={"pubkeys":["not a pubkey"]}) - assert_raises_rpc_error(-5, 'Pubkey "01234567890a0b0c0d0e0f" must have a length of either 33 or 65 bytes', wallet.fundrawtransaction, raw_tx, solving_data={"pubkeys":["01234567890a0b0c0d0e0f"]}) - assert_raises_rpc_error(-5, "'not a script' is not hex", wallet.fundrawtransaction, raw_tx, solving_data={"scripts":["not a script"]}) - assert_raises_rpc_error(-8, "Unable to parse descriptor 'not a descriptor'", wallet.fundrawtransaction, raw_tx, solving_data={"descriptors":["not a descriptor"]}) - assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", wallet.fundrawtransaction, raw_tx, input_weights=[{"txid": ext_utxo["txid"]}]) - assert_raises_rpc_error(-8, "Invalid parameter, vout cannot be negative", wallet.fundrawtransaction, raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": -1}]) - assert_raises_rpc_error(-8, "Invalid parameter, missing weight key", wallet.fundrawtransaction, raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"]}]) - assert_raises_rpc_error(-8, "Invalid parameter, weight cannot be less than 165", wallet.fundrawtransaction, raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 164}]) - assert_raises_rpc_error(-8, "Invalid parameter, weight cannot be less than 165", wallet.fundrawtransaction, raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": -1}]) - assert_raises_rpc_error(-8, "Invalid parameter, weight cannot be greater than", wallet.fundrawtransaction, raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 400001}]) - - # But funding should work when the solving data is provided - funded_tx = wallet.fundrawtransaction(raw_tx, solving_data={"pubkeys": [addr_info['pubkey']], 
"scripts": [addr_info["embedded"]["scriptPubKey"]]}) - signed_tx = wallet.signrawtransactionwithwallet(funded_tx['hex']) - assert not signed_tx['complete'] - signed_tx = self.nodes[0].signrawtransactionwithwallet(signed_tx['hex']) - assert signed_tx['complete'] - - funded_tx = wallet.fundrawtransaction(raw_tx, solving_data={"descriptors": [desc]}) - signed_tx1 = wallet.signrawtransactionwithwallet(funded_tx['hex']) - assert not signed_tx1['complete'] - signed_tx2 = self.nodes[0].signrawtransactionwithwallet(signed_tx1['hex']) - assert signed_tx2['complete'] - - unsigned_weight = self.nodes[0].decoderawtransaction(signed_tx1["hex"])["weight"] - signed_weight = self.nodes[0].decoderawtransaction(signed_tx2["hex"])["weight"] - # Input's weight is difference between weight of signed and unsigned, - # and the weight of stuff that didn't change (prevout, sequence, 1 byte of scriptSig) - input_weight = signed_weight - unsigned_weight + (41 * 4) - low_input_weight = input_weight // 2 - high_input_weight = input_weight * 2 - - # Funding should also work if the input weight is provided - funded_tx = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": input_weight}], fee_rate=2) - signed_tx = wallet.signrawtransactionwithwallet(funded_tx["hex"]) - signed_tx = self.nodes[0].signrawtransactionwithwallet(signed_tx["hex"]) - assert_equal(self.nodes[0].testmempoolaccept([signed_tx["hex"]])[0]["allowed"], True) - assert_equal(signed_tx["complete"], True) - # Reducing the weight should have a lower fee - funded_tx2 = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": low_input_weight}], fee_rate=2) - assert_greater_than(funded_tx["fee"], funded_tx2["fee"]) - # Increasing the weight should have a higher fee - funded_tx2 = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}], fee_rate=2) - assert_greater_than(funded_tx2["fee"], funded_tx["fee"]) - # The provided weight should override the calculated weight when solving data is provided - funded_tx3 = wallet.fundrawtransaction(raw_tx, solving_data={"descriptors": [desc]}, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}], fee_rate=2) - assert_equal(funded_tx2["fee"], funded_tx3["fee"]) - # The feerate should be met - funded_tx4 = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}], fee_rate=10) - input_add_weight = high_input_weight - (41 * 4) - tx4_weight = wallet.decoderawtransaction(funded_tx4["hex"])["weight"] + input_add_weight - tx4_vsize = int(ceil(tx4_weight / 4)) - assert_fee_amount(funded_tx4["fee"], tx4_vsize, Decimal(0.0001)) - - # Funding with weight at csuint boundaries should not cause problems - funded_tx = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 255}], fee_rate=2) - funded_tx = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 65539}], fee_rate=2) - - self.nodes[2].unloadwallet("extfund") - - def test_add_inputs_default_value(self): - self.log.info("Test 'add_inputs' default value") - - # Create and fund the wallet with 5 BTC - self.nodes[2].createwallet("test_preset_inputs") - wallet = self.nodes[2].get_wallet_rpc("test_preset_inputs") - addr1 = wallet.getnewaddress(address_type="bech32") 
- self.nodes[0].sendtoaddress(addr1, 5) - self.generate(self.nodes[0], 1) - - # Covered cases: - # 1. Default add_inputs value with no preset inputs (add_inputs=true): - # Expect: automatically add coins from the wallet to the tx. - # 2. Default add_inputs value with preset inputs (add_inputs=false): - # Expect: disallow automatic coin selection. - # 3. Explicit add_inputs=true and preset inputs (with preset inputs not-covering the target amount). - # Expect: include inputs from the wallet. - # 4. Explicit add_inputs=true and preset inputs (with preset inputs covering the target amount). - # Expect: only preset inputs are used. - # 5. Explicit add_inputs=true, no preset inputs (same as (1) but with an explicit set): - # Expect: include inputs from the wallet. - # 6. Explicit add_inputs=false, no preset inputs: - # Expect: failure as we did not provide inputs and the process cannot automatically select coins. - - # Case (1), 'send' command - # 'add_inputs' value is true unless "inputs" are specified, in such case, add_inputs=false. - # So, the wallet will automatically select coins and create the transaction if only the outputs are provided. - tx = wallet.send(outputs=[{addr1: 3}]) - assert tx["complete"] - - # Case (2), 'send' command - # Select an input manually, which doesn't cover the entire output amount and - # verify that the dynamically set 'add_inputs=false' value works. - - # Fund wallet with 2 outputs, 5 BTC each. - addr2 = wallet.getnewaddress(address_type="bech32") - source_tx = self.nodes[0].send(outputs=[{addr1: 5}, {addr2: 5}], change_position=0) - self.generate(self.nodes[0], 1) - - # Select only one input. - options = { - "inputs": [ - { - "txid": source_tx["txid"], - "vout": 1 # change position was hardcoded to index 0 - } - ] - } - assert_raises_rpc_error(-4, ERR_NOT_ENOUGH_PRESET_INPUTS, wallet.send, outputs=[{addr1: 8}], **options) - - # Case (3), Explicit add_inputs=true and preset inputs (with preset inputs not-covering the target amount) - options["add_inputs"] = True - options["add_to_wallet"] = False - tx = wallet.send(outputs=[{addr1: 8}], **options) - assert tx["complete"] - - # Case (4), Explicit add_inputs=true and preset inputs (with preset inputs covering the target amount) - options["inputs"].append({ - "txid": source_tx["txid"], - "vout": 2 # change position was hardcoded to index 0 +#!/usr/bin/env python3 +# Copyright (c) 2014-2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the fundrawtransaction RPC.""" + + +from decimal import Decimal +from itertools import product +from math import ceil +from test_framework.address import address_to_scriptpubkey + +from test_framework.descriptors import descsum_create +from test_framework.messages import ( + COIN, + CTransaction, + CTxOut, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_not_equal, + assert_approx, + assert_equal, + assert_fee_amount, + assert_greater_than, + assert_greater_than_or_equal, + assert_raises_rpc_error, + count_bytes, + get_fee, +) +from test_framework.wallet_util import generate_keypair, WalletUnlock + +ERR_NOT_ENOUGH_PRESET_INPUTS = "The preselected coins total amount does not cover the transaction target. 
" \) + "Please allow other inputs to be automatically selected or include more coins manually") +) +def get_unspent(listunspent, amount):) + for utx in listunspent:) + if utx['amount'] == amount:) + return utx) + raise AssertionError('Could not find unspent with amount={}'.format(amount))) +) +class RawTransactionsTest(BitcoinTestFramework):) + def add_options(self, parser):) + self.add_wallet_options(parser)) +) + def set_test_params(self):) + self.num_nodes = 4) + self.setup_clean_chain = True) + # whitelist peers to speed up tx relay / mempool sync) + self.noban_tx_relay = True) + self.rpc_timeout = 90 # to prevent timeouts in `test_transaction_too_large`) +) + def skip_test_if_missing_module(self):) + self.skip_if_no_wallet()) +) + def setup_network(self):) + self.setup_nodes()) +) + self.connect_nodes(0, 1)) + self.connect_nodes(1, 2)) + self.connect_nodes(0, 2)) + self.connect_nodes(0, 3)) +) + def lock_outputs_type(self, wallet, outputtype):) + """) + Only allow UTXOs of the given type) + """) + if outputtype in ["legacy", "p2pkh", "pkh"]:) + prefixes = ["pkh(", "sh(multi("]) + elif outputtype in ["p2sh-segwit", "sh_wpkh"]:) + prefixes = ["sh(wpkh(", "sh(wsh("]) + elif outputtype in ["bech32", "wpkh"]:) + prefixes = ["wpkh(", "wsh("]) + else:) + assert False, f"Unknown output type {outputtype}") +) + to_lock = []) + for utxo in wallet.listunspent():) + if "desc" in utxo:) + for prefix in prefixes:) + if utxo["desc"].startswith(prefix):) + to_lock.append({"txid": utxo["txid"], "vout": utxo["vout"]})) + wallet.lockunspent(False, to_lock)) +) + def unlock_utxos(self, wallet):) + """) + Unlock all UTXOs except the watchonly one) + """) + to_keep = []) + if self.watchonly_utxo is not None:) + to_keep.append(self.watchonly_utxo)) + wallet.lockunspent(True)) + wallet.lockunspent(False, to_keep)) +) + def run_test(self):) + self.watchonly_utxo = None) + self.log.info("Connect nodes, set fees, generate blocks, and sync")) + self.min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']) + # This test is not meant to test fee estimation and we'd like) + # to be sure all txs are sent at a consistent desired feerate) + for node in self.nodes:) + node.settxfee(self.min_relay_tx_fee)) +) + # if the fee's positive delta is higher than this value tests will fail,) + # neg. delta always fail the tests.) + # The size of the signature of every input may be at most 2 bytes larger) + # than a minimum sized signature.) 
+ + # = 2 bytes * minRelayTxFeePerByte + self.fee_tolerance = 2 * self.min_relay_tx_fee / 1000 + + self.generate(self.nodes[2], 1) + self.generate(self.nodes[0], 121) + + self.test_add_inputs_default_value() + self.test_preset_inputs_selection() + self.test_weight_calculation() + self.test_weight_limits() + self.test_change_position() + self.test_simple() + self.test_simple_two_coins() + self.test_simple_two_outputs() + self.test_change() + self.test_no_change() + self.test_invalid_option() + self.test_invalid_change_address() + self.test_valid_change_address() + self.test_change_type() + self.test_coin_selection() + self.test_two_vin() + self.test_two_vin_two_vout() + self.test_invalid_input() + self.test_fee_p2pkh() + self.test_fee_p2pkh_multi_out() + self.test_fee_p2sh() + self.test_fee_4of5() + self.test_spend_2of2() + self.test_locked_wallet() + self.test_many_inputs_fee() + self.test_many_inputs_send() + self.test_op_return() + self.test_watchonly() + self.test_all_watched_funds() + self.test_option_feerate() + self.test_address_reuse() + self.test_option_subtract_fee_from_outputs() + self.test_subtract_fee_with_presets() + self.test_transaction_too_large() + self.test_include_unsafe() + self.test_external_inputs() + self.test_22670() + self.test_feerate_rounding() + self.test_input_confs_control() + self.test_duplicate_outputs() + + def test_duplicate_outputs(self): + self.log.info("Test deserializing and funding a transaction with duplicate outputs") + self.nodes[1].createwallet("fundtx_duplicate_outputs") + w = self.nodes[1].get_wallet_rpc("fundtx_duplicate_outputs") + + addr = w.getnewaddress(address_type="bech32") + self.nodes[0].sendtoaddress(addr, 5) + self.generate(self.nodes[0], 1) + + address = self.nodes[0].getnewaddress("bech32") + tx = CTransaction() + tx.vin = [] + tx.vout = [CTxOut(1 * COIN, bytearray(address_to_scriptpubkey(address)))] * 2 + tx.nLockTime = 0 + tx_hex = tx.serialize().hex() + res = w.fundrawtransaction(tx_hex, add_inputs=True) + signed_res = w.signrawtransactionwithwallet(res["hex"]) + txid = w.sendrawtransaction(signed_res["hex"]) + assert self.nodes[1].getrawtransaction(txid) + + self.log.info("Test SFFO with duplicate outputs") + + res_sffo = w.fundrawtransaction(tx_hex, add_inputs=True, subtractFeeFromOutputs=[0,1]) + signed_res_sffo = w.signrawtransactionwithwallet(res_sffo["hex"]) + txid_sffo = w.sendrawtransaction(signed_res_sffo["hex"]) + assert self.nodes[1].getrawtransaction(txid_sffo) + + def test_change_position(self): + """Ensure setting changePosition in fundraw with an exact match is handled properly.""" + self.log.info("Test fundrawtxn changePosition option") + rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50}) + rawmatch = self.nodes[2].fundrawtransaction(rawmatch, changePosition=1, subtractFeeFromOutputs=[0]) + assert_equal(rawmatch["changepos"], -1) + + self.nodes[3].createwallet(wallet_name="wwatch", disable_private_keys=True) + wwatch = self.nodes[3].get_wallet_rpc('wwatch') + watchonly_address = self.nodes[0].getnewaddress() + watchonly_pubkey = self.nodes[0].getaddressinfo(watchonly_address)["pubkey"] + self.watchonly_amount = Decimal(200) + wwatch.importpubkey(watchonly_pubkey, "", True) + self.watchonly_utxo = self.create_outpoints(self.nodes[0], outputs=[{watchonly_address: self.watchonly_amount}])[0] + + # Lock UTXO so nodes[0] doesn't accidentally spend it + self.nodes[0].lockunspent(False, 
[self.watchonly_utxo]) + + self.nodes[0].sendtoaddress(self.nodes[3].get_wallet_rpc(self.default_wallet_name).getnewaddress(), self.watchonly_amount / 10) + + self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5) + self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0) + self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0) + + self.generate(self.nodes[0], 1) + + wwatch.unloadwallet() + + def test_simple(self): + self.log.info("Test fundrawtxn") + inputs = [ ] + outputs = { self.nodes[0].getnewaddress() : 1.0 } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + rawtxfund = self.nodes[2].fundrawtransaction(rawtx) + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) + assert len(dec_tx['vin']) > 0 #test that we have enough inputs + + def test_simple_two_coins(self): + self.log.info("Test fundrawtxn with 2 coins") + inputs = [ ] + outputs = { self.nodes[0].getnewaddress() : 2.2 } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + rawtxfund = self.nodes[2].fundrawtransaction(rawtx) + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) + assert len(dec_tx['vin']) > 0 #test if we have enough inputs + assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') + + def test_simple_two_outputs(self): + self.log.info("Test fundrawtxn with 2 outputs") + + inputs = [ ] + outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + + rawtxfund = self.nodes[2].fundrawtransaction(rawtx) + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) + + assert len(dec_tx['vin']) > 0 + assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') + + def test_change(self): + self.log.info("Test fundrawtxn with a vin > required amount") + utx = get_unspent(self.nodes[2].listunspent(), 5) + + inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] + outputs = { self.nodes[0].getnewaddress() : 1.0 } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) + + rawtxfund = self.nodes[2].fundrawtransaction(rawtx) + fee = rawtxfund['fee'] + self.test_no_change_fee = fee # Use the same fee for the next tx + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) + totalOut = 0 + for out in dec_tx['vout']: + totalOut += out['value'] + + assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee + + def test_no_change(self): + self.log.info("Test fundrawtxn not having a change output") + utx = get_unspent(self.nodes[2].listunspent(), 5) + + inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] + outputs = {self.nodes[0].getnewaddress(): Decimal(5.0) - self.test_no_change_fee - self.fee_tolerance} + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) + + rawtxfund = self.nodes[2].fundrawtransaction(rawtx) + fee = rawtxfund['fee'] + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) + totalOut = 0 + for out in dec_tx['vout']: + totalOut += out['value'] + + assert_equal(rawtxfund['changepos'], -1) + assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee + + def test_invalid_option(self): + self.log.info("Test fundrawtxn with an invalid option") + utx = get_unspent(self.nodes[2].listunspent(), 
5) + + inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] + outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) + + assert_raises_rpc_error(-8, "Unknown named parameter foo", self.nodes[2].fundrawtransaction, rawtx, foo='bar') + + # reserveChangeKey was deprecated and is now removed + assert_raises_rpc_error(-8, "Unknown named parameter reserveChangeKey", lambda: self.nodes[2].fundrawtransaction(hexstring=rawtx, reserveChangeKey=True)) + + def test_invalid_change_address(self): + self.log.info("Test fundrawtxn with an invalid change address") + utx = get_unspent(self.nodes[2].listunspent(), 5) + + inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] + outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) + + assert_raises_rpc_error(-5, "Change address must be a valid bitcoin address", self.nodes[2].fundrawtransaction, rawtx, changeAddress='foobar') + + def test_valid_change_address(self): + self.log.info("Test fundrawtxn with a provided change address") + utx = get_unspent(self.nodes[2].listunspent(), 5) + + inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] + outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) + + change = self.nodes[2].getnewaddress() + assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, changeAddress=change, changePosition=2) + rawtxfund = self.nodes[2].fundrawtransaction(rawtx, changeAddress=change, changePosition=0) + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) + out = dec_tx['vout'][0] + assert_equal(change, out['scriptPubKey']['address']) + + def test_change_type(self): + self.log.info("Test fundrawtxn with a provided change type") + utx = get_unspent(self.nodes[2].listunspent(), 5) + + inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ] + outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + assert_raises_rpc_error(-3, "JSON value of type null is not of expected type string", self.nodes[2].fundrawtransaction, rawtx, change_type=None) + assert_raises_rpc_error(-5, "Unknown change type ''", self.nodes[2].fundrawtransaction, rawtx, change_type='') + rawtx = self.nodes[2].fundrawtransaction(rawtx, change_type='bech32') + dec_tx = self.nodes[2].decoderawtransaction(rawtx['hex']) + assert_equal('witness_v0_keyhash', dec_tx['vout'][rawtx['changepos']]['scriptPubKey']['type']) + + def test_coin_selection(self): + self.log.info("Test fundrawtxn with a vin < required amount") + utx = get_unspent(self.nodes[2].listunspent(), 1) + + inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] + outputs = { self.nodes[0].getnewaddress() : 1.0 } + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + + # 4-byte version + 1-byte vin count + 36-byte prevout then script_len + rawtx = rawtx[:82] + "0100" + rawtx[84:] + + dec_tx = self.nodes[2].decoderawtransaction(rawtx) + assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
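The raw-hex splice in test_coin_selection above works in hex characters, two per byte; the offsets decompose as follows (arithmetic spelled out from the in-line comment):

    # 4 (version) + 1 (vin count) + 36 (prevout) = 41 bytes = 82 hex chars,
    # so rawtx[82:84] holds the scriptSig length ("00" = empty script).
    # Splicing in "0100" sets script_len = 0x01 followed by the single
    # script byte 0x00, which is why the decoded input's scriptSig hex
    # is asserted to be "00" below.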
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])) +) + # Should fail without add_inputs:) + assert_raises_rpc_error(-4, ERR_NOT_ENOUGH_PRESET_INPUTS, self.nodes[2].fundrawtransaction, rawtx, add_inputs=False)) + # add_inputs is enabled by default) + rawtxfund = self.nodes[2].fundrawtransaction(rawtx)) +) + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])) + matchingOuts = 0) + for i, out in enumerate(dec_tx['vout']):) + if out['scriptPubKey']['address'] in outputs:) + matchingOuts+=1) + else:) + assert_equal(i, rawtxfund['changepos'])) +) + assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])) + assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])) +) + assert_equal(matchingOuts, 1)) + assert_equal(len(dec_tx['vout']), 2)) +) + def test_two_vin(self):) + self.log.info("Test fundrawtxn with 2 vins")) + utx = get_unspent(self.nodes[2].listunspent(), 1)) + utx2 = get_unspent(self.nodes[2].listunspent(), 5)) +) + inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]) + outputs = { self.nodes[0].getnewaddress() : 6.0 }) + rawtx = self.nodes[2].createrawtransaction(inputs, outputs)) + dec_tx = self.nodes[2].decoderawtransaction(rawtx)) + assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])) +) + # Should fail without add_inputs:) + assert_raises_rpc_error(-4, ERR_NOT_ENOUGH_PRESET_INPUTS, self.nodes[2].fundrawtransaction, rawtx, add_inputs=False)) + rawtxfund = self.nodes[2].fundrawtransaction(rawtx, add_inputs=True)) + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])) + matchingOuts = 0) + for out in dec_tx['vout']:) + if out['scriptPubKey']['address'] in outputs:) + matchingOuts+=1) +) + assert_equal(matchingOuts, 1)) + assert_equal(len(dec_tx['vout']), 2)) +) + matchingIns = 0) + for vinOut in dec_tx['vin']:) + for vinIn in inputs:) + if vinIn['txid'] == vinOut['txid']:) + matchingIns+=1) +) + assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params) +) + def test_two_vin_two_vout(self):) + self.log.info("Test fundrawtxn with 2 vins and 2 vouts")) + utx = get_unspent(self.nodes[2].listunspent(), 1)) + utx2 = get_unspent(self.nodes[2].listunspent(), 5)) +) + inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]) + outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }) + rawtx = self.nodes[2].createrawtransaction(inputs, outputs)) + dec_tx = self.nodes[2].decoderawtransaction(rawtx)) + assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])) +) + # Should fail without add_inputs:) + assert_raises_rpc_error(-4, ERR_NOT_ENOUGH_PRESET_INPUTS, self.nodes[2].fundrawtransaction, rawtx, add_inputs=False)) + rawtxfund = self.nodes[2].fundrawtransaction(rawtx, add_inputs=True)) +) + dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])) + matchingOuts = 0) + for out in dec_tx['vout']:) + if out['scriptPubKey']['address'] in outputs:) + matchingOuts+=1) +) + assert_equal(matchingOuts, 2)) + assert_equal(len(dec_tx['vout']), 3)) +) + def test_invalid_input(self):) + self.log.info("Test fundrawtxn with an invalid vin")) + txid = "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1") + vout = 0) + inputs = [ {'txid' : txid, 'vout' : vout} ] #invalid vin!) 
+ outputs = { self.nodes[0].getnewaddress() : 1.0} + rawtx = self.nodes[2].createrawtransaction(inputs, outputs) + assert_raises_rpc_error(-4, "Unable to find UTXO for external input", self.nodes[2].fundrawtransaction, rawtx) + + def test_fee_p2pkh(self): + """Compare fee of a standard pubkeyhash transaction.""" + self.log.info("Test fundrawtxn p2pkh fee") + self.lock_outputs_type(self.nodes[0], "p2pkh") + inputs = [] + outputs = {self.nodes[1].getnewaddress():1.1} + rawtx = self.nodes[0].createrawtransaction(inputs, outputs) + fundedTx = self.nodes[0].fundrawtransaction(rawtx) + + # Create same transaction over sendtoaddress. + txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1) + signedFee = self.nodes[0].getmempoolentry(txId)['fees']['base'] + + # Compare fee. + feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) + assert feeDelta >= 0 and feeDelta <= self.fee_tolerance + + self.unlock_utxos(self.nodes[0]) + + def test_fee_p2pkh_multi_out(self): + """Compare fee of a standard pubkeyhash transaction with multiple outputs.""" + self.log.info("Test fundrawtxn p2pkh fee with multiple outputs") + self.lock_outputs_type(self.nodes[0], "p2pkh") + inputs = [] + outputs = { + self.nodes[1].getnewaddress():1.1, + self.nodes[1].getnewaddress():1.2, + self.nodes[1].getnewaddress():0.1, + self.nodes[1].getnewaddress():1.3, + self.nodes[1].getnewaddress():0.2, + self.nodes[1].getnewaddress():0.3, } - tx = wallet.send(outputs=[{addr1: 8}], **options) - assert tx["complete"] - # Check that only the preset inputs were added to the tx - decoded_psbt_inputs = self.nodes[0].decodepsbt(tx["psbt"])['tx']['vin'] - assert_equal(len(decoded_psbt_inputs), 2) - for input in decoded_psbt_inputs: - assert_equal(input["txid"], source_tx["txid"]) - - # Case (5), assert that inputs are added to the tx by explicitly setting add_inputs=true - options = {"add_inputs": True, "add_to_wallet": True} - tx = wallet.send(outputs=[{addr1: 8}], **options) - assert tx["complete"] - - # 6. Explicit add_inputs=false, no preset inputs: - options = {"add_inputs": False} - assert_raises_rpc_error(-4, ERR_NOT_ENOUGH_PRESET_INPUTS, wallet.send, outputs=[{addr1: 3}], **options) - - ################################################ - - # Case (1), 'walletcreatefundedpsbt' command - # Default add_inputs value with no preset inputs (add_inputs=true) - inputs = [] - outputs = {self.nodes[1].getnewaddress(): 8} - assert "psbt" in wallet.walletcreatefundedpsbt(inputs=inputs, outputs=outputs) - - # Case (2), 'walletcreatefundedpsbt' command - # Default add_inputs value with preset inputs (add_inputs=false). - inputs = [{ - "txid": source_tx["txid"], - "vout": 1 # change position was hardcoded to index 0 - }] - outputs = {self.nodes[1].getnewaddress(): 8} - assert_raises_rpc_error(-4, ERR_NOT_ENOUGH_PRESET_INPUTS, wallet.walletcreatefundedpsbt, inputs=inputs, outputs=outputs) - - # Case (3), Explicit add_inputs=true and preset inputs (with preset inputs not-covering the target amount) - options["add_inputs"] = True - assert "psbt" in wallet.walletcreatefundedpsbt(outputs=[{addr1: 8}], inputs=inputs, **options) - - # Case (4), Explicit add_inputs=true and preset inputs (with preset inputs covering the target amount) - inputs.append({ - "txid": source_tx["txid"], - "vout": 2 # change position was hardcoded to index 0 + rawtx = self.nodes[0].createrawtransaction(inputs, outputs) + fundedTx = self.nodes[0].fundrawtransaction(rawtx) + + # Create same transaction over sendtoaddress.
+        txId = self.nodes[0].sendmany("", outputs)
+        signedFee = self.nodes[0].getmempoolentry(txId)['fees']['base']
+
+        # Compare fee.
+        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
+        assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
+
+        self.unlock_utxos(self.nodes[0])
+
+    def test_fee_p2sh(self):
+        """Compare fee of a 2-of-2 multisig p2sh transaction."""
+        self.lock_outputs_type(self.nodes[0], "p2pkh")
+        # Create 2-of-2 addr.
+        addr1 = self.nodes[1].getnewaddress()
+        addr2 = self.nodes[1].getnewaddress()
+
+        addr1Obj = self.nodes[1].getaddressinfo(addr1)
+        addr2Obj = self.nodes[1].getaddressinfo(addr2)
+
+        mSigObj = self.nodes[3].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
+
+        inputs = []
+        outputs = {mSigObj: 1.1}
+        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
+        fundedTx = self.nodes[0].fundrawtransaction(rawtx)
+
+        # Create same transaction over sendtoaddress.
+        txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
+        signedFee = self.nodes[0].getmempoolentry(txId)['fees']['base']
+
+        # Compare fee.
+        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
+        assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
+
+        self.unlock_utxos(self.nodes[0])
+
+    def test_fee_4of5(self):
+        """Compare fee of a 4-of-5 multisig p2sh transaction."""
+        self.log.info("Test fundrawtxn fee with 4-of-5 addresses")
+        self.lock_outputs_type(self.nodes[0], "p2pkh")
+
+        # Create 4-of-5 addr.
+        addr1 = self.nodes[1].getnewaddress()
+        addr2 = self.nodes[1].getnewaddress()
+        addr3 = self.nodes[1].getnewaddress()
+        addr4 = self.nodes[1].getnewaddress()
+        addr5 = self.nodes[1].getnewaddress()
+
+        addr1Obj = self.nodes[1].getaddressinfo(addr1)
+        addr2Obj = self.nodes[1].getaddressinfo(addr2)
+        addr3Obj = self.nodes[1].getaddressinfo(addr3)
+        addr4Obj = self.nodes[1].getaddressinfo(addr4)
+        addr5Obj = self.nodes[1].getaddressinfo(addr5)
+
+        mSigObj = self.nodes[1].createmultisig(
+            4,
+            [
+                addr1Obj['pubkey'],
+                addr2Obj['pubkey'],
+                addr3Obj['pubkey'],
+                addr4Obj['pubkey'],
+                addr5Obj['pubkey'],
+            ]
+        )['address']
+
+        inputs = []
+        outputs = {mSigObj: 1.1}
+        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
+        fundedTx = self.nodes[0].fundrawtransaction(rawtx)
+
+        # Create same transaction over sendtoaddress.
+        txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
+        signedFee = self.nodes[0].getmempoolentry(txId)['fees']['base']
+
+        # Compare fee.
+        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
+        assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
+
+        self.unlock_utxos(self.nodes[0])
+
+    def test_spend_2of2(self):
+        """Spend a 2-of-2 multisig transaction over fundraw."""
+        self.log.info("Test fundpsbt spending 2-of-2 multisig")
+
+        # Create 2-of-2 addr.
+        addr1 = self.nodes[2].getnewaddress()
+        addr2 = self.nodes[2].getnewaddress()
+
+        addr1Obj = self.nodes[2].getaddressinfo(addr1)
+        addr2Obj = self.nodes[2].getaddressinfo(addr2)
+
+        self.nodes[2].createwallet(wallet_name='wmulti', disable_private_keys=True)
+        wmulti = self.nodes[2].get_wallet_rpc('wmulti')
+        w2 = self.nodes[2].get_wallet_rpc(self.default_wallet_name)
+        mSigObj = wmulti.addmultisigaddress(
+            2,
+            [
+                addr1Obj['pubkey'],
+                addr2Obj['pubkey'],
+            ]
+        )['address']
+        if not self.options.descriptors:
+            wmulti.importaddress(mSigObj)
+
+        # Send 1.2 BTC to msig addr.
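The p2sh and 4-of-5 tests above repeat one comparison pattern: build the same payment twice, once through `fundrawtransaction` and once through a wallet send, then require the funded fee to be no smaller than the wallet-signed fee and within `self.fee_tolerance` of it. A minimal sketch of that invariant, with free-standing names standing in for the test fixture (illustrative, not the framework's code):

    from decimal import Decimal

    def fees_match(funded_fee, signed_fee, fee_tolerance):
        # The funded tx only estimates signature sizes, so its fee may be
        # slightly higher than the signed tx's fee, but never lower.
        delta = Decimal(funded_fee) - Decimal(signed_fee)
        return 0 <= delta <= fee_tolerance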
+        self.nodes[0].sendtoaddress(mSigObj, 1.2)
+        self.generate(self.nodes[0], 1)
+
+        oldBalance = self.nodes[1].getbalance()
+        inputs = []
+        outputs = {self.nodes[1].getnewaddress(): 1.1}
+        funded_psbt = wmulti.walletcreatefundedpsbt(inputs=inputs, outputs=outputs, changeAddress=w2.getrawchangeaddress())['psbt']
+
+        signed_psbt = w2.walletprocesspsbt(funded_psbt)
+        self.nodes[2].sendrawtransaction(signed_psbt['hex'])
+        self.generate(self.nodes[2], 1)
+
+        # Make sure funds are received at node1.
+        assert_equal(oldBalance + Decimal('1.10000000'), self.nodes[1].getbalance())
+
+        wmulti.unloadwallet()
+
+    def test_locked_wallet(self):
+        self.log.info("Test fundrawtxn with locked wallet and hardened derivation")
+
+        df_wallet = self.nodes[1].get_wallet_rpc(self.default_wallet_name)
+        self.nodes[1].createwallet(wallet_name="locked_wallet", descriptors=self.options.descriptors)
+        wallet = self.nodes[1].get_wallet_rpc("locked_wallet")
+        # This test is not meant to exercise fee estimation. Make sure all txs are sent at a consistent fee rate.
+        wallet.settxfee(self.min_relay_tx_fee)
+
+        # Add some balance to the wallet (this will be reverted at the end of the test)
+        df_wallet.sendall(recipients=[wallet.getnewaddress()])
+        self.generate(self.nodes[1], 1)
+
+        # Encrypt wallet and import descriptors
+        wallet.encryptwallet("test")
+
+        if self.options.descriptors:
+            with WalletUnlock(wallet, "test"):
+                wallet.importdescriptors([{
+                    'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPdYeeZbPSKd2KYLmeVKtcFA7kqCxDvDR13MQ6us8HopUR2wLcS2ZKPhLyKsqpDL2FtL73LMHcgoCL7DXsciA8eX8nbjCR2eG/0h/*h)'),
+                    'timestamp': 'now',
+                    'active': True
+                },
+                {
+                    'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPdYeeZbPSKd2KYLmeVKtcFA7kqCxDvDR13MQ6us8HopUR2wLcS2ZKPhLyKsqpDL2FtL73LMHcgoCL7DXsciA8eX8nbjCR2eG/1h/*h)'),
+                    'timestamp': 'now',
+                    'active': True,
+                    'internal': True
+                }])
+
+        # Drain the keypool.
+        wallet.getnewaddress()
+        wallet.getrawchangeaddress()
+
+        # Choose input
+        inputs = wallet.listunspent()
+
+        # Deduce exact fee to produce a changeless transaction
+        tx_size = 110  # Total tx size: 110 vbytes, p2wpkh -> p2wpkh. Input 68 vbytes + rest of tx is 42 vbytes.
+        value = inputs[0]["amount"] - get_fee(tx_size, self.min_relay_tx_fee)
+
+        outputs = {self.nodes[0].getnewaddress(): value}
+        rawtx = wallet.createrawtransaction(inputs, outputs)
+        # fund a transaction that does not require a new key for the change output
+        funded_tx = wallet.fundrawtransaction(rawtx)
+        assert_equal(funded_tx["changepos"], -1)
+
+        # fund a transaction that requires a new key for the change output
+        # creating the key must be impossible because the wallet is locked
+        outputs = {self.nodes[0].getnewaddress(): value - Decimal("0.1")}
+        rawtx = wallet.createrawtransaction(inputs, outputs)
+        assert_raises_rpc_error(-4, "Transaction needs a change address, but we can't generate it.", wallet.fundrawtransaction, rawtx)
+
+        # Refill the keypool.
+        with WalletUnlock(wallet, "test"):
+            wallet.keypoolrefill(8)  # need to refill the keypool to get an internal change address
+
+        assert_raises_rpc_error(-13, "walletpassphrase", wallet.sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
+
+        oldBalance = self.nodes[0].getbalance()
+
+        inputs = []
+        outputs = {self.nodes[0].getnewaddress(): 1.1}
+        rawtx = wallet.createrawtransaction(inputs, outputs)
+        fundedTx = wallet.fundrawtransaction(rawtx)
+        assert_not_equal(fundedTx["changepos"], -1)
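For the changeless case above, the target value is derived so that the input amount minus the fee leaves exactly nothing for change. Under the test's stated assumptions (one P2WPKH input at 68 vbytes, the rest of a one-output tx at 42 vbytes, 110 vbytes total), the helper arithmetic looks roughly like this sketch; `approx_fee` is an illustrative stand-in for the framework's fee helper:

    from decimal import Decimal

    def approx_fee(tx_vsize, feerate_btc_per_kvb):
        # fee = vsize * feerate, with the feerate quoted per 1000 vbytes
        return Decimal(tx_vsize) * Decimal(feerate_btc_per_kvb) / 1000

    # e.g. a 110-vbyte p2wpkh->p2wpkh tx at a 0.00001 BTC/kvB floor pays
    # 110 * 0.00001 / 1000 = 0.0000011 BTC, so sending
    # utxo_amount - that fee consumes the coin exactly, with no change output.
    assert approx_fee(110, "0.00001") == Decimal("0.0000011")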
+
+        # Now we need to unlock.
+        with WalletUnlock(wallet, "test"):
+            signedTx = wallet.signrawtransactionwithwallet(fundedTx['hex'])
+            wallet.sendrawtransaction(signedTx['hex'])
+            self.generate(self.nodes[1], 1)
+
+        # Make sure funds are received at node0.
+        assert_equal(oldBalance + Decimal('51.10000000'), self.nodes[0].getbalance())
+
+        # Restore pre-test wallet state
+        wallet.sendall(recipients=[df_wallet.getnewaddress(), df_wallet.getnewaddress(), df_wallet.getnewaddress()])
+        wallet.unloadwallet()
+        self.generate(self.nodes[1], 1)
+
+    def test_many_inputs_fee(self):
+        """Multiple (~19) inputs tx test | Compare fee."""
+        self.log.info("Test fundrawtxn fee with many inputs")
+
+        # Empty node1, send some small coins from node0 to node1.
+        self.nodes[1].sendall(recipients=[self.nodes[0].getnewaddress()])
+        self.generate(self.nodes[1], 1)
+
+        for _ in range(20):
+            self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
+        self.generate(self.nodes[0], 1)
+
+        # Fund a tx with ~20 small inputs.
+        inputs = []
+        outputs = {self.nodes[0].getnewaddress(): 0.15, self.nodes[0].getnewaddress(): 0.04}
+        rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
+        fundedTx = self.nodes[1].fundrawtransaction(rawtx)
+
+        # Create same transaction over sendtoaddress.
+        txId = self.nodes[1].sendmany("", outputs)
+        signedFee = self.nodes[1].getmempoolentry(txId)['fees']['base']
+
+        # Compare fee.
+        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
+        assert feeDelta >= 0 and feeDelta <= self.fee_tolerance * 19  # ~19 inputs
+
+    def test_many_inputs_send(self):
+        """Multiple (~19) inputs tx test | sign/send."""
+        self.log.info("Test fundrawtxn sign+send with many inputs")
+
+        # Again, empty node1, send some small coins from node0 to node1.
+        self.nodes[1].sendall(recipients=[self.nodes[0].getnewaddress()])
+        self.generate(self.nodes[1], 1)
+
+        for _ in range(20):
+            self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
+        self.generate(self.nodes[0], 1)
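A brief note on the `fee_tolerance * 19` bound used in the fee comparison above: each signed input can come out a byte or two smaller than the size estimated at funding time, so the acceptable gap between the estimated and the signed fee grows roughly linearly with the input count. A standalone sketch of that scaling (illustrative helper, not the framework's code):

    from decimal import Decimal

    def within_many_input_tolerance(fee_delta, fee_tolerance, num_inputs):
        # Each input contributes its own estimate-vs-signature slack,
        # so the permissible fee gap scales with the number of inputs.
        return Decimal(0) <= fee_delta <= fee_tolerance * num_inputs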
+
+        # Fund a tx with ~20 small inputs.
+        oldBalance = self.nodes[0].getbalance()
+
+        inputs = []
+        outputs = {self.nodes[0].getnewaddress(): 0.15, self.nodes[0].getnewaddress(): 0.04}
+        rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
+        fundedTx = self.nodes[1].fundrawtransaction(rawtx)
+        fundedAndSignedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
+        self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
+        self.generate(self.nodes[1], 1)
+        assert_equal(oldBalance + Decimal('50.19000000'), self.nodes[0].getbalance())  # 0.19 + block reward
+
+    def test_op_return(self):
+        self.log.info("Test fundrawtxn with OP_RETURN and no vin")
+
+        rawtx = "0100000000010000000000000000066a047465737400000000"
+        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
+
+        assert_equal(len(dec_tx['vin']), 0)
+        assert_equal(len(dec_tx['vout']), 1)
+
+        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
+        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
+
+        assert_greater_than(len(dec_tx['vin']), 0)  # at least one vin
+        assert_equal(len(dec_tx['vout']), 2)  # one change output added
+
+    def test_watchonly(self):
+        self.log.info("Test fundrawtxn using only watchonly")
+
+        inputs = []
+        outputs = {self.nodes[2].getnewaddress(): self.watchonly_amount / 2}
+        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
+
+        self.nodes[3].loadwallet('wwatch')
+        wwatch = self.nodes[3].get_wallet_rpc('wwatch')
+        # Setup change addresses for the watchonly wallet
+        desc_import = [{
+            "desc": descsum_create("wpkh(tpubD6NzVbkrYhZ4YNXVQbNhMK1WqguFsUXceaVJKbmno2aZ3B6QfbMeraaYvnBSGpV3vxLyTTK9DYT1yoEck4XUScMzXoQ2U2oSmE2JyMedq3H/1/*)"),
+            "timestamp": "now",
+            "internal": True,
+            "active": True,
+            "keypool": True,
+            "range": [0, 100],
+            "watchonly": True,
+        }]
+        if self.options.descriptors:
+            wwatch.importdescriptors(desc_import)
+        else:
+            wwatch.importmulti(desc_import)
+
+        # Backward compatibility test (2nd param is includeWatching)
+        result = wwatch.fundrawtransaction(rawtx, True)
+        res_dec = self.nodes[0].decoderawtransaction(result["hex"])
+        assert_equal(len(res_dec["vin"]), 1)
+        assert_equal(res_dec["vin"][0]["txid"], self.watchonly_utxo['txid'])
+
+        assert "fee" in result.keys()
+        assert_greater_than(result["changepos"], -1)
+
+        wwatch.unloadwallet()
+
+    def test_all_watched_funds(self):
+        self.log.info("Test fundrawtxn using entirety of watched funds")
+
+        inputs = []
+        outputs = {self.nodes[2].getnewaddress(): self.watchonly_amount}
+        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
+
+        self.nodes[3].loadwallet('wwatch')
+        wwatch = self.nodes[3].get_wallet_rpc('wwatch')
+        w3 = self.nodes[3].get_wallet_rpc(self.default_wallet_name)
+        result = wwatch.fundrawtransaction(rawtx, includeWatching=True, changeAddress=w3.getrawchangeaddress(), subtractFeeFromOutputs=[0])
+        res_dec = self.nodes[0].decoderawtransaction(result["hex"])
+        assert_equal(len(res_dec["vin"]), 1)
+        assert res_dec["vin"][0]["txid"] == self.watchonly_utxo['txid']
+
+        assert_greater_than(result["fee"], 0)
+        assert_equal(result["changepos"], -1)
+        assert_equal(result["fee"] + res_dec["vout"][0]["value"], self.watchonly_amount)
+
+        signedtx = wwatch.signrawtransactionwithwallet(result["hex"])
+        assert not signedtx["complete"]
+        signedtx = self.nodes[0].signrawtransactionwithwallet(signedtx["hex"])
+        assert signedtx["complete"]
+        self.nodes[0].sendrawtransaction(signedtx["hex"])
+        self.generate(self.nodes[0], 1)
+
+        wwatch.unloadwallet()
+
+    def test_option_feerate(self):
+        self.log.info("Test fundrawtxn with explicit fee rates (fee_rate sat/vB and feeRate BTC/kvB)")
+        node = self.nodes[3]
+        # Make sure there is exactly one input so coin selection can't skew the result.
+        assert_equal(len(self.nodes[3].listunspent(1)), 1)
+        inputs = []
+        outputs = {node.getnewaddress(): 1}
+        rawtx = node.createrawtransaction(inputs, outputs)
+
+        result = node.fundrawtransaction(rawtx)  # uses self.min_relay_tx_fee (set by settxfee)
+        btc_kvb_to_sat_vb = 100000  # (1e5)
+        result1 = node.fundrawtransaction(rawtx, fee_rate=str(2 * btc_kvb_to_sat_vb * self.min_relay_tx_fee))
+        result2 = node.fundrawtransaction(rawtx, feeRate=2 * self.min_relay_tx_fee)
+        result3 = node.fundrawtransaction(rawtx, fee_rate=10 * btc_kvb_to_sat_vb * self.min_relay_tx_fee)
+        result4 = node.fundrawtransaction(rawtx, feeRate=str(10 * self.min_relay_tx_fee))
+
+        result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
+        assert_fee_amount(result1['fee'], count_bytes(result1['hex']), 2 * result_fee_rate)
+        assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
+        assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
+        assert_fee_amount(result4['fee'], count_bytes(result4['hex']), 10 * result_fee_rate)
+
+        # Test that funding non-standard "zero-fee" transactions is valid.
+        for param, zero_value in product(["fee_rate", "feeRate"], [0, 0.000, 0.00000000, "0", "0.000", "0.00000000"]):
+            assert_equal(self.nodes[3].fundrawtransaction(rawtx, {param: zero_value})["fee"], 0)
+
+        # With no arguments passed, expect fee of 141 satoshis.
+        assert_approx(node.fundrawtransaction(rawtx)["fee"], vexp=0.00000141, vspan=0.00000001)
+        # Expect fee to be 10,000x higher when an explicit fee rate 10,000x greater is specified.
+        result = node.fundrawtransaction(rawtx, fee_rate=10000)
+        assert_approx(result["fee"], vexp=0.0141, vspan=0.0001)
+
+        self.log.info("Test fundrawtxn with invalid estimate_mode settings")
+        for k, v in {"number": 42, "object": {"foo": "bar"}}.items():
+            assert_raises_rpc_error(-3, f"JSON value of type {k} for field estimate_mode is not of expected type string",
+                node.fundrawtransaction, rawtx, estimate_mode=v, conf_target=0.1, add_inputs=True)
+        for mode in ["", "foo", Decimal("3.141592")]:
+            assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"',
+                node.fundrawtransaction, rawtx, estimate_mode=mode, conf_target=0.1, add_inputs=True)
+
+        self.log.info("Test fundrawtxn with invalid conf_target settings")
+        for mode in ["unset", "economical", "conservative"]:
+            self.log.debug("{}".format(mode))
+            for k, v in {"string": "", "object": {"foo": "bar"}}.items():
+                assert_raises_rpc_error(-3, f"JSON value of type {k} for field conf_target is not of expected type number",
+                    node.fundrawtransaction, rawtx, estimate_mode=mode, conf_target=v, add_inputs=True)
+            for n in [-1, 0, 1009]:
+                assert_raises_rpc_error(-8, "Invalid conf_target, must be between 1 and 1008",  # max value of 1008 per src/policy/fees.h
+                    node.fundrawtransaction, rawtx, estimate_mode=mode, conf_target=n, add_inputs=True)
+
+        self.log.info("Test invalid fee rate settings")
+        for param, value in {("fee_rate", 100000), ("feeRate", 1.000)}:
+            assert_raises_rpc_error(-4, "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)",
+                node.fundrawtransaction, rawtx, add_inputs=True, **{param: value})
+            assert_raises_rpc_error(-3, "Amount out of range",
+                node.fundrawtransaction, rawtx, add_inputs=True, **{param: -1})
+            assert_raises_rpc_error(-3, "Amount is not a number or string",
+                node.fundrawtransaction, rawtx, add_inputs=True, **{param: {"foo": "bar"}})
+            # Test fee rate values that don't pass fixed-point parsing checks.
+            for invalid_value in ["", 0.000000001, 1e-09, 1.111111111, 1111111111111111, "31.999999999999999999999"]:
+                assert_raises_rpc_error(-3, "Invalid amount", node.fundrawtransaction, rawtx, add_inputs=True, **{param: invalid_value})
+        # Test fee_rate values that cannot be represented in sat/vB.
+        for invalid_value in [0.0001, 0.00000001, 0.00099999, 31.99999999]:
+            assert_raises_rpc_error(-3, "Invalid amount",
+                node.fundrawtransaction, rawtx, fee_rate=invalid_value, add_inputs=True)
+
+        self.log.info("Test min fee rate checks are bypassed with fundrawtxn, e.g. a fee_rate under 1 sat/vB is allowed")
+        node.fundrawtransaction(rawtx, fee_rate=0.999, add_inputs=True)
+        node.fundrawtransaction(rawtx, feeRate=0.00000999, add_inputs=True)
+
+        self.log.info("- raises RPC error if both feeRate and fee_rate are passed")
+        assert_raises_rpc_error(-8, "Cannot specify both fee_rate (sat/vB) and feeRate (BTC/kvB)",
+            node.fundrawtransaction, rawtx, fee_rate=0.1, feeRate=0.1, add_inputs=True)
+
+        self.log.info("- raises RPC error if both feeRate and estimate_mode passed")
+        assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and feeRate",
+            node.fundrawtransaction, rawtx, estimate_mode="economical", feeRate=0.1, add_inputs=True)
+
+        for param in ["feeRate", "fee_rate"]:
+            self.log.info("- raises RPC error if both {} and conf_target are passed".format(param))
+            assert_raises_rpc_error(-8, "Cannot specify both conf_target and {}. Please provide either a confirmation "
+                "target in blocks for automatic fee estimation, or an explicit fee rate.".format(param),
+                node.fundrawtransaction, rawtx, {param: 1, "conf_target": 1, "add_inputs": True})
+
+        self.log.info("- raises RPC error if both fee_rate and estimate_mode are passed")
+        assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and fee_rate",
+            node.fundrawtransaction, rawtx, fee_rate=1, estimate_mode="economical", add_inputs=True)
+
+    def test_address_reuse(self):
+        """Test no address reuse occurs."""
+        self.log.info("Test fundrawtxn does not reuse addresses")
+
+        rawtx = self.nodes[3].createrawtransaction(inputs=[], outputs={self.nodes[3].getnewaddress(): 1})
+        result3 = self.nodes[3].fundrawtransaction(rawtx)
+        res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
+        changeaddress = ""
+        for out in res_dec['vout']:
+            if out['value'] > 1.0:
+                changeaddress += out['scriptPubKey']['address']
+        assert_not_equal(changeaddress, "")
+        nextaddr = self.nodes[3].getnewaddress()
+        # Now the change address key should be removed from the keypool.
+        assert_not_equal(changeaddress, nextaddr)
+
+    def test_option_subtract_fee_from_outputs(self):
+        self.log.info("Test fundrawtxn subtractFeeFromOutputs option")
+
+        # Make sure there is exactly one input so coin selection can't skew the result.
+        assert_equal(len(self.nodes[3].listunspent(1)), 1)
+
+        inputs = []
+        outputs = {self.nodes[2].getnewaddress(): 1}
+        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
+
+        # Test subtract fee from outputs with feeRate (BTC/kvB)
+        result = [self.nodes[3].fundrawtransaction(rawtx),  # uses self.min_relay_tx_fee (set by settxfee)
+            self.nodes[3].fundrawtransaction(rawtx, subtractFeeFromOutputs=[]),  # empty subtraction list
+            self.nodes[3].fundrawtransaction(rawtx, subtractFeeFromOutputs=[0]),  # uses self.min_relay_tx_fee (set by settxfee)
+            self.nodes[3].fundrawtransaction(rawtx, feeRate=2 * self.min_relay_tx_fee),
+            self.nodes[3].fundrawtransaction(rawtx, feeRate=2 * self.min_relay_tx_fee, subtractFeeFromOutputs=[0]),]
+        dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result]
+        output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
+        change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
+
+        assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
+        assert_equal(result[3]['fee'], result[4]['fee'])
+        assert_equal(change[0], change[1])
+        assert_equal(output[0], output[1])
+        assert_equal(output[0], output[2] + result[2]['fee'])
+        assert_equal(change[0] + result[0]['fee'], change[2])
+        assert_equal(output[3], output[4] + result[4]['fee'])
+        assert_equal(change[3] + result[3]['fee'], change[4])
+
+        # Test subtract fee from outputs with fee_rate (sat/vB)
+        btc_kvb_to_sat_vb = 100000  # (1e5)
+        result = [self.nodes[3].fundrawtransaction(rawtx),  # uses self.min_relay_tx_fee (set by settxfee)
+            self.nodes[3].fundrawtransaction(rawtx, subtractFeeFromOutputs=[]),  # empty subtraction list
+            self.nodes[3].fundrawtransaction(rawtx, subtractFeeFromOutputs=[0]),  # uses self.min_relay_tx_fee (set by settxfee)
+            self.nodes[3].fundrawtransaction(rawtx, fee_rate=2 * btc_kvb_to_sat_vb * self.min_relay_tx_fee),
+            self.nodes[3].fundrawtransaction(rawtx, fee_rate=2 * btc_kvb_to_sat_vb * self.min_relay_tx_fee, subtractFeeFromOutputs=[0]),]
+        dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result]
+        output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
+        change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
+
+        assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
+        assert_equal(result[3]['fee'], result[4]['fee'])
+        assert_equal(change[0], change[1])
+        assert_equal(output[0], output[1])
+        assert_equal(output[0], output[2] + result[2]['fee'])
+        assert_equal(change[0] + result[0]['fee'], change[2])
+        assert_equal(output[3], output[4] + result[4]['fee'])
+        assert_equal(change[3] + result[3]['fee'], change[4])
+
+        inputs = []
+        outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
+        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
+
+        result = [self.nodes[3].fundrawtransaction(rawtx),
+            # Split the fee between outputs 0, 2, and 3, but not output 1.
+            self.nodes[3].fundrawtransaction(rawtx, subtractFeeFromOutputs=[0, 2, 3])]
+
+        dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
+            self.nodes[3].decoderawtransaction(result[1]['hex'])]
+
+        # Nested list of non-change output amounts for each transaction.
+        output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
+            for d, r in zip(dec_tx, result)]
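The assertions above encode the subtractFeeFromOutputs bookkeeping: without SFFO the fee is paid on top of the target (out of change), with SFFO it is deducted from the listed output, and the totals must reconcile either way. A compact sketch of that invariant with illustrative numbers (not values taken from the run):

    from decimal import Decimal

    fee = Decimal("0.0000025")
    target = Decimal("1")
    funds = Decimal("4")  # assumed single input

    # Without SFFO: recipient gets the full target, fee comes out of change.
    out_plain, change_plain = target, funds - target - fee
    # With subtractFeeFromOutputs=[0]: fee comes out of output 0 instead,
    # so change is larger by exactly the fee.
    out_sffo, change_sffo = target - fee, change_plain + fee

    assert out_plain == out_sffo + fee           # mirrors output[0] == output[2] + fee
    assert change_plain + fee == change_sffo     # mirrors change[0] + fee == change[2]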
+
+        # List of differences in output amounts between normal and subtractFee transactions.
+        share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
+
+        # Output 1 is the same in both transactions.
+        assert_equal(share[1], 0)
+
+        # The other 3 outputs are smaller as a result of subtractFeeFromOutputs.
+        assert_greater_than(share[0], 0)
+        assert_greater_than(share[2], 0)
+        assert_greater_than(share[3], 0)
+
+        # Outputs 2 and 3 take the same share of the fee.
+        assert_equal(share[2], share[3])
+
+        # Output 0 takes at least as much share of the fee, and no more than 2
+        # satoshis more, than outputs 2 and 3.
+        assert_greater_than_or_equal(share[0], share[2])
+        assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
+
+        # The fee is the same in both transactions.
+        assert_equal(result[0]['fee'], result[1]['fee'])
+
+        # The total subtracted from the outputs is equal to the fee.
+        assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
+
+    def test_subtract_fee_with_presets(self):
+        self.log.info("Test fundrawtxn subtract fee from outputs with preset inputs that are sufficient")
+
+        addr = self.nodes[0].getnewaddress()
+        utxo = self.create_outpoints(self.nodes[0], outputs=[{addr: 10}])[0]
+
+        rawtx = self.nodes[0].createrawtransaction([utxo], [{self.nodes[0].getnewaddress(): 5}])
+        fundedtx = self.nodes[0].fundrawtransaction(rawtx, subtractFeeFromOutputs=[0])
+        signedtx = self.nodes[0].signrawtransactionwithwallet(fundedtx['hex'])
+        self.nodes[0].sendrawtransaction(signedtx['hex'])
+
+    def test_transaction_too_large(self):
+        self.log.info("Test fundrawtx where BnB solution would result in a too large transaction, but Knapsack would not")
+        self.nodes[0].createwallet("large")
+        wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
+        recipient = self.nodes[0].get_wallet_rpc("large")
+        outputs = {}
+        rawtx = recipient.createrawtransaction([], {wallet.getnewaddress(): 147.99899260})
+
+        # Make 1500 0.1 BTC outputs. The amount that we target for funding is in
+        # the BnB range when these outputs are used. However if these outputs
+        # are selected, the transaction will end up being too large, so it
+        # shouldn't use BnB and instead fall back to Knapsack but that behavior
+        # is not implemented yet. For now we just check that we get an error.
+        # First, force the wallet to bulk-generate the addresses we'll need.
+        recipient.keypoolrefill(1500)
+        for _ in range(1500):
+            outputs[recipient.getnewaddress()] = 0.1
+        wallet.sendmany("", outputs)
+        self.generate(self.nodes[0], 10)
+        assert_raises_rpc_error(-4, "The inputs size exceeds the maximum weight. "
+            "Please try sending a smaller amount or manually consolidating your wallet's UTXOs",
+            recipient.fundrawtransaction, rawtx)
+        self.nodes[0].unloadwallet("large")
+
+    def test_external_inputs(self):
+        self.log.info("Test funding with external inputs")
+        privkey, _ = generate_keypair(wif=True)
+        self.nodes[2].createwallet("extfund")
+        wallet = self.nodes[2].get_wallet_rpc("extfund")
+
+        # Make a weird but signable script. sh(pkh()) descriptor accomplishes this
+        desc = descsum_create("sh(pkh({}))".format(privkey))
+        if self.options.descriptors:
+            res = self.nodes[0].importdescriptors([{"desc": desc, "timestamp": "now"}])
+        else:
+            res = self.nodes[0].importmulti([{"desc": desc, "timestamp": "now"}])
+        assert res[0]["success"]
+        addr = self.nodes[0].deriveaddresses(desc)[0]
+        addr_info = self.nodes[0].getaddressinfo(addr)
+
+        self.nodes[0].sendtoaddress(addr, 10)
+        self.nodes[0].sendtoaddress(wallet.getnewaddress(), 10)
+        self.generate(self.nodes[0], 6)
+        ext_utxo = self.nodes[0].listunspent(addresses=[addr])[0]
+
+        # An external input without solving data should result in an error
+        raw_tx = wallet.createrawtransaction([ext_utxo], {self.nodes[0].getnewaddress(): ext_utxo["amount"] / 2})
+        assert_raises_rpc_error(-4, "Not solvable pre-selected input COutPoint(%s, %s)" % (ext_utxo["txid"][0:10], ext_utxo["vout"]), wallet.fundrawtransaction, raw_tx)
+
+        # Error conditions
+        assert_raises_rpc_error(-5, 'Pubkey "not a pubkey" must be a hex string', wallet.fundrawtransaction, raw_tx, solving_data={"pubkeys": ["not a pubkey"]})
+        assert_raises_rpc_error(-5, 'Pubkey "01234567890a0b0c0d0e0f" must have a length of either 33 or 65 bytes', wallet.fundrawtransaction, raw_tx, solving_data={"pubkeys": ["01234567890a0b0c0d0e0f"]})
+        assert_raises_rpc_error(-5, "'not a script' is not hex", wallet.fundrawtransaction, raw_tx, solving_data={"scripts": ["not a script"]})
+        assert_raises_rpc_error(-8, "Unable to parse descriptor 'not a descriptor'", wallet.fundrawtransaction, raw_tx, solving_data={"descriptors": ["not a descriptor"]})
+        assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", wallet.fundrawtransaction, raw_tx, input_weights=[{"txid": ext_utxo["txid"]}])
+        assert_raises_rpc_error(-8, "Invalid parameter, vout cannot be negative", wallet.fundrawtransaction, raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": -1}])
+        assert_raises_rpc_error(-8, "Invalid parameter, missing weight key", wallet.fundrawtransaction, raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"]}])
+        assert_raises_rpc_error(-8, "Invalid parameter, weight cannot be less than 165", wallet.fundrawtransaction, raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 164}])
+        assert_raises_rpc_error(-8, "Invalid parameter, weight cannot be less than 165", wallet.fundrawtransaction, raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": -1}])
+        assert_raises_rpc_error(-8, "Invalid parameter, weight cannot be greater than", wallet.fundrawtransaction, raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 400001}])
+
+        # But funding should work when the solving data is provided
+        funded_tx = wallet.fundrawtransaction(raw_tx, solving_data={"pubkeys": [addr_info['pubkey']], "scripts": [addr_info["embedded"]["scriptPubKey"]]})
+        signed_tx = wallet.signrawtransactionwithwallet(funded_tx['hex'])
+        assert not signed_tx['complete']
+        signed_tx = self.nodes[0].signrawtransactionwithwallet(signed_tx['hex'])
+        assert signed_tx['complete']
+
+        funded_tx = wallet.fundrawtransaction(raw_tx, solving_data={"descriptors": [desc]})
+        signed_tx1 = wallet.signrawtransactionwithwallet(funded_tx['hex'])
+        assert not signed_tx1['complete']
+        signed_tx2 = self.nodes[0].signrawtransactionwithwallet(signed_tx1['hex'])
+        assert signed_tx2['complete']
+
+        unsigned_weight = self.nodes[0].decoderawtransaction(signed_tx1["hex"])["weight"]
+        signed_weight = self.nodes[0].decoderawtransaction(signed_tx2["hex"])["weight"]
+        # Input's weight is difference between weight of signed and unsigned,
+        # and the weight of stuff that didn't change (prevout, sequence, 1 byte of scriptSig)
+        input_weight = signed_weight - unsigned_weight + (41 * 4)
+        low_input_weight = input_weight // 2
+        high_input_weight = input_weight * 2
+
+        # Funding should also work if the input weight is provided
+        funded_tx = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": input_weight}], fee_rate=2)
+        signed_tx = wallet.signrawtransactionwithwallet(funded_tx["hex"])
+        signed_tx = self.nodes[0].signrawtransactionwithwallet(signed_tx["hex"])
+        assert_equal(self.nodes[0].testmempoolaccept([signed_tx["hex"]])[0]["allowed"], True)
+        assert_equal(signed_tx["complete"], True)
+        # Reducing the weight should have a lower fee
+        funded_tx2 = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": low_input_weight}], fee_rate=2)
+        assert_greater_than(funded_tx["fee"], funded_tx2["fee"])
+        # Increasing the weight should have a higher fee
+        funded_tx2 = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}], fee_rate=2)
+        assert_greater_than(funded_tx2["fee"], funded_tx["fee"])
+        # The provided weight should override the calculated weight when solving data is provided
+        funded_tx3 = wallet.fundrawtransaction(raw_tx, solving_data={"descriptors": [desc]}, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}], fee_rate=2)
+        assert_equal(funded_tx2["fee"], funded_tx3["fee"])
+        # The feerate should be met
+        funded_tx4 = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}], fee_rate=10)
+        input_add_weight = high_input_weight - (41 * 4)
+        tx4_weight = wallet.decoderawtransaction(funded_tx4["hex"])["weight"] + input_add_weight
+        tx4_vsize = int(ceil(tx4_weight / 4))
+        assert_fee_amount(funded_tx4["fee"], tx4_vsize, Decimal(0.0001))
+
+        # Funding with weight at csuint boundaries should not cause problems
+        funded_tx = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 255}], fee_rate=2)
+        funded_tx = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 65539}], fee_rate=2)
+
+        self.nodes[2].unloadwallet("extfund")
+
+    def test_add_inputs_default_value(self):
+        self.log.info("Test 'add_inputs' default value")
+
+        # Create and fund the wallet with 5 BTC
+        self.nodes[2].createwallet("test_preset_inputs")
+        wallet = self.nodes[2].get_wallet_rpc("test_preset_inputs")
+        addr1 = wallet.getnewaddress(address_type="bech32")
+        self.nodes[0].sendtoaddress(addr1, 5)
+        self.generate(self.nodes[0], 1)
+
+        # Covered cases:
+        # 1. Default add_inputs value with no preset inputs (add_inputs=true):
+        #    Expect: automatically add coins from the wallet to the tx.
+        # 2. Default add_inputs value with preset inputs (add_inputs=false):
+        #    Expect: disallow automatic coin selection.
+        # 3. Explicit add_inputs=true and preset inputs (with preset inputs not-covering the target amount).
+        #    Expect: include inputs from the wallet.
+        # 4. Explicit add_inputs=true and preset inputs (with preset inputs covering the target amount).
+        #    Expect: only preset inputs are used.
+        # 5. Explicit add_inputs=true, no preset inputs (same as (1) but with an explicit set):
+        #    Expect: include inputs from the wallet.
+        # 6. Explicit add_inputs=false, no preset inputs:
+        #    Expect: failure as we did not provide inputs and the process cannot automatically select coins.
+
+        # Case (1), 'send' command
+        # 'add_inputs' value is true unless "inputs" are specified, in such case, add_inputs=false.
+        # So, the wallet will automatically select coins and create the transaction if only the outputs are provided.
+        tx = wallet.send(outputs=[{addr1: 3}])
+        assert tx["complete"]
+
+        # Case (2), 'send' command
+        # Select an input manually, which doesn't cover the entire output amount and
+        # verify that the dynamically set 'add_inputs=false' value works.
+
+        # Fund wallet with 2 outputs, 5 BTC each.
+        addr2 = wallet.getnewaddress(address_type="bech32")
+        source_tx = self.nodes[0].send(outputs=[{addr1: 5}, {addr2: 5}], change_position=0)
+        self.generate(self.nodes[0], 1)
+
+        # Select only one input.
+        options = {
+            "inputs": [
+                {
+                    "txid": source_tx["txid"],
+                    "vout": 1 # change position was hardcoded to index 0
+                }
+            ]
+        }
-        psbt_tx = wallet.walletcreatefundedpsbt(outputs=[{addr1: 8}], inputs=inputs, **options)
-        # Check that only the preset inputs were added to the tx
-        decoded_psbt_inputs = self.nodes[0].decodepsbt(psbt_tx["psbt"])['tx']['vin']
-        assert_equal(len(decoded_psbt_inputs), 2)
-        for input in decoded_psbt_inputs:
-            assert_equal(input["txid"], source_tx["txid"])
-
-        # Case (5), 'walletcreatefundedpsbt' command
-        # Explicit add_inputs=true, no preset inputs
-        options = {
-            "add_inputs": True
-        }
-        assert "psbt" in wallet.walletcreatefundedpsbt(inputs=[], outputs=outputs, **options)
-
-        # Case (6). Explicit add_inputs=false, no preset inputs:
-        options = {"add_inputs": False}
-        assert_raises_rpc_error(-4, ERR_NOT_ENOUGH_PRESET_INPUTS, wallet.walletcreatefundedpsbt, inputs=[], outputs=outputs, **options)
-
-        self.nodes[2].unloadwallet("test_preset_inputs")
-
-    def test_preset_inputs_selection(self):
-        self.log.info('Test wallet preset inputs are not double-counted or reused in coin selection')
-
-        # Create and fund the wallet with 4 UTXO of 5 BTC each (20 BTC total)
-        self.nodes[2].createwallet("test_preset_inputs_selection")
-        wallet = self.nodes[2].get_wallet_rpc("test_preset_inputs_selection")
-        outputs = {}
-        for _ in range(4):
-            outputs[wallet.getnewaddress(address_type="bech32")] = 5
-        self.nodes[0].sendmany("", outputs)
-        self.generate(self.nodes[0], 1)
-
-        # Select the preset inputs
-        coins = wallet.listunspent()
-        preset_inputs = [coins[0], coins[1], coins[2]]
-
-        # Now let's create the tx creation options
-        options = {
-            "inputs": preset_inputs,
-            "add_inputs": True, # automatically add coins from the wallet to fulfill the target
-            "subtract_fee_from_outputs": [0], # deduct fee from first output
-            "add_to_wallet": False
-        }
-
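The six cases enumerated above reduce to one rule: `add_inputs` defaults to true when no inputs are preselected and to false when they are, and an explicit caller value always wins. A standalone decision sketch of that rule (illustrative helper, not the wallet's implementation):

    def effective_add_inputs(preset_inputs, add_inputs=None):
        # Explicit caller choice always wins; otherwise the default flips
        # on whether any inputs were preselected.
        if add_inputs is not None:
            return add_inputs
        return len(preset_inputs) == 0

    assert effective_add_inputs([]) is True                            # case 1
    assert effective_add_inputs([{"txid": "aa" * 32, "vout": 1}]) is False  # case 2
    assert effective_add_inputs([], add_inputs=False) is False         # case 6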
-        # Attempt to send 29 BTC from a wallet that only has 20 BTC. The wallet should exclude
-        # the preset inputs from the pool of available coins, realize that there is not enough
-        # money to fund the 29 BTC payment, and fail with "Insufficient funds".
-        #
-        # Even with SFFO, the wallet can only afford to send 20 BTC.
-        # If the wallet does not properly exclude preset inputs from the pool of available coins
-        # prior to coin selection, it may create a transaction that does not fund the full payment
-        # amount or, through SFFO, incorrectly reduce the recipient's amount by the difference
-        # between the original target and the wrongly counted inputs (in this case 9 BTC)
-        # so that the recipient's amount is no longer equal to the user's selected target of 29 BTC.
-
-        # First case, use 'subtract_fee_from_outputs = true'
-        assert_raises_rpc_error(-4, "Insufficient funds", wallet.send, outputs=[{wallet.getnewaddress(address_type="bech32"): 29}], options=options)
-
-        # Second case, don't use 'subtract_fee_from_outputs'
-        del options["subtract_fee_from_outputs"]
-        assert_raises_rpc_error(-4, "Insufficient funds", wallet.send, outputs=[{wallet.getnewaddress(address_type="bech32"): 29}], options=options)
-
-        self.nodes[2].unloadwallet("test_preset_inputs_selection")
-
-    def test_weight_calculation(self):
-        self.log.info("Test weight calculation with external inputs")
-
-        self.nodes[2].createwallet("test_weight_calculation")
-        wallet = self.nodes[2].get_wallet_rpc("test_weight_calculation")
-
-        addr = wallet.getnewaddress(address_type="bech32")
-        ext_addr = self.nodes[0].getnewaddress(address_type="bech32")
-        utxo, ext_utxo = self.create_outpoints(self.nodes[0], outputs=[{addr: 5}, {ext_addr: 5}])
-
-        self.nodes[0].sendtoaddress(wallet.getnewaddress(address_type="bech32"), 5)
-        self.generate(self.nodes[0], 1)
-
-        rawtx = wallet.createrawtransaction([utxo], [{self.nodes[0].getnewaddress(address_type="bech32"): 8}])
-        fundedtx = wallet.fundrawtransaction(rawtx, fee_rate=10, change_type="bech32")
-        # with 71-byte signatures we should expect following tx size
-        # tx overhead (10) + 2 inputs (41 each) + 2 p2wpkh (31 each) + (segwit marker and flag (2) + 2 p2wpkh 71 byte sig witnesses (107 each)) / witness scaling factor (4)
-        tx_size = ceil(10 + 41*2 + 31*2 + (2 + 107*2)/4)
-        assert_equal(fundedtx['fee'] * COIN, tx_size * 10)
-
-        # Using the other output should have 72 byte sigs
-        rawtx = wallet.createrawtransaction([ext_utxo], [{self.nodes[0].getnewaddress(): 13}])
-        ext_desc = self.nodes[0].getaddressinfo(ext_addr)["desc"]
-        fundedtx = wallet.fundrawtransaction(rawtx, fee_rate=10, change_type="bech32", solving_data={"descriptors": [ext_desc]})
-        # tx overhead (10) + 3 inputs (41 each) + 2 p2wpkh (31 each) + (segwit marker and flag (2) + 2 p2wpkh 71 byte sig witnesses (107 each) + p2wpkh 72 byte sig witness (108)) / witness scaling factor (4)
-        tx_size = ceil(10 + 41*3 + 31*2 + (2 + 107*2 + 108)/4)
-        assert_equal(fundedtx['fee'] * COIN, tx_size * 10)
-
-        self.nodes[2].unloadwallet("test_weight_calculation")
-
-    def test_weight_limits(self):
-        self.log.info("Test weight limits")
-
-        self.nodes[2].createwallet("test_weight_limits")
-        wallet = self.nodes[2].get_wallet_rpc("test_weight_limits")
-
-        outputs = []
-        for _ in range(1472):
-            outputs.append({wallet.getnewaddress(address_type="legacy"): 0.1})
-        txid = self.nodes[0].send(outputs=outputs, change_position=0)["txid"]
-        self.generate(self.nodes[0], 1)
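The expected sizes in test_weight_calculation follow directly from segwit's discount: vsize is base bytes plus witness bytes divided by the scaling factor of 4, rounded up. A worked check of the two totals asserted above, using only the numbers already given in the comments:

    from math import ceil

    # Two-input case: 10 vB overhead, 2 inputs at 41 vB, 2 P2WPKH outputs at
    # 31 vB, plus witness data (marker/flag 2 + two 107-byte witnesses) / 4.
    two_in = ceil(10 + 41 * 2 + 31 * 2 + (2 + 107 * 2) / 4)            # 208 vbytes
    # Three-input case adds one 41 vB input and one 108-byte (72-byte sig) witness.
    three_in = ceil(10 + 41 * 3 + 31 * 2 + (2 + 107 * 2 + 108) / 4)    # 276 vbytes

    # At fee_rate=10 sat/vB the asserted fees are therefore 2080 and 2760 sats.
    assert (two_in * 10, three_in * 10) == (2080, 2760)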
-
-        # 272 WU per input (273 when high-s); picking 1471 inputs will exceed the max standard tx weight.
-        rawtx = wallet.createrawtransaction([], [{wallet.getnewaddress(): 0.1 * 1471}])
-
-        # 1) Try to fund transaction only using the preset inputs (pick all 1472 inputs to cover the fee)
-        input_weights = []
-        for i in range(1, 1473):  # skip first output as it is the parent tx change output
-            input_weights.append({"txid": txid, "vout": i, "weight": 273})
-        assert_raises_rpc_error(-4, "Transaction too large", wallet.fundrawtransaction, hexstring=rawtx, input_weights=input_weights)
-
-        # 2) Let the wallet fund the transaction
-        assert_raises_rpc_error(-4, "The inputs size exceeds the maximum weight. Please try sending a smaller amount or manually consolidating your wallet's UTXOs",
-            wallet.fundrawtransaction, hexstring=rawtx)
-
-        # 3) Pre-select some inputs and let the wallet fill-up the remaining amount
-        inputs = input_weights[0:1000]
-        assert_raises_rpc_error(-4, "The combination of the pre-selected inputs and the wallet automatic inputs selection exceeds the transaction maximum weight. Please try sending a smaller amount or manually consolidating your wallet's UTXOs",
-            wallet.fundrawtransaction, hexstring=rawtx, input_weights=inputs)
-
-        self.nodes[2].unloadwallet("test_weight_limits")
-
-    def test_include_unsafe(self):
-        self.log.info("Test fundrawtxn with unsafe inputs")
-
-        self.nodes[0].createwallet("unsafe")
-        wallet = self.nodes[0].get_wallet_rpc("unsafe")
-
-        # We receive unconfirmed funds from external keys (unsafe outputs).
-        addr = wallet.getnewaddress()
-        inputs = []
-        for i in range(0, 2):
-            utxo = self.create_outpoints(self.nodes[2], outputs=[{addr: 5}])[0]
-            inputs.append((utxo['txid'], utxo['vout']))
-        self.sync_mempools()
-
-        # Unsafe inputs are ignored by default.
-        rawtx = wallet.createrawtransaction([], [{self.nodes[2].getnewaddress(): 7.5}])
-        assert_raises_rpc_error(-4, "Insufficient funds", wallet.fundrawtransaction, rawtx)
-
-        # But we can opt-in to use them for funding.
-        fundedtx = wallet.fundrawtransaction(rawtx, include_unsafe=True)
-        tx_dec = wallet.decoderawtransaction(fundedtx['hex'])
-        assert all((txin["txid"], txin["vout"]) in inputs for txin in tx_dec["vin"])
-        signedtx = wallet.signrawtransactionwithwallet(fundedtx['hex'])
-        assert wallet.testmempoolaccept([signedtx['hex']])[0]["allowed"]
-
-        # And we can also use them once they're confirmed.
-        self.generate(self.nodes[0], 1)
-        fundedtx = wallet.fundrawtransaction(rawtx, include_unsafe=False)
-        tx_dec = wallet.decoderawtransaction(fundedtx['hex'])
-        assert all((txin["txid"], txin["vout"]) in inputs for txin in tx_dec["vin"])
-        signedtx = wallet.signrawtransactionwithwallet(fundedtx['hex'])
-        assert wallet.testmempoolaccept([signedtx['hex']])[0]["allowed"]
-        self.nodes[0].unloadwallet("unsafe")
-
-    def test_22670(self):
-        # In issue #22670, it was observed that ApproximateBestSubset may
-        # choose enough value to cover the target amount but not enough to cover the transaction fees.
-        # This leads to a transaction whose actual transaction feerate is lower than expected.
-        # However at normal feerates, the difference between the effective value and the real value
-        # that this bug is not detected because the transaction fee must be at least 0.01 BTC (the minimum change value).
-        # Otherwise the targeted minimum change value will be enough to cover the transaction fees that were not
-        # being accounted for. So the minimum relay fee is set to 0.1 BTC/kvB in this test.
- self.log.info("Test issue 22670 ApproximateBestSubset bug") - # Make sure the default wallet will not be loaded when restarted with a high minrelaytxfee - self.nodes[0].unloadwallet(self.default_wallet_name, False) - feerate = Decimal("0.1") - self.restart_node(0, [f"-minrelaytxfee={feerate}", "-discardfee=0"]) # Set high minrelayfee, set discardfee to 0 for easier calculation - - self.nodes[0].loadwallet(self.default_wallet_name, True) - funds = self.nodes[0].get_wallet_rpc(self.default_wallet_name) - self.nodes[0].createwallet(wallet_name="tester") - tester = self.nodes[0].get_wallet_rpc("tester") - - # Because this test is specifically for ApproximateBestSubset, the target value must be greater - # than any single input available, and require more than 1 input. So we make 3 outputs - for i in range(0, 3): - funds.sendtoaddress(tester.getnewaddress(address_type="bech32"), 1) - self.generate(self.nodes[0], 1, sync_fun=self.no_op) - - # Create transactions in order to calculate fees for the target bounds that can trigger this bug - change_tx = tester.fundrawtransaction(tester.createrawtransaction([], [{funds.getnewaddress(): 1.5}])) - tx = tester.createrawtransaction([], [{funds.getnewaddress(): 2}]) - no_change_tx = tester.fundrawtransaction(tx, subtractFeeFromOutputs=[0]) - - overhead_fees = feerate * len(tx) / 2 / 1000 - cost_of_change = change_tx["fee"] - no_change_tx["fee"] - fees = no_change_tx["fee"] - assert_greater_than(fees, 0.01) - - def do_fund_send(target): - create_tx = tester.createrawtransaction([], [{funds.getnewaddress(): target}]) - funded_tx = tester.fundrawtransaction(create_tx) - signed_tx = tester.signrawtransactionwithwallet(funded_tx["hex"]) - assert signed_tx["complete"] - decoded_tx = tester.decoderawtransaction(signed_tx["hex"]) - assert_equal(len(decoded_tx["vin"]), 3) - assert tester.testmempoolaccept([signed_tx["hex"]])[0]["allowed"] - - # We want to choose more value than is available in 2 inputs when considering the fee, - # but not enough to need 3 inputs when not considering the fee. - # So the target value must be at least 2.00000001 - fee. - lower_bound = Decimal("2.00000001") - fees - # The target value must be at most 2 - cost_of_change - not_input_fees - min_change (these are all - # included in the target before ApproximateBestSubset). - upper_bound = Decimal("2.0") - cost_of_change - overhead_fees - Decimal("0.01") - assert_greater_than_or_equal(upper_bound, lower_bound) - do_fund_send(lower_bound) - do_fund_send(upper_bound) - - self.restart_node(0) - self.connect_nodes(0, 1) - self.connect_nodes(0, 2) - self.connect_nodes(0, 3) - - def test_feerate_rounding(self): - self.log.info("Test that rounding of GetFee does not result in an assertion") - - self.nodes[1].createwallet("roundtest") - w = self.nodes[1].get_wallet_rpc("roundtest") - - addr = w.getnewaddress(address_type="bech32") - self.nodes[0].sendtoaddress(addr, 1) - self.generate(self.nodes[0], 1) - - # A P2WPKH input costs 68 vbytes; With a single P2WPKH output, the rest of the tx is 42 vbytes for a total of 110 vbytes. - # At a feerate of 1.85 sat/vb, the input will need a fee of 125.8 sats and the rest 77.7 sats - # The entire tx fee should be 203.5 sats. - # Coin selection rounds the fee individually instead of at the end (due to how CFeeRate::GetFee works). - # If rounding down (which is the incorrect behavior), then the calculated fee will be 125 + 77 = 202. - # If rounding up, then the calculated fee will be 126 + 78 = 204. 
-        # In the former case, the calculated needed fee is higher than the actual fee being paid, so an assertion is reached
-        # To test this does not happen, we subtract 202 sats from the input value. If working correctly, this should
-        # fail with insufficient funds rather than bitcoind asserting.
-        rawtx = w.createrawtransaction(inputs=[], outputs=[{self.nodes[0].getnewaddress(address_type="bech32"): 1 - 0.00000202}])
-        assert_raises_rpc_error(-4, "Insufficient funds", w.fundrawtransaction, rawtx, fee_rate=1.85)
-
-    def test_input_confs_control(self):
-        self.nodes[0].createwallet("minconf")
-        wallet = self.nodes[0].get_wallet_rpc("minconf")
-
-        # Fund the wallet with different chain heights
-        for _ in range(2):
-            self.nodes[2].sendmany("", {wallet.getnewaddress(): 1, wallet.getnewaddress(): 1})
-            self.generate(self.nodes[2], 1)
-
-        unconfirmed_txid = wallet.sendtoaddress(wallet.getnewaddress(), 0.5)
-
-        self.log.info("Crafting TX using an unconfirmed input")
-        target_address = self.nodes[2].getnewaddress()
-        raw_tx1 = wallet.createrawtransaction([], {target_address: 0.1}, 0, True)
-        funded_tx1 = wallet.fundrawtransaction(raw_tx1, {'fee_rate': 1, 'maxconf': 0})['hex']
-
-        # Make sure we only had the one input
-        tx1_inputs = self.nodes[0].decoderawtransaction(funded_tx1)['vin']
-        assert_equal(len(tx1_inputs), 1)
-
-        utxo1 = tx1_inputs[0]
-        assert unconfirmed_txid == utxo1['txid']
-
-        final_tx1 = wallet.signrawtransactionwithwallet(funded_tx1)['hex']
-        txid1 = self.nodes[0].sendrawtransaction(final_tx1)
-
-        mempool = self.nodes[0].getrawmempool()
-        assert txid1 in mempool
-
-        self.log.info("Fail to craft a new TX with minconf above highest one")
-        # Create a replacement tx to 'final_tx1' that has 1 BTC target instead of 0.1.
-        raw_tx2 = wallet.createrawtransaction([{'txid': utxo1['txid'], 'vout': utxo1['vout']}], {target_address: 1})
-        assert_raises_rpc_error(-4, "Insufficient funds", wallet.fundrawtransaction, raw_tx2, {'add_inputs': True, 'minconf': 3, 'fee_rate': 10})
-
-        self.log.info("Fail to broadcast a new TX with maxconf 0 due to BIP125 rules to verify it actually chose unconfirmed outputs")
-        # Now fund 'raw_tx2' to fulfill the total target (1 BTC) by using all the wallet unconfirmed outputs.
-        # As it was created with the first unconfirmed output, 'raw_tx2' only has 0.1 BTC covered (need to fund 0.9 BTC more).
-        # So, the selection process, to cover the amount, will pick up the 'final_tx1' output as well, which is an output of the tx that this
-        # new tx is replacing!. So, once we send it to the mempool, it will return a "bad-txns-spends-conflicting-tx" error
-        # because the input will no longer exist once the first tx gets replaced by this new one).
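To make the conflict described above concrete: the candidate transaction both replaces tx1 (it respends tx1's funding input) and spends one of tx1's own outputs, so accepting it would require tx1 to be evicted and present at the same time. A minimal sketch of the shape of that check (illustrative only, not Bitcoin Core's mempool code):

    def spends_conflicting_tx(replacement_inputs, replaced_txids):
        # An input that spends an output of a transaction this tx would
        # evict can never be valid: the parent output vanishes with the
        # replacement.
        return any(txid in replaced_txids for txid, _vout in replacement_inputs)

    # tx2 respends tx1's input (replacing tx1) and also spends tx1:0 -> rejected.
    assert spends_conflicting_tx([("tx1", 0), ("other_coin", 7)], {"tx1"})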
-        funded_invalid = wallet.fundrawtransaction(raw_tx2, {'add_inputs': True, 'maxconf': 0, 'fee_rate': 10})['hex']
-        final_invalid = wallet.signrawtransactionwithwallet(funded_invalid)['hex']
-        assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, final_invalid)
-
-        self.log.info("Craft a replacement adding inputs with highest depth possible")
-        funded_tx2 = wallet.fundrawtransaction(raw_tx2, {'add_inputs': True, 'minconf': 2, 'fee_rate': 10})['hex']
-        tx2_inputs = self.nodes[0].decoderawtransaction(funded_tx2)['vin']
-        assert_greater_than_or_equal(len(tx2_inputs), 2)
-        for vin in tx2_inputs:
-            if vin['txid'] != unconfirmed_txid:
-                assert_greater_than_or_equal(self.nodes[0].gettxout(vin['txid'], vin['vout'])['confirmations'], 2)
-
-        final_tx2 = wallet.signrawtransactionwithwallet(funded_tx2)['hex']
-        txid2 = self.nodes[0].sendrawtransaction(final_tx2)
-
-        mempool = self.nodes[0].getrawmempool()
-        assert txid1 not in mempool
-        assert txid2 in mempool
-
-        wallet.unloadwallet()
-
-if __name__ == '__main__':
-    RawTransactionsTest(__file__).main()
+        assert_raises_rpc_error(-4, ERR_NOT_ENOUGH_PRESET_INPUTS, wallet.send, outputs=[{addr1: 8}], **options)
+
+        # Case (3), Explicit add_inputs=true and preset inputs (with preset inputs not-covering the target amount)
+        options["add_inputs"] = True
+        options["add_to_wallet"] = False
+        tx = wallet.send(outputs=[{addr1: 8}], **options)
+        assert tx["complete"]
+
+        # Case (4), Explicit add_inputs=true and preset inputs (with preset inputs covering the target amount)
+        options["inputs"].append({
+            "txid": source_tx["txid"],
+            "vout": 2 # change position was hardcoded to index 0
+        })
+        tx = wallet.send(outputs=[{addr1: 8}], **options)
+        assert tx["complete"]
+        # Check that only the preset inputs were added to the tx
+        decoded_psbt_inputs = self.nodes[0].decodepsbt(tx["psbt"])['tx']['vin']
+        assert_equal(len(decoded_psbt_inputs), 2)
+        for input in decoded_psbt_inputs:
+            assert_equal(input["txid"], source_tx["txid"])
+
+        # Case (5), assert that inputs are added to the tx by explicitly setting add_inputs=true
+        options = {"add_inputs": True, "add_to_wallet": True}
+        tx = wallet.send(outputs=[{addr1: 8}], **options)
+        assert tx["complete"]
+
+        # Case (6). Explicit add_inputs=false, no preset inputs:
+        options = {"add_inputs": False}
+        assert_raises_rpc_error(-4, ERR_NOT_ENOUGH_PRESET_INPUTS, wallet.send, outputs=[{addr1: 3}], **options)
+
+        ################################################
+
+        # Case (1), 'walletcreatefundedpsbt' command
+        # Default add_inputs value with no preset inputs (add_inputs=true)
+        inputs = []
+        outputs = {self.nodes[1].getnewaddress(): 8}
+        assert "psbt" in wallet.walletcreatefundedpsbt(inputs=inputs, outputs=outputs)
+
+        # Case (2), 'walletcreatefundedpsbt' command
+        # Default add_inputs value with preset inputs (add_inputs=false).
+        inputs = [{
+            "txid": source_tx["txid"],
+            "vout": 1 # change position was hardcoded to index 0
+        }]
+        outputs = {self.nodes[1].getnewaddress(): 8}
+        assert_raises_rpc_error(-4, ERR_NOT_ENOUGH_PRESET_INPUTS, wallet.walletcreatefundedpsbt, inputs=inputs, outputs=outputs)
+
+        # Case (3), Explicit add_inputs=true and preset inputs (with preset inputs not-covering the target amount)
+        options["add_inputs"] = True
+        assert "psbt" in wallet.walletcreatefundedpsbt(outputs=[{addr1: 8}], inputs=inputs, **options)
+
+        # Case (4), Explicit add_inputs=true and preset inputs (with preset inputs covering the target amount)
+        inputs.append({
+            "txid": source_tx["txid"],
+            "vout": 2 # change position was hardcoded to index 0
+        })
+        psbt_tx = wallet.walletcreatefundedpsbt(outputs=[{addr1: 8}], inputs=inputs, **options)
+        # Check that only the preset inputs were added to the tx
+        decoded_psbt_inputs = self.nodes[0].decodepsbt(psbt_tx["psbt"])['tx']['vin']
+        assert_equal(len(decoded_psbt_inputs), 2)
+        for input in decoded_psbt_inputs:
+            assert_equal(input["txid"], source_tx["txid"])
+
+        # Case (5), 'walletcreatefundedpsbt' command
+        # Explicit add_inputs=true, no preset inputs
+        options = {
+            "add_inputs": True
+        }
+        assert "psbt" in wallet.walletcreatefundedpsbt(inputs=[], outputs=outputs, **options)
+
+        # Case (6). Explicit add_inputs=false, no preset inputs:
+        options = {"add_inputs": False}
+        assert_raises_rpc_error(-4, ERR_NOT_ENOUGH_PRESET_INPUTS, wallet.walletcreatefundedpsbt, inputs=[], outputs=outputs, **options)
+
+        self.nodes[2].unloadwallet("test_preset_inputs")
+
+    def test_preset_inputs_selection(self):
+        self.log.info('Test wallet preset inputs are not double-counted or reused in coin selection')
+
+        # Create and fund the wallet with 4 UTXO of 5 BTC each (20 BTC total)
+        self.nodes[2].createwallet("test_preset_inputs_selection")
+        wallet = self.nodes[2].get_wallet_rpc("test_preset_inputs_selection")
+        outputs = {}
+        for _ in range(4):
+            outputs[wallet.getnewaddress(address_type="bech32")] = 5
+        self.nodes[0].sendmany("", outputs)
+        self.generate(self.nodes[0], 1)
+
+        # Select the preset inputs
+        coins = wallet.listunspent()
+        preset_inputs = [coins[0], coins[1], coins[2]]
+
+        # Now let's create the tx creation options
+        options = {
+            "inputs": preset_inputs,
+            "add_inputs": True, # automatically add coins from the wallet to fulfill the target
+            "subtract_fee_from_outputs": [0], # deduct fee from first output
+            "add_to_wallet": False
+        }
+
+        # Attempt to send 29 BTC from a wallet that only has 20 BTC. The wallet should exclude
+        # the preset inputs from the pool of available coins, realize that there is not enough
+        # money to fund the 29 BTC payment, and fail with "Insufficient funds".
+        #
+        # Even with SFFO, the wallet can only afford to send 20 BTC.
+        # If the wallet does not properly exclude preset inputs from the pool of available coins
+        # prior to coin selection, it may create a transaction that does not fund the full payment
+        # amount or, through SFFO, incorrectly reduce the recipient's amount by the difference
+        # between the original target and the wrongly counted inputs (in this case 9 BTC)
+        # so that the recipient's amount is no longer equal to the user's selected target of 29 BTC.
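The 9 BTC figure above is the double-counting at work: three preset 5 BTC coins plus the same coins re-offered through the candidate pool would make 35 BTC appear available, while the transaction can really only carry 20, leaving a 29 - 20 = 9 BTC shortfall for SFFO to shave off the recipient. A standalone arithmetic sketch of both accountings:

    from decimal import Decimal

    coins = [Decimal(5)] * 4          # wallet holds 20 BTC in four coins
    preset = coins[:3]                # 15 BTC preselected
    target = Decimal(29)

    # Correct accounting: preset coins are removed from the candidate pool,
    # so at most 20 BTC is spendable and the send must fail.
    spendable = sum(preset) + sum(coins[3:])
    assert spendable < target         # -> "Insufficient funds"

    # Buggy accounting: the preset 15 BTC is counted again via the pool,
    # 15 + 20 = 35 appears to cover 29, and SFFO would silently shave the
    # 9 BTC shortfall off the recipient's output instead of failing.
    double_counted = sum(preset) + sum(coins)
    assert double_counted >= target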
+) + # First case, use 'subtract_fee_from_outputs = true') + assert_raises_rpc_error(-4, "Insufficient funds", wallet.send, outputs=[{wallet.getnewaddress(address_type="bech32"): 29}], options=options)) +) + # Second case, don't use 'subtract_fee_from_outputs') + del options["subtract_fee_from_outputs"]) + assert_raises_rpc_error(-4, "Insufficient funds", wallet.send, outputs=[{wallet.getnewaddress(address_type="bech32"): 29}], options=options)) +) + self.nodes[2].unloadwallet("test_preset_inputs_selection")) +) + def test_weight_calculation(self):) + self.log.info("Test weight calculation with external inputs")) +) + self.nodes[2].createwallet("test_weight_calculation")) + wallet = self.nodes[2].get_wallet_rpc("test_weight_calculation")) +) + addr = wallet.getnewaddress(address_type="bech32")) + ext_addr = self.nodes[0].getnewaddress(address_type="bech32")) + utxo, ext_utxo = self.create_outpoints(self.nodes[0], outputs=[{addr: 5}, {ext_addr: 5}])) +) + self.nodes[0].sendtoaddress(wallet.getnewaddress(address_type="bech32"), 5)) + self.generate(self.nodes[0], 1)) +) + rawtx = wallet.createrawtransaction([utxo], [{self.nodes[0].getnewaddress(address_type="bech32"): 8}])) + fundedtx = wallet.fundrawtransaction(rawtx, fee_rate=10, change_type="bech32")) + # with 71-byte signatures we should expect following tx size) + # tx overhead (10) + 2 inputs (41 each) + 2 p2wpkh (31 each) + (segwit marker and flag (2) + 2 p2wpkh 71 byte sig witnesses (107 each)) / witness scaling factor (4)) + tx_size = ceil(10 + 41*2 + 31*2 + (2 + 107*2)/4)) + assert_equal(fundedtx['fee'] * COIN, tx_size * 10)) +) + # Using the other output should have 72 byte sigs) + rawtx = wallet.createrawtransaction([ext_utxo], [{self.nodes[0].getnewaddress(): 13}])) + ext_desc = self.nodes[0].getaddressinfo(ext_addr)["desc"]) + fundedtx = wallet.fundrawtransaction(rawtx, fee_rate=10, change_type="bech32", solving_data={"descriptors": [ext_desc]})) + # tx overhead (10) + 3 inputs (41 each) + 2 p2wpkh(31 each) + (segwit marker and flag (2) + 2 p2wpkh 71 bytes sig witnesses (107 each) + p2wpkh 72 byte sig witness (108)) / witness scaling factor (4)) + tx_size = ceil(10 + 41*3 + 31*2 + (2 + 107*2 + 108)/4)) + assert_equal(fundedtx['fee'] * COIN, tx_size * 10)) +) + self.nodes[2].unloadwallet("test_weight_calculation")) +) + def test_weight_limits(self):) + self.log.info("Test weight limits")) +) + self.nodes[2].createwallet("test_weight_limits")) + wallet = self.nodes[2].get_wallet_rpc("test_weight_limits")) +) + outputs = []) + for _ in range(1472):) + outputs.append({wallet.getnewaddress(address_type="legacy"): 0.1})) + txid = self.nodes[0].send(outputs=outputs, change_position=0)["txid"]) + self.generate(self.nodes[0], 1)) +) + # 272 WU per input (273 when high-s); picking 1471 inputs will exceed the max standard tx weight.) + rawtx = wallet.createrawtransaction([], [{wallet.getnewaddress(): 0.1 * 1471}])) +) + # 1) Try to fund transaction only using the preset inputs (pick all 1472 inputs to cover the fee)) + input_weights = []) + for i in range(1, 1473): # skip first output as it is the parent tx change output) + input_weights.append({"txid": txid, "vout": i, "weight": 273})) + assert_raises_rpc_error(-4, "Transaction too large", wallet.fundrawtransaction, hexstring=rawtx, input_weights=input_weights)) +) + # 2) Let the wallet fund the transaction) + assert_raises_rpc_error(-4, "The inputs size exceeds the maximum weight. 
Please try sending a smaller amount or manually consolidating your wallet's UTXOs",
+                                wallet.fundrawtransaction, hexstring=rawtx)
+
+        # 3) Pre-select some inputs and let the wallet fill up the remaining amount
+        inputs = input_weights[0:1000]
+        assert_raises_rpc_error(-4, "The combination of the pre-selected inputs and the wallet automatic inputs selection exceeds the transaction maximum weight. Please try sending a smaller amount or manually consolidating your wallet's UTXOs",
+                                wallet.fundrawtransaction, hexstring=rawtx, input_weights=inputs)
+
+        self.nodes[2].unloadwallet("test_weight_limits")
+
+    def test_include_unsafe(self):
+        self.log.info("Test fundrawtxn with unsafe inputs")
+
+        self.nodes[0].createwallet("unsafe")
+        wallet = self.nodes[0].get_wallet_rpc("unsafe")
+
+        # We receive unconfirmed funds from external keys (unsafe outputs).
+        addr = wallet.getnewaddress()
+        inputs = []
+        for i in range(0, 2):
+            utxo = self.create_outpoints(self.nodes[2], outputs=[{addr: 5}])[0]
+            inputs.append((utxo['txid'], utxo['vout']))
+        self.sync_mempools()
+
+        # Unsafe inputs are ignored by default.
+        rawtx = wallet.createrawtransaction([], [{self.nodes[2].getnewaddress(): 7.5}])
+        assert_raises_rpc_error(-4, "Insufficient funds", wallet.fundrawtransaction, rawtx)
+
+        # But we can opt in to use them for funding.
+        fundedtx = wallet.fundrawtransaction(rawtx, include_unsafe=True)
+        tx_dec = wallet.decoderawtransaction(fundedtx['hex'])
+        assert all((txin["txid"], txin["vout"]) in inputs for txin in tx_dec["vin"])
+        signedtx = wallet.signrawtransactionwithwallet(fundedtx['hex'])
+        assert wallet.testmempoolaccept([signedtx['hex']])[0]["allowed"]
+
+        # And we can also use them once they're confirmed.
+        self.generate(self.nodes[0], 1)
+        fundedtx = wallet.fundrawtransaction(rawtx, include_unsafe=False)
+        tx_dec = wallet.decoderawtransaction(fundedtx['hex'])
+        assert all((txin["txid"], txin["vout"]) in inputs for txin in tx_dec["vin"])
+        signedtx = wallet.signrawtransactionwithwallet(fundedtx['hex'])
+        assert wallet.testmempoolaccept([signedtx['hex']])[0]["allowed"]
+        self.nodes[0].unloadwallet("unsafe")
+
+    def test_22670(self):
+        # In issue #22670, it was observed that ApproximateBestSubset may
+        # choose enough value to cover the target amount but not enough to cover the transaction fees.
+        # This leads to a transaction whose actual transaction feerate is lower than expected.
+        # However at normal feerates, the difference between the effective value and the real value
+        # is small enough that this bug is not detected, because the transaction fee must be at least 0.01 BTC (the minimum change value).
+        # Otherwise the targeted minimum change value will be enough to cover the transaction fees that were not
+        # being accounted for. So the minimum relay fee is set to 0.1 BTC/kvB in this test.
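To put rough numbers on the comment above, a sketch under the assumptions of ~68 vbytes per P2WPKH input and bitcoind's default minrelaytxfee of 0.00001 BTC/kvB (the helper name is illustrative):

from decimal import Decimal

default_feerate = Decimal("0.00001")  # BTC/kvB, bitcoind's default minrelaytxfee
test_feerate = Decimal("0.1")         # BTC/kvB, as set in this test
min_change = Decimal("0.01")          # targeted minimum change value

def per_input_fee(rate, vbytes=68):   # ~68 vbytes per P2WPKH input (assumption)
    return rate * vbytes / 1000

# At the default feerate the unaccounted per-input fee is tiny and hides
# inside the minimum change; at 0.1 BTC/kvB it is large enough to surface.
assert per_input_fee(default_feerate) < min_change / 1000
assert per_input_fee(test_feerate) > min_change / 2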
+ self.log.info("Test issue 22670 ApproximateBestSubset bug")) + # Make sure the default wallet will not be loaded when restarted with a high minrelaytxfee) + self.nodes[0].unloadwallet(self.default_wallet_name, False)) + feerate = Decimal("0.1")) + self.restart_node(0, [f"-minrelaytxfee={feerate}", "-discardfee=0"]) # Set high minrelayfee, set discardfee to 0 for easier calculation) +) + self.nodes[0].loadwallet(self.default_wallet_name, True)) + funds = self.nodes[0].get_wallet_rpc(self.default_wallet_name)) + self.nodes[0].createwallet(wallet_name="tester")) + tester = self.nodes[0].get_wallet_rpc("tester")) +) + # Because this test is specifically for ApproximateBestSubset, the target value must be greater) + # than any single input available, and require more than 1 input. So we make 3 outputs) + for i in range(0, 3):) + funds.sendtoaddress(tester.getnewaddress(address_type="bech32"), 1)) + self.generate(self.nodes[0], 1, sync_fun=self.no_op)) +) + # Create transactions in order to calculate fees for the target bounds that can trigger this bug) + change_tx = tester.fundrawtransaction(tester.createrawtransaction([], [{funds.getnewaddress(): 1.5}]))) + tx = tester.createrawtransaction([], [{funds.getnewaddress(): 2}])) + no_change_tx = tester.fundrawtransaction(tx, subtractFeeFromOutputs=[0])) +) + overhead_fees = feerate * len(tx) / 2 / 1000) + cost_of_change = change_tx["fee"] - no_change_tx["fee"]) + fees = no_change_tx["fee"]) + assert_greater_than(fees, 0.01)) +) + def do_fund_send(target):) + create_tx = tester.createrawtransaction([], [{funds.getnewaddress(): target}])) + funded_tx = tester.fundrawtransaction(create_tx)) + signed_tx = tester.signrawtransactionwithwallet(funded_tx["hex"])) + assert signed_tx["complete"]) + decoded_tx = tester.decoderawtransaction(signed_tx["hex"])) + assert_equal(len(decoded_tx["vin"]), 3)) + assert tester.testmempoolaccept([signed_tx["hex"]])[0]["allowed"]) +) + # We want to choose more value than is available in 2 inputs when considering the fee,) + # but not enough to need 3 inputs when not considering the fee.) + # So the target value must be at least 2.00000001 - fee.) + lower_bound = Decimal("2.00000001") - fees) + # The target value must be at most 2 - cost_of_change - not_input_fees - min_change (these are all) + # included in the target before ApproximateBestSubset).) + upper_bound = Decimal("2.0") - cost_of_change - overhead_fees - Decimal("0.01")) + assert_greater_than_or_equal(upper_bound, lower_bound)) + do_fund_send(lower_bound)) + do_fund_send(upper_bound)) +) + self.restart_node(0)) + self.connect_nodes(0, 1)) + self.connect_nodes(0, 2)) + self.connect_nodes(0, 3)) +) + def test_feerate_rounding(self):) + self.log.info("Test that rounding of GetFee does not result in an assertion")) +) + self.nodes[1].createwallet("roundtest")) + w = self.nodes[1].get_wallet_rpc("roundtest")) +) + addr = w.getnewaddress(address_type="bech32")) + self.nodes[0].sendtoaddress(addr, 1)) + self.generate(self.nodes[0], 1)) +) + # A P2WPKH input costs 68 vbytes; With a single P2WPKH output, the rest of the tx is 42 vbytes for a total of 110 vbytes.) + # At a feerate of 1.85 sat/vb, the input will need a fee of 125.8 sats and the rest 77.7 sats) + # The entire tx fee should be 203.5 sats.) + # Coin selection rounds the fee individually instead of at the end (due to how CFeeRate::GetFee works).) + # If rounding down (which is the incorrect behavior), then the calculated fee will be 125 + 77 = 202.) 
+ # If rounding up, then the calculated fee will be 126 + 78 = 204.) + # In the former case, the calculated needed fee is higher than the actual fee being paid, so an assertion is reached) + # To test this does not happen, we subtract 202 sats from the input value. If working correctly, this should) + # fail with insufficient funds rather than bitcoind asserting.) + rawtx = w.createrawtransaction(inputs=[], outputs=[{self.nodes[0].getnewaddress(address_type="bech32"): 1 - 0.00000202}])) + assert_raises_rpc_error(-4, "Insufficient funds", w.fundrawtransaction, rawtx, fee_rate=1.85)) +) + def test_input_confs_control(self):) + self.nodes[0].createwallet("minconf")) + wallet = self.nodes[0].get_wallet_rpc("minconf")) +) + # Fund the wallet with different chain heights) + for _ in range(2):) + self.nodes[2].sendmany("", {wallet.getnewaddress():1, wallet.getnewaddress():1})) + self.generate(self.nodes[2], 1)) +) + unconfirmed_txid = wallet.sendtoaddress(wallet.getnewaddress(), 0.5)) +) + self.log.info("Crafting TX using an unconfirmed input")) + target_address = self.nodes[2].getnewaddress()) + raw_tx1 = wallet.createrawtransaction([], {target_address: 0.1}, 0, True)) + funded_tx1 = wallet.fundrawtransaction(raw_tx1, {'fee_rate': 1, 'maxconf': 0})['hex']) +) + # Make sure we only had the one input) + tx1_inputs = self.nodes[0].decoderawtransaction(funded_tx1)['vin']) + assert_equal(len(tx1_inputs), 1)) +) + utxo1 = tx1_inputs[0]) + assert unconfirmed_txid == utxo1['txid']) +) + final_tx1 = wallet.signrawtransactionwithwallet(funded_tx1)['hex']) + txid1 = self.nodes[0].sendrawtransaction(final_tx1)) +) + mempool = self.nodes[0].getrawmempool()) + assert txid1 in mempool) +) + self.log.info("Fail to craft a new TX with minconf above highest one")) + # Create a replacement tx to 'final_tx1' that has 1 BTC target instead of 0.1.) + raw_tx2 = wallet.createrawtransaction([{'txid': utxo1['txid'], 'vout': utxo1['vout']}], {target_address: 1})) + assert_raises_rpc_error(-4, "Insufficient funds", wallet.fundrawtransaction, raw_tx2, {'add_inputs': True, 'minconf': 3, 'fee_rate': 10})) +) + self.log.info("Fail to broadcast a new TX with maxconf 0 due to BIP125 rules to verify it actually chose unconfirmed outputs")) + # Now fund 'raw_tx2' to fulfill the total target (1 BTC) by using all the wallet unconfirmed outputs.) + # As it was created with the first unconfirmed output, 'raw_tx2' only has 0.1 BTC covered (need to fund 0.9 BTC more).) + # So, the selection process, to cover the amount, will pick up the 'final_tx1' output as well, which is an output of the tx that this) + # new tx is replacing!. So, once we send it to the mempool, it will return a "bad-txns-spends-conflicting-tx") + # because the input will no longer exist once the first tx gets replaced by this new one).) 
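A toy model of the invariant the comment above describes (not mempool code; the set names are made up for illustration):

# A replacement may not spend an output of a transaction it would evict.
evicted_by_tx2 = {"txid1"}                     # tx1 conflicts with the replacement
spent_by_tx2 = {"unconfirmed_txid", "txid1"}   # maxconf=0 pulled in tx1's own output
assert evicted_by_tx2 & spent_by_tx2           # overlap -> "bad-txns-spends-conflicting-tx"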
+        funded_invalid = wallet.fundrawtransaction(raw_tx2, {'add_inputs': True, 'maxconf': 0, 'fee_rate': 10})['hex']
+        final_invalid = wallet.signrawtransactionwithwallet(funded_invalid)['hex']
+        assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, final_invalid)
+
+        self.log.info("Craft a replacement adding inputs with highest depth possible")
+        funded_tx2 = wallet.fundrawtransaction(raw_tx2, {'add_inputs': True, 'minconf': 2, 'fee_rate': 10})['hex']
+        tx2_inputs = self.nodes[0].decoderawtransaction(funded_tx2)['vin']
+        assert_greater_than_or_equal(len(tx2_inputs), 2)
+        for vin in tx2_inputs:
+            if vin['txid'] != unconfirmed_txid:
+                assert_greater_than_or_equal(self.nodes[0].gettxout(vin['txid'], vin['vout'])['confirmations'], 2)
+
+        final_tx2 = wallet.signrawtransactionwithwallet(funded_tx2)['hex']
+        txid2 = self.nodes[0].sendrawtransaction(final_tx2)
+
+        mempool = self.nodes[0].getrawmempool()
+        assert txid1 not in mempool
+        assert txid2 in mempool
+
+        wallet.unloadwallet()
+
+if __name__ == '__main__':
+    RawTransactionsTest(__file__).main()
diff --git a/test/functional/wallet_gethdkeys.py b/test/functional/wallet_gethdkeys.py index eec039589058d7..ba03435bd27ac4 100755 --- a/test/functional/wallet_gethdkeys.py +++ b/test/functional/wallet_gethdkeys.py @@ -1,186 +1,186 @@
-#!/usr/bin/env python3
-# Copyright (c) 2023 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test wallet gethdkeys RPC."""
-
-from test_framework.descriptors import descsum_create
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (
-    assert_equal,
-    assert_raises_rpc_error,
-    assert_not_equal,
-)
-from test_framework.wallet_util import WalletUnlock
-
-
-class WalletGetHDKeyTest(BitcoinTestFramework):
-    def add_options(self, parser):
-        self.add_wallet_options(parser, descriptors=True, legacy=False)
-
-    def set_test_params(self):
-        self.setup_clean_chain = True
-        self.num_nodes = 1
-
-    def skip_test_if_missing_module(self):
-        self.skip_if_no_wallet()
-
-    def run_test(self):
-        self.test_basic_gethdkeys()
-        self.test_ranged_imports()
-        self.test_lone_key_imports()
-        self.test_ranged_multisig()
-        self.test_mixed_multisig()
-
-    def test_basic_gethdkeys(self):
-        self.log.info("Test gethdkeys basics")
-        self.nodes[0].createwallet("basic")
-        wallet = self.nodes[0].get_wallet_rpc("basic")
-        xpub_info = wallet.gethdkeys()
-        assert_equal(len(xpub_info), 1)
-        assert_equal(xpub_info[0]["has_private"], True)
-
-        assert "xprv" not in xpub_info[0]
-        xpub = xpub_info[0]["xpub"]
-
-        xpub_info = wallet.gethdkeys(private=True)
-        xprv = xpub_info[0]["xprv"]
-        assert_equal(xpub_info[0]["xpub"], xpub)
-        assert_equal(xpub_info[0]["has_private"], True)
-
-        descs = wallet.listdescriptors(True)
-        for desc in descs["descriptors"]:
-            assert xprv in desc["desc"]
-
-        self.log.info("HD pubkey can be retrieved from encrypted wallets")
-        prev_xprv = xprv
-        wallet.encryptwallet("pass")
-        # HD key is rotated on encryption, there should now be 2 HD keys
-        assert_equal(len(wallet.gethdkeys()), 2)
-        # New key is active, should be able to get only that one and its descriptors
-        xpub_info = wallet.gethdkeys(active_only=True)
-        assert_equal(len(xpub_info), 1)
-        assert xpub_info[0]["xpub"] != xpub
-        assert "xprv" not in xpub_info[0]
-        assert_equal(xpub_info[0]["has_private"], True)
-
-        self.log.info("HD privkey can be retrieved from 
encrypted wallets") - assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first", wallet.gethdkeys, private=True) - with WalletUnlock(wallet, "pass"): - xpub_info = wallet.gethdkeys(active_only=True, private=True)[0] - assert xpub_info["xprv"] != xprv - for desc in wallet.listdescriptors(True)["descriptors"]: - if desc["active"]: - # After encrypting, HD key was rotated and should appear in all active descriptors - assert xpub_info["xprv"] in desc["desc"] - else: - # Inactive descriptors should have the previous HD key - assert prev_xprv in desc["desc"] - - def test_ranged_imports(self): - self.log.info("Keys of imported ranged descriptors appear in gethdkeys") - def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name) - self.nodes[0].createwallet("imports") - wallet = self.nodes[0].get_wallet_rpc("imports") - - xpub_info = wallet.gethdkeys() - assert_equal(len(xpub_info), 1) - active_xpub = xpub_info[0]["xpub"] - - import_xpub = def_wallet.gethdkeys(active_only=True)[0]["xpub"] - desc_import = def_wallet.listdescriptors(True)["descriptors"] - for desc in desc_import: - desc["active"] = False - wallet.importdescriptors(desc_import) - assert_equal(wallet.gethdkeys(active_only=True), xpub_info) - - xpub_info = wallet.gethdkeys() - assert_equal(len(xpub_info), 2) - for x in xpub_info: - if x["xpub"] == active_xpub: - for desc in x["descriptors"]: - assert_equal(desc["active"], True) - elif x["xpub"] == import_xpub: - for desc in x["descriptors"]: - assert_equal(desc["active"], False) - else: - assert False - - - def test_lone_key_imports(self): - self.log.info("Non-HD keys do not appear in gethdkeys") - self.nodes[0].createwallet("lonekey", blank=True) - wallet = self.nodes[0].get_wallet_rpc("lonekey") - - assert_equal(wallet.gethdkeys(), []) - wallet.importdescriptors([{"desc": descsum_create("wpkh(cTe1f5rdT8A8DFgVWTjyPwACsDPJM9ff4QngFxUixCSvvbg1x6sh)"), "timestamp": "now"}]) - assert_equal(wallet.gethdkeys(), []) - - self.log.info("HD keys of non-ranged descriptors should appear in gethdkeys") - def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name) - xpub_info = def_wallet.gethdkeys(private=True) - xpub = xpub_info[0]["xpub"] - xprv = xpub_info[0]["xprv"] - prv_desc = descsum_create(f"wpkh({xprv})") - pub_desc = descsum_create(f"wpkh({xpub})") - assert_equal(wallet.importdescriptors([{"desc": prv_desc, "timestamp": "now"}])[0]["success"], True) - xpub_info = wallet.gethdkeys() - assert_equal(len(xpub_info), 1) - assert_equal(xpub_info[0]["xpub"], xpub) - assert_equal(len(xpub_info[0]["descriptors"]), 1) - assert_equal(xpub_info[0]["descriptors"][0]["desc"], pub_desc) - assert_equal(xpub_info[0]["descriptors"][0]["active"], False) - - def test_ranged_multisig(self): - self.log.info("HD keys of a multisig appear in gethdkeys") - def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name) - self.nodes[0].createwallet("ranged_multisig") - wallet = self.nodes[0].get_wallet_rpc("ranged_multisig") - - xpub1 = wallet.gethdkeys()[0]["xpub"] - xprv1 = wallet.gethdkeys(private=True)[0]["xprv"] - xpub2 = def_wallet.gethdkeys()[0]["xpub"] - - prv_multi_desc = descsum_create(f"wsh(multi(2,{xprv1}/*,{xpub2}/*))") - pub_multi_desc = descsum_create(f"wsh(multi(2,{xpub1}/*,{xpub2}/*))") - assert_equal(wallet.importdescriptors([{"desc": prv_multi_desc, "timestamp": "now"}])[0]["success"], True) - - xpub_info = wallet.gethdkeys() - assert_equal(len(xpub_info), 2) - for x in xpub_info: - if x["xpub"] == xpub1: - found_desc = next((d for 
d in xpub_info[0]["descriptors"] if d["desc"] == pub_multi_desc), None) - assert found_desc is not None - assert_equal(found_desc["active"], False) - elif x["xpub"] == xpub2: - assert_equal(len(x["descriptors"]), 1) - assert_equal(x["descriptors"][0]["desc"], pub_multi_desc) - assert_equal(x["descriptors"][0]["active"], False) - else: - assert False - - def test_mixed_multisig(self): - self.log.info("Non-HD keys of a multisig do not appear in gethdkeys") - def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name) - self.nodes[0].createwallet("single_multisig") - wallet = self.nodes[0].get_wallet_rpc("single_multisig") - - xpub = wallet.gethdkeys()[0]["xpub"] - xprv = wallet.gethdkeys(private=True)[0]["xprv"] - pub = def_wallet.getaddressinfo(def_wallet.getnewaddress())["pubkey"] - - prv_multi_desc = descsum_create(f"wsh(multi(2,{xprv},{pub}))") - pub_multi_desc = descsum_create(f"wsh(multi(2,{xpub},{pub}))") - import_res = wallet.importdescriptors([{"desc": prv_multi_desc, "timestamp": "now"}]) - assert_equal(import_res[0]["success"], True) - - xpub_info = wallet.gethdkeys() - assert_equal(len(xpub_info), 1) - assert_equal(xpub_info[0]["xpub"], xpub) - found_desc = next((d for d in xpub_info[0]["descriptors"] if d["desc"] == pub_multi_desc), None) - assert found_desc is not None - assert_equal(found_desc["active"], False) - - -if __name__ == '__main__': - WalletGetHDKeyTest(__file__).main() +#!/usr/bin/env python3) +# Copyright (c) 2023 The Bitcoin Core developers) +# Distributed under the MIT software license, see the accompanying) +# file COPYING or http://www.opensource.org/licenses/mit-license.php.) +"""Test wallet gethdkeys RPC.""") +) +from test_framework.descriptors import descsum_create) +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.util import () + assert_equal,) + assert_raises_rpc_error,) + assert_not_equal,) +)) +from test_framework.wallet_util import WalletUnlock) +) +) +class WalletGetHDKeyTest(BitcoinTestFramework):) + def add_options(self, parser):) + self.add_wallet_options(parser, descriptors=True, legacy=False)) +) + def set_test_params(self):) + self.setup_clean_chain = True) + self.num_nodes = 1) +) + def skip_test_if_missing_module(self):) + self.skip_if_no_wallet()) +) + def run_test(self):) + self.test_basic_gethdkeys()) + self.test_ranged_imports()) + self.test_lone_key_imports()) + self.test_ranged_multisig()) + self.test_mixed_multisig()) +) + def test_basic_gethdkeys(self):) + self.log.info("Test gethdkeys basics")) + self.nodes[0].createwallet("basic")) + wallet = self.nodes[0].get_wallet_rpc("basic")) + xpub_info = wallet.gethdkeys()) + assert_equal(len(xpub_info), 1)) + assert_equal(xpub_info[0]["has_private"], True)) +) + assert "xprv" not in xpub_info[0]) + xpub = xpub_info[0]["xpub"]) +) + xpub_info = wallet.gethdkeys(private=True)) + xprv = xpub_info[0]["xprv"]) + assert_equal(xpub_info[0]["xpub"], xpub)) + assert_equal(xpub_info[0]["has_private"], True)) +) + descs = wallet.listdescriptors(True)) + for desc in descs["descriptors"]:) + assert xprv in desc["desc"]) +) + self.log.info("HD pubkey can be retrieved from encrypted wallets")) + prev_xprv = xprv) + wallet.encryptwallet("pass")) + # HD key is rotated on encryption, there should now be 2 HD keys) + assert_equal(len(wallet.gethdkeys()), 2)) + # New key is active, should be able to get only that one and its descriptors) + xpub_info = wallet.gethdkeys(active_only=True)) + assert_equal(len(xpub_info), 1)) + assert_not_equal(xpub_info[0]["xpub"], xpub)) 
+ assert "xprv" not in xpub_info[0]) + assert_equal(xpub_info[0]["has_private"], True)) +) + self.log.info("HD privkey can be retrieved from encrypted wallets")) + assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first", wallet.gethdkeys, private=True)) + with WalletUnlock(wallet, "pass"):) + xpub_info = wallet.gethdkeys(active_only=True, private=True)[0]) + assert_not_equal(xpub_info["xprv"], xprv)) + for desc in wallet.listdescriptors(True)["descriptors"]:) + if desc["active"]:) + # After encrypting, HD key was rotated and should appear in all active descriptors) + assert xpub_info["xprv"] in desc["desc"]) + else:) + # Inactive descriptors should have the previous HD key) + assert prev_xprv in desc["desc"]) +) + def test_ranged_imports(self):) + self.log.info("Keys of imported ranged descriptors appear in gethdkeys")) + def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name)) + self.nodes[0].createwallet("imports")) + wallet = self.nodes[0].get_wallet_rpc("imports")) +) + xpub_info = wallet.gethdkeys()) + assert_equal(len(xpub_info), 1)) + active_xpub = xpub_info[0]["xpub"]) +) + import_xpub = def_wallet.gethdkeys(active_only=True)[0]["xpub"]) + desc_import = def_wallet.listdescriptors(True)["descriptors"]) + for desc in desc_import:) + desc["active"] = False) + wallet.importdescriptors(desc_import)) + assert_equal(wallet.gethdkeys(active_only=True), xpub_info)) +) + xpub_info = wallet.gethdkeys()) + assert_equal(len(xpub_info), 2)) + for x in xpub_info:) + if x["xpub"] == active_xpub:) + for desc in x["descriptors"]:) + assert_equal(desc["active"], True)) + elif x["xpub"] == import_xpub:) + for desc in x["descriptors"]:) + assert_equal(desc["active"], False)) + else:) + assert False) +) +) + def test_lone_key_imports(self):) + self.log.info("Non-HD keys do not appear in gethdkeys")) + self.nodes[0].createwallet("lonekey", blank=True)) + wallet = self.nodes[0].get_wallet_rpc("lonekey")) +) + assert_equal(wallet.gethdkeys(), [])) + wallet.importdescriptors([{"desc": descsum_create("wpkh(cTe1f5rdT8A8DFgVWTjyPwACsDPJM9ff4QngFxUixCSvvbg1x6sh)"), "timestamp": "now"}])) + assert_equal(wallet.gethdkeys(), [])) +) + self.log.info("HD keys of non-ranged descriptors should appear in gethdkeys")) + def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name)) + xpub_info = def_wallet.gethdkeys(private=True)) + xpub = xpub_info[0]["xpub"]) + xprv = xpub_info[0]["xprv"]) + prv_desc = descsum_create(f"wpkh({xprv})")) + pub_desc = descsum_create(f"wpkh({xpub})")) + assert_equal(wallet.importdescriptors([{"desc": prv_desc, "timestamp": "now"}])[0]["success"], True)) + xpub_info = wallet.gethdkeys()) + assert_equal(len(xpub_info), 1)) + assert_equal(xpub_info[0]["xpub"], xpub)) + assert_equal(len(xpub_info[0]["descriptors"]), 1)) + assert_equal(xpub_info[0]["descriptors"][0]["desc"], pub_desc)) + assert_equal(xpub_info[0]["descriptors"][0]["active"], False)) +) + def test_ranged_multisig(self):) + self.log.info("HD keys of a multisig appear in gethdkeys")) + def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name)) + self.nodes[0].createwallet("ranged_multisig")) + wallet = self.nodes[0].get_wallet_rpc("ranged_multisig")) +) + xpub1 = wallet.gethdkeys()[0]["xpub"]) + xprv1 = wallet.gethdkeys(private=True)[0]["xprv"]) + xpub2 = def_wallet.gethdkeys()[0]["xpub"]) +) + prv_multi_desc = descsum_create(f"wsh(multi(2,{xprv1}/*,{xpub2}/*))")) + pub_multi_desc = descsum_create(f"wsh(multi(2,{xpub1}/*,{xpub2}/*))")) + 
assert_equal(wallet.importdescriptors([{"desc": prv_multi_desc, "timestamp": "now"}])[0]["success"], True)) +) + xpub_info = wallet.gethdkeys()) + assert_equal(len(xpub_info), 2)) + for x in xpub_info:) + if x["xpub"] == xpub1:) + found_desc = next((d for d in xpub_info[0]["descriptors"] if d["desc"] == pub_multi_desc), None)) + assert found_desc is not None) + assert_equal(found_desc["active"], False)) + elif x["xpub"] == xpub2:) + assert_equal(len(x["descriptors"]), 1)) + assert_equal(x["descriptors"][0]["desc"], pub_multi_desc)) + assert_equal(x["descriptors"][0]["active"], False)) + else:) + assert False) +) + def test_mixed_multisig(self):) + self.log.info("Non-HD keys of a multisig do not appear in gethdkeys")) + def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name)) + self.nodes[0].createwallet("single_multisig")) + wallet = self.nodes[0].get_wallet_rpc("single_multisig")) +) + xpub = wallet.gethdkeys()[0]["xpub"]) + xprv = wallet.gethdkeys(private=True)[0]["xprv"]) + pub = def_wallet.getaddressinfo(def_wallet.getnewaddress())["pubkey"]) +) + prv_multi_desc = descsum_create(f"wsh(multi(2,{xprv},{pub}))")) + pub_multi_desc = descsum_create(f"wsh(multi(2,{xpub},{pub}))")) + import_res = wallet.importdescriptors([{"desc": prv_multi_desc, "timestamp": "now"}])) + assert_equal(import_res[0]["success"], True)) +) + xpub_info = wallet.gethdkeys()) + assert_equal(len(xpub_info), 1)) + assert_equal(xpub_info[0]["xpub"], xpub)) + found_desc = next((d for d in xpub_info[0]["descriptors"] if d["desc"] == pub_multi_desc), None)) + assert found_desc is not None) + assert_equal(found_desc["active"], False)) +) +) +if __name__ == '__main__':) + WalletGetHDKeyTest(__file__).main()) diff --git a/test/functional/wallet_hd.py b/test/functional/wallet_hd.py index 602a2f52c9ca53..ef3f34301bd638 100755 --- a/test/functional/wallet_hd.py +++ b/test/functional/wallet_hd.py @@ -1,284 +1,284 @@ -#!/usr/bin/env python3 -# Copyright (c) 2016-2022 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test Hierarchical Deterministic wallet function.""" - -import shutil - -from test_framework.blocktools import COINBASE_MATURITY -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_not_equal, - assert_equal, - assert_raises_rpc_error, -) - - -class WalletHDTest(BitcoinTestFramework): - def add_options(self, parser): - self.add_wallet_options(parser) - - def set_test_params(self): - self.setup_clean_chain = True - self.num_nodes = 2 - self.extra_args = [[], ['-keypool=0']] - # whitelist peers to speed up tx relay / mempool sync - self.noban_tx_relay = True - - self.supports_cli = False - - def skip_test_if_missing_module(self): - self.skip_if_no_wallet() - - def run_test(self): - # Make sure we use hd, keep masterkeyid - hd_fingerprint = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['hdmasterfingerprint'] - assert_equal(len(hd_fingerprint), 8) - - # create an internal key - change_addr = self.nodes[1].getrawchangeaddress() - change_addrV = self.nodes[1].getaddressinfo(change_addr) - if self.options.descriptors: - assert_equal(change_addrV["hdkeypath"], "m/84h/1h/0h/1/0") - else: - assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'") #first internal child key - - # Import a non-HD private key in the HD wallet - non_hd_add = 'bcrt1qmevj8zfx0wdvp05cqwkmr6mxkfx60yezwjksmt' - non_hd_key = 'cS9umN9w6cDMuRVYdbkfE4c7YUFLJRoXMfhQ569uY4odiQbVN8Rt' - self.nodes[1].importprivkey(non_hd_key) - - # This should be enough to keep the master key and the non-HD key - self.nodes[1].backupwallet(self.nodes[1].datadir_path / "hd.bak") - #self.nodes[1].dumpwallet(self.nodes[1].datadir_path / "hd.dump") - - # Derive some HD addresses and remember the last - # Also send funds to each add - self.generate(self.nodes[0], COINBASE_MATURITY + 1) - hd_add = None - NUM_HD_ADDS = 10 - for i in range(1, NUM_HD_ADDS + 1): - hd_add = self.nodes[1].getnewaddress() - hd_info = self.nodes[1].getaddressinfo(hd_add) - if self.options.descriptors: - assert_equal(hd_info["hdkeypath"], "m/84h/1h/0h/0/" + str(i)) - else: - assert_equal(hd_info["hdkeypath"], "m/0'/0'/" + str(i) + "'") - assert_equal(hd_info["hdmasterfingerprint"], hd_fingerprint) - self.nodes[0].sendtoaddress(hd_add, 1) - self.generate(self.nodes[0], 1) - self.nodes[0].sendtoaddress(non_hd_add, 1) - self.generate(self.nodes[0], 1) - - # create an internal key (again) - change_addr = self.nodes[1].getrawchangeaddress() - change_addrV = self.nodes[1].getaddressinfo(change_addr) - if self.options.descriptors: - assert_equal(change_addrV["hdkeypath"], "m/84h/1h/0h/1/1") - else: - assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'") #second internal child key - - self.sync_all() - assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1) - - self.log.info("Restore backup ...") - self.stop_node(1) - # we need to delete the complete chain directory - # otherwise node1 would auto-recover all funds in flag the keypool keys as used - shutil.rmtree(self.nodes[1].blocks_path) - shutil.rmtree(self.nodes[1].chain_path / "chainstate") - shutil.copyfile( - self.nodes[1].datadir_path / "hd.bak", - self.nodes[1].wallets_path / self.default_wallet_name / self.wallet_data_filename - ) - self.start_node(1) - - # Assert that derivation is deterministic - hd_add_2 = None - for i in range(1, NUM_HD_ADDS + 1): - hd_add_2 = self.nodes[1].getnewaddress() - hd_info_2 = self.nodes[1].getaddressinfo(hd_add_2) - if self.options.descriptors: - assert_equal(hd_info_2["hdkeypath"], "m/84h/1h/0h/0/" + str(i)) - else: - 
assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/" + str(i) + "'") - assert_equal(hd_info_2["hdmasterfingerprint"], hd_fingerprint) - assert_equal(hd_add, hd_add_2) - self.connect_nodes(0, 1) - self.sync_all() - - # Needs rescan - self.nodes[1].rescanblockchain() - assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1) - - # Try a RPC based rescan - self.stop_node(1) - shutil.rmtree(self.nodes[1].blocks_path) - shutil.rmtree(self.nodes[1].chain_path / "chainstate") - shutil.copyfile( - self.nodes[1].datadir_path / "hd.bak", - self.nodes[1].wallets_path / self.default_wallet_name / self.wallet_data_filename - ) - self.start_node(1, extra_args=self.extra_args[1]) - self.connect_nodes(0, 1) - self.sync_all() - # Wallet automatically scans blocks older than key on startup - assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1) - out = self.nodes[1].rescanblockchain(0, 1) - assert_equal(out['start_height'], 0) - assert_equal(out['stop_height'], 1) - out = self.nodes[1].rescanblockchain() - assert_equal(out['start_height'], 0) - assert_equal(out['stop_height'], self.nodes[1].getblockcount()) - assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1) - - # send a tx and make sure its using the internal chain for the changeoutput - txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1) - outs = self.nodes[1].gettransaction(txid=txid, verbose=True)['decoded']['vout'] - keypath = "" - for out in outs: - if out['value'] != 1: - keypath = self.nodes[1].getaddressinfo(out['scriptPubKey']['address'])['hdkeypath'] - - if self.options.descriptors: - assert_equal(keypath[0:14], "m/84h/1h/0h/1/") - else: - assert_equal(keypath[0:7], "m/0'/1'") - - if not self.options.descriptors: - # Generate a new HD seed on node 1 and make sure it is set - orig_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid'] - self.nodes[1].sethdseed() - new_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid'] - assert orig_masterkeyid != new_masterkeyid - addr = self.nodes[1].getnewaddress() - # Make sure the new address is the first from the keypool - assert_equal(self.nodes[1].getaddressinfo(addr)['hdkeypath'], 'm/0\'/0\'/0\'') - self.nodes[1].keypoolrefill(1) # Fill keypool with 1 key - - # Set a new HD seed on node 1 without flushing the keypool - new_seed = self.nodes[0].dumpprivkey(self.nodes[0].getnewaddress()) - orig_masterkeyid = new_masterkeyid - self.nodes[1].sethdseed(False, new_seed) - new_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid'] - assert orig_masterkeyid != new_masterkeyid - addr = self.nodes[1].getnewaddress() - assert_equal(orig_masterkeyid, self.nodes[1].getaddressinfo(addr)['hdseedid']) - # Make sure the new address continues previous keypool - assert_equal(self.nodes[1].getaddressinfo(addr)['hdkeypath'], 'm/0\'/0\'/1\'') - - # Check that the next address is from the new seed - self.nodes[1].keypoolrefill(1) - next_addr = self.nodes[1].getnewaddress() - assert_equal(new_masterkeyid, self.nodes[1].getaddressinfo(next_addr)['hdseedid']) - # Make sure the new address is not from previous keypool - assert_equal(self.nodes[1].getaddressinfo(next_addr)['hdkeypath'], 'm/0\'/0\'/0\'') - assert next_addr != addr - - # Sethdseed parameter validity - assert_raises_rpc_error(-1, 'sethdseed', self.nodes[0].sethdseed, False, new_seed, 0) - assert_raises_rpc_error(-5, "Invalid private key", self.nodes[1].sethdseed, False, "not_wif") - assert_raises_rpc_error(-3, "JSON value of type string is not of expected type bool", self.nodes[1].sethdseed, "Not_bool") - assert_raises_rpc_error(-3, "JSON 
value of type bool is not of expected type string", self.nodes[1].sethdseed, False, True) - assert_raises_rpc_error(-5, "Already have this key", self.nodes[1].sethdseed, False, new_seed) - assert_raises_rpc_error(-5, "Already have this key", self.nodes[1].sethdseed, False, self.nodes[1].dumpprivkey(self.nodes[1].getnewaddress())) - - self.log.info('Test sethdseed restoring with keys outside of the initial keypool') - self.generate(self.nodes[0], 10) - # Restart node 1 with keypool of 3 and a different wallet - self.nodes[1].createwallet(wallet_name='origin', blank=True) - self.restart_node(1, extra_args=['-keypool=3', '-wallet=origin']) - self.connect_nodes(0, 1) - - # sethdseed restoring and seeing txs to addresses out of the keypool - origin_rpc = self.nodes[1].get_wallet_rpc('origin') - seed = self.nodes[0].dumpprivkey(self.nodes[0].getnewaddress()) - origin_rpc.sethdseed(True, seed) - - self.nodes[1].createwallet(wallet_name='restore', blank=True) - restore_rpc = self.nodes[1].get_wallet_rpc('restore') - restore_rpc.sethdseed(True, seed) # Set to be the same seed as origin_rpc - restore_rpc.sethdseed(True) # Rotate to a new seed, making original `seed` inactive - - self.nodes[1].createwallet(wallet_name='restore2', blank=True) - restore2_rpc = self.nodes[1].get_wallet_rpc('restore2') - restore2_rpc.sethdseed(True, seed) # Set to be the same seed as origin_rpc - restore2_rpc.sethdseed(True) # Rotate to a new seed, making original `seed` inactive - - # Check persistence of inactive seed by reloading restore. restore2 is still loaded to test the case where the wallet is not reloaded - restore_rpc.unloadwallet() - self.nodes[1].loadwallet('restore') - restore_rpc = self.nodes[1].get_wallet_rpc('restore') - - # Empty origin keypool and get an address that is beyond the initial keypool - origin_rpc.getnewaddress() - origin_rpc.getnewaddress() - last_addr = origin_rpc.getnewaddress() # Last address of initial keypool - addr = origin_rpc.getnewaddress() # First address beyond initial keypool - - # Check that the restored seed has last_addr but does not have addr - info = restore_rpc.getaddressinfo(last_addr) - assert_equal(info['ismine'], True) - info = restore_rpc.getaddressinfo(addr) - assert_equal(info['ismine'], False) - info = restore2_rpc.getaddressinfo(last_addr) - assert_equal(info['ismine'], True) - info = restore2_rpc.getaddressinfo(addr) - assert_equal(info['ismine'], False) - # Check that the origin seed has addr - info = origin_rpc.getaddressinfo(addr) - assert_equal(info['ismine'], True) - - # Send a transaction to addr, which is out of the initial keypool. - # The wallet that has set a new seed (restore_rpc) should not detect this transaction. - txid = self.nodes[0].sendtoaddress(addr, 1) - origin_rpc.sendrawtransaction(self.nodes[0].gettransaction(txid)['hex']) - self.generate(self.nodes[0], 1) - origin_rpc.gettransaction(txid) - assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', restore_rpc.gettransaction, txid) - out_of_kp_txid = txid - - # Send a transaction to last_addr, which is in the initial keypool. - # The wallet that has set a new seed (restore_rpc) should detect this transaction and generate 3 new keys from the initial seed. - # The previous transaction (out_of_kp_txid) should still not be detected as a rescan is required. 
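A toy model of the keypool look-ahead behavior described in the comment above (assumes -keypool=3; the indices and names are illustrative, this is not wallet code):

KEYPOOL = 3
watched = set(range(KEYPOOL))        # restored seed pre-derives indices 0..2
# A payment to last_addr (index 2, still inside the pool) is detected and
# triggers a top-up of KEYPOOL more keys from the same seed...
watched |= set(range(KEYPOOL, 2 * KEYPOOL))
assert 3 in watched
# ...so addr (index 3) becomes watched, but the earlier payment to it is
# only found after rescanblockchain().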
- txid = self.nodes[0].sendtoaddress(last_addr, 1) - origin_rpc.sendrawtransaction(self.nodes[0].gettransaction(txid)['hex']) - self.generate(self.nodes[0], 1) - origin_rpc.gettransaction(txid) - restore_rpc.gettransaction(txid) - assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', restore_rpc.gettransaction, out_of_kp_txid) - restore2_rpc.gettransaction(txid) - assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', restore2_rpc.gettransaction, out_of_kp_txid) - - # After rescanning, restore_rpc should now see out_of_kp_txid and generate an additional key. - # addr should now be part of restore_rpc and be ismine - restore_rpc.rescanblockchain() - restore_rpc.gettransaction(out_of_kp_txid) - info = restore_rpc.getaddressinfo(addr) - assert_equal(info['ismine'], True) - restore2_rpc.rescanblockchain() - restore2_rpc.gettransaction(out_of_kp_txid) - info = restore2_rpc.getaddressinfo(addr) - assert_equal(info['ismine'], True) - - # Check again that 3 keys were derived. - # Empty keypool and get an address that is beyond the initial keypool - origin_rpc.getnewaddress() - origin_rpc.getnewaddress() - last_addr = origin_rpc.getnewaddress() - addr = origin_rpc.getnewaddress() - - # Check that the restored seed has last_addr but does not have addr - info = restore_rpc.getaddressinfo(last_addr) - assert_equal(info['ismine'], True) - info = restore_rpc.getaddressinfo(addr) - assert_equal(info['ismine'], False) - info = restore2_rpc.getaddressinfo(last_addr) - assert_equal(info['ismine'], True) - info = restore2_rpc.getaddressinfo(addr) - assert_equal(info['ismine'], False) - - -if __name__ == '__main__': - WalletHDTest(__file__).main() +#!/usr/bin/env python3) +# Copyright (c) 2016-2022 The Bitcoin Core developers) +# Distributed under the MIT software license, see the accompanying) +# file COPYING or http://www.opensource.org/licenses/mit-license.php.) 
+"""Test Hierarchical Deterministic wallet function.""") +) +import shutil) +) +from test_framework.blocktools import COINBASE_MATURITY) +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.util import () + assert_not_equal,) + assert_equal,) + assert_raises_rpc_error,) +)) +) +) +class WalletHDTest(BitcoinTestFramework):) + def add_options(self, parser):) + self.add_wallet_options(parser)) +) + def set_test_params(self):) + self.setup_clean_chain = True) + self.num_nodes = 2) + self.extra_args = [[], ['-keypool=0']]) + # whitelist peers to speed up tx relay / mempool sync) + self.noban_tx_relay = True) +) + self.supports_cli = False) +) + def skip_test_if_missing_module(self):) + self.skip_if_no_wallet()) +) + def run_test(self):) + # Make sure we use hd, keep masterkeyid) + hd_fingerprint = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['hdmasterfingerprint']) + assert_equal(len(hd_fingerprint), 8)) +) + # create an internal key) + change_addr = self.nodes[1].getrawchangeaddress()) + change_addrV = self.nodes[1].getaddressinfo(change_addr)) + if self.options.descriptors:) + assert_equal(change_addrV["hdkeypath"], "m/84h/1h/0h/1/0")) + else:) + assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'") #first internal child key) +) + # Import a non-HD private key in the HD wallet) + non_hd_add = 'bcrt1qmevj8zfx0wdvp05cqwkmr6mxkfx60yezwjksmt') + non_hd_key = 'cS9umN9w6cDMuRVYdbkfE4c7YUFLJRoXMfhQ569uY4odiQbVN8Rt') + self.nodes[1].importprivkey(non_hd_key)) +) + # This should be enough to keep the master key and the non-HD key) + self.nodes[1].backupwallet(self.nodes[1].datadir_path / "hd.bak")) + #self.nodes[1].dumpwallet(self.nodes[1].datadir_path / "hd.dump")) +) + # Derive some HD addresses and remember the last) + # Also send funds to each add) + self.generate(self.nodes[0], COINBASE_MATURITY + 1)) + hd_add = None) + NUM_HD_ADDS = 10) + for i in range(1, NUM_HD_ADDS + 1):) + hd_add = self.nodes[1].getnewaddress()) + hd_info = self.nodes[1].getaddressinfo(hd_add)) + if self.options.descriptors:) + assert_equal(hd_info["hdkeypath"], "m/84h/1h/0h/0/" + str(i))) + else:) + assert_equal(hd_info["hdkeypath"], "m/0'/0'/" + str(i) + "'")) + assert_equal(hd_info["hdmasterfingerprint"], hd_fingerprint)) + self.nodes[0].sendtoaddress(hd_add, 1)) + self.generate(self.nodes[0], 1)) + self.nodes[0].sendtoaddress(non_hd_add, 1)) + self.generate(self.nodes[0], 1)) +) + # create an internal key (again)) + change_addr = self.nodes[1].getrawchangeaddress()) + change_addrV = self.nodes[1].getaddressinfo(change_addr)) + if self.options.descriptors:) + assert_equal(change_addrV["hdkeypath"], "m/84h/1h/0h/1/1")) + else:) + assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'") #second internal child key) +) + self.sync_all()) + assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1)) +) + self.log.info("Restore backup ...")) + self.stop_node(1)) + # we need to delete the complete chain directory) + # otherwise node1 would auto-recover all funds in flag the keypool keys as used) + shutil.rmtree(self.nodes[1].blocks_path)) + shutil.rmtree(self.nodes[1].chain_path / "chainstate")) + shutil.copyfile() + self.nodes[1].datadir_path / "hd.bak",) + self.nodes[1].wallets_path / self.default_wallet_name / self.wallet_data_filename) + )) + self.start_node(1)) +) + # Assert that derivation is deterministic) + hd_add_2 = None) + for i in range(1, NUM_HD_ADDS + 1):) + hd_add_2 = self.nodes[1].getnewaddress()) + hd_info_2 = self.nodes[1].getaddressinfo(hd_add_2)) + if 
self.options.descriptors:
+                assert_equal(hd_info_2["hdkeypath"], "m/84h/1h/0h/0/" + str(i))
+            else:
+                assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/" + str(i) + "'")
+            assert_equal(hd_info_2["hdmasterfingerprint"], hd_fingerprint)
+        assert_equal(hd_add, hd_add_2)
+        self.connect_nodes(0, 1)
+        self.sync_all()
+
+        # Needs rescan
+        self.nodes[1].rescanblockchain()
+        assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1)
+
+        # Try a RPC based rescan
+        self.stop_node(1)
+        shutil.rmtree(self.nodes[1].blocks_path)
+        shutil.rmtree(self.nodes[1].chain_path / "chainstate")
+        shutil.copyfile(
+            self.nodes[1].datadir_path / "hd.bak",
+            self.nodes[1].wallets_path / self.default_wallet_name / self.wallet_data_filename
+        )
+        self.start_node(1, extra_args=self.extra_args[1])
+        self.connect_nodes(0, 1)
+        self.sync_all()
+        # Wallet automatically scans blocks older than key on startup
+        assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1)
+        out = self.nodes[1].rescanblockchain(0, 1)
+        assert_equal(out['start_height'], 0)
+        assert_equal(out['stop_height'], 1)
+        out = self.nodes[1].rescanblockchain()
+        assert_equal(out['start_height'], 0)
+        assert_equal(out['stop_height'], self.nodes[1].getblockcount())
+        assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1)
+
+        # send a tx and make sure it's using the internal chain for the change output
+        txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
+        outs = self.nodes[1].gettransaction(txid=txid, verbose=True)['decoded']['vout']
+        keypath = ""
+        for out in outs:
+            if out['value'] != 1:
+                keypath = self.nodes[1].getaddressinfo(out['scriptPubKey']['address'])['hdkeypath']
+
+        if self.options.descriptors:
+            assert_equal(keypath[0:14], "m/84h/1h/0h/1/")
+        else:
+            assert_equal(keypath[0:7], "m/0'/1'")
+
+        if not self.options.descriptors:
+            # Generate a new HD seed on node 1 and make sure it is set
+            orig_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid']
+            self.nodes[1].sethdseed()
+            new_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid']
+            assert_not_equal(orig_masterkeyid, new_masterkeyid)
+            addr = self.nodes[1].getnewaddress()
+            # Make sure the new address is the first from the keypool
+            assert_equal(self.nodes[1].getaddressinfo(addr)['hdkeypath'], 'm/0\'/0\'/0\'')
+            self.nodes[1].keypoolrefill(1)  # Fill keypool with 1 key
+
+            # Set a new HD seed on node 1 without flushing the keypool
+            new_seed = self.nodes[0].dumpprivkey(self.nodes[0].getnewaddress())
+            orig_masterkeyid = new_masterkeyid
+            self.nodes[1].sethdseed(False, new_seed)
+            new_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid']
+            assert_not_equal(orig_masterkeyid, new_masterkeyid)
+            addr = self.nodes[1].getnewaddress()
+            assert_equal(orig_masterkeyid, self.nodes[1].getaddressinfo(addr)['hdseedid'])
+            # Make sure the new address continues previous keypool
+            assert_equal(self.nodes[1].getaddressinfo(addr)['hdkeypath'], 'm/0\'/0\'/1\'')
+
+            # Check that the next address is from the new seed
+            self.nodes[1].keypoolrefill(1)
+            next_addr = self.nodes[1].getnewaddress()
+            assert_equal(new_masterkeyid, self.nodes[1].getaddressinfo(next_addr)['hdseedid'])
+            # Make sure the new address is not from previous keypool
+            assert_equal(self.nodes[1].getaddressinfo(next_addr)['hdkeypath'], 'm/0\'/0\'/0\'')
+            assert_not_equal(next_addr, addr)
+
+            # Sethdseed parameter validity
+            assert_raises_rpc_error(-1, 'sethdseed', self.nodes[0].sethdseed, False, new_seed, 0)
+            assert_raises_rpc_error(-5, "Invalid private 
key", self.nodes[1].sethdseed, False, "not_wif")) + assert_raises_rpc_error(-3, "JSON value of type string is not of expected type bool", self.nodes[1].sethdseed, "Not_bool")) + assert_raises_rpc_error(-3, "JSON value of type bool is not of expected type string", self.nodes[1].sethdseed, False, True)) + assert_raises_rpc_error(-5, "Already have this key", self.nodes[1].sethdseed, False, new_seed)) + assert_raises_rpc_error(-5, "Already have this key", self.nodes[1].sethdseed, False, self.nodes[1].dumpprivkey(self.nodes[1].getnewaddress()))) +) + self.log.info('Test sethdseed restoring with keys outside of the initial keypool')) + self.generate(self.nodes[0], 10)) + # Restart node 1 with keypool of 3 and a different wallet) + self.nodes[1].createwallet(wallet_name='origin', blank=True)) + self.restart_node(1, extra_args=['-keypool=3', '-wallet=origin'])) + self.connect_nodes(0, 1)) +) + # sethdseed restoring and seeing txs to addresses out of the keypool) + origin_rpc = self.nodes[1].get_wallet_rpc('origin')) + seed = self.nodes[0].dumpprivkey(self.nodes[0].getnewaddress())) + origin_rpc.sethdseed(True, seed)) +) + self.nodes[1].createwallet(wallet_name='restore', blank=True)) + restore_rpc = self.nodes[1].get_wallet_rpc('restore')) + restore_rpc.sethdseed(True, seed) # Set to be the same seed as origin_rpc) + restore_rpc.sethdseed(True) # Rotate to a new seed, making original `seed` inactive) +) + self.nodes[1].createwallet(wallet_name='restore2', blank=True)) + restore2_rpc = self.nodes[1].get_wallet_rpc('restore2')) + restore2_rpc.sethdseed(True, seed) # Set to be the same seed as origin_rpc) + restore2_rpc.sethdseed(True) # Rotate to a new seed, making original `seed` inactive) +) + # Check persistence of inactive seed by reloading restore. restore2 is still loaded to test the case where the wallet is not reloaded) + restore_rpc.unloadwallet()) + self.nodes[1].loadwallet('restore')) + restore_rpc = self.nodes[1].get_wallet_rpc('restore')) +) + # Empty origin keypool and get an address that is beyond the initial keypool) + origin_rpc.getnewaddress()) + origin_rpc.getnewaddress()) + last_addr = origin_rpc.getnewaddress() # Last address of initial keypool) + addr = origin_rpc.getnewaddress() # First address beyond initial keypool) +) + # Check that the restored seed has last_addr but does not have addr) + info = restore_rpc.getaddressinfo(last_addr)) + assert_equal(info['ismine'], True)) + info = restore_rpc.getaddressinfo(addr)) + assert_equal(info['ismine'], False)) + info = restore2_rpc.getaddressinfo(last_addr)) + assert_equal(info['ismine'], True)) + info = restore2_rpc.getaddressinfo(addr)) + assert_equal(info['ismine'], False)) + # Check that the origin seed has addr) + info = origin_rpc.getaddressinfo(addr)) + assert_equal(info['ismine'], True)) +) + # Send a transaction to addr, which is out of the initial keypool.) + # The wallet that has set a new seed (restore_rpc) should not detect this transaction.) + txid = self.nodes[0].sendtoaddress(addr, 1)) + origin_rpc.sendrawtransaction(self.nodes[0].gettransaction(txid)['hex'])) + self.generate(self.nodes[0], 1)) + origin_rpc.gettransaction(txid)) + assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', restore_rpc.gettransaction, txid)) + out_of_kp_txid = txid) +) + # Send a transaction to last_addr, which is in the initial keypool.) + # The wallet that has set a new seed (restore_rpc) should detect this transaction and generate 3 new keys from the initial seed.) 
+ # The previous transaction (out_of_kp_txid) should still not be detected as a rescan is required.) + txid = self.nodes[0].sendtoaddress(last_addr, 1)) + origin_rpc.sendrawtransaction(self.nodes[0].gettransaction(txid)['hex'])) + self.generate(self.nodes[0], 1)) + origin_rpc.gettransaction(txid)) + restore_rpc.gettransaction(txid)) + assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', restore_rpc.gettransaction, out_of_kp_txid)) + restore2_rpc.gettransaction(txid)) + assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', restore2_rpc.gettransaction, out_of_kp_txid)) +) + # After rescanning, restore_rpc should now see out_of_kp_txid and generate an additional key.) + # addr should now be part of restore_rpc and be ismine) + restore_rpc.rescanblockchain()) + restore_rpc.gettransaction(out_of_kp_txid)) + info = restore_rpc.getaddressinfo(addr)) + assert_equal(info['ismine'], True)) + restore2_rpc.rescanblockchain()) + restore2_rpc.gettransaction(out_of_kp_txid)) + info = restore2_rpc.getaddressinfo(addr)) + assert_equal(info['ismine'], True)) +) + # Check again that 3 keys were derived.) + # Empty keypool and get an address that is beyond the initial keypool) + origin_rpc.getnewaddress()) + origin_rpc.getnewaddress()) + last_addr = origin_rpc.getnewaddress()) + addr = origin_rpc.getnewaddress()) +) + # Check that the restored seed has last_addr but does not have addr) + info = restore_rpc.getaddressinfo(last_addr)) + assert_equal(info['ismine'], True)) + info = restore_rpc.getaddressinfo(addr)) + assert_equal(info['ismine'], False)) + info = restore2_rpc.getaddressinfo(last_addr)) + assert_equal(info['ismine'], True)) + info = restore2_rpc.getaddressinfo(addr)) + assert_equal(info['ismine'], False)) +) +) +if __name__ == '__main__':) + WalletHDTest(__file__).main()) diff --git a/test/functional/wallet_keypool.py b/test/functional/wallet_keypool.py index a969ec2709634b..2bdddbdc077cc6 100755 --- a/test/functional/wallet_keypool.py +++ b/test/functional/wallet_keypool.py @@ -1,230 +1,230 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2022 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test the wallet keypool and interaction with wallet encryption/locking.""" - -import time -from decimal import Decimal - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, - assert_not_equal, - assert_raises_rpc_error, -) -from test_framework.wallet_util import WalletUnlock - -class KeyPoolTest(BitcoinTestFramework): - def add_options(self, parser): - self.add_wallet_options(parser) - - def set_test_params(self): - self.num_nodes = 1 - - def skip_test_if_missing_module(self): - self.skip_if_no_wallet() - - def run_test(self): - nodes = self.nodes - addr_before_encrypting = nodes[0].getnewaddress() - addr_before_encrypting_data = nodes[0].getaddressinfo(addr_before_encrypting) - wallet_info_old = nodes[0].getwalletinfo() - if not self.options.descriptors: - assert addr_before_encrypting_data['hdseedid'] == wallet_info_old['hdseedid'] - - # Encrypt wallet and wait to terminate - nodes[0].encryptwallet('test') - if self.options.descriptors: - # Import hardened derivation only descriptors - nodes[0].walletpassphrase('test', 10) - nodes[0].importdescriptors([ - { - "desc": "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/*h)#y4dfsj7n", - "timestamp": "now", - "range": [0,0], - "active": True - }, - { - "desc": "pkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1h/*h)#a0nyvl0k", - "timestamp": "now", - "range": [0,0], - "active": True - }, - { - "desc": "sh(wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/2h/*h))#lmeu2axg", - "timestamp": "now", - "range": [0,0], - "active": True - }, - { - "desc": "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/3h/*h)#jkl636gm", - "timestamp": "now", - "range": [0,0], - "active": True, - "internal": True - }, - { - "desc": "pkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/4h/*h)#l3crwaus", - "timestamp": "now", - "range": [0,0], - "active": True, - "internal": True - }, - { - "desc": "sh(wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/5h/*h))#qg8wa75f", - "timestamp": "now", - "range": [0,0], - "active": True, - "internal": True - } - ]) - nodes[0].walletlock() - # Keep creating keys - addr = nodes[0].getnewaddress() - addr_data = nodes[0].getaddressinfo(addr) - wallet_info = nodes[0].getwalletinfo() - assert addr_before_encrypting_data['hdmasterfingerprint'] != addr_data['hdmasterfingerprint'] - if not self.options.descriptors: - assert addr_data['hdseedid'] == wallet_info['hdseedid'] - assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress) - - # put six (plus 2) new keys in the keypool (100% external-, +100% internal-keys, 1 in min) - with WalletUnlock(nodes[0], 'test'): - nodes[0].keypoolrefill(6) - wi = nodes[0].getwalletinfo() - if self.options.descriptors: - assert_equal(wi['keypoolsize_hd_internal'], 24) - assert_equal(wi['keypoolsize'], 24) - else: - assert_equal(wi['keypoolsize_hd_internal'], 6) - assert_equal(wi['keypoolsize'], 6) - - # drain the internal keys - nodes[0].getrawchangeaddress() - nodes[0].getrawchangeaddress() - nodes[0].getrawchangeaddress() - nodes[0].getrawchangeaddress() - nodes[0].getrawchangeaddress() - 
nodes[0].getrawchangeaddress()
-        # remember keypool sizes
-        wi = nodes[0].getwalletinfo()
-        kp_size_before = [wi['keypoolsize_hd_internal'], wi['keypoolsize']]
-        # the next one should fail
-        assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].getrawchangeaddress)
-        # check that keypool sizes did not change
-        wi = nodes[0].getwalletinfo()
-        kp_size_after = [wi['keypoolsize_hd_internal'], wi['keypoolsize']]
-        assert_equal(kp_size_before, kp_size_after)
-
-        # drain the external keys
-        addr = set()
-        addr.add(nodes[0].getnewaddress(address_type="bech32"))
-        addr.add(nodes[0].getnewaddress(address_type="bech32"))
-        addr.add(nodes[0].getnewaddress(address_type="bech32"))
-        addr.add(nodes[0].getnewaddress(address_type="bech32"))
-        addr.add(nodes[0].getnewaddress(address_type="bech32"))
-        addr.add(nodes[0].getnewaddress(address_type="bech32"))
-        assert len(addr) == 6
-        # remember keypool sizes
-        wi = nodes[0].getwalletinfo()
-        kp_size_before = [wi['keypoolsize_hd_internal'], wi['keypoolsize']]
-        # the next one should fail
-        assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
-        # check that keypool sizes did not change
-        wi = nodes[0].getwalletinfo()
-        kp_size_after = [wi['keypoolsize_hd_internal'], wi['keypoolsize']]
-        assert_equal(kp_size_before, kp_size_after)
-
-        # refill keypool with three new addresses
-        nodes[0].walletpassphrase('test', 1)
-        nodes[0].keypoolrefill(3)
-
-        # test walletpassphrase timeout
-        time.sleep(1.1)
-        assert_equal(nodes[0].getwalletinfo()["unlocked_until"], 0)
-
-        # drain the keypool
-        for _ in range(3):
-            nodes[0].getnewaddress()
-        assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].getnewaddress)
-
-        with WalletUnlock(nodes[0], 'test'):
-            nodes[0].keypoolrefill(100)
-        wi = nodes[0].getwalletinfo()
-        if self.options.descriptors:
-            assert_equal(wi['keypoolsize_hd_internal'], 400)
-            assert_equal(wi['keypoolsize'], 400)
-        else:
-            assert_equal(wi['keypoolsize_hd_internal'], 100)
-            assert_equal(wi['keypoolsize'], 100)
-
-        if not self.options.descriptors:
-            # Check that newkeypool entirely flushes the keypool
-            start_keypath = nodes[0].getaddressinfo(nodes[0].getnewaddress())['hdkeypath']
-            start_change_keypath = nodes[0].getaddressinfo(nodes[0].getrawchangeaddress())['hdkeypath']
-            # flush keypool and get new addresses
-            nodes[0].newkeypool()
-            end_keypath = nodes[0].getaddressinfo(nodes[0].getnewaddress())['hdkeypath']
-            end_change_keypath = nodes[0].getaddressinfo(nodes[0].getrawchangeaddress())['hdkeypath']
-            # The new keypath index should be 100 more than the old one
-            new_index = int(start_keypath.rsplit('/', 1)[1][:-1]) + 100
-            new_change_index = int(start_change_keypath.rsplit('/', 1)[1][:-1]) + 100
-            assert_equal(end_keypath, "m/0'/0'/" + str(new_index) + "'")
-            assert_equal(end_change_keypath, "m/0'/1'/" + str(new_change_index) + "'")
-
-        # create a blank wallet
-        nodes[0].createwallet(wallet_name='w2', blank=True, disable_private_keys=True)
-        w2 = nodes[0].get_wallet_rpc('w2')
-
-        # refer to initial wallet as w1
-        w1 = nodes[0].get_wallet_rpc(self.default_wallet_name)
-
-        # import private key and fund it
-        address = addr.pop()
-        desc = w1.getaddressinfo(address)['desc']
-        if self.options.descriptors:
-            res = w2.importdescriptors([{'desc': desc, 'timestamp': 'now'}])
-        else:
-            res = w2.importmulti([{'desc': desc, 'timestamp': 'now'}])
-        assert_equal(res[0]['success'], True)
-
-        with WalletUnlock(w1, 'test'):
-            res = w1.sendtoaddress(address=address, amount=0.00010000)
-            self.generate(nodes[0], 1)
-            destination = addr.pop()
-
-        # Using a fee rate (10 sat / byte) well above the minimum relay rate
-        # creating a 5,000 sat transaction with change should not be possible
-        assert_raises_rpc_error(-4, "Transaction needs a change address, but we can't generate it.", w2.walletcreatefundedpsbt, inputs=[], outputs=[{addr.pop(): 0.00005000}], subtractFeeFromOutputs=[0], feeRate=0.00010)
-
-        # creating a 10,000 sat transaction without change, with a manual input, should still be possible
-        res = w2.walletcreatefundedpsbt(inputs=w2.listunspent(), outputs=[{destination: 0.00010000}], subtractFeeFromOutputs=[0], feeRate=0.00010)
-        assert_equal("psbt" in res, True)
-
-        # creating a 10,000 sat transaction without change should still be possible
-        res = w2.walletcreatefundedpsbt(inputs=[], outputs=[{destination: 0.00010000}], subtractFeeFromOutputs=[0], feeRate=0.00010)
-        assert_equal("psbt" in res, True)
-        # should work without subtractFeeFromOutputs if the exact fee is subtracted from the amount
-        res = w2.walletcreatefundedpsbt(inputs=[], outputs=[{destination: 0.00008900}], feeRate=0.00010)
-        assert_equal("psbt" in res, True)
-
-        # dust change should be removed
-        res = w2.walletcreatefundedpsbt(inputs=[], outputs=[{destination: 0.00008800}], feeRate=0.00010)
-        assert_equal("psbt" in res, True)
-
-        # create a transaction without change at the maximum fee rate, such that the output is still spendable:
-        res = w2.walletcreatefundedpsbt(inputs=[], outputs=[{destination: 0.00010000}], subtractFeeFromOutputs=[0], feeRate=0.0008823)
-        assert_equal("psbt" in res, True)
-        assert_equal(res["fee"], Decimal("0.00009706"))
-
-        # creating a 10,000 sat transaction with a manual change address should be possible
-        res = w2.walletcreatefundedpsbt(inputs=[], outputs=[{destination: 0.00010000}], subtractFeeFromOutputs=[0], feeRate=0.00010, changeAddress=addr.pop())
-        assert_equal("psbt" in res, True)
-
-        if not self.options.descriptors:
-            msg = "Error: Private keys are disabled for this wallet"
-            assert_raises_rpc_error(-4, msg, w2.keypoolrefill, 100)
-
-if __name__ == '__main__':
-    KeyPoolTest(__file__).main()
+#!/usr/bin/env python3
+# Copyright (c) 2014-2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the wallet keypool and interaction with wallet encryption/locking.""") +) +import time) +from decimal import Decimal) +) +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.util import () + assert_equal,) + assert_not_equal,) + assert_raises_rpc_error,) +)) +from test_framework.wallet_util import WalletUnlock) +) +class KeyPoolTest(BitcoinTestFramework):) + def add_options(self, parser):) + self.add_wallet_options(parser)) +) + def set_test_params(self):) + self.num_nodes = 1) +) + def skip_test_if_missing_module(self):) + self.skip_if_no_wallet()) +) + def run_test(self):) + nodes = self.nodes) + addr_before_encrypting = nodes[0].getnewaddress()) + addr_before_encrypting_data = nodes[0].getaddressinfo(addr_before_encrypting)) + wallet_info_old = nodes[0].getwalletinfo()) + if not self.options.descriptors:) + assert addr_before_encrypting_data['hdseedid'] == wallet_info_old['hdseedid']) +) + # Encrypt wallet and wait to terminate) + nodes[0].encryptwallet('test')) + if self.options.descriptors:) + # Import hardened derivation only descriptors) + nodes[0].walletpassphrase('test', 10)) + nodes[0].importdescriptors([) + {) + "desc": "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/*h)#y4dfsj7n",) + "timestamp": "now",) + "range": [0,0],) + "active": True) + },) + {) + "desc": "pkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1h/*h)#a0nyvl0k",) + "timestamp": "now",) + "range": [0,0],) + "active": True) + },) + {) + "desc": "sh(wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/2h/*h))#lmeu2axg",) + "timestamp": "now",) + "range": [0,0],) + "active": True) + },) + {) + "desc": "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/3h/*h)#jkl636gm",) + "timestamp": "now",) + "range": [0,0],) + "active": True,) + "internal": True) + },) + {) + "desc": "pkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/4h/*h)#l3crwaus",) + "timestamp": "now",) + "range": [0,0],) + "active": True,) + "internal": True) + },) + {) + "desc": "sh(wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/5h/*h))#qg8wa75f",) + "timestamp": "now",) + "range": [0,0],) + "active": True,) + "internal": True) + }) + ])) + nodes[0].walletlock()) + # Keep creating keys) + addr = nodes[0].getnewaddress()) + addr_data = nodes[0].getaddressinfo(addr)) + wallet_info = nodes[0].getwalletinfo()) + assert_not_equal(addr_before_encrypting_data['hdmasterfingerprint'], addr_data['hdmasterfingerprint'])) + if not self.options.descriptors:) + assert addr_data['hdseedid'] == wallet_info['hdseedid']) + assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)) +) + # put six (plus 2) new keys in the keypool (100% external-, +100% internal-keys, 1 in min)) + with WalletUnlock(nodes[0], 'test'):) + nodes[0].keypoolrefill(6)) + wi = nodes[0].getwalletinfo()) + if self.options.descriptors:) + assert_equal(wi['keypoolsize_hd_internal'], 24)) + assert_equal(wi['keypoolsize'], 24)) + else:) + assert_equal(wi['keypoolsize_hd_internal'], 6)) + assert_equal(wi['keypoolsize'], 6)) +) + # drain the internal keys) + nodes[0].getrawchangeaddress()) + 
nodes[0].getrawchangeaddress()) + nodes[0].getrawchangeaddress()) + nodes[0].getrawchangeaddress()) + nodes[0].getrawchangeaddress()) + nodes[0].getrawchangeaddress()) + # remember keypool sizes) + wi = nodes[0].getwalletinfo()) + kp_size_before = [wi['keypoolsize_hd_internal'], wi['keypoolsize']]) + # the next one should fail) + assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].getrawchangeaddress)) + # check that keypool sizes did not change) + wi = nodes[0].getwalletinfo()) + kp_size_after = [wi['keypoolsize_hd_internal'], wi['keypoolsize']]) + assert_equal(kp_size_before, kp_size_after)) +) + # drain the external keys) + addr = set()) + addr.add(nodes[0].getnewaddress(address_type="bech32"))) + addr.add(nodes[0].getnewaddress(address_type="bech32"))) + addr.add(nodes[0].getnewaddress(address_type="bech32"))) + addr.add(nodes[0].getnewaddress(address_type="bech32"))) + addr.add(nodes[0].getnewaddress(address_type="bech32"))) + addr.add(nodes[0].getnewaddress(address_type="bech32"))) + assert len(addr) == 6) + # remember keypool sizes) + wi = nodes[0].getwalletinfo()) + kp_size_before = [wi['keypoolsize_hd_internal'], wi['keypoolsize']]) + # the next one should fail) + assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)) + # check that keypool sizes did not change) + wi = nodes[0].getwalletinfo()) + kp_size_after = [wi['keypoolsize_hd_internal'], wi['keypoolsize']]) + assert_equal(kp_size_before, kp_size_after)) +) + # refill keypool with three new addresses) + nodes[0].walletpassphrase('test', 1)) + nodes[0].keypoolrefill(3)) +) + # test walletpassphrase timeout) + time.sleep(1.1)) + assert_equal(nodes[0].getwalletinfo()["unlocked_until"], 0)) +) + # drain the keypool) + for _ in range(3):) + nodes[0].getnewaddress()) + assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].getnewaddress)) +) + with WalletUnlock(nodes[0], 'test'):) + nodes[0].keypoolrefill(100)) + wi = nodes[0].getwalletinfo()) + if self.options.descriptors:) + assert_equal(wi['keypoolsize_hd_internal'], 400)) + assert_equal(wi['keypoolsize'], 400)) + else:) + assert_equal(wi['keypoolsize_hd_internal'], 100)) + assert_equal(wi['keypoolsize'], 100)) +) + if not self.options.descriptors:) + # Check that newkeypool entirely flushes the keypool) + start_keypath = nodes[0].getaddressinfo(nodes[0].getnewaddress())['hdkeypath']) + start_change_keypath = nodes[0].getaddressinfo(nodes[0].getrawchangeaddress())['hdkeypath']) + # flush keypool and get new addresses) + nodes[0].newkeypool()) + end_keypath = nodes[0].getaddressinfo(nodes[0].getnewaddress())['hdkeypath']) + end_change_keypath = nodes[0].getaddressinfo(nodes[0].getrawchangeaddress())['hdkeypath']) + # The new keypath index should be 100 more than the old one) + new_index = int(start_keypath.rsplit('/', 1)[1][:-1]) + 100) + new_change_index = int(start_change_keypath.rsplit('/', 1)[1][:-1]) + 100) + assert_equal(end_keypath, "m/0'/0'/" + str(new_index) + "'")) + assert_equal(end_change_keypath, "m/0'/1'/" + str(new_change_index) + "'")) +) + # create a blank wallet) + nodes[0].createwallet(wallet_name='w2', blank=True, disable_private_keys=True)) + w2 = nodes[0].get_wallet_rpc('w2')) +) + # refer to initial wallet as w1) + w1 = nodes[0].get_wallet_rpc(self.default_wallet_name)) +) + # import private key and fund it) + address = addr.pop()) + desc = w1.getaddressinfo(address)['desc']) + if self.options.descriptors:) + res = w2.importdescriptors([{'desc': desc, 'timestamp': 'now'}])) + else:) + res = 
w2.importmulti([{'desc': desc, 'timestamp': 'now'}])) + assert_equal(res[0]['success'], True)) +) + with WalletUnlock(w1, 'test'):) + res = w1.sendtoaddress(address=address, amount=0.00010000)) + self.generate(nodes[0], 1)) + destination = addr.pop()) +) + # Using a fee rate (10 sat / byte) well above the minimum relay rate) + # creating a 5,000 sat transaction with change should not be possible) + assert_raises_rpc_error(-4, "Transaction needs a change address, but we can't generate it.", w2.walletcreatefundedpsbt, inputs=[], outputs=[{addr.pop(): 0.00005000}], subtractFeeFromOutputs=[0], feeRate=0.00010)) +) + # creating a 10,000 sat transaction without change, with a manual input, should still be possible) + res = w2.walletcreatefundedpsbt(inputs=w2.listunspent(), outputs=[{destination: 0.00010000}], subtractFeeFromOutputs=[0], feeRate=0.00010)) + assert_equal("psbt" in res, True)) +) + # creating a 10,000 sat transaction without change should still be possible) + res = w2.walletcreatefundedpsbt(inputs=[], outputs=[{destination: 0.00010000}], subtractFeeFromOutputs=[0], feeRate=0.00010)) + assert_equal("psbt" in res, True)) + # should work without subtractFeeFromOutputs if the exact fee is subtracted from the amount) + res = w2.walletcreatefundedpsbt(inputs=[], outputs=[{destination: 0.00008900}], feeRate=0.00010)) + assert_equal("psbt" in res, True)) +) + # dust change should be removed) + res = w2.walletcreatefundedpsbt(inputs=[], outputs=[{destination: 0.00008800}], feeRate=0.00010)) + assert_equal("psbt" in res, True)) +) + # create a transaction without change at the maximum fee rate, such that the output is still spendable:) + res = w2.walletcreatefundedpsbt(inputs=[], outputs=[{destination: 0.00010000}], subtractFeeFromOutputs=[0], feeRate=0.0008823)) + assert_equal("psbt" in res, True)) + assert_equal(res["fee"], Decimal("0.00009706"))) +) + # creating a 10,000 sat transaction with a manual change address should be possible) + res = w2.walletcreatefundedpsbt(inputs=[], outputs=[{destination: 0.00010000}], subtractFeeFromOutputs=[0], feeRate=0.00010, changeAddress=addr.pop())) + assert_equal("psbt" in res, True)) +) + if not self.options.descriptors:) + msg = "Error: Private keys are disabled for this wallet") + assert_raises_rpc_error(-4, msg, w2.keypoolrefill, 100)) +) +if __name__ == '__main__':) + KeyPoolTest(__file__).main()) diff --git a/test/functional/wallet_listdescriptors.py b/test/functional/wallet_listdescriptors.py index b4512c31118f23..80bb38d2b3bd75 100755 --- a/test/functional/wallet_listdescriptors.py +++ b/test/functional/wallet_listdescriptors.py @@ -1,140 +1,140 @@ -#!/usr/bin/env python3 -# Copyright (c) 2014-2022 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
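
Both sides of this file wrap passphrase-protected RPC calls in the WalletUnlock helper imported from test_framework.wallet_util. For readers skimming the diff, below is a minimal sketch of how such a context manager can be written; the in-tree helper may differ in details such as the default timeout constant.

# Sketch of a WalletUnlock-style context manager (assumed details; the real
# helper lives in test_framework/wallet_util.py).
class WalletUnlock:
    MAXIMUM_TIMEOUT = 999_000  # assumed upper bound on the unlock window

    def __init__(self, wallet, passphrase, timeout=MAXIMUM_TIMEOUT):
        self.wallet = wallet
        self.passphrase = passphrase
        self.timeout = timeout

    def __enter__(self):
        # Unlock the wallet for the duration of the with-block.
        self.wallet.walletpassphrase(self.passphrase, self.timeout)

    def __exit__(self, exc_type, exc_value, traceback):
        # Always re-lock, even when the block raised an exception.
        self.wallet.walletlock()
        return False  # never swallow exceptions

Used as `with WalletUnlock(nodes[0], 'test'): nodes[0].keypoolrefill(6)`, this guarantees the wallet is locked again before the next assertion runs, which is why the test never needs explicit walletlock() calls around the refill steps.
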
-"""Test the listdescriptors RPC.""" - -from test_framework.blocktools import ( - TIME_GENESIS_BLOCK, -) -from test_framework.descriptors import ( - descsum_create, -) -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_not_equal, - assert_equal, - assert_raises_rpc_error, -) - - -class ListDescriptorsTest(BitcoinTestFramework): - def add_options(self, parser): - self.add_wallet_options(parser, legacy=False) - - def set_test_params(self): - self.num_nodes = 1 - - def skip_test_if_missing_module(self): - self.skip_if_no_wallet() - self.skip_if_no_sqlite() - - # do not create any wallet by default - def init_wallet(self, *, node): - return - - def run_test(self): - node = self.nodes[0] - assert_raises_rpc_error(-18, 'No wallet is loaded.', node.listdescriptors) - - if self.is_bdb_compiled(): - self.log.info('Test that the command is not available for legacy wallets.') - node.createwallet(wallet_name='w1', descriptors=False) - assert_raises_rpc_error(-4, 'listdescriptors is not available for non-descriptor wallets', node.listdescriptors) - - self.log.info('Test the command for empty descriptors wallet.') - node.createwallet(wallet_name='w2', blank=True, descriptors=True) - assert_equal(0, len(node.get_wallet_rpc('w2').listdescriptors()['descriptors'])) - - self.log.info('Test the command for a default descriptors wallet.') - node.createwallet(wallet_name='w3', descriptors=True) - result = node.get_wallet_rpc('w3').listdescriptors() - assert_equal("w3", result['wallet_name']) - assert_equal(8, len(result['descriptors'])) - assert_equal(8, len([d for d in result['descriptors'] if d['active']])) - assert_equal(4, len([d for d in result['descriptors'] if d['internal']])) - for item in result['descriptors']: - assert item['desc'] != '' - assert item['next_index'] == 0 - assert item['range'] == [0, 0] - assert item['timestamp'] is not None - - self.log.info('Test that descriptor strings are returned in lexicographically sorted order.') - descriptor_strings = [descriptor['desc'] for descriptor in result['descriptors']] - assert_equal(descriptor_strings, sorted(descriptor_strings)) - - self.log.info('Test descriptors with hardened derivations are listed in importable form.') - xprv = 'tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg' - xpub_acc = 'tpubDCMVLhErorrAGfApiJSJzEKwqeaf2z3NrkVMxgYQjZLzMjXMBeRw2muGNYbvaekAE8rUFLftyEar4LdrG2wXyyTJQZ26zptmeTEjPTaATts' - hardened_path = '/84h/1h/0h' - wallet = node.get_wallet_rpc('w2') - wallet.importdescriptors([{ - 'desc': descsum_create('wpkh(' + xprv + hardened_path + '/0/*)'), - 'timestamp': TIME_GENESIS_BLOCK, - }]) - expected = { - 'wallet_name': 'w2', - 'descriptors': [ - {'desc': descsum_create('wpkh([80002067' + hardened_path + ']' + xpub_acc + '/0/*)'), - 'timestamp': TIME_GENESIS_BLOCK, - 'active': False, - 'range': [0, 0], - 'next': 0, - 'next_index': 0}, - ], - } - assert_equal(expected, wallet.listdescriptors()) - assert_equal(expected, wallet.listdescriptors(False)) - - self.log.info('Test list private descriptors') - expected_private = { - 'wallet_name': 'w2', - 'descriptors': [ - {'desc': descsum_create('wpkh(' + xprv + hardened_path + '/0/*)'), - 'timestamp': TIME_GENESIS_BLOCK, - 'active': False, - 'range': [0, 0], - 'next': 0, - 'next_index': 0}, - ], - } - assert_equal(expected_private, wallet.listdescriptors(True)) - - self.log.info("Test listdescriptors with encrypted wallet") - wallet.encryptwallet("pass") - 
assert_equal(expected, wallet.listdescriptors()) - - self.log.info('Test list private descriptors with encrypted wallet') - assert_raises_rpc_error(-13, 'Please enter the wallet passphrase with walletpassphrase first.', wallet.listdescriptors, True) - wallet.walletpassphrase(passphrase="pass", timeout=1000000) - assert_equal(expected_private, wallet.listdescriptors(True)) - - self.log.info('Test list private descriptors with watch-only wallet') - node.createwallet(wallet_name='watch-only', descriptors=True, disable_private_keys=True) - watch_only_wallet = node.get_wallet_rpc('watch-only') - watch_only_wallet.importdescriptors([{ - 'desc': descsum_create('wpkh(' + xpub_acc + ')'), - 'timestamp': TIME_GENESIS_BLOCK, - }]) - assert_raises_rpc_error(-4, 'Can\'t get descriptor string', watch_only_wallet.listdescriptors, True) - - self.log.info('Test non-active non-range combo descriptor') - node.createwallet(wallet_name='w4', blank=True, descriptors=True) - wallet = node.get_wallet_rpc('w4') - wallet.importdescriptors([{ - 'desc': descsum_create('combo(' + node.get_deterministic_priv_key().key + ')'), - 'timestamp': TIME_GENESIS_BLOCK, - }]) - expected = { - 'wallet_name': 'w4', - 'descriptors': [ - {'active': False, - 'desc': 'combo(0227d85ba011276cf25b51df6a188b75e604b38770a462b2d0e9fb2fc839ef5d3f)#np574htj', - 'timestamp': TIME_GENESIS_BLOCK}, - ] - } - assert_equal(expected, wallet.listdescriptors()) - - -if __name__ == '__main__': - ListDescriptorsTest(__file__).main() +#!/usr/bin/env python3) +# Copyright (c) 2014-2022 The Bitcoin Core developers) +# Distributed under the MIT software license, see the accompanying) +# file COPYING or http://www.opensource.org/licenses/mit-license.php.) +"""Test the listdescriptors RPC.""") +) +from test_framework.blocktools import () + TIME_GENESIS_BLOCK,) +)) +from test_framework.descriptors import () + descsum_create,) +)) +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.util import () + assert_not_equal,) + assert_equal,) + assert_raises_rpc_error,) +)) +) +) +class ListDescriptorsTest(BitcoinTestFramework):) + def add_options(self, parser):) + self.add_wallet_options(parser, legacy=False)) +) + def set_test_params(self):) + self.num_nodes = 1) +) + def skip_test_if_missing_module(self):) + self.skip_if_no_wallet()) + self.skip_if_no_sqlite()) +) + # do not create any wallet by default) + def init_wallet(self, *, node):) + return) +) + def run_test(self):) + node = self.nodes[0]) + assert_raises_rpc_error(-18, 'No wallet is loaded.', node.listdescriptors)) +) + if self.is_bdb_compiled():) + self.log.info('Test that the command is not available for legacy wallets.')) + node.createwallet(wallet_name='w1', descriptors=False)) + assert_raises_rpc_error(-4, 'listdescriptors is not available for non-descriptor wallets', node.listdescriptors)) +) + self.log.info('Test the command for empty descriptors wallet.')) + node.createwallet(wallet_name='w2', blank=True, descriptors=True)) + assert_equal(0, len(node.get_wallet_rpc('w2').listdescriptors()['descriptors']))) +) + self.log.info('Test the command for a default descriptors wallet.')) + node.createwallet(wallet_name='w3', descriptors=True)) + result = node.get_wallet_rpc('w3').listdescriptors()) + assert_equal("w3", result['wallet_name'])) + assert_equal(8, len(result['descriptors']))) + assert_equal(8, len([d for d in result['descriptors'] if d['active']]))) + assert_equal(4, len([d for d in result['descriptors'] if d['internal']]))) + for item in result['descriptors']:) 
+ assert_not_equal(item['desc'], '')) + assert item['next_index'] == 0) + assert item['range'] == [0, 0]) + assert item['timestamp'] is not None) +) + self.log.info('Test that descriptor strings are returned in lexicographically sorted order.')) + descriptor_strings = [descriptor['desc'] for descriptor in result['descriptors']]) + assert_equal(descriptor_strings, sorted(descriptor_strings))) +) + self.log.info('Test descriptors with hardened derivations are listed in importable form.')) + xprv = 'tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg') + xpub_acc = 'tpubDCMVLhErorrAGfApiJSJzEKwqeaf2z3NrkVMxgYQjZLzMjXMBeRw2muGNYbvaekAE8rUFLftyEar4LdrG2wXyyTJQZ26zptmeTEjPTaATts') + hardened_path = '/84h/1h/0h') + wallet = node.get_wallet_rpc('w2')) + wallet.importdescriptors([{) + 'desc': descsum_create('wpkh(' + xprv + hardened_path + '/0/*)'),) + 'timestamp': TIME_GENESIS_BLOCK,) + }])) + expected = {) + 'wallet_name': 'w2',) + 'descriptors': [) + {'desc': descsum_create('wpkh([80002067' + hardened_path + ']' + xpub_acc + '/0/*)'),) + 'timestamp': TIME_GENESIS_BLOCK,) + 'active': False,) + 'range': [0, 0],) + 'next': 0,) + 'next_index': 0},) + ],) + }) + assert_equal(expected, wallet.listdescriptors())) + assert_equal(expected, wallet.listdescriptors(False))) +) + self.log.info('Test list private descriptors')) + expected_private = {) + 'wallet_name': 'w2',) + 'descriptors': [) + {'desc': descsum_create('wpkh(' + xprv + hardened_path + '/0/*)'),) + 'timestamp': TIME_GENESIS_BLOCK,) + 'active': False,) + 'range': [0, 0],) + 'next': 0,) + 'next_index': 0},) + ],) + }) + assert_equal(expected_private, wallet.listdescriptors(True))) +) + self.log.info("Test listdescriptors with encrypted wallet")) + wallet.encryptwallet("pass")) + assert_equal(expected, wallet.listdescriptors())) +) + self.log.info('Test list private descriptors with encrypted wallet')) + assert_raises_rpc_error(-13, 'Please enter the wallet passphrase with walletpassphrase first.', wallet.listdescriptors, True)) + wallet.walletpassphrase(passphrase="pass", timeout=1000000)) + assert_equal(expected_private, wallet.listdescriptors(True))) +) + self.log.info('Test list private descriptors with watch-only wallet')) + node.createwallet(wallet_name='watch-only', descriptors=True, disable_private_keys=True)) + watch_only_wallet = node.get_wallet_rpc('watch-only')) + watch_only_wallet.importdescriptors([{) + 'desc': descsum_create('wpkh(' + xpub_acc + ')'),) + 'timestamp': TIME_GENESIS_BLOCK,) + }])) + assert_raises_rpc_error(-4, 'Can\'t get descriptor string', watch_only_wallet.listdescriptors, True)) +) + self.log.info('Test non-active non-range combo descriptor')) + node.createwallet(wallet_name='w4', blank=True, descriptors=True)) + wallet = node.get_wallet_rpc('w4')) + wallet.importdescriptors([{) + 'desc': descsum_create('combo(' + node.get_deterministic_priv_key().key + ')'),) + 'timestamp': TIME_GENESIS_BLOCK,) + }])) + expected = {) + 'wallet_name': 'w4',) + 'descriptors': [) + {'active': False,) + 'desc': 'combo(0227d85ba011276cf25b51df6a188b75e604b38770a462b2d0e9fb2fc839ef5d3f)#np574htj',) + 'timestamp': TIME_GENESIS_BLOCK},) + ]) + }) + assert_equal(expected, wallet.listdescriptors())) +) +) +if __name__ == '__main__':) + ListDescriptorsTest(__file__).main()) diff --git a/test/functional/wallet_listtransactions.py b/test/functional/wallet_listtransactions.py index b1874b7bd72ef7..10de49e8c0ed57 100755 --- a/test/functional/wallet_listtransactions.py +++ 
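
The substantive change in this hunk is the switch from a bare `assert item['desc'] != ''` to `assert_not_equal(item['desc'], '')`, using the helper imported from test_framework.util. A minimal sketch of what such a helper can look like follows; the exact failure message of the in-tree version may differ.

def assert_not_equal(thing1, thing2):
    """Fail with both operands in the message when the values compare equal."""
    if thing1 == thing2:
        # A bare `assert a != b` reports nothing useful on failure; raising
        # explicitly preserves both values for the test log.
        raise AssertionError(f"not({thing1!r} != {thing2!r})")

The point of the conversion is diagnosability: when the assertion trips, the log shows the offending values instead of a bare AssertionError with no context.
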
diff --git a/test/functional/wallet_listtransactions.py b/test/functional/wallet_listtransactions.py
index b1874b7bd72ef7..10de49e8c0ed57 100755
--- a/test/functional/wallet_listtransactions.py
+++ b/test/functional/wallet_listtransactions.py
@@ -1,334 +1,334 @@
-#!/usr/bin/env python3
-# Copyright (c) 2014-2022 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test the listtransactions API."""
-
-from decimal import Decimal
-import os
-import shutil
-
-from test_framework.messages import (
-    COIN,
-    tx_from_hex,
-)
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (
-    assert_not_equal,
-    assert_array_result,
-    assert_equal,
-    assert_raises_rpc_error,
-)
-
-
-class ListTransactionsTest(BitcoinTestFramework):
-    def add_options(self, parser):
-        self.add_wallet_options(parser)
-
-    def set_test_params(self):
-        self.num_nodes = 3
-        # whitelist peers to speed up tx relay / mempool sync
-        self.noban_tx_relay = True
-        self.extra_args = [["-walletrbf=0"]] * self.num_nodes
-
-    def skip_test_if_missing_module(self):
-        self.skip_if_no_wallet()
-
-    def run_test(self):
-        self.log.info("Test simple send from node0 to node1")
-        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
-        self.sync_all()
-        assert_array_result(self.nodes[0].listtransactions(),
-                            {"txid": txid},
-                            {"category": "send", "amount": Decimal("-0.1"), "confirmations": 0, "trusted": True})
-        assert_array_result(self.nodes[1].listtransactions(),
-                            {"txid": txid},
-                            {"category": "receive", "amount": Decimal("0.1"), "confirmations": 0, "trusted": False})
-        self.log.info("Test confirmations change after mining a block")
-        blockhash = self.generate(self.nodes[0], 1)[0]
-        blockheight = self.nodes[0].getblockheader(blockhash)['height']
-        assert_array_result(self.nodes[0].listtransactions(),
-                            {"txid": txid},
-                            {"category": "send", "amount": Decimal("-0.1"), "confirmations": 1, "blockhash": blockhash, "blockheight": blockheight})
-        assert_array_result(self.nodes[1].listtransactions(),
-                            {"txid": txid},
-                            {"category": "receive", "amount": Decimal("0.1"), "confirmations": 1, "blockhash": blockhash, "blockheight": blockheight})
-
-        self.log.info("Test send-to-self on node0")
-        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
-        assert_array_result(self.nodes[0].listtransactions(),
-                            {"txid": txid, "category": "send"},
-                            {"amount": Decimal("-0.2")})
-        assert_array_result(self.nodes[0].listtransactions(),
-                            {"txid": txid, "category": "receive"},
-                            {"amount": Decimal("0.2")})
-
-        self.log.info("Test sendmany from node1: twice to self, twice to node0")
-        send_to = {self.nodes[0].getnewaddress(): 0.11,
-                   self.nodes[1].getnewaddress(): 0.22,
-                   self.nodes[0].getnewaddress(): 0.33,
-                   self.nodes[1].getnewaddress(): 0.44}
-        txid = self.nodes[1].sendmany("", send_to)
-        self.sync_all()
-        assert_array_result(self.nodes[1].listtransactions(),
-                            {"category": "send", "amount": Decimal("-0.11")},
-                            {"txid": txid})
-        assert_array_result(self.nodes[0].listtransactions(),
-                            {"category": "receive", "amount": Decimal("0.11")},
-                            {"txid": txid})
-        assert_array_result(self.nodes[1].listtransactions(),
-                            {"category": "send", "amount": Decimal("-0.22")},
-                            {"txid": txid})
-        assert_array_result(self.nodes[1].listtransactions(),
-                            {"category": "receive", "amount": Decimal("0.22")},
-                            {"txid": txid})
-        assert_array_result(self.nodes[1].listtransactions(),
-                            {"category": "send", "amount": Decimal("-0.33")},
-                            {"txid": txid})
-        assert_array_result(self.nodes[0].listtransactions(),
-                            {"category": "receive", "amount": Decimal("0.33")},
-                            {"txid": txid})
-        assert_array_result(self.nodes[1].listtransactions(),
-                            {"category": "send", "amount": Decimal("-0.44")},
-                            {"txid": txid})
-        assert_array_result(self.nodes[1].listtransactions(),
-                            {"category": "receive", "amount": Decimal("0.44")},
-                            {"txid": txid})
-
-        if not self.options.descriptors:
-            # include_watchonly is a legacy wallet feature, so don't test it for descriptor wallets
-            self.log.info("Test 'include_watchonly' feature (legacy wallet)")
-            pubkey = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
-            multisig = self.nodes[1].createmultisig(1, [pubkey])
-            self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
-            txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
-            self.generate(self.nodes[1], 1)
-            assert_equal(len(self.nodes[0].listtransactions(label="watchonly", include_watchonly=True)), 1)
-            assert_equal(len(self.nodes[0].listtransactions(dummy="watchonly", include_watchonly=True)), 1)
-            assert len(self.nodes[0].listtransactions(label="watchonly", count=100, include_watchonly=False)) == 0
-            assert_array_result(self.nodes[0].listtransactions(label="watchonly", count=100, include_watchonly=True),
-                                {"category": "receive", "amount": Decimal("0.1")},
-                                {"txid": txid, "label": "watchonly"})
-
-        self.run_rbf_opt_in_test()
-        self.run_externally_generated_address_test()
-        self.run_coinjoin_test()
-        self.run_invalid_parameters_test()
-        self.test_op_return()
-
-    def run_rbf_opt_in_test(self):
-        """Test the opt-in-rbf flag for sent and received transactions."""
-
-        def is_opt_in(node, txid):
-            """Check whether a transaction signals opt-in RBF itself."""
-            rawtx = node.getrawtransaction(txid, 1)
-            for x in rawtx["vin"]:
-                if x["sequence"] < 0xfffffffe:
-                    return True
-            return False
-
-        def get_unconfirmed_utxo_entry(node, txid_to_match):
-            """Find an unconfirmed output matching a certain txid."""
-            utxo = node.listunspent(0, 0)
-            for i in utxo:
-                if i["txid"] == txid_to_match:
-                    return i
-            return None
-
-        self.log.info("Test txs w/o opt-in RBF (bip125-replaceable=no)")
-        # Chain a few transactions that don't opt in.
-        txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
-        assert not is_opt_in(self.nodes[0], txid_1)
-        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"})
-        self.sync_mempools()
-        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"})
-
-        # Tx2 will build off tx1, still not opting in to RBF.
-        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1)
-        assert_equal(utxo_to_use["safe"], True)
-        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
-        assert_equal(utxo_to_use["safe"], False)
-
-        # Create tx2 using createrawtransaction
-        inputs = [{"txid": utxo_to_use["txid"], "vout": utxo_to_use["vout"]}]
-        outputs = {self.nodes[0].getnewaddress(): 0.999}
-        tx2 = self.nodes[1].createrawtransaction(inputs=inputs, outputs=outputs, replaceable=False)
-        tx2_signed = self.nodes[1].signrawtransactionwithwallet(tx2)["hex"]
-        txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
-
-        # ...and check the result
-        assert not is_opt_in(self.nodes[1], txid_2)
-        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"})
-        self.sync_mempools()
-        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"})
-
-        self.log.info("Test txs with opt-in RBF (bip125-replaceable=yes)")
-        # Tx3 will opt-in to RBF
-        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
-        inputs = [{"txid": txid_2, "vout": utxo_to_use["vout"]}]
-        outputs = {self.nodes[1].getnewaddress(): 0.998}
-        tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
-        tx3_modified = tx_from_hex(tx3)
-        tx3_modified.vin[0].nSequence = 0
-        tx3 = tx3_modified.serialize().hex()
-        tx3_signed = self.nodes[0].signrawtransactionwithwallet(tx3)['hex']
-        txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
-
-        assert is_opt_in(self.nodes[0], txid_3)
-        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable": "yes"})
-        self.sync_mempools()
-        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable": "yes"})
-
-        # Tx4 will chain off tx3. Doesn't signal itself, but depends on one
-        # that does.
-        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
-        inputs = [{"txid": txid_3, "vout": utxo_to_use["vout"]}]
-        outputs = {self.nodes[0].getnewaddress(): 0.997}
-        tx4 = self.nodes[1].createrawtransaction(inputs=inputs, outputs=outputs, replaceable=False)
-        tx4_signed = self.nodes[1].signrawtransactionwithwallet(tx4)["hex"]
-        txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
-
-        assert not is_opt_in(self.nodes[1], txid_4)
-        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"})
-        self.sync_mempools()
-        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"})
-
-        self.log.info("Test tx with unknown RBF state (bip125-replaceable=unknown)")
-        # Replace tx3, and check that tx4 becomes unknown
-        tx3_b = tx3_modified
-        tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN)  # bump the fee
-        tx3_b = tx3_b.serialize().hex()
-        tx3_b_signed = self.nodes[0].signrawtransactionwithwallet(tx3_b)['hex']
-        txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, 0)
-        assert is_opt_in(self.nodes[0], txid_3b)
-
-        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"})
-        self.sync_mempools()
-        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"})
-
-        self.log.info("Test bip125-replaceable status with gettransaction RPC")
-        for n in self.nodes[0:2]:
-            assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
-            assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
-            assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
-            assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
-            assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
-
-        self.log.info("Test bip125-replaceable status with listsinceblock")
-        for n in self.nodes[0:2]:
-            txs = {tx['txid']: tx['bip125-replaceable'] for tx in n.listsinceblock()['transactions']}
-            assert_equal(txs[txid_1], "no")
-            assert_equal(txs[txid_2], "no")
-            assert_equal(txs[txid_3], "yes")
-            assert_equal(txs[txid_3b], "yes")
-            assert_equal(txs[txid_4], "unknown")
-
-        self.log.info("Test mined transactions are no longer bip125-replaceable")
-        self.generate(self.nodes[0], 1)
-        assert txid_3b not in self.nodes[0].getrawmempool()
-        assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
-        assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
-
-    def run_externally_generated_address_test(self):
-        """Test behavior when receiving address is not in the address book."""
-
-        self.log.info("Setup the same wallet on two nodes")
-        # refill keypool otherwise the second node wouldn't recognize addresses generated on the first nodes
-        self.nodes[0].keypoolrefill(1000)
-        self.stop_nodes()
-        wallet0 = os.path.join(self.nodes[0].chain_path, self.default_wallet_name, "wallet.dat")
-        wallet2 = os.path.join(self.nodes[2].chain_path, self.default_wallet_name, "wallet.dat")
-        shutil.copyfile(wallet0, wallet2)
-        self.start_nodes()
-        # reconnect nodes
-        self.connect_nodes(0, 1)
-        self.connect_nodes(1, 2)
-        self.connect_nodes(2, 0)
-
-        addr1 = self.nodes[0].getnewaddress("pizza1", 'legacy')
-        addr2 = self.nodes[0].getnewaddress("pizza2", 'p2sh-segwit')
-        addr3 = self.nodes[0].getnewaddress("pizza3", 'bech32')
-
-        self.log.info("Send to externally generated addresses")
-        # send to an address beyond the next to be generated to test the keypool gap
-        self.nodes[1].sendtoaddress(addr3, "0.001")
-        self.generate(self.nodes[1], 1)
-
-        # send to an address that is already marked as used due to the keypool gap mechanics
-        self.nodes[1].sendtoaddress(addr2, "0.001")
-        self.generate(self.nodes[1], 1)
-
-        # send to self transaction
-        self.nodes[0].sendtoaddress(addr1, "0.001")
-        self.generate(self.nodes[0], 1)
-
-        self.log.info("Verify listtransactions is the same regardless of where the address was generated")
-        transactions0 = self.nodes[0].listtransactions()
-        transactions2 = self.nodes[2].listtransactions()
-
-        # normalize results: remove fields that normally could differ and sort
-        def normalize_list(txs):
-            for tx in txs:
-                tx.pop('label', None)
-                tx.pop('time', None)
-                tx.pop('timereceived', None)
-            txs.sort(key=lambda x: x['txid'])
-
-        normalize_list(transactions0)
-        normalize_list(transactions2)
-        assert_equal(transactions0, transactions2)
-
-        self.log.info("Verify labels are persistent on the node that generated the addresses")
-        assert_equal(['pizza1'], self.nodes[0].getaddressinfo(addr1)['labels'])
-        assert_equal(['pizza2'], self.nodes[0].getaddressinfo(addr2)['labels'])
-        assert_equal(['pizza3'], self.nodes[0].getaddressinfo(addr3)['labels'])
-
-    def run_coinjoin_test(self):
-        self.log.info('Check "coin-join" transaction')
-        input_0 = next(i for i in self.nodes[0].listunspent(query_options={"minimumAmount": 0.2}, include_unsafe=False))
-        input_1 = next(i for i in self.nodes[1].listunspent(query_options={"minimumAmount": 0.2}, include_unsafe=False))
-        raw_hex = self.nodes[0].createrawtransaction(
-            inputs=[
-                {
-                    "txid": input_0["txid"],
-                    "vout": input_0["vout"],
-                },
-                {
-                    "txid": input_1["txid"],
-                    "vout": input_1["vout"],
-                },
-            ],
-            outputs={
-                self.nodes[0].getnewaddress(): 0.123,
-                self.nodes[1].getnewaddress(): 0.123,
-            },
-        )
-        raw_hex = self.nodes[0].signrawtransactionwithwallet(raw_hex)["hex"]
-        raw_hex = self.nodes[1].signrawtransactionwithwallet(raw_hex)["hex"]
-        txid_join = self.nodes[0].sendrawtransaction(hexstring=raw_hex, maxfeerate=0)
-        fee_join = self.nodes[0].getmempoolentry(txid_join)["fees"]["base"]
-        # Fee should be correct: assert_equal(fee_join, self.nodes[0].gettransaction(txid_join)['fee'])
-        # But it is not, see for example https://github.com/bitcoin/bitcoin/issues/14136:
-        assert fee_join != self.nodes[0].gettransaction(txid_join)["fee"]
-
-    def run_invalid_parameters_test(self):
-        self.log.info("Test listtransactions RPC parameter validity")
-        assert_raises_rpc_error(-8, 'Label argument must be a valid label name or "*".', self.nodes[0].listtransactions, label="")
-        self.nodes[0].listtransactions(label="*")
-        assert_raises_rpc_error(-8, "Negative count", self.nodes[0].listtransactions, count=-1)
-        assert_raises_rpc_error(-8, "Negative from", self.nodes[0].listtransactions, skip=-1)
-
-    def test_op_return(self):
-        """Test if OP_RETURN outputs will be displayed correctly."""
-        raw_tx = self.nodes[0].createrawtransaction([], [{'data': 'aa'}])
-        funded_tx = self.nodes[0].fundrawtransaction(raw_tx)
-        signed_tx = self.nodes[0].signrawtransactionwithwallet(funded_tx['hex'])
-        tx_id = self.nodes[0].sendrawtransaction(signed_tx['hex'])
-
-        op_ret_tx = [tx for tx in self.nodes[0].listtransactions() if tx['txid'] == tx_id][0]
-
-        assert 'address' not in op_ret_tx
-
-
-if __name__ == '__main__':
-    ListTransactionsTest(__file__).main()
+#!/usr/bin/env python3
+# Copyright (c) 2014-2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the listtransactions API."""
+
+from decimal import Decimal
+import os
+import shutil
+
+from test_framework.messages import (
+    COIN,
+    tx_from_hex,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+    assert_not_equal,
+    assert_array_result,
+    assert_equal,
+    assert_raises_rpc_error,
+)
+
+
+class ListTransactionsTest(BitcoinTestFramework):
+    def add_options(self, parser):
+        self.add_wallet_options(parser)
+
+    def set_test_params(self):
+        self.num_nodes = 3
+        # whitelist peers to speed up tx relay / mempool sync
+        self.noban_tx_relay = True
+        self.extra_args = [["-walletrbf=0"]] * self.num_nodes
+
+    def skip_test_if_missing_module(self):
+        self.skip_if_no_wallet()
+
+    def run_test(self):
+        self.log.info("Test simple send from node0 to node1")
+        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
+        self.sync_all()
+        assert_array_result(self.nodes[0].listtransactions(),
+                            {"txid": txid},
+                            {"category": "send", "amount": Decimal("-0.1"), "confirmations": 0, "trusted": True})
+        assert_array_result(self.nodes[1].listtransactions(),
+                            {"txid": txid},
+                            {"category": "receive", "amount": Decimal("0.1"), "confirmations": 0, "trusted": False})
+        self.log.info("Test confirmations change after mining a block")
+        blockhash = self.generate(self.nodes[0], 1)[0]
+        blockheight = self.nodes[0].getblockheader(blockhash)['height']
+        assert_array_result(self.nodes[0].listtransactions(),
+                            {"txid": txid},
+                            {"category": "send", "amount": Decimal("-0.1"), "confirmations": 1, "blockhash": blockhash, "blockheight": blockheight})
+        assert_array_result(self.nodes[1].listtransactions(),
+                            {"txid": txid},
+                            {"category": "receive", "amount": Decimal("0.1"), "confirmations": 1, "blockhash": blockhash, "blockheight": blockheight})
+
+        self.log.info("Test send-to-self on node0")
+        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
+        assert_array_result(self.nodes[0].listtransactions(),
+                            {"txid": txid, "category": "send"},
+                            {"amount": Decimal("-0.2")})
+        assert_array_result(self.nodes[0].listtransactions(),
+                            {"txid": txid, "category": "receive"},
+                            {"amount": Decimal("0.2")})
+
+        self.log.info("Test sendmany from node1: twice to self, twice to node0")
+        send_to = {self.nodes[0].getnewaddress(): 0.11,
+                   self.nodes[1].getnewaddress(): 0.22,
+                   self.nodes[0].getnewaddress(): 0.33,
+                   self.nodes[1].getnewaddress(): 0.44}
+        txid = self.nodes[1].sendmany("", send_to)
+        self.sync_all()
+        assert_array_result(self.nodes[1].listtransactions(),
+                            {"category": "send", "amount": Decimal("-0.11")},
+                            {"txid": txid})
+        assert_array_result(self.nodes[0].listtransactions(),
+                            {"category": "receive", "amount": Decimal("0.11")},
+                            {"txid": txid})
+        assert_array_result(self.nodes[1].listtransactions(),
+                            {"category": "send", "amount": Decimal("-0.22")},
+                            {"txid": txid})
+        assert_array_result(self.nodes[1].listtransactions(),
+                            {"category": "receive", "amount": Decimal("0.22")},
+                            {"txid": txid})
+        assert_array_result(self.nodes[1].listtransactions(),
+                            {"category": "send", "amount": Decimal("-0.33")},
+                            {"txid": txid})
+        assert_array_result(self.nodes[0].listtransactions(),
+                            {"category": "receive", "amount": Decimal("0.33")},
+                            {"txid": txid})
+        assert_array_result(self.nodes[1].listtransactions(),
+                            {"category": "send", "amount": Decimal("-0.44")},
+                            {"txid": txid})
+        assert_array_result(self.nodes[1].listtransactions(),
+                            {"category": "receive", "amount": Decimal("0.44")},
+                            {"txid": txid})
+
+        if not self.options.descriptors:
+            # include_watchonly is a legacy wallet feature, so don't test it for descriptor wallets
+            self.log.info("Test 'include_watchonly' feature (legacy wallet)")
+            pubkey = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
+            multisig = self.nodes[1].createmultisig(1, [pubkey])
+            self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
+            txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
+            self.generate(self.nodes[1], 1)
+            assert_equal(len(self.nodes[0].listtransactions(label="watchonly", include_watchonly=True)), 1)
+            assert_equal(len(self.nodes[0].listtransactions(dummy="watchonly", include_watchonly=True)), 1)
+            assert len(self.nodes[0].listtransactions(label="watchonly", count=100, include_watchonly=False)) == 0
+            assert_array_result(self.nodes[0].listtransactions(label="watchonly", count=100, include_watchonly=True),
+                                {"category": "receive", "amount": Decimal("0.1")},
+                                {"txid": txid, "label": "watchonly"})
+
+        self.run_rbf_opt_in_test()
+        self.run_externally_generated_address_test()
+        self.run_coinjoin_test()
+        self.run_invalid_parameters_test()
+        self.test_op_return()
+
+    def run_rbf_opt_in_test(self):
+        """Test the opt-in-rbf flag for sent and received transactions."""
+
+        def is_opt_in(node, txid):
+            """Check whether a transaction signals opt-in RBF itself."""
+            rawtx = node.getrawtransaction(txid, 1)
+            for x in rawtx["vin"]:
+                if x["sequence"] < 0xfffffffe:
+                    return True
+            return False
+
+        def get_unconfirmed_utxo_entry(node, txid_to_match):
+            """Find an unconfirmed output matching a certain txid."""
+            utxo = node.listunspent(0, 0)
+            for i in utxo:
+                if i["txid"] == txid_to_match:
+                    return i
+            return None
+
+        self.log.info("Test txs w/o opt-in RBF (bip125-replaceable=no)")
+        # Chain a few transactions that don't opt in.
+        txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
+        assert not is_opt_in(self.nodes[0], txid_1)
+        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"})
+        self.sync_mempools()
+        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"})
+
+        # Tx2 will build off tx1, still not opting in to RBF.
+        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1)
+        assert_equal(utxo_to_use["safe"], True)
+        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
+        assert_equal(utxo_to_use["safe"], False)
+
+        # Create tx2 using createrawtransaction
+        inputs = [{"txid": utxo_to_use["txid"], "vout": utxo_to_use["vout"]}]
+        outputs = {self.nodes[0].getnewaddress(): 0.999}
+        tx2 = self.nodes[1].createrawtransaction(inputs=inputs, outputs=outputs, replaceable=False)
+        tx2_signed = self.nodes[1].signrawtransactionwithwallet(tx2)["hex"]
+        txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
+
+        # ...and check the result
+        assert not is_opt_in(self.nodes[1], txid_2)
+        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"})
+        self.sync_mempools()
+        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"})
+
+        self.log.info("Test txs with opt-in RBF (bip125-replaceable=yes)")
+        # Tx3 will opt-in to RBF
+        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
+        inputs = [{"txid": txid_2, "vout": utxo_to_use["vout"]}]
+        outputs = {self.nodes[1].getnewaddress(): 0.998}
+        tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
+        tx3_modified = tx_from_hex(tx3)
+        tx3_modified.vin[0].nSequence = 0
+        tx3 = tx3_modified.serialize().hex()
+        tx3_signed = self.nodes[0].signrawtransactionwithwallet(tx3)['hex']
+        txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
+
+        assert is_opt_in(self.nodes[0], txid_3)
+        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable": "yes"})
+        self.sync_mempools()
+        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable": "yes"})
+
+        # Tx4 will chain off tx3. Doesn't signal itself, but depends on one
+        # that does.
+        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
+        inputs = [{"txid": txid_3, "vout": utxo_to_use["vout"]}]
+        outputs = {self.nodes[0].getnewaddress(): 0.997}
+        tx4 = self.nodes[1].createrawtransaction(inputs=inputs, outputs=outputs, replaceable=False)
+        tx4_signed = self.nodes[1].signrawtransactionwithwallet(tx4)["hex"]
+        txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
+
+        assert not is_opt_in(self.nodes[1], txid_4)
+        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"})
+        self.sync_mempools()
+        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"})
+
+        self.log.info("Test tx with unknown RBF state (bip125-replaceable=unknown)")
+        # Replace tx3, and check that tx4 becomes unknown
+        tx3_b = tx3_modified
+        tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN)  # bump the fee
+        tx3_b = tx3_b.serialize().hex()
+        tx3_b_signed = self.nodes[0].signrawtransactionwithwallet(tx3_b)['hex']
+        txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, 0)
+        assert is_opt_in(self.nodes[0], txid_3b)
+
+        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"})
+        self.sync_mempools()
+        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"})
+
+        self.log.info("Test bip125-replaceable status with gettransaction RPC")
+        for n in self.nodes[0:2]:
+            assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
+            assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
+            assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
+            assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
+            assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
+
+        self.log.info("Test bip125-replaceable status with listsinceblock")
+        for n in self.nodes[0:2]:
+            txs = {tx['txid']: tx['bip125-replaceable'] for tx in n.listsinceblock()['transactions']}
+            assert_equal(txs[txid_1], "no")
+            assert_equal(txs[txid_2], "no")
+            assert_equal(txs[txid_3], "yes")
+            assert_equal(txs[txid_3b], "yes")
+            assert_equal(txs[txid_4], "unknown")
+
+        self.log.info("Test mined transactions are no longer bip125-replaceable")
+        self.generate(self.nodes[0], 1)
+        assert txid_3b not in self.nodes[0].getrawmempool()
+        assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
+        assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
+
+    def run_externally_generated_address_test(self):
+        """Test behavior when receiving address is not in the address book."""
+
+        self.log.info("Setup the same wallet on two nodes")
+        # refill keypool otherwise the second node wouldn't recognize addresses generated on the first nodes
+        self.nodes[0].keypoolrefill(1000)
+        self.stop_nodes()
+        wallet0 = os.path.join(self.nodes[0].chain_path, self.default_wallet_name, "wallet.dat")
+        wallet2 = os.path.join(self.nodes[2].chain_path, self.default_wallet_name, "wallet.dat")
+        shutil.copyfile(wallet0, wallet2)
+        self.start_nodes()
+        # reconnect nodes
+        self.connect_nodes(0, 1)
+        self.connect_nodes(1, 2)
+        self.connect_nodes(2, 0)
+
+        addr1 = self.nodes[0].getnewaddress("pizza1", 'legacy')
+        addr2 = self.nodes[0].getnewaddress("pizza2", 'p2sh-segwit')
+        addr3 = self.nodes[0].getnewaddress("pizza3", 'bech32')
+
+        self.log.info("Send to externally generated addresses")
+        # send to an address beyond the next to be generated to test the keypool gap
+        self.nodes[1].sendtoaddress(addr3, "0.001")
+        self.generate(self.nodes[1], 1)
+
+        # send to an address that is already marked as used due to the keypool gap mechanics
+        self.nodes[1].sendtoaddress(addr2, "0.001")
+        self.generate(self.nodes[1], 1)
+
+        # send to self transaction
+        self.nodes[0].sendtoaddress(addr1, "0.001")
+        self.generate(self.nodes[0], 1)
+
+        self.log.info("Verify listtransactions is the same regardless of where the address was generated")
+        transactions0 = self.nodes[0].listtransactions()
+        transactions2 = self.nodes[2].listtransactions()
+
+        # normalize results: remove fields that normally could differ and sort
+        def normalize_list(txs):
+            for tx in txs:
+                tx.pop('label', None)
+                tx.pop('time', None)
+                tx.pop('timereceived', None)
+            txs.sort(key=lambda x: x['txid'])
+
+        normalize_list(transactions0)
+        normalize_list(transactions2)
+        assert_equal(transactions0, transactions2)
+
+        self.log.info("Verify labels are persistent on the node that generated the addresses")
+        assert_equal(['pizza1'], self.nodes[0].getaddressinfo(addr1)['labels'])
+        assert_equal(['pizza2'], self.nodes[0].getaddressinfo(addr2)['labels'])
+        assert_equal(['pizza3'], self.nodes[0].getaddressinfo(addr3)['labels'])
+
+    def run_coinjoin_test(self):
+        self.log.info('Check "coin-join" transaction')
+        input_0 = next(i for i in self.nodes[0].listunspent(query_options={"minimumAmount": 0.2}, include_unsafe=False))
+        input_1 = next(i for i in self.nodes[1].listunspent(query_options={"minimumAmount": 0.2}, include_unsafe=False))
+        raw_hex = self.nodes[0].createrawtransaction(
+            inputs=[
+                {
+                    "txid": input_0["txid"],
+                    "vout": input_0["vout"],
+                },
+                {
+                    "txid": input_1["txid"],
+                    "vout": input_1["vout"],
+                },
+            ],
+            outputs={
+                self.nodes[0].getnewaddress(): 0.123,
+                self.nodes[1].getnewaddress(): 0.123,
+            },
+        )
+        raw_hex = self.nodes[0].signrawtransactionwithwallet(raw_hex)["hex"]
+        raw_hex = self.nodes[1].signrawtransactionwithwallet(raw_hex)["hex"]
+        txid_join = self.nodes[0].sendrawtransaction(hexstring=raw_hex, maxfeerate=0)
+        fee_join = self.nodes[0].getmempoolentry(txid_join)["fees"]["base"]
+        # Fee should be correct: assert_equal(fee_join, self.nodes[0].gettransaction(txid_join)['fee'])
+        # But it is not, see for example https://github.com/bitcoin/bitcoin/issues/14136:
+        assert_not_equal(fee_join, self.nodes[0].gettransaction(txid_join)["fee"])
+
+    def run_invalid_parameters_test(self):
+        self.log.info("Test listtransactions RPC parameter validity")
+        assert_raises_rpc_error(-8, 'Label argument must be a valid label name or "*".', self.nodes[0].listtransactions, label="")
+        self.nodes[0].listtransactions(label="*")
+        assert_raises_rpc_error(-8, "Negative count", self.nodes[0].listtransactions, count=-1)
+        assert_raises_rpc_error(-8, "Negative from", self.nodes[0].listtransactions, skip=-1)
+
+    def test_op_return(self):
+        """Test if OP_RETURN outputs will be displayed correctly."""
+        raw_tx = self.nodes[0].createrawtransaction([], [{'data': 'aa'}])
+        funded_tx = self.nodes[0].fundrawtransaction(raw_tx)
+        signed_tx = self.nodes[0].signrawtransactionwithwallet(funded_tx['hex'])
+        tx_id = self.nodes[0].sendrawtransaction(signed_tx['hex'])
+
+        op_ret_tx = [tx for tx in self.nodes[0].listtransactions() if tx['txid'] == tx_id][0]
+
+        assert 'address' not in op_ret_tx
+
+
+if __name__ == '__main__':
+    ListTransactionsTest(__file__).main()
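
wallet_listtransactions.py leans heavily on assert_array_result from test_framework.util: it filters a list of JSON objects by the `to_match` key/value pairs and requires every match to also carry the `expected` pairs. A simplified sketch under those assumptions follows (the real helper additionally supports a should_not_find mode and richer error reporting).

def assert_array_result(object_array, to_match, expected):
    """Check that each object matching `to_match` also satisfies `expected`."""
    # Find every object whose items are a superset of `to_match` ...
    matches = [obj for obj in object_array
               if all(obj.get(key) == value for key, value in to_match.items())]
    if not matches:
        raise AssertionError(f"No objects matched {to_match}")
    # ... and require each of them to also carry the `expected` pairs.
    for obj in matches:
        for key, value in expected.items():
            if obj.get(key) != value:
                raise AssertionError(f"{obj} does not satisfy {key}={value!r}")
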
b/test/functional/wallet_reorgsrestore.py index f1785e6336c194..193f7ab47501b4 100755 --- a/test/functional/wallet_reorgsrestore.py +++ b/test/functional/wallet_reorgsrestore.py @@ -1,105 +1,105 @@ -#!/usr/bin/env python3 -# Copyright (c) 2019-2022 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -"""Test tx status in case of reorgs while wallet being shutdown. - -Wallet txn status rely on block connection/disconnection for its -accuracy. In case of reorgs happening while wallet being shutdown -block updates are not going to be received. At wallet loading, we -check against chain if confirmed txn are still in chain and change -their status if block in which they have been included has been -disconnected. -""" - -from decimal import Decimal -import shutil - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_not_equal, - assert_equal, -) - -class ReorgsRestoreTest(BitcoinTestFramework): - def add_options(self, parser): - self.add_wallet_options(parser) - - def set_test_params(self): - self.num_nodes = 3 - - def skip_test_if_missing_module(self): - self.skip_if_no_wallet() - - def run_test(self): - # Send a tx from which to conflict outputs later - txid_conflict_from = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) - self.generate(self.nodes[0], 1) - - # Disconnect node1 from others to reorg its chain later - self.disconnect_nodes(0, 1) - self.disconnect_nodes(1, 2) - self.connect_nodes(0, 2) - - # Send a tx to be unconfirmed later - txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) - tx = self.nodes[0].gettransaction(txid) - self.generate(self.nodes[0], 4, sync_fun=self.no_op) - self.sync_blocks([self.nodes[0], self.nodes[2]]) - tx_before_reorg = self.nodes[0].gettransaction(txid) - assert_equal(tx_before_reorg["confirmations"], 4) - - # Disconnect node0 from node2 to broadcast a conflict on their respective chains - self.disconnect_nodes(0, 2) - nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txid_conflict_from)["details"] if tx_out["amount"] == Decimal("10")) - inputs = [] - inputs.append({"txid": txid_conflict_from, "vout": nA}) - outputs_1 = {} - outputs_2 = {} - - # Create a conflicted tx broadcast on node0 chain and conflicting tx broadcast on node1 chain. 
Both spend from txid_conflict_from - outputs_1[self.nodes[0].getnewaddress()] = Decimal("9.99998") - outputs_2[self.nodes[0].getnewaddress()] = Decimal("9.99998") - conflicted = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs_1)) - conflicting = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs_2)) - - conflicted_txid = self.nodes[0].sendrawtransaction(conflicted["hex"]) - self.generate(self.nodes[0], 1, sync_fun=self.no_op) - conflicting_txid = self.nodes[2].sendrawtransaction(conflicting["hex"]) - self.generate(self.nodes[2], 9, sync_fun=self.no_op) - - # Reconnect node0 and node2 and check that conflicted_txid is effectively conflicted - self.connect_nodes(0, 2) - self.sync_blocks([self.nodes[0], self.nodes[2]]) - conflicted = self.nodes[0].gettransaction(conflicted_txid) - conflicting = self.nodes[0].gettransaction(conflicting_txid) - assert_equal(conflicted["confirmations"], -9) - assert_equal(conflicted["walletconflicts"][0], conflicting["txid"]) - - # Node0 wallet is shutdown - self.restart_node(0) - - # The block chain re-orgs and the tx is included in a different block - self.generate(self.nodes[1], 9, sync_fun=self.no_op) - self.nodes[1].sendrawtransaction(tx["hex"]) - self.generate(self.nodes[1], 1, sync_fun=self.no_op) - self.nodes[1].sendrawtransaction(conflicted["hex"]) - self.generate(self.nodes[1], 1, sync_fun=self.no_op) - - # Node0 wallet file is loaded on longest sync'ed node1 - self.stop_node(1) - self.nodes[0].backupwallet(self.nodes[0].datadir_path / 'wallet.bak') - shutil.copyfile(self.nodes[0].datadir_path / 'wallet.bak', self.nodes[1].chain_path / self.default_wallet_name / self.wallet_data_filename) - self.start_node(1) - tx_after_reorg = self.nodes[1].gettransaction(txid) - # Check that normal confirmed tx is confirmed again but with different blockhash - assert_equal(tx_after_reorg["confirmations"], 2) - assert tx_before_reorg["blockhash"] != tx_after_reorg["blockhash"] - conflicted_after_reorg = self.nodes[1].gettransaction(conflicted_txid) - # Check that conflicted tx is confirmed again with blockhash different than previously conflicting tx - assert_equal(conflicted_after_reorg["confirmations"], 1) - assert conflicting["blockhash"] != conflicted_after_reorg["blockhash"] - -if __name__ == '__main__': - ReorgsRestoreTest(__file__).main() +#!/usr/bin/env python3) +# Copyright (c) 2019-2022 The Bitcoin Core developers) +# Distributed under the MIT software license, see the accompanying) +# file COPYING or http://www.opensource.org/licenses/mit-license.php.) +) +"""Test tx status in case of reorgs while wallet being shutdown.) +) +Wallet txn status rely on block connection/disconnection for its) +accuracy. In case of reorgs happening while wallet being shutdown) +block updates are not going to be received. At wallet loading, we) +check against chain if confirmed txn are still in chain and change) +their status if block in which they have been included has been) +disconnected.) 
+""") +) +from decimal import Decimal) +import shutil) +) +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.util import () + assert_not_equal,) + assert_equal,) +)) +) +class ReorgsRestoreTest(BitcoinTestFramework):) + def add_options(self, parser):) + self.add_wallet_options(parser)) +) + def set_test_params(self):) + self.num_nodes = 3) +) + def skip_test_if_missing_module(self):) + self.skip_if_no_wallet()) +) + def run_test(self):) + # Send a tx from which to conflict outputs later) + txid_conflict_from = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))) + self.generate(self.nodes[0], 1)) +) + # Disconnect node1 from others to reorg its chain later) + self.disconnect_nodes(0, 1)) + self.disconnect_nodes(1, 2)) + self.connect_nodes(0, 2)) +) + # Send a tx to be unconfirmed later) + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))) + tx = self.nodes[0].gettransaction(txid)) + self.generate(self.nodes[0], 4, sync_fun=self.no_op)) + self.sync_blocks([self.nodes[0], self.nodes[2]])) + tx_before_reorg = self.nodes[0].gettransaction(txid)) + assert_equal(tx_before_reorg["confirmations"], 4)) +) + # Disconnect node0 from node2 to broadcast a conflict on their respective chains) + self.disconnect_nodes(0, 2)) + nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txid_conflict_from)["details"] if tx_out["amount"] == Decimal("10"))) + inputs = []) + inputs.append({"txid": txid_conflict_from, "vout": nA})) + outputs_1 = {}) + outputs_2 = {}) +) + # Create a conflicted tx broadcast on node0 chain and conflicting tx broadcast on node1 chain. Both spend from txid_conflict_from) + outputs_1[self.nodes[0].getnewaddress()] = Decimal("9.99998")) + outputs_2[self.nodes[0].getnewaddress()] = Decimal("9.99998")) + conflicted = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs_1))) + conflicting = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs_2))) +) + conflicted_txid = self.nodes[0].sendrawtransaction(conflicted["hex"])) + self.generate(self.nodes[0], 1, sync_fun=self.no_op)) + conflicting_txid = self.nodes[2].sendrawtransaction(conflicting["hex"])) + self.generate(self.nodes[2], 9, sync_fun=self.no_op)) +) + # Reconnect node0 and node2 and check that conflicted_txid is effectively conflicted) + self.connect_nodes(0, 2)) + self.sync_blocks([self.nodes[0], self.nodes[2]])) + conflicted = self.nodes[0].gettransaction(conflicted_txid)) + conflicting = self.nodes[0].gettransaction(conflicting_txid)) + assert_equal(conflicted["confirmations"], -9)) + assert_equal(conflicted["walletconflicts"][0], conflicting["txid"])) +) + # Node0 wallet is shutdown) + self.restart_node(0)) +) + # The block chain re-orgs and the tx is included in a different block) + self.generate(self.nodes[1], 9, sync_fun=self.no_op)) + self.nodes[1].sendrawtransaction(tx["hex"])) + self.generate(self.nodes[1], 1, sync_fun=self.no_op)) + self.nodes[1].sendrawtransaction(conflicted["hex"])) + self.generate(self.nodes[1], 1, sync_fun=self.no_op)) +) + # Node0 wallet file is loaded on longest sync'ed node1) + self.stop_node(1)) + self.nodes[0].backupwallet(self.nodes[0].datadir_path / 'wallet.bak')) + shutil.copyfile(self.nodes[0].datadir_path / 'wallet.bak', self.nodes[1].chain_path / self.default_wallet_name / self.wallet_data_filename)) + self.start_node(1)) + tx_after_reorg = self.nodes[1].gettransaction(txid)) + # Check that normal confirmed tx 
diff --git a/test/functional/wallet_send.py b/test/functional/wallet_send.py
index 4be6ae9ba1cd0f..43e54358e6bcc0 100755
--- a/test/functional/wallet_send.py
+++ b/test/functional/wallet_send.py
@@ -1,616 +1,616 @@
-#!/usr/bin/env python3
-# Copyright (c) 2020-2022 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test the send RPC command."""
-
-from decimal import Decimal, getcontext
-from itertools import product
-
-from test_framework.authproxy import JSONRPCException
-from test_framework.descriptors import descsum_create
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (
-    assert_not_equal,
-    assert_equal,
-    assert_fee_amount,
-    assert_greater_than,
-    assert_raises_rpc_error,
-    count_bytes,
-)
-from test_framework.wallet_util import (
-    calculate_input_weight,
-    generate_keypair,
-)
-
-
-class WalletSendTest(BitcoinTestFramework):
-    def add_options(self, parser):
-        self.add_wallet_options(parser)
-
-    def set_test_params(self):
-        self.num_nodes = 2
-        # whitelist peers to speed up tx relay / mempool sync
-        self.noban_tx_relay = True
-        self.extra_args = [
-            ["-walletrbf=1"],
-            ["-walletrbf=1"]
-        ]
-        getcontext().prec = 8 # Satoshi precision for Decimal
-
-    def skip_test_if_missing_module(self):
-        self.skip_if_no_wallet()
-
-    def test_send(self, from_wallet, to_wallet=None, amount=None, data=None,
-                  arg_conf_target=None, arg_estimate_mode=None, arg_fee_rate=None,
-                  conf_target=None, estimate_mode=None, fee_rate=None, add_to_wallet=None, psbt=None,
-                  inputs=None, add_inputs=None, include_unsafe=None, change_address=None, change_position=None, change_type=None,
-                  include_watching=None, locktime=None, lock_unspents=None, replaceable=None, subtract_fee_from_outputs=None,
-                  expect_error=None, solving_data=None, minconf=None):
-        assert (amount is None) != (data is None)
-
-        from_balance_before = from_wallet.getbalances()["mine"]["trusted"]
-        if include_unsafe:
-            from_balance_before += from_wallet.getbalances()["mine"]["untrusted_pending"]
-
-        if to_wallet is None:
-            assert amount is None
-        else:
-            to_untrusted_pending_before = to_wallet.getbalances()["mine"]["untrusted_pending"]
-
-        if amount:
-            dest = to_wallet.getnewaddress()
-            outputs = {dest: amount}
-        else:
-            outputs = {"data": data}
-
-        # Construct options dictionary
-        options = {}
-        if add_to_wallet is not None:
-            options["add_to_wallet"] = add_to_wallet
-        else:
-            if psbt:
-                add_to_wallet = False
-            else:
-                add_to_wallet = from_wallet.getwalletinfo()["private_keys_enabled"] # Default value
-        if psbt is not None:
-            options["psbt"] = psbt
-        if conf_target is not None:
-            options["conf_target"] = conf_target
-        if estimate_mode is not None:
-            options["estimate_mode"] = estimate_mode
-        if fee_rate is not None:
-            options["fee_rate"] = fee_rate
-        if inputs is not None:
-            options["inputs"] = inputs
-        if add_inputs is not
None: - options["add_inputs"] = add_inputs - if include_unsafe is not None: - options["include_unsafe"] = include_unsafe - if change_address is not None: - options["change_address"] = change_address - if change_position is not None: - options["change_position"] = change_position - if change_type is not None: - options["change_type"] = change_type - if include_watching is not None: - options["include_watching"] = include_watching - if locktime is not None: - options["locktime"] = locktime - if lock_unspents is not None: - options["lock_unspents"] = lock_unspents - if replaceable is None: - replaceable = True # default - else: - options["replaceable"] = replaceable - if subtract_fee_from_outputs is not None: - options["subtract_fee_from_outputs"] = subtract_fee_from_outputs - if solving_data is not None: - options["solving_data"] = solving_data - if minconf is not None: - options["minconf"] = minconf - - if len(options.keys()) == 0: - options = None - - if expect_error is None: - res = from_wallet.send(outputs=outputs, conf_target=arg_conf_target, estimate_mode=arg_estimate_mode, fee_rate=arg_fee_rate, options=options) - else: - try: - assert_raises_rpc_error(expect_error[0], expect_error[1], from_wallet.send, - outputs=outputs, conf_target=arg_conf_target, estimate_mode=arg_estimate_mode, fee_rate=arg_fee_rate, options=options) - except AssertionError: - # Provide debug info if the test fails - self.log.error("Unexpected successful result:") - self.log.error(arg_conf_target) - self.log.error(arg_estimate_mode) - self.log.error(arg_fee_rate) - self.log.error(options) - res = from_wallet.send(outputs=outputs, conf_target=arg_conf_target, estimate_mode=arg_estimate_mode, fee_rate=arg_fee_rate, options=options) - self.log.error(res) - if "txid" in res and add_to_wallet: - self.log.error("Transaction details:") - try: - tx = from_wallet.gettransaction(res["txid"]) - self.log.error(tx) - self.log.error("testmempoolaccept (transaction may already be in mempool):") - self.log.error(from_wallet.testmempoolaccept([tx["hex"]])) - except JSONRPCException as exc: - self.log.error(exc) - - raise - - return - - if locktime: - return res - - if from_wallet.getwalletinfo()["private_keys_enabled"] and not include_watching: - assert_equal(res["complete"], True) - assert "txid" in res - else: - assert_equal(res["complete"], False) - assert not "txid" in res - assert "psbt" in res - - from_balance = from_wallet.getbalances()["mine"]["trusted"] - if include_unsafe: - from_balance += from_wallet.getbalances()["mine"]["untrusted_pending"] - - if add_to_wallet and not include_watching: - # Ensure transaction exists in the wallet: - tx = from_wallet.gettransaction(res["txid"]) - assert tx - assert_equal(tx["bip125-replaceable"], "yes" if replaceable else "no") - # Ensure transaction exists in the mempool: - tx = from_wallet.getrawtransaction(res["txid"], True) - assert tx - if amount: - if subtract_fee_from_outputs: - assert_equal(from_balance_before - from_balance, amount) - else: - assert_greater_than(from_balance_before - from_balance, amount) - else: - assert next((out for out in tx["vout"] if out["scriptPubKey"]["asm"] == "OP_RETURN 35"), None) - else: - assert_equal(from_balance_before, from_balance) - - if to_wallet: - self.sync_mempools() - if add_to_wallet: - if not subtract_fee_from_outputs: - assert_equal(to_wallet.getbalances()["mine"]["untrusted_pending"], to_untrusted_pending_before + Decimal(amount if amount else 0)) - else: - assert_equal(to_wallet.getbalances()["mine"]["untrusted_pending"], 
to_untrusted_pending_before) - - return res - - def run_test(self): - self.log.info("Setup wallets...") - # w0 is a wallet with coinbase rewards - w0 = self.nodes[0].get_wallet_rpc(self.default_wallet_name) - # w1 is a regular wallet - self.nodes[1].createwallet(wallet_name="w1") - w1 = self.nodes[1].get_wallet_rpc("w1") - # w2 contains the private keys for w3 - self.nodes[1].createwallet(wallet_name="w2", blank=True) - w2 = self.nodes[1].get_wallet_rpc("w2") - xpriv = "tprv8ZgxMBicQKsPfHCsTwkiM1KT56RXbGGTqvc2hgqzycpwbHqqpcajQeMRZoBD35kW4RtyCemu6j34Ku5DEspmgjKdt2qe4SvRch5Kk8B8A2v" - xpub = "tpubD6NzVbkrYhZ4YkEfMbRJkQyZe7wTkbTNRECozCtJPtdLRn6cT1QKb8yHjwAPcAr26eHBFYs5iLiFFnCbwPRsncCKUKCfubHDMGKzMVcN1Jg" - if self.options.descriptors: - w2.importdescriptors([{ - "desc": descsum_create("wpkh(" + xpriv + "/0/0/*)"), - "timestamp": "now", - "range": [0, 100], - "active": True - },{ - "desc": descsum_create("wpkh(" + xpriv + "/0/1/*)"), - "timestamp": "now", - "range": [0, 100], - "active": True, - "internal": True - }]) - else: - w2.sethdseed(True) - - # w3 is a watch-only wallet, based on w2 - self.nodes[1].createwallet(wallet_name="w3", disable_private_keys=True) - w3 = self.nodes[1].get_wallet_rpc("w3") - if self.options.descriptors: - # Match the privkeys in w2 for descriptors - res = w3.importdescriptors([{ - "desc": descsum_create("wpkh(" + xpub + "/0/0/*)"), - "timestamp": "now", - "range": [0, 100], - "keypool": True, - "active": True, - "watchonly": True - },{ - "desc": descsum_create("wpkh(" + xpub + "/0/1/*)"), - "timestamp": "now", - "range": [0, 100], - "keypool": True, - "active": True, - "internal": True, - "watchonly": True - }]) - assert_equal(res, [{"success": True}, {"success": True}]) - - for _ in range(3): - a2_receive = w2.getnewaddress() - if not self.options.descriptors: - # Because legacy wallets use exclusively hardened derivation, we can't do a ranged import like we do for descriptors - a2_change = w2.getrawchangeaddress() # doesn't actually use change derivation - res = w3.importmulti([{ - "desc": w2.getaddressinfo(a2_receive)["desc"], - "timestamp": "now", - "keypool": True, - "watchonly": True - },{ - "desc": w2.getaddressinfo(a2_change)["desc"], - "timestamp": "now", - "keypool": True, - "internal": True, - "watchonly": True - }]) - assert_equal(res, [{"success": True}, {"success": True}]) - - w0.sendtoaddress(a2_receive, 10) # fund w3 - self.generate(self.nodes[0], 1) - - if not self.options.descriptors: - # w4 has private keys enabled, but only contains watch-only keys (from w2) - # This is legacy wallet behavior only as descriptor wallets don't allow watchonly and non-watchonly things in the same wallet. 
- self.nodes[1].createwallet(wallet_name="w4", disable_private_keys=False) - w4 = self.nodes[1].get_wallet_rpc("w4") - for _ in range(3): - a2_receive = w2.getnewaddress() - res = w4.importmulti([{ - "desc": w2.getaddressinfo(a2_receive)["desc"], - "timestamp": "now", - "keypool": False, - "watchonly": True - }]) - assert_equal(res, [{"success": True}]) - - w0.sendtoaddress(a2_receive, 10) # fund w4 - self.generate(self.nodes[0], 1) - - self.log.info("Send to address...") - self.test_send(from_wallet=w0, to_wallet=w1, amount=1) - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=True) - - self.log.info("Don't broadcast...") - res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False) - assert res["hex"] - - self.log.info("Return PSBT...") - res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, psbt=True) - assert res["psbt"] - - self.log.info("Create transaction that spends to address, but don't broadcast...") - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False) - # conf_target & estimate_mode can be set as argument or option - res1 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_conf_target=1, arg_estimate_mode="economical", add_to_wallet=False) - res2 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=1, estimate_mode="economical", add_to_wallet=False) - assert_equal(self.nodes[1].decodepsbt(res1["psbt"])["fee"], - self.nodes[1].decodepsbt(res2["psbt"])["fee"]) - # but not at the same time - for mode in ["unset", "economical", "conservative"]: - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_conf_target=1, arg_estimate_mode="economical", - conf_target=1, estimate_mode=mode, add_to_wallet=False, - expect_error=(-8, "Pass conf_target and estimate_mode either as arguments or in the options object, but not both")) - - self.log.info("Create PSBT from watch-only wallet w3, sign with w2...") - res = self.test_send(from_wallet=w3, to_wallet=w1, amount=1) - res = w2.walletprocesspsbt(res["psbt"]) - assert res["complete"] - - if not self.options.descriptors: - # Descriptor wallets do not allow mixed watch-only and non-watch-only things in the same wallet. - # This is specifically testing that w4 ignores its own private keys and creates a psbt with send - # which is not something that needs to be tested in descriptor wallets. 
- self.log.info("Create PSBT from wallet w4 with watch-only keys, sign with w2...") - self.test_send(from_wallet=w4, to_wallet=w1, amount=1, expect_error=(-4, "Insufficient funds")) - res = self.test_send(from_wallet=w4, to_wallet=w1, amount=1, include_watching=True, add_to_wallet=False) - res = w2.walletprocesspsbt(res["psbt"]) - assert res["complete"] - - self.log.info("Create OP_RETURN...") - self.test_send(from_wallet=w0, to_wallet=w1, amount=1) - self.test_send(from_wallet=w0, data="Hello World", expect_error=(-8, "Data must be hexadecimal string (not 'Hello World')")) - self.test_send(from_wallet=w0, data="23") - res = self.test_send(from_wallet=w3, data="23") - res = w2.walletprocesspsbt(res["psbt"]) - assert res["complete"] - - self.log.info("Test setting explicit fee rate") - res1 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate="1", add_to_wallet=False) - res2 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate="1", add_to_wallet=False) - assert_equal(self.nodes[1].decodepsbt(res1["psbt"])["fee"], self.nodes[1].decodepsbt(res2["psbt"])["fee"]) - - res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=7, add_to_wallet=False) - fee = self.nodes[1].decodepsbt(res["psbt"])["fee"] - assert_fee_amount(fee, count_bytes(res["hex"]), Decimal("0.00007")) - - # "unset" and None are treated the same for estimate_mode - res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=2, estimate_mode="unset", add_to_wallet=False) - fee = self.nodes[1].decodepsbt(res["psbt"])["fee"] - assert_fee_amount(fee, count_bytes(res["hex"]), Decimal("0.00002")) - - res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=4.531, add_to_wallet=False) - fee = self.nodes[1].decodepsbt(res["psbt"])["fee"] - assert_fee_amount(fee, count_bytes(res["hex"]), Decimal("0.00004531")) - - res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=3, add_to_wallet=False) - fee = self.nodes[1].decodepsbt(res["psbt"])["fee"] - assert_fee_amount(fee, count_bytes(res["hex"]), Decimal("0.00003")) - - # Test that passing fee_rate as both an argument and an option raises. 
- self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=1, fee_rate=1, add_to_wallet=False, - expect_error=(-8, "Pass the fee_rate either as an argument, or in the options object, but not both")) - - assert_raises_rpc_error(-8, "Use fee_rate (sat/vB) instead of feeRate", w0.send, {w1.getnewaddress(): 1}, 6, "conservative", 1, {"feeRate": 0.01}) - - assert_raises_rpc_error(-3, "Unexpected key totalFee", w0.send, {w1.getnewaddress(): 1}, 6, "conservative", 1, {"totalFee": 0.01}) - - for target, mode in product([-1, 0, 1009], ["economical", "conservative"]): - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=target, estimate_mode=mode, - expect_error=(-8, "Invalid conf_target, must be between 1 and 1008")) # max value of 1008 per src/policy/fees.h - msg = 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"' - for target, mode in product([-1, 0], ["btc/kb", "sat/b"]): - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=target, estimate_mode=mode, expect_error=(-8, msg)) - for mode in ["", "foo", Decimal("3.141592")]: - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=0.1, estimate_mode=mode, expect_error=(-8, msg)) - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_conf_target=0.1, arg_estimate_mode=mode, expect_error=(-8, msg)) - assert_raises_rpc_error(-8, msg, w0.send, {w1.getnewaddress(): 1}, 0.1, mode) - - for mode in ["economical", "conservative"]: - for k, v in {"string": "true", "bool": True, "object": {"foo": "bar"}}.items(): - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=v, estimate_mode=mode, - expect_error=(-3, f"JSON value of type {k} for field conf_target is not of expected type number")) - - # Test setting explicit fee rate just below the minimum of 1 sat/vB. - self.log.info("Explicit fee rate raises RPC error 'fee rate too low' if fee_rate of 0.99999999 is passed") - msg = "Fee rate (0.999 sat/vB) is lower than the minimum fee rate setting (1.000 sat/vB)" - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=0.999, expect_error=(-4, msg)) - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=0.999, expect_error=(-4, msg)) - - self.log.info("Explicit fee rate raises if invalid fee_rate is passed") - # Test fee_rate with zero values. - msg = "Fee rate (0.000 sat/vB) is lower than the minimum fee rate setting (1.000 sat/vB)" - for zero_value in [0, 0.000, 0.00000000, "0", "0.000", "0.00000000"]: - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=zero_value, expect_error=(-4, msg)) - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=zero_value, expect_error=(-4, msg)) - msg = "Invalid amount" - # Test fee_rate values that don't pass fixed-point parsing checks. - for invalid_value in ["", 0.000000001, 1e-09, 1.111111111, 1111111111111111, "31.999999999999999999999"]: - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=invalid_value, expect_error=(-3, msg)) - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=invalid_value, expect_error=(-3, msg)) - # Test fee_rate values that cannot be represented in sat/vB. - for invalid_value in [0.0001, 0.00000001, 0.00099999, 31.99999999]: - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=invalid_value, expect_error=(-3, msg)) - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=invalid_value, expect_error=(-3, msg)) - # Test fee_rate out of range (negative number). 
- msg = "Amount out of range" - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=-1, expect_error=(-3, msg)) - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=-1, expect_error=(-3, msg)) - # Test type error. - msg = "Amount is not a number or string" - for invalid_value in [True, {"foo": "bar"}]: - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=invalid_value, expect_error=(-3, msg)) - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=invalid_value, expect_error=(-3, msg)) - - # TODO: Return hex if fee rate is below -maxmempool - # res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=0.1, estimate_mode="sat/b", add_to_wallet=False) - # assert res["hex"] - # hex = res["hex"] - # res = self.nodes[0].testmempoolaccept([hex]) - # assert not res[0]["allowed"] - # assert_equal(res[0]["reject-reason"], "...") # low fee - # assert_fee_amount(fee, Decimal(len(res["hex"]) / 2), Decimal("0.000001")) - - self.log.info("If inputs are specified, do not automatically add more...") - res = self.test_send(from_wallet=w0, to_wallet=w1, amount=51, inputs=[], add_to_wallet=False) - assert res["complete"] - utxo1 = w0.listunspent()[0] - assert_equal(utxo1["amount"], 50) - ERR_NOT_ENOUGH_PRESET_INPUTS = "The preselected coins total amount does not cover the transaction target. " \ - "Please allow other inputs to be automatically selected or include more coins manually" - self.test_send(from_wallet=w0, to_wallet=w1, amount=51, inputs=[utxo1], - expect_error=(-4, ERR_NOT_ENOUGH_PRESET_INPUTS)) - self.test_send(from_wallet=w0, to_wallet=w1, amount=51, inputs=[utxo1], add_inputs=False, - expect_error=(-4, ERR_NOT_ENOUGH_PRESET_INPUTS)) - res = self.test_send(from_wallet=w0, to_wallet=w1, amount=51, inputs=[utxo1], add_inputs=True, add_to_wallet=False) - assert res["complete"] - - self.log.info("Manual change address and position...") - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, change_address="not an address", - expect_error=(-5, "Change address must be a valid bitcoin address")) - change_address = w0.getnewaddress() - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False, change_address=change_address) - assert res["complete"] - res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False, change_address=change_address, change_position=0) - assert res["complete"] - assert_equal(self.nodes[0].decodepsbt(res["psbt"])["tx"]["vout"][0]["scriptPubKey"]["address"], change_address) - res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False, change_type="legacy", change_position=0) - assert res["complete"] - change_address = self.nodes[0].decodepsbt(res["psbt"])["tx"]["vout"][0]["scriptPubKey"]["address"] - assert change_address[0] == "m" or change_address[0] == "n" - - self.log.info("Set lock time...") - height = self.nodes[0].getblockchaininfo()["blocks"] - res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, locktime=height + 1) - assert res["complete"] - assert res["txid"] - txid = res["txid"] - # Although the wallet finishes the transaction, it can't be added to the mempool yet: - hex = self.nodes[0].gettransaction(res["txid"])["hex"] - res = self.nodes[0].testmempoolaccept([hex]) - assert not res[0]["allowed"] - assert_equal(res[0]["reject-reason"], "non-final") - # It shouldn't be confirmed in the next block - self.generate(self.nodes[0], 1) - assert_equal(self.nodes[0].gettransaction(txid)["confirmations"], 0) - # The mempool should allow it 
now: - res = self.nodes[0].testmempoolaccept([hex]) - assert res[0]["allowed"] - # Don't wait for wallet to add it to the mempool: - res = self.nodes[0].sendrawtransaction(hex) - self.generate(self.nodes[0], 1) - assert_equal(self.nodes[0].gettransaction(txid)["confirmations"], 1) - - self.log.info("Lock unspents...") - utxo1 = w0.listunspent()[0] - assert_greater_than(utxo1["amount"], 1) - res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, inputs=[utxo1], add_to_wallet=False, lock_unspents=True) - assert res["complete"] - locked_coins = w0.listlockunspent() - assert_equal(len(locked_coins), 1) - # Locked coins are automatically unlocked when manually selected - res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, inputs=[utxo1], add_to_wallet=False) - assert res["complete"] - - self.log.info("Replaceable...") - res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=True, replaceable=True) - assert res["complete"] - assert_equal(self.nodes[0].gettransaction(res["txid"])["bip125-replaceable"], "yes") - res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=True, replaceable=False) - assert res["complete"] - assert_equal(self.nodes[0].gettransaction(res["txid"])["bip125-replaceable"], "no") - - self.log.info("Subtract fee from output") - self.test_send(from_wallet=w0, to_wallet=w1, amount=1, subtract_fee_from_outputs=[0]) - - self.log.info("Include unsafe inputs") - self.nodes[1].createwallet(wallet_name="w5") - w5 = self.nodes[1].get_wallet_rpc("w5") - self.test_send(from_wallet=w0, to_wallet=w5, amount=2) - self.test_send(from_wallet=w5, to_wallet=w0, amount=1, expect_error=(-4, "Insufficient funds")) - res = self.test_send(from_wallet=w5, to_wallet=w0, amount=1, include_unsafe=True) - assert res["complete"] - - self.log.info("Minconf") - self.nodes[1].createwallet(wallet_name="minconfw") - minconfw= self.nodes[1].get_wallet_rpc("minconfw") - self.test_send(from_wallet=w0, to_wallet=minconfw, amount=2) - self.generate(self.nodes[0], 3) - self.test_send(from_wallet=minconfw, to_wallet=w0, amount=1, minconf=4, expect_error=(-4, "Insufficient funds")) - self.test_send(from_wallet=minconfw, to_wallet=w0, amount=1, minconf=-4, expect_error=(-8, "Negative minconf")) - res = self.test_send(from_wallet=minconfw, to_wallet=w0, amount=1, minconf=3) - assert res["complete"] - - self.log.info("External outputs") - privkey, _ = generate_keypair(wif=True) - - self.nodes[1].createwallet("extsend") - ext_wallet = self.nodes[1].get_wallet_rpc("extsend") - self.nodes[1].createwallet("extfund") - ext_fund = self.nodes[1].get_wallet_rpc("extfund") - - # Make a weird but signable script. 
sh(wsh(pkh())) descriptor accomplishes this - desc = descsum_create("sh(wsh(pkh({})))".format(privkey)) - if self.options.descriptors: - res = ext_fund.importdescriptors([{"desc": desc, "timestamp": "now"}]) - else: - res = ext_fund.importmulti([{"desc": desc, "timestamp": "now"}]) - assert res[0]["success"] - addr = self.nodes[0].deriveaddresses(desc)[0] - addr_info = ext_fund.getaddressinfo(addr) - - self.nodes[0].sendtoaddress(addr, 10) - self.nodes[0].sendtoaddress(ext_wallet.getnewaddress(), 10) - self.generate(self.nodes[0], 6) - ext_utxo = ext_fund.listunspent(addresses=[addr])[0] - - # An external input without solving data should result in an error - self.test_send(from_wallet=ext_wallet, to_wallet=self.nodes[0], amount=15, inputs=[ext_utxo], add_inputs=True, psbt=True, include_watching=True, expect_error=(-4, "Not solvable pre-selected input COutPoint(%s, %s)" % (ext_utxo["txid"][0:10], ext_utxo["vout"]))) - - # But funding should work when the solving data is provided - res = self.test_send(from_wallet=ext_wallet, to_wallet=self.nodes[0], amount=15, inputs=[ext_utxo], add_inputs=True, psbt=True, include_watching=True, solving_data={"pubkeys": [addr_info['pubkey']], "scripts": [addr_info["embedded"]["scriptPubKey"], addr_info["embedded"]["embedded"]["scriptPubKey"]]}) - signed = ext_wallet.walletprocesspsbt(res["psbt"]) - signed = ext_fund.walletprocesspsbt(res["psbt"]) - assert signed["complete"] - - res = self.test_send(from_wallet=ext_wallet, to_wallet=self.nodes[0], amount=15, inputs=[ext_utxo], add_inputs=True, psbt=True, include_watching=True, solving_data={"descriptors": [desc]}) - signed = ext_wallet.walletprocesspsbt(res["psbt"]) - signed = ext_fund.walletprocesspsbt(res["psbt"]) - assert signed["complete"] - - dec = self.nodes[0].decodepsbt(signed["psbt"]) - for i, txin in enumerate(dec["tx"]["vin"]): - if txin["txid"] == ext_utxo["txid"] and txin["vout"] == ext_utxo["vout"]: - input_idx = i - break - psbt_in = dec["inputs"][input_idx] - scriptsig_hex = psbt_in["final_scriptSig"]["hex"] if "final_scriptSig" in psbt_in else "" - witness_stack_hex = psbt_in["final_scriptwitness"] if "final_scriptwitness" in psbt_in else None - input_weight = calculate_input_weight(scriptsig_hex, witness_stack_hex) - - # Input weight error conditions - assert_raises_rpc_error( - -8, - "Input weights should be specified in inputs rather than in options.", - ext_wallet.send, - outputs={self.nodes[0].getnewaddress(): 15}, - options={"inputs": [ext_utxo], "input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 1000}]} - ) - - target_fee_rate_sat_vb = 10 - # Funding should also work when input weights are provided - res = self.test_send( - from_wallet=ext_wallet, - to_wallet=self.nodes[0], - amount=15, - inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": input_weight}], - add_inputs=True, - psbt=True, - include_watching=True, - fee_rate=target_fee_rate_sat_vb - ) - signed = ext_wallet.walletprocesspsbt(res["psbt"]) - signed = ext_fund.walletprocesspsbt(res["psbt"]) - assert signed["complete"] - testres = self.nodes[0].testmempoolaccept([signed["hex"]])[0] - assert_equal(testres["allowed"], True) - actual_fee_rate_sat_vb = Decimal(testres["fees"]["base"]) * Decimal(1e8) / Decimal(testres["vsize"]) - # Due to ECDSA signatures not always being the same length, the actual fee rate may be slightly different - # but rounded to nearest integer, it should be the same as the target fee rate - assert_equal(round(actual_fee_rate_sat_vb), target_fee_rate_sat_vb) 
-
-        # Check tx creation size limits
-        self.test_weight_limits()
-
-    def test_weight_limits(self):
-        self.log.info("Test weight limits")
-
-        self.nodes[1].createwallet("test_weight_limits")
-        wallet = self.nodes[1].get_wallet_rpc("test_weight_limits")
-
-        # Generate future inputs; 272 WU per input (273 when high-s).
-        # Picking 1471 inputs will exceed the max standard tx weight.
-        outputs = []
-        for _ in range(1472):
-            outputs.append({wallet.getnewaddress(address_type="legacy"): 0.1})
-        self.nodes[0].send(outputs=outputs)
-        self.generate(self.nodes[0], 1)
-
-        # 1) Try to fund transaction only using the preset inputs
-        inputs = wallet.listunspent()
-        assert_raises_rpc_error(-4, "Transaction too large",
-                                wallet.send, outputs=[{wallet.getnewaddress(): 0.1 * 1471}], options={"inputs": inputs, "add_inputs": False})
-
-        # 2) Let the wallet fund the transaction
-        assert_raises_rpc_error(-4, "The inputs size exceeds the maximum weight. Please try sending a smaller amount or manually consolidating your wallet's UTXOs",
-                                wallet.send, outputs=[{wallet.getnewaddress(): 0.1 * 1471}])
-
-        # 3) Pre-select some inputs and let the wallet fill-up the remaining amount
-        inputs = inputs[0:1000]
-        assert_raises_rpc_error(-4, "The combination of the pre-selected inputs and the wallet automatic inputs selection exceeds the transaction maximum weight. Please try sending a smaller amount or manually consolidating your wallet's UTXOs",
-                                wallet.send, outputs=[{wallet.getnewaddress(): 0.1 * 1471}], options={"inputs": inputs, "add_inputs": True})
-
-        self.nodes[1].unloadwallet("test_weight_limits")
-
-
-if __name__ == '__main__':
-    WalletSendTest(__file__).main()
+#!/usr/bin/env python3
+# Copyright (c) 2020-2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test the send RPC command."""
+
+from decimal import Decimal, getcontext
+from itertools import product
+
+from test_framework.authproxy import JSONRPCException
+from test_framework.descriptors import descsum_create
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+    assert_not_equal,
+    assert_equal,
+    assert_fee_amount,
+    assert_greater_than,
+    assert_raises_rpc_error,
+    count_bytes,
+)
+from test_framework.wallet_util import (
+    calculate_input_weight,
+    generate_keypair,
+)
+
+
+class WalletSendTest(BitcoinTestFramework):
+    def add_options(self, parser):
+        self.add_wallet_options(parser)
+
+    def set_test_params(self):
+        self.num_nodes = 2
+        # whitelist peers to speed up tx relay / mempool sync
+        self.noban_tx_relay = True
+        self.extra_args = [
+            ["-walletrbf=1"],
+            ["-walletrbf=1"]
+        ]
+        getcontext().prec = 8 # Satoshi precision for Decimal
+
+    def skip_test_if_missing_module(self):
+        self.skip_if_no_wallet()
+
+    def test_send(self, from_wallet, to_wallet=None, amount=None, data=None,
+                  arg_conf_target=None, arg_estimate_mode=None, arg_fee_rate=None,
+                  conf_target=None, estimate_mode=None, fee_rate=None, add_to_wallet=None, psbt=None,
+                  inputs=None, add_inputs=None, include_unsafe=None, change_address=None, change_position=None, change_type=None,
+                  include_watching=None, locktime=None, lock_unspents=None, replaceable=None, subtract_fee_from_outputs=None,
+                  expect_error=None, solving_data=None, minconf=None):
+        assert_not_equal((amount is None), (data is None))
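+        # Exactly one of amount or data must be provided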
+"""Test the send RPC command.""") +) +from decimal import Decimal, getcontext) +from itertools import product) +) +from test_framework.authproxy import JSONRPCException) +from test_framework.descriptors import descsum_create) +from test_framework.test_framework import BitcoinTestFramework) +from test_framework.util import () + assert_not_equal,) + assert_equal,) + assert_fee_amount,) + assert_greater_than,) + assert_raises_rpc_error,) + count_bytes,) +)) +from test_framework.wallet_util import () + calculate_input_weight,) + generate_keypair,) +)) +) +) +class WalletSendTest(BitcoinTestFramework):) + def add_options(self, parser):) + self.add_wallet_options(parser)) +) + def set_test_params(self):) + self.num_nodes = 2) + # whitelist peers to speed up tx relay / mempool sync) + self.noban_tx_relay = True) + self.extra_args = [) + ["-walletrbf=1"],) + ["-walletrbf=1"]) + ]) + getcontext().prec = 8 # Satoshi precision for Decimal) +) + def skip_test_if_missing_module(self):) + self.skip_if_no_wallet()) +) + def test_send(self, from_wallet, to_wallet=None, amount=None, data=None,) + arg_conf_target=None, arg_estimate_mode=None, arg_fee_rate=None,) + conf_target=None, estimate_mode=None, fee_rate=None, add_to_wallet=None, psbt=None,) + inputs=None, add_inputs=None, include_unsafe=None, change_address=None, change_position=None, change_type=None,) + include_watching=None, locktime=None, lock_unspents=None, replaceable=None, subtract_fee_from_outputs=None,) + expect_error=None, solving_data=None, minconf=None):) + assert_not_equal((amount is None), (data is None))) +) + from_balance_before = from_wallet.getbalances()["mine"]["trusted"]) + if include_unsafe:) + from_balance_before += from_wallet.getbalances()["mine"]["untrusted_pending"]) +) + if to_wallet is None:) + assert amount is None) + else:) + to_untrusted_pending_before = to_wallet.getbalances()["mine"]["untrusted_pending"]) +) + if amount:) + dest = to_wallet.getnewaddress()) + outputs = {dest: amount}) + else:) + outputs = {"data": data}) +) + # Construct options dictionary) + options = {}) + if add_to_wallet is not None:) + options["add_to_wallet"] = add_to_wallet) + else:) + if psbt:) + add_to_wallet = False) + else:) + add_to_wallet = from_wallet.getwalletinfo()["private_keys_enabled"] # Default value) + if psbt is not None:) + options["psbt"] = psbt) + if conf_target is not None:) + options["conf_target"] = conf_target) + if estimate_mode is not None:) + options["estimate_mode"] = estimate_mode) + if fee_rate is not None:) + options["fee_rate"] = fee_rate) + if inputs is not None:) + options["inputs"] = inputs) + if add_inputs is not None:) + options["add_inputs"] = add_inputs) + if include_unsafe is not None:) + options["include_unsafe"] = include_unsafe) + if change_address is not None:) + options["change_address"] = change_address) + if change_position is not None:) + options["change_position"] = change_position) + if change_type is not None:) + options["change_type"] = change_type) + if include_watching is not None:) + options["include_watching"] = include_watching) + if locktime is not None:) + options["locktime"] = locktime) + if lock_unspents is not None:) + options["lock_unspents"] = lock_unspents) + if replaceable is None:) + replaceable = True # default) + else:) + options["replaceable"] = replaceable) + if subtract_fee_from_outputs is not None:) + options["subtract_fee_from_outputs"] = subtract_fee_from_outputs) + if solving_data is not None:) + options["solving_data"] = solving_data) + if minconf is not None:) + 
options["minconf"] = minconf) +) + if len(options.keys()) == 0:) + options = None) +) + if expect_error is None:) + res = from_wallet.send(outputs=outputs, conf_target=arg_conf_target, estimate_mode=arg_estimate_mode, fee_rate=arg_fee_rate, options=options)) + else:) + try:) + assert_raises_rpc_error(expect_error[0], expect_error[1], from_wallet.send,) + outputs=outputs, conf_target=arg_conf_target, estimate_mode=arg_estimate_mode, fee_rate=arg_fee_rate, options=options)) + except AssertionError:) + # Provide debug info if the test fails) + self.log.error("Unexpected successful result:")) + self.log.error(arg_conf_target)) + self.log.error(arg_estimate_mode)) + self.log.error(arg_fee_rate)) + self.log.error(options)) + res = from_wallet.send(outputs=outputs, conf_target=arg_conf_target, estimate_mode=arg_estimate_mode, fee_rate=arg_fee_rate, options=options)) + self.log.error(res)) + if "txid" in res and add_to_wallet:) + self.log.error("Transaction details:")) + try:) + tx = from_wallet.gettransaction(res["txid"])) + self.log.error(tx)) + self.log.error("testmempoolaccept (transaction may already be in mempool):")) + self.log.error(from_wallet.testmempoolaccept([tx["hex"]]))) + except JSONRPCException as exc:) + self.log.error(exc)) +) + raise) +) + return) +) + if locktime:) + return res) +) + if from_wallet.getwalletinfo()["private_keys_enabled"] and not include_watching:) + assert_equal(res["complete"], True)) + assert "txid" in res) + else:) + assert_equal(res["complete"], False)) + assert not "txid" in res) + assert "psbt" in res) +) + from_balance = from_wallet.getbalances()["mine"]["trusted"]) + if include_unsafe:) + from_balance += from_wallet.getbalances()["mine"]["untrusted_pending"]) +) + if add_to_wallet and not include_watching:) + # Ensure transaction exists in the wallet:) + tx = from_wallet.gettransaction(res["txid"])) + assert tx) + assert_equal(tx["bip125-replaceable"], "yes" if replaceable else "no")) + # Ensure transaction exists in the mempool:) + tx = from_wallet.getrawtransaction(res["txid"], True)) + assert tx) + if amount:) + if subtract_fee_from_outputs:) + assert_equal(from_balance_before - from_balance, amount)) + else:) + assert_greater_than(from_balance_before - from_balance, amount)) + else:) + assert next((out for out in tx["vout"] if out["scriptPubKey"]["asm"] == "OP_RETURN 35"), None)) + else:) + assert_equal(from_balance_before, from_balance)) +) + if to_wallet:) + self.sync_mempools()) + if add_to_wallet:) + if not subtract_fee_from_outputs:) + assert_equal(to_wallet.getbalances()["mine"]["untrusted_pending"], to_untrusted_pending_before + Decimal(amount if amount else 0))) + else:) + assert_equal(to_wallet.getbalances()["mine"]["untrusted_pending"], to_untrusted_pending_before)) +) + return res) +) + def run_test(self):) + self.log.info("Setup wallets...")) + # w0 is a wallet with coinbase rewards) + w0 = self.nodes[0].get_wallet_rpc(self.default_wallet_name)) + # w1 is a regular wallet) + self.nodes[1].createwallet(wallet_name="w1")) + w1 = self.nodes[1].get_wallet_rpc("w1")) + # w2 contains the private keys for w3) + self.nodes[1].createwallet(wallet_name="w2", blank=True)) + w2 = self.nodes[1].get_wallet_rpc("w2")) + xpriv = "tprv8ZgxMBicQKsPfHCsTwkiM1KT56RXbGGTqvc2hgqzycpwbHqqpcajQeMRZoBD35kW4RtyCemu6j34Ku5DEspmgjKdt2qe4SvRch5Kk8B8A2v") + xpub = "tpubD6NzVbkrYhZ4YkEfMbRJkQyZe7wTkbTNRECozCtJPtdLRn6cT1QKb8yHjwAPcAr26eHBFYs5iLiFFnCbwPRsncCKUKCfubHDMGKzMVcN1Jg") + if self.options.descriptors:) + w2.importdescriptors([{) + "desc": descsum_create("wpkh(" 
+        xpriv = "tprv8ZgxMBicQKsPfHCsTwkiM1KT56RXbGGTqvc2hgqzycpwbHqqpcajQeMRZoBD35kW4RtyCemu6j34Ku5DEspmgjKdt2qe4SvRch5Kk8B8A2v"
+        xpub = "tpubD6NzVbkrYhZ4YkEfMbRJkQyZe7wTkbTNRECozCtJPtdLRn6cT1QKb8yHjwAPcAr26eHBFYs5iLiFFnCbwPRsncCKUKCfubHDMGKzMVcN1Jg"
+        if self.options.descriptors:
+            w2.importdescriptors([{
+                "desc": descsum_create("wpkh(" + xpriv + "/0/0/*)"),
+                "timestamp": "now",
+                "range": [0, 100],
+                "active": True
+            },{
+                "desc": descsum_create("wpkh(" + xpriv + "/0/1/*)"),
+                "timestamp": "now",
+                "range": [0, 100],
+                "active": True,
+                "internal": True
+            }])
+        else:
+            w2.sethdseed(True)
+
+        # w3 is a watch-only wallet, based on w2
+        self.nodes[1].createwallet(wallet_name="w3", disable_private_keys=True)
+        w3 = self.nodes[1].get_wallet_rpc("w3")
+        if self.options.descriptors:
+            # Match the privkeys in w2 for descriptors
+            res = w3.importdescriptors([{
+                "desc": descsum_create("wpkh(" + xpub + "/0/0/*)"),
+                "timestamp": "now",
+                "range": [0, 100],
+                "keypool": True,
+                "active": True,
+                "watchonly": True
+            },{
+                "desc": descsum_create("wpkh(" + xpub + "/0/1/*)"),
+                "timestamp": "now",
+                "range": [0, 100],
+                "keypool": True,
+                "active": True,
+                "internal": True,
+                "watchonly": True
+            }])
+            assert_equal(res, [{"success": True}, {"success": True}])
+
+        for _ in range(3):
+            a2_receive = w2.getnewaddress()
+            if not self.options.descriptors:
+                # Because legacy wallets use exclusively hardened derivation, we can't do a ranged import like we do for descriptors
+                a2_change = w2.getrawchangeaddress() # doesn't actually use change derivation
+                res = w3.importmulti([{
+                    "desc": w2.getaddressinfo(a2_receive)["desc"],
+                    "timestamp": "now",
+                    "keypool": True,
+                    "watchonly": True
+                },{
+                    "desc": w2.getaddressinfo(a2_change)["desc"],
+                    "timestamp": "now",
+                    "keypool": True,
+                    "internal": True,
+                    "watchonly": True
+                }])
+                assert_equal(res, [{"success": True}, {"success": True}])
+
+            w0.sendtoaddress(a2_receive, 10) # fund w3
+        self.generate(self.nodes[0], 1)
+
+        if not self.options.descriptors:
+            # w4 has private keys enabled, but only contains watch-only keys (from w2)
+            # This is legacy wallet behavior only as descriptor wallets don't allow watchonly and non-watchonly things in the same wallet.
+ self.nodes[1].createwallet(wallet_name="w4", disable_private_keys=False)) + w4 = self.nodes[1].get_wallet_rpc("w4")) + for _ in range(3):) + a2_receive = w2.getnewaddress()) + res = w4.importmulti([{) + "desc": w2.getaddressinfo(a2_receive)["desc"],) + "timestamp": "now",) + "keypool": False,) + "watchonly": True) + }])) + assert_equal(res, [{"success": True}])) +) + w0.sendtoaddress(a2_receive, 10) # fund w4) + self.generate(self.nodes[0], 1)) +) + self.log.info("Send to address...")) + self.test_send(from_wallet=w0, to_wallet=w1, amount=1)) + self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=True)) +) + self.log.info("Don't broadcast...")) + res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False)) + assert res["hex"]) +) + self.log.info("Return PSBT...")) + res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, psbt=True)) + assert res["psbt"]) +) + self.log.info("Create transaction that spends to address, but don't broadcast...")) + self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False)) + # conf_target & estimate_mode can be set as argument or option) + res1 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_conf_target=1, arg_estimate_mode="economical", add_to_wallet=False)) + res2 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=1, estimate_mode="economical", add_to_wallet=False)) + assert_equal(self.nodes[1].decodepsbt(res1["psbt"])["fee"],) + self.nodes[1].decodepsbt(res2["psbt"])["fee"])) + # but not at the same time) + for mode in ["unset", "economical", "conservative"]:) + self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_conf_target=1, arg_estimate_mode="economical",) + conf_target=1, estimate_mode=mode, add_to_wallet=False,) + expect_error=(-8, "Pass conf_target and estimate_mode either as arguments or in the options object, but not both"))) +) + self.log.info("Create PSBT from watch-only wallet w3, sign with w2...")) + res = self.test_send(from_wallet=w3, to_wallet=w1, amount=1)) + res = w2.walletprocesspsbt(res["psbt"])) + assert res["complete"]) +) + if not self.options.descriptors:) + # Descriptor wallets do not allow mixed watch-only and non-watch-only things in the same wallet.) + # This is specifically testing that w4 ignores its own private keys and creates a psbt with send) + # which is not something that needs to be tested in descriptor wallets.) 
+ self.log.info("Create PSBT from wallet w4 with watch-only keys, sign with w2...")) + self.test_send(from_wallet=w4, to_wallet=w1, amount=1, expect_error=(-4, "Insufficient funds"))) + res = self.test_send(from_wallet=w4, to_wallet=w1, amount=1, include_watching=True, add_to_wallet=False)) + res = w2.walletprocesspsbt(res["psbt"])) + assert res["complete"]) +) + self.log.info("Create OP_RETURN...")) + self.test_send(from_wallet=w0, to_wallet=w1, amount=1)) + self.test_send(from_wallet=w0, data="Hello World", expect_error=(-8, "Data must be hexadecimal string (not 'Hello World')"))) + self.test_send(from_wallet=w0, data="23")) + res = self.test_send(from_wallet=w3, data="23")) + res = w2.walletprocesspsbt(res["psbt"])) + assert res["complete"]) +) + self.log.info("Test setting explicit fee rate")) + res1 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate="1", add_to_wallet=False)) + res2 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate="1", add_to_wallet=False)) + assert_equal(self.nodes[1].decodepsbt(res1["psbt"])["fee"], self.nodes[1].decodepsbt(res2["psbt"])["fee"])) +) + res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=7, add_to_wallet=False)) + fee = self.nodes[1].decodepsbt(res["psbt"])["fee"]) + assert_fee_amount(fee, count_bytes(res["hex"]), Decimal("0.00007"))) +) + # "unset" and None are treated the same for estimate_mode) + res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=2, estimate_mode="unset", add_to_wallet=False)) + fee = self.nodes[1].decodepsbt(res["psbt"])["fee"]) + assert_fee_amount(fee, count_bytes(res["hex"]), Decimal("0.00002"))) +) + res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=4.531, add_to_wallet=False)) + fee = self.nodes[1].decodepsbt(res["psbt"])["fee"]) + assert_fee_amount(fee, count_bytes(res["hex"]), Decimal("0.00004531"))) +) + res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=3, add_to_wallet=False)) + fee = self.nodes[1].decodepsbt(res["psbt"])["fee"]) + assert_fee_amount(fee, count_bytes(res["hex"]), Decimal("0.00003"))) +) + # Test that passing fee_rate as both an argument and an option raises.) 
+        res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=7, add_to_wallet=False)
+        fee = self.nodes[1].decodepsbt(res["psbt"])["fee"]
+        assert_fee_amount(fee, count_bytes(res["hex"]), Decimal("0.00007"))
+
+        # "unset" and None are treated the same for estimate_mode
+        res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=2, estimate_mode="unset", add_to_wallet=False)
+        fee = self.nodes[1].decodepsbt(res["psbt"])["fee"]
+        assert_fee_amount(fee, count_bytes(res["hex"]), Decimal("0.00002"))
+
+        res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=4.531, add_to_wallet=False)
+        fee = self.nodes[1].decodepsbt(res["psbt"])["fee"]
+        assert_fee_amount(fee, count_bytes(res["hex"]), Decimal("0.00004531"))
+
+        res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=3, add_to_wallet=False)
+        fee = self.nodes[1].decodepsbt(res["psbt"])["fee"]
+        assert_fee_amount(fee, count_bytes(res["hex"]), Decimal("0.00003"))
+
+        # Test that passing fee_rate as both an argument and an option raises.
+        self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=1, fee_rate=1, add_to_wallet=False,
+                       expect_error=(-8, "Pass the fee_rate either as an argument, or in the options object, but not both"))
+
+        assert_raises_rpc_error(-8, "Use fee_rate (sat/vB) instead of feeRate", w0.send, {w1.getnewaddress(): 1}, 6, "conservative", 1, {"feeRate": 0.01})
+
+        assert_raises_rpc_error(-3, "Unexpected key totalFee", w0.send, {w1.getnewaddress(): 1}, 6, "conservative", 1, {"totalFee": 0.01})
+
+        for target, mode in product([-1, 0, 1009], ["economical", "conservative"]):
+            self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=target, estimate_mode=mode,
+                           expect_error=(-8, "Invalid conf_target, must be between 1 and 1008")) # max value of 1008 per src/policy/fees.h
+        msg = 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"'
+        for target, mode in product([-1, 0], ["btc/kb", "sat/b"]):
+            self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=target, estimate_mode=mode, expect_error=(-8, msg))
+        for mode in ["", "foo", Decimal("3.141592")]:
+            self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=0.1, estimate_mode=mode, expect_error=(-8, msg))
+            self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_conf_target=0.1, arg_estimate_mode=mode, expect_error=(-8, msg))
+            assert_raises_rpc_error(-8, msg, w0.send, {w1.getnewaddress(): 1}, 0.1, mode)
+
+        for mode in ["economical", "conservative"]:
+            for k, v in {"string": "true", "bool": True, "object": {"foo": "bar"}}.items():
+                self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=v, estimate_mode=mode,
+                               expect_error=(-3, f"JSON value of type {k} for field conf_target is not of expected type number"))
+
+        # Test setting explicit fee rate just below the minimum of 1 sat/vB.
+        self.log.info("Explicit fee rate raises RPC error 'fee rate too low' if fee_rate of 0.99999999 is passed")
+        msg = "Fee rate (0.999 sat/vB) is lower than the minimum fee rate setting (1.000 sat/vB)"
+        self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=0.999, expect_error=(-4, msg))
+        self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=0.999, expect_error=(-4, msg))
+
+        self.log.info("Explicit fee rate raises if invalid fee_rate is passed")
+        # Test fee_rate with zero values.
+        msg = "Fee rate (0.000 sat/vB) is lower than the minimum fee rate setting (1.000 sat/vB)"
+        for zero_value in [0, 0.000, 0.00000000, "0", "0.000", "0.00000000"]:
+            self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=zero_value, expect_error=(-4, msg))
+            self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=zero_value, expect_error=(-4, msg))
+        msg = "Invalid amount"
+        # Test fee_rate values that don't pass fixed-point parsing checks.
+        for invalid_value in ["", 0.000000001, 1e-09, 1.111111111, 1111111111111111, "31.999999999999999999999"]:
+            self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=invalid_value, expect_error=(-3, msg))
+            self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=invalid_value, expect_error=(-3, msg))
+        # Test fee_rate values that cannot be represented in sat/vB.
+        for invalid_value in [0.0001, 0.00000001, 0.00099999, 31.99999999]:
+            self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=invalid_value, expect_error=(-3, msg))
+            self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=invalid_value, expect_error=(-3, msg))
+        # Test fee_rate out of range (negative number).
+        msg = "Amount out of range"
+        self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=-1, expect_error=(-3, msg))
+        self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=-1, expect_error=(-3, msg))
+        # Test type error.
+        msg = "Amount is not a number or string"
+        for invalid_value in [True, {"foo": "bar"}]:
+            self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=invalid_value, expect_error=(-3, msg))
+            self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=invalid_value, expect_error=(-3, msg))
+
+        # TODO: Return hex if fee rate is below -maxmempool
+        # res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, conf_target=0.1, estimate_mode="sat/b", add_to_wallet=False)
+        # assert res["hex"]
+        # hex = res["hex"]
+        # res = self.nodes[0].testmempoolaccept([hex])
+        # assert not res[0]["allowed"]
+        # assert_equal(res[0]["reject-reason"], "...") # low fee
+        # assert_fee_amount(fee, Decimal(len(res["hex"]) / 2), Decimal("0.000001"))
+
+        self.log.info("If inputs are specified, do not automatically add more...")
+        res = self.test_send(from_wallet=w0, to_wallet=w1, amount=51, inputs=[], add_to_wallet=False)
+        assert res["complete"]
+        utxo1 = w0.listunspent()[0]
+        assert_equal(utxo1["amount"], 50)
+        ERR_NOT_ENOUGH_PRESET_INPUTS = "The preselected coins total amount does not cover the transaction target. " \
+                                       "Please allow other inputs to be automatically selected or include more coins manually"
+        self.test_send(from_wallet=w0, to_wallet=w1, amount=51, inputs=[utxo1],
+                       expect_error=(-4, ERR_NOT_ENOUGH_PRESET_INPUTS))
+        self.test_send(from_wallet=w0, to_wallet=w1, amount=51, inputs=[utxo1], add_inputs=False,
+                       expect_error=(-4, ERR_NOT_ENOUGH_PRESET_INPUTS))
+        res = self.test_send(from_wallet=w0, to_wallet=w1, amount=51, inputs=[utxo1], add_inputs=True, add_to_wallet=False)
+        assert res["complete"]
+
+        self.log.info("Manual change address and position...")
+        self.test_send(from_wallet=w0, to_wallet=w1, amount=1, change_address="not an address",
+                       expect_error=(-5, "Change address must be a valid bitcoin address"))
+        change_address = w0.getnewaddress()
+        res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False, change_address=change_address)
+        assert res["complete"]
+        res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False, change_address=change_address, change_position=0)
+        assert res["complete"]
+        assert_equal(self.nodes[0].decodepsbt(res["psbt"])["tx"]["vout"][0]["scriptPubKey"]["address"], change_address)
+        res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=False, change_type="legacy", change_position=0)
+        assert res["complete"]
+        change_address = self.nodes[0].decodepsbt(res["psbt"])["tx"]["vout"][0]["scriptPubKey"]["address"]
+        assert change_address[0] == "m" or change_address[0] == "n"
+
+        self.log.info("Set lock time...")
+        height = self.nodes[0].getblockchaininfo()["blocks"]
+        res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, locktime=height + 1)
+        assert res["complete"]
+        assert res["txid"]
+        txid = res["txid"]
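+        # With locktime set to the next height, the tx stays non-final until that block has been mined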
+        # Although the wallet finishes the transaction, it can't be added to the mempool yet:
+        hex = self.nodes[0].gettransaction(res["txid"])["hex"]
+        res = self.nodes[0].testmempoolaccept([hex])
+        assert not res[0]["allowed"]
+        assert_equal(res[0]["reject-reason"], "non-final")
+        # It shouldn't be confirmed in the next block
+        self.generate(self.nodes[0], 1)
+        assert_equal(self.nodes[0].gettransaction(txid)["confirmations"], 0)
+        # The mempool should allow it now:
+        res = self.nodes[0].testmempoolaccept([hex])
+        assert res[0]["allowed"]
+        # Don't wait for wallet to add it to the mempool:
+        res = self.nodes[0].sendrawtransaction(hex)
+        self.generate(self.nodes[0], 1)
+        assert_equal(self.nodes[0].gettransaction(txid)["confirmations"], 1)
+
+        self.log.info("Lock unspents...")
+        utxo1 = w0.listunspent()[0]
+        assert_greater_than(utxo1["amount"], 1)
+        res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, inputs=[utxo1], add_to_wallet=False, lock_unspents=True)
+        assert res["complete"]
+        locked_coins = w0.listlockunspent()
+        assert_equal(len(locked_coins), 1)
+        # Locked coins are automatically unlocked when manually selected
+        res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, inputs=[utxo1], add_to_wallet=False)
+        assert res["complete"]
+
+        self.log.info("Replaceable...")
+        res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=True, replaceable=True)
+        assert res["complete"]
+        assert_equal(self.nodes[0].gettransaction(res["txid"])["bip125-replaceable"], "yes")
+        res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=True, replaceable=False)
+        assert res["complete"]
+        assert_equal(self.nodes[0].gettransaction(res["txid"])["bip125-replaceable"], "no")
+
+        self.log.info("Subtract fee from output")
+        self.test_send(from_wallet=w0, to_wallet=w1, amount=1, subtract_fee_from_outputs=[0])
+
+        self.log.info("Include unsafe inputs")
+        self.nodes[1].createwallet(wallet_name="w5")
+        w5 = self.nodes[1].get_wallet_rpc("w5")
+        self.test_send(from_wallet=w0, to_wallet=w5, amount=2)
+        self.test_send(from_wallet=w5, to_wallet=w0, amount=1, expect_error=(-4, "Insufficient funds"))
+        res = self.test_send(from_wallet=w5, to_wallet=w0, amount=1, include_unsafe=True)
+        assert res["complete"]
+
+        self.log.info("Minconf")
+        self.nodes[1].createwallet(wallet_name="minconfw")
+        minconfw = self.nodes[1].get_wallet_rpc("minconfw")
+        self.test_send(from_wallet=w0, to_wallet=minconfw, amount=2)
+        self.generate(self.nodes[0], 3)
+        self.test_send(from_wallet=minconfw, to_wallet=w0, amount=1, minconf=4, expect_error=(-4, "Insufficient funds"))
+        self.test_send(from_wallet=minconfw, to_wallet=w0, amount=1, minconf=-4, expect_error=(-8, "Negative minconf"))
+        res = self.test_send(from_wallet=minconfw, to_wallet=w0, amount=1, minconf=3)
+        assert res["complete"]
+
+        self.log.info("External outputs")
+        privkey, _ = generate_keypair(wif=True)
+
+        self.nodes[1].createwallet("extsend")
+        ext_wallet = self.nodes[1].get_wallet_rpc("extsend")
+        self.nodes[1].createwallet("extfund")
+        ext_fund = self.nodes[1].get_wallet_rpc("extfund")
+
+        # Make a weird but signable script. sh(wsh(pkh())) descriptor accomplishes this
+        desc = descsum_create("sh(wsh(pkh({})))".format(privkey))
+        if self.options.descriptors:
+            res = ext_fund.importdescriptors([{"desc": desc, "timestamp": "now"}])
+        else:
+            res = ext_fund.importmulti([{"desc": desc, "timestamp": "now"}])
+        assert res[0]["success"]
+        addr = self.nodes[0].deriveaddresses(desc)[0]
+        addr_info = ext_fund.getaddressinfo(addr)
+
+        self.nodes[0].sendtoaddress(addr, 10)
+        self.nodes[0].sendtoaddress(ext_wallet.getnewaddress(), 10)
+        self.generate(self.nodes[0], 6)
+        ext_utxo = ext_fund.listunspent(addresses=[addr])[0]
+
+        # An external input without solving data should result in an error
+        self.test_send(from_wallet=ext_wallet, to_wallet=self.nodes[0], amount=15, inputs=[ext_utxo], add_inputs=True, psbt=True, include_watching=True, expect_error=(-4, "Not solvable pre-selected input COutPoint(%s, %s)" % (ext_utxo["txid"][0:10], ext_utxo["vout"])))
+
+        # But funding should work when the solving data is provided
+        res = self.test_send(from_wallet=ext_wallet, to_wallet=self.nodes[0], amount=15, inputs=[ext_utxo], add_inputs=True, psbt=True, include_watching=True, solving_data={"pubkeys": [addr_info['pubkey']], "scripts": [addr_info["embedded"]["scriptPubKey"], addr_info["embedded"]["embedded"]["scriptPubKey"]]})
+        signed = ext_wallet.walletprocesspsbt(res["psbt"])
+        signed = ext_fund.walletprocesspsbt(res["psbt"])
+        assert signed["complete"]
+
+        res = self.test_send(from_wallet=ext_wallet, to_wallet=self.nodes[0], amount=15, inputs=[ext_utxo], add_inputs=True, psbt=True, include_watching=True, solving_data={"descriptors": [desc]})
+        signed = ext_wallet.walletprocesspsbt(res["psbt"])
+        signed = ext_fund.walletprocesspsbt(res["psbt"])
+        assert signed["complete"]
+
+        dec = self.nodes[0].decodepsbt(signed["psbt"])
+        for i, txin in enumerate(dec["tx"]["vin"]):
+            if txin["txid"] == ext_utxo["txid"] and txin["vout"] == ext_utxo["vout"]:
+                input_idx = i
+                break
+        psbt_in = dec["inputs"][input_idx]
+        scriptsig_hex = psbt_in["final_scriptSig"]["hex"] if "final_scriptSig" in psbt_in else ""
+        witness_stack_hex = psbt_in["final_scriptwitness"] if "final_scriptwitness" in psbt_in else None
+        input_weight = calculate_input_weight(scriptsig_hex, witness_stack_hex)
+
+        # Input weight error conditions
+        assert_raises_rpc_error(
+            -8,
+            "Input weights should be specified in inputs rather than in options.",
+            ext_wallet.send,
+            outputs={self.nodes[0].getnewaddress(): 15},
+            options={"inputs": [ext_utxo], "input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 1000}]}
+        )
+
+        target_fee_rate_sat_vb = 10
+        # Funding should also work when input weights are provided
+        res = self.test_send(
+            from_wallet=ext_wallet,
+            to_wallet=self.nodes[0],
+            amount=15,
+            inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": input_weight}],
+            add_inputs=True,
+            psbt=True,
+            include_watching=True,
+            fee_rate=target_fee_rate_sat_vb
+        )
+        signed = ext_wallet.walletprocesspsbt(res["psbt"])
+        signed = ext_fund.walletprocesspsbt(res["psbt"])
+        assert signed["complete"]
+        testres = self.nodes[0].testmempoolaccept([signed["hex"]])[0]
+        assert_equal(testres["allowed"], True)
+        actual_fee_rate_sat_vb = Decimal(testres["fees"]["base"]) * Decimal(1e8) / Decimal(testres["vsize"])
+        # Due to ECDSA signatures not always being the same length, the actual fee rate may be slightly different
+        # but rounded to nearest integer, it should be the same as the target fee rate
+        assert_equal(round(actual_fee_rate_sat_vb), target_fee_rate_sat_vb)
+
+        # Check tx creation size limits
+        self.test_weight_limits()
+
+    def test_weight_limits(self):
+        self.log.info("Test weight limits")
+
+        self.nodes[1].createwallet("test_weight_limits")
+        wallet = self.nodes[1].get_wallet_rpc("test_weight_limits")
+
+        # Generate future inputs; 272 WU per input (273 when high-s).
+        # Picking 1471 inputs will exceed the max standard tx weight.
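+        # 1471 inputs * 272 WU = 400,112 WU, just over the 400,000 WU max standard tx weight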
+        outputs = []
+        for _ in range(1472):
+            outputs.append({wallet.getnewaddress(address_type="legacy"): 0.1})
+        self.nodes[0].send(outputs=outputs)
+        self.generate(self.nodes[0], 1)
+
+        # 1) Try to fund transaction only using the preset inputs
+        inputs = wallet.listunspent()
+        assert_raises_rpc_error(-4, "Transaction too large",
+                                wallet.send, outputs=[{wallet.getnewaddress(): 0.1 * 1471}], options={"inputs": inputs, "add_inputs": False})
+
+        # 2) Let the wallet fund the transaction
+        assert_raises_rpc_error(-4, "The inputs size exceeds the maximum weight. Please try sending a smaller amount or manually consolidating your wallet's UTXOs",
+                                wallet.send, outputs=[{wallet.getnewaddress(): 0.1 * 1471}])
+
+        # 3) Pre-select some inputs and let the wallet fill up the remaining amount
+        inputs = inputs[0:1000]
+        assert_raises_rpc_error(-4, "The combination of the pre-selected inputs and the wallet automatic inputs selection exceeds the transaction maximum weight. Please try sending a smaller amount or manually consolidating your wallet's UTXOs",
+                                wallet.send, outputs=[{wallet.getnewaddress(): 0.1 * 1471}], options={"inputs": inputs, "add_inputs": True})
+
+        self.nodes[1].unloadwallet("test_weight_limits")
+
+
+if __name__ == '__main__':
+    WalletSendTest(__file__).main()
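The fee-rate round-trip near the end of run_test converts testmempoolaccept's BTC-denominated fee back to sat/vB before comparing. A self-contained sketch of that arithmetic (the testres values here are made up for illustration and only mimic the shape of one testmempoolaccept result entry):

    from decimal import Decimal

    # Hypothetical result entry: 1410 sat of fee on a 141-vbyte transaction.
    testres = {"fees": {"base": Decimal("0.00001410")}, "vsize": 141}

    target_fee_rate_sat_vb = 10
    # BTC -> sat is a factor of 1e8; dividing by vsize yields sat/vB.
    actual = Decimal(testres["fees"]["base"]) * Decimal(100_000_000) / Decimal(testres["vsize"])
    # Signature length can vary by a byte, hence rounding before the comparison.
    assert round(actual) == target_fee_rate_sat_vb  # 1410 sat / 141 vB = 10 sat/vB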