From fafa61ddd8f33713a84e05cea329f6eea2ef63e1 Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Thu, 27 Oct 2022 23:06:39 +0000 Subject: [PATCH 01/24] Assert that we are building for little-endian since big-endian is currently not supported. Set a stack size of 64MB upfront Misc improvements --- build.zig | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/build.zig b/build.zig index 0195cbc..acd34d8 100644 --- a/build.zig +++ b/build.zig @@ -11,7 +11,10 @@ const pkgs = struct { const LMDB_PATH = "./deps/lmdb/libraries/liblmdb/"; pub fn build(b: *std.build.Builder) void { - // Standard target options allows the person running `zig build` to choose + comptime { + //Big endian systems not currently supported + std.debug.assert(builtin.target.cpu.arch.endian() == .Little); + } // what target to build for. Here we do not override the defaults, which // means any target is allowed, and the default is native. Other options // for restricting supported target set are available. 
@@ -41,10 +44,10 @@ pub fn build(b: *std.build.Builder) void { exe.linkLibrary(lmdb); exe.addIncludePath(LMDB_PATH); exe.install(); + exe.stack_size = 1024 * 1024 * 64; switch (mode) { - .Debug => {}, - else => { + .ReleaseFast => { lmdb.link_function_sections = true; lmdb.red_zone = true; lmdb.want_lto = true; @@ -59,6 +62,7 @@ pub fn build(b: *std.build.Builder) void { exe.strip = true; } }, + else => {}, } const run_cmd = exe.run(); run_cmd.step.dependOn(b.getInstallStep()); From ba03ca7a70ce262bebdd16a5166d38519dcdf03b Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Thu, 27 Oct 2022 23:13:31 +0000 Subject: [PATCH 02/24] Use Type aliases inplace of primative types move constants from utils module to Blockchain change newChain signature, remove wallet path parameter change mineBlock signature, return the mined block for caching skip signing/verifing coinbase transactions since it doesn't contain real inputs Move Block iteration into BlockIterator struct in Iterator module Change signature of getRawBytes to alway get bytes starting from 0 --- src/Block.zig | 29 ++++++++++--------- src/Blockchain.zig | 34 +++++++++++++++------- src/Iterator.zig | 70 ++++++++++++++++++++++++--------------------- src/Transaction.zig | 11 +++---- src/serializer.zig | 13 +++++---- src/utils.zig | 4 --- 6 files changed, 86 insertions(+), 75 deletions(-) diff --git a/src/Block.zig b/src/Block.zig index 6ca4744..4d38555 100644 --- a/src/Block.zig +++ b/src/Block.zig @@ -2,13 +2,14 @@ const std = @import("std"); const fmt = std.fmt; const mem = std.mem; const Blake3 = std.crypto.hash.Blake3; +const Hash = [Blake3.digest_length]u8; const testing = std.testing; const Block = @This(); const Transaction = @import("Transaction.zig"); //TARGET_ZERO_BITS must be a multiple of 4 and it determines the number of zeros in the target hash which determines difficult -//The higer TARGET_ZERO_BITS the harder or time consuming it is to find a hash +//The higher the TARGET_ZERO_BITS the harder or time 
consuming it is to find a hash //NOTE: when we define a target adjusting algorithm this won't be a global constant anymore //it specifies the target hash which is used to check hashes which are valid //a block is only accepted by the network if its hash meets the network's difficulty target @@ -20,16 +21,16 @@ timestamp: i64, //Thus miners must discover by brute force the "nonce" that, when included in the block, results in an acceptable hash. nonce: usize = 0, //stores the hash of the previous block -previous_hash: [32]u8, +previous_hash: Hash, //hash of the current block -hash: [32]u8 = undefined, +hash: Hash = undefined, //the actual valuable information contained in the block .eg Transactions transactions: std.ArrayListUnmanaged(Transaction), //difficulty bits is the block header storing the difficulty at which the block was mined -difficulty_bits: u7 = TARGET_ZERO_BITS, //u7 limit value from 0 to 127 since we can't have a difficult equal in bitsize to the hashsize which is 256 +difficulty_bits: u7 = TARGET_ZERO_BITS, //u7 limit value from 0 to 127 since we can't have a difficult equal in bitsize to the hashsize which is 255 ///mine a new block -pub fn newBlock(arena: std.mem.Allocator, previous_hash: [32]u8, transactions: []const Transaction) Block { +pub fn newBlock(arena: std.mem.Allocator, previous_hash: Hash, transactions: []const Transaction) Block { var new_block = Block{ .timestamp = std.time.timestamp(), .transactions = std.ArrayListUnmanaged(Transaction){}, @@ -43,7 +44,7 @@ pub fn newBlock(arena: std.mem.Allocator, previous_hash: [32]u8, transactions: [ } pub fn genesisBlock(arena: std.mem.Allocator, coinbase: Transaction) Block { - return newBlock(arena, .{'\x00'} ** 32, &.{coinbase}); + return newBlock(arena, std.mem.zeroes(Hash), &.{coinbase}); } ///Validate POW @@ -58,9 +59,9 @@ pub fn validate(block: Block) bool { fn hashBlock(self: Block, nonce: usize) u256 { //TODO : optimize the sizes of these buffers base on the base and use exactly the 
amount that is needed - var time_buf: [16]u8 = undefined; + var time_buf: [8]u8 = undefined; var bits_buf: [3]u8 = undefined; - var nonce_buf: [16]u8 = undefined; + var nonce_buf: [8]u8 = undefined; const timestamp = fmt.bufPrintIntToSlice(&time_buf, self.timestamp, 16, .lower, .{}); const difficulty_bits = fmt.bufPrintIntToSlice(&bits_buf, self.difficulty_bits, 16, .lower, .{}); @@ -68,7 +69,7 @@ fn hashBlock(self: Block, nonce: usize) u256 { var buf: [4096]u8 = undefined; - //timestamp ,previous_hash and hash form the BlockHeader + //timestamp ,previous_hash and hash from the BlockHeader const block_headers = fmt.bufPrint(&buf, "{[previous_hash]s}{[transactions]s}{[timestamp]s}{[difficulty_bits]s}{[nonce]s}", .{ .previous_hash = self.previous_hash, .transactions = self.hashTxs(), @@ -77,7 +78,7 @@ fn hashBlock(self: Block, nonce: usize) u256 { .nonce = nonce_val, }) catch unreachable; - var hash: [Blake3.digest_length]u8 = undefined; + var hash: Hash = undefined; Blake3.hash(block_headers, &hash, .{}); const hash_int = mem.bytesToValue(u256, hash[0..]); @@ -96,7 +97,7 @@ fn getTargetHash(target_dificulty: u7) u256 { ///Proof of Work mining algorithm ///The usize returned is the nonce with which a valid block was mined -pub fn POW(block: Block) struct { hash: [32]u8, nonce: usize } { +pub fn POW(block: Block) struct { hash: Hash, nonce: usize } { const target_hash = getTargetHash(block.difficulty_bits); var nonce: usize = 0; @@ -105,7 +106,7 @@ pub fn POW(block: Block) struct { hash: [32]u8, nonce: usize } { const hash_int = block.hashBlock(nonce); if (hash_int < target_hash) { - return .{ .hash = @bitCast([32]u8, hash_int), .nonce = nonce }; + return .{ .hash = @bitCast(Hash, hash_int), .nonce = nonce }; } else { nonce += 1; } @@ -113,7 +114,7 @@ pub fn POW(block: Block) struct { hash: [32]u8, nonce: usize } { unreachable; } -fn hashTxs(self: Block) [32]u8 { +fn hashTxs(self: Block) Hash { var txhashes: []u8 = &.{}; var buf: [2048]u8 = undefined; @@ -124,7 +125,7 
@@ fn hashTxs(self: Block) [32]u8 { txhashes = std.mem.concat(allocator, u8, &[_][]const u8{ txhashes, txn.id[0..] }) catch unreachable; } - var hash: [Blake3.digest_length]u8 = undefined; + var hash: Hash = undefined; Blake3.hash(txhashes, &hash, .{}); return hash; } diff --git a/src/Blockchain.zig b/src/Blockchain.zig index 24818de..28d7254 100644 --- a/src/Blockchain.zig +++ b/src/Blockchain.zig @@ -23,11 +23,16 @@ const BLOCK_DB = utils.BLOCK_DB; const WALLET = "wallet.dat"; const LAST = utils.LAST; const OutputIndex = usize; -const TxMap = std.AutoHashMap(Transaction.TxID, OutputIndex); +pub const TxMap = std.AutoHashMap(Transaction.TxID, OutputIndex); +pub const Hash = [Blake3.digest_length]u8; + +pub const BLOCK_DB = "blocks"; +pub const LAST = "last"; +pub const WALLET_STORAGE = "db/wallet.dat"; //READ: https://en.bitcoin.it/wiki/Block_hashing_algorithm https://en.bitcoin.it/wiki/Proof_of_work https://en.bitcoin.it/wiki/Hashcash -last_hash: [Blake3.digest_length]u8, +last_hash: Hash, db: Lmdb, arena: std.mem.Allocator, wallet_path: []const u8, @@ -37,9 +42,8 @@ pub fn getChain(db: Lmdb, arena: std.mem.Allocator) BlockChain { const txn = db.startTxn(.rw, BLOCK_DB); defer txn.commitTxns(); - if (txn.get([Blake3.digest_length]u8, LAST)) |last_block_hash| { - const wallet_path = txn.getAlloc([]const u8, arena, WALLET) catch unreachable; - return .{ .last_hash = last_block_hash, .db = db, .arena = arena, .wallet_path = wallet_path }; + if (txn.get(Hash, LAST)) |last_block_hash| { + return .{ .last_hash = last_block_hash, .db = db, .arena = arena }; } else |_| { std.log.err("create a blockchain with creatchain command before using any other command", .{}); std.process.exit(1); @@ -47,7 +51,7 @@ pub fn getChain(db: Lmdb, arena: std.mem.Allocator) BlockChain { } ///create a new BlockChain -pub fn newChain(db: Lmdb, arena: std.mem.Allocator, address: Wallets.Address, wallet_path: []const u8) BlockChain { +pub fn newChain(db: Lmdb, arena: std.mem.Allocator, address: 
Wallets.Address) BlockChain { if (!Wallet.validateAddress(address)) { std.log.err("blockchain address {s} is invalid", .{address}); std.process.exit(4); @@ -56,7 +60,7 @@ pub fn newChain(db: Lmdb, arena: std.mem.Allocator, address: Wallets.Address, wa var fba = std.heap.FixedBufferAllocator.init(&buf); const allocator = fba.allocator(); - const coinbase_tx = Transaction.initCoinBaseTx(allocator, address, wallet_path); + const coinbase_tx = Transaction.initCoinBaseTx(allocator, address, WALLET_STORAGE); const genesis_block = Block.genesisBlock(allocator, coinbase_tx); const txn = db.startTxn(.rw, BLOCK_DB); @@ -78,11 +82,11 @@ pub fn newChain(db: Lmdb, arena: std.mem.Allocator, address: Wallets.Address, wa }); info("You get a reward of RBC {d} for mining the coinbase transaction", .{Transaction.SUBSIDY}); - return .{ .last_hash = genesis_block.hash, .db = db, .arena = arena, .wallet_path = wallet_path }; + return .{ .last_hash = genesis_block.hash, .db = db, .arena = arena }; } ///add a new Block to the BlockChain -pub fn mineBlock(bc: *BlockChain, transactions: []const Transaction) void { +pub fn mineBlock(bc: *BlockChain, transactions: []const Transaction) Block { for (transactions) |tx| { assert(bc.verifyTx(tx) == true); } @@ -91,7 +95,7 @@ pub fn mineBlock(bc: *BlockChain, transactions: []const Transaction) void { var fba = std.heap.FixedBufferAllocator.init(&buf); const allocator = fba.allocator(); - const new_block = Block.newBlock(allocator, bc.last_hash, transactions); + const new_block = Block.newBlock(bc.arena, bc.last_hash, transactions); std.log.info("new transaction is '{X}'", .{fh(fmtHash(new_block.hash)[0..])}); assert(new_block.validate() == true); @@ -102,6 +106,8 @@ pub fn mineBlock(bc: *BlockChain, transactions: []const Transaction) void { txn.putAlloc(allocator, new_block.hash[0..], new_block) catch unreachable; txn.update(LAST, new_block.hash) catch unreachable; bc.last_hash = new_block.hash; + + return new_block; } ///find unspent transactions 
@@ -188,7 +194,7 @@ fn newUTx(self: BlockChain, amount: usize, from: Wallets.Address, to: Wallets.Ad //Build a list of inputs //for each found output an input referencing it is created. var itr = unspent_output.iterator(); - const wallets = Wallets.getWallets(self.arena, self.wallet_path); + const wallets = Wallets.getWallets(self.arena, WALLET_STORAGE); const froms_wallet = wallets.getWallet(from); while (itr.next()) |kv| { @@ -266,6 +272,9 @@ fn findTx(self: BlockChain, tx_id: Transaction.TxID) Transaction { ///take a transaction `tx` finds all previous transactions it references and sign it with KeyPair `wallet_keys` fn signTx(self: BlockChain, tx: *Transaction, wallet_keys: Wallet.KeyPair) void { + //Coinbase transactions are not signed because they don't contain real inputs + if (tx.isCoinBaseTx()) return; + var buf: [1024 * 1024]u8 = undefined; var fba = std.heap.FixedBufferAllocator.init(&buf); @@ -280,6 +289,9 @@ fn signTx(self: BlockChain, tx: *Transaction, wallet_keys: Wallet.KeyPair) void ///take a transaction `tx` finds transactions it references and verify it fn verifyTx(self: BlockChain, tx: Transaction) bool { + if (tx.isCoinBaseTx()) { + return true; + } var buf: [1024 * 1024]u8 = undefined; var fba = std.heap.FixedBufferAllocator.init(&buf); diff --git a/src/Iterator.zig b/src/Iterator.zig index cc46756..b31a44d 100644 --- a/src/Iterator.zig +++ b/src/Iterator.zig @@ -1,52 +1,56 @@ const std = @import("std"); const Block = @import("Block.zig"); const Lmdb = @import("Lmdb.zig"); -const Iterator = @This(); const info = std.log.info; const fh = std.fmt.fmtSliceHexUpper; const utils = @import("utils.zig"); const fmtHash = utils.fmtHash; -const BLOCK_DB = utils.BLOCK_DB; +const Blockchain = @import("Blockchain.zig"); +const BLOCK_DB = Blockchain.BLOCK_DB; +const Hash = Blockchain.Hash; -arena: std.mem.Allocator, -db: Lmdb, -//Notice that an iterator initially points at the tip of a blockchain, thus blocks will be obtained from top to bottom, from newest 
to oldest. -current_hash: [32]u8, +pub const BlockIterator = struct { + arena: std.mem.Allocator, + db: Lmdb, + //Notice that an iterator initially points at the tip of a blockchain, thus blocks will be obtained from top to bottom, from newest to oldest. + current_hash: Hash, -pub fn iterator(fba: std.mem.Allocator, db: Lmdb, last_hash: [32]u8) Iterator { - return .{ .arena = fba, .db = db, .current_hash = last_hash }; -} + pub fn iterator(fba: std.mem.Allocator, db: Lmdb, last_hash: Hash) BlockIterator { + return .{ .arena = fba, .db = db, .current_hash = last_hash }; + } -///the returned usize is the address of the Block in memory -///the ptr can be obtained with @intToPtr -pub fn next(self: *Iterator) ?Block { - const txn = self.db.startTxn(.ro, BLOCK_DB); - defer txn.doneReading(); + ///the returned usize is the address of the Block in memory + ///the ptr can be obtained with @intToPtr + pub fn next(self: *BlockIterator) ?Block { + const txn = self.db.startTxn(.{ .rw = false }, BLOCK_DB); + defer txn.doneReading(); - if (txn.getAlloc(Block, self.arena, self.current_hash[0..])) |current_block| { - self.current_hash = current_block.previous_hash; + if (txn.getAlloc(Block, self.arena, self.current_hash[0..])) |current_block| { + self.current_hash = current_block.previous_hash; - return current_block; - // return @ptrToInt(current_block); - } else |_| { - return null; + return current_block; + // return @ptrToInt(current_block); + } else |_| { + return null; + } } -} -pub fn print(chain_iter: *Iterator) void { - //TODO:work on converting hashes to Big endian which is usually the expected form for display - //improve the hex formating - info("starting blockchain iteration\n", .{}); - while (chain_iter.next()) |current_block| { - // const current_block = @intToPtr(*Block, block); - info("previous hash is '{X}'", .{fh(fmtHash(current_block.previous_hash)[0..])}); - info("hash of current block is '{X}'", .{fh(fmtHash(current_block.hash)[0..])}); - info("nonce is {}", 
.{current_block.nonce}); - info("POW: {}\n\n", .{current_block.validate()}); + pub fn print(chain_iter: *BlockIterator) void { + //TODO:work on converting hashes to Big endian which is usually the expected form for display + //improve the hex formating + info("starting blockchain iteration\n", .{}); + while (chain_iter.next()) |current_block| { + // const current_block = @intToPtr(*Block, block); + info("previous hash is '{X}'", .{fh(fmtHash(current_block.previous_hash)[0..])}); + info("hash of current block is '{X}'", .{fh(fmtHash(current_block.hash)[0..])}); + info("nonce is {}", .{current_block.nonce}); + info("POW: {}\n\n", .{current_block.validate()}); + } + info("done", .{}); } - info("done", .{}); -} +}; + // const Self = @This(); // const cast = @import("serializer.zig").cast; // diff --git a/src/Transaction.zig b/src/Transaction.zig index 4c34c75..bcfe8e8 100644 --- a/src/Transaction.zig +++ b/src/Transaction.zig @@ -68,10 +68,10 @@ pub const SUBSIDY = 10; //A coinbase transaction is a special type of transactions, which doesn’t require previously existing outputs. //This is the reward miners get for mining new blocks. 
-pub fn initCoinBaseTx(arena: Allocator, to: Wallets.Address, wallet_path: []const u8) Transaction { +pub fn initCoinBaseTx(arena: Allocator, miners_address: Wallets.Address, wallet_path: []const u8) Transaction { var inlist = InList{}; const wallets = Wallets.getWallets(arena, wallet_path); - const tos_wallet = wallets.getWallet(to); + const tos_wallet = wallets.getWallet(miners_address); inlist.append( arena, TxInput{ @@ -83,7 +83,7 @@ pub fn initCoinBaseTx(arena: Allocator, to: Wallets.Address, wallet_path: []cons ) catch unreachable; var outlist = OutList{}; - outlist.append(arena, TxOutput{ .value = SUBSIDY, .pub_key_hash = Wallet.getPubKeyHash(to) }) catch unreachable; + outlist.append(arena, TxOutput{ .value = SUBSIDY, .pub_key_hash = Wallet.getPubKeyHash(miners_address) }) catch unreachable; var tx = Transaction{ .id = undefined, .tx_in = inlist, .tx_out = outlist }; tx.setId(); @@ -101,9 +101,6 @@ pub fn initCoinBaseTx(arena: Allocator, to: Wallets.Address, wallet_path: []cons ///in order to sign a transaction, we need to access the outputs referenced in the inputs of the transaction , thus ///we need the transactions that store these outputs. 
`prev_txs` pub fn sign(self: *Transaction, wallet_keys: Wallet.KeyPair, prev_txs: PrevTxMap, fba: Allocator) void { - //Coinbase transactions are not signed because they don't contain real inputs - if (self.isCoinBaseTx()) return; - //A trimmed copy will be signed, not a full transaction: //The copy will include all the inputs and outputs, but TxInput.sig and TxInput.pub_key are empty var trimmed_tx_copy = self.trimmedCopy(fba); @@ -144,7 +141,7 @@ pub fn verify(self: Transaction, prev_txs: PrevTxMap, fba: Allocator) bool { if (Wallets.Ed25519.verify(value_in.sig, trimmed_tx_copy.id[0..], value_in.pub_key)) |_| {} else |err| { std.log.info("public key has a value of {}", .{value_in}); - std.log.err("{s} occured while verifying the transaction", .{@errorName(err)}); + std.log.err("{s} occurred while verifying the transaction", .{@errorName(err)}); return false; } } diff --git a/src/serializer.zig b/src/serializer.zig index b655383..f7893d4 100644 --- a/src/serializer.zig +++ b/src/serializer.zig @@ -20,15 +20,16 @@ pub fn serialize(data: anytype) [HASH_SIZE + @sizeOf(@TypeOf(data))]u8 { return serialized_data; } -fn getRawBytes(data: ?*anyopaque, start: usize, size: usize) []const u8 { - return @ptrCast([*]u8, data.?)[start..size]; +/// get bytes starting from `0` to `len` +pub fn getRawBytes(data: ?*anyopaque, len: usize) []const u8 { + return @ptrCast([*]const u8, data.?)[0..len]; } ///deserialize bytes representing data as `T` ///use when no allocation is required .ie data doesn't contain ptr or slice -pub fn deserialize(comptime T: type, data: ?*anyopaque, size: usize) T { +pub fn deserialize(comptime T: type, data: ?*anyopaque, len: usize) T { // return std.mem.bytesAsSlice(T, getBytes(data.?, size))[0]; - const serialized_data = getRawBytes(data, 0, size); + const serialized_data = getRawBytes(data, len); var fbr = std.io.fixedBufferStream(serialized_data); fbr.seekTo(0) catch unreachable; @@ -39,9 +40,9 @@ pub fn deserialize(comptime T: type, data: 
?*anyopaque, size: usize) T { ///deserialize types with require allocation ///recommend you use a `fixedBufferAllocator` -pub fn deserializeAlloc(comptime T: type, fballocator: std.mem.Allocator, data: ?*anyopaque, size: usize) T { +pub fn deserializeAlloc(comptime T: type, fballocator: std.mem.Allocator, data: ?*anyopaque, len: usize) T { // return std.mem.bytesAsSlice(T, getBytes(data.?, size))[0]; - const serialized_data = getRawBytes(data, 0, size); + const serialized_data = getRawBytes(data, len); var fbr = std.io.fixedBufferStream(serialized_data); fbr.seekTo(0) catch unreachable; diff --git a/src/utils.zig b/src/utils.zig index 6588ebf..1336081 100644 --- a/src/utils.zig +++ b/src/utils.zig @@ -1,7 +1,3 @@ -pub const BLOCK_DB = "blocks"; - -pub const LAST = "last"; - pub fn fmtHash(hash: [32]u8) [32]u8 { const hash_int = @bitCast(u256, hash); const big_end_hash_int = @byteSwap(hash_int); From de43fd32fe35401f84c99b6e897465e80f6d4666 Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Fri, 28 Oct 2022 11:48:27 +0000 Subject: [PATCH 03/24] Organize Exit codes --- src/Blockchain.zig | 22 ++++++++++++---------- src/Cli.zig | 7 +++++-- src/Wallets.zig | 11 ++++++++--- src/main.zig | 4 ++++ src/utils.zig | 8 ++++++++ 5 files changed, 37 insertions(+), 15 deletions(-) diff --git a/src/Blockchain.zig b/src/Blockchain.zig index 28d7254..3e24c7a 100644 --- a/src/Blockchain.zig +++ b/src/Blockchain.zig @@ -18,10 +18,10 @@ const utils = @import("utils.zig"); const Wallet = Wallets.Wallet; const Address = Wallets.Address; +const BlockIterator = Iterator.BlockIterator; const fmtHash = utils.fmtHash; -const BLOCK_DB = utils.BLOCK_DB; -const WALLET = "wallet.dat"; -const LAST = utils.LAST; +const ExitCodes = utils.ExitCodes; + const OutputIndex = usize; pub const TxMap = std.AutoHashMap(Transaction.TxID, OutputIndex); pub const Hash = [Blake3.digest_length]u8; @@ -46,7 +46,7 @@ pub fn getChain(db: Lmdb, arena: std.mem.Allocator) BlockChain { return .{ .last_hash = 
last_block_hash, .db = db, .arena = arena }; } else |_| { std.log.err("create a blockchain with creatchain command before using any other command", .{}); - std.process.exit(1); + std.process.exit(@enumToInt(ExitCodes.blockchain_not_found)); } } @@ -54,7 +54,7 @@ pub fn getChain(db: Lmdb, arena: std.mem.Allocator) BlockChain { pub fn newChain(db: Lmdb, arena: std.mem.Allocator, address: Wallets.Address) BlockChain { if (!Wallet.validateAddress(address)) { std.log.err("blockchain address {s} is invalid", .{address}); - std.process.exit(4); + std.process.exit(@enumToInt(ExitCodes.invalid_wallet_address)); } var buf: [1024 * 6]u8 = undefined; var fba = std.heap.FixedBufferAllocator.init(&buf); @@ -68,8 +68,10 @@ pub fn newChain(db: Lmdb, arena: std.mem.Allocator, address: Wallets.Address) Bl txn.put(LAST, genesis_block.hash) catch |newchain_err| switch (newchain_err) { error.KeyAlreadyExist => { - std.log.err("Attempting to create new chain at an address '{s}' which already contains a chain", .{address}); - std.process.exit(1); + std.log.err("Attempting to create a new blockchain at address '{s}' while a blockchain already exist", .{ + address, + }); + std.process.exit(@enumToInt(ExitCodes.blockchain_already_exist)); }, else => unreachable, }; @@ -188,7 +190,7 @@ fn newUTx(self: BlockChain, amount: usize, from: Wallets.Address, to: Wallets.Ad if (accumulated_amount < amount) { std.log.err("not enough funds to transfer RBC {d} from '{s}' to '{s}'", .{ amount, from, to }); - std.process.exit(2); + std.process.exit(@enumToInt(ExitCodes.insufficient_wallet_balance)); } //Build a list of inputs @@ -324,11 +326,11 @@ pub fn sendValue(self: *BlockChain, amount: usize, from: Wallets.Address, to: Wa if (!Wallet.validateAddress(from)) { std.log.err("sender address {s} is invalid", .{from}); - std.process.exit(4); + std.process.exit(@enumToInt(ExitCodes.invalid_wallet_address)); } if (!Wallet.validateAddress(to)) { std.log.err("recipient address {s} is invalid", .{to}); - 
std.process.exit(4); + std.process.exit(@enumToInt(ExitCodes.invalid_wallet_address)); } var new_transaction = self.newUTx(amount, from, to); diff --git a/src/Cli.zig b/src/Cli.zig index cf5876a..d69944e 100644 --- a/src/Cli.zig +++ b/src/Cli.zig @@ -3,7 +3,10 @@ const BlockChain = @import("Blockchain.zig"); const Wallets = @import("Wallets.zig"); const Iterator = @import("Iterator.zig"); const Lmdb = @import("Lmdb.zig"); -const WALLET_STORAGE = "db/wallet.dat"; +const UTXOcache = @import("UTXOcache.zig"); +const ExitCodes = @import("utils.zig").ExitCodes; +const BlockIterator = Iterator.BlockIterator; +const WALLET_STORAGE = BlockChain.WALLET_STORAGE; const Cli = @This(); @@ -152,5 +155,5 @@ fn printUsage(cmd: Cmd) void { , .{}); }, } - std.process.exit(7); + std.process.exit(@enumToInt(ExitCodes.invalid_cli_argument)); } diff --git a/src/Wallets.zig b/src/Wallets.zig index 9ced82b..8216e20 100644 --- a/src/Wallets.zig +++ b/src/Wallets.zig @@ -6,6 +6,7 @@ const Blake2b160 = crypto.hash.blake2.Blake2b160; const base64 = std.base64; const serializer = @import("s2s"); +const ExitCodes = @import("utils.zig").ExitCodes; pub const ADDR_CKSUM_LEN = 4; //meaning 4 u8 values making up 32bit pub const PUB_KEY_HASH_LEN = Blake2b160.digest_length; @@ -65,7 +66,11 @@ pub fn getAddresses(wallets: Wallets) []const Address { ///get the wallet associated with this address pub fn getWallet(self: Wallets, address: Address) Wallet { - return self.wallets.get(address).?; + return self.wallets.get(address) orelse { + std.log.err("The wallet address specified '{s}' does not exit", .{address}); + std.log.err("Create a wallet with the 'createwallet' command", .{}); + std.process.exit(@enumToInt(ExitCodes.invalid_wallet_address)); + }; } ///load saved wallet data @@ -88,8 +93,8 @@ fn loadWallets(self: *Wallets) void { self.wallets.putNoClobber(wallet_key, wallet_value) catch unreachable; } } -//TODO: oraganize exit codes -//TODO: a way to efficiently save wallets .ie something like write 
only part which aren't already in the file +//TODO: a way to efficiently save wallets .ie something like write only part which aren't already in the file or use +//the db so that we can keep track the last wallet address and iterate from that point and store the values ///save wallets to `wallet_path` field fn saveWallets(self: Wallets) void { const file = std.fs.cwd().openFile(self.wallet_path, .{ .mode = .write_only }) catch |err| switch (err) { diff --git a/src/main.zig b/src/main.zig index c8e21cc..2da1125 100644 --- a/src/main.zig +++ b/src/main.zig @@ -9,6 +9,10 @@ const gpa = if (builtin.link_libc and builtin.mode != .Debug) else default_allocator.allocator(); +// TODO: improve memory usage and recycling at appropiate places. +// set buffers in local scope based on the sizeof the struct or types stored or allocated +//TODO: rethink allocations and memory management pattern used,maybe pass the allocator type so you can free memory +//if the data generated at the step won't be used again or isn't useful again pub fn main() !void { defer if (builtin.mode == .Debug) { _ = default_allocator.deinit(); diff --git a/src/utils.zig b/src/utils.zig index 1336081..6c25e64 100644 --- a/src/utils.zig +++ b/src/utils.zig @@ -3,3 +3,11 @@ pub fn fmtHash(hash: [32]u8) [32]u8 { const big_end_hash_int = @byteSwap(hash_int); return @bitCast([32]u8, big_end_hash_int); } + +pub const ExitCodes = enum { + blockchain_not_found, + blockchain_already_exist, + invalid_wallet_address, + insufficient_wallet_balance, + invalid_cli_argument, +}; From afaa4173991b123b6655c877222ee43b7c0f5b19 Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Fri, 6 Jan 2023 17:56:07 +0000 Subject: [PATCH 04/24] Fix flag changes not getting commited when using named dbs --- deps/lmdb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/lmdb b/deps/lmdb index 1b113f2..8c2cba5 160000 --- a/deps/lmdb +++ b/deps/lmdb @@ -1 +1 @@ -Subproject commit 1b113f2c94940bedbe86332f0c802db031814749 
+Subproject commit 8c2cba525dfc0ad034349929c0520c5788bc4604 From 5e1cb9d10b4b1ca39bb6db4de6a73a32b7304de1 Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Fri, 6 Jan 2023 18:01:23 +0000 Subject: [PATCH 05/24] Update submodules Use official s2s instead of my clone This is because after the changes to packed struct in the latest version of zig my changes aren't relevant anymore update lmdb with fix for committing flag changes when using named dbs --- .gitmodules | 4 ++-- deps/lmdb | 2 +- deps/s2s | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.gitmodules b/.gitmodules index eb7dc1a..abe570c 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,6 @@ [submodule "deps/s2s"] path = deps/s2s - url = https://github.com/Ultra-Code/s2s + url = git@github.com:ziglibs/s2s.git [submodule "deps/lmdb"] path = deps/lmdb - url = git@github.com:LMDB/lmdb.git + url = git@github.com:Ultra-Code/lmdb.git diff --git a/deps/lmdb b/deps/lmdb index 8c2cba5..ce001a3 160000 --- a/deps/lmdb +++ b/deps/lmdb @@ -1 +1 @@ -Subproject commit 8c2cba525dfc0ad034349929c0520c5788bc4604 +Subproject commit ce001a311d8fb16afbf13df2a1e21d505cb477cb diff --git a/deps/s2s b/deps/s2s index b58025d..8fc312f 160000 --- a/deps/s2s +++ b/deps/s2s @@ -1 +1 @@ -Subproject commit b58025d5097be8ec29a3ae91c23347702c312d50 +Subproject commit 8fc312f40b178f1493d1713a57b6abde856bd5e6 From 857d584abca0655dd5b08be021573cbc4ce3e581 Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Wed, 8 Feb 2023 13:59:50 +0000 Subject: [PATCH 06/24] Update build.zig to use zig's new api --- build.zig | 65 +++++++++++++++++++++++++++++++------------------------ 1 file changed, 37 insertions(+), 28 deletions(-) diff --git a/build.zig b/build.zig index acd34d8..e1542eb 100644 --- a/build.zig +++ b/build.zig @@ -1,20 +1,18 @@ const std = @import("std"); const builtin = @import("builtin"); -const Pkg = std.build.Pkg; -const pkgs = struct { - const s2s = Pkg{ - .name = "s2s", - .source = .{ .path = "./deps/s2s/s2s.zig" }, - 
.dependencies = &[_]Pkg{}, - }; -}; -const LMDB_PATH = "./deps/lmdb/libraries/liblmdb/"; +const Build = std.Build; -pub fn build(b: *std.build.Builder) void { - comptime { - //Big endian systems not currently supported - std.debug.assert(builtin.target.cpu.arch.endian() == .Little); - } +comptime { + //Big endian systems not currently supported + std.debug.assert(builtin.target.cpu.arch.endian() == .Little); +} + +pub fn build(b: *Build) void { + const s2s_module = b.createModule(.{ + .source_file = .{ .path = "./deps/s2s/s2s.zig" }, + }); + + const LMDB_PATH = "./deps/lmdb/libraries/liblmdb/"; // what target to build for. Here we do not override the defaults, which // means any target is allowed, and the default is native. Other options // for restricting supported target set are available. @@ -22,31 +20,41 @@ pub fn build(b: *std.build.Builder) void { // Standard release options allow the person running `zig build` to select // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. 
- const mode = b.standardReleaseOptions(); + const optimize = b.standardOptimizeOption(.{}); //Add lmdb library for embeded key/value store const cflags = [_][]const u8{ "-pthread", "-std=c2x" }; const lmdb_sources = [_][]const u8{ LMDB_PATH ++ "mdb.c", LMDB_PATH ++ "midl.c" }; - const lmdb = b.addStaticLibrary("lmdb", null); - lmdb.setTarget(target); - lmdb.setBuildMode(mode); + const lmdb = b.addStaticLibrary(.{ + .name = "lmdb", + .target = target, + .optimize = optimize, + }); lmdb.addCSourceFiles(&lmdb_sources, &cflags); lmdb.linkLibC(); lmdb.install(); const target_name = target.allocDescription(b.allocator) catch unreachable; - const exe_name = std.fmt.allocPrint(b.allocator, "{[program]s}-{[target]s}", .{ .program = "recblock", .target = target_name }) catch unreachable; + const exe_name = std.fmt.allocPrint(b.allocator, "{[program]s}-{[target]s}", .{ + .program = "recblock", + .target = target_name, + }) catch unreachable; - const exe = b.addExecutable(exe_name, "src/main.zig"); - exe.setTarget(target); - exe.setBuildMode(mode); - exe.addPackage(pkgs.s2s); + const exe = b.addExecutable(.{ + .name = exe_name, + .root_source_file = .{ + .path = "src/main.zig", + }, + .target = target, + .optimize = optimize, + }); + exe.addModule("s2s", s2s_module); exe.linkLibrary(lmdb); exe.addIncludePath(LMDB_PATH); exe.install(); exe.stack_size = 1024 * 1024 * 64; - switch (mode) { + switch (optimize) { .ReleaseFast => { lmdb.link_function_sections = true; lmdb.red_zone = true; @@ -74,10 +82,11 @@ pub fn build(b: *std.build.Builder) void { run_step.dependOn(&lmdb.step); run_step.dependOn(&run_cmd.step); - const exe_tests = b.addTest("src/main.zig"); - exe_tests.setTarget(target); - exe_tests.setBuildMode(mode); - exe_tests.addPackage(pkgs.s2s); + const exe_tests = b.addTest(.{ + .root_source_file = .{ .path = "src/main.zig" }, + .target = target, + }); + exe_tests.addModule("s2s", s2s_module); exe_tests.linkLibrary(lmdb); exe_tests.addIncludePath(LMDB_PATH); From 
693bbac33344200ef54865b41288aa9bb2d647e3 Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Sun, 7 May 2023 07:25:08 +0000 Subject: [PATCH 07/24] feat(s2s): add s2s as a zig package Add the required build.zig.zon file Update build.zig to use the new install and run artifact api --- .gitmodules | 3 --- build.zig | 10 ++++------ build.zig.zon | 10 ++++++++++ deps/s2s | 1 - 4 files changed, 14 insertions(+), 10 deletions(-) create mode 100644 build.zig.zon delete mode 160000 deps/s2s diff --git a/.gitmodules b/.gitmodules index abe570c..4283423 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,3 @@ -[submodule "deps/s2s"] - path = deps/s2s - url = git@github.com:ziglibs/s2s.git [submodule "deps/lmdb"] path = deps/lmdb url = git@github.com:Ultra-Code/lmdb.git diff --git a/build.zig b/build.zig index e1542eb..ac80905 100644 --- a/build.zig +++ b/build.zig @@ -8,9 +8,7 @@ comptime { } pub fn build(b: *Build) void { - const s2s_module = b.createModule(.{ - .source_file = .{ .path = "./deps/s2s/s2s.zig" }, - }); + const s2s_module = b.dependency("s2s", .{}).module("s2s"); const LMDB_PATH = "./deps/lmdb/libraries/liblmdb/"; // what target to build for. 
Here we do not override the defaults, which @@ -32,7 +30,7 @@ pub fn build(b: *Build) void { }); lmdb.addCSourceFiles(&lmdb_sources, &cflags); lmdb.linkLibC(); - lmdb.install(); + b.installArtifact(lmdb); const target_name = target.allocDescription(b.allocator) catch unreachable; const exe_name = std.fmt.allocPrint(b.allocator, "{[program]s}-{[target]s}", .{ @@ -51,7 +49,7 @@ pub fn build(b: *Build) void { exe.addModule("s2s", s2s_module); exe.linkLibrary(lmdb); exe.addIncludePath(LMDB_PATH); - exe.install(); + b.installArtifact(exe); exe.stack_size = 1024 * 1024 * 64; switch (optimize) { @@ -72,7 +70,7 @@ pub fn build(b: *Build) void { }, else => {}, } - const run_cmd = exe.run(); + const run_cmd = b.addRunArtifact(exe); run_cmd.step.dependOn(b.getInstallStep()); if (b.args) |args| { run_cmd.addArgs(args); diff --git a/build.zig.zon b/build.zig.zon new file mode 100644 index 0000000..dfe20bc --- /dev/null +++ b/build.zig.zon @@ -0,0 +1,10 @@ +.{ + .name = "recblock", + .version = "0.1.0", + .dependencies = .{ + .s2s = .{ + .url = "https://github.com/ziglibs/s2s/archive/6484f786b1830115601cd9ffb51bccec551ac6be.tar.gz", + .hash = "1220f6c17bde3f75ac79f29b5572f75fc35275b552b5b398d42df133fa038a29b396", + }, + }, +} diff --git a/deps/s2s b/deps/s2s deleted file mode 160000 index 8fc312f..0000000 --- a/deps/s2s +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 8fc312f40b178f1493d1713a57b6abde856bd5e6 From 59261dda65b6ac1e8b9fa5a3179947d40c9c1152 Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Sun, 7 May 2023 21:08:29 +0000 Subject: [PATCH 08/24] Overhaul of Lmdb, Blockchain module Initial implementation of UTXOcache and it integration with Blockchain Overhaul of mdb_put related operations Separate start transaction and openDb actions Improve API of startTxn and openDb with insight from lmdb bug report Refactor and make put, update, remove and del operations consistent with the help of options and DRY principles Add logic for specifying the appropriate insert flags add 
DbFlags struct to simplify dbflags related operations Make some declaration public so the can be used in other modules Improve ensureValidState assertions Rename some utility functions dbKey and dbValue Improve TxnType Add personal crafted test from my local copy of s2s Set main fba buffer size to 7MiB Add some todos Use new @memcpy builtin syntax --- src/Blockchain.zig | 166 +++++++---------- src/Lmdb.zig | 455 ++++++++++++++++++++++++++++++++++++--------- src/UTXOcache.zig | 213 +++++++++++++++++++++ src/main.zig | 5 +- src/serializer.zig | 10 +- 5 files changed, 650 insertions(+), 199 deletions(-) create mode 100644 src/UTXOcache.zig diff --git a/src/Blockchain.zig b/src/Blockchain.zig index 3e24c7a..e5e273c 100644 --- a/src/Blockchain.zig +++ b/src/Blockchain.zig @@ -15,6 +15,7 @@ const Lmdb = @import("Lmdb.zig"); const Iterator = @import("Iterator.zig"); const Wallets = @import("Wallets.zig"); const utils = @import("utils.zig"); +const UTXOcache = @import("UTXOcache.zig"); const Wallet = Wallets.Wallet; const Address = Wallets.Address; @@ -30,19 +31,21 @@ pub const BLOCK_DB = "blocks"; pub const LAST = "last"; pub const WALLET_STORAGE = "db/wallet.dat"; -//READ: https://en.bitcoin.it/wiki/Block_hashing_algorithm https://en.bitcoin.it/wiki/Proof_of_work https://en.bitcoin.it/wiki/Hashcash +//READ: https://en.bitcoin.it/wiki/Block_hashing_algorithm +//https://en.bitcoin.it/wiki/Proof_of_work https://en.bitcoin.it/wiki/Hashcash last_hash: Hash, db: Lmdb, arena: std.mem.Allocator, -wallet_path: []const u8, //TODO:organise and document exit codes -pub fn getChain(db: Lmdb, arena: std.mem.Allocator) BlockChain { - const txn = db.startTxn(.rw, BLOCK_DB); - defer txn.commitTxns(); +pub fn getChain(lmdb: Lmdb, arena: std.mem.Allocator) BlockChain { + const txn = lmdb.startTxn(); - if (txn.get(Hash, LAST)) |last_block_hash| { + const db = lmdb.openDb(txn, BLOCK_DB); + defer db.commitTxns(); + + if (db.get(Hash, LAST)) |last_block_hash| { return .{ .last_hash = 
last_block_hash, .db = db, .arena = arena }; } else |_| { std.log.err("create a blockchain with creatchain command before using any other command", .{}); @@ -51,7 +54,7 @@ pub fn getChain(db: Lmdb, arena: std.mem.Allocator) BlockChain { } ///create a new BlockChain -pub fn newChain(db: Lmdb, arena: std.mem.Allocator, address: Wallets.Address) BlockChain { +pub fn newChain(lmdb: Lmdb, arena: std.mem.Allocator, address: Wallets.Address) BlockChain { if (!Wallet.validateAddress(address)) { std.log.err("blockchain address {s} is invalid", .{address}); std.process.exit(@enumToInt(ExitCodes.invalid_wallet_address)); @@ -63,10 +66,13 @@ pub fn newChain(db: Lmdb, arena: std.mem.Allocator, address: Wallets.Address) Bl const coinbase_tx = Transaction.initCoinBaseTx(allocator, address, WALLET_STORAGE); const genesis_block = Block.genesisBlock(allocator, coinbase_tx); - const txn = db.startTxn(.rw, BLOCK_DB); - defer txn.commitTxns(); + const txn = lmdb.startTxn(); + + lmdb.setDbOpt(txn, .{ .rw = true, .dup = true }, BLOCK_DB); + const db = lmdb.openDb(txn, BLOCK_DB); + defer db.commitTxns(); - txn.put(LAST, genesis_block.hash) catch |newchain_err| switch (newchain_err) { + db.put(LAST, genesis_block.hash) catch |newchain_err| switch (newchain_err) { error.KeyAlreadyExist => { std.log.err("Attempting to create a new blockchain at address '{s}' while a blockchain already exist", .{ address, @@ -75,14 +81,13 @@ pub fn newChain(db: Lmdb, arena: std.mem.Allocator, address: Wallets.Address) Bl }, else => unreachable, }; - txn.putAlloc(allocator, WALLET, wallet_path) catch unreachable; - txn.putAlloc(allocator, genesis_block.hash[0..], genesis_block) catch unreachable; + db.putAlloc(allocator, genesis_block.hash[0..], genesis_block) catch unreachable; info("new blockchain is create with address '{s}'\nhash of the created blockchain is '{X}'", .{ address, fh(fmtHash(genesis_block.hash)[0..]), }); - info("You get a reward of RBC {d} for mining the coinbase transaction", 
.{Transaction.SUBSIDY}); + info("You get a reward of RBC {d} for mining the transaction", .{Transaction.SUBSIDY}); return .{ .last_hash = genesis_block.hash, .db = db, .arena = arena }; } @@ -102,33 +107,34 @@ pub fn mineBlock(bc: *BlockChain, transactions: []const Transaction) Block { assert(new_block.validate() == true); - const txn = bc.db.startTxn(.rw, BLOCK_DB); - defer txn.commitTxns(); + const txn = bc.db.startTxn(); + const db = bc.db.openDb(txn, BLOCK_DB); + defer db.commitTxns(); - txn.putAlloc(allocator, new_block.hash[0..], new_block) catch unreachable; - txn.update(LAST, new_block.hash) catch unreachable; + db.putAlloc(allocator, new_block.hash[0..], new_block) catch unreachable; + db.update(LAST, new_block.hash) catch unreachable; bc.last_hash = new_block.hash; return new_block; } -///find unspent transactions +///find all unspent transactions and map them with their Transaction.TxID //TODO: add test for *UTX* and Tx Output fn's -fn findUTxs(bc: BlockChain, pub_key_hash: Wallets.PublicKeyHash) []const Transaction { +pub fn findAndMapAllTxIDsToUTxOs(bc: BlockChain) std.AutoArrayHashMap(Transaction.TxID, []const Transaction.TxOutput) { //TODO: find a way to cap the max stack usage //INITIA_IDEA: copy relevant data and free blocks var buf: [1024 * 950]u8 = undefined; var fba = std.heap.FixedBufferAllocator.init(buf[0..]); const allocator = fba.allocator(); - var unspent_txos = std.ArrayList(Transaction).init(bc.arena); + var unspent_txos = std.AutoArrayHashMap(Transaction.TxID, []const Transaction.TxOutput).init(bc.arena); var spent_txos = TxMap.init(allocator); - var bc_itr = Iterator.iterator(allocator, bc.db, bc.last_hash); + var bc_itr = BlockIterator.iterator(bc.arena, bc.db, bc.last_hash); while (bc_itr.next()) |block| { for (block.transactions.items) |tx| { - output: for (tx.tx_out.items) |txoutput, txindex| { + output: for (tx.tx_out.items, 0..) |txoutput, txindex| { //was the output spent? 
We skip those that were referenced in inputs (their values were moved to //other outputs, thus we cannot count them) if (spent_txos.get(tx.id)) |spent_output_index| { @@ -139,8 +145,16 @@ fn findUTxs(bc: BlockChain, pub_key_hash: Wallets.PublicKeyHash) []const Transac //If an output was locked by the same pub_key_hash we’re searching unspent transaction outputs for, //then this is the output we want - if (txoutput.isLockedWithKey(pub_key_hash)) { - unspent_txos.append(tx) catch unreachable; + // if (txoutput.isLockedWithKey(pub_key_hash)) { + + // unspent_txos.append(tx) catch unreachable; + // } + if (unspent_txos.get(tx.id)) |output| { + const outputs = std.mem.concat(bc.arena, Transaction.TxOutput, &.{ output, &.{txoutput} }) catch unreachable; + unspent_txos.putNoClobber(tx.id, outputs) catch unreachable; + } else { + const txoutput_copy = bc.arena.dupe(Transaction.TxOutput, &.{txoutput}) catch unreachable; + unspent_txos.putNoClobber(tx.id, txoutput_copy) catch unreachable; } } @@ -148,9 +162,10 @@ fn findUTxs(bc: BlockChain, pub_key_hash: Wallets.PublicKeyHash) []const Transac //to coinbase transactions, since they don’t unlock outputs) if (!tx.isCoinBaseTx()) { for (tx.tx_in.items) |txinput| { - if (txinput.usesKey(pub_key_hash)) { - spent_txos.putNoClobber(txinput.out_id, txinput.out_index) catch unreachable; - } + // if (txinput.usesKey(pub_key_hash)) { + // spent_txos.putNoClobber(txinput.out_id, txinput.out_index) catch unreachable; + // } + spent_txos.putNoClobber(txinput.out_id, txinput.out_index) catch unreachable; } } } @@ -159,34 +174,33 @@ fn findUTxs(bc: BlockChain, pub_key_hash: Wallets.PublicKeyHash) []const Transac break; } } - return unspent_txos.toOwnedSlice(); -} -///find unspent transaction outputs -fn findUTxOs(self: BlockChain, pub_key_hash: Wallets.PublicKeyHash) []const Transaction.TxOutput { - var tx_output_list = std.ArrayList(Transaction.TxOutput).init(self.arena); + return unspent_txos; +} - const unspent_txs = 
self.findUTxs(pub_key_hash); +///finds a transaction by its ID.This is used to build the `PrevTxMap` +fn findTx(self: BlockChain, tx_id: Transaction.TxID) Transaction { + var itr = BlockIterator.iterator(self.arena, self.db, self.last_hash); - for (unspent_txs) |tx| { - for (tx.tx_out.items) |output| { - if (output.isLockedWithKey(pub_key_hash)) { - tx_output_list.append(output) catch unreachable; - } + while (itr.next()) |block| { + for (block.transactions.items) |tx| { + if (std.mem.eql(u8, tx.id[0..], tx_id[0..])) return tx; } + if (block.previous_hash[0] == '\x00') break; } - return tx_output_list.toOwnedSlice(); + unreachable; } ///create a new Transaction by moving value from one address to another -fn newUTx(self: BlockChain, amount: usize, from: Wallets.Address, to: Wallets.Address) Transaction { +fn newUTx(self: BlockChain, utxo_cache: UTXOcache, amount: usize, from: Wallets.Address, to: Wallets.Address) Transaction { var input = std.ArrayListUnmanaged(Transaction.TxInput){}; var output = std.ArrayListUnmanaged(Transaction.TxOutput){}; //Before creating new outputs, we first have to find all unspent outputs and ensure that they store enough value. 
- const spendable_txns = self.findSpendableOutputs(Wallet.getPubKeyHash(from), amount); + const spendable_txns = utxo_cache.findSpendableOutputs(Wallet.getPubKeyHash(from), amount); const accumulated_amount = spendable_txns.accumulated_amount; var unspent_output = spendable_txns.unspent_output; + std.log.debug("spendable amount is {d}", .{accumulated_amount}); if (accumulated_amount < amount) { std.log.err("not enough funds to transfer RBC {d} from '{s}' to '{s}'", .{ amount, from, to }); @@ -230,48 +244,6 @@ fn newUTx(self: BlockChain, amount: usize, from: Wallets.Address, to: Wallets.Ad return newtx; } -fn findSpendableOutputs(self: BlockChain, pub_key_hash: Wallets.PublicKeyHash, amount: usize) struct { - accumulated_amount: usize, - unspent_output: TxMap, -} { - var unspent_output = TxMap.init(self.arena); - - const unspentTxs = self.findUTxs(pub_key_hash); - - var accumulated_amount: usize = 0; - - // //The method iterates over all unspent transactions and accumulates their values. - spendables: for (unspentTxs) |tx| { - //When the accumulated value is more or equals to the amount we want to transfer, it stops and returns the - //accumulated value and output indices grouped by transaction IDs. We don’t want to take more than we’re going to spend. 
- for (tx.tx_out.items) |output, out_index| { - if (output.isLockedWithKey(pub_key_hash) and accumulated_amount < amount) { - accumulated_amount += output.value; - unspent_output.putNoClobber(tx.id, out_index) catch unreachable; - - if (accumulated_amount >= amount) { - break :spendables; - } - } - } - } - - return .{ .accumulated_amount = accumulated_amount, .unspent_output = unspent_output }; -} - -///finds a transaction by its ID.This is used to build the `PrevTxMap` -fn findTx(self: BlockChain, tx_id: Transaction.TxID) Transaction { - var itr = Iterator.iterator(self.arena, self.db, self.last_hash); - - while (itr.next()) |block| { - for (block.transactions.items) |tx| { - if (std.mem.eql(u8, tx.id[0..], tx_id[0..])) return tx; - } - if (block.previous_hash[0] == '\x00') break; - } - unreachable; -} - ///take a transaction `tx` finds all previous transactions it references and sign it with KeyPair `wallet_keys` fn signTx(self: BlockChain, tx: *Transaction, wallet_keys: Wallet.KeyPair) void { //Coinbase transactions are not signed because they don't contain real inputs @@ -306,20 +278,6 @@ fn verifyTx(self: BlockChain, tx: Transaction) bool { return tx.verify(prev_txs, fba.allocator()); } -pub fn getBalance(self: BlockChain, address: Wallets.Address) usize { - if (!Wallet.validateAddress(address)) { - std.log.err("address {s} is invalid", .{address}); - std.process.exit(4); - } - var balance: usize = 0; - const utxos = self.findUTxOs(Wallet.getPubKeyHash(address)); - - for (utxos) |utxo| { - balance += utxo.value; - } - return balance; -} - pub fn sendValue(self: *BlockChain, amount: usize, from: Wallets.Address, to: Wallets.Address) void { assert(amount > 0); assert(!std.mem.eql(u8, &from, &to)); @@ -332,12 +290,20 @@ pub fn sendValue(self: *BlockChain, amount: usize, from: Wallets.Address, to: Wa std.log.err("recipient address {s} is invalid", .{to}); std.process.exit(@enumToInt(ExitCodes.invalid_wallet_address)); } - var new_transaction = self.newUTx(amount, 
from, to); - - self.mineBlock(&.{new_transaction}); + const cache = UTXOcache.init(self.db, self.arena); + var new_transaction = self.newUTx(cache, amount, from, to); + //The reward is just a coinbase transaction. When a mining node starts mining a new block, + //it takes transactions from the queue and prepends a coinbase transaction to them. + //The coinbase transaction’s only output contains miner’s public key hash. + //In this implementation, the one who creates a transaction mines the new block, and thus, receives a reward. + const rewardtx = Transaction.initCoinBaseTx(self.arena, from, WALLET_STORAGE); + const block = self.mineBlock(&.{ rewardtx, new_transaction }); + + cache.update(block); } test "getBalance , sendValue" { + if (true) return error.SkipZigTest; var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); diff --git a/src/Lmdb.zig b/src/Lmdb.zig index d33ec4f..7991274 100644 --- a/src/Lmdb.zig +++ b/src/Lmdb.zig @@ -1,5 +1,5 @@ -const mdb = struct { - usingnamespace @cImport({ +pub const mdb = struct { + pub usingnamespace @cImport({ @cInclude("lmdb.h"); }); }; @@ -14,24 +14,35 @@ const err = std.os.E; pub const Lmdb = @This(); const serializer = @import("serializer.zig"); -const BLOCK_DB = @import("utils.zig").BLOCK_DB; +const BLOCK_DB = @import("Blockchain.zig").BLOCK_DB; const Env = mdb.MDB_env; -const Key = mdb.MDB_val; -const Val = mdb.MDB_val; +pub const Key = mdb.MDB_val; +pub const Val = mdb.MDB_val; const Txn = mdb.MDB_txn; const DbHandle = mdb.MDB_dbi; -const TxnType = enum { rw, ro }; +//TODO: since we can get this from the environment dont store it in this struct +///Special options for this environment +pub const TxnType = enum(c_uint) { + ///Use a writeable memory map unless MDB_RDONLY is set. This is faster and uses fewer mallocs, + //but loses protection from application bugs like wild pointer writes and other bad updates into the database. + rw = mdb.MDB_WRITEMAP, + ///Open the environment in read-only mode. 
No write operations will be allowed. + //LMDB will still modify the lock file - except on read-only filesystems, where LMDB does not use locks. + ro = mdb.MDB_RDONLY, +}; + db_env: *Env, txn: ?*Txn = null, txn_type: TxnType, -db_handle: DbHandle = undefined, +db_handle: DbHandle = std.math.maxInt(c_uint), ///`db_path` is the directory in which the database files reside. This directory must already exist and be writable. -///initialize db environment (mmap file) specifing the db mode `.rw/.ro` -///make sure to start a transaction .ie startTxn() fn before calling any db manipulation fn's -///a maximum of two named db's are allowed +/// `initdb` fn initializes the db environment (mmap file) specifing the db mode `.rw/.ro`. +///Make sure to start a transaction .ie startTxn() fn before calling any db manipulation fn's +///A maximum of two named db's are allowed +///if the environment is opened in read-only mode No write operations will be allowed. pub fn initdb(db_path: []const u8, txn_type: TxnType) Lmdb { var db_env: ?*Env = undefined; const env_state = mdb.mdb_env_create(&db_env); @@ -41,8 +52,7 @@ pub fn initdb(db_path: []const u8, txn_type: TxnType) Lmdb { const db_limit_state = mdb.mdb_env_set_maxdbs(db_env, max_num_of_dbs); checkState(db_limit_state) catch unreachable; - //if .ro open the environment in read-only mode. No write operations will be allowed. - const db_flags: c_uint = if (txn_type == .ro) mdb.MDB_RDONLY else 0; + const db_flags = @enumToInt(txn_type); const permissions: c_uint = 0o0600; //octal permissions for created files in db_dir const open_state = mdb.mdb_env_open(db_env, db_path.ptr, db_flags, permissions); checkState(open_state) catch |open_err| switch (open_err) { @@ -65,87 +75,233 @@ pub fn deinitdb(lmdb: Lmdb) void { mdb.mdb_env_close(lmdb.db_env); } +pub const DbTxnOption = packed struct { + ///Create the named database if it doesn't exist. + ///This option is not allowed in a read-only transaction or a read-only environment. 
+ rw: bool, + ///Duplicate keys may be used in the database. + ///(Or, from another perspective, keys may have multiple data items, stored in sorted order.) + dup: bool = false, +}; + ///start a transaction in rw/ro mode and get a db handle for db manipulation -///commit changes with commitTxns() if .rw / doneReading() if .ro -pub fn startTxn(lmdb: Lmdb, txn_type: TxnType, db_name: []const u8) Lmdb { - const txn = beginTxn(lmdb, txn_type); - const handle = openDb(.{ .db_env = lmdb.db_env, .txn = txn, .txn_type = txn_type }, db_name); +///commit changes with commitTxns() if .rw else doneReading() if .ro +pub fn startTxn(lmdb: Lmdb) *Txn { + // This transaction will not perform any write operations if ro. + const flags: c_uint = if (lmdb.txn_type == .ro) mdb.MDB_RDONLY else 0; + const parent = null; //no parent + var txn: ?*Txn = undefined; //where the new #MDB_txn handle will be stored + const txn_state = mdb.mdb_txn_begin(lmdb.db_env, parent, flags, &txn); + checkState(txn_state) catch unreachable; + + return txn.?; +} + +//TODO:maybe support other flags for db like MDB_DUPSORT && MDB_DUPFIXED +//A single transaction can open multiple databases +pub fn setDbOpt(lmdb: Lmdb, db_txn: *Txn, db_txn_option: DbTxnOption, comptime db_name: []const u8) void { + //Create the named database if it doesn't exist. 
+ //This option is not allowed in a read-only transaction or a read-only environment + var db_flags: c_uint = 0; + if (lmdb.txn_type == .rw and db_txn_option.rw) { + db_flags |= mdb.MDB_CREATE; + } else if (lmdb.txn_type == .ro and db_txn_option.rw) { + @panic("Can't create a new database " ++ db_name ++ " in a read-only environment or transaction"); + } + + if (db_txn_option.dup) db_flags |= mdb.MDB_DUPSORT; + + var db_handle: mdb.MDB_dbi = undefined; //dbi Address where the new #MDB_dbi handle will be stored + const db_state = mdb.mdb_dbi_open(db_txn, db_name.ptr, db_flags, &db_handle); + checkState(db_state) catch unreachable; +} + +//from https://bugs.openldap.org/show_bug.cgi?id=10005 +//The persistent flags you specified when the DB was created +//are stored in the DB record and retrieved when the DB is opened. +//Flags specified to mdb_dbi_open at any other time are ignored. +pub fn openDb(lmdb: Lmdb, db_txn: *Txn, comptime db_name: []const u8) Lmdb { + var db_handle: mdb.MDB_dbi = undefined; //dbi Address where the new #MDB_dbi handle will be stored + const DEFAULT_FLAGS = 0; + const db_state = mdb.mdb_dbi_open(db_txn, db_name.ptr, DEFAULT_FLAGS, &db_handle); + checkState(db_state) catch unreachable; return .{ .db_env = lmdb.db_env, - .txn = txn, - .txn_type = txn_type, - .db_handle = handle, + .txn = db_txn, + .txn_type = lmdb.txn_type, + .db_handle = db_handle, }; } - +//TODO: make openDb consistent with setDbOpt and openDb ///open a different db in an already open transaction -pub fn openNewDb(lmdb: Lmdb, db_name: []const u8) Lmdb { +pub fn openNewDb(lmdb: Lmdb, db_txn_option: DbTxnOption, db_name: []const u8) Lmdb { //make sure a transaction has been created already ensureValidState(lmdb); - const handle = openDb(lmdb, db_name); + const handle = openDb(lmdb, db_txn_option, db_name); return .{ - .db_handle = lmdb.db_env, + .db_env = lmdb.db_env, .txn = lmdb.txn.?, .txn_type = lmdb.txn_type, .db_handle = handle, }; } -fn beginTxn(lmdb: Lmdb, txn_type: 
TxnType) *Txn { - // This transaction will not perform any write operations if ro. - const flags: c_uint = if (txn_type == .ro) mdb.MDB_RDONLY else 0; - if (flags != mdb.MDB_RDONLY and lmdb.txn_type == .ro) { - panic("Cannot begin a read-write transaction in a read-only environment", .{}); +pub inline fn ensureValidState(lmdb: Lmdb) void { + assert(lmdb.txn != null); + assert(lmdb.db_handle != std.math.maxInt(c_uint)); +} + +const DeleteAction = enum { + //when there are no duplicates .ie MDB_DUPSORT isn't enabled + single, + //when MDB_DUPSORT is enabled and you want to delete a specific duplicate key/value pair + exact, + //when MDB_DUPSORT is enabled, delete all key/value pairs that match the key + all, +}; +///delete entry in db with key `key` +///Use If the database supports sorted duplicate data items (MDB_DUPSORT) else the data parameter is ignored. +///because If the database supports sorted duplicates and the data parameter is NULL, all of the duplicate data items +///for the key will be deleted. While, if the data parameter is non-NULL only the matching data item will be deleted. 
+pub fn del(lmdb: Lmdb, key: []const u8, comptime del_opt: DeleteAction, data: anytype) !void { + ensureValidState(lmdb); + var del_key = dbKey(key); + + switch (del_opt) { + .exact => { + const db_flags = try DbFlags.flags(lmdb); + if (db_flags.isDupSorted()) { + const serialized_data = serializer.serialize(data); + var del_data = dbValue(serialized_data); + + const del_state = mdb.mdb_del(lmdb.txn.?, lmdb.db_handle, &del_key, &del_data); + try checkState(del_state); + } else unreachable; + }, + .all, .single => { + const del_state = mdb.mdb_del(lmdb.txn.?, lmdb.db_handle, &del_key, null); + try checkState(del_state); + }, } - const parent = null; //no parent - var txn: ?*Txn = undefined; //where the new #MDB_txn handle will be stored - const txn_state = mdb.mdb_txn_begin(lmdb.db_env, parent, flags, &txn); - checkState(txn_state) catch unreachable; +} - return txn.?; +///for the special case of deleting an exact item where the data contains slices +pub fn delDupsAlloc(lmdb: Lmdb, allocator: std.mem.Allocator, key: []const u8, data: anytype) !void { + ensureValidState(lmdb); + //This function will return MDB_NOTFOUND if the specified key/data pair is not in the database. + const db_flags = try DbFlags.flags(lmdb); + if (db_flags.isDupSorted()) { + var del_key = dbKey(key); + const serialized_data = serializer.serializeAlloc(allocator, data); + var del_data = dbValue(serialized_data); + + const del_state = mdb.mdb_del(lmdb.txn.?, lmdb.db_handle, &del_key, &del_data); + try checkState(del_state); + } else unreachable; } -//TODO:may be support other flags for db like MDB_DUPSORT && MDB_DUPFIXED -fn openDb(lmdb: Lmdb, db_name: []const u8) DbHandle { - //Create the named database if it doesn't exist. This option is not allowed in a read-only transaction or a read-only environment. 
- const db_flags: c_uint = if (lmdb.txn_type == .rw) mdb.MDB_CREATE else 0; +const RemoveAction = enum(u1) { + empty, + delete_and_close, +}; - var db_handle: mdb.MDB_dbi = undefined; //dbi Address where the new #MDB_dbi handle will be stored - const db_state = mdb.mdb_dbi_open(lmdb.txn.?, db_name.ptr, db_flags, &db_handle); - checkState(db_state) catch unreachable; - return db_handle; +inline fn remove(lmdb: Lmdb, action: RemoveAction) void { + //0 to empty the DB, 1 to delete it from the environment and close the DB handle. + const empty_db_state = mdb.mdb_drop(lmdb.txn.?, lmdb.db_handle, @enumToInt(action)); + checkState(empty_db_state) catch unreachable; } -inline fn ensureValidState(lmdb: Lmdb) void { - assert(lmdb.txn != null); - assert(lmdb.db_handle != undefined); +///Empty the DB `db_name` +pub fn emptyDb(lmdb: Lmdb) void { + ensureValidState(lmdb); + + remove(lmdb, .empty); } -///delete entry in db with key `key_val` -///if db was opened with MDB_DUPSORT use `delDups` instead -pub fn del(lmdb: Lmdb, key_val: []const u8) !void { +/// Delete db `db_name` from the environment and close the DB handle. +pub fn delDb(lmdb: Lmdb) void { ensureValidState(lmdb); - const del_state = mdb.mdb_del(lmdb.txn.?, lmdb.db_handle, &key(key_val), null); - try checkState(del_state); -} -///Use If the database supports sorted duplicate data items (MDB_DUPSORT) else the data parameter is ignored. -///because If the database supports sorted duplicates and the data parameter is NULL, all of the duplicate data items -///for the key will be deleted. While, if the data parameter is non-NULL only the matching data item will be deleted. -pub fn delDups(_: Lmdb, _: []const u8, _: anytype) void { - //This function will return MDB_NOTFOUND if the specified key/data pair is not in the database. 
- panic("TODO"); + remove(lmdb, .delete_and_close); } -//TODO:when MDB_DUPSORT is supported then support MDB_NODUPDATA flag -fn insert(lmdb: Lmdb, serialized_data: []const u8, key_val: []const u8) !void { - const insert_flags: c_uint = mdb.MDB_NOOVERWRITE; // don't overwrite data if key already exist +const DbFlags = struct { + flags: c_uint, + const Self = @This(); + + pub fn flags(lmdb: Lmdb) !DbFlags { + ensureValidState(lmdb); + + var set_flags: c_uint = undefined; + const get_flags_state = mdb.mdb_dbi_flags(lmdb.txn, lmdb.db_handle, &set_flags); + try checkState(get_flags_state); + + return .{ .flags = set_flags }; + } + + fn isDupSorted(self: Self) bool { + return if ((self.flags & mdb.MDB_DUPSORT) == mdb.MDB_DUPSORT) true else false; + } +}; + +const InsertFlags = enum { + //allow duplicate key/data pairs + dup_data, + //allow duplicate keys but not duplicate key/data pairs + no_dup_data, + //disallow duplicate keys even if duplicates are allowed + no_overwrite, + //replace previously existing data, use this with case else you might loss overwriten data + overwrite, +}; + +fn insert(lmdb: Lmdb, key: []const u8, serialized_data: []const u8, flags: InsertFlags) !void { + const set_flags = try DbFlags.flags(lmdb); + + const DEFAULT_BEHAVIOUR = 0; + const ALLOW_DUP_DATA = 0; + // zig fmt: off + const insert_flags: c_uint = + //enter the new key/data pair only if both key and value does not already appear in the database. + //that is allow duplicate keys but not both duplicate keys and values + if (set_flags.isDupSorted() and flags == .no_dup_data ) + // Only for MDB_DUPSORT + // For put: don't write if the key and data pair already exist. + // For mdb_cursor_del: remove all duplicate data items. + mdb.MDB_NODUPDATA + //default behavior: allow adding a duplicate key/data item if duplicates are allowed (MDB_DUPSORT) + else if (set_flags.isDupSorted() and flags == .dup_data ) + ALLOW_DUP_DATA + // if the database supports duplicates (MDB_DUPSORT). 
The data parameter will be set to point to the existing item. + else if (set_flags.isDupSorted() and flags == .no_overwrite ) mdb.MDB_NOOVERWRITE + //enter the new key/data pair only if the key does not already appear in the database + //that is: don't allow overwriting keys + else if (flags == .no_overwrite ) mdb.MDB_NOOVERWRITE + //The default behavior is to enter the new key/data pair, + //replacing any previously existing key if duplicates are disallowed + //allow overwriting data + else DEFAULT_BEHAVIOUR; + // zig fmt: on + if (flags == .overwrite and set_flags.isDupSorted()) { + del(lmdb, key, .all, {}) catch unreachable; + + return lmdb.insert(key, serialized_data, .no_overwrite) catch unreachable; + } else if (flags == .overwrite) { + //use default behavior + return try mdbput(lmdb, insert_flags, key, serialized_data); + } + + try mdbput(lmdb, insert_flags, key, serialized_data); +} +inline fn mdbput(lmdb: Lmdb, insert_flags: c_uint, key: []const u8, serialized_data: []const u8) !void { + var insert_key = dbKey(key); + var value_data = dbValue(serialized_data[0..]); const put_state = mdb.mdb_put( lmdb.txn.?, lmdb.db_handle, - &key(key_val), - &value(serialized_data[0..]), + &insert_key, + &value_data, insert_flags, ); try checkState(put_state); @@ -153,40 +309,59 @@ fn insert(lmdb: Lmdb, serialized_data: []const u8, key_val: []const u8) !void { ///insert new key/data pair without overwriting already inserted pair ///if `data` contains pointers or slices use `putAlloc` -pub fn put(lmdb: Lmdb, key_val: []const u8, data: anytype) !void { +pub fn put(lmdb: Lmdb, key: []const u8, data: anytype) !void { ensureValidState(lmdb); const serialized_data = serializer.serialize(data); - try insert(lmdb, serialized_data[0..], key_val); + try insert(lmdb, key, serialized_data[0..], .no_overwrite); } ///use `putAlloc` when data contains slices or pointers ///recommend you use fixedBufferAllocator or ArenaAllocator -pub fn putAlloc(lmdb: Lmdb, fba: std.mem.Allocator, 
key_val: []const u8, data: anytype) !void { +pub fn putAlloc(lmdb: Lmdb, fba: std.mem.Allocator, key: []const u8, data: anytype) !void { ensureValidState(lmdb); const serialized_data = serializer.serializeAlloc(fba, data); - try insert(lmdb, serialized_data[0..], key_val); + try insert(lmdb, key, serialized_data[0..], .no_overwrite); } -///insert/update already existing key/data pair -pub fn update(lmdb: Lmdb, key_val: []const u8, data: anytype) !void { +pub fn putDup(lmdb: Lmdb, key: []const u8, data: anytype, dup_data: bool) !void { ensureValidState(lmdb); - const update_flag: c_uint = 0; //allow overwriting data - const serialized_data = serializer.serialize(data); + if (dup_data) { + try insert(lmdb, key, serialized_data[0..], .dup_data); + } else { + try insert(lmdb, key, serialized_data[0..], .no_dup_data); + } +} - const update_state = mdb.mdb_put( - lmdb.txn.?, - lmdb.db_handle, - &key(key_val), - &value(serialized_data[0..]), - update_flag, - ); - try checkState(update_state); +pub fn putDupAlloc(lmdb: Lmdb, allocator: std.mem.Allocator, key: []const u8, data: anytype, dup_data: bool) !void { + ensureValidState(lmdb); + + const serialized_data = serializer.serializeAlloc(allocator, data); + if (dup_data) { + try insert(lmdb, key, serialized_data[0..], .dup_data); + } else { + try insert(lmdb, key, serialized_data[0..], .no_dup_data); + } } +///insert/update already existing key/data pair +pub fn update(lmdb: Lmdb, key: []const u8, data: anytype) !void { + ensureValidState(lmdb); + + const serialized_data = serializer.serialize(data); + try insert(lmdb, key, serialized_data[0..], .overwrite); +} + +///insert/update already existing key/data pair +pub fn updateAlloc(lmdb: Lmdb, allocator: std.mem.Allocator, key: []const u8, data: anytype) !void { + ensureValidState(lmdb); + + const serialized_data = serializer.serializeAlloc(allocator, data); + try insert(lmdb, key, serialized_data[0..], .overwrite); +} ///commit all transaction on the current db handle 
///should usually be called before the end of fn's to save db changes pub fn commitTxns(lmdb: Lmdb) void { @@ -213,7 +388,7 @@ pub fn updateRead(lmdb: Lmdb) void { } ///cancel/discard all transaction on the current db handle -pub fn abortTxns(lmdb: Lmdb) void { +fn abortTxns(lmdb: Lmdb) void { mdb.mdb_txn_abort(lmdb.txn.?); } @@ -222,15 +397,17 @@ pub fn cast(comptime T: type, any_ptr: anytype) T { return @intToPtr(T, @ptrToInt(any_ptr)); } -///get `key_val` as `T` when it doesn't require allocation -pub fn get(lmdb: Lmdb, comptime T: type, key_val: []const u8) !T { +///get `key` as `T` when it doesn't require allocation +pub fn get(lmdb: Lmdb, comptime T: type, key: []const u8) !T { ensureValidState(lmdb); var data: Val = undefined; + + var get_key = dbKey(key); const get_state = mdb.mdb_get( lmdb.txn.?, lmdb.db_handle, - &key(key_val), + &get_key, &data, ); @@ -238,32 +415,33 @@ pub fn get(lmdb: Lmdb, comptime T: type, key_val: []const u8) !T { return serializer.deserialize(T, data.mv_data, data.mv_size); } -///get the `key_val` as `T` when it requires allocation .ie it contains pointers/slices -///recommend using fixedBufferAllocator or ArenaAllocator -pub fn getAlloc(lmdb: Lmdb, comptime T: type, fba: std.mem.Allocator, key_val: []const u8) !T { +///get `key` as `T` when it doesn't require allocation +pub fn getAlloc(lmdb: Lmdb, comptime T: type, fba: std.mem.Allocator, key: []const u8) !T { ensureValidState(lmdb); var data: Val = undefined; + var get_key = dbKey(key); const get_state = mdb.mdb_get( lmdb.txn.?, lmdb.db_handle, - &key(key_val), + &get_key, &data, ); + try checkState(get_state); return serializer.deserializeAlloc(T, fba, data.mv_data, data.mv_size); } -fn key(data: []const u8) Key { - return value(data); +fn dbKey(data: []const u8) Key { + return dbValue(data); } -fn value(data: []const u8) Val { +fn dbValue(data: []const u8) Val { return .{ .mv_size = data.len, .mv_data = cast(*anyopaque, data.ptr) }; } ///check state of operation to make sure 
there where no errors -fn checkState(state: c_int) !void { +pub fn checkState(state: c_int) !void { switch (state) { //lmdb errors Return Codes //Successful result */ @@ -346,7 +524,7 @@ fn checkState(state: c_int) !void { }, //Unsupported size of key/DB name/data, or wrong DUPFIXED size */ mdb.MDB_BAD_VALSIZE => { - return error.UnsupportedComponentSize; + return error.UnsupportedKeyOrDataSize; }, //The specified DBI was changed unexpectedly */ mdb.MDB_BAD_DBI => { @@ -408,8 +586,26 @@ test "test db key:str / value:str" { } const rtxn = dbh.startTxn(.ro, BLOCK_DB); - defer rtxn.doneReading(); - try testing.expectEqualSlices(u8, "value", (try rtxn.get([5]u8, "key"))[0..]); + { + try testing.expectEqualSlices(u8, "value", (try rtxn.get([5]u8, "key"))[0..]); + defer rtxn.doneReading(); + } + + var slicetxn = dbh.startTxn(.rw, BLOCK_DB); + const slice_data = [_][]const u8{ "hello", "serializer" }; + { + try slicetxn.putAlloc(allocator, "slice", &slice_data); + defer slicetxn.commitTxns(); + } + + slicetxn = dbh.startTxn(.ro, BLOCK_DB); + { + const deserialized_slice_data = try slicetxn.getAlloc([2][]const u8, allocator, "slice"); + for (slice_data, 0..) 
|str, index| { + try testing.expectEqualStrings(str[0..], deserialized_slice_data[index]); + } + defer slicetxn.doneReading(); + } } test "test db update" { @@ -449,3 +645,78 @@ test "test db update" { try testing.expectEqualSlices(u8, data.ochar[0..], gotten_data.ochar[0..]); try testing.expect(data.int == gotten_data.int); } + +//TODO: review the test below for it relevance now +test "serialization/deserialization data" { + const Data = struct { + char: [21]u8, + int: u8, + ochar: [21]u8, + }; + const data = Data{ + .char = "is my data still here".*, + .int = 254, + .ochar = "is my data still here".*, + }; + + const file = try std.fs.cwd().createFile("serialized.data", .{ .read = true }); + //defer statements are runned in the reverse order of execution + defer std.fs.cwd().deleteFile("serialized.data") catch unreachable; + defer file.close(); + + const writer = file.writer(); + try serializer.serialize(writer, Data, data); + try file.seekTo(0); + + const reader = file.reader(); + const deserialized_data = try serializer.deserialize(reader, Data); + + try testing.expectEqualSlices(u8, data.char[0..], deserialized_data.char[0..]); + try testing.expectEqualSlices(u8, data.ochar[0..], deserialized_data.ochar[0..]); + try testing.expect(data.int == deserialized_data.int); +} + +test "serialization/deserialization packed data" { + const Data = extern struct { + char: [21]u8, + int: u8, + ochar: [21]u8, + }; + + const data = Data{ + .char = "is my data still here".*, + .int = 254, + .ochar = "is my data still here".*, + }; + + const file = try std.fs.cwd().createFile("serialized.data", .{ .read = true }); + //defer statements are runned in the reverse order of execution + defer std.fs.cwd().deleteFile("serialized.data") catch unreachable; + defer file.close(); + + const writer = file.writer(); + try serializer.serialize(writer, Data, data); + try file.seekTo(0); + + const reader = file.reader(); + const deserialized_data = try serializer.deserialize(reader, Data); + + 
try testing.expectEqualSlices(u8, data.char[0..], deserialized_data.char[0..]); + try testing.expectEqualSlices(u8, data.ochar[0..], deserialized_data.ochar[0..]); + try testing.expect(data.int == deserialized_data.int); +} + +test "readStruct/writeStruct with array field" { + const Data = extern struct { arr: [3]u8 }; + const data = Data{ .arr = [_]u8{'0'} ** 3 }; + + var buf: [@sizeOf(Data)]u8 = undefined; + var fbs = std.io.fixedBufferStream(&buf); + + try fbs.writer().writeStruct(data); + try fbs.seekTo(0); + + const read_data = try fbs.reader().readStruct(Data); + + try testing.expectEqualSlices(u8, data.arr[0..], read_data.arr[0..]); +} diff --git a/src/UTXOcache.zig b/src/UTXOcache.zig new file mode 100644 index 0000000..bc5d6af --- /dev/null +++ b/src/UTXOcache.zig @@ -0,0 +1,213 @@ +const std = @import("std"); +const Lmdb = @import("Lmdb.zig"); +const BlockChain = @import("Blockchain.zig"); +const Wallets = @import("Wallets.zig"); +const Wallet = Wallets.Wallet; +const Transaction = @import("Transaction.zig"); +const LmdbCursor = @import("LmdbCursor.zig"); +const ExitCodes = @import("utils.zig").ExitCodes; +//since key type is already know find a way to specify LmdbCursor type without it +const Cursor = LmdbCursor.LmdbCursor(Transaction.TxID, []const Transaction.TxOutput); +const Block = @import("Block.zig"); + +const TxMap = BlockChain.TxMap; +const UTXOcache = @This(); + +pub const UTXO_DB = "chainstate"; + +db: Lmdb, +arena: std.mem.Allocator, + +/// initializes the cache and opens it as `.ro` by default +pub fn init(db: Lmdb, arena: std.mem.Allocator) UTXOcache { + return .{ .db = db, .arena = arena }; +} + +pub fn reindex(utxo_cache: UTXOcache, bc: BlockChain) void { + var buffer: [1024 * 1024]u8 = undefined; + var fba = std.heap.FixedBufferAllocator.init(&buffer); + const allocator = fba.allocator(); + + const unspent_txos = bc.findAndMapAllTxIDsToUTxOs(); + var itr = unspent_txos.iterator(); + + const txn = utxo_cache.db.startTxn(); + 
utxo_cache.db.setDbOpt(txn, .{ .rw = true }, UTXO_DB); + + const db = utxo_cache.db.openDb(txn, UTXO_DB); + defer db.commitTxns(); + + db.emptyDb(); + + while (itr.next()) |entry| { + const tx_id: Transaction.TxID = entry.key_ptr.*; + const utx_outs: []const Transaction.TxOutput = entry.value_ptr.*; + + db.putAlloc(allocator, tx_id[0..], utx_outs) catch unreachable; + } +} + +pub fn findSpendableOutputs( + utxo_cache: UTXOcache, + pub_key_hash: Wallets.PublicKeyHash, + amount: usize, +) struct { + accumulated_amount: usize, + unspent_output: TxMap, +} { + var buffer: [1024 * 1024]u8 = undefined; + var fba = std.heap.FixedBufferAllocator.init(&buffer); + + var unspent_output = TxMap.init(utxo_cache.arena); + + var accumulated_amount: usize = 0; + + const txn = utxo_cache.db.startTxn(); + const db = utxo_cache.db.openDb(txn, UTXO_DB); + defer db.doneReading(); + + const cursor = Cursor.init(db); + defer cursor.deinit(); + + var iterator = cursor.iterator(fba); + defer iterator.deinit(); + + // const unspentTxs = self.findUTxs(pub_key_hash); + var next = iterator.start(); + // //The method iterates over all unspent transactions and accumulates their values. + while (next) |entry| : (next = iterator.next()) { + //accumulated value and output indices grouped by transaction IDs. We don’t want to take more than we’re going to spend. + const unspent_txos = entry.value; + for (unspent_txos, 0..) 
|output, out_index| { + if (output.isLockedWithKey(pub_key_hash) and accumulated_amount < amount) { + const unspent_output_txid = entry.key; + accumulated_amount += output.value; + unspent_output.putNoClobber(unspent_output_txid, out_index) catch unreachable; + + // if (accumulated_amount >= amount) { + // break :spendables; + // } + } + } + } + + return .{ .accumulated_amount = accumulated_amount, .unspent_output = unspent_output }; +} + +///find unspent transaction outputs +pub fn findUnlockableOutputs(utxo_cache: UTXOcache, pub_key_hash: Wallets.PublicKeyHash) []const Transaction.TxOutput { + var buffer: [1024 * 1024]u8 = undefined; + var fba = std.heap.FixedBufferAllocator.init(buffer[0..]); + + var utx_output_list = std.ArrayList(Transaction.TxOutput).init(utxo_cache.arena); + + const txn = utxo_cache.db.startTxn(); + const db = utxo_cache.db.openDb(txn, UTXO_DB); + defer db.doneReading(); + + const cursor = Cursor.init(db); + defer cursor.deinit(); + + var iterator = cursor.iterator(fba); + defer iterator.deinit(); + + // const unspent_txs = self.findUTxs(pub_key_hash); + var iter = iterator.start(); + while (iter) |entry| : (iter = iterator.next()) { + for (entry.value) |output| { + if (output.isLockedWithKey(pub_key_hash)) { + utx_output_list.append(output) catch unreachable; + } + } + } + return utx_output_list.toOwnedSlice() catch unreachable; +} + +pub fn update(utxo_cache: UTXOcache, block: Block) void { + var buffer: [1024 * 1024]u8 = undefined; + var fba = std.heap.FixedBufferAllocator.init(&buffer); + const allocator = fba.allocator(); + const txn = utxo_cache.db.startTxn(); + + const db = utxo_cache.db.openDb(txn, UTXO_DB); + defer db.commitTxns(); + + const cursor = Cursor.init(db); + defer cursor.deinit(); + + for (block.transactions.items) |tx| { + if (!tx.isCoinBaseTx()) { + var updated_output = std.ArrayList(Transaction.TxOutput).init(allocator); + defer updated_output.deinit(); + + for (tx.tx_in.items) |txin| { + const utxos = 
db.getAlloc([]const Transaction.TxOutput, allocator, txin.out_id[0..]) catch unreachable; + + //Updating means removing spent outputs and adding unspent outputs from newly mined transactions. + for (utxos, 0..) |output, out_idx| { + //if the out_idx isn't equal to the txin.out_idx it means that,that output hasn't been spent + if (out_idx != txin.out_index) { + updated_output.append(output) catch unreachable; + } + } + + //if all the outputs of the txin are spent then we need to remove the outputs of that TxID from the + //cache + if (updated_output.items.len == 0) { + //TODO: check that this doesn't affect the preoviously inserted outputs + cursor.print("before txn.delDupsAlloc"); + cursor.print("after txn.delDupsAlloc"); + } else { + //update the cache with the new output_list of the txin.out_id + db.updateAlloc(allocator, txin.out_id[0..], updated_output.items) catch unreachable; + } + } + } + + var uoutput = std.ArrayList(Transaction.TxOutput).initCapacity(allocator, tx.tx_out.items.len) catch unreachable; + defer uoutput.deinit(); + + //TODO: maybe this should be a single transaction so that an ArrayList woun't be needed + for (tx.tx_out.items) |txout| { + uoutput.append(txout) catch unreachable; + } + + //TODO: maybe we should put value rather + // txn.putAlloc(allocator, tx.id[0..], uoutput.items) catch |key_data_already_exist| switch (key_data_already_exist) { + // //when the exact same key and data pair already exist in the db + // error.KeyAlreadyExist => { + // const previous_outputs = txn.getAlloc([]const Transaction.TxOutput, allocator, tx.id[0..]) catch unreachable; + // var previous_value_sum: usize = 0; + // + // for (previous_outputs) |poutputs| { + // previous_value_sum += poutputs.value; + // } + // var current_value_sum = previous_value_sum; + // for (uoutput.items) |poutputs| { + // current_value_sum += poutputs.value; + // } + // + // const new_output_with_all_value = Transaction.TxOutput{ .value = current_value_sum, .pub_key_hash = 
previous_outputs[0].pub_key_hash }; + // txn.updateAlloc(allocator, tx.id[0..], &.{new_output_with_all_value}) catch unreachable; + // }, + // else => unreachable, + // }; + cursor.print("before txn.putDupAlloc"); + db.putDupAlloc(allocator, tx.id[0..], uoutput.items, true) catch unreachable; + cursor.print("after txn.putDupAlloc"); + } +} + +pub fn getBalance(cache: UTXOcache, address: Wallets.Address) usize { + if (!Wallet.validateAddress(address)) { + std.log.err("address {s} is invalid", .{address}); + std.process.exit(@enumToInt(ExitCodes.invalid_wallet_address)); + } + var balance: usize = 0; + const utxos = cache.findUnlockableOutputs(Wallet.getPubKeyHash(address)); + + for (utxos) |utxo| { + balance += utxo.value; + } + return balance; +} diff --git a/src/main.zig b/src/main.zig index 2da1125..ce2f9bf 100644 --- a/src/main.zig +++ b/src/main.zig @@ -9,15 +9,16 @@ const gpa = if (builtin.link_libc and builtin.mode != .Debug) else default_allocator.allocator(); -// TODO: improve memory usage and recycling at appropiate places. +//TODO: improve memory usage and recycling at appropiate places. 
// set buffers in local scope based on the sizeof the struct or types stored or allocated //TODO: rethink allocations and memory management pattern used,maybe pass the allocator type so you can free memory //if the data generated at the step won't be used again or isn't useful again +//TODO: update Hex formatting to use X/x pub fn main() !void { defer if (builtin.mode == .Debug) { _ = default_allocator.deinit(); }; - var buf: [1024 * 1024 * 12]u8 = undefined; + var buf: [1024 * 1024 * 7]u8 = undefined; var fba = std.heap.FixedBufferAllocator.init(&buf); var arena = std.heap.ArenaAllocator.init(fba.allocator()); diff --git a/src/serializer.zig b/src/serializer.zig index f7893d4..c0caaec 100644 --- a/src/serializer.zig +++ b/src/serializer.zig @@ -66,7 +66,7 @@ pub fn deserializeAlloc(comptime T: type, fballocator: std.mem.Allocator, data: /// serialized a type in memory fn inMemSerialize(type_to_serialize: anytype, serialized_buf: *[@sizeOf(@TypeOf(type_to_serialize))]u8) void { - @memcpy(serialized_buf, @ptrCast([*]const u8, &type_to_serialize), @sizeOf(@TypeOf(type_to_serialize))); + @memcpy(serialized_buf, @ptrCast([*]const u8, &type_to_serialize)); } /// deserialize data from memory @@ -124,7 +124,7 @@ fn inMemDeserialize(comptime T: type, serialized_t: [@sizeOf(T)]u8) T { // debug("{s} has {s}", .{ field.name, field }); // const slice = @bitCast([]const u8, manyptr_to_serialize[size .. 
size + size_of_slice]); // debug("slice ptr contains {s}", .{slice}); -// @memcpy(serialized_buf[size..].ptr, slice.ptr, slice.len); +// @memcpy(serialized_buf[size..].ptr, slice[0..]); // size += size_of_slice; // } else { // const type_size = comptime blk: { @@ -146,7 +146,7 @@ fn inMemDeserialize(comptime T: type, serialized_t: [@sizeOf(T)]u8) T { // } // debug("{} bytes copied", .{size}); // } -// // // @memcpy(serialized_buf, @ptrCast([*]const u8, &type_to_serialize), @sizeOf(@TypeOf(type_to_serialize))); +// // // @memcpy(serialized_buf, @ptrCast([*]const u8, &type_to_serialize)); // } // // pub fn deserialize(comptime T: type, serialized_t: [@sizeOf(T)]u8) T { @@ -157,7 +157,7 @@ fn inMemDeserialize(comptime T: type, serialized_t: [@sizeOf(T)]u8) T { // inline for (fields) |field| { // if (std.meta.trait.isSlice(field.field_type)) { // const size_of_slice = @sizeOf(field.field_type); -// @memcpy(cast([*]u8, des_type.character.ptr), serialized_t[size .. size + size_of_slice].ptr, size_of_slice); +// @memcpy(cast([*]u8, des_type.character.ptr), serialized_t[size .. size + size_of_slice]); // size += size_of_slice; // } else { // const type_size = comptime blk: { @@ -169,7 +169,7 @@ fn inMemDeserialize(comptime T: type, serialized_t: [@sizeOf(T)]u8) T { // }; // //since size might have been modified // const actual_size = @sizeOf(field.field_type); -// @memcpy(des_type.integer, serialized_t[size .. size + actual_size].ptr, actual_size); +// @memcpy(des_type.integer, serialized_t[size .. 
size + actual_size]); // size += type_size; // } // } From dd776d9df494f6e702dae12d8b16fc6a545c50c6 Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Sun, 7 May 2023 21:59:19 +0000 Subject: [PATCH 09/24] Use default upstream lmdb Since @hyc guided me on how to use lmdb dbflags --- .gitmodules | 2 +- deps/lmdb | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitmodules b/.gitmodules index 4283423..2109cc6 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,3 @@ [submodule "deps/lmdb"] path = deps/lmdb - url = git@github.com:Ultra-Code/lmdb.git + url = git@github.com:LMDB/lmdb.git diff --git a/deps/lmdb b/deps/lmdb index ce001a3..3947014 160000 --- a/deps/lmdb +++ b/deps/lmdb @@ -1 +1 @@ -Subproject commit ce001a311d8fb16afbf13df2a1e21d505cb477cb +Subproject commit 3947014aed7ffe39a79991fa7fb5b234da47ad1a From 4a61250e5bf742e0554c05e4b13fd641e6c7fe4b Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Sun, 7 May 2023 22:01:23 +0000 Subject: [PATCH 10/24] Use UTXOcache in Cli and update Iterator to improved Lmdb API --- src/Cli.zig | 30 ++++++++++++++++++++---------- src/Iterator.zig | 7 ++++--- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/src/Cli.zig b/src/Cli.zig index d69944e..3e763d7 100644 --- a/src/Cli.zig +++ b/src/Cli.zig @@ -46,7 +46,10 @@ pub fn run(self: Cli) void { if (chain_name) |name| { const bc_address = std.mem.bytesAsSlice(Wallets.Address, name)[0]; - _ = BlockChain.newChain(db_env, self.arena, bc_address, WALLET_STORAGE); + const bc = BlockChain.newChain(db_env, self.arena, bc_address); + + const utxo_cache = UTXOcache.init(bc.db, self.arena); + utxo_cache.reindex(bc); } else { printUsage(.createchain); } @@ -68,12 +71,14 @@ pub fn run(self: Cli) void { bc.sendValue(amount, from_address, to_address); + const cache = UTXOcache.init(bc.db, bc.arena); + std.debug.print("done sending RBC {d} from '{s}' to '{s}'\n", .{ amount, from_address, to_address }); std.debug.print("'{[from_address]s}' now has a balance of RBC 
{[from_balance]d} and '{[to_address]s}' a balance of RBC {[to_balance]d}\n", .{ .from_address = from_address, - .from_balance = bc.getBalance(from_address), + .from_balance = cache.getBalance(from_address), .to_address = to_address, - .to_balance = bc.getBalance(to_address), + .to_balance = cache.getBalance(to_address), }); } } @@ -89,9 +94,10 @@ pub fn run(self: Cli) void { } } else if (std.mem.eql(u8, argv, "getbalance")) { if (itr.next()) |address| { - const bc = BlockChain.getChain(db_env, self.arena); const users_address = std.mem.bytesAsSlice(Wallets.Address, address)[0]; - const balance = bc.getBalance(users_address); + + const cache = UTXOcache.init(db_env, self.arena); + const balance = cache.getBalance(users_address); std.debug.print("'{[address]s}' has a balance of RBC {[balance]d}\n", .{ .address = users_address, .balance = balance }); } else { printUsage(.getbalance); @@ -99,7 +105,7 @@ pub fn run(self: Cli) void { } else if (std.mem.eql(u8, argv, "printchain")) { const bc = BlockChain.getChain(db_env, self.arena); - var chain_iter = Iterator.iterator(bc.arena, bc.db, bc.last_hash); + var chain_iter = BlockIterator.iterator(bc.arena, bc.db, bc.last_hash); chain_iter.print(); } else if (std.mem.eql(u8, argv, "createwallet")) { const wallets = Wallets.initWallets(self.arena, WALLET_STORAGE); @@ -109,9 +115,11 @@ pub fn run(self: Cli) void { const wallets = Wallets.getWallets(self.arena, WALLET_STORAGE); const address_list = wallets.getAddresses(); - for (address_list) |address, index| { + for (address_list, 0..) 
|address, index| { std.log.info("address {}\n{s}\n", .{ index, address }); } + } else { + printUsage(.help); } } } @@ -121,20 +129,22 @@ fn printUsage(cmd: Cmd) void { .createchain => { std.debug.print( \\Usage: - \\eg.zig build run -- createchain "blockchain name" + \\eg.zig build run -- createchain "wallet address" \\ , .{}); }, .help => { std.debug.print( \\Usage: - \\eg.zig build run -- createchain "blockchain name" + \\eg.zig build run -- createchain "wallet address" \\OR \\zig build run -- printchain \\OR \\zig build run -- createwallet \\OR - \\zig build run -- getbalance "address" + \\zig build run -- listaddress + \\OR + \\zig build run -- getbalance "wallet address" \\OR \\zig build run -- send --amount value --from address --to address \\ diff --git a/src/Iterator.zig b/src/Iterator.zig index b31a44d..c529d9b 100644 --- a/src/Iterator.zig +++ b/src/Iterator.zig @@ -23,10 +23,11 @@ pub const BlockIterator = struct { ///the returned usize is the address of the Block in memory ///the ptr can be obtained with @intToPtr pub fn next(self: *BlockIterator) ?Block { - const txn = self.db.startTxn(.{ .rw = false }, BLOCK_DB); - defer txn.doneReading(); + const txn = self.db.startTxn(); + const db = self.db.openDb(txn, BLOCK_DB); + defer db.doneReading(); - if (txn.getAlloc(Block, self.arena, self.current_hash[0..])) |current_block| { + if (db.getAlloc(Block, self.arena, self.current_hash[0..])) |current_block| { self.current_hash = current_block.previous_hash; return current_block; From f3c874e4f27d98c4d9c6774039dd0c3521381776 Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Sun, 7 May 2023 22:03:12 +0000 Subject: [PATCH 11/24] Use zig's updated crypto and memcpy api Add some crypto types make them easy to use --- src/Transaction.zig | 18 ++++++++++-------- src/Wallets.zig | 8 ++++---- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/src/Transaction.zig b/src/Transaction.zig index bcfe8e8..be39e10 100644 --- a/src/Transaction.zig +++ 
b/src/Transaction.zig @@ -105,7 +105,7 @@ pub fn sign(self: *Transaction, wallet_keys: Wallet.KeyPair, prev_txs: PrevTxMap //The copy will include all the inputs and outputs, but TxInput.sig and TxInput.pub_key are empty var trimmed_tx_copy = self.trimmedCopy(fba); - for (trimmed_tx_copy.tx_in.items) |value_in, in_index| { + for (trimmed_tx_copy.tx_in.items, 0..) |value_in, in_index| { //we use prev_txs because that has signed and verified to help in signing and verifying new transactions if (prev_txs.get(value_in.out_id)) |prev_tx| { //since the public_key of trimmedCopy is empty we store a copy of the pub_key_hash from the transaction output @@ -117,29 +117,31 @@ pub fn sign(self: *Transaction, wallet_keys: Wallet.KeyPair, prev_txs: PrevTxMap var noise: [Wallets.Ed25519.noise_length]u8 = undefined; std.crypto.random.bytes(&noise); - const signature = Wallets.Ed25519.sign(trimmed_tx_copy.id[0..], wallet_keys, noise) catch unreachable; - + const signature = wallet_keys.sign(trimmed_tx_copy.id[0..], noise) catch unreachable; self.tx_in.items[in_index].sig = signature; } } fn copyHashIntoPubKey(pub_key: *Wallets.PublicKey, pub_key_hash: Wallets.PublicKeyHash) void { //copy 0..20 of pub_key_hash into the beginning of pub_key - @memcpy(pub_key[0..], pub_key_hash[0..], @sizeOf(Wallets.PublicKeyHash)); + @memcpy(pub_key.bytes[0..@sizeOf(Wallets.PublicKeyHash)], pub_key_hash[0..]); //recopy 12 bytes from pub_key_hash into 21..end of pub_key - @memcpy(pub_key[@sizeOf(Wallets.PublicKeyHash)..], pub_key_hash[0..], @sizeOf(Wallets.PublicKey) - @sizeOf(Wallets.PublicKeyHash)); + @memcpy( + pub_key.bytes[@sizeOf(Wallets.PublicKeyHash)..], + pub_key_hash[0..(@sizeOf(Wallets.PublicKey) - @sizeOf(Wallets.PublicKeyHash))], + ); } pub fn verify(self: Transaction, prev_txs: PrevTxMap, fba: Allocator) bool { var trimmed_tx_copy = self.trimmedCopy(fba); - for (self.tx_in.items) |value_in, in_index| { + for (self.tx_in.items, 0..) 
|value_in, in_index| { if (prev_txs.get(value_in.out_id)) |prev_tx| { copyHashIntoPubKey(&trimmed_tx_copy.tx_in.items[in_index].pub_key, prev_tx.tx_out.items[value_in.out_index].pub_key_hash); } trimmed_tx_copy.setId(); - - if (Wallets.Ed25519.verify(value_in.sig, trimmed_tx_copy.id[0..], value_in.pub_key)) |_| {} else |err| { + const sig: Wallets.Signature = value_in.sig; + if (sig.verify(trimmed_tx_copy.id[0..], value_in.pub_key)) |_| {} else |err| { std.log.info("public key has a value of {}", .{value_in}); std.log.err("{s} occurred while verifying the transaction", .{@errorName(err)}); return false; diff --git a/src/Wallets.zig b/src/Wallets.zig index 8216e20..19c89d8 100644 --- a/src/Wallets.zig +++ b/src/Wallets.zig @@ -16,9 +16,9 @@ const VERSION = '\x01'; pub const PUB_KEY_LEN = Ed25519.public_length; pub const ADDRESS_SIZE = encodedAddressLenght(); -pub const PrivateKey = [Ed25519.secret_length]u8; -pub const PublicKey = [Ed25519.public_length]u8; -pub const Signature = [Ed25519.signature_length]u8; +pub const PrivateKey = Ed25519.SecretKey; +pub const PublicKey = Ed25519.PublicKey; +pub const Signature = Ed25519.Signature; pub const Address = [ADDRESS_SIZE]u8; pub const PublicKeyHash = [PUB_KEY_HASH_LEN]u8; pub const Checksum = [ADDR_CKSUM_LEN]u8; @@ -190,7 +190,7 @@ pub const Wallet = struct { //https://linuxadictos.com/en/blake3-a-fast-and-parallelizable-secure-cryptographic-hash-function.html //replaces sha256 with Blake3 which is also 256 and faster in software var pk_hash: [Blake3.digest_length]u8 = undefined; - Blake3.hash(pub_key[0..], &pk_hash, .{}); + Blake3.hash(pub_key.bytes[0..], &pk_hash, .{}); //use Blake2b160 as a replacement for bitcoins ripemd-160 https://en.bitcoin.it/wiki/RIPEMD-160 //smaller bit lenght for easy readability for user From 0a573b6fadacb77270017a269dd4130adf1fa935 Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Sun, 7 May 2023 22:05:14 +0000 Subject: [PATCH 12/24] Initial LmdbCursor implementation for cursor operations --- 
src/LmdbCursor.zig | 189 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 189 insertions(+) create mode 100644 src/LmdbCursor.zig diff --git a/src/LmdbCursor.zig b/src/LmdbCursor.zig new file mode 100644 index 0000000..d6e09ff --- /dev/null +++ b/src/LmdbCursor.zig @@ -0,0 +1,189 @@ +const std = @import("std"); +const Lmdb = @import("Lmdb.zig"); +const serializer = @import("serializer.zig"); +const Transaction = @import("Transaction.zig"); +const mdb = Lmdb.mdb; +const Cursor = mdb.MDB_cursor; +const ensureValidState = Lmdb.ensureValidState; +const checkState = Lmdb.checkState; +const Key = Lmdb.Key; +const Val = Lmdb.Key; + +///This is the set of all operations for retrieving data using a cursor. +const CursorGetOperations = enum(mdb.MDB_cursor_op) { + MDB_FIRST = mdb.MDB_FIRST, //Position at first key/data item + MDB_FIRST_DUP, //Position at first data item of current key. Only for MDB_DUPSORT + MDB_GET_BOTH, //Position at key/data pair. Only for MDB_DUPSORT + MDB_GET_BOTH_RANGE, //position at key, nearest data. Only for MDB_DUPSORT + MDB_GET_CURRENT, //Return key/data at current cursor position + //Return key and up to a page of duplicate data items from current cursor position. + //Move cursor to prepare for MDB_NEXT_MULTIPLE. Only for MDB_DUPFIXED + MDB_GET_MULTIPLE, + MDB_LAST, //Position at last key/data item + MDB_LAST_DUP, //Position at last data item of current key. Only for MDB_DUPSORT + MDB_NEXT, //Position at next data item + MDB_NEXT_DUP, //Position at next data item of current key. Only for MDB_DUPSORT + //Return key and up to a page of duplicate data items from next cursor position. + //Move cursor to prepare for MDB_NEXT_MULTIPLE. Only for MDB_DUPFIXED + MDB_NEXT_MULTIPLE, + MDB_NEXT_NODUP, //Position at first data item of next key + MDB_PREV, //Position at previous data item + MDB_PREV_DUP, //Position at previous data item of current key. 
Only for MDB_DUPSORT + MDB_PREV_NODUP, //Position at last data item of previous key + MDB_SET, //Position at specified key + MDB_SET_KEY, //Position at specified key, return key + data + MDB_SET_RANGE, //Position at first key greater than or equal to specified key. +}; + +//TODO: switch on Alloc or non Alloc fn based on if type requires allocation +pub fn LmdbCursor(comptime ckey: type, comptime cvalue: type) type { + return struct { + const Self = @This(); + const KeyValue = struct { key: ckey, value: cvalue }; + lmdb: Lmdb, + cursor_handle: *Cursor, + + pub fn init(lmdb_txn: Lmdb) Self { + ensureValidState(lmdb_txn); + var cursor_handle: ?*Cursor = undefined; + const cursor_open_state = mdb.mdb_cursor_open(lmdb_txn.txn.?, lmdb_txn.db_handle, &cursor_handle); + checkState(cursor_open_state) catch unreachable; + return .{ .lmdb = lmdb_txn, .cursor_handle = cursor_handle.? }; + } + + pub fn doneCursoring(cursor: Self) void { + ensureValidState(cursor.lmdb); + + mdb.mdb_cursor_close(cursor.cursor_handle); + } + + pub fn updateCursor(cursor: Self) void { + ensureValidState(cursor.lmdb); + const cursor_renew_state = mdb.mdb_cursor_renew(cursor.lmdb.txn.?, cursor.cursor_handle); + checkState(cursor_renew_state) catch unreachable; + } + + ///get mutiple values with key `key_val` as `T` when it doesn't require allocation + //TODO: implement cursor Iterator for easy iteration + pub fn cursorGet(cursor: Self, cursor_get_op: CursorGetOperations) ?KeyValue { + ensureValidState(cursor.lmdb); + + var key_value: Key = undefined; + var data_value: Val = undefined; + const get_state = mdb.mdb_cursor_get( + cursor.cursor_handle, + &key_value, + &data_value, + @enumToInt(cursor_get_op), + ); + + checkState(get_state) catch return null; + return .{ + .key = serializer.deserialize( + ckey, + key_value.mv_data, + key_value.mv_size, + ), + .value = serializer.deserialize( + cvalue, + data_value.mv_data, + data_value.mv_size, + ), + }; + } + pub fn cursorGetFirst(cursor: Self) ?KeyValue 
{ + return cursorGet(cursor, .MDB_FIRST) orelse null; + } + + pub fn cursorGetFirstAlloc(cursor: Self, fba: std.mem.Allocator) ?KeyValue { + return cursorGetAlloc(cursor, fba, .MDB_FIRST) orelse null; + } + + pub fn cursorGetNext(cursor: Self) ?KeyValue { + return cursorGet(cursor, .MDB_NEXT) orelse null; + } + + pub fn cursorGetNextAlloc(cursor: Self, fba: std.mem.Allocator) ?KeyValue { + return cursorGetAlloc(cursor, fba, .MDB_NEXT) orelse null; + } + + ///get mutiple values with key `key_val` as `T` when it require allocation + pub fn cursorGetAlloc(cursor: Self, fba: std.mem.Allocator, cursor_get_op: CursorGetOperations) ?KeyValue { + ensureValidState(cursor.lmdb); + + var key_value: Key = undefined; + var data_value: Val = undefined; + const get_state = mdb.mdb_cursor_get( + cursor.cursor_handle, + &key_value, + &data_value, + @enumToInt(cursor_get_op), + ); + + checkState(get_state) catch return null; + return .{ + .key = std.mem.bytesAsSlice( + ckey, + serializer.getRawBytes(key_value.mv_data, key_value.mv_size), + )[0], + .value = serializer.deserializeAlloc( + cvalue, + fba, + data_value.mv_data, + data_value.mv_size, + ), + }; + } + + pub fn deinit(cursor: Self) void { + if (cursor.lmdb.txn_type == .ro) { + cursor.doneCursoring(); + } + } + + pub fn iterator(cursor: Self, fba: std.heap.FixedBufferAllocator) Iterator { + return .{ .cursor = cursor, .fba = fba }; + } + + pub const Iterator = struct { + cursor: Self, + fba: std.heap.FixedBufferAllocator, + //start transaction in interator + pub fn init(cursor: Self, fba: std.heap.FixedBufferAllocator) Iterator { + return .{ .cursor = cursor, .fba = fba }; + } + + pub fn deinit(self: *Iterator) void { + self.fba.reset(); + } + + //TODO: Find out if there is the need to store the cursor + pub fn start(itr: Iterator) ?KeyValue { + var fba = itr.fba; + return itr.cursor.cursorGetFirstAlloc(fba.allocator()); + } + pub fn next(itr: Iterator) ?KeyValue { + var fba = itr.fba; + return 
itr.cursor.cursorGetNextAlloc(fba.allocator()); + } + }; + + pub fn print(cursor: Self, comptime scope_info: []const u8) void { + var buf: [1024]u8 = undefined; + var fba = std.heap.FixedBufferAllocator.init(&buf); + + var itr = cursor.iterator(fba); + defer itr.deinit(); + + var next = itr.start(); + std.log.info("start printing {s}", .{scope_info}); + while (next) |key_value| : (next = itr.next()) { + std.log.debug("key is {s}", .{key_value.key}); + for (key_value.value, 0..) |val, idx| { + std.log.debug("val {} has amount {} and pub_key_hash {s}", .{ idx, val.value, val.pub_key_hash }); + } + } + std.log.info("done printing\n", .{}); + } + }; +} From e8937fc4e120a13d8e9c04db036402812d862b35 Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Wed, 5 Jul 2023 23:10:00 +0000 Subject: [PATCH 13/24] fix: zig breaking change update --- build.zig.zon | 4 ++-- src/Block.zig | 6 +++--- src/Blockchain.zig | 12 ++++++------ src/Cli.zig | 2 +- src/LmdbCursor.zig | 4 ++-- src/UTXOcache.zig | 2 +- src/Wallets.zig | 2 +- src/serializer.zig | 6 +++--- src/utils.zig | 4 ++-- 9 files changed, 21 insertions(+), 21 deletions(-) diff --git a/build.zig.zon b/build.zig.zon index dfe20bc..8b416a9 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -3,8 +3,8 @@ .version = "0.1.0", .dependencies = .{ .s2s = .{ - .url = "https://github.com/ziglibs/s2s/archive/6484f786b1830115601cd9ffb51bccec551ac6be.tar.gz", - .hash = "1220f6c17bde3f75ac79f29b5572f75fc35275b552b5b398d42df133fa038a29b396", + .url = "https://github.com/ziglibs/s2s/archive/f8dc146601fd91d372c1510e74adc2c839eaf41e.tar.gz", + .hash = "1220789c7c650cc65b820a4457b02f0e057ace507ae72220a860c36ef430481e2912", }, }, } diff --git a/src/Block.zig b/src/Block.zig index 4d38555..d209df7 100644 --- a/src/Block.zig +++ b/src/Block.zig @@ -90,8 +90,8 @@ fn getTargetHash(target_dificulty: u7) u256 { //hast to be compaired with for valid hashes to prove work done const @"256bit": u9 = 256; //256 bit is 32 byte which is the size of a Blake3 hash 
const @"1": u256 = 1; //a 32 byte integer with the value of 1 - const difficult = @intCast(u8, @"256bit" - target_dificulty); - const target_hash_difficult = @shlExact(@"1", difficult); + const difficult: u8 = @intCast(@"256bit" - target_dificulty); + const target_hash_difficult: u256 = @shlExact(@"1", difficult); return target_hash_difficult; } @@ -106,7 +106,7 @@ pub fn POW(block: Block) struct { hash: Hash, nonce: usize } { const hash_int = block.hashBlock(nonce); if (hash_int < target_hash) { - return .{ .hash = @bitCast(Hash, hash_int), .nonce = nonce }; + return .{ .hash = @bitCast(hash_int), .nonce = nonce }; } else { nonce += 1; } diff --git a/src/Blockchain.zig b/src/Blockchain.zig index e5e273c..4ad56d5 100644 --- a/src/Blockchain.zig +++ b/src/Blockchain.zig @@ -49,7 +49,7 @@ pub fn getChain(lmdb: Lmdb, arena: std.mem.Allocator) BlockChain { return .{ .last_hash = last_block_hash, .db = db, .arena = arena }; } else |_| { std.log.err("create a blockchain with creatchain command before using any other command", .{}); - std.process.exit(@enumToInt(ExitCodes.blockchain_not_found)); + std.process.exit(@intFromEnum(ExitCodes.blockchain_not_found)); } } @@ -57,7 +57,7 @@ pub fn getChain(lmdb: Lmdb, arena: std.mem.Allocator) BlockChain { pub fn newChain(lmdb: Lmdb, arena: std.mem.Allocator, address: Wallets.Address) BlockChain { if (!Wallet.validateAddress(address)) { std.log.err("blockchain address {s} is invalid", .{address}); - std.process.exit(@enumToInt(ExitCodes.invalid_wallet_address)); + std.process.exit(@intFromEnum(ExitCodes.invalid_wallet_address)); } var buf: [1024 * 6]u8 = undefined; var fba = std.heap.FixedBufferAllocator.init(&buf); @@ -77,7 +77,7 @@ pub fn newChain(lmdb: Lmdb, arena: std.mem.Allocator, address: Wallets.Address) std.log.err("Attempting to create a new blockchain at address '{s}' while a blockchain already exist", .{ address, }); - std.process.exit(@enumToInt(ExitCodes.blockchain_already_exist)); + 
std.process.exit(@intFromEnum(ExitCodes.blockchain_already_exist)); }, else => unreachable, }; @@ -204,7 +204,7 @@ fn newUTx(self: BlockChain, utxo_cache: UTXOcache, amount: usize, from: Wallets. if (accumulated_amount < amount) { std.log.err("not enough funds to transfer RBC {d} from '{s}' to '{s}'", .{ amount, from, to }); - std.process.exit(@enumToInt(ExitCodes.insufficient_wallet_balance)); + std.process.exit(@intFromEnum(ExitCodes.insufficient_wallet_balance)); } //Build a list of inputs @@ -284,11 +284,11 @@ pub fn sendValue(self: *BlockChain, amount: usize, from: Wallets.Address, to: Wa if (!Wallet.validateAddress(from)) { std.log.err("sender address {s} is invalid", .{from}); - std.process.exit(@enumToInt(ExitCodes.invalid_wallet_address)); + std.process.exit(@intFromEnum(ExitCodes.invalid_wallet_address)); } if (!Wallet.validateAddress(to)) { std.log.err("recipient address {s} is invalid", .{to}); - std.process.exit(@enumToInt(ExitCodes.invalid_wallet_address)); + std.process.exit(@intFromEnum(ExitCodes.invalid_wallet_address)); } const cache = UTXOcache.init(self.db, self.arena); var new_transaction = self.newUTx(cache, amount, from, to); diff --git a/src/Cli.zig b/src/Cli.zig index 3e763d7..88b2bc0 100644 --- a/src/Cli.zig +++ b/src/Cli.zig @@ -165,5 +165,5 @@ fn printUsage(cmd: Cmd) void { , .{}); }, } - std.process.exit(@enumToInt(ExitCodes.invalid_cli_argument)); + std.process.exit(@intFromEnum(ExitCodes.invalid_cli_argument)); } diff --git a/src/LmdbCursor.zig b/src/LmdbCursor.zig index d6e09ff..b20f329 100644 --- a/src/LmdbCursor.zig +++ b/src/LmdbCursor.zig @@ -74,7 +74,7 @@ pub fn LmdbCursor(comptime ckey: type, comptime cvalue: type) type { cursor.cursor_handle, &key_value, &data_value, - @enumToInt(cursor_get_op), + @intFromEnum(cursor_get_op), ); checkState(get_state) catch return null; @@ -117,7 +117,7 @@ pub fn LmdbCursor(comptime ckey: type, comptime cvalue: type) type { cursor.cursor_handle, &key_value, &data_value, - 
@enumToInt(cursor_get_op), + @intFromEnum(cursor_get_op), ); checkState(get_state) catch return null; diff --git a/src/UTXOcache.zig b/src/UTXOcache.zig index bc5d6af..945b160 100644 --- a/src/UTXOcache.zig +++ b/src/UTXOcache.zig @@ -201,7 +201,7 @@ pub fn update(utxo_cache: UTXOcache, block: Block) void { pub fn getBalance(cache: UTXOcache, address: Wallets.Address) usize { if (!Wallet.validateAddress(address)) { std.log.err("address {s} is invalid", .{address}); - std.process.exit(@enumToInt(ExitCodes.invalid_wallet_address)); + std.process.exit(@intFromEnum(ExitCodes.invalid_wallet_address)); } var balance: usize = 0; const utxos = cache.findUnlockableOutputs(Wallet.getPubKeyHash(address)); diff --git a/src/Wallets.zig b/src/Wallets.zig index 19c89d8..dbd410f 100644 --- a/src/Wallets.zig +++ b/src/Wallets.zig @@ -69,7 +69,7 @@ pub fn getWallet(self: Wallets, address: Address) Wallet { return self.wallets.get(address) orelse { std.log.err("The wallet address specified '{s}' does not exit", .{address}); std.log.err("Create a wallet with the 'createwallet' command", .{}); - std.process.exit(@enumToInt(ExitCodes.invalid_wallet_address)); + std.process.exit(@intFromEnum(ExitCodes.invalid_wallet_address)); }; } diff --git a/src/serializer.zig b/src/serializer.zig index c0caaec..1c7628c 100644 --- a/src/serializer.zig +++ b/src/serializer.zig @@ -22,7 +22,7 @@ pub fn serialize(data: anytype) [HASH_SIZE + @sizeOf(@TypeOf(data))]u8 { /// get bytes starting from `0` to `len` pub fn getRawBytes(data: ?*anyopaque, len: usize) []const u8 { - return @ptrCast([*]const u8, data.?)[0..len]; + return @as([*]const u8, @ptrCast(data.?))[0..len]; } ///deserialize bytes representing data as `T` @@ -66,12 +66,12 @@ pub fn deserializeAlloc(comptime T: type, fballocator: std.mem.Allocator, data: /// serialized a type in memory fn inMemSerialize(type_to_serialize: anytype, serialized_buf: *[@sizeOf(@TypeOf(type_to_serialize))]u8) void { - @memcpy(serialized_buf, @ptrCast([*]const u8, 
&type_to_serialize)); + @memcpy(serialized_buf, @as([*]const u8, @ptrCast(&type_to_serialize))); } /// deserialize data from memory fn inMemDeserialize(comptime T: type, serialized_t: [@sizeOf(T)]u8) T { - return @bitCast(T, serialized_t); + return @bitCast(serialized_t); } // test "simple serialization/deserialization with other data interleved " { diff --git a/src/utils.zig b/src/utils.zig index 6c25e64..e77f354 100644 --- a/src/utils.zig +++ b/src/utils.zig @@ -1,7 +1,7 @@ pub fn fmtHash(hash: [32]u8) [32]u8 { - const hash_int = @bitCast(u256, hash); + const hash_int: u256 = @bitCast(hash); const big_end_hash_int = @byteSwap(hash_int); - return @bitCast([32]u8, big_end_hash_int); + return @bitCast(big_end_hash_int); } pub const ExitCodes = enum { From f220b9b3d96851f20a5df1c2eebf373376de92c9 Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Wed, 5 Jul 2023 23:29:29 +0000 Subject: [PATCH 14/24] follow up to breaking change fixes --- src/Lmdb.zig | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/Lmdb.zig b/src/Lmdb.zig index 7991274..4c1c74e 100644 --- a/src/Lmdb.zig +++ b/src/Lmdb.zig @@ -52,7 +52,7 @@ pub fn initdb(db_path: []const u8, txn_type: TxnType) Lmdb { const db_limit_state = mdb.mdb_env_set_maxdbs(db_env, max_num_of_dbs); checkState(db_limit_state) catch unreachable; - const db_flags = @enumToInt(txn_type); + const db_flags = @intFromEnum(txn_type); const permissions: c_uint = 0o0600; //octal permissions for created files in db_dir const open_state = mdb.mdb_env_open(db_env, db_path.ptr, db_flags, permissions); checkState(open_state) catch |open_err| switch (open_err) { @@ -207,7 +207,7 @@ const RemoveAction = enum(u1) { inline fn remove(lmdb: Lmdb, action: RemoveAction) void { //0 to empty the DB, 1 to delete it from the environment and close the DB handle. 
- const empty_db_state = mdb.mdb_drop(lmdb.txn.?, lmdb.db_handle, @enumToInt(action)); + const empty_db_state = mdb.mdb_drop(lmdb.txn.?, lmdb.db_handle, @intFromEnum(action)); checkState(empty_db_state) catch unreachable; } @@ -394,7 +394,7 @@ fn abortTxns(lmdb: Lmdb) void { ///This is any unsafe cast which discards const pub fn cast(comptime T: type, any_ptr: anytype) T { - return @intToPtr(T, @ptrToInt(any_ptr)); + return @constCast(any_ptr); } ///get `key` as `T` when it doesn't require allocation @@ -531,30 +531,30 @@ pub fn checkState(state: c_int) !void { return error.InvalidDbHandle; }, //out of memory. - @enumToInt(err.NOENT) => { + @intFromEnum(err.NOENT) => { return error.NoSuchFileOrDirectory; }, //don't have adecuate permissions to perform operation - @enumToInt(err.ACCES) => { + @intFromEnum(err.ACCES) => { return error.PermissionDenied; }, //the environment was locked by another process. - @enumToInt(err.AGAIN) => { + @intFromEnum(err.AGAIN) => { return error.EnvLockedTryAgain; }, - @enumToInt(err.NOMEM) => { + @intFromEnum(err.NOMEM) => { return error.OutOfMemory; }, //an invalid parameter was specified. - @enumToInt(err.INVAL) => { + @intFromEnum(err.INVAL) => { return error.InvalidArgument; }, //a low-level I/O error occurred - @enumToInt(err.IO) => { + @intFromEnum(err.IO) => { return error.IOFailed; }, //no more disk space on device. 
- @enumToInt(err.NOSPC) => { + @intFromEnum(err.NOSPC) => { return error.DiskSpaceFull; }, else => panic("'{}' -> {s}", .{ state, mdb.mdb_strerror(state) }), From e9709741a54a511a9fdda34827f560c83cf1cb6e Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Sat, 8 Jul 2023 16:40:00 +0000 Subject: [PATCH 15/24] fix(dupsort): improve dupsort error detection Add panic for dupsort when it hits Lmdb keys/dup data limit --- src/Lmdb.zig | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/src/Lmdb.zig b/src/Lmdb.zig index 4c1c74e..19f9ba1 100644 --- a/src/Lmdb.zig +++ b/src/Lmdb.zig @@ -257,23 +257,23 @@ const InsertFlags = enum { fn insert(lmdb: Lmdb, key: []const u8, serialized_data: []const u8, flags: InsertFlags) !void { const set_flags = try DbFlags.flags(lmdb); - + const is_dup_sorted = set_flags.isDupSorted(); const DEFAULT_BEHAVIOUR = 0; const ALLOW_DUP_DATA = 0; // zig fmt: off const insert_flags: c_uint = //enter the new key/data pair only if both key and value does not already appear in the database. //that is allow duplicate keys but not both duplicate keys and values - if (set_flags.isDupSorted() and flags == .no_dup_data ) + if (is_dup_sorted and flags == .no_dup_data ) // Only for MDB_DUPSORT // For put: don't write if the key and data pair already exist. // For mdb_cursor_del: remove all duplicate data items. mdb.MDB_NODUPDATA //default behavior: allow adding a duplicate key/data item if duplicates are allowed (MDB_DUPSORT) - else if (set_flags.isDupSorted() and flags == .dup_data ) + else if (is_dup_sorted and flags == .dup_data ) ALLOW_DUP_DATA // if the database supports duplicates (MDB_DUPSORT). The data parameter will be set to point to the existing item. 
- else if (set_flags.isDupSorted() and flags == .no_overwrite ) mdb.MDB_NOOVERWRITE + else if (is_dup_sorted and flags == .no_overwrite ) mdb.MDB_NOOVERWRITE //enter the new key/data pair only if the key does not already appear in the database //that is: don't allow overwriting keys else if (flags == .no_overwrite ) mdb.MDB_NOOVERWRITE @@ -282,10 +282,10 @@ fn insert(lmdb: Lmdb, key: []const u8, serialized_data: []const u8, flags: Inser //allow overwriting data else DEFAULT_BEHAVIOUR; // zig fmt: on - if (flags == .overwrite and set_flags.isDupSorted()) { - del(lmdb, key, .all, {}) catch unreachable; - return lmdb.insert(key, serialized_data, .no_overwrite) catch unreachable; + if (is_dup_sorted and flags == .overwrite) { + del(lmdb, key, .all, {}) catch unreachable; + return try mdbput(lmdb, insert_flags, key, serialized_data); } else if (flags == .overwrite) { //use default behavior return try mdbput(lmdb, insert_flags, key, serialized_data); @@ -294,7 +294,10 @@ fn insert(lmdb: Lmdb, key: []const u8, serialized_data: []const u8, flags: Inser try mdbput(lmdb, insert_flags, key, serialized_data); } -inline fn mdbput(lmdb: Lmdb, insert_flags: c_uint, key: []const u8, serialized_data: []const u8) !void { +fn mdbput(lmdb: Lmdb, insert_flags: c_uint, key: []const u8, serialized_data: []const u8) !void { + //due to limitations of lmdb,the len of data items in a #MDB_DUPSORT db are limited to a max of 512 + // NOTE: MDB_MAXKEYSIZE macro in deps/lmdb/libraries/liblmdb/mdb.c Line:665 + var insert_key = dbKey(key); var value_data = dbValue(serialized_data[0..]); const put_state = mdb.mdb_put( @@ -304,7 +307,15 @@ inline fn mdbput(lmdb: Lmdb, insert_flags: c_uint, key: []const u8, serialized_d &value_data, insert_flags, ); - try checkState(put_state); + + checkState(put_state) catch |put_errors| switch (put_errors) { + error.UnsupportedKeyOrDataSize => @panic( + \\Cannot store Keys/#MDB_DUPSORT data items greater than 512. 
+ \\Maybe try the compress option/rethink your use of dupsort db + \\http://www.lmdb.tech/doc/group__mdb.html#gaaf0be004f33828bf2fb09d77eb3cef94 + ), + else => |remaining_put_errors| return remaining_put_errors, + }; } ///insert new key/data pair without overwriting already inserted pair @@ -313,6 +324,7 @@ pub fn put(lmdb: Lmdb, key: []const u8, data: anytype) !void { ensureValidState(lmdb); const serialized_data = serializer.serialize(data); + try insert(lmdb, key, serialized_data[0..], .no_overwrite); } From 062a0c400e3e67a7eac14de3054ab8804ae84cff Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Sat, 8 Jul 2023 17:25:49 +0000 Subject: [PATCH 16/24] lmdb: add compress and decompress fns Would build on these later to add compression option to lmdb --- src/Lmdb.zig | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/Lmdb.zig b/src/Lmdb.zig index 19f9ba1..c5911ac 100644 --- a/src/Lmdb.zig +++ b/src/Lmdb.zig @@ -328,6 +328,31 @@ pub fn put(lmdb: Lmdb, key: []const u8, data: anytype) !void { try insert(lmdb, key, serialized_data[0..], .no_overwrite); } +fn compress(serialized_data: []const u8, compressed_buf: [serialized_data.len]u8) *const [serialized_data.len]u8 { + const allocator = std.heap.FixedBufferAllocator.init(&compressed_buf); + const fbs = std.io.fixedBufferStream(compressed_buf); + const writer = fbs.writer(); + + const zlib = try std.compress.zlib.compressStream(allocator, writer, .{}); + defer zlib.deinit(); + + zlib.writer().writeAll(serialized_data); + zlib.finish(); + + return fbs.getWritten(); +} + +fn decompress(allocator: std.heap.FixedBufferAllocator, compressed_data: []const u8) ![]const u8 { + var in_stream = std.io.fixedBufferStream(compressed_data); + const fba = allocator.allocator(); + + var zlib = try std.compress.zlib.decompressStream(fba, in_stream.reader()); + defer zlib.deinit(); + + // Read and decompress the whole file + return try zlib.reader().readAllAlloc(fba, allocator.buffer.len); +} + ///use 
`putAlloc` when data contains slices or pointers +///recommend you use fixedBufferAllocator or ArenaAllocator +pub fn putAlloc(lmdb: Lmdb, fba: std.mem.Allocator, key: []const u8, data: anytype) !void { From 2a0c5361285784fece3dd58949203c9f156c7952 Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Sat, 15 Jul 2023 15:34:59 +0000 Subject: [PATCH 17/24] lmdb: fix deadlock caused by starting two .rw transactions Improve Lmdb's interface better name env flags as EnvFlags rename DbTxnOption to DbOption and use descriptive names for the Db options add TxnType to represent database transaction types make open*Db fns consistent DbFlags.flags should not return an error change signature of startTxn improve logic for setting txn_flags replace env transaction flag field with db transaction flag which lmdb doesn't provide us a way to query rename Lmdb fields for consistency add getEnvFlags to get flags set at environment creation --- src/Lmdb.zig | 186 ++++++++++++++++++++++++++++++--------------------- 1 file changed, 111 insertions(+), 75 deletions(-) diff --git a/src/Lmdb.zig b/src/Lmdb.zig index c5911ac..de9f0e7 100644 --- a/src/Lmdb.zig +++ b/src/Lmdb.zig @@ -22,9 +22,8 @@ pub const Val = mdb.MDB_val; const Txn = mdb.MDB_txn; const DbHandle = mdb.MDB_dbi; -//TODO: since we can get this from the environment dont store it in this struct ///Special options for this environment -pub const TxnType = enum(c_uint) { +pub const EnvFlags = enum(c_uint) { ///Use a writeable memory map unless MDB_RDONLY is set. This is faster and uses fewer mallocs, //but loses protection from application bugs like wild pointer writes and other bad updates into the database.
rw = mdb.MDB_WRITEMAP, @@ -33,17 +32,32 @@ pub const TxnType = enum(c_uint) { ro = mdb.MDB_RDONLY, }; +///types of database transactions +pub const TxnType = enum { + rw, + ro, + not_set, +}; + db_env: *Env, -txn: ?*Txn = null, -txn_type: TxnType, +db_txn: ?*Txn = null, +db_txn_type: TxnType = .not_set, db_handle: DbHandle = std.math.maxInt(c_uint), +//TODO: add all relevant lmdb get fns +///Get environment flags. +pub fn getEnvFlags(env: *Env) EnvFlags { + var env_flags: c_uint = undefined; + const flags_status = mdb.mdb_env_get_flags(env, &env_flags); + checkState(flags_status) catch unreachable; + return @enumFromInt(env_flags); +} ///`db_path` is the directory in which the database files reside. This directory must already exist and be writable. -/// `initdb` fn initializes the db environment (mmap file) specifing the db mode `.rw/.ro`. +/// `initdb` fn initializes the db environment (mmap file) specifing the `env_flags` [.rw|.ro]. ///Make sure to start a transaction .ie startTxn() fn before calling any db manipulation fn's ///A maximum of two named db's are allowed ///if the environment is opened in read-only mode No write operations will be allowed. 
-pub fn initdb(db_path: []const u8, txn_type: TxnType) Lmdb { +pub fn initdb(db_path: []const u8, env_flags: EnvFlags) Lmdb { var db_env: ?*Env = undefined; const env_state = mdb.mdb_env_create(&db_env); checkState(env_state) catch unreachable; @@ -52,7 +66,7 @@ pub fn initdb(db_path: []const u8, txn_type: TxnType) Lmdb { const db_limit_state = mdb.mdb_env_set_maxdbs(db_env, max_num_of_dbs); checkState(db_limit_state) catch unreachable; - const db_flags = @intFromEnum(txn_type); + const db_flags = @intFromEnum(env_flags); const permissions: c_uint = 0o0600; //octal permissions for created files in db_dir const open_state = mdb.mdb_env_open(db_env, db_path.ptr, db_flags, permissions); checkState(open_state) catch |open_err| switch (open_err) { @@ -66,7 +80,6 @@ pub fn initdb(db_path: []const u8, txn_type: TxnType) Lmdb { return .{ .db_env = db_env.?, - .txn_type = txn_type, }; } @@ -75,79 +88,120 @@ pub fn deinitdb(lmdb: Lmdb) void { mdb.mdb_env_close(lmdb.db_env); } -pub const DbTxnOption = packed struct { - ///Create the named database if it doesn't exist. - ///This option is not allowed in a read-only transaction or a read-only environment. - rw: bool, - ///Duplicate keys may be used in the database. - ///(Or, from another perspective, keys may have multiple data items, stored in sorted order.) - dup: bool = false, -}; - +//FIX: comment to various fns to reflect current changes ///start a transaction in rw/ro mode and get a db handle for db manipulation ///commit changes with commitTxns() if .rw else doneReading() if .ro -pub fn startTxn(lmdb: Lmdb) *Txn { +pub fn startTxn(lmdb: Lmdb, txn_type: TxnType) Lmdb { + const env_flags = getEnvFlags(lmdb.db_env); // This transaction will not perform any write operations if ro. 
- const flags: c_uint = if (lmdb.txn_type == .ro) mdb.MDB_RDONLY else 0; + const txn_flags: c_uint = switch (txn_type) { + .rw => if (env_flags != .ro) 0 else { + @panic("cannot start a .rw txn in a .ro environment"); + }, + .ro => mdb.MDB_RDONLY, + else => unreachable, + }; const parent = null; //no parent var txn: ?*Txn = undefined; //where the new #MDB_txn handle will be stored - const txn_state = mdb.mdb_txn_begin(lmdb.db_env, parent, flags, &txn); + const txn_state = mdb.mdb_txn_begin(lmdb.db_env, parent, txn_flags, &txn); checkState(txn_state) catch unreachable; - return txn.?; + return .{ + .db_env = lmdb.db_env, + .db_txn = txn.?, + .db_txn_type = txn_type, + }; } -//TODO:maybe support other flags for db like MDB_DUPSORT && MDB_DUPFIXED -//A single transaction can open multiple databases -pub fn setDbOpt(lmdb: Lmdb, db_txn: *Txn, db_txn_option: DbTxnOption, comptime db_name: []const u8) void { +pub const DbOption = packed struct { + ///Create the named database if it doesn't exist. + ///This option is not allowed in a read-only transaction or a read-only environment. + create_db_if_not_exist: bool = true, + ///Duplicate keys may be used in the database. + ///(Or, from another perspective, keys may have multiple data items, stored in sorted order.) + enable_dupsort: bool = false, +}; + +const DbFlags = struct { + flags: c_uint, + const Self = @This(); + + pub fn flags(lmdb: Lmdb) DbFlags { + ensureValidState(lmdb); + + var set_flags: c_uint = undefined; + const get_flags_state = mdb.mdb_dbi_flags(lmdb.db_txn, lmdb.db_handle, &set_flags); + checkState(get_flags_state) catch unreachable; + + return .{ .flags = set_flags }; + } + + fn isDupSorted(self: Self) bool { + return if ((self.flags & mdb.MDB_DUPSORT) == mdb.MDB_DUPSORT) true else false; + } +}; + +//INFO:from https://bugs.openldap.org/show_bug.cgi?id=10005 +//The persistent flags you specified when the DB was created +//are stored in the DB record and retrieved when the DB is opened. 
+//Flags specified to mdb_dbi_open at any other time are ignored. +///set flags `db_options` to be used for the opened db `db_name` +pub fn setDbOpt(lmdb: Lmdb, comptime db_name: []const u8, db_options: DbOption) void { + const env_flags = getEnvFlags(lmdb.db_env); //Create the named database if it doesn't exist. //This option is not allowed in a read-only transaction or a read-only environment var db_flags: c_uint = 0; - if (lmdb.txn_type == .rw and db_txn_option.rw) { + if (env_flags == .rw and db_options.create_db_if_not_exist) { db_flags |= mdb.MDB_CREATE; - } else if (lmdb.txn_type == .ro and db_txn_option.rw) { + } + //create_db_if_not_exist is not allowed in a read-only transaction or a read-only environment. + else if (env_flags == .ro or lmdb.db_txn_type == .ro) { @panic("Can't create a new database " ++ db_name ++ " in a read-only environment or transaction"); } - if (db_txn_option.dup) db_flags |= mdb.MDB_DUPSORT; + if (db_options.enable_dupsort) db_flags |= mdb.MDB_DUPSORT; var db_handle: mdb.MDB_dbi = undefined; //dbi Address where the new #MDB_dbi handle will be stored - const db_state = mdb.mdb_dbi_open(db_txn, db_name.ptr, db_flags, &db_handle); + const db_state = mdb.mdb_dbi_open(lmdb.db_txn, db_name.ptr, db_flags, &db_handle); checkState(db_state) catch unreachable; } -//from https://bugs.openldap.org/show_bug.cgi?id=10005 -//The persistent flags you specified when the DB was created -//are stored in the DB record and retrieved when the DB is opened. -//Flags specified to mdb_dbi_open at any other time are ignored. -pub fn openDb(lmdb: Lmdb, db_txn: *Txn, comptime db_name: []const u8) Lmdb { +///Open a database in the environment. +///The old database handle is returned if the database was already open. 
+///A single transaction can open multiple databases +pub fn openDb(lmdb: Lmdb, comptime db_name: []const u8) Lmdb { + assert(lmdb.db_txn != null); + assert(lmdb.db_txn_type != .not_set); + var db_handle: mdb.MDB_dbi = undefined; //dbi Address where the new #MDB_dbi handle will be stored const DEFAULT_FLAGS = 0; - const db_state = mdb.mdb_dbi_open(db_txn, db_name.ptr, DEFAULT_FLAGS, &db_handle); + const db_state = mdb.mdb_dbi_open(lmdb.db_txn, db_name.ptr, DEFAULT_FLAGS, &db_handle); checkState(db_state) catch unreachable; return .{ .db_env = lmdb.db_env, - .txn = db_txn, - .txn_type = lmdb.txn_type, + .db_txn = lmdb.db_txn, + .db_txn_type = lmdb.db_txn_type, .db_handle = db_handle, }; } //TODO: make openDb consistent with setDbOpt and openDb ///open a different db in an already open transaction -pub fn openNewDb(lmdb: Lmdb, db_txn_option: DbTxnOption, db_name: []const u8) Lmdb { +pub fn openNewDb(lmdb: Lmdb, db_name: []const u8, db_flags: DbOption) Lmdb { //make sure a transaction has been created already ensureValidState(lmdb); - const handle = openDb(lmdb, db_txn_option, db_name); + setDbOpt(lmdb, db_name, db_flags); + const DEFAULT_FLAGS = 0; + const handle = openDb(lmdb, DEFAULT_FLAGS, db_name); return .{ .db_env = lmdb.db_env, - .txn = lmdb.txn.?, - .txn_type = lmdb.txn_type, + .db_txn = lmdb.db_txn.?, + .db_txn_type = lmdb.db_txn_type, .db_handle = handle, }; } pub inline fn ensureValidState(lmdb: Lmdb) void { - assert(lmdb.txn != null); + assert(lmdb.db_txn != null); assert(lmdb.db_handle != std.math.maxInt(c_uint)); } @@ -169,17 +223,17 @@ pub fn del(lmdb: Lmdb, key: []const u8, comptime del_opt: DeleteAction, data: an switch (del_opt) { .exact => { - const db_flags = try DbFlags.flags(lmdb); + const db_flags = DbFlags.flags(lmdb); if (db_flags.isDupSorted()) { const serialized_data = serializer.serialize(data); - var del_data = dbValue(serialized_data); + var del_data = dbValue(serialized_data[0..]); - const del_state = mdb.mdb_del(lmdb.txn.?, 
lmdb.db_handle, &del_key, &del_data); + const del_state = mdb.mdb_del(lmdb.db_txn.?, lmdb.db_handle, &del_key, &del_data); try checkState(del_state); } else unreachable; }, .all, .single => { - const del_state = mdb.mdb_del(lmdb.txn.?, lmdb.db_handle, &del_key, null); + const del_state = mdb.mdb_del(lmdb.db_txn.?, lmdb.db_handle, &del_key, null); try checkState(del_state); }, } @@ -189,13 +243,13 @@ pub fn del(lmdb: Lmdb, key: []const u8, comptime del_opt: DeleteAction, data: an pub fn delDupsAlloc(lmdb: Lmdb, allocator: std.mem.Allocator, key: []const u8, data: anytype) !void { ensureValidState(lmdb); //This function will return MDB_NOTFOUND if the specified key/data pair is not in the database. - const db_flags = try DbFlags.flags(lmdb); + const db_flags = DbFlags.flags(lmdb); if (db_flags.isDupSorted()) { var del_key = dbKey(key); const serialized_data = serializer.serializeAlloc(allocator, data); var del_data = dbValue(serialized_data); - const del_state = mdb.mdb_del(lmdb.txn.?, lmdb.db_handle, &del_key, &del_data); + const del_state = mdb.mdb_del(lmdb.db_txn.?, lmdb.db_handle, &del_key, &del_data); try checkState(del_state); } else unreachable; } @@ -207,7 +261,7 @@ const RemoveAction = enum(u1) { inline fn remove(lmdb: Lmdb, action: RemoveAction) void { //0 to empty the DB, 1 to delete it from the environment and close the DB handle. 
- const empty_db_state = mdb.mdb_drop(lmdb.txn.?, lmdb.db_handle, @intFromEnum(action)); + const empty_db_state = mdb.mdb_drop(lmdb.db_txn.?, lmdb.db_handle, @intFromEnum(action)); checkState(empty_db_state) catch unreachable; } @@ -225,25 +279,6 @@ pub fn delDb(lmdb: Lmdb) void { remove(lmdb, .delete_and_close); } -const DbFlags = struct { - flags: c_uint, - const Self = @This(); - - pub fn flags(lmdb: Lmdb) !DbFlags { - ensureValidState(lmdb); - - var set_flags: c_uint = undefined; - const get_flags_state = mdb.mdb_dbi_flags(lmdb.txn, lmdb.db_handle, &set_flags); - try checkState(get_flags_state); - - return .{ .flags = set_flags }; - } - - fn isDupSorted(self: Self) bool { - return if ((self.flags & mdb.MDB_DUPSORT) == mdb.MDB_DUPSORT) true else false; - } -}; - const InsertFlags = enum { //allow duplicate key/data pairs dup_data, @@ -256,7 +291,7 @@ const InsertFlags = enum { }; fn insert(lmdb: Lmdb, key: []const u8, serialized_data: []const u8, flags: InsertFlags) !void { - const set_flags = try DbFlags.flags(lmdb); + const set_flags = DbFlags.flags(lmdb); const is_dup_sorted = set_flags.isDupSorted(); const DEFAULT_BEHAVIOUR = 0; const ALLOW_DUP_DATA = 0; @@ -301,7 +336,7 @@ fn mdbput(lmdb: Lmdb, insert_flags: c_uint, key: []const u8, serialized_data: [] var insert_key = dbKey(key); var value_data = dbValue(serialized_data[0..]); const put_state = mdb.mdb_put( - lmdb.txn.?, + lmdb.db_txn.?, lmdb.db_handle, &insert_key, &value_data, @@ -328,6 +363,7 @@ pub fn put(lmdb: Lmdb, key: []const u8, data: anytype) !void { try insert(lmdb, key, serialized_data[0..], .no_overwrite); } +//TODO: move to it own module fn compress(serialized_data: []const u8, compressed_buf: [serialized_data.len]u8) *const [serialized_data.len]u8 { const allocator = std.heap.FixedBufferAllocator.init(&compressed_buf); const fbs = std.io.fixedBufferStream(compressed_buf); @@ -404,7 +440,7 @@ pub fn updateAlloc(lmdb: Lmdb, allocator: std.mem.Allocator, key: []const u8, da pub fn 
commitTxns(lmdb: Lmdb) void { ensureValidState(lmdb); - const commit_state = mdb.mdb_txn_commit(lmdb.txn.?); + const commit_state = mdb.mdb_txn_commit(lmdb.db_txn.?); checkState(commit_state) catch unreachable; } @@ -419,14 +455,14 @@ pub fn doneReading(lmdb: Lmdb) void { ///it will update the current read-only transaction to see the changes made in the read-write transaction pub fn updateRead(lmdb: Lmdb) void { ensureValidState(lmdb); - mdb.mdb_txn_reset(lmdb.txn.?); - const rewew_state = mdb.mdb_txn_renew(lmdb.txn.?); + mdb.mdb_txn_reset(lmdb.db_txn.?); + const rewew_state = mdb.mdb_txn_renew(lmdb.db_txn.?); checkState(rewew_state) catch unreachable; } ///cancel/discard all transaction on the current db handle fn abortTxns(lmdb: Lmdb) void { - mdb.mdb_txn_abort(lmdb.txn.?); + mdb.mdb_txn_abort(lmdb.db_txn.?); } ///This is any unsafe cast which discards const @@ -442,7 +478,7 @@ pub fn get(lmdb: Lmdb, comptime T: type, key: []const u8) !T { var get_key = dbKey(key); const get_state = mdb.mdb_get( - lmdb.txn.?, + lmdb.db_txn.?, lmdb.db_handle, &get_key, &data, @@ -459,7 +495,7 @@ pub fn getAlloc(lmdb: Lmdb, comptime T: type, fba: std.mem.Allocator, key: []con var data: Val = undefined; var get_key = dbKey(key); const get_state = mdb.mdb_get( - lmdb.txn.?, + lmdb.db_txn.?, lmdb.db_handle, &get_key, &data, From 3a8d23acb39e2ebf401d34c9551c0b5fdae83d38 Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Sat, 15 Jul 2023 16:00:26 +0000 Subject: [PATCH 18/24] Lmdb: follow up on lmdb api changes --- src/Blockchain.zig | 16 ++++----- src/Iterator.zig | 4 +-- src/LmdbCursor.zig | 6 ++-- src/UTXOcache.zig | 81 ++++++++++++++++++++++------------------------ 4 files changed, 51 insertions(+), 56 deletions(-) diff --git a/src/Blockchain.zig b/src/Blockchain.zig index 4ad56d5..2d986e6 100644 --- a/src/Blockchain.zig +++ b/src/Blockchain.zig @@ -40,10 +40,10 @@ arena: std.mem.Allocator, //TODO:organise and document exit codes pub fn getChain(lmdb: Lmdb, arena: std.mem.Allocator) 
BlockChain { - const txn = lmdb.startTxn(); + const txn = lmdb.startTxn(.ro); - const db = lmdb.openDb(txn, BLOCK_DB); - defer db.commitTxns(); + const db = txn.openDb(BLOCK_DB); + defer db.doneReading(); if (db.get(Hash, LAST)) |last_block_hash| { return .{ .last_hash = last_block_hash, .db = db, .arena = arena }; @@ -66,10 +66,10 @@ pub fn newChain(lmdb: Lmdb, arena: std.mem.Allocator, address: Wallets.Address) const coinbase_tx = Transaction.initCoinBaseTx(allocator, address, WALLET_STORAGE); const genesis_block = Block.genesisBlock(allocator, coinbase_tx); - const txn = lmdb.startTxn(); + const txn = lmdb.startTxn(.rw); - lmdb.setDbOpt(txn, .{ .rw = true, .dup = true }, BLOCK_DB); - const db = lmdb.openDb(txn, BLOCK_DB); + txn.setDbOpt(BLOCK_DB, .{}); + const db = txn.openDb(BLOCK_DB); defer db.commitTxns(); db.put(LAST, genesis_block.hash) catch |newchain_err| switch (newchain_err) { @@ -107,8 +107,8 @@ pub fn mineBlock(bc: *BlockChain, transactions: []const Transaction) Block { assert(new_block.validate() == true); - const txn = bc.db.startTxn(); - const db = bc.db.openDb(txn, BLOCK_DB); + const txn = bc.db.startTxn(.rw); + const db = txn.openDb(BLOCK_DB); defer db.commitTxns(); db.putAlloc(allocator, new_block.hash[0..], new_block) catch unreachable; diff --git a/src/Iterator.zig b/src/Iterator.zig index c529d9b..17fe9ca 100644 --- a/src/Iterator.zig +++ b/src/Iterator.zig @@ -23,8 +23,8 @@ pub const BlockIterator = struct { ///the returned usize is the address of the Block in memory ///the ptr can be obtained with @intToPtr pub fn next(self: *BlockIterator) ?Block { - const txn = self.db.startTxn(); - const db = self.db.openDb(txn, BLOCK_DB); + const txn = self.db.startTxn(.ro); + const db = txn.openDb(BLOCK_DB); defer db.doneReading(); if (db.getAlloc(Block, self.arena, self.current_hash[0..])) |current_block| { diff --git a/src/LmdbCursor.zig b/src/LmdbCursor.zig index b20f329..7b1db0a 100644 --- a/src/LmdbCursor.zig +++ b/src/LmdbCursor.zig @@ -46,7 
+46,7 @@ pub fn LmdbCursor(comptime ckey: type, comptime cvalue: type) type { pub fn init(lmdb_txn: Lmdb) Self { ensureValidState(lmdb_txn); var cursor_handle: ?*Cursor = undefined; - const cursor_open_state = mdb.mdb_cursor_open(lmdb_txn.txn.?, lmdb_txn.db_handle, &cursor_handle); + const cursor_open_state = mdb.mdb_cursor_open(lmdb_txn.db_txn.?, lmdb_txn.db_handle, &cursor_handle); checkState(cursor_open_state) catch unreachable; return .{ .lmdb = lmdb_txn, .cursor_handle = cursor_handle.? }; } @@ -59,7 +59,7 @@ pub fn LmdbCursor(comptime ckey: type, comptime cvalue: type) type { pub fn updateCursor(cursor: Self) void { ensureValidState(cursor.lmdb); - const cursor_renew_state = mdb.mdb_cursor_renew(cursor.lmdb.txn.?, cursor.cursor_handle); + const cursor_renew_state = mdb.mdb_cursor_renew(cursor.lmdb.db_txn.?, cursor.cursor_handle); checkState(cursor_renew_state) catch unreachable; } @@ -136,7 +136,7 @@ pub fn LmdbCursor(comptime ckey: type, comptime cvalue: type) type { } pub fn deinit(cursor: Self) void { - if (cursor.lmdb.txn_type == .ro) { + if (cursor.lmdb.db_txn_type == .ro) { cursor.doneCursoring(); } } diff --git a/src/UTXOcache.zig b/src/UTXOcache.zig index 945b160..725cc86 100644 --- a/src/UTXOcache.zig +++ b/src/UTXOcache.zig @@ -24,6 +24,13 @@ pub fn init(db: Lmdb, arena: std.mem.Allocator) UTXOcache { } pub fn reindex(utxo_cache: UTXOcache, bc: BlockChain) void { + const txn = utxo_cache.db.startTxn(.rw); + txn.setDbOpt(UTXO_DB, .{}); + const db = txn.openDb(UTXO_DB); + defer db.commitTxns(); + + db.emptyDb(); + var buffer: [1024 * 1024]u8 = undefined; var fba = std.heap.FixedBufferAllocator.init(&buffer); const allocator = fba.allocator(); @@ -31,14 +38,6 @@ pub fn reindex(utxo_cache: UTXOcache, bc: BlockChain) void { const unspent_txos = bc.findAndMapAllTxIDsToUTxOs(); var itr = unspent_txos.iterator(); - const txn = utxo_cache.db.startTxn(); - utxo_cache.db.setDbOpt(txn, .{ .rw = true }, UTXO_DB); - - const db = utxo_cache.db.openDb(txn, 
UTXO_DB); - defer db.commitTxns(); - - db.emptyDb(); - while (itr.next()) |entry| { const tx_id: Transaction.TxID = entry.key_ptr.*; const utx_outs: []const Transaction.TxOutput = entry.value_ptr.*; @@ -55,23 +54,22 @@ pub fn findSpendableOutputs( accumulated_amount: usize, unspent_output: TxMap, } { - var buffer: [1024 * 1024]u8 = undefined; - var fba = std.heap.FixedBufferAllocator.init(&buffer); - - var unspent_output = TxMap.init(utxo_cache.arena); - - var accumulated_amount: usize = 0; - - const txn = utxo_cache.db.startTxn(); - const db = utxo_cache.db.openDb(txn, UTXO_DB); + const txn = utxo_cache.db.startTxn(.ro); + const db = txn.openDb(UTXO_DB); defer db.doneReading(); const cursor = Cursor.init(db); defer cursor.deinit(); + var buffer: [1024 * 1024]u8 = undefined; + var fba = std.heap.FixedBufferAllocator.init(&buffer); + var iterator = cursor.iterator(fba); defer iterator.deinit(); + var accumulated_amount: usize = 0; + var unspent_output = TxMap.init(utxo_cache.arena); + // const unspentTxs = self.findUTxs(pub_key_hash); var next = iterator.start(); // //The method iterates over all unspent transactions and accumulates their values. 
@@ -83,10 +81,6 @@ pub fn findSpendableOutputs( const unspent_output_txid = entry.key; accumulated_amount += output.value; unspent_output.putNoClobber(unspent_output_txid, out_index) catch unreachable; - - // if (accumulated_amount >= amount) { - // break :spendables; - // } } } } @@ -96,22 +90,20 @@ pub fn findSpendableOutputs( ///find unspent transaction outputs pub fn findUnlockableOutputs(utxo_cache: UTXOcache, pub_key_hash: Wallets.PublicKeyHash) []const Transaction.TxOutput { - var buffer: [1024 * 1024]u8 = undefined; - var fba = std.heap.FixedBufferAllocator.init(buffer[0..]); - - var utx_output_list = std.ArrayList(Transaction.TxOutput).init(utxo_cache.arena); - - const txn = utxo_cache.db.startTxn(); - const db = utxo_cache.db.openDb(txn, UTXO_DB); + const txn = utxo_cache.db.startTxn(.ro); + const db = txn.openDb(UTXO_DB); defer db.doneReading(); const cursor = Cursor.init(db); defer cursor.deinit(); + var buffer: [1024 * 1024]u8 = undefined; + var fba = std.heap.FixedBufferAllocator.init(buffer[0..]); + + var utx_output_list = std.ArrayList(Transaction.TxOutput).init(utxo_cache.arena); var iterator = cursor.iterator(fba); defer iterator.deinit(); - // const unspent_txs = self.findUTxs(pub_key_hash); var iter = iterator.start(); while (iter) |entry| : (iter = iterator.next()) { for (entry.value) |output| { @@ -124,19 +116,19 @@ pub fn findUnlockableOutputs(utxo_cache: UTXOcache, pub_key_hash: Wallets.Public } pub fn update(utxo_cache: UTXOcache, block: Block) void { - var buffer: [1024 * 1024]u8 = undefined; - var fba = std.heap.FixedBufferAllocator.init(&buffer); - const allocator = fba.allocator(); - const txn = utxo_cache.db.startTxn(); - - const db = utxo_cache.db.openDb(txn, UTXO_DB); + const txn = utxo_cache.db.startTxn(.rw); + const db = txn.openDb(UTXO_DB); defer db.commitTxns(); const cursor = Cursor.init(db); defer cursor.deinit(); + var buffer: [1024 * 1024]u8 = undefined; + var fba = std.heap.FixedBufferAllocator.init(&buffer); + const 
allocator = fba.allocator(); + for (block.transactions.items) |tx| { - if (!tx.isCoinBaseTx()) { + if (tx.isCoinBaseTx() == false) { var updated_output = std.ArrayList(Transaction.TxOutput).init(allocator); defer updated_output.deinit(); @@ -155,8 +147,7 @@ pub fn update(utxo_cache: UTXOcache, block: Block) void { //cache if (updated_output.items.len == 0) { //TODO: check that this doesn't affect the preoviously inserted outputs - cursor.print("before txn.delDupsAlloc"); - cursor.print("after txn.delDupsAlloc"); + db.del(txin.out_id[0..], .exact, {}) catch unreachable; } else { //update the cache with the new output_list of the txin.out_id db.updateAlloc(allocator, txin.out_id[0..], updated_output.items) catch unreachable; @@ -173,7 +164,8 @@ pub fn update(utxo_cache: UTXOcache, block: Block) void { } //TODO: maybe we should put value rather - // txn.putAlloc(allocator, tx.id[0..], uoutput.items) catch |key_data_already_exist| switch (key_data_already_exist) { + db.putAlloc(allocator, tx.id[0..], uoutput.items) catch unreachable; + // |key_data_already_exist| switch (key_data_already_exist) { // //when the exact same key and data pair already exist in the db // error.KeyAlreadyExist => { // const previous_outputs = txn.getAlloc([]const Transaction.TxOutput, allocator, tx.id[0..]) catch unreachable; @@ -187,14 +179,17 @@ pub fn update(utxo_cache: UTXOcache, block: Block) void { // current_value_sum += poutputs.value; // } // - // const new_output_with_all_value = Transaction.TxOutput{ .value = current_value_sum, .pub_key_hash = previous_outputs[0].pub_key_hash }; + // const new_output_with_all_value = Transaction.TxOutput{ + // .value = current_value_sum, + // .pub_key_hash = previous_outputs[0].pub_key_hash, + // }; // txn.updateAlloc(allocator, tx.id[0..], &.{new_output_with_all_value}) catch unreachable; // }, // else => unreachable, // }; - cursor.print("before txn.putDupAlloc"); - db.putDupAlloc(allocator, tx.id[0..], uoutput.items, true) catch unreachable; - 
cursor.print("after txn.putDupAlloc"); + // cursor.print("before txn.putDupAlloc"); + // db.putDupAlloc(allocator, tx.id[0..], uoutput.items, true) catch unreachable; + // cursor.print("after txn.putDupAlloc"); } } From 3cde8e3ed8c61d2da68d01169ba9a58005ed0538 Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Sat, 19 Aug 2023 21:42:54 +0000 Subject: [PATCH 19/24] fix build.zig for zig 0.11.0 --- build.zig | 4 ++-- src/Blockchain.zig | 2 +- src/Transaction.zig | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/build.zig b/build.zig index ac80905..abc22cd 100644 --- a/build.zig +++ b/build.zig @@ -48,7 +48,7 @@ pub fn build(b: *Build) void { }); exe.addModule("s2s", s2s_module); exe.linkLibrary(lmdb); - exe.addIncludePath(LMDB_PATH); + exe.addIncludePath(.{ .path = LMDB_PATH }); b.installArtifact(exe); exe.stack_size = 1024 * 1024 * 64; @@ -86,7 +86,7 @@ pub fn build(b: *Build) void { }); exe_tests.addModule("s2s", s2s_module); exe_tests.linkLibrary(lmdb); - exe_tests.addIncludePath(LMDB_PATH); + exe_tests.addIncludePath(.{ .path = LMDB_PATH }); const test_step = b.step("test", "Run unit tests"); test_step.dependOn(&lmdb.step); diff --git a/src/Blockchain.zig b/src/Blockchain.zig index 2d986e6..0eeb5be 100644 --- a/src/Blockchain.zig +++ b/src/Blockchain.zig @@ -200,9 +200,9 @@ fn newUTx(self: BlockChain, utxo_cache: UTXOcache, amount: usize, from: Wallets. 
const spendable_txns = utxo_cache.findSpendableOutputs(Wallet.getPubKeyHash(from), amount); const accumulated_amount = spendable_txns.accumulated_amount; var unspent_output = spendable_txns.unspent_output; - std.log.debug("spendable amount is {d}", .{accumulated_amount}); if (accumulated_amount < amount) { + std.log.err("spendable amount is {d}", .{accumulated_amount}); std.log.err("not enough funds to transfer RBC {d} from '{s}' to '{s}'", .{ amount, from, to }); std.process.exit(@intFromEnum(ExitCodes.insufficient_wallet_balance)); } diff --git a/src/Transaction.zig b/src/Transaction.zig index be39e10..6e93b3d 100644 --- a/src/Transaction.zig +++ b/src/Transaction.zig @@ -10,9 +10,9 @@ pub const PrevTxMap = std.AutoArrayHashMap(TxID, Transaction); const serializer = @import("serializer.zig"); -//CHECKOUT: https://arxiv.org/abs/1806.06738 The Evolution of Embedding Metadata in Blockchain Transactions +//INFO: https://arxiv.org/abs/1806.06738 The Evolution of Embedding Metadata in Blockchain Transactions //by Tooba Faisal, Nicolas Courtois, Antoaneta Serguieva -//USE_AS: further extension to this blockchain to improve anonymity and security even futher +//INFO: further extension to this blockchain to improve anonymity and security even further //Transactions just lock values with a script, which can be unlocked only by the one who locked them.
const Transaction = @This(); From 068681e16458a4e4a6362479ddb05b8b639e58f1 Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Tue, 22 Aug 2023 22:36:00 +0000 Subject: [PATCH 20/24] update s2s --- build.zig.zon | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.zig.zon b/build.zig.zon index 8b416a9..2fe0e2e 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -3,8 +3,8 @@ .version = "0.1.0", .dependencies = .{ .s2s = .{ - .url = "https://github.com/ziglibs/s2s/archive/f8dc146601fd91d372c1510e74adc2c839eaf41e.tar.gz", - .hash = "1220789c7c650cc65b820a4457b02f0e057ace507ae72220a860c36ef430481e2912", + .url = "https://github.com/ziglibs/s2s/archive/f95da7705f0ab8535d7e3e0af800116a8c4c58a2.tar.gz", + .hash = "12205ec5ab2bc745cd17c2b7e09da5b5d641f2a1ff019f342bce1586323acc572293", }, }, } From e0446bec26ef6d0e66a28a86092ce50f329c162d Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Tue, 22 Aug 2023 22:50:32 +0000 Subject: [PATCH 21/24] catch key exist error on put in update fn use gpa for testing and to help find bug --- src/UTXOcache.zig | 65 +++++++++++++++++++++-------------------------- src/main.zig | 19 +++++++------- 2 files changed, 39 insertions(+), 45 deletions(-) diff --git a/src/UTXOcache.zig b/src/UTXOcache.zig index 725cc86..d64209c 100644 --- a/src/UTXOcache.zig +++ b/src/UTXOcache.zig @@ -54,8 +54,7 @@ pub fn findSpendableOutputs( accumulated_amount: usize, unspent_output: TxMap, } { - const txn = utxo_cache.db.startTxn(.ro); - const db = txn.openDb(UTXO_DB); + const db = utxo_cache.db.startTxn(.ro).openDb(UTXO_DB); defer db.doneReading(); const cursor = Cursor.init(db); @@ -90,8 +89,7 @@ pub fn findSpendableOutputs( ///find unspent transaction outputs pub fn findUnlockableOutputs(utxo_cache: UTXOcache, pub_key_hash: Wallets.PublicKeyHash) []const Transaction.TxOutput { - const txn = utxo_cache.db.startTxn(.ro); - const db = txn.openDb(UTXO_DB); + const db = utxo_cache.db.startTxn(.ro).openDb(UTXO_DB); defer db.doneReading(); const cursor 
= Cursor.init(db); @@ -116,13 +114,9 @@ pub fn findUnlockableOutputs(utxo_cache: UTXOcache, pub_key_hash: Wallets.Public } pub fn update(utxo_cache: UTXOcache, block: Block) void { - const txn = utxo_cache.db.startTxn(.rw); - const db = txn.openDb(UTXO_DB); + const db = utxo_cache.db.startTxn(.rw).openDb(UTXO_DB); defer db.commitTxns(); - const cursor = Cursor.init(db); - defer cursor.deinit(); - var buffer: [1024 * 1024]u8 = undefined; var fba = std.heap.FixedBufferAllocator.init(&buffer); const allocator = fba.allocator(); @@ -147,7 +141,9 @@ pub fn update(utxo_cache: UTXOcache, block: Block) void { //cache if (updated_output.items.len == 0) { //TODO: check that this doesn't affect the preoviously inserted outputs - db.del(txin.out_id[0..], .exact, {}) catch unreachable; + //I suspect this fuction is the cause of the current bug so I have to test and fuzz my understanding + //hear thoroughly + db.del(txin.out_id[0..], .single, {}) catch unreachable; } else { //update the cache with the new output_list of the txin.out_id db.updateAlloc(allocator, txin.out_id[0..], updated_output.items) catch unreachable; @@ -164,32 +160,29 @@ pub fn update(utxo_cache: UTXOcache, block: Block) void { } //TODO: maybe we should put value rather - db.putAlloc(allocator, tx.id[0..], uoutput.items) catch unreachable; - // |key_data_already_exist| switch (key_data_already_exist) { - // //when the exact same key and data pair already exist in the db - // error.KeyAlreadyExist => { - // const previous_outputs = txn.getAlloc([]const Transaction.TxOutput, allocator, tx.id[0..]) catch unreachable; - // var previous_value_sum: usize = 0; - // - // for (previous_outputs) |poutputs| { - // previous_value_sum += poutputs.value; - // } - // var current_value_sum = previous_value_sum; - // for (uoutput.items) |poutputs| { - // current_value_sum += poutputs.value; - // } - // - // const new_output_with_all_value = Transaction.TxOutput{ - // .value = current_value_sum, - // .pub_key_hash = 
previous_outputs[0].pub_key_hash, - // }; - // txn.updateAlloc(allocator, tx.id[0..], &.{new_output_with_all_value}) catch unreachable; - // }, - // else => unreachable, - // }; - // cursor.print("before txn.putDupAlloc"); - // db.putDupAlloc(allocator, tx.id[0..], uoutput.items, true) catch unreachable; - // cursor.print("after txn.putDupAlloc"); + //check reference guide to make sure I'm on the right path + db.putAlloc(allocator, tx.id[0..], uoutput.items) catch |key_data_already_exist| switch (key_data_already_exist) { + //when the exact same key and data pair already exist in the db + error.KeyAlreadyExist => { + const previous_outputs = db.getAlloc([]const Transaction.TxOutput, allocator, tx.id[0..]) catch unreachable; + var previous_value_sum: usize = 0; + + for (previous_outputs) |poutputs| { + previous_value_sum += poutputs.value; + } + var current_value_sum = previous_value_sum; + for (uoutput.items) |poutputs| { + current_value_sum += poutputs.value; + } + + const new_output_with_all_value = Transaction.TxOutput{ + .value = current_value_sum, + .pub_key_hash = previous_outputs[0].pub_key_hash, + }; + db.updateAlloc(allocator, tx.id[0..], &.{new_output_with_all_value}) catch unreachable; + }, + else => unreachable, + }; } } diff --git a/src/main.zig b/src/main.zig index ce2f9bf..d3b3d4b 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3,25 +3,26 @@ const builtin = @import("builtin"); const Cli = @import("Cli.zig"); -var default_allocator = std.heap.GeneralPurposeAllocator(.{}){}; -const gpa = if (builtin.link_libc and builtin.mode != .Debug) - std.heap.raw_c_allocator -else - default_allocator.allocator(); - //TODO: improve memory usage and recycling at appropiate places. 
// set buffers in local scope based on the sizeof the struct or types stored or allocated //TODO: rethink allocations and memory management pattern used,maybe pass the allocator type so you can free memory //if the data generated at the step won't be used again or isn't useful again //TODO: update Hex formatting to use X/x pub fn main() !void { + var default_allocator = std.heap.GeneralPurposeAllocator(.{}){}; + const gpa = if (builtin.link_libc and builtin.mode != .Debug) + std.heap.raw_c_allocator + else + default_allocator.allocator(); + defer if (builtin.mode == .Debug) { _ = default_allocator.deinit(); }; - var buf: [1024 * 1024 * 7]u8 = undefined; - var fba = std.heap.FixedBufferAllocator.init(&buf); + // var buf: [1024 * 1024 * 7]u8 = undefined; + // var fba = std.heap.FixedBufferAllocator.init(&buf); + + var arena = std.heap.ArenaAllocator.init(gpa); - var arena = std.heap.ArenaAllocator.init(fba.allocator()); defer arena.deinit(); const allocator = arena.allocator(); From d3bb980a461d0db4fb22326d0e383f253e18d77e Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Sun, 27 Aug 2023 19:24:48 +0000 Subject: [PATCH 22/24] Update s2s after upstream incorporated my changes --- build.zig.zon | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.zig.zon b/build.zig.zon index 2fe0e2e..00d5a55 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -3,8 +3,8 @@ .version = "0.1.0", .dependencies = .{ .s2s = .{ - .url = "https://github.com/ziglibs/s2s/archive/f95da7705f0ab8535d7e3e0af800116a8c4c58a2.tar.gz", - .hash = "12205ec5ab2bc745cd17c2b7e09da5b5d641f2a1ff019f342bce1586323acc572293", + .url = "https://github.com/ziglibs/s2s/archive/f1d0508cc47b2af353658d4e52616a45aafa91ce.tar.gz", + .hash = "1220206c698a1890d742a8b98acb16db99275d16f2e7fe2046c3d8f2249ed267ca71", }, }, } From a6e1acba8867283d87fd4d853e54922e46390951 Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Sun, 27 Aug 2023 19:28:13 +0000 Subject: [PATCH 23/24] Fix type hash mismatch in update fn
This was caused by the tuple type not being coerced to the appropriate type before being passed to the anytype parameter This led to the anytype inferring it as the tuple type instead of my intended type --- src/UTXOcache.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/UTXOcache.zig b/src/UTXOcache.zig index d64209c..eca8840 100644 --- a/src/UTXOcache.zig +++ b/src/UTXOcache.zig @@ -179,7 +179,7 @@ pub fn update(utxo_cache: UTXOcache, block: Block) void { .value = current_value_sum, .pub_key_hash = previous_outputs[0].pub_key_hash, }; - db.updateAlloc(allocator, tx.id[0..], &.{new_output_with_all_value}) catch unreachable; + db.updateAlloc(allocator, tx.id[0..], @as([]const Transaction.TxOutput, &.{new_output_with_all_value})) catch unreachable; }, else => unreachable, }; From 237ae349861262832051b49e76f73bc17d90815b Mon Sep 17 00:00:00 2001 From: Ultra-Code Date: Mon, 28 Aug 2023 23:26:20 +0000 Subject: [PATCH 24/24] Improve signature of sendValue fn --- src/Blockchain.zig | 6 +++--- src/Cli.zig | 3 +-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/Blockchain.zig b/src/Blockchain.zig index 0eeb5be..fb2a6dc 100644 --- a/src/Blockchain.zig +++ b/src/Blockchain.zig @@ -278,9 +278,9 @@ fn verifyTx(self: BlockChain, tx: Transaction) bool { return tx.verify(prev_txs, fba.allocator()); } -pub fn sendValue(self: *BlockChain, amount: usize, from: Wallets.Address, to: Wallets.Address) void { +pub fn sendValue(self: *BlockChain, cache: UTXOcache, amount: usize, from: Wallets.Address, to: Wallets.Address) void { assert(amount > 0); - assert(!std.mem.eql(u8, &from, &to)); + assert(!std.mem.eql(u8, from[0..], to[0..])); if (!Wallet.validateAddress(from)) { std.log.err("sender address {s} is invalid", .{from}); @@ -290,7 +290,7 @@ pub fn sendValue(self: *BlockChain, amount: usize, from: Wallets.Address, to: Wa std.log.err("recipient address {s} is invalid", .{to}); std.process.exit(@intFromEnum(ExitCodes.invalid_wallet_address)); } -
const cache = UTXOcache.init(self.db, self.arena); + var new_transaction = self.newUTx(cache, amount, from, to); //The reward is just a coinbase transaction. When a mining node starts mining a new block, //it takes transactions from the queue and prepends a coinbase transaction to them. diff --git a/src/Cli.zig b/src/Cli.zig index 88b2bc0..8fb0fb2 100644 --- a/src/Cli.zig +++ b/src/Cli.zig @@ -69,9 +69,8 @@ pub fn run(self: Cli) void { var bc = BlockChain.getChain(db_env, self.arena); - bc.sendValue(amount, from_address, to_address); - const cache = UTXOcache.init(bc.db, bc.arena); + bc.sendValue(cache, amount, from_address, to_address); std.debug.print("done sending RBC {d} from '{s}' to '{s}'\n", .{ amount, from_address, to_address }); std.debug.print("'{[from_address]s}' now has a balance of RBC {[from_balance]d} and '{[to_address]s}' a balance of RBC {[to_balance]d}\n", .{