diff --git a/Cargo.lock b/Cargo.lock index a350893f..c05d9d99 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -280,6 +280,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + [[package]] name = "cpufeatures" version = "0.2.9" @@ -314,6 +320,19 @@ dependencies = [ "typenum", ] +[[package]] +name = "derive_more" +version = "0.99.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +dependencies = [ + "convert_case", + "proc-macro2", + "quote", + "rustc_version", + "syn 1.0.109", +] + [[package]] name = "diff" version = "0.1.13" @@ -351,6 +370,21 @@ dependencies = [ "winapi", ] +[[package]] +name = "dissimilar" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86e3bdc80eee6e16b2b6b0f87fbc98c04bee3455e35174c0de1a125d0688c632" + +[[package]] +name = "dlmalloc" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "203540e710bfadb90e5e29930baf5d10270cec1f43ab34f46f78b147b2de715a" +dependencies = [ + "libc", +] + [[package]] name = "either" version = "1.9.0" @@ -407,6 +441,16 @@ dependencies = [ "version_check", ] +[[package]] +name = "expect-test" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30d9eafeadd538e68fb28016364c9732d78e420b9ff8853fa5e4058861e9f8d3" +dependencies = [ + "dissimilar", + "once_cell", +] + [[package]] name = "fastrand" version = "2.0.0" @@ -473,6 +517,12 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + [[package]] name = "hashbrown" version = "0.14.0" @@ -522,6 +572,16 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + [[package]] name = "indexmap" version = "2.0.0" @@ -529,7 +589,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" dependencies = [ "equivalent", - "hashbrown", + "hashbrown 0.14.0", ] [[package]] @@ -550,6 +610,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + [[package]] name = "jobserver" version = "0.1.27" @@ -574,6 +643,12 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +[[package]] +name = "leb128" +version = "0.2.5" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" + [[package]] name = "libc" version = "0.2.147" @@ -593,7 +668,7 @@ source = "git+https://github.com/bitwalker/lit?branch=main#a4f29aba5023d66cce649 dependencies = [ "clap 2.34.0", "error-chain", - "itertools", + "itertools 0.10.5", "lazy_static", "log", "regex", @@ -716,6 +791,26 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "miden-frontend-wasm" +version = "0.1.0" +dependencies = [ + "anyhow", + "derive_more", + "expect-test", + "itertools 0.11.0", + "log", + "miden-diagnostics", + "miden-hir", + "miden-hir-type", + "sha2", + "smallvec", + "thiserror", + "wasmparser 0.107.0", + "wasmprinter", + "wat", +] + [[package]] name = "miden-hir" version = "0.1.0" @@ -798,6 +893,7 @@ dependencies = [ "human-panic", "log", "miden-diagnostics", + "miden-frontend-wasm", ] [[package]] @@ -902,7 +998,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap", + "indexmap 2.0.0", ] [[package]] @@ -1001,6 +1097,13 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" +[[package]] +name = "rust-wasm-tests" +version = "0.1.0" +dependencies = [ + "dlmalloc", +] + [[package]] name = "rustc-demangle" version = "0.1.23" @@ -1013,6 +1116,15 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + [[package]] name = "rustix" version = "0.38.11" @@ -1057,6 +1169,12 @@ dependencies = [ "once_cell", ] +[[package]] +name = "semver" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" + [[package]] name = "serde" version = "1.0.188" @@ -1086,6 +1204,17 @@ dependencies = [ "serde", ] +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + [[package]] name = "sha3" version = "0.10.8" @@ -1225,7 +1354,7 @@ version = "0.19.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" dependencies = [ - "indexmap", + "indexmap 2.0.0", "serde", "serde_spanned", "toml_datetime", @@ -1299,6 +1428,66 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasm-encoder" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41763f20eafed1399fff1afb466496d3a959f58241436cfdc17e3f5ca954de16" +dependencies = [ + "leb128", +] + +[[package]] +name = "wasmparser" +version = "0.107.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"29e3ac9b780c7dda0cac7a52a5d6d2d6707cc6e3451c9db209b6c758f40d7acb" +dependencies = [ + "indexmap 1.9.3", + "semver", +] + +[[package]] +name = "wasmparser" +version = "0.111.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad71036aada3f6b09251546e97e4f4f176dd6b41cf6fa55e7e0f65e86aec319a" +dependencies = [ + "indexmap 2.0.0", + "semver", +] + +[[package]] +name = "wasmprinter" +version = "0.2.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eeb8cc41d341939dce08ee902b50e36cd35add940f6044c94b144e8f73fe07a6" +dependencies = [ + "anyhow", + "wasmparser 0.111.0", +] + +[[package]] +name = "wast" +version = "62.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8ae06f09dbe377b889fbd620ff8fa21e1d49d1d9d364983c0cdbf9870cb9f1f" +dependencies = [ + "leb128", + "memchr", + "unicode-width", + "wasm-encoder", +] + +[[package]] +name = "wat" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "842e15861d203fb4a96d314b0751cdeaf0f6f8b35e8d81d2953af2af5e44e637" +dependencies = [ + "wast", +] + [[package]] name = "winapi" version = "0.3.9" diff --git a/Cargo.toml b/Cargo.toml index aa1b19f8..b0811b03 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,8 @@ members = [ "hir-transform", "hir-type", "tools/*", + "frontend-wasm", + "tests/rust-wasm-tests" ] [workspace.package] @@ -40,7 +42,11 @@ parking_lot = "0.12" parking_lot_core = "0.9" pretty_assertions = "1.0" rustc-hash = "1.1" -smallvec = { version = "1.9", features = ["union", "const_generics", "const_new"] } +smallvec = { version = "1.9", features = [ + "union", + "const_generics", + "const_new", +] } smallstr = { version = "0.3", features = ["union"] } thiserror = "1.0" miden-assembly = "0.7" @@ -53,6 +59,7 @@ miden-hir-symbol = { path = "hir-symbol" } miden-hir-transform = { path = "hir-transform" } miden-hir-type = { path = "hir-type" } miden-parsing = "0.1" +miden-frontend-wasm = { path = "frontend-wasm" } [profile.release] opt-level = 2 diff --git a/Makefile.toml b/Makefile.toml index 8b6ed175..2d29677b 100644 --- a/Makefile.toml +++ b/Makefile.toml @@ -163,11 +163,24 @@ category = "Test" description = "Runs all tests" dependencies = ["test-rust", "test-filecheck"] +[tasks.install-wasm-target] +category = "Test" +description = "Install wasm32-unknown-unknown target" +command = "rustup" +args = ["target", "add", "wasm32-unknown-unknown", "--toolchain", "${CARGO_MAKE_TOOLCHAIN}"] + +[tasks.install-rust-src] +category = "Test" +description = "Install rust-src component" +command = "rustup" +args = ["component", "add", "rust-src", "--toolchain", "${CARGO_MAKE_TOOLCHAIN}"] + [tasks.test-rust] category = "Test" description = "Runs tests written in Rust" command = "rustup" args = ["run", "${CARGO_MAKE_TOOLCHAIN}", "cargo", "test", "@@remove-empty(CARGO_MAKE_CARGO_VERBOSE_FLAGS)", "@@split(CARGO_MAKE_CARGO_BUILD_TEST_FLAGS, )", "${@}"] +dependencies = ["install-wasm-target", "install-rust-src"] [tasks.test-filecheck] category = "Test" diff --git a/frontend-wasm/Cargo.toml b/frontend-wasm/Cargo.toml new file mode 100644 index 00000000..0afc4c83 --- /dev/null +++ b/frontend-wasm/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "miden-frontend-wasm" +description = "Build MidenIR from Wasm" +version.workspace = true +rust-version.workspace = true +authors.workspace = true +repository.workspace = true +categories.workspace = true +keywords.workspace = true +license.workspace = true +readme.workspace = true 
+edition.workspace = true + +[dependencies] +miden-hir.workspace = true +miden-hir-type.workspace = true +miden-diagnostics.workspace = true + +thiserror.workspace = true +smallvec.workspace = true +log.workspace = true +anyhow.workspace = true +wasmparser = "0.107" +itertools = "0.11" +derive_more = "0.99" + +[dev-dependencies] +wat = "1.0.69" +expect-test = "1.4.1" +wasmprinter = "0.2.63" +sha2 = "0.10" diff --git a/frontend-wasm/src/code_translator/mod.rs b/frontend-wasm/src/code_translator/mod.rs new file mode 100644 index 00000000..53135051 --- /dev/null +++ b/frontend-wasm/src/code_translator/mod.rs @@ -0,0 +1,1048 @@ +//! This module contains the bulk of the code performing the translation between +//! WebAssembly and Miden IR. +//! +//! The translation is done in one pass, opcode by opcode. Two main data structures are used during +//! code translations: the value stack and the control stack. The value stack mimics the execution +//! of the WebAssembly stack machine: each instruction result is pushed onto the stack and +//! instruction arguments are popped off the stack. Similarly, when encountering a control flow +//! block, it is pushed onto the control stack and popped off when encountering the corresponding +//! `End`. +//! +//! Another data structure, the translation state, records information concerning unreachable code +//! status and about if inserting a return at the end of the function is necessary. +//! +//! Based on Cranelift's Wasm -> CLIF translator v11.0.0 + +use std::collections::{hash_map, HashMap}; + +use crate::error::{WasmError, WasmResult}; +use crate::func_translation_state::ControlStackFrame; +use crate::func_translation_state::{ElseData, FuncTranslationState}; +use crate::function_builder_ext::FunctionBuilderExt; +use crate::module_env::ModuleInfo; +use crate::ssa::Variable; +use crate::unsupported_diag; +use crate::wasm_types::{BlockType, GlobalIndex}; +use miden_diagnostics::{DiagnosticsHandler, SourceSpan}; +use miden_hir::cranelift_entity::packed_option::ReservedValue; +use miden_hir::Type::*; +use miden_hir::{Block, Inst, InstBuilder, Value}; +use miden_hir::{Immediate, Type}; +use wasmparser::{MemArg, Operator}; + +#[cfg(test)] +mod tests; + +#[cfg(test)] +mod tests_unsupported; + +/// Translates wasm operators into Miden IR instructions. +pub fn translate_operator( + op: &Operator, + builder: &mut FunctionBuilderExt, + state: &mut FuncTranslationState, + mod_info: &ModuleInfo, + diagnostics: &DiagnosticsHandler, + span: SourceSpan, +) -> WasmResult<()> { + if !state.reachable { + translate_unreachable_operator(&op, builder, state, mod_info, span)?; + return Ok(()); + } + + // Given that we believe the current block is reachable, the FunctionBuilderExt ought to agree. 
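+    // (`state.reachable` is the Wasm-level view of reachability; `builder.is_unreachable()` is the
+    // IR-level view. The two must agree at this point.)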
+ debug_assert!(!builder.is_unreachable()); + + match op { + /********************************** Locals **************************************** + * `get_local` and `set_local` are treated as non-SSA variables and will completely + * disappear in the Miden IR + ***********************************************************************************/ + Operator::LocalGet { local_index } => { + let val = builder.use_var(Variable::from_u32(*local_index)); + state.push1(val); + } + Operator::LocalSet { local_index } => { + let val = state.pop1(); + builder.def_var(Variable::from_u32(*local_index), val); + } + Operator::LocalTee { local_index } => { + let val = state.peek1(); + builder.def_var(Variable::from_u32(*local_index), val); + } + /********************************** Globals ****************************************/ + Operator::GlobalGet { global_index } => { + let global_index = GlobalIndex::from_u32(*global_index); + let name = mod_info.global_name(global_index); + let ty = mod_info.globals[global_index].ty.clone(); + state.push1(builder.ins().load_symbol(name, ty, span)); + } + Operator::GlobalSet { global_index } => { + let global_index = GlobalIndex::from_u32(*global_index); + let name = mod_info.global_name(global_index); + let ty = (&mod_info.globals[global_index]).ty.clone(); + let ptr = builder + .ins() + .symbol_addr(name, Ptr(ty.clone().into()), span); + let val = state.pop1(); + builder.ins().store(ptr, val, span); + } + /********************************* Stack misc **************************************/ + Operator::Drop => _ = state.pop1(), + Operator::Select => { + let (arg1, arg2, cond) = state.pop3(); + // if cond is not 0, return arg1, else return arg2 + // https://www.w3.org/TR/wasm-core-1/#-hrefsyntax-instr-parametricmathsfselect%E2%91%A0 + let cond_i1 = builder.ins().neq_imm(cond, Immediate::I32(0), span); + state.push1(builder.ins().select(cond_i1, arg1, arg2, span)); + } + Operator::Unreachable => { + builder.ins().unreachable(span); + state.reachable = false; + } + Operator::Nop => {} + /***************************** Control flow blocks *********************************/ + Operator::Block { blockty } => translate_block(blockty, builder, state, mod_info, span)?, + Operator::Loop { blockty } => translate_loop(blockty, builder, state, mod_info, span)?, + Operator::If { blockty } => translate_if(blockty, state, builder, mod_info, span)?, + Operator::Else => translate_else(state, builder, span)?, + Operator::End => translate_end(state, builder, span), + + /**************************** Branch instructions *********************************/ + Operator::Br { relative_depth } => translate_br(state, relative_depth, builder, span), + Operator::BrIf { relative_depth } => translate_br_if(*relative_depth, builder, state, span), + Operator::BrTable { targets } => translate_br_table(targets, state, builder, span)?, + Operator::Return => translate_return(state, builder, diagnostics, span)?, + /************************************ Calls ****************************************/ + Operator::Call { function_index } => { + translate_call(state, builder, function_index, mod_info, span, diagnostics)?; + } + /******************************* Memory management *********************************/ + Operator::MemoryGrow { .. } => { + let arg = state.pop1_casted(U32, builder, span); + state.push1(builder.ins().mem_grow(arg, span)); + } + Operator::MemorySize { .. 
} => { + // Return total Miden memory size + state.push1(builder.ins().i32(mem_total_pages(), span)); + } + /******************************* Load instructions ***********************************/ + Operator::I32Load8U { memarg } => { + translate_load_zext(U8, I32, memarg, state, builder, span) + } + Operator::I32Load16U { memarg } => { + translate_load_zext(U16, I32, memarg, state, builder, span) + } + Operator::I32Load8S { memarg } => { + translate_load_sext(I8, I32, memarg, state, builder, span); + } + Operator::I32Load16S { memarg } => { + translate_load_sext(I16, I32, memarg, state, builder, span); + } + Operator::I64Load8U { memarg } => { + translate_load_zext(U8, I64, memarg, state, builder, span) + } + Operator::I64Load16U { memarg } => { + translate_load_zext(U16, I64, memarg, state, builder, span) + } + Operator::I64Load8S { memarg } => { + translate_load_sext(I8, I64, memarg, state, builder, span); + } + Operator::I64Load16S { memarg } => { + translate_load_sext(I16, I64, memarg, state, builder, span); + } + Operator::I64Load32S { memarg } => { + translate_load_sext(I32, I64, memarg, state, builder, span) + } + Operator::I64Load32U { memarg } => { + translate_load_zext(U32, I64, memarg, state, builder, span) + } + Operator::I32Load { memarg } => translate_load(I32, memarg, state, builder, span), + Operator::I64Load { memarg } => translate_load(I64, memarg, state, builder, span), + /****************************** Store instructions ***********************************/ + Operator::I32Store { memarg } => translate_store(I32, memarg, state, builder, span), + Operator::I64Store { memarg } => translate_store(I64, memarg, state, builder, span), + Operator::I32Store8 { memarg } | Operator::I64Store8 { memarg } => { + translate_store(U8, memarg, state, builder, span); + } + Operator::I32Store16 { memarg } | Operator::I64Store16 { memarg } => { + translate_store(U16, memarg, state, builder, span); + } + Operator::I64Store32 { memarg } => translate_store(U32, memarg, state, builder, span), + /****************************** Nullary Operators **********************************/ + Operator::I32Const { value } => state.push1(builder.ins().i32(*value, span)), + Operator::I64Const { value } => state.push1(builder.ins().i64(*value, span)), + + /******************************* Unary Operators *************************************/ + Operator::I32Clz | Operator::I32Ctz => { + // Temporary workaround to allow further code translations + // until clz and ctz are available in Miden IR + // TODO: use the `clz` and `ctz` instructions when they are available + let val = state.pop1(); + state.push1(builder.ins().popcnt(val, span)); + } + Operator::I32Popcnt | Operator::I64Popcnt => { + let val = state.pop1(); + state.push1(builder.ins().popcnt(val, span)); + } + Operator::I64ExtendI32S => { + let val = state.pop1(); + state.push1(builder.ins().sext(val, I64, span)); + } + Operator::I64ExtendI32U => { + let val = state.pop1(); + state.push1(builder.ins().zext(val, I64, span)); + } + Operator::I32WrapI64 => { + let val = state.pop1(); + state.push1(builder.ins().trunc(val, I32, span)); + } + /****************************** Binary Operators ************************************/ + Operator::I32Add | Operator::I64Add => { + let (arg1, arg2) = state.pop2(); + // wrapping because the result is mod 2^N + // https://www.w3.org/TR/wasm-core-1/#op-iadd + state.push1(builder.ins().add_wrapping(arg1, arg2, span)); + } + Operator::I32And | Operator::I64And => { + let (arg1, arg2) = state.pop2(); + 
state.push1(builder.ins().band(arg1, arg2, span)); + } + Operator::I32Or | Operator::I64Or => { + let (arg1, arg2) = state.pop2(); + state.push1(builder.ins().bor(arg1, arg2, span)); + } + Operator::I32Xor | Operator::I64Xor => { + let (arg1, arg2) = state.pop2(); + state.push1(builder.ins().bxor(arg1, arg2, span)); + } + Operator::I32Shl | Operator::I64Shl => { + let (arg1, arg2) = state.pop2(); + state.push1(builder.ins().shl(arg1, arg2, span)); + } + Operator::I32ShrU => { + let (arg1, arg2) = state.pop2_casted(U32, builder, span); + let val = builder.ins().shr(arg1, arg2, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I64ShrU => { + let (arg1, arg2) = state.pop2_casted(U64, builder, span); + let val = builder.ins().shr(arg1, arg2, span); + state.push1(builder.ins().cast(val, I64, span)); + } + Operator::I32ShrS | Operator::I64ShrS => { + let (arg1, arg2) = state.pop2(); + state.push1(builder.ins().shr(arg1, arg2, span)); + } + Operator::I32Rotl | Operator::I64Rotl => { + let (arg1, arg2) = state.pop2(); + state.push1(builder.ins().shl_wrapping(arg1, arg2, span)); + } + Operator::I32Rotr | Operator::I64Rotr => { + let (arg1, arg2) = state.pop2(); + state.push1(builder.ins().shr_wrapping(arg1, arg2, span)); + } + Operator::I32Sub | Operator::I64Sub => { + let (arg1, arg2) = state.pop2(); + // wrapping because the result is mod 2^N + // https://www.w3.org/TR/wasm-core-1/#op-isub + state.push1(builder.ins().sub_wrapping(arg1, arg2, span)); + } + Operator::F64Sub => { + let (arg1, arg2) = state.pop2(); + state.push1(builder.ins().sub(arg1, arg2, span)); + } + Operator::I32Mul | Operator::I64Mul => { + let (arg1, arg2) = state.pop2(); + // wrapping because the result is mod 2^N + // https://www.w3.org/TR/wasm-core-1/#op-imul + state.push1(builder.ins().mul_wrapping(arg1, arg2, span)); + } + Operator::I32DivS | Operator::I64DivS => { + let (arg1, arg2) = state.pop2(); + state.push1(builder.ins().div(arg1, arg2, span)); + } + Operator::I32DivU => { + let (arg1, arg2) = state.pop2_casted(U32, builder, span); + let val = builder.ins().div(arg1, arg2, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I64DivU => { + let (arg1, arg2) = state.pop2_casted(U64, builder, span); + let val = builder.ins().div(arg1, arg2, span); + state.push1(builder.ins().cast(val, I64, span)); + } + Operator::I32RemU => { + let (arg1, arg2) = state.pop2_casted(U32, builder, span); + let val = builder.ins().r#mod(arg1, arg2, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I64RemU => { + let (arg1, arg2) = state.pop2_casted(U64, builder, span); + let val = builder.ins().r#mod(arg1, arg2, span); + state.push1(builder.ins().cast(val, I64, span)); + } + Operator::I32RemS | Operator::I64RemS => { + let (arg1, arg2) = state.pop2(); + state.push1(builder.ins().r#mod(arg1, arg2, span)); + } + /**************************** Comparison Operators **********************************/ + Operator::I32LtU => { + let (arg0, arg1) = state.pop2_casted(U32, builder, span); + let val = builder.ins().lt(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I64LtU => { + let (arg0, arg1) = state.pop2_casted(U64, builder, span); + let val = builder.ins().lt(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I32LtS => { + let (arg0, arg1) = state.pop2(); + let val = builder.ins().lt(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I64LtS => { + let (arg0, arg1) = 
state.pop2(); + let val = builder.ins().lt(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I32LeU => { + let (arg0, arg1) = state.pop2_casted(U32, builder, span); + let val = builder.ins().lte(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I64LeU => { + let (arg0, arg1) = state.pop2_casted(U64, builder, span); + let val = builder.ins().lte(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I32LeS => { + let (arg0, arg1) = state.pop2(); + let val = builder.ins().lte(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I64LeS => { + let (arg0, arg1) = state.pop2(); + let val = builder.ins().lte(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I32GtU => { + let (arg0, arg1) = state.pop2_casted(U32, builder, span); + let val = builder.ins().gt(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I64GtU => { + let (arg0, arg1) = state.pop2_casted(U64, builder, span); + let val = builder.ins().gt(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I32GtS | Operator::I64GtS => { + let (arg0, arg1) = state.pop2(); + let val = builder.ins().gt(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I32GeU => { + let (arg0, arg1) = state.pop2_casted(U32, builder, span); + let val = builder.ins().gte(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I64GeU => { + let (arg0, arg1) = state.pop2_casted(U64, builder, span); + let val = builder.ins().gte(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I32GeS => { + let (arg0, arg1) = state.pop2(); + let val = builder.ins().gte(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I64GeS => { + let (arg0, arg1) = state.pop2(); + let val = builder.ins().gte(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I32Eqz => { + let arg = state.pop1(); + let val = builder.ins().eq_imm(arg, Immediate::I32(0), span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I64Eqz => { + let arg = state.pop1(); + let val = builder.ins().eq_imm(arg, Immediate::I64(0), span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I32Eq => { + let (arg0, arg1) = state.pop2(); + let val = builder.ins().eq(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I64Eq => { + let (arg0, arg1) = state.pop2(); + let val = builder.ins().eq(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I32Ne => { + let (arg0, arg1) = state.pop2(); + let val = builder.ins().neq(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + Operator::I64Ne => { + let (arg0, arg1) = state.pop2(); + let val = builder.ins().neq(arg0, arg1, span); + state.push1(builder.ins().cast(val, I32, span)); + } + op => { + unsupported_diag!(diagnostics, "Wasm op {:?} is not supported", op); + } + }; + Ok(()) +} + +fn translate_br_table( + targets: &wasmparser::BrTable<'_>, + state: &mut FuncTranslationState, + builder: &mut FunctionBuilderExt, + span: SourceSpan, +) -> Result<(), WasmError> { + let default = targets.default(); + let mut min_depth = default; + for depth in targets.targets() { + let depth = depth?; + if depth < min_depth { + min_depth = depth; + } + } + let jump_args_count = { + 
let i = state.control_stack.len() - 1 - (min_depth as usize); + let min_depth_frame = &state.control_stack[i]; + if min_depth_frame.is_loop() { + min_depth_frame.num_param_values() + } else { + min_depth_frame.num_return_values() + } + }; + let val = state.pop1(); + let val = if builder.data_flow_graph().value_type(val) != &U32 { + builder.ins().cast(val, U32, span) + } else { + val + }; + let mut data = Vec::with_capacity(targets.len() as usize); + if jump_args_count == 0 { + // No jump arguments + for depth in targets.targets() { + let depth = depth?; + let block = { + let i = state.control_stack.len() - 1 - (depth as usize); + let frame = &mut state.control_stack[i]; + frame.set_branched_to_exit(); + frame.br_destination() + }; + data.push((depth, block)); + } + let def_block = { + let i = state.control_stack.len() - 1 - (default as usize); + let frame = &mut state.control_stack[i]; + frame.set_branched_to_exit(); + frame.br_destination() + }; + builder.ins().switch(val, data, def_block, span); + } else { + // Here we have jump arguments, but Midens's switch op doesn't support them + // We then proceed to split the edges going out of the br_table + let return_count = jump_args_count; + let mut dest_block_sequence = vec![]; + let mut dest_block_map = HashMap::new(); + for depth in targets.targets() { + let depth = depth?; + let branch_block = match dest_block_map.entry(depth as usize) { + hash_map::Entry::Occupied(entry) => *entry.get(), + hash_map::Entry::Vacant(entry) => { + let block = builder.create_block(); + dest_block_sequence.push((depth as usize, block)); + *entry.insert(block) + } + }; + data.push((depth, branch_block)); + } + let default_branch_block = match dest_block_map.entry(default as usize) { + hash_map::Entry::Occupied(entry) => *entry.get(), + hash_map::Entry::Vacant(entry) => { + let block = builder.create_block(); + dest_block_sequence.push((default as usize, block)); + *entry.insert(block) + } + }; + builder.ins().switch(val, data, default_branch_block, span); + for (depth, dest_block) in dest_block_sequence { + builder.switch_to_block(dest_block); + builder.seal_block(dest_block); + let real_dest_block = { + let i = state.control_stack.len() - 1 - depth; + let frame = &mut state.control_stack[i]; + frame.set_branched_to_exit(); + frame.br_destination() + }; + let destination_args = state.peekn_mut(return_count); + builder.ins().br(real_dest_block, destination_args, span); + } + state.popn(return_count); + } + state.reachable = false; + Ok(()) +} + +/// Return the total Miden VM memory size (2^32 addresses * word (4 felts) bytes) in 64KB pages +const fn mem_total_pages() -> i32 { + const FELT_BYTES: u64 = 4; // felts are effectively 32 bits + const WORD_BYTES: u64 = 4 * FELT_BYTES; // 4 felts per word + const PAGE_SIZE: u64 = 64 * 1024; + const MEMORY_SIZE: u64 = u32::MAX as u64 * WORD_BYTES; + (MEMORY_SIZE / PAGE_SIZE) as i32 +} + +fn translate_load( + ptr_ty: Type, + memarg: &MemArg, + state: &mut FuncTranslationState, + builder: &mut FunctionBuilderExt, + span: SourceSpan, +) { + let addr_int = state.pop1(); + let addr = prepare_addr(addr_int, &ptr_ty, memarg, builder, span); + state.push1(builder.ins().load(addr, span)); +} + +fn translate_load_sext( + ptr_ty: Type, + sext_ty: Type, + memarg: &MemArg, + state: &mut FuncTranslationState, + builder: &mut FunctionBuilderExt, + span: SourceSpan, +) { + let addr_int = state.pop1(); + let addr = prepare_addr(addr_int, &ptr_ty, memarg, builder, span); + let val = builder.ins().load(addr, span); + let sext_val = 
builder.ins().sext(val, sext_ty, span); + state.push1(sext_val); +} + +fn translate_load_zext( + ptr_ty: Type, + zext_ty: Type, + memarg: &MemArg, + state: &mut FuncTranslationState, + builder: &mut FunctionBuilderExt, + span: SourceSpan, +) { + assert!(ptr_ty.is_unsigned_integer()); + let addr_int = state.pop1(); + let addr = prepare_addr(addr_int, &ptr_ty, memarg, builder, span); + let val = builder.ins().load(addr, span); + let sext_val = builder.ins().zext(val, zext_ty, span); + state.push1(sext_val); +} + +fn translate_store( + ptr_ty: Type, + memarg: &MemArg, + state: &mut FuncTranslationState, + builder: &mut FunctionBuilderExt, + span: SourceSpan, +) { + let (addr_int, val) = state.pop2(); + let val_ty = builder.data_flow_graph().value_type(val); + let arg = if ptr_ty != *val_ty { + builder.ins().trunc(val, ptr_ty.clone(), span) + } else { + val + }; + let addr = prepare_addr(addr_int, &ptr_ty, memarg, builder, span); + builder.ins().store(addr, arg, span); +} + +fn prepare_addr( + addr_int: Value, + ptr_ty: &Type, + memarg: &MemArg, + builder: &mut FunctionBuilderExt, + span: SourceSpan, +) -> Value { + let addr_int_ty = builder.data_flow_graph().value_type(addr_int); + let addr_u32 = if *addr_int_ty == U32 { + addr_int + } else { + builder.ins().cast(addr_int, U32, span) + }; + let full_addr_int = if memarg.offset != 0 { + builder + .ins() + .add_imm(addr_u32, Immediate::U32(memarg.offset as u32), span) + } else { + addr_u32 + }; + builder + .ins() + .inttoptr(full_addr_int, Type::Ptr(ptr_ty.clone().into()), span) +} + +fn translate_call( + state: &mut FuncTranslationState, + builder: &mut FunctionBuilderExt, + function_index: &u32, + mod_info: &ModuleInfo, + span: SourceSpan, + diagnostics: &DiagnosticsHandler, +) -> WasmResult<()> { + let (fident, num_args) = state.get_direct_func( + builder.data_flow_graph_mut(), + *function_index, + mod_info, + diagnostics, + )?; + let args = state.peekn_mut(num_args); + let call = builder.ins().call(fident, &args, span); + let inst_results = builder.inst_results(call); + state.popn(num_args); + state.pushn(inst_results); + Ok(()) +} + +fn translate_return( + state: &mut FuncTranslationState, + builder: &mut FunctionBuilderExt, + diagnostics: &DiagnosticsHandler, + span: SourceSpan, +) -> WasmResult<()> { + let return_count = { + let frame = &mut state.control_stack[0]; + frame.num_return_values() + }; + { + let return_args = match return_count { + 0 => None, + 1 => Some(state.peekn_mut(return_count).first().unwrap().clone()), + _ => { + unsupported_diag!(diagnostics, "Multiple values are not supported"); + } + }; + + builder.ins().ret(return_args, span); + } + state.popn(return_count); + state.reachable = false; + Ok(()) +} + +fn translate_br( + state: &mut FuncTranslationState, + relative_depth: &u32, + builder: &mut FunctionBuilderExt, + span: SourceSpan, +) { + let i = state.control_stack.len() - 1 - (*relative_depth as usize); + let (return_count, br_destination) = { + let frame = &mut state.control_stack[i]; + // We signal that all the code that follows until the next End is unreachable + frame.set_branched_to_exit(); + let return_count = if frame.is_loop() { + frame.num_param_values() + } else { + frame.num_return_values() + }; + (return_count, frame.br_destination()) + }; + let destination_args = state.peekn_mut(return_count); + builder.ins().br(br_destination, &destination_args, span); + state.popn(return_count); + state.reachable = false; +} + +fn translate_br_if( + relative_depth: u32, + builder: &mut FunctionBuilderExt, + state: 
&mut FuncTranslationState, + span: SourceSpan, +) { + let cond = state.pop1(); + let (br_destination, inputs) = translate_br_if_args(relative_depth, state); + let next_block = builder.create_block(); + let then_dest = br_destination; + let then_args = inputs; + let else_dest = next_block; + let else_args = &[]; + let cond_i1 = builder.ins().neq_imm(cond, Immediate::I32(0), span); + builder + .ins() + .cond_br(cond_i1, then_dest, then_args, else_dest, else_args, span); + builder.seal_block(next_block); // The only predecessor is the current block. + builder.switch_to_block(next_block); +} + +fn translate_br_if_args( + relative_depth: u32, + state: &mut FuncTranslationState, +) -> (Block, &mut [Value]) { + let i = state.control_stack.len() - 1 - (relative_depth as usize); + let (return_count, br_destination) = { + let frame = &mut state.control_stack[i]; + // The values returned by the branch are still available for the reachable + // code that comes after it + frame.set_branched_to_exit(); + let return_count = if frame.is_loop() { + frame.num_param_values() + } else { + frame.num_return_values() + }; + (return_count, frame.br_destination()) + }; + let inputs = state.peekn_mut(return_count); + (br_destination, inputs) +} + +fn translate_block( + blockty: &wasmparser::BlockType, + builder: &mut FunctionBuilderExt, + state: &mut FuncTranslationState, + module_info: &ModuleInfo, + span: SourceSpan, +) -> WasmResult<()> { + let blockty = BlockType::from_wasm(blockty, module_info)?; + let next = builder.create_block_with_params(blockty.results.clone(), span); + state.push_block(next, blockty.params.len(), blockty.results.len()); + Ok(()) +} + +fn translate_end( + state: &mut FuncTranslationState, + builder: &mut FunctionBuilderExt, + span: SourceSpan, +) { + // The `End` instruction pops the last control frame from the control stack, seals + // the destination block (since `br` instructions targeting it only appear inside the + // block and have already been translated) and modify the value stack to use the + // possible `Block`'s arguments values. + let frame = state.control_stack.pop().unwrap(); + let next_block = frame.following_code(); + let return_count = frame.num_return_values(); + let return_args = state.peekn_mut(return_count); + + builder.ins().br(next_block, return_args, span); + + // You might expect that if we just finished an `if` block that + // didn't have a corresponding `else` block, then we would clean + // up our duplicate set of parameters that we pushed earlier + // right here. However, we don't have to explicitly do that, + // since we truncate the stack back to the original height + // below. + + builder.switch_to_block(next_block); + builder.seal_block(next_block); + + // If it is a loop we also have to seal the body loop block + if let ControlStackFrame::Loop { header, .. } = frame { + builder.seal_block(header) + } + + frame.truncate_value_stack_to_original_size(&mut state.stack); + state + .stack + .extend_from_slice(builder.block_params(next_block)); +} + +fn translate_else( + state: &mut FuncTranslationState, + builder: &mut FunctionBuilderExt, + span: SourceSpan, +) -> WasmResult<()> { + let i = state.control_stack.len() - 1; + Ok(match state.control_stack[i] { + ControlStackFrame::If { + ref else_data, + head_is_reachable, + ref mut consequent_ends_reachable, + num_return_values, + ref blocktype, + destination, + .. + } => { + // We finished the consequent, so record its final + // reachability state. 
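+                // (`consequent_ends_reachable` starts as `None` and is only filled in when an `else`
+                // is encountered, so it must still be unset for this frame.)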
+ debug_assert!(consequent_ends_reachable.is_none()); + *consequent_ends_reachable = Some(state.reachable); + + if head_is_reachable { + // We have a branch from the head of the `if` to the `else`. + state.reachable = true; + + // Ensure we have a block for the `else` block (it may have + // already been pre-allocated, see `ElseData` for details). + let else_block = match *else_data { + ElseData::NoElse { + branch_inst, + placeholder, + } => { + debug_assert_eq!(blocktype.params.len(), num_return_values); + let else_block = + builder.create_block_with_params(blocktype.params.clone(), span); + let params_len = blocktype.params.len(); + builder.ins().br(destination, state.peekn(params_len), span); + state.popn(params_len); + + builder.change_jump_destination(branch_inst, placeholder, else_block); + builder.seal_block(else_block); + else_block + } + ElseData::WithElse { else_block } => { + builder + .ins() + .br(destination, state.peekn(num_return_values), span); + state.popn(num_return_values); + else_block + } + }; + + // You might be expecting that we push the parameters for this + // `else` block here, something like this: + // + // state.pushn(&control_stack_frame.params); + // + // We don't do that because they are already on the top of the stack + // for us: we pushed the parameters twice when we saw the initial + // `if` so that we wouldn't have to save the parameters in the + // `ControlStackFrame` as another `Vec` allocation. + + builder.switch_to_block(else_block); + + // We don't bother updating the control frame's `ElseData` + // to `WithElse` because nothing else will read it. + } + } + _ => unreachable!(), + }) +} + +fn translate_if( + blockty: &wasmparser::BlockType, + state: &mut FuncTranslationState, + builder: &mut FunctionBuilderExt, + module_info: &ModuleInfo, + span: SourceSpan, +) -> WasmResult<()> { + let blockty = BlockType::from_wasm(blockty, module_info)?; + let cond = state.pop1(); + let cond_i1 = builder.ins().neq_imm(cond, Immediate::I32(0), span); + let next_block = builder.create_block(); + let (destination, else_data) = if blockty.params.eq(&blockty.results) { + // It is possible there is no `else` block, so we will only + // allocate a block for it if/when we find the `else`. For now, + // we if the condition isn't true, then we jump directly to the + // destination block following the whole `if...end`. If we do end + // up discovering an `else`, then we will allocate a block for it + // and go back and patch the jump. + let destination = builder.create_block_with_params(blockty.results.clone(), span); + let branch_inst = builder.ins().cond_br( + cond_i1, + next_block, + &[], + destination, + state.peekn(blockty.params.len()), + span, + ); + ( + destination, + ElseData::NoElse { + branch_inst, + placeholder: destination, + }, + ) + } else { + // The `if` type signature is not valid without an `else` block, + // so we eagerly allocate the `else` block here. 
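+        // `destination` is the block following the whole `if...end` and carries the `if` results,
+        // while `else_block` carries the `if` parameters.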
+ let destination = builder.create_block_with_params(blockty.results.clone(), span); + let else_block = builder.create_block_with_params(blockty.params.clone(), span); + builder.ins().cond_br( + cond_i1, + next_block, + &[], + else_block, + state.peekn(blockty.params.len()), + span, + ); + builder.seal_block(else_block); + (destination, ElseData::WithElse { else_block }) + }; + builder.seal_block(next_block); + builder.switch_to_block(next_block); + state.push_if( + destination, + else_data, + blockty.params.len(), + blockty.results.len(), + blockty, + ); + Ok(()) +} + +fn translate_loop( + blockty: &wasmparser::BlockType, + builder: &mut FunctionBuilderExt, + state: &mut FuncTranslationState, + module_info: &ModuleInfo, + span: SourceSpan, +) -> WasmResult<()> { + let blockty = BlockType::from_wasm(blockty, module_info)?; + let loop_body = builder.create_block_with_params(blockty.params.clone(), span); + let next = builder.create_block_with_params(blockty.results.clone(), span); + builder + .ins() + .br(loop_body, state.peekn(blockty.params.len()), span); + state.push_loop(loop_body, next, blockty.params.len(), blockty.results.len()); + state.popn(blockty.params.len()); + state + .stack + .extend_from_slice(builder.block_params(loop_body)); + builder.switch_to_block(loop_body); + Ok(()) +} + +/// Deals with a Wasm instruction located in an unreachable portion of the code. Most of them +/// are dropped but special ones like `End` or `Else` signal the potential end of the unreachable +/// portion so the translation state must be updated accordingly. +fn translate_unreachable_operator( + op: &Operator, + builder: &mut FunctionBuilderExt, + state: &mut FuncTranslationState, + module_info: &ModuleInfo, + span: SourceSpan, +) -> WasmResult<()> { + debug_assert!(!state.reachable); + match *op { + Operator::If { blockty } => { + // Push a placeholder control stack entry. The if isn't reachable, + // so we don't have any branches anywhere. + let blockty = BlockType::from_wasm(&blockty, module_info)?; + state.push_if( + Block::reserved_value(), + ElseData::NoElse { + branch_inst: Inst::reserved_value(), + placeholder: Block::reserved_value(), + }, + 0, + 0, + blockty, + ); + } + Operator::Loop { blockty: _ } | Operator::Block { blockty: _ } => { + state.push_block(Block::reserved_value(), 0, 0); + } + Operator::Else => { + let i = state.control_stack.len() - 1; + match state.control_stack[i] { + ControlStackFrame::If { + ref else_data, + head_is_reachable, + ref mut consequent_ends_reachable, + ref blocktype, + .. + } => { + debug_assert!(consequent_ends_reachable.is_none()); + *consequent_ends_reachable = Some(state.reachable); + + if head_is_reachable { + // We have a branch from the head of the `if` to the `else`. + state.reachable = true; + + let else_block = match *else_data { + ElseData::NoElse { + branch_inst, + placeholder, + } => { + let else_block = builder + .create_block_with_params(blocktype.params.clone(), span); + let frame = state.control_stack.last().unwrap(); + frame.truncate_value_stack_to_else_params(&mut state.stack); + + // We change the target of the branch instruction. 
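+                                // In the `NoElse` case the `cond_br` else-edge was pointed at
+                                // `placeholder` (the destination block after the `if`); retarget
+                                // it to the newly created `else_block`.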
+ builder.change_jump_destination( + branch_inst, + placeholder, + else_block, + ); + builder.seal_block(else_block); + else_block + } + ElseData::WithElse { else_block } => { + let frame = state.control_stack.last().unwrap(); + frame.truncate_value_stack_to_else_params(&mut state.stack); + else_block + } + }; + + builder.switch_to_block(else_block); + + // Again, no need to push the parameters for the `else`, + // since we already did when we saw the original `if`. See + // the comment for translating `Operator::Else` in + // `translate_operator` for details. + } + } + _ => unreachable!(), + } + } + Operator::End => { + let stack = &mut state.stack; + let control_stack = &mut state.control_stack; + let frame = control_stack.pop().unwrap(); + + // Pop unused parameters from stack. + frame.truncate_value_stack_to_original_size(stack); + + let reachable_anyway = match frame { + // If it is a loop we also have to seal the body loop block + ControlStackFrame::Loop { header, .. } => { + builder.seal_block(header); + // And loops can't have branches to the end. + false + } + // If we never set `consequent_ends_reachable` then that means + // we are finishing the consequent now, and there was no + // `else`. Whether the following block is reachable depends only + // on if the head was reachable. + ControlStackFrame::If { + head_is_reachable, + consequent_ends_reachable: None, + .. + } => head_is_reachable, + // Since we are only in this function when in unreachable code, + // we know that the alternative just ended unreachable. Whether + // the following block is reachable depends on if the consequent + // ended reachable or not. + ControlStackFrame::If { + head_is_reachable, + consequent_ends_reachable: Some(consequent_ends_reachable), + .. + } => head_is_reachable && consequent_ends_reachable, + // All other control constructs are already handled. + _ => false, + }; + + if frame.exit_is_branched_to() || reachable_anyway { + builder.switch_to_block(frame.following_code()); + builder.seal_block(frame.following_code()); + + // And add the return values of the block but only if the next block is reachable + // (which corresponds to testing if the stack depth is 1) + stack.extend_from_slice(builder.block_params(frame.following_code())); + state.reachable = true; + } + } + _ => { + // We don't translate because this is unreachable code + } + } + + Ok(()) +} diff --git a/frontend-wasm/src/code_translator/tests.rs b/frontend-wasm/src/code_translator/tests.rs new file mode 100644 index 00000000..08be2e35 --- /dev/null +++ b/frontend-wasm/src/code_translator/tests.rs @@ -0,0 +1,1735 @@ +use expect_test::expect; +use miden_hir::write_instruction; +use miden_hir::Ident; + +use crate::test_utils::test_diagnostics; +use crate::translate_module; +use crate::WasmTranslationConfig; + +/// Compiles the given Wasm code to Miden IR and checks the IR generated. +fn check_ir(wat: &str, expected_ir: expect_test::Expect) { + let wasm = wat::parse_str(wat).unwrap(); + let diagnostics = test_diagnostics(); + let module = translate_module(&wasm, &WasmTranslationConfig::default(), &diagnostics).unwrap(); + expected_ir.assert_eq(&module.to_string()); +} + +/// Check IR generated for a Wasm op(s). +/// Wrap Wasm ops in a function and check the IR generated for the entry block of that function. 
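+/// For example, the `i32_const` test below passes `i32.const 1` followed by `drop` and expects a
+/// single `v0 = const.i32 1 : i32` instruction in the entry block.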
+fn check_op(wat_op: &str, expected_ir: expect_test::Expect) { + let wat = format!( + r#" + (module + (memory (;0;) 16384) + (func $test_wrapper + {wat_op} + ) + )"#, + ); + let wasm = wat::parse_str(wat).unwrap(); + let diagnostics = test_diagnostics(); + let module = translate_module(&wasm, &WasmTranslationConfig::default(), &diagnostics).unwrap(); + let func = module.function(Ident::from("test_wrapper")).unwrap(); + // let fref = module.get_funcref_by_name("test_wrapper").unwrap(); + // let func = module.get_function(fref).unwrap(); + let entry_block = func.dfg.entry_block(); + // let entry_block_data = func.dfg.block_data(entry_block); + let entry_block_data = func.dfg.block(entry_block); + let mut w = String::new(); + // print instructions up to the branch to the exit block + for inst in entry_block_data + .insts() + .take_while(|inst| !func.dfg[*inst].opcode().is_branch()) + { + write_instruction(&mut w, func, inst, 0).unwrap(); + } + expected_ir.assert_eq(&w); +} + +#[test] +fn module() { + check_ir( + r#" + (module + (func $main + i32.const 0 + drop + ) + ) + "#, + expect![[r#" + module noname + + pub fn main() { + block0: + v0 = const.i32 0 : i32 + br block1 + + block1: + ret + } + "#]], + ); +} + +#[test] +fn locals() { + check_ir( + r#" + (module + (func $main (local i32) + i32.const 1 + local.set 0 + local.get 0 + drop + ) + ) + "#, + expect![[r#" + module noname + + pub fn main() { + block0: + v0 = const.i32 0 : i32 + v1 = const.i32 1 : i32 + br block1 + + block1: + ret + } + "#]], + ); +} + +#[test] +fn locals_inter_block() { + check_ir( + r#" + (module + (func $main (result i32) (local i32) + block + i32.const 3 + local.set 0 + end + block + local.get 0 + i32.const 5 + i32.add + local.set 0 + end + i32.const 7 + local.get 0 + i32.add + ) + ) + "#, + expect![[r#" + module noname + + pub fn main() -> i32 { + block0: + v1 = const.i32 0 : i32 + v2 = const.i32 3 : i32 + br block2 + + block1(v0: i32): + ret v0 + + block2: + v3 = const.i32 5 : i32 + v4 = add v2, v3 : i32 + br block3 + + block3: + v5 = const.i32 7 : i32 + v6 = add v5, v4 : i32 + br block1(v6) + } + "#]], + ); +} + +#[test] +fn func_call() { + check_ir( + r#" + (module + (func $add (param i32 i32) (result i32) + local.get 0 + local.get 1 + i32.add + ) + (func $main (result i32) + i32.const 3 + i32.const 5 + call $add + ) + ) + "#, + expect![[r#" + module noname + + pub fn add(i32, i32) -> i32 { + block0(v0: i32, v1: i32): + v3 = add v0, v1 : i32 + br block1(v3) + + block1(v2: i32): + ret v2 + } + + pub fn main() -> i32 { + block0: + v1 = const.i32 3 : i32 + v2 = const.i32 5 : i32 + v3 = call noname::add(v1, v2) : i32 + br block1(v3) + + block1(v0: i32): + ret v0 + } + "#]], + ); +} + +#[test] +fn br() { + check_ir( + r#" + (module + (func $main (result i32) (local i32) + block + i32.const 3 + local.set 0 + br 0 + end + local.get 0 + ) + ) + "#, + expect![[r#" + module noname + + pub fn main() -> i32 { + block0: + v1 = const.i32 0 : i32 + v2 = const.i32 3 : i32 + br block2 + + block1(v0: i32): + ret v0 + + block2: + br block1(v2) + } + "#]], + ); +} + +#[test] +fn loop_br_if() { + // sum the decreasing numbers from 2 to 0, i.e. 
2 + 1 + 0, then exit the loop + check_ir( + r#" + (module + (func $main (result i32) (local i32 i32) + i32.const 2 + local.set 0 + loop + local.get 0 + local.get 1 + i32.add + local.set 1 + local.get 0 + i32.const 1 + i32.sub + local.tee 0 + br_if 0 + end + local.get 1 + ) + ) + "#, + expect![[r#" + module noname + + pub fn main() -> i32 { + block0: + v1 = const.i32 0 : i32 + v2 = const.i32 2 : i32 + br block2(v2, v1) + + block1(v0: i32): + ret v0 + + block2(v3: i32, v4: i32): + v5 = add v3, v4 : i32 + v6 = const.i32 1 : i32 + v7 = sub v3, v6 : i32 + v8 = neq v7, 0 : i1 + condbr v8, block2(v7, v5), block4 + + block3: + br block1(v5) + + block4: + br block3 + } + "#]], + ); +} + +#[test] +fn if_then_else() { + check_ir( + r#" + (module + (func $main (result i32) + i32.const 2 + if (result i32) + i32.const 3 + else + i32.const 5 + end + ) + ) + "#, + expect![[r#" + module noname + + pub fn main() -> i32 { + block0: + v1 = const.i32 2 : i32 + v2 = neq v1, 0 : i1 + condbr v2, block2, block4 + + block1(v0: i32): + ret v0 + + block2: + v4 = const.i32 3 : i32 + br block3(v4) + + block3(v3: i32): + br block1(v3) + + block4: + v5 = const.i32 5 : i32 + br block3(v5) + } + "#]], + ); +} + +#[test] +fn global_var() { + check_ir( + r#" + (module + (global $MyGlobalVal (mut i32) i32.const 42) + (func $main + global.get $MyGlobalVal + i32.const 9 + i32.add + global.set $MyGlobalVal + ) + ) + "#, + expect![[r#" + module noname + global external MyGlobalVal : i32 = 0x0000002a { id = gvar0 }; + + + pub fn main() { + block0: + v0 = global.load (@MyGlobalVal) as *mut i8 : i32 + v1 = const.i32 9 : i32 + v2 = add v0, v1 : i32 + v3 = global.symbol @MyGlobalVal : *mut i32 + store v3, v2 + br block1 + + block1: + ret + } + "#]], + ); +} + +#[test] +fn memory_grow() { + check_op( + r#" + i32.const 1 + memory.grow + drop + "#, + expect![[r#" + v0 = const.i32 1 : i32 + v1 = cast v0 : u32 + v2 = memory.grow v1 : i32 + "#]], + ) +} + +#[test] +fn memory_size() { + check_op( + r#" + memory.size + drop + "#, + expect![[r#" + v0 = const.i32 1048575 : i32 + "#]], + ) +} + +#[test] +fn i32_load8_u() { + check_op( + r#" + i32.const 1024 + i32.load8_u + drop + "#, + expect![[r#" + v0 = const.i32 1024 : i32 + v1 = cast v0 : u32 + v2 = inttoptr v1 : *mut u8 + v3 = load v2 : u8 + v4 = zext v3 : i32 + "#]], + ) +} + +#[test] +fn i32_load16_u() { + check_op( + r#" + i32.const 1024 + i32.load16_u + drop + "#, + expect![[r#" + v0 = const.i32 1024 : i32 + v1 = cast v0 : u32 + v2 = inttoptr v1 : *mut u16 + v3 = load v2 : u16 + v4 = zext v3 : i32 + "#]], + ) +} + +#[test] +fn i32_load8_s() { + check_op( + r#" + i32.const 1024 + i32.load8_s + drop + "#, + expect![[r#" + v0 = const.i32 1024 : i32 + v1 = cast v0 : u32 + v2 = inttoptr v1 : *mut i8 + v3 = load v2 : i8 + v4 = sext v3 : i32 + "#]], + ) +} + +#[test] +fn i32_load16_s() { + check_op( + r#" + i32.const 1024 + i32.load16_s + drop + "#, + expect![[r#" + v0 = const.i32 1024 : i32 + v1 = cast v0 : u32 + v2 = inttoptr v1 : *mut i16 + v3 = load v2 : i16 + v4 = sext v3 : i32 + "#]], + ) +} + +#[test] +fn i64_load8_u() { + check_op( + r#" + i32.const 1024 + i64.load8_u + drop + "#, + expect![[r#" + v0 = const.i32 1024 : i32 + v1 = cast v0 : u32 + v2 = inttoptr v1 : *mut u8 + v3 = load v2 : u8 + v4 = zext v3 : i64 + "#]], + ) +} + +#[test] +fn i64_load16_u() { + check_op( + r#" + i32.const 1024 + i64.load16_u + drop + "#, + expect![[r#" + v0 = const.i32 1024 : i32 + v1 = cast v0 : u32 + v2 = inttoptr v1 : *mut u16 + v3 = load v2 : u16 + v4 = zext v3 : i64 + "#]], + ) +} + +#[test] +fn 
i64_load8_s() { + check_op( + r#" + i32.const 1024 + i64.load8_s + drop + "#, + expect![[r#" + v0 = const.i32 1024 : i32 + v1 = cast v0 : u32 + v2 = inttoptr v1 : *mut i8 + v3 = load v2 : i8 + v4 = sext v3 : i64 + "#]], + ) +} + +#[test] +fn i64_load16_s() { + check_op( + r#" + i32.const 1024 + i64.load16_s + drop + "#, + expect![[r#" + v0 = const.i32 1024 : i32 + v1 = cast v0 : u32 + v2 = inttoptr v1 : *mut i16 + v3 = load v2 : i16 + v4 = sext v3 : i64 + "#]], + ) +} + +#[test] +fn i64_load32_s() { + check_op( + r#" + i32.const 1024 + i64.load32_s + drop + "#, + expect![[r#" + v0 = const.i32 1024 : i32 + v1 = cast v0 : u32 + v2 = inttoptr v1 : *mut i32 + v3 = load v2 : i32 + v4 = sext v3 : i64 + "#]], + ) +} + +#[test] +fn i64_load32_u() { + check_op( + r#" + i32.const 1024 + i64.load32_u + drop + "#, + expect![[r#" + v0 = const.i32 1024 : i32 + v1 = cast v0 : u32 + v2 = inttoptr v1 : *mut u32 + v3 = load v2 : u32 + v4 = zext v3 : i64 + "#]], + ) +} + +#[test] +fn i32_load() { + check_op( + r#" + i32.const 1024 + i32.load + drop + "#, + expect![[r#" + v0 = const.i32 1024 : i32 + v1 = cast v0 : u32 + v2 = inttoptr v1 : *mut i32 + v3 = load v2 : i32 + "#]], + ) +} + +#[test] +fn i64_load() { + check_op( + r#" + i32.const 1024 + i64.load + drop + "#, + expect![[r#" + v0 = const.i32 1024 : i32 + v1 = cast v0 : u32 + v2 = inttoptr v1 : *mut i64 + v3 = load v2 : i64 + "#]], + ) +} + +#[test] +fn i32_store() { + check_op( + r#" + i32.const 1024 + i32.const 1 + i32.store + "#, + expect![[r#" + v0 = const.i32 1024 : i32 + v1 = const.i32 1 : i32 + v2 = cast v0 : u32 + v3 = inttoptr v2 : *mut i32 + store v3, v1 + "#]], + ) +} + +#[test] +fn i64_store() { + check_op( + r#" + i32.const 1024 + i64.const 1 + i64.store + "#, + expect![[r#" + v0 = const.i32 1024 : i32 + v1 = const.i64 1 : i64 + v2 = cast v0 : u32 + v3 = inttoptr v2 : *mut i64 + store v3, v1 + "#]], + ) +} + +#[test] +fn i32_store8() { + check_op( + r#" + i32.const 1024 + i32.const 1 + i32.store8 + "#, + expect![[r#" + v0 = const.i32 1024 : i32 + v1 = const.i32 1 : i32 + v2 = trunc v1 : u8 + v3 = cast v0 : u32 + v4 = inttoptr v3 : *mut u8 + store v4, v2 + "#]], + ) +} + +#[test] +fn i32_store16() { + check_op( + r#" + i32.const 1024 + i32.const 1 + i32.store16 + "#, + expect![[r#" + v0 = const.i32 1024 : i32 + v1 = const.i32 1 : i32 + v2 = trunc v1 : u16 + v3 = cast v0 : u32 + v4 = inttoptr v3 : *mut u16 + store v4, v2 + "#]], + ) +} + +#[test] +fn i64_store32() { + check_op( + r#" + i32.const 1024 + i64.const 1 + i64.store32 + "#, + expect![[r#" + v0 = const.i32 1024 : i32 + v1 = const.i64 1 : i64 + v2 = trunc v1 : u32 + v3 = cast v0 : u32 + v4 = inttoptr v3 : *mut u32 + store v4, v2 + "#]], + ) +} + +#[test] +fn i32_const() { + check_op( + r#" + i32.const 1 + drop + "#, + expect![[r#" + v0 = const.i32 1 : i32 + "#]], + ) +} + +#[test] +fn i64_const() { + check_op( + r#" + i64.const 1 + drop + "#, + expect![[r#" + v0 = const.i64 1 : i64 + "#]], + ) +} + +#[test] +fn i32_popcnt() { + check_op( + r#" + i32.const 1 + i32.popcnt + drop + "#, + expect![[r#" + v0 = const.i32 1 : i32 + v1 = popcnt v0 : i32 + "#]], + ) +} + +#[test] +fn i64_extend_i32_s() { + check_op( + r#" + i32.const 1 + i64.extend_i32_s + drop + "#, + expect![[r#" + v0 = const.i32 1 : i32 + v1 = sext v0 : i64 + "#]], + ) +} + +#[test] +fn i64_extend_i32_u() { + check_op( + r#" + i32.const 1 + i64.extend_i32_u + drop + "#, + expect![[r#" + v0 = const.i32 1 : i32 + v1 = zext v0 : i64 + "#]], + ) +} + +#[test] +fn i32_wrap_i64() { + check_op( + r#" + i64.const 1 + i32.wrap_i64 + 
drop + "#, + expect![[r#" + v0 = const.i64 1 : i64 + v1 = trunc v0 : i32 + "#]], + ) +} + +#[test] +fn i32_add() { + check_op( + r#" + i32.const 3 + i32.const 1 + i32.add + drop + "#, + expect![[r#" + v0 = const.i32 3 : i32 + v1 = const.i32 1 : i32 + v2 = add v0, v1 : i32 + "#]], + ) +} + +#[test] +fn i64_add() { + check_op( + r#" + i64.const 3 + i64.const 1 + i64.add + drop + "#, + expect![[r#" + v0 = const.i64 3 : i64 + v1 = const.i64 1 : i64 + v2 = add v0, v1 : i64 + "#]], + ) +} + +#[test] +fn i32_and() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.and + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = band v0, v1 : i32 + "#]], + ) +} + +#[test] +fn i64_and() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.and + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = band v0, v1 : i64 + "#]], + ) +} + +#[test] +fn i32_or() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.or + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = bor v0, v1 : i32 + "#]], + ) +} + +#[test] +fn i64_or() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.or + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = bor v0, v1 : i64 + "#]], + ) +} + +#[test] +fn i32_sub() { + check_op( + r#" + i32.const 3 + i32.const 1 + i32.sub + drop + "#, + expect![[r#" + v0 = const.i32 3 : i32 + v1 = const.i32 1 : i32 + v2 = sub v0, v1 : i32 + "#]], + ) +} + +#[test] +fn i64_sub() { + check_op( + r#" + i64.const 3 + i64.const 1 + i64.sub + drop + "#, + expect![[r#" + v0 = const.i64 3 : i64 + v1 = const.i64 1 : i64 + v2 = sub v0, v1 : i64 + "#]], + ) +} + +#[test] +fn i32_xor() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.xor + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = bxor v0, v1 : i32 + "#]], + ) +} + +#[test] +fn i64_xor() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.xor + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = bxor v0, v1 : i64 + "#]], + ) +} + +#[test] +fn i32_shl() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.shl + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = shl v0, v1 : i32 + "#]], + ) +} + +#[test] +fn i64_shl() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.shl + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = shl v0, v1 : i64 + "#]], + ) +} + +#[test] +fn i32_shr_u() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.shr_u + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = cast v0 : u32 + v3 = cast v1 : u32 + v4 = shr v2, v3 : u32 + v5 = cast v4 : i32 + "#]], + ) +} + +#[test] +fn i64_shr_u() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.shr_u + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = cast v0 : u64 + v3 = cast v1 : u64 + v4 = shr v2, v3 : u64 + v5 = cast v4 : i64 + "#]], + ) +} + +#[test] +fn i32_shr_s() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.shr_s + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = shr v0, v1 : i32 + "#]], + ) +} + +#[test] +fn i64_shr_s() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.shr_s + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = shr v0, v1 : i64 + "#]], + ) +} + +#[test] +fn i32_rotl() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.rotl + drop + "#, + expect![[r#" + v0 = 
const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = shl v0, v1 : i32 + "#]], + ) +} + +#[test] +fn i64_rotl() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.rotl + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = shl v0, v1 : i64 + "#]], + ) +} + +#[test] +fn i32_rotr() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.rotr + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = shr v0, v1 : i32 + "#]], + ) +} + +#[test] +fn i64_rotr() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.rotr + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = shr v0, v1 : i64 + "#]], + ) +} + +#[test] +fn i32_mul() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.mul + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = mul v0, v1 : i32 + "#]], + ) +} + +#[test] +fn i64_mul() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.mul + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = mul v0, v1 : i64 + "#]], + ) +} + +#[test] +fn i32_div_u() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.div_u + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = cast v0 : u32 + v3 = cast v1 : u32 + v4 = div v2, v3 : u32 + v5 = cast v4 : i32 + "#]], + ) +} + +#[test] +fn i64_div_u() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.div_u + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = cast v0 : u64 + v3 = cast v1 : u64 + v4 = div v2, v3 : u64 + v5 = cast v4 : i64 + "#]], + ) +} + +#[test] +fn i32_div_s() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.div_s + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = div v0, v1 : i32 + "#]], + ) +} + +#[test] +fn i64_div_s() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.div_s + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = div v0, v1 : i64 + "#]], + ) +} + +#[test] +fn i32_rem_u() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.rem_u + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = cast v0 : u32 + v3 = cast v1 : u32 + v4 = mod v2, v3 : u32 + v5 = cast v4 : i32 + "#]], + ) +} + +#[test] +fn i64_rem_u() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.rem_u + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = cast v0 : u64 + v3 = cast v1 : u64 + v4 = mod v2, v3 : u64 + v5 = cast v4 : i64 + "#]], + ) +} + +#[test] +fn i32_rem_s() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.rem_s + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = mod v0, v1 : i32 + "#]], + ) +} + +#[test] +fn i64_rem_s() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.rem_s + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = mod v0, v1 : i64 + "#]], + ) +} + +#[test] +fn i32_lt_u() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.lt_u + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = cast v0 : u32 + v3 = cast v1 : u32 + v4 = lt v2, v3 : i1 + v5 = cast v4 : i32 + "#]], + ) +} + +#[test] +fn i64_lt_u() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.lt_u + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = cast v0 : u64 + v3 = cast v1 : u64 + v4 = lt v2, v3 : i1 + v5 = cast v4 : i32 + "#]], + ) +} + +#[test] +fn i32_lt_s() { + check_op( + r#" + 
i32.const 2 + i32.const 1 + i32.lt_s + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = lt v0, v1 : i1 + v3 = cast v2 : i32 + "#]], + ) +} + +#[test] +fn i64_lt_s() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.lt_s + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = lt v0, v1 : i1 + v3 = cast v2 : i32 + "#]], + ) +} + +#[test] +fn i32_le_u() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.le_u + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = cast v0 : u32 + v3 = cast v1 : u32 + v4 = lte v2, v3 : i1 + v5 = cast v4 : i32 + "#]], + ) +} + +#[test] +fn i64_le_u() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.le_u + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = cast v0 : u64 + v3 = cast v1 : u64 + v4 = lte v2, v3 : i1 + v5 = cast v4 : i32 + "#]], + ) +} + +#[test] +fn i32_le_s() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.le_s + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = lte v0, v1 : i1 + v3 = cast v2 : i32 + "#]], + ) +} + +#[test] +fn i64_le_s() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.le_s + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = lte v0, v1 : i1 + v3 = cast v2 : i32 + "#]], + ) +} + +#[test] +fn i32_gt_u() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.gt_u + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = cast v0 : u32 + v3 = cast v1 : u32 + v4 = gt v2, v3 : i1 + v5 = cast v4 : i32 + "#]], + ) +} + +#[test] +fn i64_gt_u() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.gt_u + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = cast v0 : u64 + v3 = cast v1 : u64 + v4 = gt v2, v3 : i1 + v5 = cast v4 : i32 + "#]], + ) +} + +#[test] +fn i32_gt_s() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.gt_s + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = gt v0, v1 : i1 + v3 = cast v2 : i32 + "#]], + ) +} + +#[test] +fn i64_gt_s() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.gt_s + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = gt v0, v1 : i1 + v3 = cast v2 : i32 + "#]], + ) +} + +#[test] +fn i32_ge_u() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.ge_u + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = cast v0 : u32 + v3 = cast v1 : u32 + v4 = gte v2, v3 : i1 + v5 = cast v4 : i32 + "#]], + ) +} + +#[test] +fn i64_ge_u() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.ge_u + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = cast v0 : u64 + v3 = cast v1 : u64 + v4 = gte v2, v3 : i1 + v5 = cast v4 : i32 + "#]], + ) +} + +#[test] +fn i32_ge_s() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.ge_s + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = gte v0, v1 : i1 + v3 = cast v2 : i32 + "#]], + ) +} + +#[test] +fn i64_ge_s() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.ge_s + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = gte v0, v1 : i1 + v3 = cast v2 : i32 + "#]], + ) +} + +#[test] +fn i32_eqz() { + check_op( + r#" + i32.const 2 + i32.eqz + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = eq v0, 0 : i1 + v2 = cast v1 : i32 + "#]], + ) +} + +#[test] +fn i64_eqz() { + check_op( + r#" + i64.const 2 + i64.eqz 
+ drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = eq v0, 0 : i1 + v2 = cast v1 : i32 + "#]], + ) +} + +#[test] +fn i32_eq() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.eq + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = eq v0, v1 : i1 + v3 = cast v2 : i32 + "#]], + ) +} + +#[test] +fn i64_eq() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.eq + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = eq v0, v1 : i1 + v3 = cast v2 : i32 + "#]], + ) +} + +#[test] +fn i32_ne() { + check_op( + r#" + i32.const 2 + i32.const 1 + i32.ne + drop + "#, + expect![[r#" + v0 = const.i32 2 : i32 + v1 = const.i32 1 : i32 + v2 = neq v0, v1 : i1 + v3 = cast v2 : i32 + "#]], + ) +} + +#[test] +fn i64_ne() { + check_op( + r#" + i64.const 2 + i64.const 1 + i64.ne + drop + "#, + expect![[r#" + v0 = const.i64 2 : i64 + v1 = const.i64 1 : i64 + v2 = neq v0, v1 : i1 + v3 = cast v2 : i32 + "#]], + ) +} + +#[test] +fn select_i32() { + check_op( + r#" + i64.const 3 + i64.const 7 + i32.const 42 + select + drop + "#, + expect![[r#" + v0 = const.i64 3 : i64 + v1 = const.i64 7 : i64 + v2 = const.i32 42 : i32 + v3 = neq v2, 0 : i1 + v4 = select v3, v0, v1 : i64 + "#]], + ) +} diff --git a/frontend-wasm/src/code_translator/tests_unsupported.rs b/frontend-wasm/src/code_translator/tests_unsupported.rs new file mode 100644 index 00000000..cb0047ea --- /dev/null +++ b/frontend-wasm/src/code_translator/tests_unsupported.rs @@ -0,0 +1,190 @@ +use miden_diagnostics::SourceSpan; +use miden_hir::CallConv; +use miden_hir::Linkage; +use miden_hir::ModuleBuilder; +use miden_hir::Signature; + +use wasmparser::MemArg; +use wasmparser::Operator; +use wasmparser::Operator::*; + +use crate::func_translation_state::FuncTranslationState; +use crate::function_builder_ext::FunctionBuilderContext; +use crate::function_builder_ext::FunctionBuilderExt; +use crate::module_env::ModuleInfo; +use crate::test_utils::test_diagnostics; + +use super::translate_operator; + +fn check_unsupported(op: &Operator) { + let diagnostics = test_diagnostics(); + let mod_builder = ModuleBuilder::new("noname"); + let module_info = ModuleInfo::new(mod_builder.name()); + let mut module_builder = ModuleBuilder::new(module_info.name.as_str()); + let sig = Signature { + params: vec![], + results: vec![], + cc: CallConv::SystemV, + linkage: Linkage::External, + }; + let mut module_func_builder = module_builder.function("func_name", sig.clone()).unwrap(); + let mut fb_ctx = FunctionBuilderContext::new(); + let mut state = FuncTranslationState::new(); + let mut builder_ext = FunctionBuilderExt::new(&mut module_func_builder, &mut fb_ctx); + let result = translate_operator( + op, + &mut builder_ext, + &mut state, + &module_info, + &diagnostics, + SourceSpan::default(), + ); + assert!( + result.is_err(), + "Expected unsupported op error for {:?}", + op + ); + assert_eq!( + result.unwrap_err().to_string(), + format!("Unsupported Wasm: Wasm op {:?} is not supported", op) + ); + assert!( + diagnostics.has_errors(), + "Expected diagnostics to have errors" + ); +} + +// Wasm Spec v1.0 +const UNSUPPORTED_WASM_V1_OPS: &[Operator] = &[ + CallIndirect { + type_index: 0, + table_index: 0, + table_byte: 0, + }, + /****************************** Memory Operators ************************************/ + F32Load { + memarg: MemArg { + align: 0, + max_align: 0, + offset: 0, + memory: 0, + }, + }, + F64Load { + memarg: MemArg { + align: 0, + max_align: 0, + offset: 0, + memory: 0, + }, + }, + F32Store { + 
memarg: MemArg { + align: 0, + max_align: 0, + offset: 0, + memory: 0, + }, + }, + F64Store { + memarg: MemArg { + align: 0, + max_align: 0, + offset: 0, + memory: 0, + }, + }, + /****************************** Nullary Operators ************************************/ + + // Cannot construct since Ieee32 fields are private + // F32Const { + // value: Ieee32(0), + // }, + // F64Const { + // value: Ieee32(0), + // }, + + /****************************** Unary Operators ************************************/ + // I32Ctz, + // I32Clz, + I64Ctz, + I64Clz, + F32Sqrt, + F64Sqrt, + F32Ceil, + F64Ceil, + F32Floor, + F64Floor, + F32Trunc, + F64Trunc, + F32Nearest, + F64Nearest, + F32Abs, + F64Abs, + F32Neg, + F64Neg, + F64ConvertI64U, + F64ConvertI32U, + F64ConvertI64S, + F64ConvertI32S, + F32ConvertI64S, + F32ConvertI32S, + F32ConvertI64U, + F32ConvertI32U, + F64PromoteF32, + F32DemoteF64, + I64TruncF64S, + I64TruncF32S, + I32TruncF64S, + I32TruncF32S, + I64TruncF64U, + I64TruncF32U, + I32TruncF64U, + I32TruncF32U, + I64TruncSatF64S, + I64TruncSatF32S, + I32TruncSatF64S, + I32TruncSatF32S, + I64TruncSatF64U, + I64TruncSatF32U, + I32TruncSatF64U, + I32TruncSatF32U, + F32ReinterpretI32, + F64ReinterpretI64, + I32ReinterpretF32, + I64ReinterpretF64, + /****************************** Binary Operators ************************************/ + F32Add, + F32Sub, + F32Mul, + F32Div, + F32Min, + F32Max, + F32Copysign, + F64Copysign, + F64Add, + F64Mul, + F64Div, + F64Min, + F64Max, + /**************************** Comparison Operators **********************************/ + F32Eq, + F32Ne, + F32Gt, + F32Ge, + F32Le, + F32Lt, + F64Eq, + F64Ne, + F64Gt, + F64Ge, + F64Le, + F64Lt, +]; + +#[test] +fn error_for_unsupported_wasm_v1_ops() { + for op in UNSUPPORTED_WASM_V1_OPS.iter() { + check_unsupported(op); + } +} diff --git a/frontend-wasm/src/config.rs b/frontend-wasm/src/config.rs new file mode 100644 index 00000000..b28a657a --- /dev/null +++ b/frontend-wasm/src/config.rs @@ -0,0 +1,3 @@ +/// Configuration for the WASM translation. +#[derive(Debug, Default)] +pub struct WasmTranslationConfig {} diff --git a/frontend-wasm/src/error.rs b/frontend-wasm/src/error.rs new file mode 100644 index 00000000..c6c520d8 --- /dev/null +++ b/frontend-wasm/src/error.rs @@ -0,0 +1,70 @@ +use miden_diagnostics::Diagnostic; +use miden_diagnostics::ToDiagnostic; +use miden_hir::SymbolConflictError; +use thiserror::Error; + +/// A WebAssembly translation error. +/// +/// When a WebAssembly function can't be translated, one of these error codes will be returned +/// to describe the failure. +#[derive(Error, Debug)] +pub enum WasmError { + /// The input WebAssembly code is invalid. + /// + /// This error code is used by a WebAssembly translator when it encounters invalid WebAssembly + /// code. This should never happen for validated WebAssembly code. + #[error("Invalid input WebAssembly code at offset {offset}: {message}")] + InvalidWebAssembly { + /// A string describing the validation error. + message: String, + /// The bytecode offset where the error occurred. + offset: usize, + }, + + /// A feature used by the WebAssembly code is not supported by the Miden IR. + #[error("Unsupported Wasm: {0}")] + Unsupported(String), + + #[error("Too many declared functions in the module")] + FuncNumLimitExceeded, + + #[error("{0}")] + SymbolConflictError(#[from] SymbolConflictError), + #[error("Failed to build function. 
See diagnostics for details")] + InvalidFunctionError, + + #[error("Unexpected: {0}")] + Unexpected(String), +} + +impl From for WasmError { + fn from(e: wasmparser::BinaryReaderError) -> Self { + Self::InvalidWebAssembly { + message: e.message().into(), + offset: e.offset(), + } + } +} + +impl ToDiagnostic for WasmError { + fn to_diagnostic(self) -> Diagnostic { + Diagnostic::error().with_message(self.to_string()) + } +} + +/// A convenient alias for a `Result` that uses `WasmError` as the error type. +pub type WasmResult = Result; + +/// Emit diagnostics and return an `Err(WasmError::Unsupported(msg))` where `msg` the string built by calling `format!` +/// on the arguments to this macro. +#[macro_export] +macro_rules! unsupported_diag { + ($diagnostics:expr, $($arg:tt)*) => { + let message = format!($($arg)*); + $diagnostics + .diagnostic(miden_diagnostics::Severity::Error) + .with_message(message.clone()) + .emit(); + return Err($crate::error::WasmError::Unsupported(message)); + } +} diff --git a/frontend-wasm/src/func_translation_state.rs b/frontend-wasm/src/func_translation_state.rs new file mode 100644 index 00000000..725f0d76 --- /dev/null +++ b/frontend-wasm/src/func_translation_state.rs @@ -0,0 +1,485 @@ +//! WebAssembly module and function translation state. +//! +//! The `FuncTranslationState` struct defined in this module is used to keep track of the WebAssembly +//! value and control stacks during the translation of a single function. +//! +//! Based on Cranelift's Wasm -> CLIF translator v11.0.0 + +use crate::{ + error::{WasmError, WasmResult}, + function_builder_ext::FunctionBuilderExt, + module_env::ModuleInfo, + translation_utils::sig_from_funct_type, + wasm_types::{BlockType, FuncIndex}, +}; +use miden_diagnostics::{DiagnosticsHandler, SourceSpan}; +use miden_hir::{ + cranelift_entity::EntityRef, Block, CallConv, DataFlowGraph, FunctionIdent, Ident, Inst, + InstBuilder, Linkage, Signature, Symbol, Value, +}; +use miden_hir_type::Type; +use std::{ + collections::{hash_map::Entry::Occupied, hash_map::Entry::Vacant, HashMap}, + vec::Vec, +}; + +/// Information about the presence of an associated `else` for an `if`, or the +/// lack thereof. +#[derive(Debug)] +pub enum ElseData { + /// The `if` does not already have an `else` block. + /// + /// This doesn't mean that it will never have an `else`, just that we + /// haven't seen it yet. + NoElse { + /// If we discover that we need an `else` block, this is the jump + /// instruction that needs to be fixed up to point to the new `else` + /// block rather than the destination block after the `if...end`. + branch_inst: Inst, + + /// The placeholder block we're replacing. + placeholder: Block, + }, + + /// We have already allocated an `else` block. + /// + /// Usually we don't know whether we will hit an `if .. end` or an `if + /// .. else .. end`, but sometimes we can tell based on the block's type + /// signature that the signature is not valid if there isn't an `else`. In + /// these cases, we pre-allocate the `else` block. + WithElse { + /// This is the `else` block. + else_block: Block, + }, +} + +/// A control stack frame can be an `if`, a `block` or a `loop`, each one having the following +/// fields: +/// +/// - `destination`: reference to the `Block` that will hold the code after the control block; +/// - `num_return_values`: number of values returned by the control block; +/// - `original_stack_size`: size of the value stack at the beginning of the control block. 
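The `unsupported_diag!` macro above expands to an early `return`, so it can only be invoked where the enclosing function returns a `WasmResult`. A hypothetical in-crate helper, not part of this diff, sketching the intended call pattern (the message text mirrors the one asserted in `tests_unsupported.rs`):

```rust
use miden_diagnostics::DiagnosticsHandler;
use wasmparser::Operator;

use crate::error::WasmResult;

// Hypothetical helper: emit an error diagnostic and bail out of the enclosing function
// with `WasmError::Unsupported`. The macro performs the `return`, so nothing follows it.
fn sketch_reject_op(op: &Operator<'_>, diagnostics: &DiagnosticsHandler) -> WasmResult<()> {
    crate::unsupported_diag!(diagnostics, "Wasm op {:?} is not supported", op);
}
```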
+/// +/// The `loop` frame has a `header` field that references the `Block` that contains the beginning +/// of the body of the loop. +#[derive(Debug)] +pub enum ControlStackFrame { + If { + destination: Block, + else_data: ElseData, + num_param_values: usize, + num_return_values: usize, + original_stack_size: usize, + exit_is_branched_to: bool, + blocktype: BlockType, + /// Was the head of the `if` reachable? + head_is_reachable: bool, + /// What was the reachability at the end of the consequent? + /// + /// This is `None` until we're finished translating the consequent, and + /// is set to `Some` either by hitting an `else` when we will begin + /// translating the alternative, or by hitting an `end` in which case + /// there is no alternative. + consequent_ends_reachable: Option, + // Note: no need for `alternative_ends_reachable` because that is just + // `state.reachable` when we hit the `end` in the `if .. else .. end`. + }, + Block { + destination: Block, + num_param_values: usize, + num_return_values: usize, + original_stack_size: usize, + exit_is_branched_to: bool, + }, + Loop { + destination: Block, + header: Block, + num_param_values: usize, + num_return_values: usize, + original_stack_size: usize, + }, +} + +/// Helper methods for the control stack objects. +impl ControlStackFrame { + pub fn num_return_values(&self) -> usize { + match *self { + Self::If { + num_return_values, .. + } + | Self::Block { + num_return_values, .. + } + | Self::Loop { + num_return_values, .. + } => num_return_values, + } + } + pub fn num_param_values(&self) -> usize { + match *self { + Self::If { + num_param_values, .. + } + | Self::Block { + num_param_values, .. + } + | Self::Loop { + num_param_values, .. + } => num_param_values, + } + } + pub fn following_code(&self) -> Block { + match *self { + Self::If { destination, .. } + | Self::Block { destination, .. } + | Self::Loop { destination, .. } => destination, + } + } + pub fn br_destination(&self) -> Block { + match *self { + Self::If { destination, .. } | Self::Block { destination, .. } => destination, + Self::Loop { header, .. } => header, + } + } + /// Private helper. Use `truncate_value_stack_to_else_params()` or + /// `truncate_value_stack_to_original_size()` to restore value-stack state. + fn original_stack_size(&self) -> usize { + match *self { + Self::If { + original_stack_size, + .. + } + | Self::Block { + original_stack_size, + .. + } + | Self::Loop { + original_stack_size, + .. + } => original_stack_size, + } + } + pub fn is_loop(&self) -> bool { + match *self { + Self::If { .. } | Self::Block { .. } => false, + Self::Loop { .. } => true, + } + } + + pub fn exit_is_branched_to(&self) -> bool { + match *self { + Self::If { + exit_is_branched_to, + .. + } + | Self::Block { + exit_is_branched_to, + .. + } => exit_is_branched_to, + Self::Loop { .. } => false, + } + } + + pub fn set_branched_to_exit(&mut self) { + match *self { + Self::If { + ref mut exit_is_branched_to, + .. + } + | Self::Block { + ref mut exit_is_branched_to, + .. + } => *exit_is_branched_to = true, + Self::Loop { .. } => {} + } + } + + /// Pop values from the value stack so that it is left at the + /// input-parameters to an else-block. + pub fn truncate_value_stack_to_else_params(&self, stack: &mut Vec) { + debug_assert!(matches!(self, &ControlStackFrame::If { .. })); + stack.truncate(self.original_stack_size()); + } + + /// Pop values from the value stack so that it is left at the state it was + /// before this control-flow frame. 
+ pub fn truncate_value_stack_to_original_size(&self, stack: &mut Vec) { + // The "If" frame pushes its parameters twice, so they're available to the else block + // (see also `FuncTranslationState::push_if`). + // Yet, the original_stack_size member accounts for them only once, so that the else + // block can see the same number of parameters as the consequent block. As a matter of + // fact, we need to substract an extra number of parameter values for if blocks. + let num_duplicated_params = match self { + &ControlStackFrame::If { + num_param_values, .. + } => { + debug_assert!(num_param_values <= self.original_stack_size()); + num_param_values + } + _ => 0, + }; + stack.truncate(self.original_stack_size() - num_duplicated_params); + } +} + +/// Contains information passed along during a function's translation and that records: +/// +/// - The current value and control stacks. +/// - The depth of the two unreachable control blocks stacks, that are manipulated when translating +/// unreachable code; +pub struct FuncTranslationState { + /// A stack of values corresponding to the active values in the input wasm function at this + /// point. + pub(crate) stack: Vec, + /// A stack of active control flow operations at this point in the input wasm function. + pub(crate) control_stack: Vec, + /// Is the current translation state still reachable? This is false when translating operators + /// like End, Return, or Unreachable. + pub(crate) reachable: bool, + + // Imported and local functions that have been created by + // `FuncEnvironment::make_direct_func()`. + // Stores both the function reference and the number of WebAssembly arguments + functions: HashMap, +} + +impl FuncTranslationState { + /// Construct a new, empty, `FuncTranslationState` + pub(crate) fn new() -> Self { + Self { + stack: Vec::new(), + control_stack: Vec::new(), + reachable: true, + functions: HashMap::new(), + } + } + + fn clear(&mut self) { + debug_assert!(self.stack.is_empty()); + debug_assert!(self.control_stack.is_empty()); + self.reachable = true; + self.functions.clear(); + } + + /// Initialize the state for compiling a function with the given signature. + /// + /// This resets the state to containing only a single block representing the whole function. + /// The exit block is the last block in the function which will contain the return instruction. + pub(crate) fn initialize(&mut self, sig: &Signature, exit_block: Block) { + self.clear(); + self.push_block(exit_block, 0, sig.results().len()); + } + + /// Push a value. + pub(crate) fn push1(&mut self, val: Value) { + self.stack.push(val); + } + + /// Push multiple values. + pub(crate) fn pushn(&mut self, vals: &[Value]) { + self.stack.extend_from_slice(vals); + } + + /// Pop one value. + pub(crate) fn pop1(&mut self) -> Value { + self.stack + .pop() + .expect("attempted to pop a value from an empty stack") + } + + /// Pop one value and cast it to the specified type. + pub(crate) fn pop1_casted( + &mut self, + ty: Type, + builder: &mut FunctionBuilderExt, + span: SourceSpan, + ) -> Value { + let val = self + .stack + .pop() + .expect("attempted to pop a value from an empty stack"); + builder.ins().cast(val, ty.clone(), span) + } + + /// Peek at the top of the stack without popping it. + pub(crate) fn peek1(&self) -> Value { + *self + .stack + .last() + .expect("attempted to peek at a value on an empty stack") + } + + /// Pop two values. Return them in the order they were pushed. 
+ pub(crate) fn pop2(&mut self) -> (Value, Value) { + let v2 = self.stack.pop().unwrap(); + let v1 = self.stack.pop().unwrap(); + (v1, v2) + } + + /// Pop two values. Cast them to the specified type. Return them in the order they were pushed. + pub(crate) fn pop2_casted( + &mut self, + ty: Type, + builder: &mut FunctionBuilderExt, + span: SourceSpan, + ) -> (Value, Value) { + let v2 = self.stack.pop().unwrap(); + let v1 = self.stack.pop().unwrap(); + let v1_casted = builder.ins().cast(v1, ty.clone(), span); + let v2_casted = builder.ins().cast(v2, ty, span); + (v1_casted, v2_casted) + } + + /// Pop three values. Return them in the order they were pushed. + pub(crate) fn pop3(&mut self) -> (Value, Value, Value) { + let v3 = self.stack.pop().unwrap(); + let v2 = self.stack.pop().unwrap(); + let v1 = self.stack.pop().unwrap(); + (v1, v2, v3) + } + + /// Helper to ensure the the stack size is at least as big as `n`; note that due to + /// `debug_assert` this will not execute in non-optimized builds. + #[inline] + fn ensure_length_is_at_least(&self, n: usize) { + debug_assert!( + n <= self.stack.len(), + "attempted to access {} values but stack only has {} values", + n, + self.stack.len() + ) + } + + /// Pop the top `n` values on the stack. + /// + /// The popped values are not returned. Use `peekn` to look at them before popping. + pub(crate) fn popn(&mut self, n: usize) { + self.ensure_length_is_at_least(n); + let new_len = self.stack.len() - n; + self.stack.truncate(new_len); + } + + /// Peek at the top `n` values on the stack in the order they were pushed. + pub(crate) fn peekn(&self, n: usize) -> &[Value] { + self.ensure_length_is_at_least(n); + &self.stack[self.stack.len() - n..] + } + + /// Peek at the top `n` values on the stack in the order they were pushed. + pub(crate) fn peekn_mut(&mut self, n: usize) -> &mut [Value] { + self.ensure_length_is_at_least(n); + let len = self.stack.len(); + &mut self.stack[len - n..] + } + + /// Push a block on the control stack. + pub(crate) fn push_block( + &mut self, + following_code: Block, + num_param_types: usize, + num_result_types: usize, + ) { + debug_assert!(num_param_types <= self.stack.len()); + self.control_stack.push(ControlStackFrame::Block { + destination: following_code, + original_stack_size: self.stack.len() - num_param_types, + num_param_values: num_param_types, + num_return_values: num_result_types, + exit_is_branched_to: false, + }); + } + + /// Push a loop on the control stack. + pub(crate) fn push_loop( + &mut self, + header: Block, + following_code: Block, + num_param_types: usize, + num_result_types: usize, + ) { + debug_assert!(num_param_types <= self.stack.len()); + self.control_stack.push(ControlStackFrame::Loop { + header, + destination: following_code, + original_stack_size: self.stack.len() - num_param_types, + num_param_values: num_param_types, + num_return_values: num_result_types, + }); + } + + /// Push an if on the control stack. + pub(crate) fn push_if( + &mut self, + destination: Block, + else_data: ElseData, + num_param_types: usize, + num_result_types: usize, + blocktype: BlockType, + ) { + debug_assert!(num_param_types <= self.stack.len()); + + // Push a second copy of our `if`'s parameters on the stack. This lets + // us avoid saving them on the side in the `ControlStackFrame` for our + // `else` block (if it exists), which would require a second heap + // allocation. See also the comment in `translate_operator` for + // `Operator::Else`. 
+ self.stack.reserve(num_param_types); + for i in (self.stack.len() - num_param_types)..self.stack.len() { + let val = self.stack[i]; + self.stack.push(val); + } + + self.control_stack.push(ControlStackFrame::If { + destination, + else_data, + original_stack_size: self.stack.len() - num_param_types, + num_param_values: num_param_types, + num_return_values: num_result_types, + exit_is_branched_to: false, + head_is_reachable: self.reachable, + consequent_ends_reachable: None, + blocktype, + }); + } +} + +/// Methods for handling entity references. +impl FuncTranslationState { + /// Get the `FunctionIdent` that should be used to make a direct call to function + /// `index`. Also return the number of WebAssembly arguments in the signature. + /// + /// Import the callee into `func`'s DFG if it is not already present. + pub(crate) fn get_direct_func( + &mut self, + dfg: &mut DataFlowGraph, + index: u32, + mod_info: &ModuleInfo, + diagnostics: &DiagnosticsHandler, + ) -> WasmResult<(FunctionIdent, usize)> { + let index = FuncIndex::from_u32(index); + Ok(match self.functions.entry(index) { + Occupied(entry) => *entry.get(), + Vacant(entry) => { + let sigidx = mod_info.functions[index]; + let func_type = mod_info.func_types[sigidx].clone(); + let func_name = mod_info + .function_names + .get(index) + .cloned() + .unwrap_or_else(|| format!("func{}", index.index())); + let func_name_id = Ident::with_empty_span(Symbol::intern(&func_name)); + let sig = sig_from_funct_type(&func_type, CallConv::SystemV, Linkage::External); + let Ok(func_id) = dfg.import_function(mod_info.name, func_name_id, sig.clone()) + else { + let message = format!("Function with name {} in module {} with signature {sig:?} is already imported (function call) with a different signature", func_name_id, mod_info.name); + diagnostics + .diagnostic(miden_diagnostics::Severity::Error) + .with_message(message.clone()) + .emit(); + return Err(WasmError::Unexpected(message)); + }; + *entry.insert((func_id, sig.params().len())) + } + }) + } +} diff --git a/frontend-wasm/src/func_translator.rs b/frontend-wasm/src/func_translator.rs new file mode 100644 index 00000000..c60aa387 --- /dev/null +++ b/frontend-wasm/src/func_translator.rs @@ -0,0 +1,190 @@ +//! Stand-alone WebAssembly to Miden IR translator. +//! +//! This module defines the `FuncTranslator` type which can translate a single WebAssembly +//! function to Miden IR guided by a `FuncEnvironment` which provides information about the +//! WebAssembly module and the runtime environment. +//! +//! Based on Cranelift's Wasm -> CLIF translator v11.0.0 + +use crate::code_translator::translate_operator; +use crate::error::WasmResult; +use crate::func_translation_state::FuncTranslationState; +use crate::function_builder_ext::{FunctionBuilderContext, FunctionBuilderExt}; +use crate::module_env::ModuleInfo; +use crate::ssa::Variable; +use crate::translation_utils::emit_zero; +use crate::wasm_types::valtype_to_type; +use miden_diagnostics::{DiagnosticsHandler, SourceSpan}; +use miden_hir::cranelift_entity::EntityRef; +use miden_hir::{Block, InstBuilder, ModuleFunctionBuilder}; +use wasmparser::{BinaryReader, FuncValidator, FunctionBody, WasmModuleResources}; + +/// WebAssembly to Miden IR function translator. +/// +/// A `FuncTranslator` is used to translate a binary WebAssembly function into Miden IR guided +/// by a `FuncEnvironment` object. A single translator instance can be reused to translate multiple +/// functions which will reduce heap allocation traffic. 
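A minimal sketch (not part of this diff) of the reuse pattern promised here, loosely following what `ModuleEnvironment::build` does later in this change: the caller keeps one `FuncTranslator` alive and feeds it one validated body at a time. The `impl WasmModuleResources` bound on the validator is an assumption, since the generic argument is not visible in this hunk.

```rust
use miden_diagnostics::DiagnosticsHandler;
use miden_hir::ModuleFunctionBuilder;
use wasmparser::{FuncValidator, FunctionBody, WasmModuleResources};

use crate::{error::WasmResult, func_translator::FuncTranslator, module_env::ModuleInfo};

// `translator` is created once and reused for every function body; `translate_body` clears
// its internal state and SSA context between calls while keeping their allocations, which is
// where the heap-allocation saving mentioned above comes from.
fn sketch_translate_one(
    translator: &mut FuncTranslator,
    body: &FunctionBody<'_>,
    func_builder: &mut ModuleFunctionBuilder<'_>,
    mod_info: &ModuleInfo,
    diagnostics: &DiagnosticsHandler,
    validator: &mut FuncValidator<impl WasmModuleResources>, // assumed bound, see note above
) -> WasmResult<()> {
    translator.translate_body(body, func_builder, mod_info, diagnostics, validator)
}
```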
+pub struct FuncTranslator { + func_ctx: FunctionBuilderContext, + state: FuncTranslationState, +} + +impl FuncTranslator { + /// Create a new translator. + pub fn new() -> Self { + Self { + func_ctx: FunctionBuilderContext::new(), + state: FuncTranslationState::new(), + } + } + + /// Translate a binary WebAssembly function from a `FunctionBody`. + pub fn translate_body( + &mut self, + body: &FunctionBody<'_>, + mod_func_builder: &mut ModuleFunctionBuilder, + mod_info: &ModuleInfo, + diagnostics: &DiagnosticsHandler, + func_validator: &mut FuncValidator, + ) -> WasmResult<()> { + let mut reader = body.get_binary_reader(); + + let mut builder = FunctionBuilderExt::new(mod_func_builder, &mut self.func_ctx); + let entry_block = builder.current_block(); + builder.seal_block(entry_block); // Declare all predecessors known. + + let num_params = declare_parameters(&mut builder, entry_block); + + // Set up the translation state with a single pushed control block representing the whole + // function and its return values. + let exit_block = builder.create_block(); + builder.append_block_params_for_function_returns(exit_block); + self.state.initialize(&builder.signature(), exit_block); + + parse_local_decls(&mut reader, &mut builder, num_params, func_validator)?; + parse_function_body( + reader, + &mut builder, + &mut self.state, + mod_info, + diagnostics, + func_validator, + )?; + + builder.finalize(); + Ok(()) + } +} + +/// Declare local variables for the signature parameters that correspond to WebAssembly locals. +/// +/// Return the number of local variables declared. +fn declare_parameters(builder: &mut FunctionBuilderExt, entry_block: Block) -> usize { + let sig_len = builder.signature().params().len(); + let mut next_local = 0; + for i in 0..sig_len { + let abi_param = &builder.signature().params()[i]; + let local = Variable::new(next_local); + builder.declare_var(local, abi_param.ty.clone()); + next_local += 1; + + let param_value = builder.block_params(entry_block)[i]; + builder.def_var(local, param_value); + } + next_local +} + +/// Parse the local variable declarations that precede the function body. +/// +/// Declare local variables, starting from `num_params`. +fn parse_local_decls( + reader: &mut BinaryReader, + builder: &mut FunctionBuilderExt, + num_params: usize, + validator: &mut FuncValidator, +) -> WasmResult<()> { + let mut next_local = num_params; + let local_count = reader.read_var_u32()?; + + for _ in 0..local_count { + let pos = reader.original_position(); + let count = reader.read_var_u32()?; + let ty = reader.read()?; + validator.define_locals(pos, count, ty)?; + declare_locals(builder, count, ty, &mut next_local)?; + } + + Ok(()) +} + +/// Declare `count` local variables of the same type, starting from `next_local`. +/// +/// Fail if too many locals are declared in the function, or if the type is not valid for a local. +fn declare_locals( + builder: &mut FunctionBuilderExt, + count: u32, + wasm_type: wasmparser::ValType, + next_local: &mut usize, +) -> WasmResult<()> { + let ty = valtype_to_type(&wasm_type)?; + // All locals are initialized to 0. + let init = emit_zero(&ty, builder); + for _ in 0..count { + let local = Variable::new(*next_local); + builder.declare_var(local, ty.clone()); + builder.def_var(local, init); + *next_local += 1; + } + Ok(()) +} + +/// Parse the function body in `reader`. +/// +/// This assumes that the local variable declarations have already been parsed and function +/// arguments and locals are declared in the builder. 
+fn parse_function_body( + mut reader: BinaryReader, + builder: &mut FunctionBuilderExt, + state: &mut FuncTranslationState, + mod_info: &ModuleInfo, + diagnostics: &DiagnosticsHandler, + func_validator: &mut FuncValidator, +) -> WasmResult<()> { + // The control stack is initialized with a single block representing the whole function. + debug_assert_eq!(state.control_stack.len(), 1, "State not initialized"); + + while !reader.eof() { + let pos = reader.original_position(); + let op = reader.read_operator()?; + func_validator.op(pos, &op)?; + translate_operator( + &op, + builder, + state, + mod_info, + diagnostics, + SourceSpan::default(), + )?; + } + let pos = reader.original_position(); + func_validator.finish(pos)?; + + // The final `End` operator left us in the exit block where we need to manually add a return + // instruction. + // + // If the exit block is unreachable, it may not have the correct arguments, so we would + // generate a return instruction that doesn't match the signature. + if state.reachable { + if !builder.is_unreachable() { + builder + .ins() + .ret(state.stack.first().cloned(), SourceSpan::default()); + } + } + + // Discard any remaining values on the stack. Either we just returned them, + // or the end of the function is unreachable. + state.stack.clear(); + + Ok(()) +} diff --git a/frontend-wasm/src/function_builder_ext.rs b/frontend-wasm/src/function_builder_ext.rs new file mode 100644 index 00000000..220a737a --- /dev/null +++ b/frontend-wasm/src/function_builder_ext.rs @@ -0,0 +1,525 @@ +use miden_diagnostics::SourceSpan; +use miden_hir::cranelift_entity::EntitySet; +use miden_hir::cranelift_entity::SecondaryMap; +use miden_hir::Block; +use miden_hir::Br; +use miden_hir::CondBr; +use miden_hir::DataFlowGraph; +use miden_hir::InsertionPoint; +use miden_hir::Inst; +use miden_hir::InstBuilderBase; +use miden_hir::Instruction; +use miden_hir::ModuleFunctionBuilder; +use miden_hir::ProgramPoint; +use miden_hir::Switch; +use miden_hir::Value; +use miden_hir_type::Type; + +use crate::ssa::SSABuilder; +use crate::ssa::SideEffects; +use crate::ssa::Variable; + +/// Tracking variables and blocks for SSA construction. +pub struct FunctionBuilderContext { + ssa: SSABuilder, + status: SecondaryMap, + types: SecondaryMap, +} + +impl FunctionBuilderContext { + pub fn new() -> Self { + Self { + ssa: SSABuilder::default(), + status: SecondaryMap::new(), + types: SecondaryMap::with_default(Type::Unknown), + } + } + + fn is_empty(&self) -> bool { + self.ssa.is_empty() && self.status.is_empty() && self.types.is_empty() + } + + fn clear(&mut self) { + self.ssa.clear(); + self.status.clear(); + self.types.clear(); + } +} + +#[derive(Clone, Default, Eq, PartialEq)] +enum BlockStatus { + /// No instructions have been added. + #[default] + Empty, + /// Some instructions have been added, but no terminator. + Partial, + /// A terminator has been added; no further instructions may be added. + Filled, +} + +/// A wrapper around Miden's `FunctionBuilder` and `SSABuilder` which provides +/// additional API for dealing with variables and SSA construction. 
+pub struct FunctionBuilderExt<'a, 'b, 'c: 'b> { + inner: &'b mut ModuleFunctionBuilder<'c>, + func_ctx: &'a mut FunctionBuilderContext, +} + +impl<'a, 'b, 'c> FunctionBuilderExt<'a, 'b, 'c> { + pub fn new( + inner: &'b mut ModuleFunctionBuilder<'c>, + func_ctx: &'a mut FunctionBuilderContext, + ) -> Self { + debug_assert!(func_ctx.is_empty()); + Self { inner, func_ctx } + } + + pub fn data_flow_graph(&self) -> &DataFlowGraph { + &self.inner.data_flow_graph() + } + + pub fn data_flow_graph_mut(&mut self) -> &mut DataFlowGraph { + self.inner.data_flow_graph_mut() + } + + pub fn signature(&self) -> &miden_hir::Signature { + self.inner.signature() + } + + pub fn ins<'short>(&'short mut self) -> FuncInstBuilderExt<'short, 'a, 'b, 'c> { + let block = self.inner.current_block(); + FuncInstBuilderExt::new(self, block) + } + + #[inline] + pub fn current_block(&self) -> Block { + self.inner.current_block() + } + + pub fn inst_results(&self, inst: Inst) -> &[Value] { + self.inner.inst_results(inst) + } + + pub fn create_block(&mut self) -> Block { + let block = self.inner.create_block(); + self.func_ctx.ssa.declare_block(block); + block + } + + /// Create a `Block` with the given parameters. + pub fn create_block_with_params( + &mut self, + params: impl IntoIterator, + span: SourceSpan, + ) -> Block { + let block = self.create_block(); + for ty in params { + self.inner.append_block_param(block, ty, span); + } + block + } + + /// Append parameters to the given `Block` corresponding to the function + /// return values. This can be used to set up the block parameters for a + /// function exit block. + pub fn append_block_params_for_function_returns(&mut self, block: Block) { + // These parameters count as "user" parameters here because they aren't + // inserted by the SSABuilder. + debug_assert!( + self.is_pristine(block), + "You can't add block parameters after adding any instruction" + ); + + for argtyp in self.signature().results().to_vec() { + self.inner + .append_block_param(block, argtyp.ty.clone(), SourceSpan::default()); + } + } + + /// After the call to this function, new instructions will be inserted into the designated + /// block, in the order they are declared. You must declare the types of the Block arguments + /// you will use here. + /// + /// When inserting the terminator instruction (which doesn't have a fallthrough to its immediate + /// successor), the block will be declared filled and it will not be possible to append + /// instructions to it. + pub fn switch_to_block(&mut self, block: Block) { + // First we check that the previous block has been filled. + debug_assert!( + self.is_unreachable() + || self.is_pristine(self.inner.current_block()) + || self.is_filled(self.inner.current_block()), + "you have to fill your block before switching" + ); + // We cannot switch to a filled block + debug_assert!( + !self.is_filled(block), + "you cannot switch to a block which is already filled" + ); + // Then we change the cursor position. + self.inner.switch_to_block(block); + } + + /// Retrieves all the parameters for a `Block` currently inferred from the jump instructions + /// inserted that target it and the SSA construction. + pub fn block_params(&self, block: Block) -> &[Value] { + self.inner.block_params(block) + } + + /// Declares that all the predecessors of this block are known. + /// + /// Function to call with `block` as soon as the last branch instruction to `block` has been + /// created. 
Forgetting to call this method on every block will cause inconsistencies in the + /// produced functions. + pub fn seal_block(&mut self, block: Block) { + let side_effects = self + .func_ctx + .ssa + .seal_block(block, self.inner.data_flow_graph_mut()); + self.handle_ssa_side_effects(side_effects); + } + + /// A Block is 'filled' when a terminator instruction is present. + fn fill_current_block(&mut self) { + self.func_ctx.status[self.inner.current_block()] = BlockStatus::Filled; + } + + fn handle_ssa_side_effects(&mut self, side_effects: SideEffects) { + for modified_block in side_effects.instructions_added_to_blocks { + if self.is_pristine(modified_block) { + self.func_ctx.status[modified_block] = BlockStatus::Partial; + } + } + } + + /// Make sure that the current block is inserted in the layout. + pub fn ensure_inserted_block(&mut self) { + let block = self.inner.current_block(); + if self.is_pristine(block) { + self.func_ctx.status[block] = BlockStatus::Partial; + } else { + debug_assert!( + !self.is_filled(block), + "you cannot add an instruction to a block already filled" + ); + } + } + + /// Declare that translation of the current function is complete. + /// + /// This resets the state of the `FunctionBuilderContext` in preparation to + /// be used for another function. + pub fn finalize(self) { + // Check that all the `Block`s are filled and sealed. + #[cfg(debug_assertions)] + { + for block in self.func_ctx.status.keys() { + if !self.is_pristine(block) { + assert!( + self.func_ctx.ssa.is_sealed(block), + "FunctionBuilderExt finalized, but block {} is not sealed", + block, + ); + assert!( + self.is_filled(block), + "FunctionBuilderExt finalized, but block {} is not filled", + block, + ); + } + } + } + + // Clear the state (but preserve the allocated buffers) in preparation + // for translation another function. + self.func_ctx.clear(); + } + + /// Declares the type of a variable, so that it can be used later (by calling + /// [`FunctionBuilderExt::use_var`]). This function will return an error if the variable + /// has been previously declared. + pub fn try_declare_var(&mut self, var: Variable, ty: Type) -> Result<(), DeclareVariableError> { + if self.func_ctx.types[var] != Type::Unknown { + return Err(DeclareVariableError::DeclaredMultipleTimes(var)); + } + self.func_ctx.types[var] = ty; + Ok(()) + } + + /// In order to use a variable (by calling [`FunctionBuilderExt::use_var`]), you need + /// to first declare its type with this method. + pub fn declare_var(&mut self, var: Variable, ty: Type) { + self.try_declare_var(var, ty) + .unwrap_or_else(|_| panic!("the variable {:?} has been declared multiple times", var)) + } + + /// Returns the Miden IR necessary to use a previously defined user + /// variable, returning an error if this is not possible. + pub fn try_use_var(&mut self, var: Variable) -> Result { + // Assert that we're about to add instructions to this block using the definition of the + // given variable. ssa.use_var is the only part of this crate which can add block parameters + // behind the caller's back. If we disallow calling append_block_param as soon as use_var is + // called, then we enforce a strict separation between user parameters and SSA parameters. 
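A hypothetical snippet (not part of this diff) illustrating the contract behind the variable methods in this impl: a variable's type is declared exactly once, a definition must match that type, and reads go through the SSA builder. `init` is assumed to be an existing SSA value whose type matches `ty`.

```rust
use miden_hir::Value;
use miden_hir_type::Type;

use crate::function_builder_ext::FunctionBuilderExt;
use crate::ssa::Variable;

fn sketch_variable_roundtrip(
    builder: &mut FunctionBuilderExt<'_, '_, '_>,
    var: Variable,
    ty: Type,
    init: Value,
) -> Value {
    builder.declare_var(var, ty); // panics if `var` was already declared
    builder.def_var(var, init);   // the value's type must match the declared type
    builder.use_var(var)          // resolved by the SSA builder for the current block
}
```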
+ self.ensure_inserted_block(); + + let (val, side_effects) = { + let ty = self + .func_ctx + .types + .get(var) + .cloned() + .ok_or(UseVariableError::UsedBeforeDeclared(var))?; + debug_assert_ne!( + ty, + Type::Unknown, + "variable {:?} is used but its type has not been declared", + var + ); + let current_block = self.inner.current_block(); + self.func_ctx + .ssa + .use_var(self.inner.data_flow_graph_mut(), var, ty, current_block) + }; + self.handle_ssa_side_effects(side_effects); + Ok(val) + } + + /// Returns the Miden IR value corresponding to the utilization at the current program + /// position of a previously defined user variable. + pub fn use_var(&mut self, var: Variable) -> Value { + self.try_use_var(var).unwrap_or_else(|_| { + panic!( + "variable {:?} is used but its type has not been declared", + var + ) + }) + } + + /// Registers a new definition of a user variable. This function will return + /// an error if the value supplied does not match the type the variable was + /// declared to have. + pub fn try_def_var(&mut self, var: Variable, val: Value) -> Result<(), DefVariableError> { + let var_ty = self + .func_ctx + .types + .get(var) + .ok_or(DefVariableError::DefinedBeforeDeclared(var))?; + if var_ty != self.data_flow_graph().value_type(val) { + return Err(DefVariableError::TypeMismatch(var, val)); + } + + self.func_ctx + .ssa + .def_var(var, val, self.inner.current_block()); + Ok(()) + } + + /// Register a new definition of a user variable. The type of the value must be + /// the same as the type registered for the variable. + pub fn def_var(&mut self, var: Variable, val: Value) { + self.try_def_var(var, val) + .unwrap_or_else(|error| match error { + DefVariableError::TypeMismatch(var, val) => { + assert_eq!( + &self.func_ctx.types[var], + self.data_flow_graph().value_type(val), + "declared type of variable {:?} doesn't match type of value {}", + var, + val + ); + } + DefVariableError::DefinedBeforeDeclared(var) => { + panic!( + "variable {:?} is used but its type has not been declared", + var + ); + } + }) + } + + /// Returns `true` if and only if no instructions have been added since the last call to + /// `switch_to_block`. + fn is_pristine(&self, block: Block) -> bool { + self.func_ctx.status[block] == BlockStatus::Empty + } + + /// Returns `true` if and only if a terminator instruction has been inserted since the + /// last call to `switch_to_block`. + fn is_filled(&self, block: Block) -> bool { + self.func_ctx.status[block] == BlockStatus::Filled + } + + /// Returns `true` if and only if the current `Block` is sealed and has no predecessors declared. + /// + /// The entry block of a function is never unreachable. + pub fn is_unreachable(&self) -> bool { + let is_entry = self.inner.current_block() == self.data_flow_graph().entry_block(); + !is_entry + && self.func_ctx.ssa.is_sealed(self.inner.current_block()) + && !self + .func_ctx + .ssa + .has_any_predecessors(self.inner.current_block()) + } + + /// Changes the destination of a jump instruction after creation. + /// + /// **Note:** You are responsible for maintaining the coherence with the arguments of + /// other jump instructions. + pub fn change_jump_destination(&mut self, inst: Inst, old_block: Block, new_block: Block) { + self.func_ctx.ssa.remove_block_predecessor(old_block, inst); + match self.data_flow_graph_mut().insts[inst].data.item { + Instruction::Br(Br { + ref mut destination, + .. 
+ }) if destination == &old_block => { + *destination = new_block; + } + Instruction::CondBr(CondBr { + then_dest: (ref mut then_dest, _), + else_dest: (ref mut else_dest, _), + .. + }) => { + if then_dest == &old_block { + *then_dest = new_block; + } else if else_dest == &old_block { + *else_dest = new_block; + } + } + Instruction::Switch(Switch { + op: _, + arg: _, + ref mut arms, + ref mut default, + }) => { + for (_, ref mut dest_block) in arms { + if dest_block == &old_block { + *dest_block = new_block; + } + } + if default == &old_block { + *default = new_block; + } + } + _ => panic!("{} must be a branch instruction", inst), + } + self.func_ctx.ssa.declare_block_predecessor(new_block, inst); + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)] +/// An error encountered when calling [`FunctionBuilderExt::try_use_var`]. +pub enum UseVariableError { + #[error("variable {0} is used before the declaration")] + UsedBeforeDeclared(Variable), +} + +#[derive(Debug, Copy, Clone, Eq, PartialEq, thiserror::Error)] +/// An error encountered when calling [`FunctionBuilderExt::try_declare_var`]. +pub enum DeclareVariableError { + #[error("variable {0} is already declared")] + DeclaredMultipleTimes(Variable), +} + +#[derive(Debug, Copy, Clone, Eq, PartialEq, thiserror::Error)] +/// An error encountered when defining the initial value of a variable. +pub enum DefVariableError { + #[error("the types of variable {0} and value {1} are not the same. The `Value` supplied to `def_var` must be of the same type as the variable was declared to be of in `declare_var`.")] + TypeMismatch(Variable, Value), + #[error( + "the value of variable {0} was defined (in call `def_val`) before it was declared (in call `declare_var`)" + )] + DefinedBeforeDeclared(Variable), +} + +pub struct FuncInstBuilderExt<'a, 'b: 'a, 'c, 'd: 'c> { + builder: &'a mut FunctionBuilderExt<'b, 'c, 'd>, + ip: InsertionPoint, +} +impl<'a, 'b, 'c, 'd> FuncInstBuilderExt<'a, 'b, 'c, 'd> { + fn new(builder: &'a mut FunctionBuilderExt<'b, 'c, 'd>, block: Block) -> Self { + assert!(builder.data_flow_graph().is_block_inserted(block)); + Self { + builder, + ip: InsertionPoint::after(ProgramPoint::Block(block)), + } + } +} +impl<'a, 'b, 'c, 'd> InstBuilderBase<'a> for FuncInstBuilderExt<'a, 'b, 'c, 'd> { + fn data_flow_graph(&self) -> &DataFlowGraph { + &self.builder.data_flow_graph() + } + + fn data_flow_graph_mut(&mut self) -> &mut DataFlowGraph { + self.builder.data_flow_graph_mut() + } + + fn insertion_point(&self) -> InsertionPoint { + self.ip + } + + // This implementation is richer than `InsertBuilder` because we use the data of the + // instruction being inserted to add related info to the DFG and the SSA building system, + // and perform debug sanity checks. + fn build(self, data: Instruction, ty: Type, span: SourceSpan) -> (Inst, &'a mut DataFlowGraph) { + // We only insert the Block in the layout when an instruction is added to it + self.builder.ensure_inserted_block(); + let opcode = data.opcode(); + let inst = self + .builder + .data_flow_graph_mut() + .insert_inst(self.ip, data, ty, span); + + match &self.builder.inner.data_flow_graph().insts[inst].data.item { + Instruction::Br(Br { destination, .. }) => { + // If the user has supplied jump arguments we must adapt the arguments of + // the destination block + self.builder + .func_ctx + .ssa + .declare_block_predecessor(*destination, inst); + } + + Instruction::CondBr(CondBr { + then_dest: (block_then, _), + else_dest: (block_else, _), + .. 
+ }) => { + self.builder + .func_ctx + .ssa + .declare_block_predecessor(*block_then, inst); + if block_then != block_else { + self.builder + .func_ctx + .ssa + .declare_block_predecessor(*block_else, inst); + } + } + Instruction::Switch(Switch { + op: _, + arg: _, + ref arms, + default: _, + }) => { + // Unlike all other jumps/branches, arms are + // capable of having the same successor appear + // multiple times, so we must deduplicate. + let mut unique = EntitySet::::new(); + for (_, dest_block) in arms { + if !unique.insert(*dest_block) { + continue; + } + self.builder + .func_ctx + .ssa + .declare_block_predecessor(*dest_block, inst); + } + } + inst => debug_assert!(!inst.opcode().is_branch()), + } + + if opcode.is_terminator() { + self.builder.fill_current_block() + } + (inst, self.builder.data_flow_graph_mut()) + } +} diff --git a/frontend-wasm/src/lib.rs b/frontend-wasm/src/lib.rs new file mode 100644 index 00000000..ca586a02 --- /dev/null +++ b/frontend-wasm/src/lib.rs @@ -0,0 +1,25 @@ +//! Performs translation from Wasm to MidenIR + +// Coding conventions +#![deny(warnings)] +#![deny(missing_docs)] +#![deny(rustdoc::broken_intra_doc_links)] + +mod code_translator; +mod config; +mod error; +mod func_translation_state; +mod func_translator; +mod function_builder_ext; +mod module_env; +mod module_translator; +mod sections_translator; +mod ssa; +mod translation_utils; +mod wasm_types; + +#[cfg(test)] +mod test_utils; + +pub use crate::config::WasmTranslationConfig; +pub use crate::module_translator::translate_module; diff --git a/frontend-wasm/src/module_env.rs b/frontend-wasm/src/module_env.rs new file mode 100644 index 00000000..4f8e5ee6 --- /dev/null +++ b/frontend-wasm/src/module_env.rs @@ -0,0 +1,295 @@ +//! Wasm to Miden IR translation environment + +use crate::error::{WasmError, WasmResult}; +use crate::func_translator::FuncTranslator; +use crate::translation_utils::sig_from_funct_type; +use crate::wasm_types::{ + DataSegment, DataSegmentIndex, DefinedFuncIndex, FuncIndex, Global, GlobalIndex, Memory, + MemoryIndex, TypeIndex, +}; +use miden_diagnostics::{DiagnosticsHandler, SourceSpan}; +use miden_hir::cranelift_entity::{EntityRef, PrimaryMap, SecondaryMap}; +use miden_hir::{CallConv, ConstantData, Ident, Linkage, Module, ModuleBuilder, Symbol}; +use miden_hir_type::FunctionType; + +use std::string::String; +use std::vec::Vec; +use wasmparser::{FunctionBody, Validator}; + +/// The main state belonging to a `ModuleEnvironment`. This is split out from +/// `ModuleEnvironment` to allow it to be borrowed separately from the +/// `FuncTranslator` field. +pub struct ModuleInfo { + /// Module name + pub name: Ident, + + /// Function types + pub func_types: PrimaryMap, + + /// Functions, imported and local. + pub functions: PrimaryMap, + + /// Function names. + pub function_names: SecondaryMap, + + /// Module and field names of imported functions as provided by `declare_func_import`. + pub imported_funcs: Vec<(String, String)>, + + /// Memories as provided by `declare_memory`. + pub memories: PrimaryMap, + + /// Globals as provided by `declare_global`. + pub globals: PrimaryMap, + + /// Global names. + global_names: SecondaryMap, + + /// The start function. 
+    pub start_func: Option<FuncIndex>,
+}
+
+impl ModuleInfo {
+    pub fn new(id: Ident) -> Self {
+        Self {
+            name: id,
+            func_types: PrimaryMap::new(),
+            imported_funcs: Vec::new(),
+            functions: PrimaryMap::new(),
+            memories: PrimaryMap::new(),
+            start_func: None,
+            globals: PrimaryMap::new(),
+            function_names: SecondaryMap::new(),
+            global_names: SecondaryMap::new(),
+        }
+    }
+
+    pub fn declare_global_name(&mut self, global_index: GlobalIndex, name: String) {
+        self.global_names[global_index] = name;
+    }
+
+    pub fn global_name(&self, global_index: GlobalIndex) -> String {
+        let stored_name = self.global_names[global_index].clone();
+        if stored_name.is_empty() {
+            format!("gv{}", global_index.index())
+        } else {
+            stored_name
+        }
+    }
+}
+
+pub struct ModuleEnvironment<'a> {
+    /// Module information.
+    pub info: ModuleInfo,
+
+    /// Function translation.
+    pub trans: FuncTranslator,
+
+    /// Unparsed function bodies (bytes).
+    pub function_bodies: PrimaryMap<DefinedFuncIndex, FunctionBody<'a>>,
+
+    /// Data segments declared in the module
+    pub data_segments: PrimaryMap<DataSegmentIndex, DataSegment<'a>>,
+
+    /// Data segment names.
+    pub data_segment_names: SecondaryMap<DataSegmentIndex, String>,
+}
+
+impl<'a> ModuleEnvironment<'a> {
+    /// Creates a new `ModuleEnvironment` instance.
+    pub fn new() -> Self {
+        Self {
+            info: ModuleInfo::new(Ident::with_empty_span(Symbol::intern("noname"))),
+            trans: FuncTranslator::new(),
+            function_bodies: PrimaryMap::new(),
+            data_segments: PrimaryMap::new(),
+            data_segment_names: SecondaryMap::new(),
+        }
+    }
+
+    /// Get the type for the function at the given index.
+    pub fn get_func_type(&self, func_index: FuncIndex) -> TypeIndex {
+        self.info.functions[func_index]
+    }
+
+    /// Return the number of imported functions within this `ModuleEnvironment`.
+    pub fn get_num_func_imports(&self) -> usize {
+        self.info.imported_funcs.len()
+    }
+
+    /// Return the name of the function, if a name for the function with
+    /// the corresponding index exists.
+    pub fn get_func_name(&self, func_index: FuncIndex) -> Option<&str> {
+        self.info.function_names.get(func_index).map(String::as_ref)
+    }
+
+    pub fn build(
+        mut self,
+        diagnostics: &DiagnosticsHandler,
+        validator: &mut Validator,
+    ) -> WasmResult<Module> {
+        let mut module_builder = ModuleBuilder::new(self.info.name.as_str());
+        self.build_globals(&mut module_builder, diagnostics)?;
+        self.build_data_segments(&mut module_builder, diagnostics)?;
+        let get_num_func_imports = self.get_num_func_imports();
+        for (def_func_index, body) in &self.function_bodies {
+            let func_index = FuncIndex::new(get_num_func_imports + def_func_index.index());
+            let sig_type_idx = self.get_func_type(func_index);
+            let func_ty = &self.info.func_types[sig_type_idx];
+            let func_name = self
+                .get_func_name(func_index)
+                .unwrap_or(&format!("func{}", func_index.index()))
+                .to_string();
+            let sig = sig_from_funct_type(func_ty, CallConv::SystemV, Linkage::External);
+            let mut module_func_builder =
+                module_builder.function(func_name.as_str(), sig.clone())?;
+            let mut func_validator = validator
+                .code_section_entry(&body)?
+ .into_validator(Default::default()); + self.trans.translate_body( + body, + &mut module_func_builder, + &self.info, + diagnostics, + &mut func_validator, + )?; + // TODO: add diagnostics + module_func_builder + .build(diagnostics) + .map_err(|_| WasmError::InvalidFunctionError)?; + } + let module = module_builder.build(); + Ok(*module) + } + + fn build_globals( + &self, + module_builder: &mut ModuleBuilder, + diagnostics: &DiagnosticsHandler, + ) -> Result<(), WasmError> { + Ok(for (global_idx, global) in &self.info.globals { + let global_name = self.info.global_name(global_idx).clone(); + let init = ConstantData::from(global.init.to_le_bytes(&self.info.globals)); + if let Err(e) = module_builder.declare_global_variable( + &global_name, + global.ty.clone(), + Linkage::External, + Some(init.clone()), + SourceSpan::default(), + ) { + let message = format!("Failed to declare global variable '{global_name}' with initializer '{init}' with error: {:?}", e); + diagnostics + .diagnostic(miden_diagnostics::Severity::Error) + .with_message(message.clone()) + .emit(); + return Err(WasmError::Unexpected(message)); + } + }) + } + + fn build_data_segments( + &self, + module_builder: &mut ModuleBuilder, + diagnostics: &DiagnosticsHandler, + ) -> Result<(), WasmError> { + for (data_segment_idx, data_segment) in &self.data_segments { + let data_segment_name = self.data_segment_names[data_segment_idx].clone(); + let readonly = data_segment_name.contains(".rodata"); + let init = ConstantData::from(data_segment.data); + let offset = data_segment + .offset + .as_i32(&self.info.globals, diagnostics)? as u32; + let size = init.len() as u32; + if let Err(e) = module_builder.declare_data_segment(offset, size, init, readonly) { + let message = format!("Failed to declare data segment '{data_segment_name}' with size '{size}' at '{offset}' with error: {:?}", e); + diagnostics + .diagnostic(miden_diagnostics::Severity::Error) + .with_message(message.clone()) + .emit(); + return Err(WasmError::Unexpected(message)); + } + } + Ok(()) + } + + /// Declares a function signature to the environment. + pub fn declare_type_func(&mut self, func_type: FunctionType) { + self.info.func_types.push(func_type); + } + + /// Declares a function import to the environment. + pub fn declare_func_import(&mut self, index: TypeIndex, module: &'a str, field: &'a str) { + assert_eq!( + self.info.functions.len(), + self.info.imported_funcs.len(), + "Imported functions must be declared first" + ); + self.info.functions.push(index); + self.info + .imported_funcs + .push((String::from(module), String::from(field))); + } + + /// Declares the type (signature) of a local function in the module. + pub fn declare_func_type(&mut self, index: TypeIndex) { + self.info.functions.push(index); + } + + /// Declares a global to the environment. + pub fn declare_global(&mut self, global: Global) { + self.info.globals.push(global); + } + + pub fn declare_global_name(&mut self, global_index: GlobalIndex, name: &'a str) { + self.info + .declare_global_name(global_index, String::from(name)); + } + + /// Declares a memory to the environment + pub fn declare_memory(&mut self, memory: Memory) { + self.info.memories.push(memory); + } + + /// Declares the optional start function. + pub fn declare_start_func(&mut self, func_index: FuncIndex) { + debug_assert!(self.info.start_func.is_none()); + self.info.start_func = Some(func_index); + } + + /// Provides the contents of a function body. 
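+    /// The body is only recorded at this point; it is translated later, in
+    /// `ModuleEnvironment::build`, after the whole module has been parsed and validated.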
+    pub fn define_function_body(&mut self, body: FunctionBody<'a>) {
+        self.function_bodies.push(body);
+    }
+
+    /// Declares the name of a module to the environment.
+    pub fn declare_module_name(&mut self, name: &'a str) {
+        self.info.name = Ident::with_empty_span(Symbol::intern(name));
+    }
+
+    /// Declares the name of a function to the environment.
+    pub fn declare_func_name(&mut self, func_index: FuncIndex, name: &'a str) {
+        self.info.function_names[func_index] = String::from(name);
+    }
+
+    pub fn declare_data_segment(&mut self, segment: DataSegment<'a>) {
+        self.data_segments.push(segment);
+    }
+
+    pub fn declare_data_segment_name(&mut self, segment_index: DataSegmentIndex, name: &'a str) {
+        self.data_segment_names[segment_index] = String::from(name);
+    }
+
+    /// Indicates that a custom section has been found in the wasm file.
+    pub fn custom_section(&mut self, _name: &'a str, _data: &'a [u8]) {
+        // Do we need to support custom sections?
+    }
+
+    /// Declares the name of a function's local to the environment.
+    pub fn declare_local_name(
+        &mut self,
+        _func_index: FuncIndex,
+        _local_index: u32,
+        _name: &'a str,
+    ) {
+        // TODO: Do we need a local's name?
+    }
+}
diff --git a/frontend-wasm/src/module_translator.rs b/frontend-wasm/src/module_translator.rs
new file mode 100644
index 00000000..42961468
--- /dev/null
+++ b/frontend-wasm/src/module_translator.rs
@@ -0,0 +1,127 @@
+//! Translation skeleton that traverses the whole WebAssembly module and calls helper functions
+//! to deal with each part of it.
+
+use crate::error::WasmResult;
+use crate::module_env::ModuleEnvironment;
+use crate::sections_translator::{
+    parse_data_section, parse_element_section, parse_function_section, parse_global_section,
+    parse_import_section, parse_memory_section, parse_name_section, parse_type_section,
+};
+use crate::wasm_types::FuncIndex;
+use crate::{unsupported_diag, WasmTranslationConfig};
+use miden_diagnostics::DiagnosticsHandler;
+use miden_hir::Module;
+use std::prelude::v1::*;
+use wasmparser::{NameSectionReader, Parser, Payload, Validator, WasmFeatures};
+
+/// Translate a sequence of bytes forming a valid Wasm binary into Miden IR
+pub fn translate_module(
+    wasm: &[u8],
+    _config: &WasmTranslationConfig,
+    diagnostics: &DiagnosticsHandler,
+) -> WasmResult<Module> {
+    let mut module_env = ModuleEnvironment::new();
+    let env = &mut module_env;
+    let wasm_features = WasmFeatures::default();
+    let mut validator = Validator::new_with_features(wasm_features);
+    for payload in Parser::new(0).parse_all(wasm) {
+        match payload?
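+        // Each payload is first fed to the `Validator` and then dispatched to the
+        // corresponding `parse_*` helper in `sections_translator`; nothing is lowered
+        // until `Payload::End`, where `ModuleEnvironment::build` produces the Miden IR module.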
{ + Payload::Version { + num, + encoding, + range, + } => { + validator.version(num, encoding, &range)?; + } + Payload::End(offset) => { + let module = module_env.build(diagnostics, &mut validator)?; + validator.end(offset)?; + return Ok(module); + } + + Payload::TypeSection(types) => { + validator.type_section(&types)?; + parse_type_section(types, env, diagnostics)?; + } + + Payload::ImportSection(imports) => { + validator.import_section(&imports)?; + parse_import_section(imports, env, diagnostics)?; + } + + Payload::FunctionSection(functions) => { + validator.function_section(&functions)?; + parse_function_section(functions, env)?; + } + + Payload::TableSection(tables) => { + validator.table_section(&tables)?; + // skip the table section + } + + Payload::MemorySection(memories) => { + validator.memory_section(&memories)?; + parse_memory_section(memories, env)?; + } + + Payload::TagSection(tags) => { + validator.tag_section(&tags)?; + unsupported_diag!(diagnostics, "Tag sections are not supported"); + } + + Payload::GlobalSection(globals) => { + validator.global_section(&globals)?; + parse_global_section(globals, env, diagnostics)?; + } + + Payload::ExportSection(exports) => { + validator.export_section(&exports)?; + // skip the export section + } + + Payload::StartSection { func, range } => { + validator.start_section(func, &range)?; + env.declare_start_func(FuncIndex::from_u32(func)); + } + + Payload::ElementSection(elements) => { + validator.element_section(&elements)?; + parse_element_section(elements, env, diagnostics)?; + } + + Payload::CodeSectionStart { count, range, .. } => { + validator.code_section_start(count, &range)?; + } + + Payload::CodeSectionEntry(body) => { + env.define_function_body(body); + } + + Payload::DataSection(data) => { + validator.data_section(&data)?; + parse_data_section(data, env, diagnostics)?; + } + + Payload::DataCountSection { count, range } => { + validator.data_count_section(count, &range)?; + } + + Payload::CustomSection(s) if s.name() == "name" => { + let result = + parse_name_section(NameSectionReader::new(s.data(), s.data_offset()), env); + if let Err(e) = result { + log::warn!("failed to parse name section {:?}", e); + } + } + + Payload::CustomSection(s) => env.custom_section(s.name(), s.data()), + + other => { + validator.payload(&other)?; + panic!("unimplemented section {:?}", other); + } + } + } + // The parsing should've ended with a Payload::End where we build the Miden IR module + panic!("unexpected end of Webassembly parsing, missing Payload::End"); +} diff --git a/frontend-wasm/src/sections_translator.rs b/frontend-wasm/src/sections_translator.rs new file mode 100644 index 00000000..ab4ed483 --- /dev/null +++ b/frontend-wasm/src/sections_translator.rs @@ -0,0 +1,243 @@ +//! Helper functions to gather information for each of the non-function sections of a +//! WebAssembly module. + +use crate::{ + error::{WasmError, WasmResult}, + module_env::ModuleEnvironment, + unsupported_diag, + wasm_types::{ + convert_func_type, convert_global_type, DataSegment, DataSegmentIndex, DataSegmentOffset, + FuncIndex, GlobalIndex, GlobalInit, TypeIndex, + }, +}; +use miden_diagnostics::DiagnosticsHandler; +use wasmparser::{ + Data, DataKind, DataSectionReader, ElementSectionReader, FunctionSectionReader, + GlobalSectionReader, ImportSectionReader, MemorySectionReader, NameSectionReader, Naming, + Operator, Type, TypeRef, TypeSectionReader, +}; + +/// Parses the Type section of the wasm module. 
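+/// For example, a Wasm type `(func (param i32 i64) (result i32))` is converted by
+/// `convert_func_type` into a Miden IR `FunctionType` with params `[I32, I64]` and
+/// results `[I32]`, and is registered via `ModuleEnvironment::declare_type_func`.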
+pub fn parse_type_section<'a>( + types: TypeSectionReader<'a>, + environ: &mut ModuleEnvironment<'a>, + diagnostics: &DiagnosticsHandler, +) -> WasmResult<()> { + for entry in types { + match entry? { + Type::Func(wasm_func_ty) => { + let ty = convert_func_type(&wasm_func_ty)?; + environ.declare_type_func(ty); + } + Type::Array(_) => { + unsupported_diag!(diagnostics, "Array types are not supported"); + } + } + } + Ok(()) +} + +/// Parses the Import section of the wasm module. +pub fn parse_import_section<'a>( + imports: ImportSectionReader<'a>, + environ: &mut ModuleEnvironment<'a>, + diagnostics: &DiagnosticsHandler, +) -> WasmResult<()> { + for entry in imports { + let import = entry?; + match import.ty { + TypeRef::Func(sig) => { + environ.declare_func_import(TypeIndex::from_u32(sig), import.module, import.name); + } + TypeRef::Memory(_) => { + unsupported_diag!(diagnostics, "Memory imports are not supported"); + } + TypeRef::Tag(_) => { + unsupported_diag!(diagnostics, "Tag imports are not supported"); + } + TypeRef::Global(_) => { + unsupported_diag!(diagnostics, "Global imports are not supported"); + } + TypeRef::Table(_) => { + unsupported_diag!(diagnostics, "Table imports are not supported"); + } + } + } + + Ok(()) +} + +/// Parses the Function section of the wasm module. +pub fn parse_function_section<'a>( + functions: FunctionSectionReader, + environ: &mut ModuleEnvironment<'a>, +) -> WasmResult<()> { + let num_functions = functions.count(); + if num_functions == std::u32::MAX { + // We reserve `u32::MAX` for our own use in cranelift-entity. + return Err(WasmError::FuncNumLimitExceeded); + } + + for entry in functions { + let sigindex = entry?; + environ.declare_func_type(TypeIndex::from_u32(sigindex)); + } + + Ok(()) +} + +/// Parses the Memory section of the wasm module. +pub fn parse_memory_section<'a>( + memories: MemorySectionReader, + environ: &mut ModuleEnvironment<'a>, +) -> WasmResult<()> { + for entry in memories { + environ.declare_memory(entry?.into()); + } + Ok(()) +} + +/// Parses the Global section of the wasm module. +pub fn parse_global_section<'a>( + globals: GlobalSectionReader, + environ: &mut ModuleEnvironment<'a>, + diagnostics: &DiagnosticsHandler, +) -> WasmResult<()> { + for entry in globals { + let wasmparser::Global { ty, init_expr } = entry?; + let mut init_expr_reader = init_expr.get_binary_reader(); + let initializer = match init_expr_reader.read_operator()? { + Operator::I32Const { value } => GlobalInit::I32Const(value), + Operator::I64Const { value } => GlobalInit::I64Const(value), + Operator::F32Const { value } => GlobalInit::F32Const(value.bits()), + Operator::F64Const { value } => GlobalInit::F64Const(value.bits()), + Operator::GlobalGet { global_index } => { + GlobalInit::GetGlobal(GlobalIndex::from_u32(global_index)) + } + ref s => { + unsupported_diag!( + diagnostics, + "unsupported init expr in global section: {:?}", + s + ); + } + }; + let global = convert_global_type(&ty, initializer)?; + environ.declare_global(global); + } + + Ok(()) +} + +/// Parses the Element section of the wasm module. +pub fn parse_element_section<'a>( + _elements: ElementSectionReader<'a>, + _environ: &mut ModuleEnvironment<'a>, + _diagnostics: &DiagnosticsHandler, +) -> WasmResult<()> { + // Skip element section, since we don't support tables + // We cannot throw an error here, since rustc will generate an element section (funcref) + Ok(()) +} + +/// Parses the Data section of the wasm module. 
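+/// For example, an active segment `(data (i32.const 1048576) "...")` is recorded as a
+/// `DataSegment` with offset `DataSegmentOffset::I32Const(1048576)` (the constant is
+/// illustrative); passive segments, a Wasm 2.0 feature, are rejected with a diagnostic.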
+pub fn parse_data_section<'a>( + data: DataSectionReader<'a>, + environ: &mut ModuleEnvironment<'a>, + diagnostics: &DiagnosticsHandler, +) -> WasmResult<()> { + for (_index, entry) in data.into_iter().enumerate() { + let Data { + kind, + data, + range: _, + } = entry?; + match kind { + DataKind::Active { + // ignored, since for Wasm spec v1 it's always 0 + memory_index: _, + offset_expr, + } => { + let mut offset_expr_reader = offset_expr.get_binary_reader(); + let offset = match offset_expr_reader.read_operator()? { + Operator::I32Const { value } => DataSegmentOffset::I32Const(value), + Operator::GlobalGet { global_index } => { + DataSegmentOffset::GetGlobal(GlobalIndex::from_u32(global_index)) + } + ref s => { + unsupported_diag!( + diagnostics, + "unsupported init expr in data section offset: {:?}", + s + ); + } + }; + let segment = DataSegment { offset, data }; + environ.declare_data_segment(segment); + } + DataKind::Passive => { + // Passive data segments type is added in Wasm spec 2.0 + unsupported_diag!(diagnostics, "Passive data segments are not supported"); + } + } + } + Ok(()) +} + +/// Parses the Name section of the wasm module. +pub fn parse_name_section<'a>( + names: NameSectionReader<'a>, + environ: &mut ModuleEnvironment<'a>, +) -> WasmResult<()> { + for subsection in names { + match subsection? { + wasmparser::Name::Function(names) => { + for name in names { + let Naming { index, name } = name?; + // We reserve `u32::MAX` for our own use in cranelift-entity. + if index != u32::max_value() { + environ.declare_func_name(FuncIndex::from_u32(index), name); + } + } + } + wasmparser::Name::Module { name, .. } => { + environ.declare_module_name(name); + } + wasmparser::Name::Local(reader) => { + for f in reader { + let f = f?; + if f.index == u32::max_value() { + continue; + } + for name in f.names { + let Naming { index, name } = name?; + environ.declare_local_name(FuncIndex::from_u32(f.index), index, name) + } + } + } + wasmparser::Name::Global(names) => { + for name in names { + let Naming { index, name } = name?; + if index != u32::max_value() { + environ.declare_global_name(GlobalIndex::from_u32(index), name); + } + } + } + wasmparser::Name::Data(names) => { + for name in names { + let Naming { index, name } = name?; + if index != u32::max_value() { + environ.declare_data_segment_name(DataSegmentIndex::from_u32(index), name); + } + } + } + wasmparser::Name::Label(_) + | wasmparser::Name::Type(_) + | wasmparser::Name::Table(_) + | wasmparser::Name::Memory(_) + | wasmparser::Name::Element(_) + | wasmparser::Name::Unknown { .. } => {} + } + } + Ok(()) +} diff --git a/frontend-wasm/src/ssa.rs b/frontend-wasm/src/ssa.rs new file mode 100644 index 00000000..ee769f28 --- /dev/null +++ b/frontend-wasm/src/ssa.rs @@ -0,0 +1,554 @@ +//! A SSA-building API that handles incomplete CFGs. +//! +//! The algorithm is based upon Braun M., Buchwald S., Hack S., Leißa R., Mallon C., +//! Zwinkau A. (2013) Simple and Efficient Construction of Static Single Assignment Form. +//! In: Jhala R., De Bosschere K. (eds) Compiler Construction. CC 2013. +//! Lecture Notes in Computer Science, vol 7791. Springer, Berlin, Heidelberg +//! +//! +//! +//! 
Based on Cranelift's Wasm -> CLIF translator v11.0.0
+
+use core::mem;
+use miden_diagnostics::SourceSpan;
+use miden_hir::cranelift_entity::packed_option::PackedOption;
+use miden_hir::cranelift_entity::{entity_impl, EntityList, EntitySet, ListPool, SecondaryMap};
+use miden_hir::{Block, DataFlowGraph, Inst, Value};
+use miden_hir_type::Type;
+
+/// Structure containing the data relevant to the construction of SSA for a given function.
+///
+/// The parameter struct `Variable` corresponds to the way variables are represented in the
+/// non-SSA language you're translating from.
+///
+/// The SSA building relies on information about the variables used and defined.
+///
+/// This SSA building module allows you to def and use variables on the fly while you are
+/// constructing the CFG; there is no need for a separate SSA pass after the CFG is completed.
+///
+/// A basic block is said _filled_ if all the instructions that it contains have been translated,
+/// and it is said _sealed_ if all of its predecessors have been declared. Only filled predecessors
+/// can be declared.
+#[derive(Default)]
+pub struct SSABuilder {
+    /// Records for every variable and for every relevant block, the last definition of
+    /// the variable in the block.
+    variables: SecondaryMap<Variable, SecondaryMap<Block, PackedOption<Value>>>,
+
+    /// Records the position of the basic blocks and the list of values used but not defined in the
+    /// block.
+    ssa_blocks: SecondaryMap<Block, SSABlockData>,
+
+    /// Call stack for use in the `use_var`/`predecessors_lookup` state machine.
+    calls: Vec<Call>,
+    /// Result stack for use in the `use_var`/`predecessors_lookup` state machine.
+    results: Vec<Value>,
+
+    /// Side effects accumulated in the `use_var`/`predecessors_lookup` state machine.
+    side_effects: SideEffects,
+
+    /// Reused storage for cycle-detection.
+    visited: EntitySet<Block>,
+
+    /// Storage for pending variable definitions.
+    variable_pool: ListPool<Variable>,
+
+    /// Storage for predecessor definitions.
+    inst_pool: ListPool<Inst>,
+}
+
+/// An opaque reference to a mutable variable.
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub struct Variable(u32);
+entity_impl!(Variable, "var");
+
+/// Side effects of a `use_var` or a `seal_block` method call.
+#[derive(Default)]
+pub struct SideEffects {
+    /// When a variable is used but has never been defined before (this happens in the case of
+    /// unreachable code), a placeholder `iconst` or `fconst` value is added to the right `Block`.
+    /// This field signals whether that is the case and returns the `Block`s to which the
+    /// initializations have been added.
+    pub instructions_added_to_blocks: Vec<Block>,
+}
+
+impl SideEffects {
+    fn is_empty(&self) -> bool {
+        self.instructions_added_to_blocks.is_empty()
+    }
+}
+
+#[derive(Clone)]
+enum Sealed {
+    No {
+        // List of current Block arguments for which an earlier def has not been found yet.
+        undef_variables: EntityList<Variable>,
+    },
+    Yes,
+}
+
+impl Default for Sealed {
+    fn default() -> Self {
+        Sealed::No {
+            undef_variables: EntityList::new(),
+        }
+    }
+}
+
+#[derive(Clone, Default)]
+struct SSABlockData {
+    // The predecessors of the Block with the block and branch instruction.
+    predecessors: EntityList<Inst>,
+    // A block is sealed if all of its predecessors have been declared.
+    sealed: Sealed,
+    // If this block is sealed and it has exactly one predecessor, this is that predecessor.
+    single_predecessor: PackedOption<Block>,
+}
+
+impl SSABuilder {
+    /// Clears a `SSABuilder` from all its data, leaving it in a pristine state without
+    /// deallocating memory.
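+    // A minimal sketch of the intended call sequence, using hypothetical `dfg`, `block0`,
+    // `block1`, `some_val` and `br_inst` handles (illustration only, not taken from this
+    // crate's tests):
+    //
+    //     let mut ssa = SSABuilder::default();
+    //     let var = Variable::from_u32(0);
+    //     ssa.declare_block(block0);
+    //     ssa.seal_block(block0, &mut dfg);     // the entry block has no predecessors
+    //     ssa.def_var(var, some_val, block0);   // definition while filling block0
+    //     let (val, _side_effects) = ssa.use_var(&mut dfg, var, Type::I32, block0);
+    //     ssa.declare_block_predecessor(block1, br_inst);
+    //     ssa.seal_block(block1, &mut dfg);     // all predecessors of block1 are declared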
+ pub fn clear(&mut self) { + self.variables.clear(); + self.ssa_blocks.clear(); + self.variable_pool.clear(); + self.inst_pool.clear(); + debug_assert!(self.calls.is_empty()); + debug_assert!(self.results.is_empty()); + debug_assert!(self.side_effects.is_empty()); + } + + /// Tests whether an `SSABuilder` is in a cleared state. + pub fn is_empty(&self) -> bool { + self.variables.is_empty() + && self.ssa_blocks.is_empty() + && self.calls.is_empty() + && self.results.is_empty() + && self.side_effects.is_empty() + } +} + +/// States for the `use_var`/`predecessors_lookup` state machine. +enum Call { + UseVar(Inst), + FinishPredecessorsLookup(Value, Block), +} + +/// Emit instructions to produce a zero value in the given type. +fn emit_zero(_ty: &Type, //, mut cur: FuncCursor +) -> Value { + todo!("emit zero value at the beginning of a block") +} + +/// The following methods are the API of the SSA builder. Here is how it should be used when +/// translating to Miden IR: +/// +/// - for each basic block, create a corresponding data for SSA construction with `declare_block`; +/// +/// - while traversing a basic block and translating instruction, use `def_var` and `use_var` +/// to record definitions and uses of variables, these methods will give you the corresponding +/// SSA values; +/// +/// - when all the instructions in a basic block have translated, the block is said _filled_ and +/// only then you can add it as a predecessor to other blocks with `declare_block_predecessor`; +/// +/// - when you have constructed all the predecessor to a basic block, +/// call `seal_block` on it with the `Function` that you are building. +/// +/// This API will give you the correct SSA values to use as arguments of your instructions, +/// as well as modify the jump instruction and `Block` parameters to account for the SSA +/// Phi functions. +/// +impl SSABuilder { + /// Declares a new definition of a variable in a given basic block. + pub fn def_var(&mut self, var: Variable, val: Value, block: Block) { + self.variables[var][block] = PackedOption::from(val); + } + + /// Declares a use of a variable in a given basic block. Returns the SSA value corresponding + /// to the current SSA definition of this variable and a list of newly created Blocks + /// + /// If the variable has never been defined in this blocks or recursively in its predecessors, + /// this method will silently create an initializer. You are + /// responsible for making sure that you initialize your variables. + pub fn use_var( + &mut self, + dfg: &mut DataFlowGraph, + var: Variable, + ty: Type, + block: Block, + ) -> (Value, SideEffects) { + debug_assert!(self.calls.is_empty()); + debug_assert!(self.results.is_empty()); + debug_assert!(self.side_effects.is_empty()); + + // Prepare the 'calls' and 'results' stacks for the state machine. + self.use_var_nonlocal(dfg, var, ty.clone(), block); + let value = self.run_state_machine(dfg, var, ty); + + let side_effects = mem::take(&mut self.side_effects); + (value, side_effects) + } + + /// Resolve the minimal SSA Value of `var` in `block` by traversing predecessors. + /// + /// This function sets up state for `run_state_machine()` but does not execute it. + fn use_var_nonlocal( + &mut self, + dfg: &mut DataFlowGraph, + var: Variable, + ty: Type, + mut block: Block, + ) { + // First, try Local Value Numbering (Algorithm 1 in the paper). + // If the variable already has a known Value in this block, use that. 
+ if let Some(val) = self.variables[var][block].expand() { + self.results.push(val); + return; + } + + // Otherwise, use Global Value Numbering (Algorithm 2 in the paper). + // This resolves the Value with respect to its predecessors. + // Find the most recent definition of `var`, and the block the definition comes from. + let (val, from) = self.find_var(dfg, var, ty, block); + + // The `from` block returned from `find_var` is guaranteed to be on the path we follow by + // traversing only single-predecessor edges. It might be equal to `block` if there is no + // such path, but in that case `find_var` ensures that the variable is defined in this block + // by a new block parameter. It also might be somewhere in a cycle, but even then this loop + // will terminate the first time it encounters that block, rather than continuing around the + // cycle forever. + // + // Why is it okay to copy the definition to all intervening blocks? For the initial block, + // this may not be the final definition of this variable within this block, but if we've + // gotten here then we know there is no earlier definition in the block already. + // + // For the remaining blocks: Recall that a block is only allowed to be set as a predecessor + // after all its instructions have already been filled in, so when we follow a predecessor + // edge to a block, we know there will never be any more local variable definitions added to + // that block. We also know that `find_var` didn't find a definition for this variable in + // any of the blocks before `from`. + // + // So in either case there is no definition in these blocks yet and we can blindly set one. + let var_defs = &mut self.variables[var]; + while block != from { + debug_assert!(var_defs[block].is_none()); + var_defs[block] = PackedOption::from(val); + block = self.ssa_blocks[block].single_predecessor.unwrap(); + } + } + + /// Find the most recent definition of this variable, returning both the definition and the + /// block in which it was found. If we can't find a definition that's provably the right one for + /// all paths to the current block, then append a block parameter to some block and use that as + /// the definition. Either way, also arrange that the definition will be on the `results` stack + /// when `run_state_machine` is done processing the current step. + /// + /// If a block has exactly one predecessor, and the block is sealed so we know its predecessors + /// will never change, then its definition for this variable is the same as the definition from + /// that one predecessor. In this case it's easy to see that no block parameter is necessary, + /// but we need to look at the predecessor to see if a block parameter might be needed there. + /// That holds transitively across any chain of sealed blocks with exactly one predecessor each. + /// + /// This runs into a problem, though, if such a chain has a cycle: Blindly following a cyclic + /// chain that never defines this variable would lead to an infinite loop in the compiler. It + /// doesn't really matter what code we generate in that case. Since each block in the cycle has + /// exactly one predecessor, there's no way to enter the cycle from the function's entry block; + /// and since all blocks in the cycle are sealed, the entire cycle is permanently dead code. But + /// we still have to prevent the possibility of an infinite loop. + /// + /// To break cycles, we can pick any block within the cycle as the one where we'll add a block + /// parameter. 
It's convenient to pick the block at which we entered the cycle, because that's + /// the first place where we can detect that we just followed a cycle. Adding a block parameter + /// gives us a definition we can reuse throughout the rest of the cycle. + fn find_var( + &mut self, + dfg: &mut DataFlowGraph, + var: Variable, + ty: Type, + mut block: Block, + ) -> (Value, Block) { + // Try to find an existing definition along single-predecessor edges first. + self.visited.clear(); + let var_defs = &mut self.variables[var]; + while let Some(pred) = self.ssa_blocks[block].single_predecessor.expand() { + if !self.visited.insert(block) { + break; + } + block = pred; + if let Some(val) = var_defs[block].expand() { + self.results.push(val); + return (val, block); + } + } + + // We've promised to return the most recent block where `var` was defined, but we didn't + // find a usable definition. So create one. + let val = dfg.append_block_param(block, ty, SourceSpan::default()); + var_defs[block] = PackedOption::from(val); + + // Now every predecessor needs to pass its definition of this variable to the newly added + // block parameter. To do that we have to "recursively" call `use_var`, but there are two + // problems with doing that. First, we need to keep a fixed bound on stack depth, so we + // can't actually recurse; instead we defer to `run_state_machine`. Second, if we don't + // know all our predecessors yet, we have to defer this work until the block gets sealed. + match &mut self.ssa_blocks[block].sealed { + // Once all the `calls` added here complete, this leaves either `val` or an equivalent + // definition on the `results` stack. + Sealed::Yes => self.begin_predecessors_lookup(val, block), + Sealed::No { undef_variables } => { + undef_variables.push(var, &mut self.variable_pool); + self.results.push(val); + } + } + (val, block) + } + + /// Declares a new basic block to construct corresponding data for SSA construction. + /// No predecessors are declared here and the block is not sealed. + /// Predecessors have to be added with `declare_block_predecessor`. + pub fn declare_block(&mut self, block: Block) { + // Ensure the block exists so seal_one_block will see it even if no predecessors or + // variables get declared for this block. But don't assign anything to it: + // SecondaryMap automatically sets all blocks to `default()`. + let _ = &mut self.ssa_blocks[block]; + } + + /// Declares a new predecessor for a `Block` and record the branch instruction + /// of the predecessor that leads to it. + /// + /// The precedent `Block` must be filled before added as predecessor. + /// Note that you must provide no jump arguments to the branch + /// instruction when you create it since `SSABuilder` will fill them for you. + /// + /// Callers are expected to avoid adding the same predecessor more than once in the case + /// of a jump table. + pub fn declare_block_predecessor(&mut self, block: Block, inst: Inst) { + debug_assert!( + !self.is_sealed(block), + "you cannot add a predecessor to a sealed block" + ); + debug_assert!( + self.ssa_blocks[block] + .predecessors + .as_slice(&self.inst_pool) + .iter() + .all(|&branch| branch != inst), + "you have declared the same predecessor twice!" + ); + self.ssa_blocks[block] + .predecessors + .push(inst, &mut self.inst_pool); + } + + /// Remove a previously declared Block predecessor by giving a reference to the jump + /// instruction. Returns the basic block containing the instruction. 
+ /// + /// Note: use only when you know what you are doing, this might break the SSA building problem + pub fn remove_block_predecessor(&mut self, block: Block, inst: Inst) { + debug_assert!(!self.is_sealed(block)); + let data = &mut self.ssa_blocks[block]; + let pred = data + .predecessors + .as_slice(&self.inst_pool) + .iter() + .position(|&branch| branch == inst) + .expect("the predecessor you are trying to remove is not declared"); + data.predecessors.swap_remove(pred, &mut self.inst_pool); + } + + /// Completes the global value numbering for a `Block`, all of its predecessors having been + /// already sealed. + /// + /// This method modifies the function's `Layout` by adding arguments to the `Block`s to + /// take into account the Phi function placed by the SSA algorithm. + /// + /// Returns the list of newly created blocks for critical edge splitting. + pub fn seal_block(&mut self, block: Block, dfg: &mut DataFlowGraph) -> SideEffects { + debug_assert!( + !self.is_sealed(block), + "Attempting to seal {} which is already sealed.", + block + ); + self.seal_one_block(block, dfg); + mem::take(&mut self.side_effects) + } + + /// Helper function for `seal_block` + fn seal_one_block(&mut self, block: Block, dfg: &mut DataFlowGraph) { + // For each undef var we look up values in the predecessors and create a block parameter + // only if necessary. + let mut undef_variables = + match mem::replace(&mut self.ssa_blocks[block].sealed, Sealed::Yes) { + Sealed::No { undef_variables } => undef_variables, + Sealed::Yes => return, + }; + let ssa_params = undef_variables.len(&self.variable_pool); + + let predecessors = self.predecessors(block); + if predecessors.len() == 1 { + let pred = dfg.insts[predecessors[0]].block; + self.ssa_blocks[block].single_predecessor = PackedOption::from(pred); + } + + // Note that begin_predecessors_lookup requires visiting these variables in the same order + // that they were defined by find_var, because it appends arguments to the jump instructions + // in all the predecessor blocks one variable at a time. + for idx in 0..ssa_params { + let var = undef_variables.get(idx, &self.variable_pool).unwrap(); + + // We need the temporary Value that was assigned to this Variable. If that Value shows + // up as a result from any of our predecessors, then it never got assigned on the loop + // through that block. We get the value from the next block param, where it was first + // allocated in find_var. + let block_params = dfg.block_params(block); + + // On each iteration through this loop, there are (ssa_params - idx) undefined variables + // left to process. Previous iterations through the loop may have removed earlier block + // parameters, but the last (ssa_params - idx) block parameters always correspond to the + // remaining undefined variables. So index from the end of the current block params. + let val = block_params[block_params.len() - (ssa_params - idx)]; + + debug_assert!(self.calls.is_empty()); + debug_assert!(self.results.is_empty()); + // self.side_effects may be non-empty here so that callers can + // accumulate side effects over multiple calls. + self.begin_predecessors_lookup(val, block); + self.run_state_machine(dfg, var, dfg.value_type(val).clone()); + } + + undef_variables.clear(&mut self.variable_pool); + } + + /// Given the local SSA Value of a Variable in a Block, perform a recursive lookup on + /// predecessors to determine if it is redundant with another Value earlier in the CFG. 
+ /// + /// If such a Value exists and is redundant, the local Value is replaced by the + /// corresponding non-local Value. If the original Value was a Block parameter, + /// the parameter may be removed if redundant. Parameters are placed eagerly by callers + /// to avoid infinite loops when looking up a Value for a Block that is in a CFG loop. + /// + /// Doing this lookup for each Value in each Block preserves SSA form during construction. + /// + /// ## Arguments + /// + /// `sentinel` is a dummy Block parameter inserted by `use_var_nonlocal()`. + /// Its purpose is to allow detection of CFG cycles while traversing predecessors. + fn begin_predecessors_lookup(&mut self, sentinel: Value, dest_block: Block) { + self.calls + .push(Call::FinishPredecessorsLookup(sentinel, dest_block)); + // Iterate over the predecessors. + self.calls.extend( + self.ssa_blocks[dest_block] + .predecessors + .as_slice(&self.inst_pool) + .iter() + .rev() + .copied() + .map(Call::UseVar), + ); + } + + /// Examine the values from the predecessors and compute a result value, creating + /// block parameters as needed. + fn finish_predecessors_lookup( + &mut self, + dfg: &mut DataFlowGraph, + sentinel: Value, + dest_block: Block, + ) -> Value { + // Determine how many predecessors are yielding unique, non-temporary Values. + let num_predecessors = self.predecessors(dest_block).len(); + // When this `Drain` is dropped, these elements will get truncated. + let results = self.results.drain(self.results.len() - num_predecessors..); + + let pred_val = { + let mut iter = results.as_slice().iter().filter(|&val| val != &sentinel); + if let Some(val) = iter.next() { + // This variable has at least one non-temporary definition. If they're all the same + // value, we can remove the block parameter and reference that value instead. + if iter.all(|other| other == val) { + Some(*val) + } else { + None + } + } else { + // The variable is used but never defined before. This is an irregularity in the + // code, but rather than throwing an error we silently initialize the variable to + // 0. This will have no effect since this situation happens in unreachable code. + if !dfg.is_block_inserted(dest_block) { + dfg.append_block(dest_block); + } + self.side_effects + .instructions_added_to_blocks + .push(dest_block); + let zero = emit_zero( + dfg.value_type(sentinel), + // FuncCursor::new(func).at_first_insertion_point(dest_block), + ); + Some(zero) + } + }; + + if let Some(pred_val) = pred_val { + // Here all the predecessors use a single value to represent our variable + // so we don't need to have it as a block argument. + dfg.remove_block_param(sentinel); + pred_val + } else { + // There is disagreement in the predecessors on which value to use so we have + // to keep the block argument. + let mut preds = self.ssa_blocks[dest_block].predecessors; + for (idx, &val) in results.as_slice().iter().enumerate() { + let pred = preds.get_mut(idx, &mut self.inst_pool).unwrap(); + let branch = *pred; + assert!( + dfg.insts[branch].opcode().is_branch(), + "you have declared a non-branch instruction as a predecessor to a block!" + ); + dfg.append_branch_destination_argument(branch, dest_block, val); + } + sentinel + } + } + + /// Returns the list of `Block`s that have been declared as predecessors of the argument. + fn predecessors(&self, block: Block) -> &[Inst] { + self.ssa_blocks[block] + .predecessors + .as_slice(&self.inst_pool) + } + + /// Returns whether the given Block has any predecessor or not. 
+ pub fn has_any_predecessors(&self, block: Block) -> bool { + !self.predecessors(block).is_empty() + } + + /// Returns `true` if and only if `seal_block` has been called on the argument. + pub fn is_sealed(&self, block: Block) -> bool { + matches!(self.ssa_blocks[block].sealed, Sealed::Yes) + } + + /// The main algorithm is naturally recursive: when there's a `use_var` in a + /// block with no corresponding local defs, it recurses and performs a + /// `use_var` in each predecessor. To avoid risking running out of callstack + /// space, we keep an explicit stack and use a small state machine rather + /// than literal recursion. + fn run_state_machine(&mut self, func: &mut DataFlowGraph, var: Variable, ty: Type) -> Value { + // Process the calls scheduled in `self.calls` until it is empty. + while let Some(call) = self.calls.pop() { + match call { + Call::UseVar(branch) => { + let block = func.insts[branch].block; + self.use_var_nonlocal(func, var, ty.clone(), block); + } + Call::FinishPredecessorsLookup(sentinel, dest_block) => { + let val = self.finish_predecessors_lookup(func, sentinel, dest_block); + self.results.push(val); + } + } + } + debug_assert_eq!(self.results.len(), 1); + self.results.pop().unwrap() + } +} diff --git a/frontend-wasm/src/test_utils.rs b/frontend-wasm/src/test_utils.rs new file mode 100644 index 00000000..63b96d07 --- /dev/null +++ b/frontend-wasm/src/test_utils.rs @@ -0,0 +1,30 @@ +use std::sync::Arc; + +use miden_diagnostics::term::termcolor::ColorChoice; +use miden_diagnostics::CodeMap; +use miden_diagnostics::DiagnosticsConfig; +use miden_diagnostics::DiagnosticsHandler; +use miden_diagnostics::Emitter; +use miden_diagnostics::NullEmitter; +use miden_diagnostics::Verbosity; + +pub fn default_emitter(verbosity: Verbosity, color: ColorChoice) -> Arc { + match verbosity { + _ => Arc::new(NullEmitter::new(color)), + } +} + +pub fn test_diagnostics() -> DiagnosticsHandler { + let codemap = Arc::new(CodeMap::new()); + let diagnostics = DiagnosticsHandler::new( + DiagnosticsConfig { + verbosity: Verbosity::Debug, + warnings_as_errors: false, + no_warn: false, + display: Default::default(), + }, + codemap, + default_emitter(Verbosity::Debug, ColorChoice::Auto), + ); + diagnostics +} diff --git a/frontend-wasm/src/translation_utils.rs b/frontend-wasm/src/translation_utils.rs new file mode 100644 index 00000000..0115e0e2 --- /dev/null +++ b/frontend-wasm/src/translation_utils.rs @@ -0,0 +1,57 @@ +//! Helper functions and structures for the translation. + +use miden_diagnostics::SourceSpan; +use miden_hir::{AbiParam, CallConv, InstBuilder, Linkage, Signature, Value}; +use miden_hir_type::{FunctionType, Type}; + +use crate::function_builder_ext::FunctionBuilderExt; + +/// Emit instructions to produce a zero value in the given type. 
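+/// For example, `emit_zero(&Type::I32, &mut builder)` appends a `const.i32 0`
+/// instruction (via `builder.ins().i32(0, ..)`) at the builder's current insertion
+/// point and returns the resulting `Value` (illustrative call, not taken from a test).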
+pub fn emit_zero(ty: &Type, builder: &mut FunctionBuilderExt) -> Value { + match ty { + Type::I1 => builder.ins().i1(false, SourceSpan::default()), + Type::I8 => builder.ins().i8(0, SourceSpan::default()), + Type::I16 => builder.ins().i16(0, SourceSpan::default()), + Type::I32 => builder.ins().i32(0, SourceSpan::default()), + Type::I64 => builder.ins().i64(0, SourceSpan::default()), + Type::I128 => todo!(), + Type::U256 => todo!(), + Type::U8 => todo!(), + Type::U16 => todo!(), + Type::U32 => todo!(), + Type::U64 => todo!(), + Type::U128 => todo!(), + Type::F64 => builder.ins().f64(0.0, SourceSpan::default()), + Type::Felt => todo!(), + Type::Ptr(_) + | Type::NativePtr(_, _) + | Type::Struct(_) + | Type::Array(_, _) + | Type::Unknown + | Type::Unit + | Type::Never => { + panic!("cannot emit zero for type: {:?}", ty); + } + } +} + +pub fn sig_from_funct_type( + func_type: &FunctionType, + call_conv: CallConv, + linkage: Linkage, +) -> Signature { + Signature { + params: func_type + .params + .iter() + .map(|ty| AbiParam::new(ty.clone())) + .collect(), + results: func_type + .results + .iter() + .map(|ty| AbiParam::new(ty.clone())) + .collect(), + cc: call_conv, + linkage, + } +} diff --git a/frontend-wasm/src/wasm_types.rs b/frontend-wasm/src/wasm_types.rs new file mode 100644 index 00000000..a63e3337 --- /dev/null +++ b/frontend-wasm/src/wasm_types.rs @@ -0,0 +1,241 @@ +//! Internal types for parsed WebAssembly. + +use miden_diagnostics::DiagnosticsHandler; +use miden_hir::cranelift_entity::entity_impl; +use miden_hir::cranelift_entity::PrimaryMap; +use miden_hir_type::FunctionType; +use miden_hir_type::Type; + +use crate::error::WasmError; +use crate::error::WasmResult; +use crate::module_env::ModuleInfo; +use crate::unsupported_diag; + +/// Index type of a function (imported or defined) inside the WebAssembly module. +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)] +pub struct FuncIndex(u32); +entity_impl!(FuncIndex); + +/// Index type of a defined function inside the WebAssembly module. +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)] +pub struct DefinedFuncIndex(u32); +entity_impl!(DefinedFuncIndex); + +/// Index type of a global variable (imported or defined) inside the WebAssembly module. +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug, derive_more::Display)] +pub struct GlobalIndex(u32); +entity_impl!(GlobalIndex); + +/// Index type of a linear memory (imported or defined) inside the WebAssembly module. +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)] +pub struct MemoryIndex(u32); +entity_impl!(MemoryIndex); + +/// Index type of a type inside the WebAssembly module. +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)] +pub struct TypeIndex(u32); +entity_impl!(TypeIndex); + +/// Index type of a data segment inside the WebAssembly module. +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)] +pub struct DataSegmentIndex(u32); +entity_impl!(DataSegmentIndex); + +/// A WebAssembly global. +#[derive(Debug, Clone, Hash, Eq, PartialEq)] +pub struct Global { + /// The Miden IR type of the value stored in the global. + pub ty: Type, + /// A flag indicating whether the value may change at runtime. + pub mutability: bool, + /// The initializer expression (constant). + pub init: GlobalInit, +} + +/// Globals are initialized via the `const` operators or by referring to another import. +#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] +pub enum GlobalInit { + /// An `i32.const`. 
+    I32Const(i32),
+    /// An `i64.const`.
+    I64Const(i64),
+    /// An `f32.const`.
+    F32Const(u32),
+    /// An `f64.const`.
+    F64Const(u64),
+    /// A `global.get` of another global.
+    GetGlobal(GlobalIndex),
+}
+
+impl GlobalInit {
+    /// Serialize the initializer constant expression into bytes (little-endian order).
+    pub fn to_le_bytes(self, globals: &PrimaryMap<GlobalIndex, Global>) -> Vec<u8> {
+        match self {
+            GlobalInit::I32Const(x) => x.to_le_bytes().to_vec(),
+            GlobalInit::I64Const(x) => x.to_le_bytes().to_vec(),
+            GlobalInit::F32Const(x) => x.to_le_bytes().to_vec(),
+            GlobalInit::F64Const(x) => x.to_le_bytes().to_vec(),
+            GlobalInit::GetGlobal(global_idx) => {
+                let global = &globals[global_idx];
+                global.init.to_le_bytes(globals)
+            }
+        }
+    }
+
+    pub fn as_i32(
+        &self,
+        globals: &PrimaryMap<GlobalIndex, Global>,
+        diagnostics: &DiagnosticsHandler,
+    ) -> WasmResult<i32> {
+        Ok(match self {
+            GlobalInit::I32Const(x) => *x,
+            GlobalInit::GetGlobal(global_idx) => {
+                let global = &globals[*global_idx];
+                global.init.as_i32(globals, diagnostics)?
+            }
+            g => {
+                unsupported_diag!(diagnostics, "Expected global init to be i32, got: {:?}", g);
+            }
+        })
+    }
+}
+
+/// WebAssembly linear memory.
+#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
+pub struct Memory {
+    /// The minimum number of pages in the memory.
+    pub minimum: u64,
+    /// The maximum number of pages in the memory.
+    pub maximum: Option<u64>,
+    /// Whether or not this is a 64-bit memory.
+    pub memory64: bool,
+}
+
+impl From<wasmparser::MemoryType> for Memory {
+    fn from(ty: wasmparser::MemoryType) -> Memory {
+        Memory {
+            minimum: ty.initial,
+            maximum: ty.maximum,
+            memory64: ty.memory64,
+        }
+    }
+}
+
+/// Offset of a data segment inside a linear memory.
+#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
+pub enum DataSegmentOffset {
+    /// An `i32.const` offset.
+    I32Const(i32),
+    /// An offset as a `global.get` of another global.
+    GetGlobal(GlobalIndex),
+}
+
+impl DataSegmentOffset {
+    /// Returns the offset as an i32, resolving the global if necessary.
+    pub fn as_i32(
+        &self,
+        globals: &PrimaryMap<GlobalIndex, Global>,
+        diagnostics: &DiagnosticsHandler,
+    ) -> WasmResult<i32> {
+        Ok(match self {
+            DataSegmentOffset::I32Const(x) => *x,
+            DataSegmentOffset::GetGlobal(global_idx) => {
+                let global = &globals[*global_idx];
+                match global.init.as_i32(globals, diagnostics) {
+                    Err(e) => {
+                        diagnostics
+                            .diagnostic(miden_diagnostics::Severity::Error)
+                            .with_message(format!(
+                                "Failed to get data segment offset from global init {:?} with global index {global_idx}",
+                                global.init,
+                            ))
+                            .emit();
+                        return Err(e);
+                    }
+                    Ok(v) => v,
+                }
+            }
+        })
+    }
+}
+
+/// A WebAssembly data segment.
+/// https://www.w3.org/TR/wasm-core-1/#data-segments%E2%91%A0
+pub struct DataSegment<'a> {
+    /// The offset of the data segment inside the linear memory.
+    pub offset: DataSegmentOffset,
+    /// The initialization data.
+ pub data: &'a [u8], +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] +pub struct BlockType { + pub params: Vec, + pub results: Vec, +} + +impl BlockType { + pub fn from_wasm( + block_ty: &wasmparser::BlockType, + module_info: &ModuleInfo, + ) -> WasmResult { + Ok(match block_ty { + wasmparser::BlockType::Empty => Self::default(), + wasmparser::BlockType::Type(ty) => Self { + params: vec![], + results: vec![valtype_to_type(ty)?], + }, + wasmparser::BlockType::FuncType(ty_index) => { + let func_type = &module_info.func_types[TypeIndex::from_u32(*ty_index)]; + Self { + params: func_type.params.clone(), + results: func_type.results.clone(), + } + } + }) + } +} + +pub fn convert_global_type(ty: &wasmparser::GlobalType, init: GlobalInit) -> WasmResult { + Ok(Global { + ty: valtype_to_type(&ty.content_type)?, + mutability: ty.mutable, + init, + }) +} + +/// Converts a wasmparser function type into a Miden IR function type +pub fn convert_func_type(ty: &wasmparser::FuncType) -> WasmResult { + let params = ty + .params() + .iter() + .map(|t| valtype_to_type(t)) + .collect::>>()?; + let results = ty + .results() + .iter() + .map(|t| valtype_to_type(t)) + .collect::>>()?; + Ok(FunctionType { results, params }) +} + +pub fn valtype_to_type(ty: &wasmparser::ValType) -> WasmResult { + Ok(match ty { + wasmparser::ValType::I32 => Type::I32, + wasmparser::ValType::I64 => Type::I64, + wasmparser::ValType::F32 => { + todo!("no f32 type in Miden IR") + } + wasmparser::ValType::F64 => Type::F64, + wasmparser::ValType::V128 => { + return Err(WasmError::Unsupported( + "V128 type is not supported".to_string(), + )); + } + wasmparser::ValType::Ref(_) => { + return Err(WasmError::Unsupported( + "Ref type is not supported".to_string(), + )); + } + }) +} diff --git a/frontend-wasm/tests/expected/dlmalloc.mir b/frontend-wasm/tests/expected/dlmalloc.mir new file mode 100644 index 00000000..994517e5 --- /dev/null +++ b/frontend-wasm/tests/expected/dlmalloc.mir @@ -0,0 +1,5743 @@ +module noname +global external __stack_pointer : i32 = 0x00100000 { id = gvar0 }; +global external gv1 : i32 = 0x001001c8 { id = gvar1 }; +global external gv2 : i32 = 0x001001d0 { id = gvar2 }; + + +pub fn _ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$13dispose_chunk17h577eb103dc04307bE(i32, i32, i32) { +block0(v0: i32, v1: i32, v2: i32): + v3 = const.i32 0 : i32 + v4 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v1, v2) : i32 + v5 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6pinuse17h92d5107047b03ba7E(v1) : i32 + v6 = const.i32 0 : i32 + v7 = neq v5, v6 : i1 + condbr v7, block4(v1, v2), block5 + +block1: + ret + +block2(v221: i32, v229: i32, v230: i32): + v222 = const.i32 256 : i32 + v223 = cast v221 : u32 + v224 = cast v222 : u32 + v225 = lt v223, v224 : i1 + v226 = cast v225 : i32 + v227 = const.i32 0 : i32 + v228 = neq v226, v227 : i1 + condbr v228, block29, block30 + +block3: + ret + +block4(v103: i32, v104: i32): + v97 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6cinuse17h58499de57c2d37e2E(v4) : i32 + v98 = const.i32 0 : i32 + v99 = eq v97, v98 : i1 + v100 = cast v99 : i32 + v101 = const.i32 0 : i32 + v102 = neq v100, v101 : i1 + condbr v102, block16, block17 + +block5: + v8 = cast v1 : u32 + v9 = inttoptr v8 : *mut i32 + v10 = load v9 : i32 + v11 = call noname::_ZN8dlmalloc8dlmalloc5Chunk7mmapped17h1a9959fbf47496c3E(v1) : i32 + v12 = const.i32 0 : i32 + v13 = neq v11, v12 : i1 + condbr v13, block6, block7 + +block6: + v78 = sub v1, v10 : i32 + v79 = add v2, v10 : i32 + v80 = const.i32 16 : i32 + v81 = add 
v79, v80 : i32 + v82 = call noname::_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$4free17h993c5f05ba1214bcE(v0, v78, v81) : i32 + v83 = const.i32 0 : i32 + v84 = eq v82, v83 : i1 + v85 = cast v84 : i32 + v86 = const.i32 0 : i32 + v87 = neq v85, v86 : i1 + condbr v87, block3, block15 + +block7: + v14 = add v10, v2 : i32 + v15 = call noname::_ZN8dlmalloc8dlmalloc5Chunk12minus_offset17h7c3eec81761249d9E(v1, v10) : i32 + v16 = cast v0 : u32 + v17 = add v16, 424 : u32 + v18 = inttoptr v17 : *mut i32 + v19 = load v18 : i32 + v20 = neq v15, v19 : i1 + v21 = cast v20 : i32 + v22 = const.i32 0 : i32 + v23 = neq v21, v22 : i1 + condbr v23, block8, block9 + +block8: + v38 = const.i32 256 : i32 + v39 = cast v10 : u32 + v40 = cast v38 : u32 + v41 = lt v39, v40 : i1 + v42 = cast v41 : i32 + v43 = const.i32 0 : i32 + v44 = neq v42, v43 : i1 + condbr v44, block11, block12 + +block9: + v24 = cast v4 : u32 + v25 = add v24, 4 : u32 + v26 = inttoptr v25 : *mut i32 + v27 = load v26 : i32 + v28 = const.i32 3 : i32 + v29 = band v27, v28 : i32 + v30 = const.i32 3 : i32 + v31 = neq v29, v30 : i1 + v32 = cast v31 : i32 + v33 = const.i32 0 : i32 + v34 = neq v32, v33 : i1 + condbr v34, block4(v15, v14), block10 + +block10: + v35 = cast v0 : u32 + v36 = add v35, 416 : u32 + v37 = inttoptr v36 : *mut i32 + store v37, v14 + call noname::_ZN8dlmalloc8dlmalloc5Chunk20set_free_with_pinuse17h5d876ea751634e99E(v15, v14, v4) + ret + +block11: + v45 = cast v15 : u32 + v46 = add v45, 12 : u32 + v47 = inttoptr v46 : *mut i32 + v48 = load v47 : i32 + v49 = cast v15 : u32 + v50 = add v49, 8 : u32 + v51 = inttoptr v50 : *mut i32 + v52 = load v51 : i32 + v53 = eq v48, v52 : i1 + v54 = cast v53 : i32 + v55 = const.i32 0 : i32 + v56 = neq v54, v55 : i1 + condbr v56, block13, block14 + +block12: + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E(v0, v15) + br block4(v15, v14) + +block13: + v63 = cast v0 : u32 + v64 = add v63, 408 : u32 + v65 = inttoptr v64 : *mut i32 + v66 = load v65 : i32 + v67 = const.i32 -2 : i32 + v68 = const.i32 3 : i32 + v69 = cast v10 : u32 + v70 = cast v68 : u32 + v71 = shr v69, v70 : u32 + v72 = cast v71 : i32 + v73 = shl v67, v72 : i32 + v74 = band v66, v73 : i32 + v75 = cast v0 : u32 + v76 = add v75, 408 : u32 + v77 = inttoptr v76 : *mut i32 + store v77, v74 + br block4(v15, v14) + +block14: + v57 = cast v52 : u32 + v58 = add v57, 12 : u32 + v59 = inttoptr v58 : *mut i32 + store v59, v48 + v60 = cast v48 : u32 + v61 = add v60, 8 : u32 + v62 = inttoptr v61 : *mut i32 + store v62, v52 + br block4(v15, v14) + +block15: + v88 = cast v0 : u32 + v89 = add v88, 432 : u32 + v90 = inttoptr v89 : *mut i32 + v91 = load v90 : i32 + v92 = sub v91, v81 : i32 + v93 = cast v0 : u32 + v94 = add v93, 432 : u32 + v95 = inttoptr v94 : *mut i32 + store v95, v92 + ret + +block16: + v106 = cast v0 : u32 + v107 = add v106, 428 : u32 + v108 = inttoptr v107 : *mut i32 + v109 = load v108 : i32 + v110 = eq v96, v109 : i1 + v111 = cast v110 : i32 + v112 = const.i32 0 : i32 + v113 = neq v111, v112 : i1 + condbr v113, block19, block20 + +block17: + call noname::_ZN8dlmalloc8dlmalloc5Chunk20set_free_with_pinuse17h5d876ea751634e99E(v103, v104, v96) + br block2(v104, v105, v103) + +block18: + v210 = cast v105 : u32 + v211 = add v210, 424 : u32 + v212 = inttoptr v211 : *mut i32 + store v212, v103 + v213 = cast v105 : u32 + v214 = add v213, 416 : u32 + v215 = inttoptr v214 : *mut i32 + v216 = load v215 : i32 + v217 = add v216, v104 : i32 + v218 = cast v105 : u32 + v219 = add 
v218, 416 : u32 + v220 = inttoptr v219 : *mut i32 + store v220, v217 + call noname::_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E(v103, v217) + ret + +block19: + v178 = cast v105 : u32 + v179 = add v178, 428 : u32 + v180 = inttoptr v179 : *mut i32 + store v180, v103 + v181 = cast v105 : u32 + v182 = add v181, 420 : u32 + v183 = inttoptr v182 : *mut i32 + v184 = load v183 : i32 + v185 = add v184, v104 : i32 + v186 = cast v105 : u32 + v187 = add v186, 420 : u32 + v188 = inttoptr v187 : *mut i32 + store v188, v185 + v189 = const.i32 1 : i32 + v190 = bor v185, v189 : i32 + v191 = cast v103 : u32 + v192 = add v191, 4 : u32 + v193 = inttoptr v192 : *mut i32 + store v193, v190 + v194 = cast v105 : u32 + v195 = add v194, 424 : u32 + v196 = inttoptr v195 : *mut i32 + v197 = load v196 : i32 + v198 = neq v103, v197 : i1 + v199 = cast v198 : i32 + v200 = const.i32 0 : i32 + v201 = neq v199, v200 : i1 + condbr v201, block3, block28 + +block20: + v114 = cast v105 : u32 + v115 = add v114, 424 : u32 + v116 = inttoptr v115 : *mut i32 + v117 = load v116 : i32 + v118 = eq v96, v117 : i1 + v119 = cast v118 : i32 + v120 = const.i32 0 : i32 + v121 = neq v119, v120 : i1 + condbr v121, block18, block21 + +block21: + v122 = call noname::_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E(v96) : i32 + v123 = add v122, v104 : i32 + v124 = const.i32 256 : i32 + v125 = cast v122 : u32 + v126 = cast v124 : u32 + v127 = lt v125, v126 : i1 + v128 = cast v127 : i32 + v129 = const.i32 0 : i32 + v130 = neq v128, v129 : i1 + condbr v130, block23, block24 + +block22: + call noname::_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E(v103, v123) + v167 = cast v105 : u32 + v168 = add v167, 424 : u32 + v169 = inttoptr v168 : *mut i32 + v170 = load v169 : i32 + v171 = neq v164, v170 : i1 + v172 = cast v171 : i32 + v173 = const.i32 0 : i32 + v174 = neq v172, v173 : i1 + condbr v174, block2(v165, v166, v164), block27 + +block23: + v131 = cast v96 : u32 + v132 = add v131, 12 : u32 + v133 = inttoptr v132 : *mut i32 + v134 = load v133 : i32 + v135 = cast v96 : u32 + v136 = add v135, 8 : u32 + v137 = inttoptr v136 : *mut i32 + v138 = load v137 : i32 + v139 = eq v134, v138 : i1 + v140 = cast v139 : i32 + v141 = const.i32 0 : i32 + v142 = neq v140, v141 : i1 + condbr v142, block25, block26 + +block24: + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E(v105, v96) + br block22 + +block25: + v149 = cast v105 : u32 + v150 = add v149, 408 : u32 + v151 = inttoptr v150 : *mut i32 + v152 = load v151 : i32 + v153 = const.i32 -2 : i32 + v154 = const.i32 3 : i32 + v155 = cast v122 : u32 + v156 = cast v154 : u32 + v157 = shr v155, v156 : u32 + v158 = cast v157 : i32 + v159 = shl v153, v158 : i32 + v160 = band v152, v159 : i32 + v161 = cast v105 : u32 + v162 = add v161, 408 : u32 + v163 = inttoptr v162 : *mut i32 + store v163, v160 + br block22 + +block26: + v143 = cast v138 : u32 + v144 = add v143, 12 : u32 + v145 = inttoptr v144 : *mut i32 + store v145, v134 + v146 = cast v134 : u32 + v147 = add v146, 8 : u32 + v148 = inttoptr v147 : *mut i32 + store v148, v138 + br block22 + +block27: + v175 = cast v166 : u32 + v176 = add v175, 416 : u32 + v177 = inttoptr v176 : *mut i32 + store v177, v165 + br block3 + +block28: + v202 = const.i32 0 : i32 + v203 = cast v105 : u32 + v204 = add v203, 416 : u32 + v205 = inttoptr v204 : *mut i32 + store v205, v202 + v206 = const.i32 0 : i32 + v207 = cast v105 : u32 + v208 = add v207, 424 : u32 + v209 = 
inttoptr v208 : *mut i32 + store v209, v206 + ret + +block29: + v231 = const.i32 -8 : i32 + v232 = band v221, v231 : i32 + v233 = add v229, v232 : i32 + v234 = const.i32 144 : i32 + v235 = add v233, v234 : i32 + v236 = cast v229 : u32 + v237 = add v236, 408 : u32 + v238 = inttoptr v237 : *mut i32 + v239 = load v238 : i32 + v240 = const.i32 1 : i32 + v241 = const.i32 3 : i32 + v242 = cast v221 : u32 + v243 = cast v241 : u32 + v244 = shr v242, v243 : u32 + v245 = cast v244 : i32 + v246 = shl v240, v245 : i32 + v247 = band v239, v246 : i32 + v248 = const.i32 0 : i32 + v249 = eq v247, v248 : i1 + v250 = cast v249 : i32 + v251 = const.i32 0 : i32 + v252 = neq v250, v251 : i1 + condbr v252, block32, block33 + +block30: + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18insert_large_chunk17h8e77460818b80af0E(v229, v230, v221) + ret + +block31(v266: i32): + v263 = cast v235 : u32 + v264 = add v263, 8 : u32 + v265 = inttoptr v264 : *mut i32 + store v265, v230 + v267 = cast v266 : u32 + v268 = add v267, 12 : u32 + v269 = inttoptr v268 : *mut i32 + store v269, v262 + v270 = cast v262 : u32 + v271 = add v270, 12 : u32 + v272 = inttoptr v271 : *mut i32 + store v272, v261 + v273 = cast v262 : u32 + v274 = add v273, 8 : u32 + v275 = inttoptr v274 : *mut i32 + store v275, v266 + br block1 + +block32: + v257 = bor v239, v246 : i32 + v258 = cast v229 : u32 + v259 = add v258, 408 : u32 + v260 = inttoptr v259 : *mut i32 + store v260, v257 + br block31(v235) + +block33: + v253 = cast v235 : u32 + v254 = add v253, 8 : u32 + v255 = inttoptr v254 : *mut i32 + v256 = load v255 : i32 + br block31(v256) +} + +pub fn _ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E(i32, i32) { +block0(v0: i32, v1: i32): + v2 = const.i32 0 : i32 + v3 = cast v1 : u32 + v4 = add v3, 24 : u32 + v5 = inttoptr v4 : *mut i32 + v6 = load v5 : i32 + v7 = call noname::_ZN8dlmalloc8dlmalloc9TreeChunk4next17he250edbec5d87123E(v1) : i32 + v8 = neq v7, v1 : i1 + v9 = cast v8 : i32 + v10 = const.i32 0 : i32 + v11 = neq v9, v10 : i1 + condbr v11, block4, block5 + +block1: + ret + +block2(v107: i32): + v72 = const.i32 0 : i32 + v73 = eq v6, v72 : i1 + v74 = cast v73 : i32 + v75 = const.i32 0 : i32 + v76 = neq v74, v75 : i1 + condbr v76, block10, block11 + +block3: + v39 = const.i32 16 : i32 + v40 = add v1, v39 : i32 + v41 = const.i32 0 : i32 + v42 = neq v18, v41 : i1 + v43 = select v42, v15, v40 : i32 + br block7(v43, v25) + +block4: + v29 = call noname::_ZN8dlmalloc8dlmalloc9TreeChunk4prev17h7a0f1d46544cc14aE(v1) : i32 + v30 = call noname::_ZN8dlmalloc8dlmalloc9TreeChunk4next17he250edbec5d87123E(v1) : i32 + v31 = call noname::_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E(v30) : i32 + v32 = cast v29 : u32 + v33 = add v32, 12 : u32 + v34 = inttoptr v33 : *mut i32 + store v34, v31 + v35 = call noname::_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E(v29) : i32 + v36 = cast v30 : u32 + v37 = add v36, 8 : u32 + v38 = inttoptr v37 : *mut i32 + store v38, v35 + br block2(v30) + +block5: + v12 = const.i32 20 : i32 + v13 = const.i32 16 : i32 + v14 = const.i32 20 : i32 + v15 = add v1, v14 : i32 + v16 = cast v15 : u32 + v17 = inttoptr v16 : *mut i32 + v18 = load v17 : i32 + v19 = const.i32 0 : i32 + v20 = neq v18, v19 : i1 + v21 = select v20, v12, v13 : i32 + v22 = add v1, v21 : i32 + v23 = cast v22 : u32 + v24 = inttoptr v23 : *mut i32 + v25 = load v24 : i32 + v26 = const.i32 0 : i32 + v27 = neq v25, v26 : i1 + condbr v27, block3, block6 + +block6: + v28 = const.i32 0 : i32 + br block2(v28) + 
+block7(v44: i32, v45: i32): + v46 = const.i32 20 : i32 + v47 = add v45, v46 : i32 + v48 = const.i32 16 : i32 + v49 = add v45, v48 : i32 + v50 = cast v47 : u32 + v51 = inttoptr v50 : *mut i32 + v52 = load v51 : i32 + v53 = const.i32 0 : i32 + v54 = neq v52, v53 : i1 + v55 = select v54, v47, v49 : i32 + v56 = const.i32 20 : i32 + v57 = const.i32 16 : i32 + v58 = const.i32 0 : i32 + v59 = neq v52, v58 : i1 + v60 = select v59, v56, v57 : i32 + v61 = add v45, v60 : i32 + v62 = cast v61 : u32 + v63 = inttoptr v62 : *mut i32 + v64 = load v63 : i32 + v65 = const.i32 0 : i32 + v66 = neq v64, v65 : i1 + condbr v66, block7(v55, v64), block9 + +block8: + v67 = const.i32 0 : i32 + v68 = cast v44 : u32 + v69 = inttoptr v68 : *mut i32 + store v69, v67 + br block2(v45) + +block9: + br block8 + +block10: + br block1 + +block11: + v81 = cast v1 : u32 + v82 = add v81, 28 : u32 + v83 = inttoptr v82 : *mut i32 + v84 = load v83 : i32 + v85 = const.i32 2 : i32 + v86 = shl v84, v85 : i32 + v87 = add v0, v86 : i32 + v88 = cast v87 : u32 + v89 = inttoptr v88 : *mut i32 + v90 = load v89 : i32 + v91 = eq v90, v79 : i1 + v92 = cast v91 : i32 + v93 = const.i32 0 : i32 + v94 = neq v92, v93 : i1 + condbr v94, block13, block14 + +block12: + v128 = cast v107 : u32 + v129 = add v128, 24 : u32 + v130 = inttoptr v129 : *mut i32 + store v130, v70 + v132 = cast v79 : u32 + v133 = add v132, 16 : u32 + v134 = inttoptr v133 : *mut i32 + v135 = load v134 : i32 + v136 = const.i32 0 : i32 + v137 = eq v135, v136 : i1 + v138 = cast v137 : i32 + v139 = const.i32 0 : i32 + v140 = neq v138, v139 : i1 + condbr v140, block17, block18 + +block13: + v112 = cast v87 : u32 + v113 = inttoptr v112 : *mut i32 + store v113, v107 + v114 = const.i32 0 : i32 + v115 = neq v107, v114 : i1 + condbr v115, block12, block16 + +block14: + v95 = const.i32 16 : i32 + v96 = const.i32 20 : i32 + v97 = cast v70 : u32 + v98 = add v97, 16 : u32 + v99 = inttoptr v98 : *mut i32 + v100 = load v99 : i32 + v101 = eq v100, v79 : i1 + v102 = cast v101 : i32 + v103 = const.i32 0 : i32 + v104 = neq v102, v103 : i1 + v105 = select v104, v95, v96 : i32 + v106 = add v70, v105 : i32 + v108 = cast v106 : u32 + v109 = inttoptr v108 : *mut i32 + store v109, v107 + v110 = const.i32 0 : i32 + v111 = neq v107, v110 : i1 + condbr v111, block12, block15 + +block15: + br block10 + +block16: + v116 = cast v77 : u32 + v117 = add v116, 412 : u32 + v118 = inttoptr v117 : *mut i32 + v119 = load v118 : i32 + v120 = const.i32 -2 : i32 + v121 = shl v120, v84 : i32 + v122 = band v119, v121 : i32 + v123 = cast v77 : u32 + v124 = add v123, 412 : u32 + v125 = inttoptr v124 : *mut i32 + store v125, v122 + ret + +block17: + v148 = const.i32 20 : i32 + v149 = add v131, v148 : i32 + v150 = cast v149 : u32 + v151 = inttoptr v150 : *mut i32 + v152 = load v151 : i32 + v153 = const.i32 0 : i32 + v154 = eq v152, v153 : i1 + v155 = cast v154 : i32 + v156 = const.i32 0 : i32 + v157 = neq v155, v156 : i1 + condbr v157, block10, block19 + +block18: + v141 = cast v126 : u32 + v142 = add v141, 16 : u32 + v143 = inttoptr v142 : *mut i32 + store v143, v135 + v144 = cast v135 : u32 + v145 = add v144, 24 : u32 + v146 = inttoptr v145 : *mut i32 + store v146, v126 + br block17 + +block19: + v159 = const.i32 20 : i32 + v160 = add v126, v159 : i32 + v161 = cast v160 : u32 + v162 = inttoptr v161 : *mut i32 + store v162, v152 + v163 = cast v152 : u32 + v164 = add v163, 24 : u32 + v165 = inttoptr v164 : *mut i32 + store v165, v158 + ret +} + +pub fn 
_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18insert_large_chunk17h8e77460818b80af0E(i32, i32, i32) { +block0(v0: i32, v1: i32, v2: i32): + v3 = const.i32 0 : i32 + v4 = const.i32 0 : i32 + v5 = const.i32 256 : i32 + v6 = cast v2 : u32 + v7 = cast v5 : u32 + v8 = lt v6, v7 : i1 + v9 = cast v8 : i32 + v10 = const.i32 0 : i32 + v11 = neq v9, v10 : i1 + condbr v11, block2(v4), block3 + +block1: + ret + +block2(v44: i32): + v40 = const.i64 0 : i64 + v41 = cast v1 : u32 + v42 = add v41, 16 : u32 + v43 = inttoptr v42 : *mut i64 + store v43, v40 + v45 = cast v39 : u32 + v46 = add v45, 28 : u32 + v47 = inttoptr v46 : *mut i32 + store v47, v44 + v49 = const.i32 2 : i32 + v50 = shl v44, v49 : i32 + v51 = add v0, v50 : i32 + v52 = call noname::_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E(v39) : i32 + v53 = cast v48 : u32 + v54 = add v53, 412 : u32 + v55 = inttoptr v54 : *mut i32 + v56 = load v55 : i32 + v57 = const.i32 1 : i32 + v58 = shl v57, v44 : i32 + v59 = band v56, v58 : i32 + v60 = const.i32 0 : i32 + v61 = eq v59, v60 : i1 + v62 = cast v61 : i32 + v63 = const.i32 0 : i32 + v64 = neq v62, v63 : i1 + condbr v64, block6, block7 + +block3: + v12 = const.i32 31 : i32 + v13 = const.i32 16777215 : i32 + v14 = cast v2 : u32 + v15 = cast v13 : u32 + v16 = gt v14, v15 : i1 + v17 = cast v16 : i32 + v18 = const.i32 0 : i32 + v19 = neq v17, v18 : i1 + condbr v19, block2(v12), block4 + +block4: + v20 = const.i32 6 : i32 + v21 = const.i32 8 : i32 + v22 = cast v2 : u32 + v23 = cast v21 : u32 + v24 = shr v22, v23 : u32 + v25 = cast v24 : i32 + v26 = popcnt v25 : i32 + v27 = sub v20, v26 : i32 + v28 = cast v2 : u32 + v29 = cast v27 : u32 + v30 = shr v28, v29 : u32 + v31 = cast v30 : i32 + v32 = const.i32 1 : i32 + v33 = band v31, v32 : i32 + v34 = const.i32 1 : i32 + v35 = shl v26, v34 : i32 + v36 = sub v33, v35 : i32 + v37 = const.i32 62 : i32 + v38 = add v36, v37 : i32 + br block2(v38) + +block5(v134: i32): + v135 = cast v134 : u32 + v136 = add v135, 8 : u32 + v137 = inttoptr v136 : *mut i32 + store v137, v134 + v138 = cast v134 : u32 + v139 = add v138, 12 : u32 + v140 = inttoptr v139 : *mut i32 + store v140, v134 + br block1 + +block6: + v125 = bor v56, v58 : i32 + v126 = cast v48 : u32 + v127 = add v126, 412 : u32 + v128 = inttoptr v127 : *mut i32 + store v128, v125 + v129 = cast v39 : u32 + v130 = add v129, 24 : u32 + v131 = inttoptr v130 : *mut i32 + store v131, v51 + v132 = cast v51 : u32 + v133 = inttoptr v132 : *mut i32 + store v133, v39 + br block5(v52) + +block7: + v65 = cast v51 : u32 + v66 = inttoptr v65 : *mut i32 + v67 = load v66 : i32 + v69 = call noname::_ZN8dlmalloc8dlmalloc24leftshift_for_tree_index17h31d064fdd867f502E(v44) : i32 + v70 = shl v2, v69 : i32 + br block8(v67, v70) + +block8(v71: i32, v102: i32): + v72 = call noname::_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E(v71) : i32 + v73 = call noname::_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E(v72) : i32 + v75 = neq v73, v74 : i1 + v76 = cast v75 : i32 + v77 = const.i32 0 : i32 + v78 = neq v76, v77 : i1 + condbr v78, block10, block11 + +block9: + v120 = cast v114 : u32 + v121 = inttoptr v120 : *mut i32 + store v121, v97 + v122 = cast v97 : u32 + v123 = add v122, 24 : u32 + v124 = inttoptr v123 : *mut i32 + store v124, v71 + br block5(v84) + +block10: + v103 = const.i32 29 : i32 + v104 = cast v102 : u32 + v105 = cast v103 : u32 + v106 = shr v104, v105 : u32 + v107 = cast v106 : i32 + v108 = const.i32 1 : i32 + v109 = shl v102, v108 : i32 + v110 = const.i32 4 : i32 + v111 = band v107, v110 : i32 + v112 = add 
v71, v111 : i32 + v113 = const.i32 16 : i32 + v114 = add v112, v113 : i32 + v115 = cast v114 : u32 + v116 = inttoptr v115 : *mut i32 + v117 = load v116 : i32 + v118 = const.i32 0 : i32 + v119 = neq v117, v118 : i1 + condbr v119, block8(v117, v109), block12 + +block11: + v79 = call noname::_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E(v71) : i32 + v80 = cast v79 : u32 + v81 = add v80, 8 : u32 + v82 = inttoptr v81 : *mut i32 + v83 = load v82 : i32 + v85 = cast v83 : u32 + v86 = add v85, 12 : u32 + v87 = inttoptr v86 : *mut i32 + store v87, v84 + v88 = cast v79 : u32 + v89 = add v88, 8 : u32 + v90 = inttoptr v89 : *mut i32 + store v90, v84 + v91 = cast v84 : u32 + v92 = add v91, 12 : u32 + v93 = inttoptr v92 : *mut i32 + store v93, v79 + v94 = cast v84 : u32 + v95 = add v94, 8 : u32 + v96 = inttoptr v95 : *mut i32 + store v96, v83 + v98 = const.i32 0 : i32 + v99 = cast v97 : u32 + v100 = add v99, 24 : u32 + v101 = inttoptr v100 : *mut i32 + store v101, v98 + ret + +block12: + br block9 +} + +pub fn _ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$23release_unused_segments17h25622465f0742468E(i32) -> i32 { +block0(v0: i32): + v2 = const.i32 0 : i32 + v3 = const.i32 136 : i32 + v4 = add v0, v3 : i32 + v5 = cast v4 : u32 + v6 = inttoptr v5 : *mut i32 + v7 = load v6 : i32 + v8 = const.i32 0 : i32 + v9 = neq v7, v8 : i1 + condbr v9, block3, block4 + +block1(v1: i32): + ret v1 + +block2(v135: i32, v136: i32, v149: i32): + v137 = const.i32 4095 : i32 + v138 = const.i32 4095 : i32 + v139 = cast v136 : u32 + v140 = cast v138 : u32 + v141 = gt v139, v140 : i1 + v142 = cast v141 : i32 + v143 = const.i32 0 : i32 + v144 = neq v142, v143 : i1 + v145 = select v144, v136, v137 : i32 + v146 = cast v135 : u32 + v147 = add v146, 448 : u32 + v148 = inttoptr v147 : *mut i32 + store v148, v145 + br block1(v149) + +block3: + v12 = const.i32 128 : i32 + v13 = add v0, v12 : i32 + v14 = const.i32 0 : i32 + v15 = const.i32 0 : i32 + br block5(v7, v0, v13, v14, v15) + +block4: + v10 = const.i32 0 : i32 + v11 = const.i32 0 : i32 + br block2(v0, v10, v11) + +block5(v16: i32, v28: i32, v110: i32, v116: i32, v122: i32): + v17 = cast v16 : u32 + v18 = add v17, 8 : u32 + v19 = inttoptr v18 : *mut i32 + v20 = load v19 : i32 + v21 = cast v16 : u32 + v22 = add v21, 4 : u32 + v23 = inttoptr v22 : *mut i32 + v24 = load v23 : i32 + v25 = cast v16 : u32 + v26 = inttoptr v25 : *mut i32 + v27 = load v26 : i32 + v29 = cast v16 : u32 + v30 = add v29, 12 : u32 + v31 = inttoptr v30 : *mut i32 + v32 = load v31 : i32 + v33 = const.i32 1 : i32 + v34 = cast v32 : u32 + v35 = cast v33 : u32 + v36 = shr v34, v35 : u32 + v37 = cast v36 : i32 + v38 = call noname::_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$16can_release_part17h43bfb7d8666fcc31E(v28, v37) : i32 + v39 = const.i32 0 : i32 + v40 = eq v38, v39 : i1 + v41 = cast v40 : i32 + v42 = const.i32 0 : i32 + v43 = neq v41, v42 : i1 + condbr v43, block8(v122, v20, v28, v116), block9 + +block6: + br block2(v130, v125, v133) + +block7(v120: i32, v126: i32, v130: i32, v132: i32, v133: i32): + v124 = const.i32 1 : i32 + v125 = add v120, v124 : i32 + v128 = const.i32 0 : i32 + v129 = neq v126, v128 : i1 + condbr v129, block5(v126, v130, v132, v133, v125), block18 + +block8(v123: i32, v127: i32, v131: i32, v134: i32): + br block7(v123, v127, v131, v16, v134) + +block9: + v44 = call noname::_ZN8dlmalloc8dlmalloc7Segment9is_extern17h6f6db2c70b891fd9E(v16) : i32 + v45 = const.i32 0 : i32 + v46 = neq v44, v45 : i1 + condbr v46, block8(v122, v20, v28, v116), block10 + +block10: + 
v47 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v27) : i32 + v48 = const.i32 8 : i32 + v49 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v47, v48) : i32 + v50 = sub v49, v47 : i32 + v51 = add v27, v50 : i32 + v52 = call noname::_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E(v51) : i32 + v53 = call noname::_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E() : i32 + v54 = const.i32 8 : i32 + v55 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v53, v54) : i32 + v56 = const.i32 20 : i32 + v57 = const.i32 8 : i32 + v58 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v56, v57) : i32 + v59 = const.i32 16 : i32 + v60 = const.i32 8 : i32 + v61 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v59, v60) : i32 + v62 = call noname::_ZN8dlmalloc8dlmalloc5Chunk5inuse17h2d327e4c36b84dfeE(v51) : i32 + v63 = const.i32 0 : i32 + v64 = neq v62, v63 : i1 + condbr v64, block8(v122, v20, v28, v116), block11 + +block11: + v65 = add v51, v52 : i32 + v66 = add v53, v24 : i32 + v67 = add v55, v58 : i32 + v68 = add v67, v61 : i32 + v69 = sub v66, v68 : i32 + v70 = add v27, v69 : i32 + v71 = cast v65 : u32 + v72 = cast v70 : u32 + v73 = lt v71, v72 : i1 + v74 = cast v73 : i32 + v75 = const.i32 0 : i32 + v76 = neq v74, v75 : i1 + condbr v76, block8(v122, v20, v28, v116), block12 + +block12: + v77 = cast v28 : u32 + v78 = add v77, 424 : u32 + v79 = inttoptr v78 : *mut i32 + v80 = load v79 : i32 + v81 = eq v51, v80 : i1 + v82 = cast v81 : i32 + v83 = const.i32 0 : i32 + v84 = neq v82, v83 : i1 + condbr v84, block14, block15 + +block13: + v96 = call noname::_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$4free17h993c5f05ba1214bcE(v28, v27, v24) : i32 + v97 = const.i32 0 : i32 + v98 = neq v96, v97 : i1 + condbr v98, block16, block17 + +block14: + v85 = const.i32 0 : i32 + v86 = cast v28 : u32 + v87 = add v86, 416 : u32 + v88 = inttoptr v87 : *mut i32 + store v88, v85 + v89 = const.i32 0 : i32 + v90 = cast v28 : u32 + v91 = add v90, 424 : u32 + v92 = inttoptr v91 : *mut i32 + store v92, v89 + br block13 + +block15: + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E(v28, v51) + br block13 + +block16: + v101 = cast v93 : u32 + v102 = add v101, 432 : u32 + v103 = inttoptr v102 : *mut i32 + v104 = load v103 : i32 + v105 = sub v104, v95 : i32 + v106 = cast v93 : u32 + v107 = add v106, 432 : u32 + v108 = inttoptr v107 : *mut i32 + store v108, v105 + v112 = cast v110 : u32 + v113 = add v112, 8 : u32 + v114 = inttoptr v113 : *mut i32 + store v114, v20 + v117 = add v95, v116 : i32 + br block7(v122, v111, v93, v109, v117) + +block17: + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18insert_large_chunk17h8e77460818b80af0E(v93, v51, v52) + br block8(v121, v111, v93, v115) + +block18: + br block6 +} + +pub fn _ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$4free17h501de2a6604ba1ffE(i32, i32) { +block0(v0: i32, v1: i32): + v2 = const.i32 0 : i32 + v3 = call noname::_ZN8dlmalloc8dlmalloc5Chunk8from_mem17h11dd30c74f483706E(v1) : i32 + v4 = call noname::_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E(v3) : i32 + v5 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v3, v4) : i32 + v6 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6pinuse17h92d5107047b03ba7E(v3) : i32 + v7 = const.i32 0 : i32 + v8 = neq v6, v7 : i1 + condbr v8, block3(v3, v4), block4 + +block1: + ret + +block2: + br block1 + +block3(v104: i32, v105: i32): + v98 = call 
noname::_ZN8dlmalloc8dlmalloc5Chunk6cinuse17h58499de57c2d37e2E(v5) : i32 + v99 = const.i32 0 : i32 + v100 = eq v98, v99 : i1 + v101 = cast v100 : i32 + v102 = const.i32 0 : i32 + v103 = neq v101, v102 : i1 + condbr v103, block16, block17 + +block4: + v9 = cast v3 : u32 + v10 = inttoptr v9 : *mut i32 + v11 = load v10 : i32 + v12 = call noname::_ZN8dlmalloc8dlmalloc5Chunk7mmapped17h1a9959fbf47496c3E(v3) : i32 + v13 = const.i32 0 : i32 + v14 = neq v12, v13 : i1 + condbr v14, block5, block6 + +block5: + v79 = sub v3, v11 : i32 + v80 = add v4, v11 : i32 + v81 = const.i32 16 : i32 + v82 = add v80, v81 : i32 + v83 = call noname::_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$4free17h993c5f05ba1214bcE(v0, v79, v82) : i32 + v84 = const.i32 0 : i32 + v85 = eq v83, v84 : i1 + v86 = cast v85 : i32 + v87 = const.i32 0 : i32 + v88 = neq v86, v87 : i1 + condbr v88, block2, block14 + +block6: + v15 = add v11, v4 : i32 + v16 = call noname::_ZN8dlmalloc8dlmalloc5Chunk12minus_offset17h7c3eec81761249d9E(v3, v11) : i32 + v17 = cast v0 : u32 + v18 = add v17, 424 : u32 + v19 = inttoptr v18 : *mut i32 + v20 = load v19 : i32 + v21 = neq v16, v20 : i1 + v22 = cast v21 : i32 + v23 = const.i32 0 : i32 + v24 = neq v22, v23 : i1 + condbr v24, block7, block8 + +block7: + v39 = const.i32 256 : i32 + v40 = cast v11 : u32 + v41 = cast v39 : u32 + v42 = lt v40, v41 : i1 + v43 = cast v42 : i32 + v44 = const.i32 0 : i32 + v45 = neq v43, v44 : i1 + condbr v45, block10, block11 + +block8: + v25 = cast v5 : u32 + v26 = add v25, 4 : u32 + v27 = inttoptr v26 : *mut i32 + v28 = load v27 : i32 + v29 = const.i32 3 : i32 + v30 = band v28, v29 : i32 + v31 = const.i32 3 : i32 + v32 = neq v30, v31 : i1 + v33 = cast v32 : i32 + v34 = const.i32 0 : i32 + v35 = neq v33, v34 : i1 + condbr v35, block3(v16, v15), block9 + +block9: + v36 = cast v0 : u32 + v37 = add v36, 416 : u32 + v38 = inttoptr v37 : *mut i32 + store v38, v15 + call noname::_ZN8dlmalloc8dlmalloc5Chunk20set_free_with_pinuse17h5d876ea751634e99E(v16, v15, v5) + ret + +block10: + v46 = cast v16 : u32 + v47 = add v46, 12 : u32 + v48 = inttoptr v47 : *mut i32 + v49 = load v48 : i32 + v50 = cast v16 : u32 + v51 = add v50, 8 : u32 + v52 = inttoptr v51 : *mut i32 + v53 = load v52 : i32 + v54 = eq v49, v53 : i1 + v55 = cast v54 : i32 + v56 = const.i32 0 : i32 + v57 = neq v55, v56 : i1 + condbr v57, block12, block13 + +block11: + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E(v0, v16) + br block3(v16, v15) + +block12: + v64 = cast v0 : u32 + v65 = add v64, 408 : u32 + v66 = inttoptr v65 : *mut i32 + v67 = load v66 : i32 + v68 = const.i32 -2 : i32 + v69 = const.i32 3 : i32 + v70 = cast v11 : u32 + v71 = cast v69 : u32 + v72 = shr v70, v71 : u32 + v73 = cast v72 : i32 + v74 = shl v68, v73 : i32 + v75 = band v67, v74 : i32 + v76 = cast v0 : u32 + v77 = add v76, 408 : u32 + v78 = inttoptr v77 : *mut i32 + store v78, v75 + br block3(v16, v15) + +block13: + v58 = cast v53 : u32 + v59 = add v58, 12 : u32 + v60 = inttoptr v59 : *mut i32 + store v60, v49 + v61 = cast v49 : u32 + v62 = add v61, 8 : u32 + v63 = inttoptr v62 : *mut i32 + store v63, v53 + br block3(v16, v15) + +block14: + v89 = cast v0 : u32 + v90 = add v89, 432 : u32 + v91 = inttoptr v90 : *mut i32 + v92 = load v91 : i32 + v93 = sub v92, v82 : i32 + v94 = cast v0 : u32 + v95 = add v94, 432 : u32 + v96 = inttoptr v95 : *mut i32 + store v96, v93 + ret + +block15(v513: i32, v521: i32, v522: i32): + v514 = const.i32 256 : i32 + v515 = cast v513 : u32 + v516 = cast v514 : 
u32 + v517 = lt v515, v516 : i1 + v518 = cast v517 : i32 + v519 = const.i32 0 : i32 + v520 = neq v518, v519 : i1 + condbr v520, block55, block56 + +block16: + v107 = cast v0 : u32 + v108 = add v107, 428 : u32 + v109 = inttoptr v108 : *mut i32 + v110 = load v109 : i32 + v111 = eq v97, v110 : i1 + v112 = cast v111 : i32 + v113 = const.i32 0 : i32 + v114 = neq v112, v113 : i1 + condbr v114, block21, block22 + +block17: + call noname::_ZN8dlmalloc8dlmalloc5Chunk20set_free_with_pinuse17h5d876ea751634e99E(v104, v105, v97) + br block15(v105, v106, v104) + +block18: + v224 = cast v106 : u32 + v225 = add v224, 440 : u32 + v226 = inttoptr v225 : *mut i32 + v227 = load v226 : i32 + v228 = cast v186 : u32 + v229 = cast v227 : u32 + v230 = lte v228, v229 : i1 + v231 = cast v230 : i32 + v232 = const.i32 0 : i32 + v233 = neq v231, v232 : i1 + condbr v233, block2, block31 + +block19: + v214 = const.i32 0 : i32 + v215 = cast v106 : u32 + v216 = add v215, 416 : u32 + v217 = inttoptr v216 : *mut i32 + store v217, v214 + v218 = const.i32 0 : i32 + v219 = cast v106 : u32 + v220 = add v219, 424 : u32 + v221 = inttoptr v220 : *mut i32 + store v221, v218 + br block18 + +block20: + v203 = cast v106 : u32 + v204 = add v203, 424 : u32 + v205 = inttoptr v204 : *mut i32 + store v205, v104 + v206 = cast v106 : u32 + v207 = add v206, 416 : u32 + v208 = inttoptr v207 : *mut i32 + v209 = load v208 : i32 + v210 = add v209, v105 : i32 + v211 = cast v106 : u32 + v212 = add v211, 416 : u32 + v213 = inttoptr v212 : *mut i32 + store v213, v210 + call noname::_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E(v104, v210) + ret + +block21: + v179 = cast v106 : u32 + v180 = add v179, 428 : u32 + v181 = inttoptr v180 : *mut i32 + store v181, v104 + v182 = cast v106 : u32 + v183 = add v182, 420 : u32 + v184 = inttoptr v183 : *mut i32 + v185 = load v184 : i32 + v186 = add v185, v105 : i32 + v187 = cast v106 : u32 + v188 = add v187, 420 : u32 + v189 = inttoptr v188 : *mut i32 + store v189, v186 + v190 = const.i32 1 : i32 + v191 = bor v186, v190 : i32 + v192 = cast v104 : u32 + v193 = add v192, 4 : u32 + v194 = inttoptr v193 : *mut i32 + store v194, v191 + v195 = cast v106 : u32 + v196 = add v195, 424 : u32 + v197 = inttoptr v196 : *mut i32 + v198 = load v197 : i32 + v199 = eq v104, v198 : i1 + v200 = cast v199 : i32 + v201 = const.i32 0 : i32 + v202 = neq v200, v201 : i1 + condbr v202, block19, block30 + +block22: + v115 = cast v106 : u32 + v116 = add v115, 424 : u32 + v117 = inttoptr v116 : *mut i32 + v118 = load v117 : i32 + v119 = eq v97, v118 : i1 + v120 = cast v119 : i32 + v121 = const.i32 0 : i32 + v122 = neq v120, v121 : i1 + condbr v122, block20, block23 + +block23: + v123 = call noname::_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E(v97) : i32 + v124 = add v123, v105 : i32 + v125 = const.i32 256 : i32 + v126 = cast v123 : u32 + v127 = cast v125 : u32 + v128 = lt v126, v127 : i1 + v129 = cast v128 : i32 + v130 = const.i32 0 : i32 + v131 = neq v129, v130 : i1 + condbr v131, block25, block26 + +block24: + call noname::_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E(v104, v124) + v168 = cast v106 : u32 + v169 = add v168, 424 : u32 + v170 = inttoptr v169 : *mut i32 + v171 = load v170 : i32 + v172 = neq v165, v171 : i1 + v173 = cast v172 : i32 + v174 = const.i32 0 : i32 + v175 = neq v173, v174 : i1 + condbr v175, block15(v166, v167, v165), block29 + +block25: + v132 = cast v97 : u32 + v133 = add v132, 12 : u32 + v134 = inttoptr v133 : *mut i32 + v135 = load 
v134 : i32 + v136 = cast v97 : u32 + v137 = add v136, 8 : u32 + v138 = inttoptr v137 : *mut i32 + v139 = load v138 : i32 + v140 = eq v135, v139 : i1 + v141 = cast v140 : i32 + v142 = const.i32 0 : i32 + v143 = neq v141, v142 : i1 + condbr v143, block27, block28 + +block26: + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E(v106, v97) + br block24 + +block27: + v150 = cast v106 : u32 + v151 = add v150, 408 : u32 + v152 = inttoptr v151 : *mut i32 + v153 = load v152 : i32 + v154 = const.i32 -2 : i32 + v155 = const.i32 3 : i32 + v156 = cast v123 : u32 + v157 = cast v155 : u32 + v158 = shr v156, v157 : u32 + v159 = cast v158 : i32 + v160 = shl v154, v159 : i32 + v161 = band v153, v160 : i32 + v162 = cast v106 : u32 + v163 = add v162, 408 : u32 + v164 = inttoptr v163 : *mut i32 + store v164, v161 + br block24 + +block28: + v144 = cast v139 : u32 + v145 = add v144, 12 : u32 + v146 = inttoptr v145 : *mut i32 + store v146, v135 + v147 = cast v135 : u32 + v148 = add v147, 8 : u32 + v149 = inttoptr v148 : *mut i32 + store v149, v139 + br block24 + +block29: + v176 = cast v167 : u32 + v177 = add v176, 416 : u32 + v178 = inttoptr v177 : *mut i32 + store v178, v166 + ret + +block30: + br block18 + +block31: + v234 = call noname::_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E() : i32 + v235 = const.i32 8 : i32 + v236 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v234, v235) : i32 + v237 = const.i32 20 : i32 + v238 = const.i32 8 : i32 + v239 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v237, v238) : i32 + v240 = const.i32 16 : i32 + v241 = const.i32 8 : i32 + v242 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v240, v241) : i32 + v243 = const.i32 0 : i32 + v244 = const.i32 16 : i32 + v245 = const.i32 8 : i32 + v246 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v244, v245) : i32 + v247 = const.i32 2 : i32 + v248 = shl v246, v247 : i32 + v249 = sub v243, v248 : i32 + v250 = add v236, v239 : i32 + v251 = add v242, v250 : i32 + v252 = sub v234, v251 : i32 + v253 = const.i32 -65544 : i32 + v254 = add v252, v253 : i32 + v255 = const.i32 -9 : i32 + v256 = band v254, v255 : i32 + v257 = const.i32 -3 : i32 + v258 = add v256, v257 : i32 + v259 = cast v249 : u32 + v260 = cast v258 : u32 + v261 = lt v259, v260 : i1 + v262 = cast v261 : i32 + v263 = const.i32 0 : i32 + v264 = neq v262, v263 : i1 + v265 = select v264, v249, v258 : i32 + v266 = const.i32 0 : i32 + v267 = eq v265, v266 : i1 + v268 = cast v267 : i32 + v269 = const.i32 0 : i32 + v270 = neq v268, v269 : i1 + condbr v270, block2, block32 + +block32: + v271 = cast v223 : u32 + v272 = add v271, 428 : u32 + v273 = inttoptr v272 : *mut i32 + v274 = load v273 : i32 + v275 = const.i32 0 : i32 + v276 = eq v274, v275 : i1 + v277 = cast v276 : i32 + v278 = const.i32 0 : i32 + v279 = neq v277, v278 : i1 + condbr v279, block2, block33 + +block33: + v280 = call noname::_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E() : i32 + v281 = const.i32 8 : i32 + v282 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v280, v281) : i32 + v283 = const.i32 20 : i32 + v284 = const.i32 8 : i32 + v285 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v283, v284) : i32 + v286 = const.i32 16 : i32 + v287 = const.i32 8 : i32 + v288 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v286, v287) : i32 + v289 = const.i32 0 : i32 + v290 = cast v223 : u32 + v291 = add v290, 420 : u32 + v292 = inttoptr 
v291 : *mut i32 + v293 = load v292 : i32 + v294 = sub v282, v280 : i32 + v295 = add v285, v294 : i32 + v296 = add v288, v295 : i32 + v297 = cast v293 : u32 + v298 = cast v296 : u32 + v299 = lte v297, v298 : i1 + v300 = cast v299 : i32 + v301 = const.i32 0 : i32 + v302 = neq v300, v301 : i1 + condbr v302, block34(v223, v289), block35 + +block34(v486: i32, v489: i32): + v487 = call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$23release_unused_segments17h25622465f0742468E(v486) : i32 + v488 = const.i32 0 : i32 + v490 = sub v488, v489 : i32 + v491 = neq v487, v490 : i1 + v492 = cast v491 : i32 + v493 = const.i32 0 : i32 + v494 = neq v492, v493 : i1 + condbr v494, block2, block53 + +block35: + v303 = sub v293, v296 : i32 + v304 = const.i32 65535 : i32 + v305 = add v303, v304 : i32 + v306 = const.i32 -65536 : i32 + v307 = band v305, v306 : i32 + v308 = const.i32 -65536 : i32 + v309 = add v307, v308 : i32 + v310 = const.i32 128 : i32 + v311 = add v223, v310 : i32 + br block37(v311) + +block36(v340: i32, v345: i32, v368: i32, v392: i32, v451: i32): + v339 = const.i32 0 : i32 + v341 = call noname::_ZN8dlmalloc8dlmalloc7Segment9is_extern17h6f6db2c70b891fd9E(v340) : i32 + v342 = const.i32 0 : i32 + v343 = neq v341, v342 : i1 + condbr v343, block34(v345, v339), block43 + +block37(v312: i32): + v313 = cast v312 : u32 + v314 = inttoptr v313 : *mut i32 + v315 = load v314 : i32 + v317 = cast v315 : u32 + v318 = cast v316 : u32 + v319 = gt v317, v318 : i1 + v320 = cast v319 : i32 + v321 = const.i32 0 : i32 + v322 = neq v320, v321 : i1 + condbr v322, block39, block40 + +block38: + v338 = const.i32 0 : i32 + br block36(v338, v347, v370, v394, v453) + +block39: + v331 = cast v312 : u32 + v332 = add v331, 8 : u32 + v333 = inttoptr v332 : *mut i32 + v334 = load v333 : i32 + v335 = const.i32 0 : i32 + v336 = neq v334, v335 : i1 + condbr v336, block37(v334), block42 + +block40: + v323 = call noname::_ZN8dlmalloc8dlmalloc7Segment3top17he7e9e2493151d036E(v312) : i32 + v324 = cast v323 : u32 + v325 = cast v316 : u32 + v326 = gt v324, v325 : i1 + v327 = cast v326 : i32 + v328 = const.i32 0 : i32 + v329 = neq v327, v328 : i1 + condbr v329, block36(v312, v223, v309, v311, v307), block41 + +block41: + br block39 + +block42: + br block38 + +block43: + v344 = const.i32 0 : i32 + v348 = cast v340 : u32 + v349 = add v348, 12 : u32 + v350 = inttoptr v349 : *mut i32 + v351 = load v350 : i32 + v352 = const.i32 1 : i32 + v353 = cast v351 : u32 + v354 = cast v352 : u32 + v355 = shr v353, v354 : u32 + v356 = cast v355 : i32 + v357 = call noname::_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$16can_release_part17h43bfb7d8666fcc31E(v345, v356) : i32 + v358 = const.i32 0 : i32 + v359 = eq v357, v358 : i1 + v360 = cast v359 : i32 + v361 = const.i32 0 : i32 + v362 = neq v360, v361 : i1 + condbr v362, block34(v345, v344), block44 + +block44: + v363 = const.i32 0 : i32 + v364 = cast v340 : u32 + v365 = add v364, 4 : u32 + v366 = inttoptr v365 : *mut i32 + v367 = load v366 : i32 + v371 = cast v367 : u32 + v372 = cast v368 : u32 + v373 = lt v371, v372 : i1 + v374 = cast v373 : i32 + v375 = const.i32 0 : i32 + v376 = neq v374, v375 : i1 + condbr v376, block34(v345, v363), block45 + +block45: + br block46(v392) + +block46(v378: i32): + v379 = call noname::_ZN8dlmalloc8dlmalloc7Segment5holds17h8f6de4ee6718009bE(v377, v378) : i32 + v380 = const.i32 0 : i32 + v381 = eq v379, v380 : i1 + v382 = cast v381 : i32 + v383 = const.i32 0 : i32 + v384 = neq v382, v383 : i1 + condbr v384, block48, block49 + +block47: + v396 = cast 
v377 : u32 + v397 = inttoptr v396 : *mut i32 + v398 = load v397 : i32 + v399 = cast v377 : u32 + v400 = add v399, 4 : u32 + v401 = inttoptr v400 : *mut i32 + v402 = load v401 : i32 + v404 = sub v402, v368 : i32 + v405 = call noname::_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$9free_part17h74489c9e7a3aa967E(v345, v398, v402, v404) : i32 + v406 = const.i32 0 : i32 + v407 = const.i32 0 : i32 + v408 = eq v403, v407 : i1 + v409 = cast v408 : i32 + v410 = const.i32 0 : i32 + v411 = neq v409, v410 : i1 + condbr v411, block34(v395, v406), block51 + +block48: + v386 = cast v378 : u32 + v387 = add v386, 8 : u32 + v388 = inttoptr v387 : *mut i32 + v389 = load v388 : i32 + v390 = const.i32 0 : i32 + v391 = neq v389, v390 : i1 + condbr v391, block46(v389), block50 + +block49: + v385 = const.i32 0 : i32 + br block34(v395, v385) + +block50: + br block47 + +block51: + v412 = const.i32 0 : i32 + v413 = const.i32 0 : i32 + v414 = eq v405, v413 : i1 + v415 = cast v414 : i32 + v416 = const.i32 0 : i32 + v417 = neq v415, v416 : i1 + condbr v417, block34(v395, v412), block52 + +block52: + v418 = cast v377 : u32 + v419 = add v418, 4 : u32 + v420 = inttoptr v419 : *mut i32 + v421 = load v420 : i32 + v422 = sub v421, v403 : i32 + v423 = cast v377 : u32 + v424 = add v423, 4 : u32 + v425 = inttoptr v424 : *mut i32 + store v425, v422 + v426 = cast v395 : u32 + v427 = add v426, 432 : u32 + v428 = inttoptr v427 : *mut i32 + v429 = load v428 : i32 + v430 = sub v429, v403 : i32 + v431 = cast v395 : u32 + v432 = add v431, 432 : u32 + v433 = inttoptr v432 : *mut i32 + store v433, v430 + v434 = cast v395 : u32 + v435 = add v434, 420 : u32 + v436 = inttoptr v435 : *mut i32 + v437 = load v436 : i32 + v438 = cast v395 : u32 + v439 = add v438, 428 : u32 + v440 = inttoptr v439 : *mut i32 + v441 = load v440 : i32 + v442 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v441) : i32 + v443 = const.i32 8 : i32 + v444 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v442, v443) : i32 + v445 = sub v444, v442 : i32 + v446 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v441, v445) : i32 + v447 = cast v395 : u32 + v448 = add v447, 428 : u32 + v449 = inttoptr v448 : *mut i32 + store v449, v446 + v454 = add v451, v445 : i32 + v455 = sub v437, v454 : i32 + v456 = const.i32 65536 : i32 + v457 = add v455, v456 : i32 + v458 = cast v395 : u32 + v459 = add v458, 420 : u32 + v460 = inttoptr v459 : *mut i32 + store v460, v457 + v461 = const.i32 1 : i32 + v462 = bor v457, v461 : i32 + v463 = cast v446 : u32 + v464 = add v463, 4 : u32 + v465 = inttoptr v464 : *mut i32 + store v465, v462 + v466 = call noname::_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E() : i32 + v467 = const.i32 8 : i32 + v468 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v466, v467) : i32 + v469 = const.i32 20 : i32 + v470 = const.i32 8 : i32 + v471 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v469, v470) : i32 + v472 = const.i32 16 : i32 + v473 = const.i32 8 : i32 + v474 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v472, v473) : i32 + v475 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v446, v457) : i32 + v476 = const.i32 2097152 : i32 + v477 = cast v395 : u32 + v478 = add v477, 440 : u32 + v479 = inttoptr v478 : *mut i32 + store v479, v476 + v480 = sub v468, v466 : i32 + v481 = add v471, v480 : i32 + v482 = add v474, v481 : i32 + v483 = cast v475 : u32 + v484 = add v483, 4 : u32 + v485 = 
inttoptr v484 : *mut i32 + store v485, v482 + br block34(v395, v403) + +block53: + v495 = cast v486 : u32 + v496 = add v495, 420 : u32 + v497 = inttoptr v496 : *mut i32 + v498 = load v497 : i32 + v499 = cast v486 : u32 + v500 = add v499, 440 : u32 + v501 = inttoptr v500 : *mut i32 + v502 = load v501 : i32 + v503 = cast v498 : u32 + v504 = cast v502 : u32 + v505 = lte v503, v504 : i1 + v506 = cast v505 : i32 + v507 = const.i32 0 : i32 + v508 = neq v506, v507 : i1 + condbr v508, block2, block54 + +block54: + v509 = const.i32 -1 : i32 + v510 = cast v486 : u32 + v511 = add v510, 440 : u32 + v512 = inttoptr v511 : *mut i32 + store v512, v509 + ret + +block55: + v535 = const.i32 -8 : i32 + v536 = band v513, v535 : i32 + v537 = add v521, v536 : i32 + v538 = const.i32 144 : i32 + v539 = add v537, v538 : i32 + v540 = cast v521 : u32 + v541 = add v540, 408 : u32 + v542 = inttoptr v541 : *mut i32 + v543 = load v542 : i32 + v544 = const.i32 1 : i32 + v545 = const.i32 3 : i32 + v546 = cast v513 : u32 + v547 = cast v545 : u32 + v548 = shr v546, v547 : u32 + v549 = cast v548 : i32 + v550 = shl v544, v549 : i32 + v551 = band v543, v550 : i32 + v552 = const.i32 0 : i32 + v553 = eq v551, v552 : i1 + v554 = cast v553 : i32 + v555 = const.i32 0 : i32 + v556 = neq v554, v555 : i1 + condbr v556, block59, block60 + +block56: + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18insert_large_chunk17h8e77460818b80af0E(v521, v522, v513) + v523 = cast v521 : u32 + v524 = add v523, 448 : u32 + v525 = inttoptr v524 : *mut i32 + v526 = load v525 : i32 + v527 = const.i32 -1 : i32 + v528 = add v526, v527 : i32 + v529 = cast v521 : u32 + v530 = add v529, 448 : u32 + v531 = inttoptr v530 : *mut i32 + store v531, v528 + v532 = const.i32 0 : i32 + v533 = neq v528, v532 : i1 + condbr v533, block2, block57 + +block57: + v534 = call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$23release_unused_segments17h25622465f0742468E(v521) : i32 + ret + +block58(v570: i32): + v567 = cast v539 : u32 + v568 = add v567, 8 : u32 + v569 = inttoptr v568 : *mut i32 + store v569, v522 + v571 = cast v570 : u32 + v572 = add v571, 12 : u32 + v573 = inttoptr v572 : *mut i32 + store v573, v566 + v574 = cast v566 : u32 + v575 = add v574, 12 : u32 + v576 = inttoptr v575 : *mut i32 + store v576, v565 + v577 = cast v566 : u32 + v578 = add v577, 8 : u32 + v579 = inttoptr v578 : *mut i32 + store v579, v570 + br block2 + +block59: + v561 = bor v543, v550 : i32 + v562 = cast v521 : u32 + v563 = add v562, 408 : u32 + v564 = inttoptr v563 : *mut i32 + store v564, v561 + br block58(v539) + +block60: + v557 = cast v539 : u32 + v558 = add v557, 8 : u32 + v559 = inttoptr v558 : *mut i32 + v560 = load v559 : i32 + br block58(v560) +} + +pub fn _ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$6malloc17h1ae2390053e3628cE(i32, i32) -> i32 { +block0(v0: i32, v1: i32): + v3 = const.i32 0 : i32 + v4 = const.i64 0 : i64 + v5 = global.load (@__stack_pointer) as *mut i8 : i32 + v6 = const.i32 16 : i32 + v7 = sub v5, v6 : i32 + v8 = global.symbol @__stack_pointer : *mut i32 + store v8, v7 + v9 = const.i32 245 : i32 + v10 = cast v1 : u32 + v11 = cast v9 : u32 + v12 = lt v10, v11 : i1 + v13 = cast v12 : i32 + v14 = const.i32 0 : i32 + v15 = neq v13, v14 : i1 + condbr v15, block7, block8 + +block1(v2: i32): + ret v2 + +block2(v1571: i32, v1595: i32): + v1592 = const.i32 16 : i32 + v1593 = add v1571, v1592 : i32 + v1594 = global.symbol @__stack_pointer : *mut i32 + store v1594, v1593 + br block1(v1595) + +block3(v709: i32, v717: i32, v739: i32): + v713 = cast v709 : u32 + v714 = add 
v713, 416 : u32 + v715 = inttoptr v714 : *mut i32 + v716 = load v715 : i32 + v723 = cast v716 : u32 + v724 = cast v717 : u32 + v725 = gte v723, v724 : i1 + v726 = cast v725 : i32 + v727 = const.i32 0 : i32 + v728 = neq v726, v727 : i1 + condbr v728, block83, block84 + +block4(v607: i32, v613: i32, v621: i32, v628: i32, v749: i32): + v608 = const.i32 0 : i32 + v609 = eq v607, v608 : i1 + v610 = cast v609 : i32 + v611 = const.i32 0 : i32 + v612 = neq v610, v611 : i1 + condbr v612, block3(v613, v621, v749), block63 + +block5(v598: i32, v599: i32, v601: i32, v604: i32, v616: i32, v752: i32): + br block60(v598, v599, v604) + +block6(v531: i32, v533: i32, v540: i32, v545: i32, v555: i32, v603: i32, v606: i32, v745: i32): + v536 = bor v531, v533 : i32 + v537 = const.i32 0 : i32 + v538 = neq v536, v537 : i1 + condbr v538, block56(v531, v533), block57 + +block7: + v183 = const.i32 16 : i32 + v184 = const.i32 4 : i32 + v185 = add v1, v184 : i32 + v186 = const.i32 16 : i32 + v187 = const.i32 8 : i32 + v188 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v186, v187) : i32 + v189 = const.i32 -5 : i32 + v190 = add v188, v189 : i32 + v191 = cast v190 : u32 + v192 = cast v1 : u32 + v193 = gt v191, v192 : i1 + v194 = cast v193 : i32 + v195 = const.i32 0 : i32 + v196 = neq v194, v195 : i1 + v197 = select v196, v183, v185 : i32 + v198 = const.i32 8 : i32 + v199 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v197, v198) : i32 + v200 = cast v0 : u32 + v201 = add v200, 408 : u32 + v202 = inttoptr v201 : *mut i32 + v203 = load v202 : i32 + v204 = const.i32 3 : i32 + v205 = cast v199 : u32 + v206 = cast v204 : u32 + v207 = shr v205, v206 : u32 + v208 = cast v207 : i32 + v209 = cast v203 : u32 + v210 = cast v208 : u32 + v211 = shr v209, v210 : u32 + v212 = cast v211 : i32 + v213 = const.i32 3 : i32 + v214 = band v212, v213 : i32 + v215 = const.i32 0 : i32 + v216 = eq v214, v215 : i1 + v217 = cast v216 : i32 + v218 = const.i32 0 : i32 + v219 = neq v217, v218 : i1 + condbr v219, block23, block24 + +block8: + v16 = call noname::_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E() : i32 + v17 = const.i32 8 : i32 + v18 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v16, v17) : i32 + v19 = const.i32 20 : i32 + v20 = const.i32 8 : i32 + v21 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v19, v20) : i32 + v22 = const.i32 16 : i32 + v23 = const.i32 8 : i32 + v24 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v22, v23) : i32 + v25 = const.i32 0 : i32 + v26 = const.i32 0 : i32 + v27 = const.i32 16 : i32 + v28 = const.i32 8 : i32 + v29 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v27, v28) : i32 + v30 = const.i32 2 : i32 + v31 = shl v29, v30 : i32 + v32 = sub v26, v31 : i32 + v33 = add v18, v21 : i32 + v34 = add v24, v33 : i32 + v35 = sub v16, v34 : i32 + v36 = const.i32 -65544 : i32 + v37 = add v35, v36 : i32 + v38 = const.i32 -9 : i32 + v39 = band v37, v38 : i32 + v40 = const.i32 -3 : i32 + v41 = add v39, v40 : i32 + v42 = cast v32 : u32 + v43 = cast v41 : u32 + v44 = lt v42, v43 : i1 + v45 = cast v44 : i32 + v46 = const.i32 0 : i32 + v47 = neq v45, v46 : i1 + v48 = select v47, v32, v41 : i32 + v49 = cast v48 : u32 + v50 = cast v1 : u32 + v51 = lte v49, v50 : i1 + v52 = cast v51 : i32 + v53 = const.i32 0 : i32 + v54 = neq v52, v53 : i1 + condbr v54, block2(v7, v25), block9 + +block9: + v55 = const.i32 4 : i32 + v56 = add v1, v55 : i32 + v57 = const.i32 8 : i32 + v58 = call 
noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v56, v57) : i32 + v59 = cast v0 : u32 + v60 = add v59, 412 : u32 + v61 = inttoptr v60 : *mut i32 + v62 = load v61 : i32 + v63 = const.i32 0 : i32 + v64 = eq v62, v63 : i1 + v65 = cast v64 : i32 + v66 = const.i32 0 : i32 + v67 = neq v65, v66 : i1 + condbr v67, block3(v0, v58, v7), block10 + +block10: + v68 = const.i32 0 : i32 + v69 = const.i32 0 : i32 + v70 = const.i32 256 : i32 + v71 = cast v58 : u32 + v72 = cast v70 : u32 + v73 = lt v71, v72 : i1 + v74 = cast v73 : i32 + v75 = const.i32 0 : i32 + v76 = neq v74, v75 : i1 + condbr v76, block11(v69), block12 + +block11(v108: i32): + v104 = const.i32 0 : i32 + v106 = sub v104, v58 : i32 + v109 = const.i32 2 : i32 + v110 = shl v108, v109 : i32 + v111 = add v0, v110 : i32 + v112 = cast v111 : u32 + v113 = inttoptr v112 : *mut i32 + v114 = load v113 : i32 + v115 = const.i32 0 : i32 + v116 = neq v114, v115 : i1 + condbr v116, block14, block15 + +block12: + v77 = const.i32 31 : i32 + v78 = const.i32 16777215 : i32 + v79 = cast v58 : u32 + v80 = cast v78 : u32 + v81 = gt v79, v80 : i1 + v82 = cast v81 : i32 + v83 = const.i32 0 : i32 + v84 = neq v82, v83 : i1 + condbr v84, block11(v77), block13 + +block13: + v85 = const.i32 6 : i32 + v86 = const.i32 8 : i32 + v87 = cast v58 : u32 + v88 = cast v86 : u32 + v89 = shr v87, v88 : u32 + v90 = cast v89 : i32 + v91 = popcnt v90 : i32 + v92 = sub v85, v91 : i32 + v93 = cast v58 : u32 + v94 = cast v92 : u32 + v95 = shr v93, v94 : u32 + v96 = cast v95 : i32 + v97 = const.i32 1 : i32 + v98 = band v96, v97 : i32 + v99 = const.i32 1 : i32 + v100 = shl v91, v99 : i32 + v101 = sub v98, v100 : i32 + v102 = const.i32 62 : i32 + v103 = add v101, v102 : i32 + br block11(v103) + +block14: + v118 = call noname::_ZN8dlmalloc8dlmalloc24leftshift_for_tree_index17h31d064fdd867f502E(v108) : i32 + v119 = shl v105, v118 : i32 + v120 = const.i32 0 : i32 + v121 = const.i32 0 : i32 + br block16(v114, v106, v120, v119, v121, v108, v546, v107, v746) + +block15: + v117 = const.i32 0 : i32 + br block6(v68, v117, v108, v62, v107, v105, v106, v7) + +block16(v122: i32, v133: i32, v150: i32, v152: i32, v535: i32, v542: i32, v548: i32, v557: i32, v748: i32): + v123 = call noname::_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E(v122) : i32 + v124 = call noname::_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E(v123) : i32 + v126 = cast v124 : u32 + v127 = cast v125 : u32 + v128 = lt v126, v127 : i1 + v129 = cast v128 : i32 + v130 = const.i32 0 : i32 + v131 = neq v129, v130 : i1 + condbr v131, block18(v133, v535), block19 + +block17: + +block18(v182: i32, v534: i32): + v144 = const.i32 20 : i32 + v145 = add v122, v144 : i32 + v146 = cast v145 : u32 + v147 = inttoptr v146 : *mut i32 + v148 = load v147 : i32 + v153 = const.i32 29 : i32 + v154 = cast v152 : u32 + v155 = cast v153 : u32 + v156 = shr v154, v155 : u32 + v157 = cast v156 : i32 + v158 = const.i32 4 : i32 + v159 = band v157, v158 : i32 + v160 = add v143, v159 : i32 + v161 = const.i32 16 : i32 + v162 = add v160, v161 : i32 + v163 = cast v162 : u32 + v164 = inttoptr v163 : *mut i32 + v165 = load v164 : i32 + v166 = neq v148, v165 : i1 + v167 = cast v166 : i32 + v168 = const.i32 0 : i32 + v169 = neq v167, v168 : i1 + v170 = select v169, v148, v150 : i32 + v171 = const.i32 0 : i32 + v172 = neq v148, v171 : i1 + v173 = select v172, v170, v149 : i32 + v174 = const.i32 1 : i32 + v175 = shl v151, v174 : i32 + v176 = const.i32 0 : i32 + v177 = eq v165, v176 : i1 + v178 = cast v177 : i32 + v179 = const.i32 0 : i32 + v180 = 
neq v178, v179 : i1 + condbr v180, block6(v173, v534, v542, v548, v557, v181, v182, v748), block22 + +block19: + v132 = sub v124, v125 : i32 + v134 = cast v132 : u32 + v135 = cast v133 : u32 + v136 = gte v134, v135 : i1 + v137 = cast v136 : i32 + v138 = const.i32 0 : i32 + v139 = neq v137, v138 : i1 + condbr v139, block18(v133, v535), block20 + +block20: + v140 = const.i32 0 : i32 + v141 = neq v132, v140 : i1 + condbr v141, block18(v132, v122), block21 + +block21: + v142 = const.i32 0 : i32 + br block5(v122, v122, v125, v142, v557, v748) + +block22: + br block16(v165, v182, v173, v175, v534, v541, v547, v556, v747) + +block23: + v260 = cast v0 : u32 + v261 = add v260, 416 : u32 + v262 = inttoptr v261 : *mut i32 + v263 = load v262 : i32 + v264 = cast v199 : u32 + v265 = cast v263 : u32 + v266 = lte v264, v265 : i1 + v267 = cast v266 : i32 + v268 = const.i32 0 : i32 + v269 = neq v267, v268 : i1 + condbr v269, block3(v0, v199, v7), block28 + +block24: + v220 = const.i32 -1 : i32 + v221 = bxor v212, v220 : i32 + v222 = const.i32 1 : i32 + v223 = band v221, v222 : i32 + v224 = add v223, v208 : i32 + v225 = const.i32 3 : i32 + v226 = shl v224, v225 : i32 + v227 = add v0, v226 : i32 + v228 = const.i32 152 : i32 + v229 = add v227, v228 : i32 + v230 = cast v229 : u32 + v231 = inttoptr v230 : *mut i32 + v232 = load v231 : i32 + v233 = cast v232 : u32 + v234 = add v233, 8 : u32 + v235 = inttoptr v234 : *mut i32 + v236 = load v235 : i32 + v237 = const.i32 144 : i32 + v238 = add v227, v237 : i32 + v239 = eq v236, v238 : i1 + v240 = cast v239 : i32 + v241 = const.i32 0 : i32 + v242 = neq v240, v241 : i1 + condbr v242, block26, block27 + +block25: + v257 = const.i32 3 : i32 + v258 = shl v224, v257 : i32 + call noname::_ZN8dlmalloc8dlmalloc5Chunk20set_inuse_and_pinuse17h515e5a69a6d1edc6E(v232, v258) + v259 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v255) : i32 + br block2(v7, v259) + +block26: + v249 = const.i32 -2 : i32 + v250 = shl v249, v224 : i32 + v251 = band v203, v250 : i32 + v252 = cast v0 : u32 + v253 = add v252, 408 : u32 + v254 = inttoptr v253 : *mut i32 + store v254, v251 + br block25 + +block27: + v243 = cast v236 : u32 + v244 = add v243, 12 : u32 + v245 = inttoptr v244 : *mut i32 + store v245, v238 + v246 = cast v238 : u32 + v247 = add v246, 8 : u32 + v248 = inttoptr v247 : *mut i32 + store v248, v236 + br block25 + +block28: + v270 = const.i32 0 : i32 + v271 = neq v212, v270 : i1 + condbr v271, block35, block36 + +block29(v710: i32, v740: i32): + v525 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v319) : i32 + v526 = const.i32 0 : i32 + v527 = eq v525, v526 : i1 + v528 = cast v527 : i32 + v529 = const.i32 0 : i32 + v530 = neq v528, v529 : i1 + condbr v530, block3(v710, v320, v740), block55 + +block30: + v514 = cast v322 : u32 + v515 = add v514, 424 : u32 + v516 = inttoptr v515 : *mut i32 + store v516, v334 + v519 = cast v510 : u32 + v520 = add v519, 416 : u32 + v521 = inttoptr v520 : *mut i32 + store v521, v324 + br block29(v510, v741) + +block31(v1573: i32): + v499 = cast v390 : u32 + v500 = add v499, 424 : u32 + v501 = inttoptr v500 : *mut i32 + store v501, v385 + v504 = cast v495 : u32 + v505 = add v504, 416 : u32 + v506 = inttoptr v505 : *mut i32 + store v506, v389 + v509 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v383) : i32 + br block2(v1573, v509) + +block32: + v448 = const.i32 -8 : i32 + v449 = band v394, v448 : i32 + v450 = add v342, v449 : i32 + v451 = cast v390 : u32 + v452 = add v451, 424 : u32 + v453 
= inttoptr v452 : *mut i32 + v454 = load v453 : i32 + v455 = cast v390 : u32 + v456 = add v455, 408 : u32 + v457 = inttoptr v456 : *mut i32 + v458 = load v457 : i32 + v459 = const.i32 1 : i32 + v460 = const.i32 3 : i32 + v461 = cast v394 : u32 + v462 = cast v460 : u32 + v463 = shr v461, v462 : u32 + v464 = cast v463 : i32 + v465 = shl v459, v464 : i32 + v466 = band v458, v465 : i32 + v467 = const.i32 0 : i32 + v468 = eq v466, v467 : i1 + v469 = cast v468 : i32 + v470 = const.i32 0 : i32 + v471 = neq v469, v470 : i1 + condbr v471, block53, block54 + +block33: + v446 = add v324, v320 : i32 + call noname::_ZN8dlmalloc8dlmalloc5Chunk20set_inuse_and_pinuse17h515e5a69a6d1edc6E(v319, v446) + br block29(v322, v7) + +block34: + v397 = const.i32 -8 : i32 + v398 = band v338, v397 : i32 + v399 = add v322, v398 : i32 + v400 = const.i32 144 : i32 + v401 = add v399, v400 : i32 + v402 = cast v322 : u32 + v403 = add v402, 424 : u32 + v404 = inttoptr v403 : *mut i32 + v405 = load v404 : i32 + v406 = cast v322 : u32 + v407 = add v406, 408 : u32 + v408 = inttoptr v407 : *mut i32 + v409 = load v408 : i32 + v410 = const.i32 1 : i32 + v411 = const.i32 3 : i32 + v412 = cast v338 : u32 + v413 = cast v411 : u32 + v414 = shr v412, v413 : u32 + v415 = cast v414 : i32 + v416 = shl v410, v415 : i32 + v417 = band v409, v416 : i32 + v418 = const.i32 0 : i32 + v419 = eq v417, v418 : i1 + v420 = cast v419 : i32 + v421 = const.i32 0 : i32 + v422 = neq v420, v421 : i1 + condbr v422, block50, block51 + +block35: + v341 = const.i32 144 : i32 + v342 = add v0, v341 : i32 + v343 = const.i32 1 : i32 + v344 = const.i32 31 : i32 + v345 = band v208, v344 : i32 + v346 = shl v343, v345 : i32 + v347 = call noname::_ZN8dlmalloc8dlmalloc9left_bits17hb6cbe146b8019d98E(v346) : i32 + v348 = shl v212, v345 : i32 + v349 = band v347, v348 : i32 + v350 = call noname::_ZN8dlmalloc8dlmalloc9least_bit17h4bca52ead665dc5aE(v349) : i32 + v351 = popcnt v350 : i32 + v352 = const.i32 3 : i32 + v353 = shl v351, v352 : i32 + v354 = add v342, v353 : i32 + v355 = cast v354 : u32 + v356 = add v355, 8 : u32 + v357 = inttoptr v356 : *mut i32 + v358 = load v357 : i32 + v359 = cast v358 : u32 + v360 = add v359, 8 : u32 + v361 = inttoptr v360 : *mut i32 + v362 = load v361 : i32 + v363 = eq v362, v354 : i1 + v364 = cast v363 : i32 + v365 = const.i32 0 : i32 + v366 = neq v364, v365 : i1 + condbr v366, block46, block47 + +block36: + v272 = cast v0 : u32 + v273 = add v272, 412 : u32 + v274 = inttoptr v273 : *mut i32 + v275 = load v274 : i32 + v276 = const.i32 0 : i32 + v277 = eq v275, v276 : i1 + v278 = cast v277 : i32 + v279 = const.i32 0 : i32 + v280 = neq v278, v279 : i1 + condbr v280, block3(v0, v199, v7), block37 + +block37: + v281 = call noname::_ZN8dlmalloc8dlmalloc9least_bit17h4bca52ead665dc5aE(v275) : i32 + v282 = popcnt v281 : i32 + v283 = const.i32 2 : i32 + v284 = shl v282, v283 : i32 + v285 = add v0, v284 : i32 + v286 = cast v285 : u32 + v287 = inttoptr v286 : *mut i32 + v288 = load v287 : i32 + v289 = call noname::_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E(v288) : i32 + v290 = call noname::_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E(v289) : i32 + v291 = sub v290, v199 : i32 + v292 = call noname::_ZN8dlmalloc8dlmalloc9TreeChunk14leftmost_child17h20605933c801b44bE(v288) : i32 + v293 = const.i32 0 : i32 + v294 = eq v292, v293 : i1 + v295 = cast v294 : i32 + v296 = const.i32 0 : i32 + v297 = neq v295, v296 : i1 + condbr v297, block38(v288, v199, v291), block39 + +block38(v318: i32, v320: i32, v324: i32): + v319 = call 
noname::_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E(v318) : i32 + v321 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v319, v320) : i32 + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E(v0, v318) + v325 = const.i32 16 : i32 + v326 = const.i32 8 : i32 + v327 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v325, v326) : i32 + v328 = cast v324 : u32 + v329 = cast v327 : u32 + v330 = lt v328, v329 : i1 + v331 = cast v330 : i32 + v332 = const.i32 0 : i32 + v333 = neq v331, v332 : i1 + condbr v333, block33, block43 + +block39: + br block40(v292, v291, v288) + +block40(v298: i32, v303: i32, v311: i32): + v299 = call noname::_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E(v298) : i32 + v300 = call noname::_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E(v299) : i32 + v302 = sub v300, v301 : i32 + v304 = cast v302 : u32 + v305 = cast v303 : u32 + v306 = lt v304, v305 : i1 + v307 = cast v306 : i32 + v308 = const.i32 0 : i32 + v309 = neq v307, v308 : i1 + v310 = select v309, v302, v303 : i32 + v312 = const.i32 0 : i32 + v313 = neq v307, v312 : i1 + v314 = select v313, v298, v311 : i32 + v315 = call noname::_ZN8dlmalloc8dlmalloc9TreeChunk14leftmost_child17h20605933c801b44bE(v298) : i32 + v316 = const.i32 0 : i32 + v317 = neq v315, v316 : i1 + condbr v317, block40(v315, v310, v314), block42 + +block41: + br block38(v314, v301, v310) + +block42: + br block41 + +block43: + v334 = call noname::_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E(v321) : i32 + call noname::_ZN8dlmalloc8dlmalloc5Chunk34set_size_and_pinuse_of_inuse_chunk17h4acf6d59020bd397E(v319, v320) + call noname::_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E(v334, v324) + v335 = cast v322 : u32 + v336 = add v335, 416 : u32 + v337 = inttoptr v336 : *mut i32 + v338 = load v337 : i32 + v339 = const.i32 0 : i32 + v340 = neq v338, v339 : i1 + condbr v340, block34, block44 + +block44: + br block30 + +block45: + call noname::_ZN8dlmalloc8dlmalloc5Chunk34set_size_and_pinuse_of_inuse_chunk17h4acf6d59020bd397E(v358, v199) + v385 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v383, v384) : i32 + v387 = const.i32 3 : i32 + v388 = shl v351, v387 : i32 + v389 = sub v388, v384 : i32 + call noname::_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E(v385, v389) + v391 = cast v0 : u32 + v392 = add v391, 416 : u32 + v393 = inttoptr v392 : *mut i32 + v394 = load v393 : i32 + v395 = const.i32 0 : i32 + v396 = neq v394, v395 : i1 + condbr v396, block32, block48 + +block46: + v373 = cast v0 : u32 + v374 = add v373, 408 : u32 + v375 = inttoptr v374 : *mut i32 + v376 = load v375 : i32 + v377 = const.i32 -2 : i32 + v378 = shl v377, v351 : i32 + v379 = band v376, v378 : i32 + v380 = cast v0 : u32 + v381 = add v380, 408 : u32 + v382 = inttoptr v381 : *mut i32 + store v382, v379 + br block45 + +block47: + v367 = cast v362 : u32 + v368 = add v367, 12 : u32 + v369 = inttoptr v368 : *mut i32 + store v369, v354 + v370 = cast v354 : u32 + v371 = add v370, 8 : u32 + v372 = inttoptr v371 : *mut i32 + store v372, v362 + br block45 + +block48: + br block31(v7) + +block49(v436: i32): + v433 = cast v401 : u32 + v434 = add v433, 8 : u32 + v435 = inttoptr v434 : *mut i32 + store v435, v405 + v437 = cast v436 : u32 + v438 = add v437, 12 : u32 + v439 = inttoptr v438 : *mut i32 + store v439, v432 + v440 = cast v432 : u32 + v441 = add v440, 12 : u32 + v442 = 
inttoptr v441 : *mut i32 + store v442, v431 + v443 = cast v432 : u32 + v444 = add v443, 8 : u32 + v445 = inttoptr v444 : *mut i32 + store v445, v436 + br block30 + +block50: + v427 = bor v409, v416 : i32 + v428 = cast v322 : u32 + v429 = add v428, 408 : u32 + v430 = inttoptr v429 : *mut i32 + store v430, v427 + br block49(v401) + +block51: + v423 = cast v401 : u32 + v424 = add v423, 8 : u32 + v425 = inttoptr v424 : *mut i32 + v426 = load v425 : i32 + br block49(v426) + +block52(v485: i32): + v482 = cast v450 : u32 + v483 = add v482, 8 : u32 + v484 = inttoptr v483 : *mut i32 + store v484, v454 + v486 = cast v485 : u32 + v487 = add v486, 12 : u32 + v488 = inttoptr v487 : *mut i32 + store v488, v481 + v489 = cast v481 : u32 + v490 = add v489, 12 : u32 + v491 = inttoptr v490 : *mut i32 + store v491, v480 + v492 = cast v481 : u32 + v493 = add v492, 8 : u32 + v494 = inttoptr v493 : *mut i32 + store v494, v485 + br block31(v1574) + +block53: + v476 = bor v458, v465 : i32 + v477 = cast v390 : u32 + v478 = add v477, 408 : u32 + v479 = inttoptr v478 : *mut i32 + store v479, v476 + br block52(v450) + +block54: + v472 = cast v450 : u32 + v473 = add v472, 8 : u32 + v474 = inttoptr v473 : *mut i32 + v475 = load v474 : i32 + br block52(v475) + +block55: + br block2(v740, v525) + +block56(v567: i32, v600: i32): + v568 = const.i32 0 : i32 + v569 = eq v567, v568 : i1 + v570 = cast v569 : i32 + v571 = const.i32 0 : i32 + v572 = neq v570, v571 : i1 + condbr v572, block4(v600, v555, v602, v605, v745), block59 + +block57: + v539 = const.i32 1 : i32 + v543 = shl v539, v540 : i32 + v544 = call noname::_ZN8dlmalloc8dlmalloc9left_bits17hb6cbe146b8019d98E(v543) : i32 + v549 = band v544, v545 : i32 + v550 = const.i32 0 : i32 + v551 = eq v549, v550 : i1 + v552 = cast v551 : i32 + v553 = const.i32 0 : i32 + v554 = neq v552, v553 : i1 + condbr v554, block3(v555, v603, v745), block58 + +block58: + v558 = call noname::_ZN8dlmalloc8dlmalloc9least_bit17h4bca52ead665dc5aE(v549) : i32 + v559 = popcnt v558 : i32 + v560 = const.i32 2 : i32 + v561 = shl v559, v560 : i32 + v562 = add v555, v561 : i32 + v563 = cast v562 : u32 + v564 = inttoptr v563 : *mut i32 + v565 = load v564 : i32 + v566 = const.i32 0 : i32 + br block56(v565, v566) + +block59: + br block5(v567, v600, v603, v606, v614, v750) + +block60(v573: i32, v574: i32, v583: i32): + v575 = call noname::_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E(v573) : i32 + v576 = call noname::_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E(v575) : i32 + v578 = cast v576 : u32 + v579 = cast v577 : u32 + v580 = gte v578, v579 : i1 + v581 = cast v580 : i32 + v582 = sub v576, v577 : i32 + v584 = cast v582 : u32 + v585 = cast v583 : u32 + v586 = lt v584, v585 : i1 + v587 = cast v586 : i32 + v588 = band v581, v587 : i32 + v589 = const.i32 0 : i32 + v590 = neq v588, v589 : i1 + v591 = select v590, v573, v574 : i32 + v592 = const.i32 0 : i32 + v593 = neq v588, v592 : i1 + v594 = select v593, v582, v583 : i32 + v595 = call noname::_ZN8dlmalloc8dlmalloc9TreeChunk14leftmost_child17h20605933c801b44bE(v573) : i32 + v596 = const.i32 0 : i32 + v597 = neq v595, v596 : i1 + condbr v597, block60(v595, v591, v594), block62 + +block61: + br block4(v591, v616, v577, v594, v752) + +block62: + br block61 + +block63: + v617 = cast v613 : u32 + v618 = add v617, 416 : u32 + v619 = inttoptr v618 : *mut i32 + v620 = load v619 : i32 + v622 = cast v620 : u32 + v623 = cast v621 : u32 + v624 = lt v622, v623 : i1 + v625 = cast v624 : i32 + v626 = const.i32 0 : i32 + v627 = neq v625, v626 : i1 + condbr 
v627, block64, block65 + +block64: + v637 = call noname::_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E(v607) : i32 + v639 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v637, v621) : i32 + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E(v613, v636) + v642 = const.i32 16 : i32 + v643 = const.i32 8 : i32 + v644 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v642, v643) : i32 + v645 = cast v628 : u32 + v646 = cast v644 : u32 + v647 = lt v645, v646 : i1 + v648 = cast v647 : i32 + v649 = const.i32 0 : i32 + v650 = neq v648, v649 : i1 + condbr v650, block68, block69 + +block65: + v629 = sub v620, v621 : i32 + v630 = cast v628 : u32 + v631 = cast v629 : u32 + v632 = gte v630, v631 : i1 + v633 = cast v632 : i32 + v634 = const.i32 0 : i32 + v635 = neq v633, v634 : i1 + condbr v635, block3(v613, v621, v749), block66 + +block66: + br block64 + +block67(v753: i32): + v706 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v637) : i32 + v707 = const.i32 0 : i32 + v708 = neq v706, v707 : i1 + condbr v708, block2(v753, v706), block75 + +block68: + v703 = add v641, v638 : i32 + call noname::_ZN8dlmalloc8dlmalloc5Chunk20set_inuse_and_pinuse17h515e5a69a6d1edc6E(v637, v703) + br block67(v754) + +block69: + call noname::_ZN8dlmalloc8dlmalloc5Chunk34set_size_and_pinuse_of_inuse_chunk17h4acf6d59020bd397E(v637, v638) + call noname::_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E(v639, v641) + v651 = const.i32 256 : i32 + v652 = cast v641 : u32 + v653 = cast v651 : u32 + v654 = lt v652, v653 : i1 + v655 = cast v654 : i32 + v656 = const.i32 0 : i32 + v657 = neq v655, v656 : i1 + condbr v657, block70, block71 + +block70: + v658 = const.i32 -8 : i32 + v659 = band v641, v658 : i32 + v660 = add v640, v659 : i32 + v661 = const.i32 144 : i32 + v662 = add v660, v661 : i32 + v663 = cast v640 : u32 + v664 = add v663, 408 : u32 + v665 = inttoptr v664 : *mut i32 + v666 = load v665 : i32 + v667 = const.i32 1 : i32 + v668 = const.i32 3 : i32 + v669 = cast v641 : u32 + v670 = cast v668 : u32 + v671 = shr v669, v670 : u32 + v672 = cast v671 : i32 + v673 = shl v667, v672 : i32 + v674 = band v666, v673 : i32 + v675 = const.i32 0 : i32 + v676 = eq v674, v675 : i1 + v677 = cast v676 : i32 + v678 = const.i32 0 : i32 + v679 = neq v677, v678 : i1 + condbr v679, block73, block74 + +block71: + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18insert_large_chunk17h8e77460818b80af0E(v640, v639, v641) + br block67(v749) + +block72(v693: i32): + v690 = cast v662 : u32 + v691 = add v690, 8 : u32 + v692 = inttoptr v691 : *mut i32 + store v692, v639 + v694 = cast v693 : u32 + v695 = add v694, 12 : u32 + v696 = inttoptr v695 : *mut i32 + store v696, v689 + v697 = cast v689 : u32 + v698 = add v697, 12 : u32 + v699 = inttoptr v698 : *mut i32 + store v699, v688 + v700 = cast v689 : u32 + v701 = add v700, 8 : u32 + v702 = inttoptr v701 : *mut i32 + store v702, v693 + br block67(v754) + +block73: + v684 = bor v666, v673 : i32 + v685 = cast v640 : u32 + v686 = add v685, 408 : u32 + v687 = inttoptr v686 : *mut i32 + store v687, v684 + br block72(v662) + +block74: + v680 = cast v662 : u32 + v681 = add v680, 8 : u32 + v682 = inttoptr v681 : *mut i32 + v683 = load v682 : i32 + br block72(v683) + +block75: + br block3(v640, v638, v753) + +block76(v1531: i32, v1537: i32, v1582: i32, v1596: i32): + v1533 = cast v1531 : u32 + v1534 = add v1533, 420 : u32 + v1535 = inttoptr v1534 : *mut i32 + v1536 
= load v1535 : i32 + v1547 = cast v1536 : u32 + v1548 = cast v1537 : u32 + v1549 = lte v1547, v1548 : i1 + v1550 = cast v1549 : i32 + v1551 = const.i32 0 : i32 + v1552 = neq v1550, v1551 : i1 + condbr v1552, block2(v1582, v1596), block144 + +block77(v1413: i32, v1418: i32): + v1414 = const.i32 4095 : i32 + v1415 = cast v1413 : u32 + v1416 = add v1415, 448 : u32 + v1417 = inttoptr v1416 : *mut i32 + store v1417, v1414 + v1419 = cast v1413 : u32 + v1420 = add v1419, 128 : u32 + v1421 = inttoptr v1420 : *mut i32 + store v1421, v1418 + v1422 = const.i32 140 : i32 + v1423 = add v1413, v1422 : i32 + v1426 = cast v1423 : u32 + v1427 = inttoptr v1426 : *mut i32 + store v1427, v788 + v1428 = const.i32 132 : i32 + v1429 = add v1413, v1428 : i32 + v1432 = cast v1429 : u32 + v1433 = inttoptr v1432 : *mut i32 + store v1433, v796 + v1434 = const.i32 0 : i32 + br block141(v1434) + +block78(v1352: i32, v1353: i32): + call noname::_ZN8dlmalloc8dlmalloc5Chunk20set_free_with_pinuse17h5d876ea751634e99E(v1176, v1352, v1353) + v1354 = const.i32 256 : i32 + v1355 = cast v1352 : u32 + v1356 = cast v1354 : u32 + v1357 = lt v1355, v1356 : i1 + v1358 = cast v1357 : i32 + v1359 = const.i32 0 : i32 + v1360 = neq v1358, v1359 : i1 + condbr v1360, block136, block137 + +block79: + v1337 = const.i32 0 : i32 + v1338 = cast v709 : u32 + v1339 = add v1338, 424 : u32 + v1340 = inttoptr v1339 : *mut i32 + store v1340, v1337 + v1341 = cast v709 : u32 + v1342 = add v1341, 416 : u32 + v1343 = inttoptr v1342 : *mut i32 + v1344 = load v1343 : i32 + v1345 = const.i32 0 : i32 + v1346 = cast v709 : u32 + v1347 = add v1346, 416 : u32 + v1348 = inttoptr v1347 : *mut i32 + store v1348, v1345 + call noname::_ZN8dlmalloc8dlmalloc5Chunk20set_inuse_and_pinuse17h515e5a69a6d1edc6E(v1284, v1344) + v1349 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v1284) : i32 + br block2(v739, v1349) + +block80: + v1325 = cast v915 : u32 + v1326 = add v1325, 424 : u32 + v1327 = inttoptr v1326 : *mut i32 + store v1327, v1176 + v1328 = cast v915 : u32 + v1329 = add v1328, 416 : u32 + v1330 = inttoptr v1329 : *mut i32 + v1331 = load v1330 : i32 + v1332 = add v1331, v1180 : i32 + v1333 = cast v915 : u32 + v1334 = add v1333, 416 : u32 + v1335 = inttoptr v1334 : *mut i32 + store v1335, v1332 + call noname::_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E(v1176, v1332) + v1336 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v1172) : i32 + br block2(v1576, v1336) + +block81: + v1308 = cast v839 : u32 + v1309 = add v1308, 4 : u32 + v1310 = inttoptr v1309 : *mut i32 + v1311 = load v1310 : i32 + v1312 = add v1311, v885 : i32 + v1313 = cast v839 : u32 + v1314 = add v1313, 4 : u32 + v1315 = inttoptr v1314 : *mut i32 + store v1315, v1312 + v1316 = cast v860 : u32 + v1317 = add v1316, 428 : u32 + v1318 = inttoptr v1317 : *mut i32 + v1319 = load v1318 : i32 + v1320 = cast v860 : u32 + v1321 = add v1320, 420 : u32 + v1322 = inttoptr v1321 : *mut i32 + v1323 = load v1322 : i32 + v1324 = add v1323, v885 : i32 + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$8init_top17he4cefe3b36a3bd87E(v860, v1319, v1324) + br block76(v860, v1175, v1578, v1603) + +block82: + v1305 = cast v709 : u32 + v1306 = add v1305, 444 : u32 + v1307 = inttoptr v1306 : *mut i32 + store v1307, v779 + br block77(v1303, v1304) + +block83: + v1281 = cast v709 : u32 + v1282 = add v1281, 424 : u32 + v1283 = inttoptr v1282 : *mut i32 + v1284 = load v1283 : i32 + v1285 = sub v716, v717 : i32 + v1286 = const.i32 16 : i32 + v1287 = 
const.i32 8 : i32 + v1288 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v1286, v1287) : i32 + v1289 = cast v1285 : u32 + v1290 = cast v1288 : u32 + v1291 = lt v1289, v1290 : i1 + v1292 = cast v1291 : i32 + v1293 = const.i32 0 : i32 + v1294 = neq v1292, v1293 : i1 + condbr v1294, block79, block135 + +block84: + v729 = cast v709 : u32 + v730 = add v729, 420 : u32 + v731 = inttoptr v730 : *mut i32 + v732 = load v731 : i32 + v733 = cast v732 : u32 + v734 = cast v717 : u32 + v735 = gt v733, v734 : i1 + v736 = cast v735 : i32 + v737 = const.i32 0 : i32 + v738 = neq v736, v737 : i1 + condbr v738, block85, block86 + +block85: + v1263 = sub v732, v717 : i32 + v1264 = cast v709 : u32 + v1265 = add v1264, 420 : u32 + v1266 = inttoptr v1265 : *mut i32 + store v1266, v1263 + v1267 = cast v709 : u32 + v1268 = add v1267, 428 : u32 + v1269 = inttoptr v1268 : *mut i32 + v1270 = load v1269 : i32 + v1271 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v1270, v717) : i32 + v1272 = cast v709 : u32 + v1273 = add v1272, 428 : u32 + v1274 = inttoptr v1273 : *mut i32 + store v1274, v1271 + v1275 = const.i32 1 : i32 + v1276 = bor v1263, v1275 : i32 + v1277 = cast v1271 : u32 + v1278 = add v1277, 4 : u32 + v1279 = inttoptr v1278 : *mut i32 + store v1279, v1276 + call noname::_ZN8dlmalloc8dlmalloc5Chunk34set_size_and_pinuse_of_inuse_chunk17h4acf6d59020bd397E(v1270, v717) + v1280 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v1270) : i32 + br block2(v739, v1280) + +block86: + v756 = const.i32 4 : i32 + v757 = add v739, v756 : i32 + v758 = call noname::_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E() : i32 + v759 = sub v717, v758 : i32 + v760 = const.i32 8 : i32 + v761 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v758, v760) : i32 + v762 = add v759, v761 : i32 + v763 = const.i32 20 : i32 + v764 = const.i32 8 : i32 + v765 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v763, v764) : i32 + v766 = add v762, v765 : i32 + v767 = const.i32 16 : i32 + v768 = const.i32 8 : i32 + v769 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v767, v768) : i32 + v770 = add v766, v769 : i32 + v771 = const.i32 8 : i32 + v772 = add v770, v771 : i32 + v773 = const.i32 65536 : i32 + v774 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v772, v773) : i32 + call noname::_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$5alloc17hdbf1e2bcc01bc909E(v757, v709, v774) + v775 = const.i32 0 : i32 + v776 = cast v739 : u32 + v777 = add v776, 4 : u32 + v778 = inttoptr v777 : *mut i32 + v779 = load v778 : i32 + v780 = const.i32 0 : i32 + v781 = eq v779, v780 : i1 + v782 = cast v781 : i32 + v783 = const.i32 0 : i32 + v784 = neq v782, v783 : i1 + condbr v784, block2(v739, v775), block87 + +block87: + v785 = cast v739 : u32 + v786 = add v785, 12 : u32 + v787 = inttoptr v786 : *mut i32 + v788 = load v787 : i32 + v789 = cast v709 : u32 + v790 = add v789, 432 : u32 + v791 = inttoptr v790 : *mut i32 + v792 = load v791 : i32 + v793 = cast v739 : u32 + v794 = add v793, 8 : u32 + v795 = inttoptr v794 : *mut i32 + v796 = load v795 : i32 + v797 = add v792, v796 : i32 + v798 = cast v709 : u32 + v799 = add v798, 432 : u32 + v800 = inttoptr v799 : *mut i32 + store v800, v797 + v801 = cast v709 : u32 + v802 = add v801, 436 : u32 + v803 = inttoptr v802 : *mut i32 + v804 = load v803 : i32 + v805 = cast v804 : u32 + v806 = cast v797 : u32 + v807 = gt v805, v806 : i1 + v808 = cast v807 : i32 + v809 = 
const.i32 0 : i32 + v810 = neq v808, v809 : i1 + v811 = select v810, v804, v797 : i32 + v812 = cast v709 : u32 + v813 = add v812, 436 : u32 + v814 = inttoptr v813 : *mut i32 + store v814, v811 + v815 = cast v709 : u32 + v816 = add v815, 428 : u32 + v817 = inttoptr v816 : *mut i32 + v818 = load v817 : i32 + v819 = const.i32 0 : i32 + v820 = neq v818, v819 : i1 + condbr v820, block88, block89 + +block88: + v836 = const.i32 128 : i32 + v837 = add v709, v836 : i32 + br block94(v837) + +block89: + v821 = cast v709 : u32 + v822 = add v821, 444 : u32 + v823 = inttoptr v822 : *mut i32 + v824 = load v823 : i32 + v825 = const.i32 0 : i32 + v826 = eq v824, v825 : i1 + v827 = cast v826 : i32 + v828 = const.i32 0 : i32 + v829 = neq v827, v828 : i1 + condbr v829, block82, block90 + +block90: + v830 = cast v779 : u32 + v831 = cast v824 : u32 + v832 = lt v830, v831 : i1 + v833 = cast v832 : i32 + v834 = const.i32 0 : i32 + v835 = neq v833, v834 : i1 + condbr v835, block82, block91 + +block91: + br block77(v709, v779) + +block92(v884: i32, v887: i32, v1174: i32, v1577: i32, v1602: i32): + v869 = cast v868 : u32 + v870 = add v869, 444 : u32 + v871 = inttoptr v870 : *mut i32 + v872 = load v871 : i32 + v874 = cast v873 : u32 + v875 = cast v872 : u32 + v876 = gt v874, v875 : i1 + v877 = cast v876 : i32 + v878 = const.i32 0 : i32 + v879 = neq v877, v878 : i1 + v880 = select v879, v872, v838 : i32 + v881 = cast v860 : u32 + v882 = add v881, 444 : u32 + v883 = inttoptr v882 : *mut i32 + store v883, v880 + v886 = add v873, v884 : i32 + br block104(v887) + +block93: + v851 = call noname::_ZN8dlmalloc8dlmalloc7Segment9is_extern17h6f6db2c70b891fd9E(v839) : i32 + v852 = const.i32 0 : i32 + v853 = neq v851, v852 : i1 + condbr v853, block92(v885, v888, v1175, v1578, v1603), block98 + +block94(v839: i32): + v840 = call noname::_ZN8dlmalloc8dlmalloc7Segment3top17he7e9e2493151d036E(v839) : i32 + v841 = eq v838, v840 : i1 + v842 = cast v841 : i32 + v843 = const.i32 0 : i32 + v844 = neq v842, v843 : i1 + condbr v844, block93, block96 + +block95: + +block96: + v845 = cast v839 : u32 + v846 = add v845, 8 : u32 + v847 = inttoptr v846 : *mut i32 + v848 = load v847 : i32 + v849 = const.i32 0 : i32 + v850 = neq v848, v849 : i1 + condbr v850, block94(v848), block97 + +block97: + br block92(v796, v837, v717, v739, v775) + +block98: + v854 = call noname::_ZN8dlmalloc8dlmalloc7Segment9sys_flags17h224550055bf7775bE(v839) : i32 + v856 = neq v854, v788 : i1 + v857 = cast v856 : i32 + v858 = const.i32 0 : i32 + v859 = neq v857, v858 : i1 + condbr v859, block92(v885, v888, v1175, v1578, v1603), block99 + +block99: + v861 = cast v709 : u32 + v862 = add v861, 428 : u32 + v863 = inttoptr v862 : *mut i32 + v864 = load v863 : i32 + v865 = call noname::_ZN8dlmalloc8dlmalloc7Segment5holds17h8f6de4ee6718009bE(v839, v864) : i32 + v866 = const.i32 0 : i32 + v867 = neq v865, v866 : i1 + condbr v867, block81, block100 + +block100: + br block92(v885, v888, v1175, v1578, v1603) + +block101: + v1152 = cast v889 : u32 + v1153 = inttoptr v1152 : *mut i32 + v1154 = load v1153 : i32 + v1155 = cast v889 : u32 + v1156 = inttoptr v1155 : *mut i32 + store v1156, v991 + v1157 = cast v889 : u32 + v1158 = add v1157, 4 : u32 + v1159 = inttoptr v1158 : *mut i32 + v1160 = load v1159 : i32 + v1161 = add v1160, v1004 : i32 + v1162 = cast v889 : u32 + v1163 = add v1162, 4 : u32 + v1164 = inttoptr v1163 : *mut i32 + store v1164, v1161 + v1165 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v991) : i32 + v1166 = const.i32 8 : i32 + v1167 = call 
noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v1165, v1166) : i32 + v1168 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v1154) : i32 + v1169 = const.i32 8 : i32 + v1170 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v1168, v1169) : i32 + v1171 = sub v1167, v1165 : i32 + v1172 = add v991, v1171 : i32 + v1176 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v1172, v1174) : i32 + call noname::_ZN8dlmalloc8dlmalloc5Chunk34set_size_and_pinuse_of_inuse_chunk17h4acf6d59020bd397E(v1172, v1173) + v1177 = sub v1170, v1168 : i32 + v1178 = add v1154, v1177 : i32 + v1179 = add v1173, v1172 : i32 + v1180 = sub v1178, v1179 : i32 + v1181 = cast v915 : u32 + v1182 = add v1181, 428 : u32 + v1183 = inttoptr v1182 : *mut i32 + v1184 = load v1183 : i32 + v1185 = eq v1178, v1184 : i1 + v1186 = cast v1185 : i32 + v1187 = const.i32 0 : i32 + v1188 = neq v1186, v1187 : i1 + condbr v1188, block126, block127 + +block102(v914: i32, v920: i32, v990: i32, v1003: i32, v1600: i32): + v916 = cast v914 : u32 + v917 = add v916, 428 : u32 + v918 = inttoptr v917 : *mut i32 + v919 = load v918 : i32 + br block111(v920) + +block103: + v904 = call noname::_ZN8dlmalloc8dlmalloc7Segment9is_extern17h6f6db2c70b891fd9E(v889) : i32 + v905 = const.i32 0 : i32 + v906 = neq v904, v905 : i1 + condbr v906, block102(v915, v921, v991, v1004, v1601), block108 + +block104(v889: i32): + v890 = cast v889 : u32 + v891 = inttoptr v890 : *mut i32 + v892 = load v891 : i32 + v894 = eq v892, v893 : i1 + v895 = cast v894 : i32 + v896 = const.i32 0 : i32 + v897 = neq v895, v896 : i1 + condbr v897, block103, block106 + +block105: + +block106: + v898 = cast v889 : u32 + v899 = add v898, 8 : u32 + v900 = inttoptr v899 : *mut i32 + v901 = load v900 : i32 + v902 = const.i32 0 : i32 + v903 = neq v901, v902 : i1 + condbr v903, block104(v901), block107 + +block107: + br block102(v868, v887, v873, v884, v1602) + +block108: + v907 = call noname::_ZN8dlmalloc8dlmalloc7Segment9sys_flags17h224550055bf7775bE(v889) : i32 + v910 = eq v907, v855 : i1 + v911 = cast v910 : i32 + v912 = const.i32 0 : i32 + v913 = neq v911, v912 : i1 + condbr v913, block101, block109 + +block109: + br block102(v915, v921, v991, v1004, v1601) + +block110(v949: i32, v957: i32, v985: i32, v988: i32, v1001: i32, v1039: i32, v1058: i32, v1539: i32, v1584: i32, v1598: i32): + v950 = call noname::_ZN8dlmalloc8dlmalloc7Segment3top17he7e9e2493151d036E(v949) : i32 + v951 = const.i32 20 : i32 + v952 = const.i32 8 : i32 + v953 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v951, v952) : i32 + v954 = sub v950, v953 : i32 + v955 = const.i32 -23 : i32 + v956 = add v954, v955 : i32 + v958 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v956) : i32 + v959 = const.i32 8 : i32 + v960 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v958, v959) : i32 + v961 = sub v960, v958 : i32 + v962 = add v956, v961 : i32 + v963 = const.i32 16 : i32 + v964 = const.i32 8 : i32 + v965 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v963, v964) : i32 + v966 = add v957, v965 : i32 + v967 = cast v962 : u32 + v968 = cast v966 : u32 + v969 = lt v967, v968 : i1 + v970 = cast v969 : i32 + v971 = const.i32 0 : i32 + v972 = neq v970, v971 : i1 + v973 = select v972, v957, v962 : i32 + v974 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v973) : i32 + v975 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v973, v953) : i32 + v976 = 
call noname::_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E() : i32 + v977 = const.i32 8 : i32 + v978 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v976, v977) : i32 + v979 = const.i32 20 : i32 + v980 = const.i32 8 : i32 + v981 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v979, v980) : i32 + v982 = const.i32 16 : i32 + v983 = const.i32 8 : i32 + v984 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v982, v983) : i32 + v993 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v988) : i32 + v994 = const.i32 8 : i32 + v995 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v993, v994) : i32 + v996 = sub v995, v993 : i32 + v997 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v988, v996) : i32 + v998 = cast v985 : u32 + v999 = add v998, 428 : u32 + v1000 = inttoptr v999 : *mut i32 + store v1000, v997 + v1006 = add v976, v1001 : i32 + v1007 = add v978, v981 : i32 + v1008 = add v984, v1007 : i32 + v1009 = add v1008, v996 : i32 + v1010 = sub v1006, v1009 : i32 + v1011 = cast v985 : u32 + v1012 = add v1011, 420 : u32 + v1013 = inttoptr v1012 : *mut i32 + store v1013, v1010 + v1014 = const.i32 1 : i32 + v1015 = bor v1010, v1014 : i32 + v1016 = cast v997 : u32 + v1017 = add v1016, 4 : u32 + v1018 = inttoptr v1017 : *mut i32 + store v1018, v1015 + v1019 = call noname::_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E() : i32 + v1020 = const.i32 8 : i32 + v1021 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v1019, v1020) : i32 + v1022 = const.i32 20 : i32 + v1023 = const.i32 8 : i32 + v1024 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v1022, v1023) : i32 + v1025 = const.i32 16 : i32 + v1026 = const.i32 8 : i32 + v1027 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v1025, v1026) : i32 + v1028 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v997, v1010) : i32 + v1029 = const.i32 2097152 : i32 + v1030 = cast v985 : u32 + v1031 = add v1030, 440 : u32 + v1032 = inttoptr v1031 : *mut i32 + store v1032, v1029 + v1033 = sub v1021, v1019 : i32 + v1034 = add v1024, v1033 : i32 + v1035 = add v1027, v1034 : i32 + v1036 = cast v1028 : u32 + v1037 = add v1036, 4 : u32 + v1038 = inttoptr v1037 : *mut i32 + store v1038, v1035 + call noname::_ZN8dlmalloc8dlmalloc5Chunk34set_size_and_pinuse_of_inuse_chunk17h4acf6d59020bd397E(v973, v953) + v1042 = cast v1039 : u32 + v1043 = inttoptr v1042 : *mut i64 + v1044 = load v1043 : i64 + v1045 = const.i32 8 : i32 + v1046 = add v974, v1045 : i32 + v1047 = const.i32 8 : i32 + v1048 = add v1039, v1047 : i32 + v1049 = cast v1048 : u32 + v1050 = inttoptr v1049 : *mut i64 + v1051 = load v1050 : i64 + v1052 = cast v1046 : u32 + v1053 = inttoptr v1052 : *mut i64 + store v1053, v1051 + v1054 = cast v974 : u32 + v1055 = inttoptr v1054 : *mut i64 + store v1055, v1044 + v1056 = const.i32 140 : i32 + v1057 = add v985, v1056 : i32 + v1062 = cast v1057 : u32 + v1063 = inttoptr v1062 : *mut i32 + store v1063, v1058 + v1064 = const.i32 132 : i32 + v1065 = add v985, v1064 : i32 + v1066 = cast v1065 : u32 + v1067 = inttoptr v1066 : *mut i32 + store v1067, v1001 + v1068 = cast v985 : u32 + v1069 = add v1068, 128 : u32 + v1070 = inttoptr v1069 : *mut i32 + store v1070, v988 + v1071 = const.i32 136 : i32 + v1072 = add v985, v1071 : i32 + v1073 = cast v1072 : u32 + v1074 = inttoptr v1073 : *mut i32 + store v1074, v974 + br block117(v975) + +block111(v922: i32): + v923 = cast v922 
: u32 + v924 = inttoptr v923 : *mut i32 + v925 = load v924 : i32 + v927 = cast v925 : u32 + v928 = cast v926 : u32 + v929 = gt v927, v928 : i1 + v930 = cast v929 : i32 + v931 = const.i32 0 : i32 + v932 = neq v930, v931 : i1 + condbr v932, block113, block114 + +block112: + v948 = const.i32 0 : i32 + br block110(v948, v947, v987, v992, v1005, v1041, v1061, v1542, v1587, v1604) + +block113: + v941 = cast v922 : u32 + v942 = add v941, 8 : u32 + v943 = inttoptr v942 : *mut i32 + v944 = load v943 : i32 + v945 = const.i32 0 : i32 + v946 = neq v944, v945 : i1 + condbr v946, block111(v944), block116 + +block114: + v933 = call noname::_ZN8dlmalloc8dlmalloc7Segment3top17he7e9e2493151d036E(v922) : i32 + v934 = cast v933 : u32 + v935 = cast v926 : u32 + v936 = gt v934, v935 : i1 + v937 = cast v936 : i32 + v938 = const.i32 0 : i32 + v939 = neq v937, v938 : i1 + condbr v939, block110(v922, v926, v914, v990, v1003, v920, v908, v1173, v1576, v1600), block115 + +block115: + br block113 + +block116: + br block112 + +block117(v1075: i32): + v1076 = const.i32 4 : i32 + v1077 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v1075, v1076) : i32 + v1078 = call noname::_ZN8dlmalloc8dlmalloc5Chunk14fencepost_head17he07aaa52f3b50dfdE() : i32 + v1079 = cast v1075 : u32 + v1080 = add v1079, 4 : u32 + v1081 = inttoptr v1080 : *mut i32 + store v1081, v1078 + v1082 = const.i32 4 : i32 + v1083 = add v1077, v1082 : i32 + v1085 = cast v1083 : u32 + v1086 = cast v1084 : u32 + v1087 = lt v1085, v1086 : i1 + v1088 = cast v1087 : i32 + v1089 = const.i32 0 : i32 + v1090 = neq v1088, v1089 : i1 + condbr v1090, block117(v1077), block119 + +block118: + v1093 = eq v973, v957 : i1 + v1094 = cast v1093 : i32 + v1095 = const.i32 0 : i32 + v1096 = neq v1094, v1095 : i1 + condbr v1096, block76(v1106, v1539, v1584, v1598), block120 + +block119: + br block118 + +block120: + v1097 = sub v1091, v1092 : i32 + v1098 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v1092, v1097) : i32 + call noname::_ZN8dlmalloc8dlmalloc5Chunk20set_free_with_pinuse17h5d876ea751634e99E(v1092, v1097, v1098) + v1099 = const.i32 256 : i32 + v1100 = cast v1097 : u32 + v1101 = cast v1099 : u32 + v1102 = lt v1100, v1101 : i1 + v1103 = cast v1102 : i32 + v1104 = const.i32 0 : i32 + v1105 = neq v1103, v1104 : i1 + condbr v1105, block121, block122 + +block121: + v1107 = const.i32 -8 : i32 + v1108 = band v1097, v1107 : i32 + v1109 = add v1106, v1108 : i32 + v1110 = const.i32 144 : i32 + v1111 = add v1109, v1110 : i32 + v1112 = cast v1106 : u32 + v1113 = add v1112, 408 : u32 + v1114 = inttoptr v1113 : *mut i32 + v1115 = load v1114 : i32 + v1116 = const.i32 1 : i32 + v1117 = const.i32 3 : i32 + v1118 = cast v1097 : u32 + v1119 = cast v1117 : u32 + v1120 = shr v1118, v1119 : u32 + v1121 = cast v1120 : i32 + v1122 = shl v1116, v1121 : i32 + v1123 = band v1115, v1122 : i32 + v1124 = const.i32 0 : i32 + v1125 = eq v1123, v1124 : i1 + v1126 = cast v1125 : i32 + v1127 = const.i32 0 : i32 + v1128 = neq v1126, v1127 : i1 + condbr v1128, block124, block125 + +block122: + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18insert_large_chunk17h8e77460818b80af0E(v985, v1092, v1097) + br block76(v1106, v1538, v1583, v1597) + +block123(v1142: i32): + v1139 = cast v1111 : u32 + v1140 = add v1139, 8 : u32 + v1141 = inttoptr v1140 : *mut i32 + store v1141, v1092 + v1143 = cast v1142 : u32 + v1144 = add v1143, 12 : u32 + v1145 = inttoptr v1144 : *mut i32 + store v1145, v1138 + v1146 = cast v1138 : u32 + v1147 = add v1146, 12 : u32 + 
v1148 = inttoptr v1147 : *mut i32 + store v1148, v1137 + v1149 = cast v1138 : u32 + v1150 = add v1149, 8 : u32 + v1151 = inttoptr v1150 : *mut i32 + store v1151, v1142 + br block76(v1106, v1538, v1583, v1597) + +block124: + v1133 = bor v1115, v1122 : i32 + v1134 = cast v1106 : u32 + v1135 = add v1134, 408 : u32 + v1136 = inttoptr v1135 : *mut i32 + store v1136, v1133 + br block123(v1111) + +block125: + v1129 = cast v1111 : u32 + v1130 = add v1129, 8 : u32 + v1131 = inttoptr v1130 : *mut i32 + v1132 = load v1131 : i32 + br block123(v1132) + +block126: + v1246 = cast v915 : u32 + v1247 = add v1246, 428 : u32 + v1248 = inttoptr v1247 : *mut i32 + store v1248, v1176 + v1249 = cast v915 : u32 + v1250 = add v1249, 420 : u32 + v1251 = inttoptr v1250 : *mut i32 + v1252 = load v1251 : i32 + v1253 = add v1252, v1180 : i32 + v1254 = cast v915 : u32 + v1255 = add v1254, 420 : u32 + v1256 = inttoptr v1255 : *mut i32 + store v1256, v1253 + v1257 = const.i32 1 : i32 + v1258 = bor v1253, v1257 : i32 + v1259 = cast v1176 : u32 + v1260 = add v1259, 4 : u32 + v1261 = inttoptr v1260 : *mut i32 + store v1261, v1258 + v1262 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v1172) : i32 + br block2(v1577, v1262) + +block127: + v1189 = cast v915 : u32 + v1190 = add v1189, 424 : u32 + v1191 = inttoptr v1190 : *mut i32 + v1192 = load v1191 : i32 + v1193 = eq v1178, v1192 : i1 + v1194 = cast v1193 : i32 + v1195 = const.i32 0 : i32 + v1196 = neq v1194, v1195 : i1 + condbr v1196, block80, block128 + +block128: + v1197 = call noname::_ZN8dlmalloc8dlmalloc5Chunk5inuse17h2d327e4c36b84dfeE(v1178) : i32 + v1198 = const.i32 0 : i32 + v1199 = neq v1197, v1198 : i1 + condbr v1199, block78(v1180, v1178), block129 + +block129: + v1200 = call noname::_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E(v1178) : i32 + v1201 = const.i32 256 : i32 + v1202 = cast v1200 : u32 + v1203 = cast v1201 : u32 + v1204 = lt v1202, v1203 : i1 + v1205 = cast v1204 : i32 + v1206 = const.i32 0 : i32 + v1207 = neq v1205, v1206 : i1 + condbr v1207, block131, block132 + +block130: + v1243 = add v1200, v1180 : i32 + v1245 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v1178, v1241) : i32 + br block78(v1243, v1245) + +block131: + v1208 = cast v1178 : u32 + v1209 = add v1208, 12 : u32 + v1210 = inttoptr v1209 : *mut i32 + v1211 = load v1210 : i32 + v1212 = cast v1178 : u32 + v1213 = add v1212, 8 : u32 + v1214 = inttoptr v1213 : *mut i32 + v1215 = load v1214 : i32 + v1216 = eq v1211, v1215 : i1 + v1217 = cast v1216 : i32 + v1218 = const.i32 0 : i32 + v1219 = neq v1217, v1218 : i1 + condbr v1219, block133, block134 + +block132: + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E(v915, v1178) + br block130 + +block133: + v1226 = cast v915 : u32 + v1227 = add v1226, 408 : u32 + v1228 = inttoptr v1227 : *mut i32 + v1229 = load v1228 : i32 + v1230 = const.i32 -2 : i32 + v1231 = const.i32 3 : i32 + v1232 = cast v1200 : u32 + v1233 = cast v1231 : u32 + v1234 = shr v1232, v1233 : u32 + v1235 = cast v1234 : i32 + v1236 = shl v1230, v1235 : i32 + v1237 = band v1229, v1236 : i32 + v1238 = cast v915 : u32 + v1239 = add v1238, 408 : u32 + v1240 = inttoptr v1239 : *mut i32 + store v1240, v1237 + br block130 + +block134: + v1220 = cast v1215 : u32 + v1221 = add v1220, 12 : u32 + v1222 = inttoptr v1221 : *mut i32 + store v1222, v1211 + v1223 = cast v1211 : u32 + v1224 = add v1223, 8 : u32 + v1225 = inttoptr v1224 : *mut i32 + store v1225, v1215 + br block130 + +block135: + 
v1295 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v1284, v717) : i32 + v1296 = cast v709 : u32 + v1297 = add v1296, 416 : u32 + v1298 = inttoptr v1297 : *mut i32 + store v1298, v1285 + v1299 = cast v709 : u32 + v1300 = add v1299, 424 : u32 + v1301 = inttoptr v1300 : *mut i32 + store v1301, v1295 + call noname::_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E(v1295, v1285) + call noname::_ZN8dlmalloc8dlmalloc5Chunk34set_size_and_pinuse_of_inuse_chunk17h4acf6d59020bd397E(v1284, v717) + v1302 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v1284) : i32 + br block2(v739, v1302) + +block136: + v1366 = const.i32 -8 : i32 + v1367 = band v1352, v1366 : i32 + v1368 = add v1361, v1367 : i32 + v1369 = const.i32 144 : i32 + v1370 = add v1368, v1369 : i32 + v1371 = cast v1361 : u32 + v1372 = add v1371, 408 : u32 + v1373 = inttoptr v1372 : *mut i32 + v1374 = load v1373 : i32 + v1375 = const.i32 1 : i32 + v1376 = const.i32 3 : i32 + v1377 = cast v1352 : u32 + v1378 = cast v1376 : u32 + v1379 = shr v1377, v1378 : u32 + v1380 = cast v1379 : i32 + v1381 = shl v1375, v1380 : i32 + v1382 = band v1374, v1381 : i32 + v1383 = const.i32 0 : i32 + v1384 = eq v1382, v1383 : i1 + v1385 = cast v1384 : i32 + v1386 = const.i32 0 : i32 + v1387 = neq v1385, v1386 : i1 + condbr v1387, block139, block140 + +block137: + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18insert_large_chunk17h8e77460818b80af0E(v915, v1350, v1352) + v1365 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v1172) : i32 + br block2(v1576, v1365) + +block138(v1401: i32): + v1398 = cast v1370 : u32 + v1399 = add v1398, 8 : u32 + v1400 = inttoptr v1399 : *mut i32 + store v1400, v1350 + v1402 = cast v1401 : u32 + v1403 = add v1402, 12 : u32 + v1404 = inttoptr v1403 : *mut i32 + store v1404, v1397 + v1405 = cast v1397 : u32 + v1406 = add v1405, 12 : u32 + v1407 = inttoptr v1406 : *mut i32 + store v1407, v1396 + v1408 = cast v1397 : u32 + v1409 = add v1408, 8 : u32 + v1410 = inttoptr v1409 : *mut i32 + store v1410, v1401 + v1412 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v1363) : i32 + br block2(v1579, v1412) + +block139: + v1392 = bor v1374, v1381 : i32 + v1393 = cast v1361 : u32 + v1394 = add v1393, 408 : u32 + v1395 = inttoptr v1394 : *mut i32 + store v1395, v1392 + br block138(v1370) + +block140: + v1388 = cast v1370 : u32 + v1389 = add v1388, 8 : u32 + v1390 = inttoptr v1389 : *mut i32 + v1391 = load v1390 : i32 + br block138(v1391) + +block141(v1436: i32): + v1437 = add v1435, v1436 : i32 + v1438 = const.i32 164 : i32 + v1439 = add v1437, v1438 : i32 + v1440 = const.i32 152 : i32 + v1441 = add v1437, v1440 : i32 + v1442 = cast v1439 : u32 + v1443 = inttoptr v1442 : *mut i32 + store v1443, v1441 + v1444 = const.i32 144 : i32 + v1445 = add v1437, v1444 : i32 + v1446 = cast v1441 : u32 + v1447 = inttoptr v1446 : *mut i32 + store v1447, v1445 + v1448 = const.i32 156 : i32 + v1449 = add v1437, v1448 : i32 + v1450 = cast v1449 : u32 + v1451 = inttoptr v1450 : *mut i32 + store v1451, v1445 + v1452 = const.i32 172 : i32 + v1453 = add v1437, v1452 : i32 + v1454 = const.i32 160 : i32 + v1455 = add v1437, v1454 : i32 + v1456 = cast v1453 : u32 + v1457 = inttoptr v1456 : *mut i32 + store v1457, v1455 + v1458 = cast v1455 : u32 + v1459 = inttoptr v1458 : *mut i32 + store v1459, v1441 + v1460 = const.i32 180 : i32 + v1461 = add v1437, v1460 : i32 + v1462 = const.i32 168 : i32 + v1463 = add v1437, v1462 : i32 + v1464 = cast 
v1461 : u32 + v1465 = inttoptr v1464 : *mut i32 + store v1465, v1463 + v1466 = cast v1463 : u32 + v1467 = inttoptr v1466 : *mut i32 + store v1467, v1455 + v1468 = const.i32 176 : i32 + v1469 = add v1437, v1468 : i32 + v1470 = cast v1469 : u32 + v1471 = inttoptr v1470 : *mut i32 + store v1471, v1463 + v1472 = const.i32 32 : i32 + v1473 = add v1436, v1472 : i32 + v1474 = const.i32 256 : i32 + v1475 = neq v1473, v1474 : i1 + v1476 = cast v1475 : i32 + v1477 = const.i32 0 : i32 + v1478 = neq v1476, v1477 : i1 + condbr v1478, block141(v1473), block143 + +block142: + v1479 = call noname::_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E() : i32 + v1480 = const.i32 8 : i32 + v1481 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v1479, v1480) : i32 + v1482 = const.i32 20 : i32 + v1483 = const.i32 8 : i32 + v1484 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v1482, v1483) : i32 + v1485 = const.i32 16 : i32 + v1486 = const.i32 8 : i32 + v1487 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v1485, v1486) : i32 + v1489 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v1488) : i32 + v1490 = const.i32 8 : i32 + v1491 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v1489, v1490) : i32 + v1492 = sub v1491, v1489 : i32 + v1493 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v1418, v1492) : i32 + v1494 = cast v1435 : u32 + v1495 = add v1494, 428 : u32 + v1496 = inttoptr v1495 : *mut i32 + store v1496, v1493 + v1498 = add v1479, v1430 : i32 + v1499 = add v1481, v1484 : i32 + v1500 = add v1487, v1499 : i32 + v1501 = add v1500, v1492 : i32 + v1502 = sub v1498, v1501 : i32 + v1503 = cast v1435 : u32 + v1504 = add v1503, 420 : u32 + v1505 = inttoptr v1504 : *mut i32 + store v1505, v1502 + v1506 = const.i32 1 : i32 + v1507 = bor v1502, v1506 : i32 + v1508 = cast v1493 : u32 + v1509 = add v1508, 4 : u32 + v1510 = inttoptr v1509 : *mut i32 + store v1510, v1507 + v1511 = call noname::_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E() : i32 + v1512 = const.i32 8 : i32 + v1513 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v1511, v1512) : i32 + v1514 = const.i32 20 : i32 + v1515 = const.i32 8 : i32 + v1516 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v1514, v1515) : i32 + v1517 = const.i32 16 : i32 + v1518 = const.i32 8 : i32 + v1519 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v1517, v1518) : i32 + v1520 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v1493, v1502) : i32 + v1521 = const.i32 2097152 : i32 + v1522 = cast v1435 : u32 + v1523 = add v1522, 440 : u32 + v1524 = inttoptr v1523 : *mut i32 + store v1524, v1521 + v1525 = sub v1513, v1511 : i32 + v1526 = add v1516, v1525 : i32 + v1527 = add v1519, v1526 : i32 + v1528 = cast v1520 : u32 + v1529 = add v1528, 4 : u32 + v1530 = inttoptr v1529 : *mut i32 + store v1530, v1527 + br block76(v1435, v717, v739, v775) + +block143: + br block142 + +block144: + v1553 = sub v1536, v1537 : i32 + v1554 = cast v1531 : u32 + v1555 = add v1554, 420 : u32 + v1556 = inttoptr v1555 : *mut i32 + store v1556, v1553 + v1557 = cast v1531 : u32 + v1558 = add v1557, 428 : u32 + v1559 = inttoptr v1558 : *mut i32 + v1560 = load v1559 : i32 + v1561 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v1560, v1537) : i32 + v1562 = cast v1531 : u32 + v1563 = add v1562, 428 : u32 + v1564 = inttoptr v1563 : *mut i32 + store v1564, v1561 + v1565 
= const.i32 1 : i32 + v1566 = bor v1553, v1565 : i32 + v1567 = cast v1561 : u32 + v1568 = add v1567, 4 : u32 + v1569 = inttoptr v1568 : *mut i32 + store v1569, v1566 + call noname::_ZN8dlmalloc8dlmalloc5Chunk34set_size_and_pinuse_of_inuse_chunk17h4acf6d59020bd397E(v1560, v1537) + v1570 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v1560) : i32 + br block2(v1582, v1570) +} + +pub fn _ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$8init_top17he4cefe3b36a3bd87E(i32, i32, i32) { +block0(v0: i32, v1: i32, v2: i32): + v3 = const.i32 0 : i32 + v4 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v1) : i32 + v5 = const.i32 8 : i32 + v6 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v4, v5) : i32 + v7 = sub v6, v4 : i32 + v8 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v1, v7) : i32 + v9 = sub v2, v7 : i32 + v10 = cast v0 : u32 + v11 = add v10, 420 : u32 + v12 = inttoptr v11 : *mut i32 + store v12, v9 + v13 = cast v0 : u32 + v14 = add v13, 428 : u32 + v15 = inttoptr v14 : *mut i32 + store v15, v8 + v16 = const.i32 1 : i32 + v17 = bor v9, v16 : i32 + v18 = cast v8 : u32 + v19 = add v18, 4 : u32 + v20 = inttoptr v19 : *mut i32 + store v20, v17 + v21 = call noname::_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E() : i32 + v22 = const.i32 8 : i32 + v23 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v21, v22) : i32 + v24 = const.i32 20 : i32 + v25 = const.i32 8 : i32 + v26 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v24, v25) : i32 + v27 = const.i32 16 : i32 + v28 = const.i32 8 : i32 + v29 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v27, v28) : i32 + v30 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v8, v9) : i32 + v31 = const.i32 2097152 : i32 + v32 = cast v0 : u32 + v33 = add v32, 440 : u32 + v34 = inttoptr v33 : *mut i32 + store v34, v31 + v35 = sub v23, v21 : i32 + v36 = add v26, v35 : i32 + v37 = add v29, v36 : i32 + v38 = cast v30 : u32 + v39 = add v38, 4 : u32 + v40 = inttoptr v39 : *mut i32 + store v40, v37 + br block1 + +block1: + ret +} + +pub fn _ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$8memalign17he8794c5d1cb954f9E(i32, i32, i32) -> i32 { +block0(v0: i32, v1: i32, v2: i32): + v4 = const.i32 0 : i32 + v5 = const.i32 16 : i32 + v6 = const.i32 8 : i32 + v7 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v5, v6) : i32 + v8 = cast v7 : u32 + v9 = cast v1 : u32 + v10 = lte v8, v9 : i1 + v11 = cast v10 : i32 + v12 = const.i32 0 : i32 + v13 = neq v11, v12 : i1 + condbr v13, block2(v1), block3 + +block1(v3: i32): + ret v3 + +block2(v50: i32): + v17 = call noname::_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E() : i32 + v18 = const.i32 8 : i32 + v19 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v17, v18) : i32 + v20 = const.i32 20 : i32 + v21 = const.i32 8 : i32 + v22 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v20, v21) : i32 + v23 = const.i32 16 : i32 + v24 = const.i32 8 : i32 + v25 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v23, v24) : i32 + v26 = const.i32 0 : i32 + v27 = const.i32 0 : i32 + v28 = const.i32 16 : i32 + v29 = const.i32 8 : i32 + v30 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v28, v29) : i32 + v31 = const.i32 2 : i32 + v32 = shl v30, v31 : i32 + v33 = sub v27, v32 : i32 + v34 = add v19, v22 : i32 + v35 = add v25, v34 : i32 + v36 = sub v17, v35 : i32 + v37 = const.i32 -65544 : i32 + 
v38 = add v36, v37 : i32 + v39 = const.i32 -9 : i32 + v40 = band v38, v39 : i32 + v41 = const.i32 -3 : i32 + v42 = add v40, v41 : i32 + v43 = cast v33 : u32 + v44 = cast v42 : u32 + v45 = lt v43, v44 : i1 + v46 = cast v45 : i32 + v47 = const.i32 0 : i32 + v48 = neq v46, v47 : i1 + v49 = select v48, v33, v42 : i32 + v51 = sub v49, v50 : i32 + v53 = cast v51 : u32 + v54 = cast v2 : u32 + v55 = lte v53, v54 : i1 + v56 = cast v55 : i32 + v57 = const.i32 0 : i32 + v58 = neq v56, v57 : i1 + condbr v58, block4(v26), block5 + +block3: + v14 = const.i32 16 : i32 + v15 = const.i32 8 : i32 + v16 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v14, v15) : i32 + br block2(v16) + +block4(v151: i32): + br block1(v151) + +block5: + v60 = const.i32 16 : i32 + v61 = const.i32 4 : i32 + v62 = add v52, v61 : i32 + v63 = const.i32 16 : i32 + v64 = const.i32 8 : i32 + v65 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v63, v64) : i32 + v66 = const.i32 -5 : i32 + v67 = add v65, v66 : i32 + v68 = cast v67 : u32 + v69 = cast v52 : u32 + v70 = gt v68, v69 : i1 + v71 = cast v70 : i32 + v72 = const.i32 0 : i32 + v73 = neq v71, v72 : i1 + v74 = select v73, v60, v62 : i32 + v75 = const.i32 8 : i32 + v76 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v74, v75) : i32 + v77 = add v50, v76 : i32 + v78 = const.i32 16 : i32 + v79 = const.i32 8 : i32 + v80 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v78, v79) : i32 + v81 = add v77, v80 : i32 + v82 = const.i32 -4 : i32 + v83 = add v81, v82 : i32 + v84 = call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$6malloc17h1ae2390053e3628cE(v0, v83) : i32 + v85 = const.i32 0 : i32 + v86 = eq v84, v85 : i1 + v87 = cast v86 : i32 + v88 = const.i32 0 : i32 + v89 = neq v87, v88 : i1 + condbr v89, block4(v26), block6 + +block6: + v90 = call noname::_ZN8dlmalloc8dlmalloc5Chunk8from_mem17h11dd30c74f483706E(v84) : i32 + v91 = const.i32 -1 : i32 + v92 = add v50, v91 : i32 + v93 = band v92, v84 : i32 + v94 = const.i32 0 : i32 + v95 = neq v93, v94 : i1 + condbr v95, block8, block9 + +block7(v129: i32): + v130 = call noname::_ZN8dlmalloc8dlmalloc5Chunk7mmapped17h1a9959fbf47496c3E(v129) : i32 + v131 = const.i32 0 : i32 + v132 = neq v130, v131 : i1 + condbr v132, block12, block13 + +block8: + v96 = add v92, v84 : i32 + v97 = const.i32 0 : i32 + v98 = sub v97, v50 : i32 + v99 = band v96, v98 : i32 + v100 = call noname::_ZN8dlmalloc8dlmalloc5Chunk8from_mem17h11dd30c74f483706E(v99) : i32 + v101 = const.i32 16 : i32 + v102 = const.i32 8 : i32 + v103 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v101, v102) : i32 + v104 = call noname::_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E(v90) : i32 + v105 = const.i32 0 : i32 + v106 = sub v100, v90 : i32 + v107 = cast v106 : u32 + v108 = cast v103 : u32 + v109 = gt v107, v108 : i1 + v110 = cast v109 : i32 + v111 = const.i32 0 : i32 + v112 = neq v110, v111 : i1 + v113 = select v112, v105, v50 : i32 + v114 = add v100, v113 : i32 + v115 = sub v114, v90 : i32 + v116 = sub v104, v115 : i32 + v117 = call noname::_ZN8dlmalloc8dlmalloc5Chunk7mmapped17h1a9959fbf47496c3E(v90) : i32 + v118 = const.i32 0 : i32 + v119 = neq v117, v118 : i1 + condbr v119, block10, block11 + +block9: + br block7(v90) + +block10: + v120 = cast v90 : u32 + v121 = inttoptr v120 : *mut i32 + v122 = load v121 : i32 + v123 = cast v114 : u32 + v124 = add v123, 4 : u32 + v125 = inttoptr v124 : *mut i32 + store v125, v116 + v126 = add v122, v115 : i32 + v127 = cast v114 : u32 + v128 = inttoptr v127 
: *mut i32
+    store v128, v126
+    br block7(v114)
+
+block11:
+    call noname::_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E(v114, v116)
+    call noname::_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E(v90, v115)
+    call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$13dispose_chunk17h577eb103dc04307bE(v59, v90, v115)
+    br block7(v114)
+
+block12:
+    v149 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v129) : i32
+    v150 = call noname::_ZN8dlmalloc8dlmalloc5Chunk7mmapped17h1a9959fbf47496c3E(v148) : i32
+    br block4(v149)
+
+block13:
+    v133 = call noname::_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E(v129) : i32
+    v134 = const.i32 16 : i32
+    v135 = const.i32 8 : i32
+    v136 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v134, v135) : i32
+    v138 = add v136, v76 : i32
+    v139 = cast v133 : u32
+    v140 = cast v138 : u32
+    v141 = lte v139, v140 : i1
+    v142 = cast v141 : i32
+    v143 = const.i32 0 : i32
+    v144 = neq v142, v143 : i1
+    condbr v144, block12, block14
+
+block14:
+    v145 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v129, v137) : i32
+    call noname::_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E(v129, v137)
+    v146 = sub v133, v137 : i32
+    call noname::_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E(v145, v146)
+    call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$13dispose_chunk17h577eb103dc04307bE(v59, v145, v146)
+    br block12
+}
+
+pub fn __main() -> i32 {
+block0:
+    v1 = call noname::vec_alloc() : i32
+    br block1(v1)
+
+block1(v0: i32):
+    ret v0
+}
+
+pub fn __rust_alloc(i32, i32) -> i32 {
+block0(v0: i32, v1: i32):
+    v3 = const.i32 0 : i32
+    v4 = global.load (@__stack_pointer) as *mut i8 : i32
+    v5 = const.i32 16 : i32
+    v6 = sub v4, v5 : i32
+    v7 = global.symbol @__stack_pointer : *mut i32
+    store v7, v6
+    call noname::_ZN8dlmalloc3sys23enable_alloc_after_fork17h64eb1fc0ff7b2689E()
+    v8 = const.i32 15 : i32
+    v9 = add v6, v8 : i32
+    v10 = call noname::_ZN73_$LT$dlmalloc..global..Instance$u20$as$u20$core..ops..deref..DerefMut$GT$9deref_mut17h16955ef502c5c4e5E(v9) : i32
+    v11 = const.i32 9 : i32
+    v12 = cast v1 : u32
+    v13 = cast v11 : u32
+    v14 = lt v12, v13 : i1
+    v15 = cast v14 : i32
+    v16 = const.i32 0 : i32
+    v17 = neq v15, v16 : i1
+    condbr v17, block3, block4
+
+block1(v2: i32):
+    ret v2
+
+block2(v26: i32):
+    v21 = const.i32 15 : i32
+    v22 = add v6, v21 : i32
+    call noname::_ZN68_$LT$dlmalloc..global..Instance$u20$as$u20$core..ops..drop..Drop$GT$4drop17he19d8d9c8ea92454E(v22)
+    v23 = const.i32 16 : i32
+    v24 = add v20, v23 : i32
+    v25 = global.symbol @__stack_pointer : *mut i32
+    store v25, v24
+    br block1(v26)
+
+block3:
+    v19 = call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$6malloc17h1ae2390053e3628cE(v10, v0) : i32
+    br block2(v19)
+
+block4:
+    v18 = call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$8memalign17he8794c5d1cb954f9E(v10, v1, v0) : i32
+    br block2(v18)
+}
+
+pub fn __rust_dealloc(i32, i32, i32) {
+block0(v0: i32, v1: i32, v2: i32):
+    v3 = const.i32 0 : i32
+    v4 = global.load (@__stack_pointer) as *mut i8 : i32
+    v5 = const.i32 16 : i32
+    v6 = sub v4, v5 : i32
+    v7 = global.symbol @__stack_pointer : *mut i32
+    store v7, v6
+    call noname::_ZN8dlmalloc3sys23enable_alloc_after_fork17h64eb1fc0ff7b2689E()
+    v8 = const.i32 15 : i32
+    v9 = add v6, v8 : i32
+    v10 = call noname::_ZN73_$LT$dlmalloc..global..Instance$u20$as$u20$core..ops..deref..DerefMut$GT$9deref_mut17h16955ef502c5c4e5E(v9) : i32
+    call
noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$4free17h501de2a6604ba1ffE(v10, v0) + v11 = const.i32 15 : i32 + v12 = add v6, v11 : i32 + call noname::_ZN68_$LT$dlmalloc..global..Instance$u20$as$u20$core..ops..drop..Drop$GT$4drop17he19d8d9c8ea92454E(v12) + v13 = const.i32 16 : i32 + v14 = add v6, v13 : i32 + v15 = global.symbol @__stack_pointer : *mut i32 + store v15, v14 + br block1 + +block1: + ret +} + +pub fn __rust_realloc(i32, i32, i32, i32) -> i32 { +block0(v0: i32, v1: i32, v2: i32, v3: i32): + v5 = const.i32 0 : i32 + v6 = global.load (@__stack_pointer) as *mut i8 : i32 + v7 = const.i32 16 : i32 + v8 = sub v6, v7 : i32 + v9 = global.symbol @__stack_pointer : *mut i32 + store v9, v8 + call noname::_ZN8dlmalloc3sys23enable_alloc_after_fork17h64eb1fc0ff7b2689E() + v10 = const.i32 15 : i32 + v11 = add v8, v10 : i32 + v12 = call noname::_ZN73_$LT$dlmalloc..global..Instance$u20$as$u20$core..ops..deref..DerefMut$GT$9deref_mut17h16955ef502c5c4e5E(v11) : i32 + v13 = const.i32 9 : i32 + v14 = cast v2 : u32 + v15 = cast v13 : u32 + v16 = lt v14, v15 : i1 + v17 = cast v16 : i32 + v18 = const.i32 0 : i32 + v19 = neq v17, v18 : i1 + condbr v19, block7, block8 + +block1(v4: i32): + ret v4 + +block2(v404: i32, v416: i32): + v411 = const.i32 15 : i32 + v412 = add v404, v411 : i32 + call noname::_ZN68_$LT$dlmalloc..global..Instance$u20$as$u20$core..ops..drop..Drop$GT$4drop17he19d8d9c8ea92454E(v412) + v413 = const.i32 16 : i32 + v414 = add v404, v413 : i32 + v415 = global.symbol @__stack_pointer : *mut i32 + store v415, v414 + br block1(v416) + +block3(v401: i32, v410: i32): + v402 = call noname::_ZN8dlmalloc8dlmalloc5Chunk7mmapped17h1a9959fbf47496c3E(v401) : i32 + v403 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(v401) : i32 + br block2(v410, v403) + +block4(v366: i32, v368: i32, v379: i32, v384: i32, v405: i32, v417: i32): + v373 = call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$6malloc17h1ae2390053e3628cE(v366, v368) : i32 + v374 = const.i32 0 : i32 + v375 = eq v373, v374 : i1 + v376 = cast v375 : i32 + v377 = const.i32 0 : i32 + v378 = neq v376, v377 : i1 + condbr v378, block2(v405, v417), block45 + +block5: + v351 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v80, v79) : i32 + call noname::_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E(v80, v79) + v352 = cast v12 : u32 + v353 = add v352, 428 : u32 + v354 = inttoptr v353 : *mut i32 + store v354, v351 + v355 = sub v336, v79 : i32 + v356 = const.i32 1 : i32 + v357 = bor v355, v356 : i32 + v358 = cast v351 : u32 + v359 = add v358, 4 : u32 + v360 = inttoptr v359 : *mut i32 + store v360, v357 + v361 = cast v12 : u32 + v362 = add v361, 420 : u32 + v363 = inttoptr v362 : *mut i32 + store v363, v355 + v364 = const.i32 0 : i32 + v365 = neq v80, v364 : i1 + condbr v365, block3(v80, v8), block44 + +block6: + v343 = cast v1 : u32 + v344 = cast v3 : u32 + v345 = lt v343, v344 : i1 + v346 = cast v345 : i32 + v347 = const.i32 0 : i32 + v348 = neq v346, v347 : i1 + v349 = select v348, v1, v3 : i32 + v350 = call noname::memcpy(v20, v0, v349) : i32 + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$4free17h501de2a6604ba1ffE(v12, v0) + br block2(v8, v20) + +block7: + v24 = call noname::_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E() : i32 + v25 = const.i32 8 : i32 + v26 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v24, v25) : i32 + v27 = const.i32 20 : i32 + v28 = const.i32 8 : i32 + v29 = call 
noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v27, v28) : i32 + v30 = const.i32 16 : i32 + v31 = const.i32 8 : i32 + v32 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v30, v31) : i32 + v33 = const.i32 0 : i32 + v34 = const.i32 0 : i32 + v35 = const.i32 16 : i32 + v36 = const.i32 8 : i32 + v37 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v35, v36) : i32 + v38 = const.i32 2 : i32 + v39 = shl v37, v38 : i32 + v40 = sub v34, v39 : i32 + v41 = add v26, v29 : i32 + v42 = add v32, v41 : i32 + v43 = sub v24, v42 : i32 + v44 = const.i32 -65544 : i32 + v45 = add v43, v44 : i32 + v46 = const.i32 -9 : i32 + v47 = band v45, v46 : i32 + v48 = const.i32 -3 : i32 + v49 = add v47, v48 : i32 + v50 = cast v40 : u32 + v51 = cast v49 : u32 + v52 = lt v50, v51 : i1 + v53 = cast v52 : i32 + v54 = const.i32 0 : i32 + v55 = neq v53, v54 : i1 + v56 = select v55, v40, v49 : i32 + v57 = cast v56 : u32 + v58 = cast v3 : u32 + v59 = lte v57, v58 : i1 + v60 = cast v59 : i32 + v61 = const.i32 0 : i32 + v62 = neq v60, v61 : i1 + condbr v62, block2(v8, v33), block10 + +block8: + v20 = call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$8memalign17he8794c5d1cb954f9E(v12, v2, v3) : i32 + v21 = const.i32 0 : i32 + v22 = neq v20, v21 : i1 + condbr v22, block6, block9 + +block9: + v23 = const.i32 0 : i32 + br block2(v8, v23) + +block10: + v63 = const.i32 16 : i32 + v64 = const.i32 4 : i32 + v65 = add v3, v64 : i32 + v66 = const.i32 16 : i32 + v67 = const.i32 8 : i32 + v68 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v66, v67) : i32 + v69 = const.i32 -5 : i32 + v70 = add v68, v69 : i32 + v71 = cast v70 : u32 + v72 = cast v3 : u32 + v73 = gt v71, v72 : i1 + v74 = cast v73 : i32 + v75 = const.i32 0 : i32 + v76 = neq v74, v75 : i1 + v77 = select v76, v63, v65 : i32 + v78 = const.i32 8 : i32 + v79 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v77, v78) : i32 + v80 = call noname::_ZN8dlmalloc8dlmalloc5Chunk8from_mem17h11dd30c74f483706E(v0) : i32 + v81 = call noname::_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E(v80) : i32 + v82 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v80, v81) : i32 + v83 = call noname::_ZN8dlmalloc8dlmalloc5Chunk7mmapped17h1a9959fbf47496c3E(v80) : i32 + v84 = const.i32 0 : i32 + v85 = neq v83, v84 : i1 + condbr v85, block17, block18 + +block11: + v332 = cast v12 : u32 + v333 = add v332, 420 : u32 + v334 = inttoptr v333 : *mut i32 + v335 = load v334 : i32 + v336 = add v335, v81 : i32 + v337 = cast v336 : u32 + v338 = cast v79 : u32 + v339 = gt v337, v338 : i1 + v340 = cast v339 : i32 + v341 = const.i32 0 : i32 + v342 = neq v340, v341 : i1 + condbr v342, block5, block43 + +block12: + v330 = const.i32 0 : i32 + v331 = neq v80, v330 : i1 + condbr v331, block3(v329, v409), block42 + +block13: + v318 = sub v81, v79 : i32 + v319 = const.i32 16 : i32 + v320 = const.i32 8 : i32 + v321 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v319, v320) : i32 + v322 = cast v318 : u32 + v323 = cast v321 : u32 + v324 = lt v322, v323 : i1 + v325 = cast v324 : i32 + v326 = const.i32 0 : i32 + v327 = neq v325, v326 : i1 + condbr v327, block12, block41 + +block14: + v281 = cast v12 : u32 + v282 = add v281, 416 : u32 + v283 = inttoptr v282 : *mut i32 + v284 = load v283 : i32 + v285 = add v284, v81 : i32 + v286 = cast v285 : u32 + v287 = cast v79 : u32 + v288 = lt v286, v287 : i1 + v289 = cast v288 : i32 + v290 = const.i32 0 : i32 + v291 = neq v289, v290 : i1 + condbr v291, block4(v12, 
v3, v0, v80, v8, v33), block36 + +block15: + v263 = const.i32 16 : i32 + v264 = const.i32 8 : i32 + v265 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v263, v264) : i32 + v266 = cast v119 : u32 + v267 = cast v265 : u32 + v268 = lt v266, v267 : i1 + v269 = cast v268 : i32 + v270 = const.i32 0 : i32 + v271 = neq v269, v270 : i1 + condbr v271, block32, block33 + +block16: + v229 = cast v82 : u32 + v230 = add v229, 12 : u32 + v231 = inttoptr v230 : *mut i32 + v232 = load v231 : i32 + v233 = cast v82 : u32 + v234 = add v233, 8 : u32 + v235 = inttoptr v234 : *mut i32 + v236 = load v235 : i32 + v237 = eq v232, v236 : i1 + v238 = cast v237 : i32 + v239 = const.i32 0 : i32 + v240 = neq v238, v239 : i1 + condbr v240, block30, block31 + +block17: + v127 = call noname::_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E(v80) : i32 + v128 = const.i32 256 : i32 + v129 = cast v79 : u32 + v130 = cast v128 : u32 + v131 = lt v129, v130 : i1 + v132 = cast v131 : i32 + v133 = const.i32 0 : i32 + v134 = neq v132, v133 : i1 + condbr v134, block4(v12, v3, v0, v80, v8, v33), block25 + +block18: + v86 = cast v81 : u32 + v87 = cast v79 : u32 + v88 = gte v86, v87 : i1 + v89 = cast v88 : i32 + v90 = const.i32 0 : i32 + v91 = neq v89, v90 : i1 + condbr v91, block13, block19 + +block19: + v92 = cast v12 : u32 + v93 = add v92, 428 : u32 + v94 = inttoptr v93 : *mut i32 + v95 = load v94 : i32 + v96 = eq v82, v95 : i1 + v97 = cast v96 : i32 + v98 = const.i32 0 : i32 + v99 = neq v97, v98 : i1 + condbr v99, block11, block20 + +block20: + v100 = cast v12 : u32 + v101 = add v100, 424 : u32 + v102 = inttoptr v101 : *mut i32 + v103 = load v102 : i32 + v104 = eq v82, v103 : i1 + v105 = cast v104 : i32 + v106 = const.i32 0 : i32 + v107 = neq v105, v106 : i1 + condbr v107, block14, block21 + +block21: + v108 = call noname::_ZN8dlmalloc8dlmalloc5Chunk6cinuse17h58499de57c2d37e2E(v82) : i32 + v109 = const.i32 0 : i32 + v110 = neq v108, v109 : i1 + condbr v110, block4(v12, v3, v0, v80, v8, v33), block22 + +block22: + v111 = call noname::_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E(v82) : i32 + v112 = add v111, v81 : i32 + v113 = cast v112 : u32 + v114 = cast v79 : u32 + v115 = lt v113, v114 : i1 + v116 = cast v115 : i32 + v117 = const.i32 0 : i32 + v118 = neq v116, v117 : i1 + condbr v118, block4(v12, v3, v0, v80, v8, v33), block23 + +block23: + v119 = sub v112, v79 : i32 + v120 = const.i32 256 : i32 + v121 = cast v111 : u32 + v122 = cast v120 : u32 + v123 = lt v121, v122 : i1 + v124 = cast v123 : i32 + v125 = const.i32 0 : i32 + v126 = neq v124, v125 : i1 + condbr v126, block16, block24 + +block24: + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E(v12, v82) + br block15 + +block25: + v135 = const.i32 4 : i32 + v136 = add v79, v135 : i32 + v137 = cast v127 : u32 + v138 = cast v136 : u32 + v139 = lt v137, v138 : i1 + v140 = cast v139 : i32 + v141 = const.i32 0 : i32 + v142 = neq v140, v141 : i1 + condbr v142, block26, block27 + +block26: + v153 = cast v152 : u32 + v154 = inttoptr v153 : *mut i32 + v155 = load v154 : i32 + v156 = sub v80, v155 : i32 + v158 = add v127, v155 : i32 + v159 = const.i32 16 : i32 + v160 = add v158, v159 : i32 + v162 = const.i32 31 : i32 + v163 = add v79, v162 : i32 + v164 = call noname::_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$9page_size17h0fdd55b2693d440cE(v151) : i32 + v165 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v163, v164) : i32 + v166 = const.i32 1 : i32 + v167 = call 
noname::_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$5remap17hf5ff3c6a92680f40E(v12, v156, v160, v165, v166) : i32 + v168 = const.i32 0 : i32 + v169 = eq v167, v168 : i1 + v170 = cast v169 : i32 + v171 = const.i32 0 : i32 + v172 = neq v170, v171 : i1 + condbr v172, block4(v151, v3, v0, v152, v8, v33), block29 + +block27: + v143 = sub v127, v79 : i32 + v144 = const.i32 131073 : i32 + v145 = cast v143 : u32 + v146 = cast v144 : u32 + v147 = lt v145, v146 : i1 + v148 = cast v147 : i32 + v149 = const.i32 0 : i32 + v150 = neq v148, v149 : i1 + condbr v150, block12, block28 + +block28: + br block26 + +block29: + v173 = add v167, v155 : i32 + v174 = sub v165, v155 : i32 + v175 = const.i32 -16 : i32 + v176 = add v174, v175 : i32 + v177 = cast v173 : u32 + v178 = add v177, 4 : u32 + v179 = inttoptr v178 : *mut i32 + store v179, v176 + v180 = call noname::_ZN8dlmalloc8dlmalloc5Chunk14fencepost_head17he07aaa52f3b50dfdE() : i32 + v181 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v173, v176) : i32 + v182 = cast v181 : u32 + v183 = add v182, 4 : u32 + v184 = inttoptr v183 : *mut i32 + store v184, v180 + v185 = const.i32 -12 : i32 + v186 = add v174, v185 : i32 + v187 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v173, v186) : i32 + v188 = const.i32 0 : i32 + v189 = cast v187 : u32 + v190 = add v189, 4 : u32 + v191 = inttoptr v190 : *mut i32 + store v191, v188 + v192 = cast v151 : u32 + v193 = add v192, 432 : u32 + v194 = inttoptr v193 : *mut i32 + v195 = load v194 : i32 + v196 = sub v165, v160 : i32 + v197 = add v195, v196 : i32 + v198 = cast v151 : u32 + v199 = add v198, 432 : u32 + v200 = inttoptr v199 : *mut i32 + store v200, v197 + v201 = cast v151 : u32 + v202 = add v201, 444 : u32 + v203 = inttoptr v202 : *mut i32 + v204 = load v203 : i32 + v205 = cast v167 : u32 + v206 = cast v204 : u32 + v207 = gt v205, v206 : i1 + v208 = cast v207 : i32 + v209 = const.i32 0 : i32 + v210 = neq v208, v209 : i1 + v211 = select v210, v204, v167 : i32 + v212 = cast v151 : u32 + v213 = add v212, 444 : u32 + v214 = inttoptr v213 : *mut i32 + store v214, v211 + v215 = cast v151 : u32 + v216 = add v215, 436 : u32 + v217 = inttoptr v216 : *mut i32 + v218 = load v217 : i32 + v219 = cast v218 : u32 + v220 = cast v197 : u32 + v221 = gt v219, v220 : i1 + v222 = cast v221 : i32 + v223 = const.i32 0 : i32 + v224 = neq v222, v223 : i1 + v225 = select v224, v218, v197 : i32 + v226 = cast v151 : u32 + v227 = add v226, 436 : u32 + v228 = inttoptr v227 : *mut i32 + store v228, v225 + br block3(v173, v406) + +block30: + v247 = cast v12 : u32 + v248 = add v247, 408 : u32 + v249 = inttoptr v248 : *mut i32 + v250 = load v249 : i32 + v251 = const.i32 -2 : i32 + v252 = const.i32 3 : i32 + v253 = cast v111 : u32 + v254 = cast v252 : u32 + v255 = shr v253, v254 : u32 + v256 = cast v255 : i32 + v257 = shl v251, v256 : i32 + v258 = band v250, v257 : i32 + v259 = cast v12 : u32 + v260 = add v259, 408 : u32 + v261 = inttoptr v260 : *mut i32 + store v261, v258 + br block15 + +block31: + v241 = cast v236 : u32 + v242 = add v241, 12 : u32 + v243 = inttoptr v242 : *mut i32 + store v243, v232 + v244 = cast v232 : u32 + v245 = add v244, 8 : u32 + v246 = inttoptr v245 : *mut i32 + store v246, v236 + br block15 + +block32: + call noname::_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E(v272, v112) + v279 = const.i32 0 : i32 + v280 = neq v272, v279 : i1 + condbr v280, block3(v272, v407), block35 + +block33: + v274 = call 
noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v80, v79) : i32 + call noname::_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E(v272, v273) + call noname::_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E(v274, v262) + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$13dispose_chunk17h577eb103dc04307bE(v12, v274, v262) + v276 = const.i32 0 : i32 + v277 = neq v272, v276 : i1 + condbr v277, block3(v272, v407), block34 + +block34: + br block4(v275, v3, v0, v272, v8, v33) + +block35: + br block4(v275, v370, v381, v272, v407, v419) + +block36: + v292 = sub v285, v79 : i32 + v293 = const.i32 16 : i32 + v294 = const.i32 8 : i32 + v295 = call noname::_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(v293, v294) : i32 + v296 = cast v292 : u32 + v297 = cast v295 : u32 + v298 = gte v296, v297 : i1 + v299 = cast v298 : i32 + v300 = const.i32 0 : i32 + v301 = neq v299, v300 : i1 + condbr v301, block38, block39 + +block37(v307: i32, v311: i32): + v308 = cast v12 : u32 + v309 = add v308, 424 : u32 + v310 = inttoptr v309 : *mut i32 + store v310, v307 + v312 = cast v306 : u32 + v313 = add v312, 416 : u32 + v314 = inttoptr v313 : *mut i32 + store v314, v311 + v316 = const.i32 0 : i32 + v317 = neq v80, v316 : i1 + condbr v317, block3(v315, v408), block40 + +block38: + v304 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v80, v79) : i32 + v305 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v304, v292) : i32 + call noname::_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E(v80, v79) + call noname::_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E(v304, v292) + call noname::_ZN8dlmalloc8dlmalloc5Chunk12clear_pinuse17h3c1a99d0f5bddc22E(v305) + br block37(v304, v292) + +block39: + call noname::_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E(v80, v285) + v302 = const.i32 0 : i32 + v303 = const.i32 0 : i32 + br block37(v303, v302) + +block40: + br block4(v306, v3, v0, v315, v8, v33) + +block41: + v328 = call noname::_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(v80, v79) : i32 + call noname::_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E(v80, v79) + call noname::_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E(v328, v318) + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$13dispose_chunk17h577eb103dc04307bE(v12, v328, v318) + br block12 + +block42: + br block4(v12, v3, v0, v329, v8, v33) + +block43: + br block4(v12, v3, v0, v80, v8, v33) + +block44: + br block4(v12, v3, v0, v80, v8, v33) + +block45: + v385 = call noname::_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E(v384) : i32 + v386 = const.i32 -8 : i32 + v387 = const.i32 -4 : i32 + v388 = call noname::_ZN8dlmalloc8dlmalloc5Chunk7mmapped17h1a9959fbf47496c3E(v384) : i32 + v389 = const.i32 0 : i32 + v390 = neq v388, v389 : i1 + v391 = select v390, v386, v387 : i32 + v392 = add v385, v391 : i32 + v393 = cast v392 : u32 + v394 = cast v368 : u32 + v395 = lt v393, v394 : i1 + v396 = cast v395 : i32 + v397 = const.i32 0 : i32 + v398 = neq v396, v397 : i1 + v399 = select v398, v392, v368 : i32 + v400 = call noname::memcpy(v373, v379, v399) : i32 + call noname::_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$4free17h501de2a6604ba1ffE(v366, v379) + br block2(v405, v400) +} + +pub fn __rust_alloc_error_handler(i32, i32) { +block0(v0: i32, v1: i32): + call noname::__rdl_oom(v0, v1) + ret + +block1: +} + +pub fn _ZN5alloc7raw_vec11finish_grow17hcefa6a06206fd52bE(i32, i32, i32, i32) { 
+block0(v0: i32, v1: i32, v2: i32, v3: i32): + v4 = const.i32 0 : i32 + v5 = const.i32 0 : i32 + v6 = eq v1, v5 : i1 + v7 = cast v6 : i32 + v8 = const.i32 0 : i32 + v9 = neq v7, v8 : i1 + condbr v9, block4, block5 + +block1: + ret + +block2(v93: i32): + v94 = const.i32 1 : i32 + v95 = cast v93 : u32 + v96 = inttoptr v95 : *mut i32 + store v96, v94 + br block1 + +block3: + v89 = const.i32 0 : i32 + v90 = cast v0 : u32 + v91 = add v90, 4 : u32 + v92 = inttoptr v91 : *mut i32 + store v92, v89 + br block2(v0) + +block4: + v81 = const.i32 0 : i32 + v82 = cast v0 : u32 + v83 = add v82, 4 : u32 + v84 = inttoptr v83 : *mut i32 + store v84, v81 + v85 = const.i32 8 : i32 + v86 = add v0, v85 : i32 + v87 = cast v86 : u32 + v88 = inttoptr v87 : *mut i32 + store v88, v2 + br block2(v0) + +block5: + v10 = const.i32 -1 : i32 + v11 = lte v2, v10 : i1 + v12 = cast v11 : i32 + v13 = const.i32 0 : i32 + v14 = neq v12, v13 : i1 + condbr v14, block3, block6 + +block6: + v15 = cast v3 : u32 + v16 = add v15, 4 : u32 + v17 = inttoptr v16 : *mut i32 + v18 = load v17 : i32 + v19 = const.i32 0 : i32 + v20 = eq v18, v19 : i1 + v21 = cast v20 : i32 + v22 = const.i32 0 : i32 + v23 = neq v21, v22 : i1 + condbr v23, block9, block10 + +block7(v54: i32, v67: i32, v73: i32): + v55 = const.i32 0 : i32 + v56 = eq v54, v55 : i1 + v57 = cast v56 : i32 + v58 = const.i32 0 : i32 + v59 = neq v57, v58 : i1 + condbr v59, block17, block18 + +block8: + v53 = call noname::__rust_alloc(v2, v1) : i32 + br block7(v53, v51, v52) + +block9: + v43 = const.i32 0 : i32 + v44 = neq v2, v43 : i1 + condbr v44, block15, block16 + +block10: + v24 = const.i32 8 : i32 + v25 = add v3, v24 : i32 + v26 = cast v25 : u32 + v27 = inttoptr v26 : *mut i32 + v28 = load v27 : i32 + v29 = const.i32 0 : i32 + v30 = neq v28, v29 : i1 + condbr v30, block11, block12 + +block11: + v39 = cast v3 : u32 + v40 = inttoptr v39 : *mut i32 + v41 = load v40 : i32 + v42 = call noname::__rust_realloc(v41, v28, v1, v2) : i32 + br block7(v42, v2, v1) + +block12: + v31 = const.i32 0 : i32 + v32 = neq v2, v31 : i1 + condbr v32, block13, block14 + +block13: + v33 = const.i32 0 : i32 + v34 = cast v33 : u32 + v35 = add v34, 1048576 : u32 + v36 = inttoptr v35 : *mut u8 + v37 = load v36 : u8 + v38 = zext v37 : i32 + br block8 + +block14: + br block7(v1, v2, v1) + +block15: + v45 = const.i32 0 : i32 + v46 = cast v45 : u32 + v47 = add v46, 1048576 : u32 + v48 = inttoptr v47 : *mut u8 + v49 = load v48 : u8 + v50 = zext v49 : i32 + br block8 + +block16: + br block7(v1, v2, v1) + +block17: + v74 = cast v60 : u32 + v75 = add v74, 4 : u32 + v76 = inttoptr v75 : *mut i32 + store v76, v73 + v77 = const.i32 8 : i32 + v78 = add v60, v77 : i32 + v79 = cast v78 : u32 + v80 = inttoptr v79 : *mut i32 + store v80, v67 + br block2(v60) + +block18: + v62 = cast v0 : u32 + v63 = add v62, 4 : u32 + v64 = inttoptr v63 : *mut i32 + store v64, v54 + v65 = const.i32 8 : i32 + v66 = add v60, v65 : i32 + v68 = cast v66 : u32 + v69 = inttoptr v68 : *mut i32 + store v69, v67 + v70 = const.i32 0 : i32 + v71 = cast v60 : u32 + v72 = inttoptr v71 : *mut i32 + store v72, v70 + ret +} + +pub fn _ZN5alloc7raw_vec19RawVec$LT$T$C$A$GT$16reserve_for_push17h2205b68aee7ddaceE(i32) { +block0(v0: i32): + v1 = const.i32 0 : i32 + v2 = global.load (@__stack_pointer) as *mut i8 : i32 + v3 = const.i32 32 : i32 + v4 = sub v2, v3 : i32 + v5 = global.symbol @__stack_pointer : *mut i32 + store v5, v4 + v6 = cast v0 : u32 + v7 = add v6, 4 : u32 + v8 = inttoptr v7 : *mut i32 + v9 = load v8 : i32 + v10 = const.i32 1 : i32 + v11 = shl v9, 
v10 : i32 + v12 = const.i32 4 : i32 + v13 = const.i32 4 : i32 + v14 = cast v11 : u32 + v15 = cast v13 : u32 + v16 = gt v14, v15 : i1 + v17 = cast v16 : i32 + v18 = const.i32 0 : i32 + v19 = neq v17, v18 : i1 + v20 = select v19, v11, v12 : i32 + v21 = const.i32 2 : i32 + v22 = shl v20, v21 : i32 + v23 = const.i32 536870912 : i32 + v24 = cast v20 : u32 + v25 = cast v23 : u32 + v26 = lt v24, v25 : i1 + v27 = cast v26 : i32 + v28 = const.i32 2 : i32 + v29 = shl v27, v28 : i32 + v30 = const.i32 0 : i32 + v31 = eq v9, v30 : i1 + v32 = cast v31 : i32 + v33 = const.i32 0 : i32 + v34 = neq v32, v33 : i1 + condbr v34, block3, block4 + +block1: + ret + +block2: + v55 = const.i32 8 : i32 + v56 = add v4, v55 : i32 + v59 = const.i32 20 : i32 + v60 = add v54, v59 : i32 + call noname::_ZN5alloc7raw_vec11finish_grow17hcefa6a06206fd52bE(v56, v29, v22, v60) + v61 = cast v54 : u32 + v62 = add v61, 12 : u32 + v63 = inttoptr v62 : *mut i32 + v64 = load v63 : i32 + v65 = cast v54 : u32 + v66 = add v65, 8 : u32 + v67 = inttoptr v66 : *mut i32 + v68 = load v67 : i32 + v69 = const.i32 0 : i32 + v70 = neq v68, v69 : i1 + condbr v70, block6, block7 + +block3: + v50 = const.i32 0 : i32 + v51 = cast v4 : u32 + v52 = add v51, 24 : u32 + v53 = inttoptr v52 : *mut i32 + store v53, v50 + br block2 + +block4: + v35 = const.i32 4 : i32 + v36 = cast v4 : u32 + v37 = add v36, 24 : u32 + v38 = inttoptr v37 : *mut i32 + store v38, v35 + v39 = const.i32 2 : i32 + v40 = shl v9, v39 : i32 + v41 = cast v4 : u32 + v42 = add v41, 28 : u32 + v43 = inttoptr v42 : *mut i32 + store v43, v40 + v44 = cast v0 : u32 + v45 = inttoptr v44 : *mut i32 + v46 = load v45 : i32 + v47 = cast v4 : u32 + v48 = add v47, 20 : u32 + v49 = inttoptr v48 : *mut i32 + store v49, v46 + br block2 + +block5: + v94 = const.i32 32 : i32 + v95 = add v54, v94 : i32 + v96 = global.symbol @__stack_pointer : *mut i32 + store v96, v95 + br block1 + +block6: + v78 = const.i32 -2147483647 : i32 + v79 = eq v64, v78 : i1 + v80 = cast v79 : i32 + v81 = const.i32 0 : i32 + v82 = neq v80, v81 : i1 + condbr v82, block5, block8 + +block7: + v73 = cast v0 : u32 + v74 = add v73, 4 : u32 + v75 = inttoptr v74 : *mut i32 + store v75, v20 + v76 = cast v71 : u32 + v77 = inttoptr v76 : *mut i32 + store v77, v64 + br block5 + +block8: + v83 = const.i32 0 : i32 + v84 = eq v64, v83 : i1 + v85 = cast v84 : i32 + v86 = const.i32 0 : i32 + v87 = neq v85, v86 : i1 + condbr v87, block9, block10 + +block9: + call noname::_ZN5alloc7raw_vec17capacity_overflow17h6c250c8ca346b5adE() + unreachable + +block10: + v88 = const.i32 16 : i32 + v89 = add v54, v88 : i32 + v90 = cast v89 : u32 + v91 = inttoptr v90 : *mut i32 + v92 = load v91 : i32 + call noname::_ZN5alloc5alloc18handle_alloc_error17h4f3cb0c5afb21c76E(v64, v92) + unreachable +} + +pub fn vec_alloc() -> i32 { +block0: + v1 = const.i32 0 : i32 + v2 = global.load (@__stack_pointer) as *mut i8 : i32 + v3 = const.i32 16 : i32 + v4 = sub v2, v3 : i32 + v5 = global.symbol @__stack_pointer : *mut i32 + store v5, v4 + v6 = const.i32 0 : i32 + v7 = cast v4 : u32 + v8 = add v7, 12 : u32 + v9 = inttoptr v8 : *mut i32 + store v9, v6 + v10 = const.i64 4 : i64 + v11 = cast v4 : u32 + v12 = add v11, 4 : u32 + v13 = inttoptr v12 : *mut i64 + store v13, v10 + v14 = const.i32 4 : i32 + v15 = add v4, v14 : i32 + call noname::_ZN5alloc7raw_vec19RawVec$LT$T$C$A$GT$16reserve_for_push17h2205b68aee7ddaceE(v15) + v16 = cast v4 : u32 + v17 = add v16, 4 : u32 + v18 = inttoptr v17 : *mut i32 + v19 = load v18 : i32 + v20 = cast v4 : u32 + v21 = add v20, 12 : u32 + v22 = 
inttoptr v21 : *mut i32 + v23 = load v22 : i32 + v24 = const.i32 2 : i32 + v25 = shl v23, v24 : i32 + v26 = add v19, v25 : i32 + v27 = const.i32 1 : i32 + v28 = cast v26 : u32 + v29 = inttoptr v28 : *mut i32 + store v29, v27 + v30 = const.i32 -1 : i32 + v31 = eq v23, v30 : i1 + v32 = cast v31 : i32 + v33 = const.i32 0 : i32 + v34 = neq v32, v33 : i1 + condbr v34, block2, block3 + +block1(v0: i32): + +block2: + unreachable + +block3: + v35 = cast v4 : u32 + v36 = add v35, 8 : u32 + v37 = inttoptr v36 : *mut i32 + v38 = load v37 : i32 + v39 = const.i32 0 : i32 + v40 = eq v38, v39 : i1 + v41 = cast v40 : i32 + v42 = const.i32 0 : i32 + v43 = neq v41, v42 : i1 + condbr v43, block4, block5 + +block4: + v48 = const.i32 16 : i32 + v49 = add v4, v48 : i32 + v50 = global.symbol @__stack_pointer : *mut i32 + store v50, v49 + v51 = const.i32 1 : i32 + ret v51 + +block5: + v44 = const.i32 2 : i32 + v45 = shl v38, v44 : i32 + v46 = const.i32 4 : i32 + call noname::__rust_dealloc(v19, v45, v46) + br block4 +} + +pub fn _ZN5alloc7raw_vec17capacity_overflow17h6c250c8ca346b5adE() { +block0: + unreachable + +block1: +} + +pub fn _ZN5alloc5alloc18handle_alloc_error17h4f3cb0c5afb21c76E(i32, i32) { +block0(v0: i32, v1: i32): + call noname::_ZN5alloc5alloc18handle_alloc_error8rt_error17h63de615f6e977af2E(v0, v1) + unreachable + +block1: +} + +pub fn _ZN5alloc5alloc18handle_alloc_error8rt_error17h63de615f6e977af2E(i32, i32) { +block0(v0: i32, v1: i32): + call noname::__rust_alloc_error_handler(v1, v0) + unreachable + +block1: +} + +pub fn __rdl_oom(i32, i32) { +block0(v0: i32, v1: i32): + unreachable + +block1: +} + +pub fn _ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E(i32, i32) -> i32 { +block0(v0: i32, v1: i32): + v3 = add v0, v1 : i32 + v4 = const.i32 -1 : i32 + v5 = add v3, v4 : i32 + v6 = const.i32 0 : i32 + v7 = sub v6, v1 : i32 + v8 = band v5, v7 : i32 + br block1(v8) + +block1(v2: i32): + ret v2 +} + +pub fn _ZN8dlmalloc8dlmalloc9left_bits17hb6cbe146b8019d98E(i32) -> i32 { +block0(v0: i32): + v2 = const.i32 1 : i32 + v3 = shl v0, v2 : i32 + v4 = const.i32 0 : i32 + v5 = sub v4, v3 : i32 + v6 = bor v3, v5 : i32 + br block1(v6) + +block1(v1: i32): + ret v1 +} + +pub fn _ZN8dlmalloc8dlmalloc9least_bit17h4bca52ead665dc5aE(i32) -> i32 { +block0(v0: i32): + v2 = const.i32 0 : i32 + v3 = sub v2, v0 : i32 + v4 = band v3, v0 : i32 + br block1(v4) + +block1(v1: i32): + ret v1 +} + +pub fn _ZN8dlmalloc8dlmalloc24leftshift_for_tree_index17h31d064fdd867f502E(i32) -> i32 { +block0(v0: i32): + v2 = const.i32 0 : i32 + v3 = const.i32 25 : i32 + v4 = const.i32 1 : i32 + v5 = cast v0 : u32 + v6 = cast v4 : u32 + v7 = shr v5, v6 : u32 + v8 = cast v7 : i32 + v9 = sub v3, v8 : i32 + v10 = const.i32 31 : i32 + v11 = eq v0, v10 : i1 + v12 = cast v11 : i32 + v13 = const.i32 0 : i32 + v14 = neq v12, v13 : i1 + v15 = select v14, v2, v9 : i32 + br block1(v15) + +block1(v1: i32): + ret v1 +} + +pub fn _ZN8dlmalloc8dlmalloc5Chunk14fencepost_head17he07aaa52f3b50dfdE() -> i32 { +block0: + v1 = const.i32 7 : i32 + br block1(v1) + +block1(v0: i32): + ret v0 +} + +pub fn _ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E(i32) -> i32 { +block0(v0: i32): + v2 = cast v0 : u32 + v3 = add v2, 4 : u32 + v4 = inttoptr v3 : *mut i32 + v5 = load v4 : i32 + v6 = const.i32 -8 : i32 + v7 = band v5, v6 : i32 + br block1(v7) + +block1(v1: i32): + ret v1 +} + +pub fn _ZN8dlmalloc8dlmalloc5Chunk6cinuse17h58499de57c2d37e2E(i32) -> i32 { +block0(v0: i32): + v2 = cast v0 : u32 + v3 = add v2, 4 : u32 + v4 = inttoptr v3 : *mut u8 + v5 = load v4 : u8 + 
v6 = zext v5 : i32 + v7 = const.i32 2 : i32 + v8 = band v6, v7 : i32 + v9 = const.i32 1 : i32 + v10 = cast v8 : u32 + v11 = cast v9 : u32 + v12 = shr v10, v11 : u32 + v13 = cast v12 : i32 + br block1(v13) + +block1(v1: i32): + ret v1 +} + +pub fn _ZN8dlmalloc8dlmalloc5Chunk6pinuse17h92d5107047b03ba7E(i32) -> i32 { +block0(v0: i32): + v2 = cast v0 : u32 + v3 = add v2, 4 : u32 + v4 = inttoptr v3 : *mut i32 + v5 = load v4 : i32 + v6 = const.i32 1 : i32 + v7 = band v5, v6 : i32 + br block1(v7) + +block1(v1: i32): + ret v1 +} + +pub fn _ZN8dlmalloc8dlmalloc5Chunk12clear_pinuse17h3c1a99d0f5bddc22E(i32) { +block0(v0: i32): + v1 = cast v0 : u32 + v2 = add v1, 4 : u32 + v3 = inttoptr v2 : *mut i32 + v4 = load v3 : i32 + v5 = const.i32 -2 : i32 + v6 = band v4, v5 : i32 + v7 = cast v0 : u32 + v8 = add v7, 4 : u32 + v9 = inttoptr v8 : *mut i32 + store v9, v6 + br block1 + +block1: + ret +} + +pub fn _ZN8dlmalloc8dlmalloc5Chunk5inuse17h2d327e4c36b84dfeE(i32) -> i32 { +block0(v0: i32): + v2 = cast v0 : u32 + v3 = add v2, 4 : u32 + v4 = inttoptr v3 : *mut i32 + v5 = load v4 : i32 + v6 = const.i32 3 : i32 + v7 = band v5, v6 : i32 + v8 = const.i32 1 : i32 + v9 = neq v7, v8 : i1 + v10 = cast v9 : i32 + br block1(v10) + +block1(v1: i32): + ret v1 +} + +pub fn _ZN8dlmalloc8dlmalloc5Chunk7mmapped17h1a9959fbf47496c3E(i32) -> i32 { +block0(v0: i32): + v2 = cast v0 : u32 + v3 = add v2, 4 : u32 + v4 = inttoptr v3 : *mut u8 + v5 = load v4 : u8 + v6 = zext v5 : i32 + v7 = const.i32 3 : i32 + v8 = band v6, v7 : i32 + v9 = const.i32 0 : i32 + v10 = eq v8, v9 : i1 + v11 = cast v10 : i32 + br block1(v11) + +block1(v1: i32): + ret v1 +} + +pub fn _ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E(i32, i32) { +block0(v0: i32, v1: i32): + v2 = cast v0 : u32 + v3 = add v2, 4 : u32 + v4 = inttoptr v3 : *mut i32 + v5 = load v4 : i32 + v6 = const.i32 1 : i32 + v7 = band v5, v6 : i32 + v8 = bor v7, v1 : i32 + v9 = const.i32 2 : i32 + v10 = bor v8, v9 : i32 + v11 = cast v0 : u32 + v12 = add v11, 4 : u32 + v13 = inttoptr v12 : *mut i32 + store v13, v10 + v14 = add v0, v1 : i32 + v15 = cast v14 : u32 + v16 = add v15, 4 : u32 + v17 = inttoptr v16 : *mut i32 + v18 = load v17 : i32 + v19 = const.i32 1 : i32 + v20 = bor v18, v19 : i32 + v21 = cast v14 : u32 + v22 = add v21, 4 : u32 + v23 = inttoptr v22 : *mut i32 + store v23, v20 + br block1 + +block1: + ret +} + +pub fn _ZN8dlmalloc8dlmalloc5Chunk20set_inuse_and_pinuse17h515e5a69a6d1edc6E(i32, i32) { +block0(v0: i32, v1: i32): + v2 = const.i32 3 : i32 + v3 = bor v1, v2 : i32 + v4 = cast v0 : u32 + v5 = add v4, 4 : u32 + v6 = inttoptr v5 : *mut i32 + store v6, v3 + v7 = add v0, v1 : i32 + v8 = cast v7 : u32 + v9 = add v8, 4 : u32 + v10 = inttoptr v9 : *mut i32 + v11 = load v10 : i32 + v12 = const.i32 1 : i32 + v13 = bor v11, v12 : i32 + v14 = cast v7 : u32 + v15 = add v14, 4 : u32 + v16 = inttoptr v15 : *mut i32 + store v16, v13 + br block1 + +block1: + ret +} + +pub fn _ZN8dlmalloc8dlmalloc5Chunk34set_size_and_pinuse_of_inuse_chunk17h4acf6d59020bd397E(i32, i32) { +block0(v0: i32, v1: i32): + v2 = const.i32 3 : i32 + v3 = bor v1, v2 : i32 + v4 = cast v0 : u32 + v5 = add v4, 4 : u32 + v6 = inttoptr v5 : *mut i32 + store v6, v3 + br block1 + +block1: + ret +} + +pub fn _ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E(i32, i32) { +block0(v0: i32, v1: i32): + v2 = const.i32 1 : i32 + v3 = bor v1, v2 : i32 + v4 = cast v0 : u32 + v5 = add v4, 4 : u32 + v6 = inttoptr v5 : *mut i32 + store v6, v3 + v7 = add v0, v1 : i32 + v8 = cast v7 : u32 + v9 = 
inttoptr v8 : *mut i32 + store v9, v1 + br block1 + +block1: + ret +} + +pub fn _ZN8dlmalloc8dlmalloc5Chunk20set_free_with_pinuse17h5d876ea751634e99E(i32, i32, i32) { +block0(v0: i32, v1: i32, v2: i32): + v3 = cast v2 : u32 + v4 = add v3, 4 : u32 + v5 = inttoptr v4 : *mut i32 + v6 = load v5 : i32 + v7 = const.i32 -2 : i32 + v8 = band v6, v7 : i32 + v9 = cast v2 : u32 + v10 = add v9, 4 : u32 + v11 = inttoptr v10 : *mut i32 + store v11, v8 + v12 = const.i32 1 : i32 + v13 = bor v1, v12 : i32 + v14 = cast v0 : u32 + v15 = add v14, 4 : u32 + v16 = inttoptr v15 : *mut i32 + store v16, v13 + v17 = add v0, v1 : i32 + v18 = cast v17 : u32 + v19 = inttoptr v18 : *mut i32 + store v19, v1 + br block1 + +block1: + ret +} + +pub fn _ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E(i32, i32) -> i32 { +block0(v0: i32, v1: i32): + v3 = add v0, v1 : i32 + br block1(v3) + +block1(v2: i32): + ret v2 +} + +pub fn _ZN8dlmalloc8dlmalloc5Chunk12minus_offset17h7c3eec81761249d9E(i32, i32) -> i32 { +block0(v0: i32, v1: i32): + v3 = sub v0, v1 : i32 + br block1(v3) + +block1(v2: i32): + ret v2 +} + +pub fn _ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE(i32) -> i32 { +block0(v0: i32): + v2 = const.i32 8 : i32 + v3 = add v0, v2 : i32 + br block1(v3) + +block1(v1: i32): + ret v1 +} + +pub fn _ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E() -> i32 { +block0: + v1 = const.i32 8 : i32 + br block1(v1) + +block1(v0: i32): + ret v0 +} + +pub fn _ZN8dlmalloc8dlmalloc5Chunk8from_mem17h11dd30c74f483706E(i32) -> i32 { +block0(v0: i32): + v2 = const.i32 -8 : i32 + v3 = add v0, v2 : i32 + br block1(v3) + +block1(v1: i32): + ret v1 +} + +pub fn _ZN8dlmalloc8dlmalloc9TreeChunk14leftmost_child17h20605933c801b44bE(i32) -> i32 { +block0(v0: i32): + v2 = const.i32 0 : i32 + v3 = cast v0 : u32 + v4 = add v3, 16 : u32 + v5 = inttoptr v4 : *mut i32 + v6 = load v5 : i32 + v7 = const.i32 0 : i32 + v8 = neq v6, v7 : i1 + condbr v8, block2(v6), block3 + +block1(v1: i32): + ret v1 + +block2(v14: i32): + br block1(v14) + +block3: + v9 = const.i32 20 : i32 + v10 = add v0, v9 : i32 + v11 = cast v10 : u32 + v12 = inttoptr v11 : *mut i32 + v13 = load v12 : i32 + br block2(v13) +} + +pub fn _ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E(i32) -> i32 { +block0(v0: i32): + br block1(v0) + +block1(v1: i32): + ret v1 +} + +pub fn _ZN8dlmalloc8dlmalloc9TreeChunk4next17he250edbec5d87123E(i32) -> i32 { +block0(v0: i32): + v2 = cast v0 : u32 + v3 = add v2, 12 : u32 + v4 = inttoptr v3 : *mut i32 + v5 = load v4 : i32 + br block1(v5) + +block1(v1: i32): + ret v1 +} + +pub fn _ZN8dlmalloc8dlmalloc9TreeChunk4prev17h7a0f1d46544cc14aE(i32) -> i32 { +block0(v0: i32): + v2 = cast v0 : u32 + v3 = add v2, 8 : u32 + v4 = inttoptr v3 : *mut i32 + v5 = load v4 : i32 + br block1(v5) + +block1(v1: i32): + ret v1 +} + +pub fn _ZN8dlmalloc8dlmalloc7Segment9is_extern17h6f6db2c70b891fd9E(i32) -> i32 { +block0(v0: i32): + v2 = cast v0 : u32 + v3 = add v2, 12 : u32 + v4 = inttoptr v3 : *mut i32 + v5 = load v4 : i32 + v6 = const.i32 1 : i32 + v7 = band v5, v6 : i32 + br block1(v7) + +block1(v1: i32): + ret v1 +} + +pub fn _ZN8dlmalloc8dlmalloc7Segment9sys_flags17h224550055bf7775bE(i32) -> i32 { +block0(v0: i32): + v2 = cast v0 : u32 + v3 = add v2, 12 : u32 + v4 = inttoptr v3 : *mut i32 + v5 = load v4 : i32 + v6 = const.i32 1 : i32 + v7 = cast v5 : u32 + v8 = cast v6 : u32 + v9 = shr v7, v8 : u32 + v10 = cast v9 : i32 + br block1(v10) + +block1(v1: i32): + ret v1 +} + +pub fn _ZN8dlmalloc8dlmalloc7Segment5holds17h8f6de4ee6718009bE(i32, i32) 
-> i32 { +block0(v0: i32, v1: i32): + v3 = const.i32 0 : i32 + v4 = const.i32 0 : i32 + v5 = cast v0 : u32 + v6 = inttoptr v5 : *mut i32 + v7 = load v6 : i32 + v8 = cast v7 : u32 + v9 = cast v1 : u32 + v10 = gt v8, v9 : i1 + v11 = cast v10 : i32 + v12 = const.i32 0 : i32 + v13 = neq v11, v12 : i1 + condbr v13, block2(v4), block3 + +block1(v2: i32): + ret v2 + +block2(v23: i32): + br block1(v23) + +block3: + v14 = cast v0 : u32 + v15 = add v14, 4 : u32 + v16 = inttoptr v15 : *mut i32 + v17 = load v16 : i32 + v18 = add v7, v17 : i32 + v19 = cast v18 : u32 + v20 = cast v1 : u32 + v21 = gt v19, v20 : i1 + v22 = cast v21 : i32 + br block2(v22) +} + +pub fn _ZN8dlmalloc8dlmalloc7Segment3top17he7e9e2493151d036E(i32) -> i32 { +block0(v0: i32): + v2 = cast v0 : u32 + v3 = inttoptr v2 : *mut i32 + v4 = load v3 : i32 + v5 = cast v0 : u32 + v6 = add v5, 4 : u32 + v7 = inttoptr v6 : *mut i32 + v8 = load v7 : i32 + v9 = add v4, v8 : i32 + br block1(v9) + +block1(v1: i32): + ret v1 +} + +pub fn _ZN73_$LT$dlmalloc..global..Instance$u20$as$u20$core..ops..deref..DerefMut$GT$9deref_mut17h16955ef502c5c4e5E(i32) -> i32 { +block0(v0: i32): + v2 = const.i32 1048580 : i32 + br block1(v2) + +block1(v1: i32): + ret v1 +} + +pub fn _ZN68_$LT$dlmalloc..global..Instance$u20$as$u20$core..ops..drop..Drop$GT$4drop17he19d8d9c8ea92454E(i32) { +block0(v0: i32): + br block1 + +block1: + ret +} + +pub fn _ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$5alloc17hdbf1e2bcc01bc909E(i32, i32, i32) { +block0(v0: i32, v1: i32, v2: i32): + v3 = const.i32 0 : i32 + v4 = const.i32 16 : i32 + v5 = cast v2 : u32 + v6 = cast v4 : u32 + v7 = shr v5, v6 : u32 + v8 = cast v7 : i32 + v9 = cast v8 : u32 + v10 = memory.grow v9 : i32 + v11 = const.i32 0 : i32 + v12 = cast v0 : u32 + v13 = add v12, 8 : u32 + v14 = inttoptr v13 : *mut i32 + store v14, v11 + v15 = const.i32 0 : i32 + v16 = const.i32 -65536 : i32 + v17 = band v2, v16 : i32 + v18 = const.i32 -1 : i32 + v19 = eq v10, v18 : i1 + v20 = cast v19 : i32 + v21 = const.i32 0 : i32 + v22 = neq v20, v21 : i1 + v23 = select v22, v15, v17 : i32 + v24 = cast v0 : u32 + v25 = add v24, 4 : u32 + v26 = inttoptr v25 : *mut i32 + store v26, v23 + v27 = const.i32 0 : i32 + v28 = const.i32 16 : i32 + v29 = shl v10, v28 : i32 + v30 = const.i32 0 : i32 + v31 = neq v20, v30 : i1 + v32 = select v31, v27, v29 : i32 + v33 = cast v0 : u32 + v34 = inttoptr v33 : *mut i32 + store v34, v32 + br block1 + +block1: + ret +} + +pub fn _ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$5remap17hf5ff3c6a92680f40E(i32, i32, i32, i32, i32) -> i32 { +block0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i32): + v6 = const.i32 0 : i32 + br block1(v6) + +block1(v5: i32): + ret v5 +} + +pub fn _ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$9free_part17h74489c9e7a3aa967E(i32, i32, i32, i32) -> i32 { +block0(v0: i32, v1: i32, v2: i32, v3: i32): + v5 = const.i32 0 : i32 + br block1(v5) + +block1(v4: i32): + ret v4 +} + +pub fn _ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$4free17h993c5f05ba1214bcE(i32, i32, i32) -> i32 { +block0(v0: i32, v1: i32, v2: i32): + v4 = const.i32 0 : i32 + br block1(v4) + +block1(v3: i32): + ret v3 +} + +pub fn _ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$16can_release_part17h43bfb7d8666fcc31E(i32, i32) -> i32 { +block0(v0: i32, v1: i32): + v3 = const.i32 0 : i32 + br block1(v3) + +block1(v2: i32): + ret v2 +} + +pub fn 
_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$9page_size17h0fdd55b2693d440cE(i32) -> i32 { +block0(v0: i32): + v2 = const.i32 65536 : i32 + br block1(v2) + +block1(v1: i32): + ret v1 +} + +pub fn _ZN8dlmalloc3sys23enable_alloc_after_fork17h64eb1fc0ff7b2689E() { +block0: + br block1 + +block1: + ret +} + +pub fn memcpy(i32, i32, i32) -> i32 { +block0(v0: i32, v1: i32, v2: i32): + v4 = call noname::_ZN17compiler_builtins3mem6memcpy17h7b83c85e899060b3E(v0, v1, v2) : i32 + br block1(v4) + +block1(v3: i32): + ret v3 +} + +pub fn _ZN17compiler_builtins3mem6memcpy17h7b83c85e899060b3E(i32, i32, i32) -> i32 { +block0(v0: i32, v1: i32, v2: i32): + v4 = const.i32 0 : i32 + v5 = const.i32 15 : i32 + v6 = cast v2 : u32 + v7 = cast v5 : u32 + v8 = gt v6, v7 : i1 + v9 = cast v8 : i32 + v10 = const.i32 0 : i32 + v11 = neq v9, v10 : i1 + condbr v11, block3, block4 + +block1(v3: i32): + ret v3 + +block2(v143: i32, v149: i32, v172: i32, v174: i32): + v144 = const.i32 0 : i32 + v145 = eq v143, v144 : i1 + v146 = cast v145 : i32 + v147 = const.i32 0 : i32 + v148 = neq v146, v147 : i1 + condbr v148, block21, block22 + +block3: + v12 = const.i32 0 : i32 + v13 = sub v12, v0 : i32 + v14 = const.i32 3 : i32 + v15 = band v13, v14 : i32 + v16 = add v0, v15 : i32 + v17 = const.i32 0 : i32 + v18 = eq v15, v17 : i1 + v19 = cast v18 : i32 + v20 = const.i32 0 : i32 + v21 = neq v19, v20 : i1 + condbr v21, block5(v16), block6 + +block4: + br block2(v2, v0, v1, v0) + +block5(v42: i32): + v47 = sub v2, v15 : i32 + v48 = const.i32 -4 : i32 + v49 = band v47, v48 : i32 + v50 = add v42, v49 : i32 + v53 = add v1, v45 : i32 + v54 = const.i32 3 : i32 + v55 = band v53, v54 : i32 + v56 = const.i32 0 : i32 + v57 = eq v55, v56 : i1 + v58 = cast v57 : i32 + v59 = const.i32 0 : i32 + v60 = neq v58, v59 : i1 + condbr v60, block11, block12 + +block6: + br block7(v0, v1) + +block7(v22: i32, v23: i32): + v24 = cast v23 : u32 + v25 = inttoptr v24 : *mut u8 + v26 = load v25 : u8 + v27 = zext v26 : i32 + v28 = trunc v27 : u8 + v29 = cast v22 : u32 + v30 = inttoptr v29 : *mut u8 + store v30, v28 + v31 = const.i32 1 : i32 + v32 = add v23, v31 : i32 + v33 = const.i32 1 : i32 + v34 = add v22, v33 : i32 + v36 = cast v34 : u32 + v37 = cast v35 : u32 + v38 = lt v36, v37 : i1 + v39 = cast v38 : i32 + v40 = const.i32 0 : i32 + v41 = neq v39, v40 : i1 + condbr v41, block7(v34, v32), block9 + +block8: + br block5(v35) + +block9: + br block8 + +block10(v150: i32, v175: i32): + v134 = const.i32 3 : i32 + v135 = band v47, v134 : i32 + v142 = add v53, v49 : i32 + br block2(v135, v150, v142, v175) + +block11: + v108 = const.i32 1 : i32 + v109 = lt v49, v108 : i1 + v110 = cast v109 : i32 + v111 = const.i32 0 : i32 + v112 = neq v110, v111 : i1 + condbr v112, block10(v50, v176), block17 + +block12: + v61 = const.i32 1 : i32 + v62 = lt v49, v61 : i1 + v63 = cast v62 : i32 + v64 = const.i32 0 : i32 + v65 = neq v63, v64 : i1 + condbr v65, block10(v50, v0), block13 + +block13: + v66 = const.i32 3 : i32 + v67 = shl v53, v66 : i32 + v68 = const.i32 24 : i32 + v69 = band v67, v68 : i32 + v70 = const.i32 -4 : i32 + v71 = band v53, v70 : i32 + v72 = const.i32 4 : i32 + v73 = add v71, v72 : i32 + v74 = const.i32 0 : i32 + v75 = sub v74, v67 : i32 + v76 = const.i32 24 : i32 + v77 = band v75, v76 : i32 + v78 = cast v71 : u32 + v79 = inttoptr v78 : *mut i32 + v80 = load v79 : i32 + br block14(v42, v80, v73) + +block14(v81: i32, v82: i32, v88: i32): + v84 = cast v82 : u32 + v85 = cast v83 : u32 + v86 = shr v84, v85 : u32 + v87 = cast v86 : i32 + v89 = cast 
v88 : u32 + v90 = inttoptr v89 : *mut i32 + v91 = load v90 : i32 + v93 = shl v91, v92 : i32 + v94 = bor v87, v93 : i32 + v95 = cast v81 : u32 + v96 = inttoptr v95 : *mut i32 + store v96, v94 + v97 = const.i32 4 : i32 + v98 = add v88, v97 : i32 + v99 = const.i32 4 : i32 + v100 = add v81, v99 : i32 + v102 = cast v100 : u32 + v103 = cast v101 : u32 + v104 = lt v102, v103 : i1 + v105 = cast v104 : i32 + v106 = const.i32 0 : i32 + v107 = neq v105, v106 : i1 + condbr v107, block14(v100, v91, v98), block16 + +block15: + +block16: + br block10(v101, v176) + +block17: + br block18(v42, v53) + +block18(v113: i32, v114: i32): + v115 = cast v114 : u32 + v116 = inttoptr v115 : *mut i32 + v117 = load v116 : i32 + v118 = cast v113 : u32 + v119 = inttoptr v118 : *mut i32 + store v119, v117 + v120 = const.i32 4 : i32 + v121 = add v114, v120 : i32 + v122 = const.i32 4 : i32 + v123 = add v113, v122 : i32 + v125 = cast v123 : u32 + v126 = cast v124 : u32 + v127 = lt v125, v126 : i1 + v128 = cast v127 : i32 + v129 = const.i32 0 : i32 + v130 = neq v128, v129 : i1 + condbr v130, block18(v123, v121), block20 + +block19: + br block10(v124, v176) + +block20: + br block19 + +block21: + br block1(v174) + +block22: + v151 = add v149, v143 : i32 + br block23(v149, v172) + +block23(v152: i32, v153: i32): + v154 = cast v153 : u32 + v155 = inttoptr v154 : *mut u8 + v156 = load v155 : u8 + v157 = zext v156 : i32 + v158 = trunc v157 : u8 + v159 = cast v152 : u32 + v160 = inttoptr v159 : *mut u8 + store v160, v158 + v161 = const.i32 1 : i32 + v162 = add v153, v161 : i32 + v163 = const.i32 1 : i32 + v164 = add v152, v163 : i32 + v166 = cast v164 : u32 + v167 = cast v165 : u32 + v168 = lt v166, v167 : i1 + v169 = cast v168 : i32 + v170 = const.i32 0 : i32 + v171 = neq v169, v170 : i1 + condbr v171, block23(v164, v162), block25 + +block24: + br block21 + +block25: + br block24 +} diff --git a/frontend-wasm/tests/expected/dlmalloc.wat b/frontend-wasm/tests/expected/dlmalloc.wat new file mode 100644 index 00000000..a5363682 --- /dev/null +++ b/frontend-wasm/tests/expected/dlmalloc.wat @@ -0,0 +1,4646 @@ +(module + (type (;0;) (func (param i32 i32 i32))) + (type (;1;) (func (param i32 i32))) + (type (;2;) (func (param i32) (result i32))) + (type (;3;) (func (param i32 i32) (result i32))) + (type (;4;) (func (param i32 i32 i32) (result i32))) + (type (;5;) (func (result i32))) + (type (;6;) (func (param i32 i32 i32 i32) (result i32))) + (type (;7;) (func (param i32 i32 i32 i32))) + (type (;8;) (func (param i32))) + (type (;9;) (func)) + (type (;10;) (func (param i32 i32 i32 i32 i32) (result i32))) + (func $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$13dispose_chunk17h577eb103dc04307bE (;0;) (type 0) (param i32 i32 i32) + (local i32 i32 i32 i32) + local.get 1 + local.get 2 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 3 + block ;; label = @1 + block ;; label = @2 + block ;; label = @3 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk6pinuse17h92d5107047b03ba7E + br_if 0 (;@3;) + local.get 1 + i32.load + local.set 4 + block ;; label = @4 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk7mmapped17h1a9959fbf47496c3E + br_if 0 (;@4;) + local.get 4 + local.get 2 + i32.add + local.set 2 + block ;; label = @5 + local.get 1 + local.get 4 + call $_ZN8dlmalloc8dlmalloc5Chunk12minus_offset17h7c3eec81761249d9E + local.tee 1 + local.get 0 + i32.load offset=424 + i32.ne + br_if 0 (;@5;) + local.get 3 + i32.load offset=4 + i32.const 3 + i32.and + i32.const 3 + i32.ne + br_if 2 (;@3;) + local.get 0 + local.get 2 + 
i32.store offset=416 + local.get 1 + local.get 2 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk20set_free_with_pinuse17h5d876ea751634e99E + return + end + block ;; label = @5 + local.get 4 + i32.const 256 + i32.lt_u + br_if 0 (;@5;) + local.get 0 + local.get 1 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E + br 2 (;@3;) + end + block ;; label = @5 + local.get 1 + i32.load offset=12 + local.tee 5 + local.get 1 + i32.load offset=8 + local.tee 6 + i32.eq + br_if 0 (;@5;) + local.get 6 + local.get 5 + i32.store offset=12 + local.get 5 + local.get 6 + i32.store offset=8 + br 2 (;@3;) + end + local.get 0 + local.get 0 + i32.load offset=408 + i32.const -2 + local.get 4 + i32.const 3 + i32.shr_u + i32.rotl + i32.and + i32.store offset=408 + br 1 (;@3;) + end + local.get 0 + local.get 1 + local.get 4 + i32.sub + local.get 2 + local.get 4 + i32.add + i32.const 16 + i32.add + local.tee 1 + call $_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$4free17h993c5f05ba1214bcE + i32.eqz + br_if 1 (;@2;) + local.get 0 + local.get 0 + i32.load offset=432 + local.get 1 + i32.sub + i32.store offset=432 + return + end + block ;; label = @3 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk6cinuse17h58499de57c2d37e2E + i32.eqz + br_if 0 (;@3;) + local.get 1 + local.get 2 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk20set_free_with_pinuse17h5d876ea751634e99E + br 2 (;@1;) + end + block ;; label = @3 + block ;; label = @4 + local.get 3 + local.get 0 + i32.load offset=428 + i32.eq + br_if 0 (;@4;) + local.get 3 + local.get 0 + i32.load offset=424 + i32.eq + br_if 1 (;@3;) + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E + local.tee 4 + local.get 2 + i32.add + local.set 2 + block ;; label = @5 + block ;; label = @6 + local.get 4 + i32.const 256 + i32.lt_u + br_if 0 (;@6;) + local.get 0 + local.get 3 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E + br 1 (;@5;) + end + block ;; label = @6 + local.get 3 + i32.load offset=12 + local.tee 5 + local.get 3 + i32.load offset=8 + local.tee 3 + i32.eq + br_if 0 (;@6;) + local.get 3 + local.get 5 + i32.store offset=12 + local.get 5 + local.get 3 + i32.store offset=8 + br 1 (;@5;) + end + local.get 0 + local.get 0 + i32.load offset=408 + i32.const -2 + local.get 4 + i32.const 3 + i32.shr_u + i32.rotl + i32.and + i32.store offset=408 + end + local.get 1 + local.get 2 + call $_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E + local.get 1 + local.get 0 + i32.load offset=424 + i32.ne + br_if 3 (;@1;) + local.get 0 + local.get 2 + i32.store offset=416 + br 2 (;@2;) + end + local.get 0 + local.get 1 + i32.store offset=428 + local.get 0 + local.get 0 + i32.load offset=420 + local.get 2 + i32.add + local.tee 2 + i32.store offset=420 + local.get 1 + local.get 2 + i32.const 1 + i32.or + i32.store offset=4 + local.get 1 + local.get 0 + i32.load offset=424 + i32.ne + br_if 1 (;@2;) + local.get 0 + i32.const 0 + i32.store offset=416 + local.get 0 + i32.const 0 + i32.store offset=424 + return + end + local.get 0 + local.get 1 + i32.store offset=424 + local.get 0 + local.get 0 + i32.load offset=416 + local.get 2 + i32.add + local.tee 2 + i32.store offset=416 + local.get 1 + local.get 2 + call $_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E + return + end + return + end + block ;; label = @1 + local.get 2 + i32.const 256 + i32.lt_u + br_if 0 (;@1;) + local.get 0 + local.get 1 + local.get 2 + call 
$_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18insert_large_chunk17h8e77460818b80af0E + return + end + local.get 0 + local.get 2 + i32.const -8 + i32.and + i32.add + i32.const 144 + i32.add + local.set 3 + block ;; label = @1 + block ;; label = @2 + local.get 0 + i32.load offset=408 + local.tee 4 + i32.const 1 + local.get 2 + i32.const 3 + i32.shr_u + i32.shl + local.tee 2 + i32.and + i32.eqz + br_if 0 (;@2;) + local.get 3 + i32.load offset=8 + local.set 2 + br 1 (;@1;) + end + local.get 0 + local.get 4 + local.get 2 + i32.or + i32.store offset=408 + local.get 3 + local.set 2 + end + local.get 3 + local.get 1 + i32.store offset=8 + local.get 2 + local.get 1 + i32.store offset=12 + local.get 1 + local.get 3 + i32.store offset=12 + local.get 1 + local.get 2 + i32.store offset=8 + ) + (func $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E (;1;) (type 1) (param i32 i32) + (local i32 i32 i32 i32 i32) + local.get 1 + i32.load offset=24 + local.set 2 + block ;; label = @1 + block ;; label = @2 + block ;; label = @3 + local.get 1 + call $_ZN8dlmalloc8dlmalloc9TreeChunk4next17he250edbec5d87123E + local.get 1 + i32.ne + br_if 0 (;@3;) + local.get 1 + i32.const 20 + i32.const 16 + local.get 1 + i32.const 20 + i32.add + local.tee 3 + i32.load + local.tee 4 + select + i32.add + i32.load + local.tee 5 + br_if 1 (;@2;) + i32.const 0 + local.set 3 + br 2 (;@1;) + end + local.get 1 + call $_ZN8dlmalloc8dlmalloc9TreeChunk4prev17h7a0f1d46544cc14aE + local.tee 5 + local.get 1 + call $_ZN8dlmalloc8dlmalloc9TreeChunk4next17he250edbec5d87123E + local.tee 3 + call $_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E + i32.store offset=12 + local.get 3 + local.get 5 + call $_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E + i32.store offset=8 + br 1 (;@1;) + end + local.get 3 + local.get 1 + i32.const 16 + i32.add + local.get 4 + select + local.set 4 + loop ;; label = @2 + local.get 4 + local.set 6 + local.get 5 + local.tee 3 + i32.const 20 + i32.add + local.tee 5 + local.get 3 + i32.const 16 + i32.add + local.get 5 + i32.load + local.tee 5 + select + local.set 4 + local.get 3 + i32.const 20 + i32.const 16 + local.get 5 + select + i32.add + i32.load + local.tee 5 + br_if 0 (;@2;) + end + local.get 6 + i32.const 0 + i32.store + end + block ;; label = @1 + local.get 2 + i32.eqz + br_if 0 (;@1;) + block ;; label = @2 + block ;; label = @3 + local.get 0 + local.get 1 + i32.load offset=28 + local.tee 4 + i32.const 2 + i32.shl + i32.add + local.tee 5 + i32.load + local.get 1 + i32.eq + br_if 0 (;@3;) + local.get 2 + i32.const 16 + i32.const 20 + local.get 2 + i32.load offset=16 + local.get 1 + i32.eq + select + i32.add + local.get 3 + i32.store + local.get 3 + br_if 1 (;@2;) + br 2 (;@1;) + end + local.get 5 + local.get 3 + i32.store + local.get 3 + br_if 0 (;@2;) + local.get 0 + local.get 0 + i32.load offset=412 + i32.const -2 + local.get 4 + i32.rotl + i32.and + i32.store offset=412 + return + end + local.get 3 + local.get 2 + i32.store offset=24 + block ;; label = @2 + local.get 1 + i32.load offset=16 + local.tee 5 + i32.eqz + br_if 0 (;@2;) + local.get 3 + local.get 5 + i32.store offset=16 + local.get 5 + local.get 3 + i32.store offset=24 + end + local.get 1 + i32.const 20 + i32.add + i32.load + local.tee 5 + i32.eqz + br_if 0 (;@1;) + local.get 3 + i32.const 20 + i32.add + local.get 5 + i32.store + local.get 5 + local.get 3 + i32.store offset=24 + return + end + ) + (func $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18insert_large_chunk17h8e77460818b80af0E (;2;) (type 0) 
(param i32 i32 i32) + (local i32 i32 i32 i32 i32) + i32.const 0 + local.set 3 + block ;; label = @1 + local.get 2 + i32.const 256 + i32.lt_u + br_if 0 (;@1;) + i32.const 31 + local.set 3 + local.get 2 + i32.const 16777215 + i32.gt_u + br_if 0 (;@1;) + local.get 2 + i32.const 6 + local.get 2 + i32.const 8 + i32.shr_u + i32.clz + local.tee 3 + i32.sub + i32.shr_u + i32.const 1 + i32.and + local.get 3 + i32.const 1 + i32.shl + i32.sub + i32.const 62 + i32.add + local.set 3 + end + local.get 1 + i64.const 0 + i64.store offset=16 align=4 + local.get 1 + local.get 3 + i32.store offset=28 + local.get 0 + local.get 3 + i32.const 2 + i32.shl + i32.add + local.set 4 + local.get 1 + call $_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E + local.set 5 + block ;; label = @1 + block ;; label = @2 + local.get 0 + i32.load offset=412 + local.tee 6 + i32.const 1 + local.get 3 + i32.shl + local.tee 7 + i32.and + i32.eqz + br_if 0 (;@2;) + local.get 4 + i32.load + local.set 0 + local.get 2 + local.get 3 + call $_ZN8dlmalloc8dlmalloc24leftshift_for_tree_index17h31d064fdd867f502E + i32.shl + local.set 3 + loop ;; label = @3 + block ;; label = @4 + local.get 0 + local.tee 4 + call $_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E + call $_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E + local.get 2 + i32.ne + br_if 0 (;@4;) + local.get 4 + call $_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E + local.tee 3 + i32.load offset=8 + local.tee 0 + local.get 5 + i32.store offset=12 + local.get 3 + local.get 5 + i32.store offset=8 + local.get 5 + local.get 3 + i32.store offset=12 + local.get 5 + local.get 0 + i32.store offset=8 + local.get 1 + i32.const 0 + i32.store offset=24 + return + end + local.get 3 + i32.const 29 + i32.shr_u + local.set 0 + local.get 3 + i32.const 1 + i32.shl + local.set 3 + local.get 4 + local.get 0 + i32.const 4 + i32.and + i32.add + i32.const 16 + i32.add + local.tee 6 + i32.load + local.tee 0 + br_if 0 (;@3;) + end + local.get 6 + local.get 1 + i32.store + local.get 1 + local.get 4 + i32.store offset=24 + br 1 (;@1;) + end + local.get 0 + local.get 6 + local.get 7 + i32.or + i32.store offset=412 + local.get 1 + local.get 4 + i32.store offset=24 + local.get 4 + local.get 1 + i32.store + end + local.get 5 + local.get 5 + i32.store offset=8 + local.get 5 + local.get 5 + i32.store offset=12 + ) + (func $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$23release_unused_segments17h25622465f0742468E (;3;) (type 2) (param i32) (result i32) + (local i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32) + block ;; label = @1 + block ;; label = @2 + local.get 0 + i32.const 136 + i32.add + i32.load + local.tee 1 + br_if 0 (;@2;) + i32.const 0 + local.set 2 + i32.const 0 + local.set 3 + br 1 (;@1;) + end + local.get 0 + i32.const 128 + i32.add + local.set 4 + i32.const 0 + local.set 3 + i32.const 0 + local.set 2 + loop ;; label = @2 + local.get 1 + local.tee 5 + i32.load offset=8 + local.set 1 + local.get 5 + i32.load offset=4 + local.set 6 + local.get 5 + i32.load + local.set 7 + block ;; label = @3 + block ;; label = @4 + local.get 0 + local.get 5 + i32.load offset=12 + i32.const 1 + i32.shr_u + call $_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$16can_release_part17h43bfb7d8666fcc31E + i32.eqz + br_if 0 (;@4;) + local.get 5 + call $_ZN8dlmalloc8dlmalloc7Segment9is_extern17h6f6db2c70b891fd9E + br_if 0 (;@4;) + local.get 7 + local.get 7 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.tee 8 + i32.const 8 + call 
$_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.get 8 + i32.sub + i32.add + local.tee 8 + call $_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E + local.set 9 + call $_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E + local.tee 10 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 11 + i32.const 20 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 12 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 13 + local.get 8 + call $_ZN8dlmalloc8dlmalloc5Chunk5inuse17h2d327e4c36b84dfeE + br_if 0 (;@4;) + local.get 8 + local.get 9 + i32.add + local.get 7 + local.get 10 + local.get 6 + i32.add + local.get 11 + local.get 12 + i32.add + local.get 13 + i32.add + i32.sub + i32.add + i32.lt_u + br_if 0 (;@4;) + block ;; label = @5 + block ;; label = @6 + local.get 8 + local.get 0 + i32.load offset=424 + i32.eq + br_if 0 (;@6;) + local.get 0 + local.get 8 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E + br 1 (;@5;) + end + local.get 0 + i32.const 0 + i32.store offset=416 + local.get 0 + i32.const 0 + i32.store offset=424 + end + block ;; label = @5 + local.get 0 + local.get 7 + local.get 6 + call $_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$4free17h993c5f05ba1214bcE + br_if 0 (;@5;) + local.get 0 + local.get 8 + local.get 9 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18insert_large_chunk17h8e77460818b80af0E + br 1 (;@4;) + end + local.get 0 + local.get 0 + i32.load offset=432 + local.get 6 + i32.sub + i32.store offset=432 + local.get 4 + local.get 1 + i32.store offset=8 + local.get 6 + local.get 3 + i32.add + local.set 3 + br 1 (;@3;) + end + local.get 5 + local.set 4 + end + local.get 2 + i32.const 1 + i32.add + local.set 2 + local.get 1 + br_if 0 (;@2;) + end + end + local.get 0 + local.get 2 + i32.const 4095 + local.get 2 + i32.const 4095 + i32.gt_u + select + i32.store offset=448 + local.get 3 + ) + (func $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$4free17h501de2a6604ba1ffE (;4;) (type 1) (param i32 i32) + (local i32 i32 i32 i32 i32 i32) + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk8from_mem17h11dd30c74f483706E + local.set 1 + local.get 1 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E + local.tee 2 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 3 + block ;; label = @1 + block ;; label = @2 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk6pinuse17h92d5107047b03ba7E + br_if 0 (;@2;) + local.get 1 + i32.load + local.set 4 + block ;; label = @3 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk7mmapped17h1a9959fbf47496c3E + br_if 0 (;@3;) + local.get 4 + local.get 2 + i32.add + local.set 2 + block ;; label = @4 + local.get 1 + local.get 4 + call $_ZN8dlmalloc8dlmalloc5Chunk12minus_offset17h7c3eec81761249d9E + local.tee 1 + local.get 0 + i32.load offset=424 + i32.ne + br_if 0 (;@4;) + local.get 3 + i32.load offset=4 + i32.const 3 + i32.and + i32.const 3 + i32.ne + br_if 2 (;@2;) + local.get 0 + local.get 2 + i32.store offset=416 + local.get 1 + local.get 2 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk20set_free_with_pinuse17h5d876ea751634e99E + return + end + block ;; label = @4 + local.get 4 + i32.const 256 + i32.lt_u + br_if 0 (;@4;) + local.get 0 + local.get 1 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E + br 2 (;@2;) + end + block ;; label = @4 + local.get 1 + i32.load offset=12 + 
local.tee 5 + local.get 1 + i32.load offset=8 + local.tee 6 + i32.eq + br_if 0 (;@4;) + local.get 6 + local.get 5 + i32.store offset=12 + local.get 5 + local.get 6 + i32.store offset=8 + br 2 (;@2;) + end + local.get 0 + local.get 0 + i32.load offset=408 + i32.const -2 + local.get 4 + i32.const 3 + i32.shr_u + i32.rotl + i32.and + i32.store offset=408 + br 1 (;@2;) + end + local.get 0 + local.get 1 + local.get 4 + i32.sub + local.get 2 + local.get 4 + i32.add + i32.const 16 + i32.add + local.tee 1 + call $_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$4free17h993c5f05ba1214bcE + i32.eqz + br_if 1 (;@1;) + local.get 0 + local.get 0 + i32.load offset=432 + local.get 1 + i32.sub + i32.store offset=432 + return + end + block ;; label = @2 + block ;; label = @3 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk6cinuse17h58499de57c2d37e2E + i32.eqz + br_if 0 (;@3;) + local.get 1 + local.get 2 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk20set_free_with_pinuse17h5d876ea751634e99E + br 1 (;@2;) + end + block ;; label = @3 + block ;; label = @4 + block ;; label = @5 + block ;; label = @6 + local.get 3 + local.get 0 + i32.load offset=428 + i32.eq + br_if 0 (;@6;) + local.get 3 + local.get 0 + i32.load offset=424 + i32.eq + br_if 1 (;@5;) + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E + local.tee 4 + local.get 2 + i32.add + local.set 2 + block ;; label = @7 + block ;; label = @8 + local.get 4 + i32.const 256 + i32.lt_u + br_if 0 (;@8;) + local.get 0 + local.get 3 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E + br 1 (;@7;) + end + block ;; label = @8 + local.get 3 + i32.load offset=12 + local.tee 5 + local.get 3 + i32.load offset=8 + local.tee 3 + i32.eq + br_if 0 (;@8;) + local.get 3 + local.get 5 + i32.store offset=12 + local.get 5 + local.get 3 + i32.store offset=8 + br 1 (;@7;) + end + local.get 0 + local.get 0 + i32.load offset=408 + i32.const -2 + local.get 4 + i32.const 3 + i32.shr_u + i32.rotl + i32.and + i32.store offset=408 + end + local.get 1 + local.get 2 + call $_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E + local.get 1 + local.get 0 + i32.load offset=424 + i32.ne + br_if 4 (;@2;) + local.get 0 + local.get 2 + i32.store offset=416 + return + end + local.get 0 + local.get 1 + i32.store offset=428 + local.get 0 + local.get 0 + i32.load offset=420 + local.get 2 + i32.add + local.tee 2 + i32.store offset=420 + local.get 1 + local.get 2 + i32.const 1 + i32.or + i32.store offset=4 + local.get 1 + local.get 0 + i32.load offset=424 + i32.eq + br_if 1 (;@4;) + br 2 (;@3;) + end + local.get 0 + local.get 1 + i32.store offset=424 + local.get 0 + local.get 0 + i32.load offset=416 + local.get 2 + i32.add + local.tee 2 + i32.store offset=416 + local.get 1 + local.get 2 + call $_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E + return + end + local.get 0 + i32.const 0 + i32.store offset=416 + local.get 0 + i32.const 0 + i32.store offset=424 + end + local.get 2 + local.get 0 + i32.load offset=440 + i32.le_u + br_if 1 (;@1;) + call $_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E + local.tee 1 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 2 + i32.const 20 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 3 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 4 + i32.const 0 + i32.const 16 + i32.const 8 + call 
$_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + i32.const 2 + i32.shl + i32.sub + local.tee 5 + local.get 1 + local.get 4 + local.get 2 + local.get 3 + i32.add + i32.add + i32.sub + i32.const -65544 + i32.add + i32.const -9 + i32.and + i32.const -3 + i32.add + local.tee 1 + local.get 5 + local.get 1 + i32.lt_u + select + i32.eqz + br_if 1 (;@1;) + local.get 0 + i32.load offset=428 + local.tee 2 + i32.eqz + br_if 1 (;@1;) + call $_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E + local.tee 1 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 3 + i32.const 20 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 5 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 6 + i32.const 0 + local.set 4 + block ;; label = @3 + local.get 0 + i32.load offset=420 + local.tee 7 + local.get 6 + local.get 5 + local.get 3 + local.get 1 + i32.sub + i32.add + i32.add + local.tee 1 + i32.le_u + br_if 0 (;@3;) + local.get 7 + local.get 1 + i32.sub + i32.const 65535 + i32.add + i32.const -65536 + i32.and + local.tee 6 + i32.const -65536 + i32.add + local.set 5 + local.get 0 + i32.const 128 + i32.add + local.tee 3 + local.set 1 + block ;; label = @4 + loop ;; label = @5 + block ;; label = @6 + local.get 1 + i32.load + local.get 2 + i32.gt_u + br_if 0 (;@6;) + local.get 1 + call $_ZN8dlmalloc8dlmalloc7Segment3top17he7e9e2493151d036E + local.get 2 + i32.gt_u + br_if 2 (;@4;) + end + local.get 1 + i32.load offset=8 + local.tee 1 + br_if 0 (;@5;) + end + i32.const 0 + local.set 1 + end + i32.const 0 + local.set 4 + local.get 1 + call $_ZN8dlmalloc8dlmalloc7Segment9is_extern17h6f6db2c70b891fd9E + br_if 0 (;@3;) + i32.const 0 + local.set 4 + local.get 0 + local.get 1 + i32.load offset=12 + i32.const 1 + i32.shr_u + call $_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$16can_release_part17h43bfb7d8666fcc31E + i32.eqz + br_if 0 (;@3;) + i32.const 0 + local.set 4 + local.get 1 + i32.load offset=4 + local.get 5 + i32.lt_u + br_if 0 (;@3;) + loop ;; label = @4 + block ;; label = @5 + local.get 1 + local.get 3 + call $_ZN8dlmalloc8dlmalloc7Segment5holds17h8f6de4ee6718009bE + i32.eqz + br_if 0 (;@5;) + i32.const 0 + local.set 4 + br 2 (;@3;) + end + local.get 3 + i32.load offset=8 + local.tee 3 + br_if 0 (;@4;) + end + local.get 0 + local.get 1 + i32.load + local.get 1 + i32.load offset=4 + local.tee 2 + local.get 2 + local.get 5 + i32.sub + call $_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$9free_part17h74489c9e7a3aa967E + local.set 2 + i32.const 0 + local.set 4 + local.get 5 + i32.eqz + br_if 0 (;@3;) + i32.const 0 + local.set 4 + local.get 2 + i32.eqz + br_if 0 (;@3;) + local.get 1 + local.get 1 + i32.load offset=4 + local.get 5 + i32.sub + i32.store offset=4 + local.get 0 + local.get 0 + i32.load offset=432 + local.get 5 + i32.sub + i32.store offset=432 + local.get 0 + i32.load offset=420 + local.set 2 + local.get 0 + i32.load offset=428 + local.set 1 + local.get 0 + local.get 1 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.tee 3 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.get 3 + i32.sub + local.tee 3 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.tee 1 + i32.store offset=428 + local.get 0 + local.get 2 + local.get 6 + local.get 3 + i32.add + i32.sub + i32.const 65536 + i32.add + local.tee 2 + i32.store offset=420 + local.get 1 + local.get 2 + 
i32.const 1 + i32.or + i32.store offset=4 + call $_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E + local.tee 3 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 4 + i32.const 20 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 6 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 7 + local.get 1 + local.get 2 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 1 + local.get 0 + i32.const 2097152 + i32.store offset=440 + local.get 1 + local.get 7 + local.get 6 + local.get 4 + local.get 3 + i32.sub + i32.add + i32.add + i32.store offset=4 + local.get 5 + local.set 4 + end + local.get 0 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$23release_unused_segments17h25622465f0742468E + i32.const 0 + local.get 4 + i32.sub + i32.ne + br_if 1 (;@1;) + local.get 0 + i32.load offset=420 + local.get 0 + i32.load offset=440 + i32.le_u + br_if 1 (;@1;) + local.get 0 + i32.const -1 + i32.store offset=440 + return + end + block ;; label = @2 + local.get 2 + i32.const 256 + i32.lt_u + br_if 0 (;@2;) + local.get 0 + local.get 1 + local.get 2 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18insert_large_chunk17h8e77460818b80af0E + local.get 0 + local.get 0 + i32.load offset=448 + i32.const -1 + i32.add + local.tee 1 + i32.store offset=448 + local.get 1 + br_if 1 (;@1;) + local.get 0 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$23release_unused_segments17h25622465f0742468E + drop + return + end + local.get 0 + local.get 2 + i32.const -8 + i32.and + i32.add + i32.const 144 + i32.add + local.set 3 + block ;; label = @2 + block ;; label = @3 + local.get 0 + i32.load offset=408 + local.tee 4 + i32.const 1 + local.get 2 + i32.const 3 + i32.shr_u + i32.shl + local.tee 2 + i32.and + i32.eqz + br_if 0 (;@3;) + local.get 3 + i32.load offset=8 + local.set 0 + br 1 (;@2;) + end + local.get 0 + local.get 4 + local.get 2 + i32.or + i32.store offset=408 + local.get 3 + local.set 0 + end + local.get 3 + local.get 1 + i32.store offset=8 + local.get 0 + local.get 1 + i32.store offset=12 + local.get 1 + local.get 3 + i32.store offset=12 + local.get 1 + local.get 0 + i32.store offset=8 + end + ) + (func $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$6malloc17h1ae2390053e3628cE (;5;) (type 3) (param i32 i32) (result i32) + (local i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i64) + global.get $__stack_pointer + i32.const 16 + i32.sub + local.tee 2 + global.set $__stack_pointer + block ;; label = @1 + block ;; label = @2 + block ;; label = @3 + block ;; label = @4 + block ;; label = @5 + block ;; label = @6 + local.get 1 + i32.const 245 + i32.lt_u + br_if 0 (;@6;) + call $_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E + local.tee 3 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 4 + i32.const 20 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 5 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 6 + i32.const 0 + local.set 7 + i32.const 0 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + i32.const 2 + i32.shl + i32.sub + local.tee 8 + local.get 3 + local.get 6 + local.get 4 + local.get 5 + i32.add + i32.add + i32.sub + i32.const -65544 + i32.add + i32.const -9 + i32.and + i32.const -3 + i32.add + local.tee 3 + local.get 8 + local.get 3 + i32.lt_u + select + local.get 1 + 
i32.le_u + br_if 5 (;@1;) + local.get 1 + i32.const 4 + i32.add + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 3 + local.get 0 + i32.load offset=412 + local.tee 9 + i32.eqz + br_if 4 (;@2;) + i32.const 0 + local.set 1 + i32.const 0 + local.set 10 + block ;; label = @7 + local.get 3 + i32.const 256 + i32.lt_u + br_if 0 (;@7;) + i32.const 31 + local.set 10 + local.get 3 + i32.const 16777215 + i32.gt_u + br_if 0 (;@7;) + local.get 3 + i32.const 6 + local.get 3 + i32.const 8 + i32.shr_u + i32.clz + local.tee 4 + i32.sub + i32.shr_u + i32.const 1 + i32.and + local.get 4 + i32.const 1 + i32.shl + i32.sub + i32.const 62 + i32.add + local.set 10 + end + i32.const 0 + local.get 3 + i32.sub + local.set 4 + block ;; label = @7 + local.get 0 + local.get 10 + i32.const 2 + i32.shl + i32.add + i32.load + local.tee 7 + br_if 0 (;@7;) + i32.const 0 + local.set 5 + br 2 (;@5;) + end + local.get 3 + local.get 10 + call $_ZN8dlmalloc8dlmalloc24leftshift_for_tree_index17h31d064fdd867f502E + i32.shl + local.set 6 + i32.const 0 + local.set 1 + i32.const 0 + local.set 5 + loop ;; label = @7 + block ;; label = @8 + local.get 7 + call $_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E + call $_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E + local.tee 8 + local.get 3 + i32.lt_u + br_if 0 (;@8;) + local.get 8 + local.get 3 + i32.sub + local.tee 8 + local.get 4 + i32.ge_u + br_if 0 (;@8;) + local.get 8 + local.set 4 + local.get 7 + local.set 5 + local.get 8 + br_if 0 (;@8;) + i32.const 0 + local.set 4 + local.get 7 + local.set 5 + local.get 7 + local.set 1 + br 4 (;@4;) + end + local.get 7 + i32.const 20 + i32.add + i32.load + local.tee 8 + local.get 1 + local.get 8 + local.get 7 + local.get 6 + i32.const 29 + i32.shr_u + i32.const 4 + i32.and + i32.add + i32.const 16 + i32.add + i32.load + local.tee 7 + i32.ne + select + local.get 1 + local.get 8 + select + local.set 1 + local.get 6 + i32.const 1 + i32.shl + local.set 6 + local.get 7 + i32.eqz + br_if 2 (;@5;) + br 0 (;@7;) + end + end + i32.const 16 + local.get 1 + i32.const 4 + i32.add + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + i32.const -5 + i32.add + local.get 1 + i32.gt_u + select + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 3 + block ;; label = @6 + local.get 0 + i32.load offset=408 + local.tee 5 + local.get 3 + i32.const 3 + i32.shr_u + local.tee 4 + i32.shr_u + local.tee 1 + i32.const 3 + i32.and + i32.eqz + br_if 0 (;@6;) + block ;; label = @7 + block ;; label = @8 + local.get 0 + local.get 1 + i32.const -1 + i32.xor + i32.const 1 + i32.and + local.get 4 + i32.add + local.tee 3 + i32.const 3 + i32.shl + i32.add + local.tee 7 + i32.const 152 + i32.add + i32.load + local.tee 1 + i32.load offset=8 + local.tee 4 + local.get 7 + i32.const 144 + i32.add + local.tee 7 + i32.eq + br_if 0 (;@8;) + local.get 4 + local.get 7 + i32.store offset=12 + local.get 7 + local.get 4 + i32.store offset=8 + br 1 (;@7;) + end + local.get 0 + local.get 5 + i32.const -2 + local.get 3 + i32.rotl + i32.and + i32.store offset=408 + end + local.get 1 + local.get 3 + i32.const 3 + i32.shl + call $_ZN8dlmalloc8dlmalloc5Chunk20set_inuse_and_pinuse17h515e5a69a6d1edc6E + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.set 7 + br 5 (;@1;) + end + local.get 3 + local.get 0 + i32.load offset=416 + i32.le_u + br_if 3 (;@2;) + block ;; label = @6 + block ;; label = @7 + block ;; label = @8 + block ;; label = @9 + block ;; 
label = @10 + block ;; label = @11 + block ;; label = @12 + local.get 1 + br_if 0 (;@12;) + local.get 0 + i32.load offset=412 + local.tee 1 + i32.eqz + br_if 10 (;@2;) + local.get 0 + local.get 1 + call $_ZN8dlmalloc8dlmalloc9least_bit17h4bca52ead665dc5aE + i32.ctz + i32.const 2 + i32.shl + i32.add + i32.load + local.tee 7 + call $_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E + call $_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E + local.get 3 + i32.sub + local.set 4 + block ;; label = @13 + local.get 7 + call $_ZN8dlmalloc8dlmalloc9TreeChunk14leftmost_child17h20605933c801b44bE + local.tee 1 + i32.eqz + br_if 0 (;@13;) + loop ;; label = @14 + local.get 1 + call $_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E + call $_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E + local.get 3 + i32.sub + local.tee 5 + local.get 4 + local.get 5 + local.get 4 + i32.lt_u + local.tee 5 + select + local.set 4 + local.get 1 + local.get 7 + local.get 5 + select + local.set 7 + local.get 1 + call $_ZN8dlmalloc8dlmalloc9TreeChunk14leftmost_child17h20605933c801b44bE + local.tee 1 + br_if 0 (;@14;) + end + end + local.get 7 + call $_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E + local.tee 1 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 5 + local.get 0 + local.get 7 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E + local.get 4 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + i32.lt_u + br_if 2 (;@10;) + local.get 5 + call $_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E + local.set 5 + local.get 1 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk34set_size_and_pinuse_of_inuse_chunk17h4acf6d59020bd397E + local.get 5 + local.get 4 + call $_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E + local.get 0 + i32.load offset=416 + local.tee 8 + br_if 1 (;@11;) + br 5 (;@7;) + end + block ;; label = @12 + block ;; label = @13 + local.get 0 + i32.const 144 + i32.add + local.tee 8 + i32.const 1 + local.get 4 + i32.const 31 + i32.and + local.tee 4 + i32.shl + call $_ZN8dlmalloc8dlmalloc9left_bits17hb6cbe146b8019d98E + local.get 1 + local.get 4 + i32.shl + i32.and + call $_ZN8dlmalloc8dlmalloc9least_bit17h4bca52ead665dc5aE + i32.ctz + local.tee 7 + i32.const 3 + i32.shl + i32.add + local.tee 4 + i32.load offset=8 + local.tee 1 + i32.load offset=8 + local.tee 5 + local.get 4 + i32.eq + br_if 0 (;@13;) + local.get 5 + local.get 4 + i32.store offset=12 + local.get 4 + local.get 5 + i32.store offset=8 + br 1 (;@12;) + end + local.get 0 + local.get 0 + i32.load offset=408 + i32.const -2 + local.get 7 + i32.rotl + i32.and + i32.store offset=408 + end + local.get 1 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk34set_size_and_pinuse_of_inuse_chunk17h4acf6d59020bd397E + local.get 1 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.tee 5 + local.get 7 + i32.const 3 + i32.shl + local.get 3 + i32.sub + local.tee 6 + call $_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E + local.get 0 + i32.load offset=416 + local.tee 7 + br_if 2 (;@9;) + br 3 (;@8;) + end + local.get 0 + local.get 8 + i32.const -8 + i32.and + i32.add + i32.const 144 + i32.add + local.set 6 + local.get 0 + i32.load offset=424 + local.set 7 + block ;; label = @11 + block ;; label = @12 + local.get 0 + i32.load offset=408 + local.tee 10 + i32.const 1 + local.get 8 + i32.const 3 + 
i32.shr_u + i32.shl + local.tee 8 + i32.and + i32.eqz + br_if 0 (;@12;) + local.get 6 + i32.load offset=8 + local.set 8 + br 1 (;@11;) + end + local.get 0 + local.get 10 + local.get 8 + i32.or + i32.store offset=408 + local.get 6 + local.set 8 + end + local.get 6 + local.get 7 + i32.store offset=8 + local.get 8 + local.get 7 + i32.store offset=12 + local.get 7 + local.get 6 + i32.store offset=12 + local.get 7 + local.get 8 + i32.store offset=8 + br 3 (;@7;) + end + local.get 1 + local.get 4 + local.get 3 + i32.add + call $_ZN8dlmalloc8dlmalloc5Chunk20set_inuse_and_pinuse17h515e5a69a6d1edc6E + br 3 (;@6;) + end + local.get 8 + local.get 7 + i32.const -8 + i32.and + i32.add + local.set 4 + local.get 0 + i32.load offset=424 + local.set 3 + block ;; label = @9 + block ;; label = @10 + local.get 0 + i32.load offset=408 + local.tee 8 + i32.const 1 + local.get 7 + i32.const 3 + i32.shr_u + i32.shl + local.tee 7 + i32.and + i32.eqz + br_if 0 (;@10;) + local.get 4 + i32.load offset=8 + local.set 7 + br 1 (;@9;) + end + local.get 0 + local.get 8 + local.get 7 + i32.or + i32.store offset=408 + local.get 4 + local.set 7 + end + local.get 4 + local.get 3 + i32.store offset=8 + local.get 7 + local.get 3 + i32.store offset=12 + local.get 3 + local.get 4 + i32.store offset=12 + local.get 3 + local.get 7 + i32.store offset=8 + end + local.get 0 + local.get 5 + i32.store offset=424 + local.get 0 + local.get 6 + i32.store offset=416 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.set 7 + br 6 (;@1;) + end + local.get 0 + local.get 5 + i32.store offset=424 + local.get 0 + local.get 4 + i32.store offset=416 + end + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.tee 7 + i32.eqz + br_if 3 (;@2;) + br 4 (;@1;) + end + block ;; label = @5 + local.get 1 + local.get 5 + i32.or + br_if 0 (;@5;) + i32.const 1 + local.get 10 + i32.shl + call $_ZN8dlmalloc8dlmalloc9left_bits17hb6cbe146b8019d98E + local.get 9 + i32.and + local.tee 1 + i32.eqz + br_if 3 (;@2;) + local.get 0 + local.get 1 + call $_ZN8dlmalloc8dlmalloc9least_bit17h4bca52ead665dc5aE + i32.ctz + i32.const 2 + i32.shl + i32.add + i32.load + local.set 1 + i32.const 0 + local.set 5 + end + local.get 1 + i32.eqz + br_if 1 (;@3;) + end + loop ;; label = @4 + local.get 1 + local.get 5 + local.get 1 + call $_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E + call $_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E + local.tee 7 + local.get 3 + i32.ge_u + local.get 7 + local.get 3 + i32.sub + local.tee 7 + local.get 4 + i32.lt_u + i32.and + local.tee 6 + select + local.set 5 + local.get 7 + local.get 4 + local.get 6 + select + local.set 4 + local.get 1 + call $_ZN8dlmalloc8dlmalloc9TreeChunk14leftmost_child17h20605933c801b44bE + local.tee 1 + br_if 0 (;@4;) + end + end + local.get 5 + i32.eqz + br_if 0 (;@2;) + block ;; label = @3 + local.get 0 + i32.load offset=416 + local.tee 1 + local.get 3 + i32.lt_u + br_if 0 (;@3;) + local.get 4 + local.get 1 + local.get 3 + i32.sub + i32.ge_u + br_if 1 (;@2;) + end + local.get 5 + call $_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E + local.tee 1 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 7 + local.get 0 + local.get 5 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E + block ;; label = @3 + block ;; label = @4 + local.get 4 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + i32.lt_u + br_if 0 (;@4;) + 
local.get 1 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk34set_size_and_pinuse_of_inuse_chunk17h4acf6d59020bd397E + local.get 7 + local.get 4 + call $_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E + block ;; label = @5 + local.get 4 + i32.const 256 + i32.lt_u + br_if 0 (;@5;) + local.get 0 + local.get 7 + local.get 4 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18insert_large_chunk17h8e77460818b80af0E + br 2 (;@3;) + end + local.get 0 + local.get 4 + i32.const -8 + i32.and + i32.add + i32.const 144 + i32.add + local.set 5 + block ;; label = @5 + block ;; label = @6 + local.get 0 + i32.load offset=408 + local.tee 6 + i32.const 1 + local.get 4 + i32.const 3 + i32.shr_u + i32.shl + local.tee 4 + i32.and + i32.eqz + br_if 0 (;@6;) + local.get 5 + i32.load offset=8 + local.set 4 + br 1 (;@5;) + end + local.get 0 + local.get 6 + local.get 4 + i32.or + i32.store offset=408 + local.get 5 + local.set 4 + end + local.get 5 + local.get 7 + i32.store offset=8 + local.get 4 + local.get 7 + i32.store offset=12 + local.get 7 + local.get 5 + i32.store offset=12 + local.get 7 + local.get 4 + i32.store offset=8 + br 1 (;@3;) + end + local.get 1 + local.get 4 + local.get 3 + i32.add + call $_ZN8dlmalloc8dlmalloc5Chunk20set_inuse_and_pinuse17h515e5a69a6d1edc6E + end + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.tee 7 + br_if 1 (;@1;) + end + block ;; label = @2 + block ;; label = @3 + block ;; label = @4 + block ;; label = @5 + block ;; label = @6 + block ;; label = @7 + block ;; label = @8 + block ;; label = @9 + local.get 0 + i32.load offset=416 + local.tee 4 + local.get 3 + i32.ge_u + br_if 0 (;@9;) + block ;; label = @10 + local.get 0 + i32.load offset=420 + local.tee 1 + local.get 3 + i32.gt_u + br_if 0 (;@10;) + local.get 2 + i32.const 4 + i32.add + local.get 0 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E + local.tee 1 + i32.sub + local.get 1 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + i32.add + i32.const 20 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + i32.add + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + i32.add + i32.const 8 + i32.add + i32.const 65536 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + call $_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$5alloc17hdbf1e2bcc01bc909E + i32.const 0 + local.set 7 + local.get 2 + i32.load offset=4 + local.tee 8 + i32.eqz + br_if 9 (;@1;) + local.get 2 + i32.load offset=12 + local.set 11 + local.get 0 + local.get 0 + i32.load offset=432 + local.get 2 + i32.load offset=8 + local.tee 10 + i32.add + local.tee 1 + i32.store offset=432 + local.get 0 + local.get 0 + i32.load offset=436 + local.tee 4 + local.get 1 + local.get 4 + local.get 1 + i32.gt_u + select + i32.store offset=436 + block ;; label = @11 + local.get 0 + i32.load offset=428 + br_if 0 (;@11;) + local.get 0 + i32.load offset=444 + local.tee 1 + i32.eqz + br_if 3 (;@8;) + local.get 8 + local.get 1 + i32.lt_u + br_if 3 (;@8;) + br 8 (;@3;) + end + local.get 0 + i32.const 128 + i32.add + local.tee 9 + local.set 1 + block ;; label = @11 + block ;; label = @12 + loop ;; label = @13 + local.get 8 + local.get 1 + call $_ZN8dlmalloc8dlmalloc7Segment3top17he7e9e2493151d036E + i32.eq + br_if 1 (;@12;) + local.get 1 + i32.load offset=8 + local.tee 1 + br_if 0 (;@13;) + br 2 (;@11;) + end + end + local.get 1 + call 
$_ZN8dlmalloc8dlmalloc7Segment9is_extern17h6f6db2c70b891fd9E + br_if 0 (;@11;) + local.get 1 + call $_ZN8dlmalloc8dlmalloc7Segment9sys_flags17h224550055bf7775bE + local.get 11 + i32.ne + br_if 0 (;@11;) + local.get 1 + local.get 0 + i32.load offset=428 + call $_ZN8dlmalloc8dlmalloc7Segment5holds17h8f6de4ee6718009bE + br_if 4 (;@7;) + end + local.get 0 + local.get 0 + i32.load offset=444 + local.tee 1 + local.get 8 + local.get 8 + local.get 1 + i32.gt_u + select + i32.store offset=444 + local.get 8 + local.get 10 + i32.add + local.set 4 + local.get 9 + local.set 1 + block ;; label = @11 + block ;; label = @12 + block ;; label = @13 + loop ;; label = @14 + local.get 1 + i32.load + local.get 4 + i32.eq + br_if 1 (;@13;) + local.get 1 + i32.load offset=8 + local.tee 1 + br_if 0 (;@14;) + br 2 (;@12;) + end + end + local.get 1 + call $_ZN8dlmalloc8dlmalloc7Segment9is_extern17h6f6db2c70b891fd9E + br_if 0 (;@12;) + local.get 1 + call $_ZN8dlmalloc8dlmalloc7Segment9sys_flags17h224550055bf7775bE + local.get 11 + i32.eq + br_if 1 (;@11;) + end + local.get 0 + i32.load offset=428 + local.set 5 + local.get 9 + local.set 1 + block ;; label = @12 + loop ;; label = @13 + block ;; label = @14 + local.get 1 + i32.load + local.get 5 + i32.gt_u + br_if 0 (;@14;) + local.get 1 + call $_ZN8dlmalloc8dlmalloc7Segment3top17he7e9e2493151d036E + local.get 5 + i32.gt_u + br_if 2 (;@12;) + end + local.get 1 + i32.load offset=8 + local.tee 1 + br_if 0 (;@13;) + end + i32.const 0 + local.set 1 + end + local.get 1 + call $_ZN8dlmalloc8dlmalloc7Segment3top17he7e9e2493151d036E + local.tee 6 + i32.const 20 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.tee 12 + i32.sub + i32.const -23 + i32.add + local.set 1 + local.get 5 + local.get 1 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.tee 4 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.get 4 + i32.sub + i32.add + local.tee 1 + local.get 1 + local.get 5 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + i32.add + i32.lt_u + select + local.tee 13 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.set 4 + local.get 13 + local.get 12 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 1 + call $_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E + local.tee 14 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 15 + i32.const 20 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 16 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 17 + local.get 0 + local.get 8 + local.get 8 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.tee 18 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.get 18 + i32.sub + local.tee 19 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.tee 18 + i32.store offset=428 + local.get 0 + local.get 14 + local.get 10 + i32.add + local.get 17 + local.get 15 + local.get 16 + i32.add + i32.add + local.get 19 + i32.add + i32.sub + local.tee 14 + i32.store offset=420 + local.get 18 + local.get 14 + i32.const 1 + i32.or + i32.store offset=4 + call $_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E + local.tee 15 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 16 + i32.const 20 + i32.const 8 + call 
$_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 17 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 19 + local.get 18 + local.get 14 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 18 + local.get 0 + i32.const 2097152 + i32.store offset=440 + local.get 18 + local.get 19 + local.get 17 + local.get 16 + local.get 15 + i32.sub + i32.add + i32.add + i32.store offset=4 + local.get 13 + local.get 12 + call $_ZN8dlmalloc8dlmalloc5Chunk34set_size_and_pinuse_of_inuse_chunk17h4acf6d59020bd397E + local.get 9 + i64.load align=4 + local.set 20 + local.get 4 + i32.const 8 + i32.add + local.get 9 + i32.const 8 + i32.add + i64.load align=4 + i64.store align=4 + local.get 4 + local.get 20 + i64.store align=4 + local.get 0 + i32.const 140 + i32.add + local.get 11 + i32.store + local.get 0 + i32.const 132 + i32.add + local.get 10 + i32.store + local.get 0 + local.get 8 + i32.store offset=128 + local.get 0 + i32.const 136 + i32.add + local.get 4 + i32.store + loop ;; label = @12 + local.get 1 + i32.const 4 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 4 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk14fencepost_head17he07aaa52f3b50dfdE + i32.store offset=4 + local.get 4 + local.set 1 + local.get 4 + i32.const 4 + i32.add + local.get 6 + i32.lt_u + br_if 0 (;@12;) + end + local.get 13 + local.get 5 + i32.eq + br_if 9 (;@2;) + local.get 13 + local.get 5 + i32.sub + local.set 1 + local.get 5 + local.get 1 + local.get 5 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + call $_ZN8dlmalloc8dlmalloc5Chunk20set_free_with_pinuse17h5d876ea751634e99E + block ;; label = @12 + local.get 1 + i32.const 256 + i32.lt_u + br_if 0 (;@12;) + local.get 0 + local.get 5 + local.get 1 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18insert_large_chunk17h8e77460818b80af0E + br 10 (;@2;) + end + local.get 0 + local.get 1 + i32.const -8 + i32.and + i32.add + i32.const 144 + i32.add + local.set 4 + block ;; label = @12 + block ;; label = @13 + local.get 0 + i32.load offset=408 + local.tee 6 + i32.const 1 + local.get 1 + i32.const 3 + i32.shr_u + i32.shl + local.tee 1 + i32.and + i32.eqz + br_if 0 (;@13;) + local.get 4 + i32.load offset=8 + local.set 1 + br 1 (;@12;) + end + local.get 0 + local.get 6 + local.get 1 + i32.or + i32.store offset=408 + local.get 4 + local.set 1 + end + local.get 4 + local.get 5 + i32.store offset=8 + local.get 1 + local.get 5 + i32.store offset=12 + local.get 5 + local.get 4 + i32.store offset=12 + local.get 5 + local.get 1 + i32.store offset=8 + br 9 (;@2;) + end + local.get 1 + i32.load + local.set 5 + local.get 1 + local.get 8 + i32.store + local.get 1 + local.get 1 + i32.load offset=4 + local.get 10 + i32.add + i32.store offset=4 + local.get 8 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.tee 1 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 4 + local.get 5 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.tee 6 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 10 + local.get 8 + local.get 4 + local.get 1 + i32.sub + i32.add + local.tee 4 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 7 + local.get 4 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk34set_size_and_pinuse_of_inuse_chunk17h4acf6d59020bd397E + local.get 5 + local.get 10 + local.get 6 + i32.sub + i32.add + 
local.tee 1 + local.get 3 + local.get 4 + i32.add + i32.sub + local.set 3 + block ;; label = @11 + local.get 1 + local.get 0 + i32.load offset=428 + i32.eq + br_if 0 (;@11;) + local.get 1 + local.get 0 + i32.load offset=424 + i32.eq + br_if 5 (;@6;) + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk5inuse17h2d327e4c36b84dfeE + br_if 7 (;@4;) + block ;; label = @12 + block ;; label = @13 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E + local.tee 5 + i32.const 256 + i32.lt_u + br_if 0 (;@13;) + local.get 0 + local.get 1 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E + br 1 (;@12;) + end + block ;; label = @13 + local.get 1 + i32.load offset=12 + local.tee 6 + local.get 1 + i32.load offset=8 + local.tee 8 + i32.eq + br_if 0 (;@13;) + local.get 8 + local.get 6 + i32.store offset=12 + local.get 6 + local.get 8 + i32.store offset=8 + br 1 (;@12;) + end + local.get 0 + local.get 0 + i32.load offset=408 + i32.const -2 + local.get 5 + i32.const 3 + i32.shr_u + i32.rotl + i32.and + i32.store offset=408 + end + local.get 5 + local.get 3 + i32.add + local.set 3 + local.get 1 + local.get 5 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 1 + br 7 (;@4;) + end + local.get 0 + local.get 7 + i32.store offset=428 + local.get 0 + local.get 0 + i32.load offset=420 + local.get 3 + i32.add + local.tee 1 + i32.store offset=420 + local.get 7 + local.get 1 + i32.const 1 + i32.or + i32.store offset=4 + local.get 4 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.set 7 + br 9 (;@1;) + end + local.get 0 + local.get 1 + local.get 3 + i32.sub + local.tee 4 + i32.store offset=420 + local.get 0 + local.get 0 + i32.load offset=428 + local.tee 1 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.tee 7 + i32.store offset=428 + local.get 7 + local.get 4 + i32.const 1 + i32.or + i32.store offset=4 + local.get 1 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk34set_size_and_pinuse_of_inuse_chunk17h4acf6d59020bd397E + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.set 7 + br 8 (;@1;) + end + local.get 0 + i32.load offset=424 + local.set 1 + local.get 4 + local.get 3 + i32.sub + local.tee 4 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + i32.lt_u + br_if 3 (;@5;) + local.get 1 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 7 + local.get 0 + local.get 4 + i32.store offset=416 + local.get 0 + local.get 7 + i32.store offset=424 + local.get 7 + local.get 4 + call $_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E + local.get 1 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk34set_size_and_pinuse_of_inuse_chunk17h4acf6d59020bd397E + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.set 7 + br 7 (;@1;) + end + local.get 0 + local.get 8 + i32.store offset=444 + br 4 (;@3;) + end + local.get 1 + local.get 1 + i32.load offset=4 + local.get 10 + i32.add + i32.store offset=4 + local.get 0 + local.get 0 + i32.load offset=428 + local.get 0 + i32.load offset=420 + local.get 10 + i32.add + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$8init_top17he4cefe3b36a3bd87E + br 4 (;@2;) + end + local.get 0 + local.get 7 + i32.store offset=424 + local.get 0 + local.get 0 + i32.load offset=416 + local.get 3 + i32.add + local.tee 1 + i32.store offset=416 + local.get 7 + local.get 1 + call 
$_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E + local.get 4 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.set 7 + br 4 (;@1;) + end + local.get 0 + i32.const 0 + i32.store offset=424 + local.get 0 + i32.load offset=416 + local.set 3 + local.get 0 + i32.const 0 + i32.store offset=416 + local.get 1 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk20set_inuse_and_pinuse17h515e5a69a6d1edc6E + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.set 7 + br 3 (;@1;) + end + local.get 7 + local.get 3 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk20set_free_with_pinuse17h5d876ea751634e99E + block ;; label = @4 + local.get 3 + i32.const 256 + i32.lt_u + br_if 0 (;@4;) + local.get 0 + local.get 7 + local.get 3 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18insert_large_chunk17h8e77460818b80af0E + local.get 4 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.set 7 + br 3 (;@1;) + end + local.get 0 + local.get 3 + i32.const -8 + i32.and + i32.add + i32.const 144 + i32.add + local.set 1 + block ;; label = @4 + block ;; label = @5 + local.get 0 + i32.load offset=408 + local.tee 5 + i32.const 1 + local.get 3 + i32.const 3 + i32.shr_u + i32.shl + local.tee 3 + i32.and + i32.eqz + br_if 0 (;@5;) + local.get 1 + i32.load offset=8 + local.set 3 + br 1 (;@4;) + end + local.get 0 + local.get 5 + local.get 3 + i32.or + i32.store offset=408 + local.get 1 + local.set 3 + end + local.get 1 + local.get 7 + i32.store offset=8 + local.get 3 + local.get 7 + i32.store offset=12 + local.get 7 + local.get 1 + i32.store offset=12 + local.get 7 + local.get 3 + i32.store offset=8 + local.get 4 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.set 7 + br 2 (;@1;) + end + local.get 0 + i32.const 4095 + i32.store offset=448 + local.get 0 + local.get 8 + i32.store offset=128 + local.get 0 + i32.const 140 + i32.add + local.get 11 + i32.store + local.get 0 + i32.const 132 + i32.add + local.get 10 + i32.store + i32.const 0 + local.set 4 + loop ;; label = @3 + local.get 0 + local.get 4 + i32.add + local.tee 1 + i32.const 164 + i32.add + local.get 1 + i32.const 152 + i32.add + local.tee 5 + i32.store + local.get 5 + local.get 1 + i32.const 144 + i32.add + local.tee 6 + i32.store + local.get 1 + i32.const 156 + i32.add + local.get 6 + i32.store + local.get 1 + i32.const 172 + i32.add + local.get 1 + i32.const 160 + i32.add + local.tee 6 + i32.store + local.get 6 + local.get 5 + i32.store + local.get 1 + i32.const 180 + i32.add + local.get 1 + i32.const 168 + i32.add + local.tee 5 + i32.store + local.get 5 + local.get 6 + i32.store + local.get 1 + i32.const 176 + i32.add + local.get 5 + i32.store + local.get 4 + i32.const 32 + i32.add + local.tee 4 + i32.const 256 + i32.ne + br_if 0 (;@3;) + end + call $_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E + local.tee 4 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 5 + i32.const 20 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 6 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 9 + local.get 0 + local.get 8 + local.get 8 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.tee 1 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.get 1 + i32.sub + local.tee 13 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.tee 1 + i32.store offset=428 + 
local.get 0 + local.get 4 + local.get 10 + i32.add + local.get 9 + local.get 5 + local.get 6 + i32.add + i32.add + local.get 13 + i32.add + i32.sub + local.tee 4 + i32.store offset=420 + local.get 1 + local.get 4 + i32.const 1 + i32.or + i32.store offset=4 + call $_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E + local.tee 5 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 6 + i32.const 20 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 8 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 10 + local.get 1 + local.get 4 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 1 + local.get 0 + i32.const 2097152 + i32.store offset=440 + local.get 1 + local.get 10 + local.get 8 + local.get 6 + local.get 5 + i32.sub + i32.add + i32.add + i32.store offset=4 + end + local.get 0 + i32.load offset=420 + local.tee 1 + local.get 3 + i32.le_u + br_if 0 (;@1;) + local.get 0 + local.get 1 + local.get 3 + i32.sub + local.tee 4 + i32.store offset=420 + local.get 0 + local.get 0 + i32.load offset=428 + local.tee 1 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.tee 7 + i32.store offset=428 + local.get 7 + local.get 4 + i32.const 1 + i32.or + i32.store offset=4 + local.get 1 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk34set_size_and_pinuse_of_inuse_chunk17h4acf6d59020bd397E + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.set 7 + end + local.get 2 + i32.const 16 + i32.add + global.set $__stack_pointer + local.get 7 + ) + (func $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$8init_top17he4cefe3b36a3bd87E (;6;) (type 0) (param i32 i32 i32) + (local i32 i32 i32 i32) + local.get 1 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.tee 3 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.get 3 + i32.sub + local.tee 3 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 1 + local.get 0 + local.get 2 + local.get 3 + i32.sub + local.tee 2 + i32.store offset=420 + local.get 0 + local.get 1 + i32.store offset=428 + local.get 1 + local.get 2 + i32.const 1 + i32.or + i32.store offset=4 + call $_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E + local.tee 3 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 4 + i32.const 20 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 5 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 6 + local.get 1 + local.get 2 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 1 + local.get 0 + i32.const 2097152 + i32.store offset=440 + local.get 1 + local.get 6 + local.get 5 + local.get 4 + local.get 3 + i32.sub + i32.add + i32.add + i32.store offset=4 + ) + (func $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$8memalign17he8794c5d1cb954f9E (;7;) (type 4) (param i32 i32 i32) (result i32) + (local i32 i32 i32 i32 i32 i32) + block ;; label = @1 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.get 1 + i32.le_u + br_if 0 (;@1;) + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 1 + end + call $_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E + local.tee 3 + i32.const 8 + call 
$_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 4 + i32.const 20 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 5 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 6 + i32.const 0 + local.set 7 + block ;; label = @1 + i32.const 0 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + i32.const 2 + i32.shl + i32.sub + local.tee 8 + local.get 3 + local.get 6 + local.get 4 + local.get 5 + i32.add + i32.add + i32.sub + i32.const -65544 + i32.add + i32.const -9 + i32.and + i32.const -3 + i32.add + local.tee 3 + local.get 8 + local.get 3 + i32.lt_u + select + local.get 1 + i32.sub + local.get 2 + i32.le_u + br_if 0 (;@1;) + local.get 0 + local.get 1 + i32.const 16 + local.get 2 + i32.const 4 + i32.add + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + i32.const -5 + i32.add + local.get 2 + i32.gt_u + select + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.tee 4 + i32.add + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + i32.add + i32.const -4 + i32.add + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$6malloc17h1ae2390053e3628cE + local.tee 3 + i32.eqz + br_if 0 (;@1;) + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk8from_mem17h11dd30c74f483706E + local.set 2 + block ;; label = @2 + block ;; label = @3 + local.get 1 + i32.const -1 + i32.add + local.tee 7 + local.get 3 + i32.and + br_if 0 (;@3;) + local.get 2 + local.set 1 + br 1 (;@2;) + end + local.get 7 + local.get 3 + i32.add + i32.const 0 + local.get 1 + i32.sub + i32.and + call $_ZN8dlmalloc8dlmalloc5Chunk8from_mem17h11dd30c74f483706E + local.set 7 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 3 + local.get 2 + call $_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E + local.get 7 + i32.const 0 + local.get 1 + local.get 7 + local.get 2 + i32.sub + local.get 3 + i32.gt_u + select + i32.add + local.tee 1 + local.get 2 + i32.sub + local.tee 7 + i32.sub + local.set 3 + block ;; label = @3 + local.get 2 + call $_ZN8dlmalloc8dlmalloc5Chunk7mmapped17h1a9959fbf47496c3E + br_if 0 (;@3;) + local.get 1 + local.get 3 + call $_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E + local.get 2 + local.get 7 + call $_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E + local.get 0 + local.get 2 + local.get 7 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$13dispose_chunk17h577eb103dc04307bE + br 1 (;@2;) + end + local.get 2 + i32.load + local.set 2 + local.get 1 + local.get 3 + i32.store offset=4 + local.get 1 + local.get 2 + local.get 7 + i32.add + i32.store + end + block ;; label = @2 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk7mmapped17h1a9959fbf47496c3E + br_if 0 (;@2;) + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E + local.tee 2 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.get 4 + i32.add + i32.le_u + br_if 0 (;@2;) + local.get 1 + local.get 4 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 7 + local.get 1 + local.get 4 + call $_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E + local.get 7 + local.get 2 + local.get 4 + i32.sub + local.tee 2 + call $_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E + local.get 0 + local.get 7 + local.get 2 + call 
$_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$13dispose_chunk17h577eb103dc04307bE + end + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.set 7 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk7mmapped17h1a9959fbf47496c3E + drop + end + local.get 7 + ) + (func $__main (;8;) (type 5) (result i32) + call $vec_alloc + ) + (func $__rust_alloc (;9;) (type 3) (param i32 i32) (result i32) + (local i32 i32) + global.get $__stack_pointer + i32.const 16 + i32.sub + local.tee 2 + global.set $__stack_pointer + call $_ZN8dlmalloc3sys23enable_alloc_after_fork17h64eb1fc0ff7b2689E + local.get 2 + i32.const 15 + i32.add + call $_ZN73_$LT$dlmalloc..global..Instance$u20$as$u20$core..ops..deref..DerefMut$GT$9deref_mut17h16955ef502c5c4e5E + local.set 3 + block ;; label = @1 + block ;; label = @2 + local.get 1 + i32.const 9 + i32.lt_u + br_if 0 (;@2;) + local.get 3 + local.get 1 + local.get 0 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$8memalign17he8794c5d1cb954f9E + local.set 1 + br 1 (;@1;) + end + local.get 3 + local.get 0 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$6malloc17h1ae2390053e3628cE + local.set 1 + end + local.get 2 + i32.const 15 + i32.add + call $_ZN68_$LT$dlmalloc..global..Instance$u20$as$u20$core..ops..drop..Drop$GT$4drop17he19d8d9c8ea92454E + local.get 2 + i32.const 16 + i32.add + global.set $__stack_pointer + local.get 1 + ) + (func $__rust_dealloc (;10;) (type 0) (param i32 i32 i32) + (local i32) + global.get $__stack_pointer + i32.const 16 + i32.sub + local.tee 3 + global.set $__stack_pointer + call $_ZN8dlmalloc3sys23enable_alloc_after_fork17h64eb1fc0ff7b2689E + local.get 3 + i32.const 15 + i32.add + call $_ZN73_$LT$dlmalloc..global..Instance$u20$as$u20$core..ops..deref..DerefMut$GT$9deref_mut17h16955ef502c5c4e5E + local.get 0 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$4free17h501de2a6604ba1ffE + local.get 3 + i32.const 15 + i32.add + call $_ZN68_$LT$dlmalloc..global..Instance$u20$as$u20$core..ops..drop..Drop$GT$4drop17he19d8d9c8ea92454E + local.get 3 + i32.const 16 + i32.add + global.set $__stack_pointer + ) + (func $__rust_realloc (;11;) (type 6) (param i32 i32 i32 i32) (result i32) + (local i32 i32 i32 i32 i32 i32 i32 i32) + global.get $__stack_pointer + i32.const 16 + i32.sub + local.tee 4 + global.set $__stack_pointer + call $_ZN8dlmalloc3sys23enable_alloc_after_fork17h64eb1fc0ff7b2689E + local.get 4 + i32.const 15 + i32.add + call $_ZN73_$LT$dlmalloc..global..Instance$u20$as$u20$core..ops..deref..DerefMut$GT$9deref_mut17h16955ef502c5c4e5E + local.set 5 + block ;; label = @1 + block ;; label = @2 + block ;; label = @3 + block ;; label = @4 + block ;; label = @5 + block ;; label = @6 + local.get 2 + i32.const 9 + i32.lt_u + br_if 0 (;@6;) + local.get 5 + local.get 2 + local.get 3 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$8memalign17he8794c5d1cb954f9E + local.tee 2 + br_if 1 (;@5;) + i32.const 0 + local.set 2 + br 5 (;@1;) + end + call $_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E + local.tee 1 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 6 + i32.const 20 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 7 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 8 + i32.const 0 + local.set 2 + i32.const 0 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + i32.const 2 + i32.shl + i32.sub + local.tee 9 + local.get 1 + local.get 8 + local.get 6 + local.get 7 + i32.add + 
i32.add + i32.sub + i32.const -65544 + i32.add + i32.const -9 + i32.and + i32.const -3 + i32.add + local.tee 1 + local.get 9 + local.get 1 + i32.lt_u + select + local.get 3 + i32.le_u + br_if 4 (;@1;) + i32.const 16 + local.get 3 + i32.const 4 + i32.add + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + i32.const -5 + i32.add + local.get 3 + i32.gt_u + select + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.set 6 + local.get 0 + call $_ZN8dlmalloc8dlmalloc5Chunk8from_mem17h11dd30c74f483706E + local.set 1 + local.get 1 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E + local.tee 7 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 8 + block ;; label = @6 + block ;; label = @7 + block ;; label = @8 + block ;; label = @9 + block ;; label = @10 + block ;; label = @11 + block ;; label = @12 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk7mmapped17h1a9959fbf47496c3E + br_if 0 (;@12;) + local.get 7 + local.get 6 + i32.ge_u + br_if 4 (;@8;) + local.get 8 + local.get 5 + i32.load offset=428 + i32.eq + br_if 6 (;@6;) + local.get 8 + local.get 5 + i32.load offset=424 + i32.eq + br_if 3 (;@9;) + local.get 8 + call $_ZN8dlmalloc8dlmalloc5Chunk6cinuse17h58499de57c2d37e2E + br_if 9 (;@3;) + local.get 8 + call $_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E + local.tee 9 + local.get 7 + i32.add + local.tee 7 + local.get 6 + i32.lt_u + br_if 9 (;@3;) + local.get 7 + local.get 6 + i32.sub + local.set 10 + local.get 9 + i32.const 256 + i32.lt_u + br_if 1 (;@11;) + local.get 5 + local.get 8 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$18unlink_large_chunk17h2e279402ce6356d4E + br 2 (;@10;) + end + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E + local.set 7 + local.get 6 + i32.const 256 + i32.lt_u + br_if 8 (;@3;) + block ;; label = @12 + local.get 7 + local.get 6 + i32.const 4 + i32.add + i32.lt_u + br_if 0 (;@12;) + local.get 7 + local.get 6 + i32.sub + i32.const 131073 + i32.lt_u + br_if 5 (;@7;) + end + local.get 5 + local.get 1 + local.get 1 + i32.load + local.tee 8 + i32.sub + local.get 7 + local.get 8 + i32.add + i32.const 16 + i32.add + local.tee 9 + local.get 6 + i32.const 31 + i32.add + local.get 5 + call $_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$9page_size17h0fdd55b2693d440cE + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + local.tee 7 + i32.const 1 + call $_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$5remap17hf5ff3c6a92680f40E + local.tee 6 + i32.eqz + br_if 8 (;@3;) + local.get 6 + local.get 8 + i32.add + local.tee 1 + local.get 7 + local.get 8 + i32.sub + local.tee 3 + i32.const -16 + i32.add + local.tee 2 + i32.store offset=4 + call $_ZN8dlmalloc8dlmalloc5Chunk14fencepost_head17he07aaa52f3b50dfdE + local.set 0 + local.get 1 + local.get 2 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.get 0 + i32.store offset=4 + local.get 1 + local.get 3 + i32.const -12 + i32.add + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + i32.const 0 + i32.store offset=4 + local.get 5 + local.get 5 + i32.load offset=432 + local.get 7 + local.get 9 + i32.sub + i32.add + local.tee 3 + i32.store offset=432 + local.get 5 + local.get 5 + i32.load offset=444 + local.tee 2 + local.get 6 + local.get 6 + local.get 2 + i32.gt_u + select + i32.store offset=444 + local.get 5 + local.get 5 + i32.load offset=436 + local.tee 2 + local.get 3 + local.get 2 + local.get 3 
+ i32.gt_u + select + i32.store offset=436 + br 9 (;@2;) + end + block ;; label = @11 + local.get 8 + i32.load offset=12 + local.tee 11 + local.get 8 + i32.load offset=8 + local.tee 8 + i32.eq + br_if 0 (;@11;) + local.get 8 + local.get 11 + i32.store offset=12 + local.get 11 + local.get 8 + i32.store offset=8 + br 1 (;@10;) + end + local.get 5 + local.get 5 + i32.load offset=408 + i32.const -2 + local.get 9 + i32.const 3 + i32.shr_u + i32.rotl + i32.and + i32.store offset=408 + end + block ;; label = @10 + local.get 10 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + i32.lt_u + br_if 0 (;@10;) + local.get 1 + local.get 6 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 7 + local.get 1 + local.get 6 + call $_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E + local.get 7 + local.get 10 + call $_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E + local.get 5 + local.get 7 + local.get 10 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$13dispose_chunk17h577eb103dc04307bE + local.get 1 + br_if 8 (;@2;) + br 7 (;@3;) + end + local.get 1 + local.get 7 + call $_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E + local.get 1 + br_if 7 (;@2;) + br 6 (;@3;) + end + local.get 5 + i32.load offset=416 + local.get 7 + i32.add + local.tee 7 + local.get 6 + i32.lt_u + br_if 5 (;@3;) + block ;; label = @9 + block ;; label = @10 + local.get 7 + local.get 6 + i32.sub + local.tee 8 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + i32.ge_u + br_if 0 (;@10;) + local.get 1 + local.get 7 + call $_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E + i32.const 0 + local.set 8 + i32.const 0 + local.set 7 + br 1 (;@9;) + end + local.get 1 + local.get 6 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.tee 7 + local.get 8 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 9 + local.get 1 + local.get 6 + call $_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E + local.get 7 + local.get 8 + call $_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E + local.get 9 + call $_ZN8dlmalloc8dlmalloc5Chunk12clear_pinuse17h3c1a99d0f5bddc22E + end + local.get 5 + local.get 7 + i32.store offset=424 + local.get 5 + local.get 8 + i32.store offset=416 + local.get 1 + br_if 6 (;@2;) + br 5 (;@3;) + end + local.get 7 + local.get 6 + i32.sub + local.tee 7 + i32.const 16 + i32.const 8 + call $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E + i32.lt_u + br_if 0 (;@7;) + local.get 1 + local.get 6 + call $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 8 + local.get 1 + local.get 6 + call $_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E + local.get 8 + local.get 7 + call $_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E + local.get 5 + local.get 8 + local.get 7 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$13dispose_chunk17h577eb103dc04307bE + end + local.get 1 + br_if 4 (;@2;) + br 3 (;@3;) + end + local.get 5 + i32.load offset=420 + local.get 7 + i32.add + local.tee 7 + local.get 6 + i32.gt_u + br_if 1 (;@4;) + br 2 (;@3;) + end + local.get 2 + local.get 0 + local.get 1 + local.get 3 + local.get 1 + local.get 3 + i32.lt_u + select + call $memcpy + drop + local.get 5 + local.get 0 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$4free17h501de2a6604ba1ffE + br 3 (;@1;) + end + local.get 1 + local.get 6 + call 
$_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E + local.set 8 + local.get 1 + local.get 6 + call $_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E + local.get 5 + local.get 8 + i32.store offset=428 + local.get 8 + local.get 7 + local.get 6 + i32.sub + local.tee 6 + i32.const 1 + i32.or + i32.store offset=4 + local.get 5 + local.get 6 + i32.store offset=420 + local.get 1 + br_if 1 (;@2;) + end + local.get 5 + local.get 3 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$6malloc17h1ae2390053e3628cE + local.tee 6 + i32.eqz + br_if 1 (;@1;) + local.get 6 + local.get 0 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E + i32.const -8 + i32.const -4 + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk7mmapped17h1a9959fbf47496c3E + select + i32.add + local.tee 2 + local.get 3 + local.get 2 + local.get 3 + i32.lt_u + select + call $memcpy + local.set 3 + local.get 5 + local.get 0 + call $_ZN8dlmalloc8dlmalloc17Dlmalloc$LT$A$GT$4free17h501de2a6604ba1ffE + local.get 3 + local.set 2 + br 1 (;@1;) + end + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk7mmapped17h1a9959fbf47496c3E + drop + local.get 1 + call $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE + local.set 2 + end + local.get 4 + i32.const 15 + i32.add + call $_ZN68_$LT$dlmalloc..global..Instance$u20$as$u20$core..ops..drop..Drop$GT$4drop17he19d8d9c8ea92454E + local.get 4 + i32.const 16 + i32.add + global.set $__stack_pointer + local.get 2 + ) + (func $__rust_alloc_error_handler (;12;) (type 1) (param i32 i32) + local.get 0 + local.get 1 + call $__rdl_oom + return + ) + (func $_ZN5alloc7raw_vec11finish_grow17hcefa6a06206fd52bE (;13;) (type 7) (param i32 i32 i32 i32) + (local i32) + block ;; label = @1 + block ;; label = @2 + block ;; label = @3 + local.get 1 + i32.eqz + br_if 0 (;@3;) + local.get 2 + i32.const -1 + i32.le_s + br_if 1 (;@2;) + block ;; label = @4 + block ;; label = @5 + block ;; label = @6 + local.get 3 + i32.load offset=4 + i32.eqz + br_if 0 (;@6;) + block ;; label = @7 + local.get 3 + i32.const 8 + i32.add + i32.load + local.tee 4 + br_if 0 (;@7;) + block ;; label = @8 + local.get 2 + br_if 0 (;@8;) + local.get 1 + local.set 3 + br 4 (;@4;) + end + i32.const 0 + i32.load8_u offset=1048576 + drop + br 2 (;@5;) + end + local.get 3 + i32.load + local.get 4 + local.get 1 + local.get 2 + call $__rust_realloc + local.set 3 + br 2 (;@4;) + end + block ;; label = @6 + local.get 2 + br_if 0 (;@6;) + local.get 1 + local.set 3 + br 2 (;@4;) + end + i32.const 0 + i32.load8_u offset=1048576 + drop + end + local.get 2 + local.get 1 + call $__rust_alloc + local.set 3 + end + block ;; label = @4 + local.get 3 + i32.eqz + br_if 0 (;@4;) + local.get 0 + local.get 3 + i32.store offset=4 + local.get 0 + i32.const 8 + i32.add + local.get 2 + i32.store + local.get 0 + i32.const 0 + i32.store + return + end + local.get 0 + local.get 1 + i32.store offset=4 + local.get 0 + i32.const 8 + i32.add + local.get 2 + i32.store + br 2 (;@1;) + end + local.get 0 + i32.const 0 + i32.store offset=4 + local.get 0 + i32.const 8 + i32.add + local.get 2 + i32.store + br 1 (;@1;) + end + local.get 0 + i32.const 0 + i32.store offset=4 + end + local.get 0 + i32.const 1 + i32.store + ) + (func $_ZN5alloc7raw_vec19RawVec$LT$T$C$A$GT$16reserve_for_push17h2205b68aee7ddaceE (;14;) (type 8) (param i32) + (local i32 i32 i32 i32 i32) + global.get $__stack_pointer + i32.const 32 + i32.sub + local.tee 1 + global.set $__stack_pointer + local.get 0 + i32.load offset=4 + local.tee 2 + i32.const 1 + i32.shl + local.tee 3 + 
i32.const 4 + local.get 3 + i32.const 4 + i32.gt_u + select + local.tee 3 + i32.const 2 + i32.shl + local.set 4 + local.get 3 + i32.const 536870912 + i32.lt_u + i32.const 2 + i32.shl + local.set 5 + block ;; label = @1 + block ;; label = @2 + local.get 2 + i32.eqz + br_if 0 (;@2;) + local.get 1 + i32.const 4 + i32.store offset=24 + local.get 1 + local.get 2 + i32.const 2 + i32.shl + i32.store offset=28 + local.get 1 + local.get 0 + i32.load + i32.store offset=20 + br 1 (;@1;) + end + local.get 1 + i32.const 0 + i32.store offset=24 + end + local.get 1 + i32.const 8 + i32.add + local.get 5 + local.get 4 + local.get 1 + i32.const 20 + i32.add + call $_ZN5alloc7raw_vec11finish_grow17hcefa6a06206fd52bE + local.get 1 + i32.load offset=12 + local.set 2 + block ;; label = @1 + block ;; label = @2 + local.get 1 + i32.load offset=8 + br_if 0 (;@2;) + local.get 0 + local.get 3 + i32.store offset=4 + local.get 0 + local.get 2 + i32.store + br 1 (;@1;) + end + local.get 2 + i32.const -2147483647 + i32.eq + br_if 0 (;@1;) + block ;; label = @2 + local.get 2 + i32.eqz + br_if 0 (;@2;) + local.get 2 + local.get 1 + i32.const 16 + i32.add + i32.load + call $_ZN5alloc5alloc18handle_alloc_error17h4f3cb0c5afb21c76E + unreachable + end + call $_ZN5alloc7raw_vec17capacity_overflow17h6c250c8ca346b5adE + unreachable + end + local.get 1 + i32.const 32 + i32.add + global.set $__stack_pointer + ) + (func $vec_alloc (;15;) (type 5) (result i32) + (local i32 i32 i32) + global.get $__stack_pointer + i32.const 16 + i32.sub + local.tee 0 + global.set $__stack_pointer + local.get 0 + i32.const 0 + i32.store offset=12 + local.get 0 + i64.const 4 + i64.store offset=4 align=4 + local.get 0 + i32.const 4 + i32.add + call $_ZN5alloc7raw_vec19RawVec$LT$T$C$A$GT$16reserve_for_push17h2205b68aee7ddaceE + local.get 0 + i32.load offset=4 + local.tee 1 + local.get 0 + i32.load offset=12 + local.tee 2 + i32.const 2 + i32.shl + i32.add + i32.const 1 + i32.store + block ;; label = @1 + local.get 2 + i32.const -1 + i32.eq + br_if 0 (;@1;) + block ;; label = @2 + local.get 0 + i32.load offset=8 + local.tee 2 + i32.eqz + br_if 0 (;@2;) + local.get 1 + local.get 2 + i32.const 2 + i32.shl + i32.const 4 + call $__rust_dealloc + end + local.get 0 + i32.const 16 + i32.add + global.set $__stack_pointer + i32.const 1 + return + end + unreachable + unreachable + ) + (func $_ZN5alloc7raw_vec17capacity_overflow17h6c250c8ca346b5adE (;16;) (type 9) + unreachable + unreachable + ) + (func $_ZN5alloc5alloc18handle_alloc_error17h4f3cb0c5afb21c76E (;17;) (type 1) (param i32 i32) + local.get 0 + local.get 1 + call $_ZN5alloc5alloc18handle_alloc_error8rt_error17h63de615f6e977af2E + unreachable + ) + (func $_ZN5alloc5alloc18handle_alloc_error8rt_error17h63de615f6e977af2E (;18;) (type 1) (param i32 i32) + local.get 1 + local.get 0 + call $__rust_alloc_error_handler + unreachable + ) + (func $__rdl_oom (;19;) (type 1) (param i32 i32) + unreachable + unreachable + ) + (func $_ZN8dlmalloc8dlmalloc8align_up17hacb462cafc347c13E (;20;) (type 3) (param i32 i32) (result i32) + local.get 0 + local.get 1 + i32.add + i32.const -1 + i32.add + i32.const 0 + local.get 1 + i32.sub + i32.and + ) + (func $_ZN8dlmalloc8dlmalloc9left_bits17hb6cbe146b8019d98E (;21;) (type 2) (param i32) (result i32) + local.get 0 + i32.const 1 + i32.shl + local.tee 0 + i32.const 0 + local.get 0 + i32.sub + i32.or + ) + (func $_ZN8dlmalloc8dlmalloc9least_bit17h4bca52ead665dc5aE (;22;) (type 2) (param i32) (result i32) + i32.const 0 + local.get 0 + i32.sub + local.get 0 + i32.and + ) + (func 
$_ZN8dlmalloc8dlmalloc24leftshift_for_tree_index17h31d064fdd867f502E (;23;) (type 2) (param i32) (result i32) + i32.const 0 + i32.const 25 + local.get 0 + i32.const 1 + i32.shr_u + i32.sub + local.get 0 + i32.const 31 + i32.eq + select + ) + (func $_ZN8dlmalloc8dlmalloc5Chunk14fencepost_head17he07aaa52f3b50dfdE (;24;) (type 5) (result i32) + i32.const 7 + ) + (func $_ZN8dlmalloc8dlmalloc5Chunk4size17h77d1c406ab42db33E (;25;) (type 2) (param i32) (result i32) + local.get 0 + i32.load offset=4 + i32.const -8 + i32.and + ) + (func $_ZN8dlmalloc8dlmalloc5Chunk6cinuse17h58499de57c2d37e2E (;26;) (type 2) (param i32) (result i32) + local.get 0 + i32.load8_u offset=4 + i32.const 2 + i32.and + i32.const 1 + i32.shr_u + ) + (func $_ZN8dlmalloc8dlmalloc5Chunk6pinuse17h92d5107047b03ba7E (;27;) (type 2) (param i32) (result i32) + local.get 0 + i32.load offset=4 + i32.const 1 + i32.and + ) + (func $_ZN8dlmalloc8dlmalloc5Chunk12clear_pinuse17h3c1a99d0f5bddc22E (;28;) (type 8) (param i32) + local.get 0 + local.get 0 + i32.load offset=4 + i32.const -2 + i32.and + i32.store offset=4 + ) + (func $_ZN8dlmalloc8dlmalloc5Chunk5inuse17h2d327e4c36b84dfeE (;29;) (type 2) (param i32) (result i32) + local.get 0 + i32.load offset=4 + i32.const 3 + i32.and + i32.const 1 + i32.ne + ) + (func $_ZN8dlmalloc8dlmalloc5Chunk7mmapped17h1a9959fbf47496c3E (;30;) (type 2) (param i32) (result i32) + local.get 0 + i32.load8_u offset=4 + i32.const 3 + i32.and + i32.eqz + ) + (func $_ZN8dlmalloc8dlmalloc5Chunk9set_inuse17h4282057414c4e601E (;31;) (type 1) (param i32 i32) + local.get 0 + local.get 0 + i32.load offset=4 + i32.const 1 + i32.and + local.get 1 + i32.or + i32.const 2 + i32.or + i32.store offset=4 + local.get 0 + local.get 1 + i32.add + local.tee 0 + local.get 0 + i32.load offset=4 + i32.const 1 + i32.or + i32.store offset=4 + ) + (func $_ZN8dlmalloc8dlmalloc5Chunk20set_inuse_and_pinuse17h515e5a69a6d1edc6E (;32;) (type 1) (param i32 i32) + local.get 0 + local.get 1 + i32.const 3 + i32.or + i32.store offset=4 + local.get 0 + local.get 1 + i32.add + local.tee 1 + local.get 1 + i32.load offset=4 + i32.const 1 + i32.or + i32.store offset=4 + ) + (func $_ZN8dlmalloc8dlmalloc5Chunk34set_size_and_pinuse_of_inuse_chunk17h4acf6d59020bd397E (;33;) (type 1) (param i32 i32) + local.get 0 + local.get 1 + i32.const 3 + i32.or + i32.store offset=4 + ) + (func $_ZN8dlmalloc8dlmalloc5Chunk33set_size_and_pinuse_of_free_chunk17ha971516d0be71949E (;34;) (type 1) (param i32 i32) + local.get 0 + local.get 1 + i32.const 1 + i32.or + i32.store offset=4 + local.get 0 + local.get 1 + i32.add + local.get 1 + i32.store + ) + (func $_ZN8dlmalloc8dlmalloc5Chunk20set_free_with_pinuse17h5d876ea751634e99E (;35;) (type 0) (param i32 i32 i32) + local.get 2 + local.get 2 + i32.load offset=4 + i32.const -2 + i32.and + i32.store offset=4 + local.get 0 + local.get 1 + i32.const 1 + i32.or + i32.store offset=4 + local.get 0 + local.get 1 + i32.add + local.get 1 + i32.store + ) + (func $_ZN8dlmalloc8dlmalloc5Chunk11plus_offset17h6e6d06559ad34b15E (;36;) (type 3) (param i32 i32) (result i32) + local.get 0 + local.get 1 + i32.add + ) + (func $_ZN8dlmalloc8dlmalloc5Chunk12minus_offset17h7c3eec81761249d9E (;37;) (type 3) (param i32 i32) (result i32) + local.get 0 + local.get 1 + i32.sub + ) + (func $_ZN8dlmalloc8dlmalloc5Chunk6to_mem17h75497733644e1d6cE (;38;) (type 2) (param i32) (result i32) + local.get 0 + i32.const 8 + i32.add + ) + (func $_ZN8dlmalloc8dlmalloc5Chunk10mem_offset17h86551c33e07de253E (;39;) (type 5) (result i32) + i32.const 8 + ) + (func 
$_ZN8dlmalloc8dlmalloc5Chunk8from_mem17h11dd30c74f483706E (;40;) (type 2) (param i32) (result i32) + local.get 0 + i32.const -8 + i32.add + ) + (func $_ZN8dlmalloc8dlmalloc9TreeChunk14leftmost_child17h20605933c801b44bE (;41;) (type 2) (param i32) (result i32) + (local i32) + block ;; label = @1 + local.get 0 + i32.load offset=16 + local.tee 1 + br_if 0 (;@1;) + local.get 0 + i32.const 20 + i32.add + i32.load + local.set 1 + end + local.get 1 + ) + (func $_ZN8dlmalloc8dlmalloc9TreeChunk5chunk17h4efd58110bb4b6e5E (;42;) (type 2) (param i32) (result i32) + local.get 0 + ) + (func $_ZN8dlmalloc8dlmalloc9TreeChunk4next17he250edbec5d87123E (;43;) (type 2) (param i32) (result i32) + local.get 0 + i32.load offset=12 + ) + (func $_ZN8dlmalloc8dlmalloc9TreeChunk4prev17h7a0f1d46544cc14aE (;44;) (type 2) (param i32) (result i32) + local.get 0 + i32.load offset=8 + ) + (func $_ZN8dlmalloc8dlmalloc7Segment9is_extern17h6f6db2c70b891fd9E (;45;) (type 2) (param i32) (result i32) + local.get 0 + i32.load offset=12 + i32.const 1 + i32.and + ) + (func $_ZN8dlmalloc8dlmalloc7Segment9sys_flags17h224550055bf7775bE (;46;) (type 2) (param i32) (result i32) + local.get 0 + i32.load offset=12 + i32.const 1 + i32.shr_u + ) + (func $_ZN8dlmalloc8dlmalloc7Segment5holds17h8f6de4ee6718009bE (;47;) (type 3) (param i32 i32) (result i32) + (local i32 i32) + i32.const 0 + local.set 2 + block ;; label = @1 + local.get 0 + i32.load + local.tee 3 + local.get 1 + i32.gt_u + br_if 0 (;@1;) + local.get 3 + local.get 0 + i32.load offset=4 + i32.add + local.get 1 + i32.gt_u + local.set 2 + end + local.get 2 + ) + (func $_ZN8dlmalloc8dlmalloc7Segment3top17he7e9e2493151d036E (;48;) (type 2) (param i32) (result i32) + local.get 0 + i32.load + local.get 0 + i32.load offset=4 + i32.add + ) + (func $_ZN73_$LT$dlmalloc..global..Instance$u20$as$u20$core..ops..deref..DerefMut$GT$9deref_mut17h16955ef502c5c4e5E (;49;) (type 2) (param i32) (result i32) + i32.const 1048580 + ) + (func $_ZN68_$LT$dlmalloc..global..Instance$u20$as$u20$core..ops..drop..Drop$GT$4drop17he19d8d9c8ea92454E (;50;) (type 8) (param i32)) + (func $_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$5alloc17hdbf1e2bcc01bc909E (;51;) (type 0) (param i32 i32 i32) + (local i32) + local.get 2 + i32.const 16 + i32.shr_u + memory.grow + local.set 3 + local.get 0 + i32.const 0 + i32.store offset=8 + local.get 0 + i32.const 0 + local.get 2 + i32.const -65536 + i32.and + local.get 3 + i32.const -1 + i32.eq + local.tee 2 + select + i32.store offset=4 + local.get 0 + i32.const 0 + local.get 3 + i32.const 16 + i32.shl + local.get 2 + select + i32.store + ) + (func $_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$5remap17hf5ff3c6a92680f40E (;52;) (type 10) (param i32 i32 i32 i32 i32) (result i32) + i32.const 0 + ) + (func $_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$9free_part17h74489c9e7a3aa967E (;53;) (type 6) (param i32 i32 i32 i32) (result i32) + i32.const 0 + ) + (func $_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$4free17h993c5f05ba1214bcE (;54;) (type 4) (param i32 i32 i32) (result i32) + i32.const 0 + ) + (func $_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$16can_release_part17h43bfb7d8666fcc31E (;55;) (type 3) (param i32 i32) (result i32) + i32.const 0 + ) + (func $_ZN61_$LT$dlmalloc..sys..System$u20$as$u20$dlmalloc..Allocator$GT$9page_size17h0fdd55b2693d440cE (;56;) (type 2) (param i32) (result i32) + i32.const 65536 + ) + (func 
$_ZN8dlmalloc3sys23enable_alloc_after_fork17h64eb1fc0ff7b2689E (;57;) (type 9)) + (func $memcpy (;58;) (type 4) (param i32 i32 i32) (result i32) + local.get 0 + local.get 1 + local.get 2 + call $_ZN17compiler_builtins3mem6memcpy17h7b83c85e899060b3E + ) + (func $_ZN17compiler_builtins3mem6memcpy17h7b83c85e899060b3E (;59;) (type 4) (param i32 i32 i32) (result i32) + (local i32 i32 i32 i32 i32 i32 i32 i32) + block ;; label = @1 + block ;; label = @2 + local.get 2 + i32.const 15 + i32.gt_u + br_if 0 (;@2;) + local.get 0 + local.set 3 + br 1 (;@1;) + end + local.get 0 + i32.const 0 + local.get 0 + i32.sub + i32.const 3 + i32.and + local.tee 4 + i32.add + local.set 5 + block ;; label = @2 + local.get 4 + i32.eqz + br_if 0 (;@2;) + local.get 0 + local.set 3 + local.get 1 + local.set 6 + loop ;; label = @3 + local.get 3 + local.get 6 + i32.load8_u + i32.store8 + local.get 6 + i32.const 1 + i32.add + local.set 6 + local.get 3 + i32.const 1 + i32.add + local.tee 3 + local.get 5 + i32.lt_u + br_if 0 (;@3;) + end + end + local.get 5 + local.get 2 + local.get 4 + i32.sub + local.tee 7 + i32.const -4 + i32.and + local.tee 8 + i32.add + local.set 3 + block ;; label = @2 + block ;; label = @3 + local.get 1 + local.get 4 + i32.add + local.tee 9 + i32.const 3 + i32.and + i32.eqz + br_if 0 (;@3;) + local.get 8 + i32.const 1 + i32.lt_s + br_if 1 (;@2;) + local.get 9 + i32.const 3 + i32.shl + local.tee 6 + i32.const 24 + i32.and + local.set 2 + local.get 9 + i32.const -4 + i32.and + local.tee 10 + i32.const 4 + i32.add + local.set 1 + i32.const 0 + local.get 6 + i32.sub + i32.const 24 + i32.and + local.set 4 + local.get 10 + i32.load + local.set 6 + loop ;; label = @4 + local.get 5 + local.get 6 + local.get 2 + i32.shr_u + local.get 1 + i32.load + local.tee 6 + local.get 4 + i32.shl + i32.or + i32.store + local.get 1 + i32.const 4 + i32.add + local.set 1 + local.get 5 + i32.const 4 + i32.add + local.tee 5 + local.get 3 + i32.lt_u + br_if 0 (;@4;) + br 2 (;@2;) + end + end + local.get 8 + i32.const 1 + i32.lt_s + br_if 0 (;@2;) + local.get 9 + local.set 1 + loop ;; label = @3 + local.get 5 + local.get 1 + i32.load + i32.store + local.get 1 + i32.const 4 + i32.add + local.set 1 + local.get 5 + i32.const 4 + i32.add + local.tee 5 + local.get 3 + i32.lt_u + br_if 0 (;@3;) + end + end + local.get 7 + i32.const 3 + i32.and + local.set 2 + local.get 9 + local.get 8 + i32.add + local.set 1 + end + block ;; label = @1 + local.get 2 + i32.eqz + br_if 0 (;@1;) + local.get 3 + local.get 2 + i32.add + local.set 5 + loop ;; label = @2 + local.get 3 + local.get 1 + i32.load8_u + i32.store8 + local.get 1 + i32.const 1 + i32.add + local.set 1 + local.get 3 + i32.const 1 + i32.add + local.tee 3 + local.get 5 + i32.lt_u + br_if 0 (;@2;) + end + end + local.get 0 + ) + (table (;0;) 1 1 funcref) + (memory (;0;) 17) + (global $__stack_pointer (;0;) (mut i32) i32.const 1048576) + (global (;1;) i32 i32.const 1049032) + (global (;2;) i32 i32.const 1049040) + (export "memory" (memory 0)) + (export "__main" (func $__main)) + (export "vec_alloc" (func $vec_alloc)) + (export "__data_end" (global 1)) + (export "__heap_base" (global 2)) +) \ No newline at end of file diff --git a/frontend-wasm/tests/expected/signed_arith.mir b/frontend-wasm/tests/expected/signed_arith.mir new file mode 100644 index 00000000..0473ba0c --- /dev/null +++ b/frontend-wasm/tests/expected/signed_arith.mir @@ -0,0 +1,316 @@ +module noname + +memory { + segment @0x100000 x 336 =
0x00000002000000010000000000000001000000050000001e0000004800100000776f6c667265766f2068746977207265646e69616d657220656874206574616c75636c6163206f742074706d65747461000000000000006f72657a20666f20726f736976696420612068746977207265646e69616d657220656874206574616c75636c6163206f742074706d65747461000000050000001800000048001000000000000500000012000000480010000000776f6c667265766f206874697720656469766964206f742074706d65747461000000000000006f72657a20796220656469766964206f742074706d657474610000000000000000000000050000000c000000480010000073722e336634633134343734643266663337343732323933303434663363313262313536393738323535636666643934666666383264323264333438626430643363362f706d742f; +} + +global external __stack_pointer : i32 = 0x00100000 { id = gvar0 }; +global external gv1 : i32 = 0x00100150 { id = gvar1 }; +global external gv2 : i32 = 0x00100150 { id = gvar2 }; + + +pub fn rust_begin_unwind(i32) { +block0(v0: i32): + br block2 + +block1: + +block2: + br block2 + +block3: +} + +pub fn div_s(i32, i32) -> i32 { +block0(v0: i32, v1: i32): + v3 = eq v1, 0 : i1 + v4 = cast v3 : i32 + v5 = neq v4, 0 : i1 + condbr v5, block3, block4 + +block1(v2: i32): + ret v2 + +block2: + v22 = div v0, v1 : i32 + br block1(v22) + +block3: + v17 = const.i32 1048672 : i32 + v18 = const.i32 25 : i32 + v19 = const.i32 1048648 : i32 + call noname::_ZN4core9panicking5panic17h62f53cc4db8dd7b3E(v17, v18, v19) + unreachable + +block4: + v6 = const.i32 -2147483648 : i32 + v7 = neq v0, v6 : i1 + v8 = cast v7 : i32 + v9 = neq v8, 0 : i1 + condbr v9, block2, block5 + +block5: + v10 = const.i32 -1 : i32 + v11 = neq v1, v10 : i1 + v12 = cast v11 : i32 + v13 = neq v12, 0 : i1 + condbr v13, block2, block6 + +block6: + v14 = const.i32 1048704 : i32 + v15 = const.i32 31 : i32 + v16 = const.i32 1048648 : i32 + call noname::_ZN4core9panicking5panic17h62f53cc4db8dd7b3E(v14, v15, v16) + unreachable +} + +pub fn div_u(i32, i32) -> i32 { +block0(v0: i32, v1: i32): + v3 = eq v1, 0 : i1 + v4 = cast v3 : i32 + v5 = neq v4, 0 : i1 + condbr v5, block2, block3 + +block1(v2: i32): + +block2: + v10 = const.i32 1048672 : i32 + v11 = const.i32 25 : i32 + v12 = const.i32 1048736 : i32 + call noname::_ZN4core9panicking5panic17h62f53cc4db8dd7b3E(v10, v11, v12) + unreachable + +block3: + v6 = cast v0 : u32 + v7 = cast v1 : u32 + v8 = div v6, v7 : u32 + v9 = cast v8 : i32 + ret v9 +} + +pub fn rem_s(i32, i32) -> i32 { +block0(v0: i32, v1: i32): + v3 = eq v1, 0 : i1 + v4 = cast v3 : i32 + v5 = neq v4, 0 : i1 + condbr v5, block3, block4 + +block1(v2: i32): + ret v2 + +block2: + v22 = mod v0, v1 : i32 + br block1(v22) + +block3: + v17 = const.i32 1048768 : i32 + v18 = const.i32 57 : i32 + v19 = const.i32 1048752 : i32 + call noname::_ZN4core9panicking5panic17h62f53cc4db8dd7b3E(v17, v18, v19) + unreachable + +block4: + v6 = const.i32 -2147483648 : i32 + v7 = neq v0, v6 : i1 + v8 = cast v7 : i32 + v9 = neq v8, 0 : i1 + condbr v9, block2, block5 + +block5: + v10 = const.i32 -1 : i32 + v11 = neq v1, v10 : i1 + v12 = cast v11 : i32 + v13 = neq v12, 0 : i1 + condbr v13, block2, block6 + +block6: + v14 = const.i32 1048832 : i32 + v15 = const.i32 48 : i32 + v16 = const.i32 1048752 : i32 + call noname::_ZN4core9panicking5panic17h62f53cc4db8dd7b3E(v14, v15, v16) + unreachable +} + +pub fn rem_u(i32, i32) -> i32 { +block0(v0: i32, v1: i32): + v3 = eq v1, 0 : i1 + v4 = cast v3 : i32 + v5 = neq v4, 0 : i1 + condbr v5, block2, block3 + +block1(v2: i32): + +block2: + v10 = const.i32 1048768 : i32 + v11 = const.i32 57 : i32 + v12 = const.i32 1048880 : i32 + call 
noname::_ZN4core9panicking5panic17h62f53cc4db8dd7b3E(v10, v11, v12) + unreachable + +block3: + v6 = cast v0 : u32 + v7 = cast v1 : u32 + v8 = mod v6, v7 : u32 + v9 = cast v8 : i32 + ret v9 +} + +pub fn shr_s(i32, i32) -> i32 { +block0(v0: i32, v1: i32): + v3 = shr v0, v1 : i32 + br block1(v3) + +block1(v2: i32): + ret v2 +} + +pub fn shr_u(i32, i32) -> i32 { +block0(v0: i32, v1: i32): + v3 = cast v0 : u32 + v4 = cast v1 : u32 + v5 = shr v3, v4 : u32 + v6 = cast v5 : i32 + br block1(v6) + +block1(v2: i32): + ret v2 +} + +pub fn __main() -> i32 { +block0: + v1 = const.i32 -8 : i32 + v2 = const.i32 -4 : i32 + v3 = call noname::div_s(v1, v2) : i32 + v4 = const.i32 -8 : i32 + v5 = const.i32 -3 : i32 + v6 = call noname::rem_s(v4, v5) : i32 + v7 = add v3, v6 : i32 + v8 = const.i32 -16 : i32 + v9 = const.i32 2 : i32 + v10 = call noname::shr_s(v8, v9) : i32 + v11 = add v7, v10 : i32 + v12 = const.i32 8 : i32 + v13 = const.i32 4 : i32 + v14 = call noname::div_u(v12, v13) : i32 + v15 = add v11, v14 : i32 + v16 = const.i32 8 : i32 + v17 = const.i32 3 : i32 + v18 = call noname::rem_u(v16, v17) : i32 + v19 = add v15, v18 : i32 + v20 = const.i32 16 : i32 + v21 = const.i32 2 : i32 + v22 = call noname::shr_u(v20, v21) : i32 + v23 = add v19, v22 : i32 + br block1(v23) + +block1(v0: i32): + ret v0 +} + +pub fn _ZN4core3ptr37drop_in_place$LT$core..fmt..Error$GT$17h282a1f10dc7e004dE(i32) { +block0(v0: i32): + br block1 + +block1: + ret +} + +pub fn _ZN4core9panicking9panic_fmt17h9f61a1f2faa523f9E(i32, i32) { +block0(v0: i32, v1: i32): + v2 = const.i32 0 : i32 + v3 = global.load (@__stack_pointer) as *mut i8 : i32 + v4 = const.i32 32 : i32 + v5 = sub v3, v4 : i32 + v6 = global.symbol @__stack_pointer : *mut i32 + store v6, v5 + v7 = cast v5 : u32 + v8 = add v7, 24 : u32 + v9 = inttoptr v8 : *mut i32 + store v9, v0 + v10 = const.i32 1048896 : i32 + v11 = cast v5 : u32 + v12 = add v11, 16 : u32 + v13 = inttoptr v12 : *mut i32 + store v13, v10 + v14 = const.i32 1048896 : i32 + v15 = cast v5 : u32 + v16 = add v15, 12 : u32 + v17 = inttoptr v16 : *mut i32 + store v17, v14 + v18 = const.i32 1 : i32 + v19 = trunc v18 : u8 + v20 = cast v5 : u32 + v21 = add v20, 28 : u32 + v22 = inttoptr v21 : *mut u8 + store v22, v19 + v23 = cast v5 : u32 + v24 = add v23, 20 : u32 + v25 = inttoptr v24 : *mut i32 + store v25, v1 + v26 = const.i32 12 : i32 + v27 = add v5, v26 : i32 + call noname::rust_begin_unwind(v27) + unreachable + +block1: +} + +pub fn _ZN4core9panicking5panic17h62f53cc4db8dd7b3E(i32, i32, i32) { +block0(v0: i32, v1: i32, v2: i32): + v3 = const.i32 0 : i32 + v4 = global.load (@__stack_pointer) as *mut i8 : i32 + v5 = const.i32 32 : i32 + v6 = sub v4, v5 : i32 + v7 = global.symbol @__stack_pointer : *mut i32 + store v7, v6 + v8 = const.i32 12 : i32 + v9 = add v6, v8 : i32 + v10 = const.i64 0 : i64 + v11 = cast v9 : u32 + v12 = inttoptr v11 : *mut i64 + store v12, v10 + v13 = const.i32 1 : i32 + v14 = cast v6 : u32 + v15 = add v14, 4 : u32 + v16 = inttoptr v15 : *mut i32 + store v16, v13 + v17 = const.i32 1048896 : i32 + v18 = cast v6 : u32 + v19 = add v18, 8 : u32 + v20 = inttoptr v19 : *mut i32 + store v20, v17 + v21 = cast v6 : u32 + v22 = add v21, 28 : u32 + v23 = inttoptr v22 : *mut i32 + store v23, v1 + v24 = cast v6 : u32 + v25 = add v24, 24 : u32 + v26 = inttoptr v25 : *mut i32 + store v26, v0 + v27 = const.i32 24 : i32 + v28 = add v6, v27 : i32 + v29 = cast v6 : u32 + v30 = inttoptr v29 : *mut i32 + store v30, v28 + call noname::_ZN4core9panicking9panic_fmt17h9f61a1f2faa523f9E(v6, v2) + unreachable + +block1: 
+} + +pub fn _ZN36_$LT$T$u20$as$u20$core..any..Any$GT$7type_id17h29327df37c6e3023E(i32, i32) { +block0(v0: i32, v1: i32): + v2 = const.i64 -1688046730280208939 : i64 + v3 = cast v0 : u32 + v4 = add v3, 8 : u32 + v5 = inttoptr v4 : *mut i64 + store v5, v2 + v6 = const.i64 -2518113060735759681 : i64 + v7 = cast v0 : u32 + v8 = inttoptr v7 : *mut i64 + store v8, v6 + br block1 + +block1: + ret +} diff --git a/frontend-wasm/tests/expected/signed_arith.wat b/frontend-wasm/tests/expected/signed_arith.wat new file mode 100644 index 00000000..22b2abe3 --- /dev/null +++ b/frontend-wasm/tests/expected/signed_arith.wat @@ -0,0 +1,227 @@ +(module + (type (;0;) (func (param i32))) + (type (;1;) (func (param i32 i32) (result i32))) + (type (;2;) (func (result i32))) + (type (;3;) (func (param i32 i32))) + (type (;4;) (func (param i32 i32 i32))) + (func $rust_begin_unwind (;0;) (type 0) (param i32) + loop ;; label = @1 + br 0 (;@1;) + end + ) + (func $div_s (;1;) (type 1) (param i32 i32) (result i32) + block ;; label = @1 + block ;; label = @2 + local.get 1 + i32.eqz + br_if 0 (;@2;) + local.get 0 + i32.const -2147483648 + i32.ne + br_if 1 (;@1;) + local.get 1 + i32.const -1 + i32.ne + br_if 1 (;@1;) + i32.const 1048704 + i32.const 31 + i32.const 1048648 + call $_ZN4core9panicking5panic17h62f53cc4db8dd7b3E + unreachable + end + i32.const 1048672 + i32.const 25 + i32.const 1048648 + call $_ZN4core9panicking5panic17h62f53cc4db8dd7b3E + unreachable + end + local.get 0 + local.get 1 + i32.div_s + ) + (func $div_u (;2;) (type 1) (param i32 i32) (result i32) + block ;; label = @1 + local.get 1 + i32.eqz + br_if 0 (;@1;) + local.get 0 + local.get 1 + i32.div_u + return + end + i32.const 1048672 + i32.const 25 + i32.const 1048736 + call $_ZN4core9panicking5panic17h62f53cc4db8dd7b3E + unreachable + ) + (func $rem_s (;3;) (type 1) (param i32 i32) (result i32) + block ;; label = @1 + block ;; label = @2 + local.get 1 + i32.eqz + br_if 0 (;@2;) + local.get 0 + i32.const -2147483648 + i32.ne + br_if 1 (;@1;) + local.get 1 + i32.const -1 + i32.ne + br_if 1 (;@1;) + i32.const 1048832 + i32.const 48 + i32.const 1048752 + call $_ZN4core9panicking5panic17h62f53cc4db8dd7b3E + unreachable + end + i32.const 1048768 + i32.const 57 + i32.const 1048752 + call $_ZN4core9panicking5panic17h62f53cc4db8dd7b3E + unreachable + end + local.get 0 + local.get 1 + i32.rem_s + ) + (func $rem_u (;4;) (type 1) (param i32 i32) (result i32) + block ;; label = @1 + local.get 1 + i32.eqz + br_if 0 (;@1;) + local.get 0 + local.get 1 + i32.rem_u + return + end + i32.const 1048768 + i32.const 57 + i32.const 1048880 + call $_ZN4core9panicking5panic17h62f53cc4db8dd7b3E + unreachable + ) + (func $shr_s (;5;) (type 1) (param i32 i32) (result i32) + local.get 0 + local.get 1 + i32.shr_s + ) + (func $shr_u (;6;) (type 1) (param i32 i32) (result i32) + local.get 0 + local.get 1 + i32.shr_u + ) + (func $__main (;7;) (type 2) (result i32) + i32.const -8 + i32.const -4 + call $div_s + i32.const -8 + i32.const -3 + call $rem_s + i32.add + i32.const -16 + i32.const 2 + call $shr_s + i32.add + i32.const 8 + i32.const 4 + call $div_u + i32.add + i32.const 8 + i32.const 3 + call $rem_u + i32.add + i32.const 16 + i32.const 2 + call $shr_u + i32.add + ) + (func $_ZN4core3ptr37drop_in_place$LT$core..fmt..Error$GT$17h282a1f10dc7e004dE (;8;) (type 0) (param i32)) + (func $_ZN4core9panicking9panic_fmt17h9f61a1f2faa523f9E (;9;) (type 3) (param i32 i32) + (local i32) + global.get $__stack_pointer + i32.const 32 + i32.sub + local.tee 2 + global.set $__stack_pointer + 
local.get 2 + local.get 0 + i32.store offset=24 + local.get 2 + i32.const 1048896 + i32.store offset=16 + local.get 2 + i32.const 1048896 + i32.store offset=12 + local.get 2 + i32.const 1 + i32.store8 offset=28 + local.get 2 + local.get 1 + i32.store offset=20 + local.get 2 + i32.const 12 + i32.add + call $rust_begin_unwind + unreachable + ) + (func $_ZN4core9panicking5panic17h62f53cc4db8dd7b3E (;10;) (type 4) (param i32 i32 i32) + (local i32) + global.get $__stack_pointer + i32.const 32 + i32.sub + local.tee 3 + global.set $__stack_pointer + local.get 3 + i32.const 12 + i32.add + i64.const 0 + i64.store align=4 + local.get 3 + i32.const 1 + i32.store offset=4 + local.get 3 + i32.const 1048896 + i32.store offset=8 + local.get 3 + local.get 1 + i32.store offset=28 + local.get 3 + local.get 0 + i32.store offset=24 + local.get 3 + local.get 3 + i32.const 24 + i32.add + i32.store + local.get 3 + local.get 2 + call $_ZN4core9panicking9panic_fmt17h9f61a1f2faa523f9E + unreachable + ) + (func $_ZN36_$LT$T$u20$as$u20$core..any..Any$GT$7type_id17h29327df37c6e3023E (;11;) (type 3) (param i32 i32) + local.get 0 + i64.const -1688046730280208939 + i64.store offset=8 + local.get 0 + i64.const -2518113060735759681 + i64.store + ) + (table (;0;) 3 3 funcref) + (memory (;0;) 17) + (global $__stack_pointer (;0;) (mut i32) i32.const 1048576) + (global (;1;) i32 i32.const 1048912) + (global (;2;) i32 i32.const 1048912) + (export "memory" (memory 0)) + (export "div_s" (func $div_s)) + (export "div_u" (func $div_u)) + (export "rem_s" (func $rem_s)) + (export "rem_u" (func $rem_u)) + (export "shr_s" (func $shr_s)) + (export "shr_u" (func $shr_u)) + (export "__main" (func $__main)) + (export "__data_end" (global 1)) + (export "__heap_base" (global 2)) + (elem (;0;) (i32.const 1) func $_ZN4core3ptr37drop_in_place$LT$core..fmt..Error$GT$17h282a1f10dc7e004dE $_ZN36_$LT$T$u20$as$u20$core..any..Any$GT$7type_id17h29327df37c6e3023E) + (data $.rodata (;0;) (i32.const 1048576) "/tmp/6c3d0db843d22d28fff49dffc552879651b21c3f44039227473ff2d47441c4f3.rs\00\00\10\00H\00\00\00\0c\00\00\00\05\00\00\00\00\00\00\00\00\00\00\00attempt to divide by zero\00\00\00\00\00\00\00attempt to divide with overflow\00\00\00\10\00H\00\00\00\12\00\00\00\05\00\00\00\00\00\10\00H\00\00\00\18\00\00\00\05\00\00\00attempt to calculate the remainder with a divisor of zero\00\00\00\00\00\00\00attempt to calculate the remainder with overflow\00\00\10\00H\00\00\00\1e\00\00\00\05\00\00\00\01\00\00\00\00\00\00\00\01\00\00\00\02\00\00\00") +) \ No newline at end of file diff --git a/frontend-wasm/tests/rust_source/add.rs b/frontend-wasm/tests/rust_source/add.rs new file mode 100644 index 00000000..ea10f38e --- /dev/null +++ b/frontend-wasm/tests/rust_source/add.rs @@ -0,0 +1,20 @@ +#![no_std] +#![no_main] + +#[panic_handler] +fn my_panic(_info: &core::panic::PanicInfo) -> ! { + loop {} +} + +#[inline(never)] +#[no_mangle] +fn add(a: i32, b: i32) -> i32 { + a + b +} + +#[no_mangle] +pub extern "C" fn __main() -> i32 { + let a = 1; + let b = 2; + add(a, b) +} diff --git a/frontend-wasm/tests/rust_source/array.rs b/frontend-wasm/tests/rust_source/array.rs new file mode 100644 index 00000000..8dab20c1 --- /dev/null +++ b/frontend-wasm/tests/rust_source/array.rs @@ -0,0 +1,18 @@ +#![no_std] +#![no_main] + +#[panic_handler] +fn my_panic(_info: &core::panic::PanicInfo) -> ! 
{ + loop {} +} + +#[inline(never)] +#[no_mangle] +pub fn sum_arr(arr: &[u32]) -> u32 { + arr.iter().sum() +} + +#[no_mangle] +pub extern "C" fn __main() -> u32 { + sum_arr(&[1, 2, 3, 4, 5]) + sum_arr(&[6, 7, 8, 9, 10]) +} diff --git a/frontend-wasm/tests/rust_source/enum.rs b/frontend-wasm/tests/rust_source/enum.rs new file mode 100644 index 00000000..566f493f --- /dev/null +++ b/frontend-wasm/tests/rust_source/enum.rs @@ -0,0 +1,28 @@ +#![no_std] +#![no_main] + +#[panic_handler] +fn my_panic(_info: &core::panic::PanicInfo) -> ! { + loop {} +} + +enum Op { + Add, + Sub, + Mul, +} + +#[inline(never)] +#[no_mangle] +fn match_enum(a: u32, b: u32, foo: Op) -> u32 { + match foo { + Op::Add => a + b, + Op::Sub => a - b, + Op::Mul => a * b, + } +} + +#[no_mangle] +pub extern "C" fn __main() -> u32 { + match_enum(3, 5, Op::Add) + match_enum(3, 5, Op::Sub) + match_enum(3, 5, Op::Mul) +} diff --git a/frontend-wasm/tests/rust_source/fib.rs b/frontend-wasm/tests/rust_source/fib.rs new file mode 100644 index 00000000..8c7b9b98 --- /dev/null +++ b/frontend-wasm/tests/rust_source/fib.rs @@ -0,0 +1,25 @@ +#![no_std] +#![no_main] + +#[panic_handler] +fn my_panic(_info: &core::panic::PanicInfo) -> ! { + loop {} +} + +#[inline(never)] +#[no_mangle] +pub fn fib(n: u32) -> u32 { + let mut a = 0; + let mut b = 1; + for _ in 0..n { + let c = a + b; + a = b; + b = c; + } + a +} + +#[no_mangle] +pub extern "C" fn __main() -> u32 { + fib(25) +} diff --git a/frontend-wasm/tests/rust_source/signed_arith.rs b/frontend-wasm/tests/rust_source/signed_arith.rs new file mode 100644 index 00000000..14c4330e --- /dev/null +++ b/frontend-wasm/tests/rust_source/signed_arith.rs @@ -0,0 +1,49 @@ +#![no_std] +#![no_main] + +#[panic_handler] +fn my_panic(_info: &core::panic::PanicInfo) -> ! { + loop {} +} + +#[inline(never)] +#[no_mangle] +fn div_s(a: i32, b: i32) -> i32 { + a / b +} + +#[inline(never)] +#[no_mangle] +fn div_u(a: u32, b: u32) -> u32 { + a / b +} + +#[inline(never)] +#[no_mangle] +fn rem_s(a: i32, b: i32) -> i32 { + a % b +} + +#[inline(never)] +#[no_mangle] +fn rem_u(a: u32, b: u32) -> u32 { + a % b +} + +#[inline(never)] +#[no_mangle] +fn shr_s(a: i32, b: i32) -> i32 { + a >> b +} + +#[inline(never)] +#[no_mangle] +fn shr_u(a: u32, b: u32) -> u32 { + a >> b +} + +#[no_mangle] +pub extern "C" fn __main() -> i32 { + div_s(-8, -4) + rem_s(-8, -3) + shr_s(-16, 2) + + (div_u(8, 4) + rem_u(8, 3) + shr_u(16, 2)) as i32 +} diff --git a/frontend-wasm/tests/rust_source/static_mut.rs b/frontend-wasm/tests/rust_source/static_mut.rs new file mode 100644 index 00000000..2d82148f --- /dev/null +++ b/frontend-wasm/tests/rust_source/static_mut.rs @@ -0,0 +1,23 @@ +#![no_std] +#![no_main] + +#[panic_handler] +fn my_panic(_info: &core::panic::PanicInfo) -> ! 
{ + loop {} +} + +static mut G1: [u8; 9] = [1, 2, 3, 4, 5, 6, 7, 8, 9]; + +#[inline(never)] +#[no_mangle] +fn global_var_update() { + unsafe { + G1[0] = G1[1] + 1; + } +} + +#[no_mangle] +pub extern "C" fn __main() -> u32 { + global_var_update(); + unsafe { G1.into_iter().sum::<u8>() as u32 } +} diff --git a/frontend-wasm/tests/test_rust_comp.rs b/frontend-wasm/tests/test_rust_comp.rs new file mode 100644 index 00000000..650ede30 --- /dev/null +++ b/frontend-wasm/tests/test_rust_comp.rs @@ -0,0 +1,693 @@ +use expect_test::expect; +use expect_test::expect_file; +use std::fs; +use std::io::Read; +use std::path::Path; +use std::process::Command; +use std::sync::Arc; + +use miden_diagnostics::term::termcolor::ColorChoice; +use miden_diagnostics::CodeMap; +use miden_diagnostics::DefaultEmitter; +use miden_diagnostics::DiagnosticsConfig; +use miden_diagnostics::DiagnosticsHandler; +use miden_diagnostics::Emitter; +use miden_diagnostics::NullEmitter; +use miden_diagnostics::Verbosity; +use miden_frontend_wasm::translate_module; +use miden_frontend_wasm::WasmTranslationConfig; + +fn hash_string(inputs: &[&str]) -> String { + use sha2::{Digest, Sha256}; + let mut hasher = Sha256::new(); + for input in inputs { + hasher.update(input); + } + format!("{:x}", hasher.finalize()) +} + +fn compile_wasm(rust_source: &str) -> Vec<u8> { + let rustc_opts = [ + "-C", + "opt-level=z", // optimize for size + "--target", + "wasm32-unknown-unknown", + ]; + let file_name = hash_string(&[rust_source]); + let temp_dir = std::env::temp_dir(); + let input_file = temp_dir.join(format!("{file_name}.rs")); + let output_file = temp_dir.join(format!("{file_name}.wasm")); + fs::write(&input_file, rust_source).unwrap(); + let output = Command::new("rustc") + .args(&rustc_opts) + .arg(&input_file) + .arg("-o") + .arg(&output_file) + .output() + .expect("Failed to execute rustc."); + if !output.status.success() { + eprintln!("{}", String::from_utf8_lossy(&output.stderr)); + panic!("Rust to Wasm compilation failed!"); + } + let wasm = fs::read(&output_file).unwrap(); + fs::remove_file(&input_file).unwrap(); + fs::remove_file(&output_file).unwrap(); + return wasm; +} + +pub fn check_ir_files_cargo( + bin_name: &str, + expected_wat_file: expect_test::ExpectFile, + expected_ir_file: expect_test::ExpectFile, +) { + let bundle_name = "rust-wasm-tests"; + let manifest_path = format!("../tests/{}/Cargo.toml", bundle_name); + // dbg!(&pwd); + let temp_dir = std::env::temp_dir(); + let target_dir = temp_dir.join(format!("{bundle_name}-cargo/")); + let output = Command::new("cargo") + .arg("build") + .arg("--manifest-path") + .arg(manifest_path) + .arg("--release") + .arg("--bins") + .arg("--target=wasm32-unknown-unknown") + .arg("--features=wasm-target") + .arg("--target-dir") + .arg(target_dir.clone()) + // compile std as part of crate graph compilation + // https://doc.rust-lang.org/cargo/reference/unstable.html#build-std + .arg("-Z") + .arg("build-std=core,alloc") + .arg("-Z") + // abort on panic without message formatting (core::fmt uses call_indirect) + .arg("build-std-features=panic_immediate_abort") + .output() + .expect("Failed to execute cargo build."); + if !output.status.success() { + eprintln!("{}", String::from_utf8_lossy(&output.stderr)); + panic!("Rust to Wasm compilation failed!"); + } + let target_bin_file_path = Path::new(&target_dir) + .join("wasm32-unknown-unknown") + .join("release") + .join(bin_name) + .with_extension("wasm"); + let mut target_bin_file = fs::File::open(target_bin_file_path).unwrap(); + let mut wasm_bytes =
vec![]; + Read::read_to_end(&mut target_bin_file, &mut wasm_bytes).unwrap(); + fs::remove_dir_all(target_dir).unwrap(); + + let wat = wasm_to_wat(&wasm_bytes); + expected_wat_file.assert_eq(&wat); + let module = translate(wasm_bytes); + expected_ir_file.assert_eq(&module.to_string()); +} + +fn check_ir( + rust_source: &str, + expected_wat: expect_test::Expect, + expected_ir: expect_test::Expect, +) { + let wasm_bytes = compile_wasm(rust_source); + let wat = wasm_to_wat(&wasm_bytes); + expected_wat.assert_eq(&wat); + let module = translate(wasm_bytes); + expected_ir.assert_eq(&module.to_string()); +} + +#[allow(dead_code)] +fn check_ir_files( + rust_source: &str, + expected_wat_file: expect_test::ExpectFile, + expected_ir_file: expect_test::ExpectFile, +) { + let wasm_bytes = compile_wasm(rust_source); + let wat = wasm_to_wat(&wasm_bytes); + expected_wat_file.assert_eq(&wat); + let module = translate(wasm_bytes); + expected_ir_file.assert_eq(&module.to_string()); +} + +fn wasm_to_wat(wasm_bytes: &Vec<u8>) -> String { + let mut wasm_printer = wasmprinter::Printer::new(); + // disable printing of the "producers" section because it contains a rustc version, + // so that tests don't break when rustc is updated + wasm_printer.add_custom_section_printer("producers", |_, _, _| Ok(())); + let wat = wasm_printer.print(wasm_bytes.as_ref()).unwrap(); + wat +} + +fn translate(wasm_bytes: Vec<u8>) -> miden_hir::Module { + let codemap = Arc::new(CodeMap::new()); + let diagnostics = DiagnosticsHandler::new( + DiagnosticsConfig { + verbosity: Verbosity::Debug, + warnings_as_errors: false, + no_warn: false, + display: Default::default(), + }, + codemap, + default_emitter(Verbosity::Debug, ColorChoice::Auto), + ); + let module = + translate_module(&wasm_bytes, &WasmTranslationConfig::default(), &diagnostics).unwrap(); + module +} + +fn default_emitter(verbosity: Verbosity, color: ColorChoice) -> Arc<dyn Emitter> { + match verbosity { + Verbosity::Silent => Arc::new(NullEmitter::new(color)), + _ => Arc::new(DefaultEmitter::new(color)), + } +} + +#[test] +fn rust_add() { + check_ir( + include_str!("rust_source/add.rs"), + expect![[r#" + (module + (type (;0;) (func (param i32 i32) (result i32))) + (type (;1;) (func (result i32))) + (func $add (;0;) (type 0) (param i32 i32) (result i32) + local.get 1 + local.get 0 + i32.add + ) + (func $__main (;1;) (type 1) (result i32) + i32.const 1 + i32.const 2 + call $add + ) + (memory (;0;) 16) + (global $__stack_pointer (;0;) (mut i32) i32.const 1048576) + (global (;1;) i32 i32.const 1048576) + (global (;2;) i32 i32.const 1048576) + (export "memory" (memory 0)) + (export "add" (func $add)) + (export "__main" (func $__main)) + (export "__data_end" (global 1)) + (export "__heap_base" (global 2)) + )"#]], + expect![[r#" + module noname + global external __stack_pointer : i32 = 0x00100000 { id = gvar0 }; + global external gv1 : i32 = 0x00100000 { id = gvar1 }; + global external gv2 : i32 = 0x00100000 { id = gvar2 }; + + + pub fn add(i32, i32) -> i32 { + block0(v0: i32, v1: i32): + v3 = add v1, v0 : i32 + br block1(v3) + + block1(v2: i32): + ret v2 + } + + pub fn __main() -> i32 { + block0: + v1 = const.i32 1 : i32 + v2 = const.i32 2 : i32 + v3 = call noname::add(v1, v2) : i32 + br block1(v3) + + block1(v0: i32): + ret v0 + } + "#]], + ); +} + +#[test] +fn rust_fib() { + check_ir( + include_str!("rust_source/fib.rs"), + expect![[r#" + (module + (type (;0;) (func (param i32) (result i32))) + (type (;1;) (func (result i32))) + (func $fib (;0;) (type 0) (param i32) (result i32) + (local i32 i32 i32) + 
i32.const 0 + local.set 1 + i32.const 1 + local.set 2 + loop (result i32) ;; label = @1 + local.get 2 + local.set 3 + block ;; label = @2 + local.get 0 + br_if 0 (;@2;) + local.get 1 + return + end + local.get 0 + i32.const -1 + i32.add + local.set 0 + local.get 1 + local.get 3 + i32.add + local.set 2 + local.get 3 + local.set 1 + br 0 (;@1;) + end + ) + (func $__main (;1;) (type 1) (result i32) + i32.const 25 + call $fib + ) + (memory (;0;) 16) + (global $__stack_pointer (;0;) (mut i32) i32.const 1048576) + (global (;1;) i32 i32.const 1048576) + (global (;2;) i32 i32.const 1048576) + (export "memory" (memory 0)) + (export "fib" (func $fib)) + (export "__main" (func $__main)) + (export "__data_end" (global 1)) + (export "__heap_base" (global 2)) + )"#]], + expect![[r#" + module noname + global external __stack_pointer : i32 = 0x00100000 { id = gvar0 }; + global external gv1 : i32 = 0x00100000 { id = gvar1 }; + global external gv2 : i32 = 0x00100000 { id = gvar2 }; + + + pub fn fib(i32) -> i32 { + block0(v0: i32): + v2 = const.i32 0 : i32 + v3 = const.i32 0 : i32 + v4 = const.i32 1 : i32 + br block2(v4, v0, v3) + + block1(v1: i32): + + block2(v6: i32, v7: i32, v9: i32): + v8 = neq v7, 0 : i1 + condbr v8, block4, block5 + + block3(v5: i32): + + block4: + v10 = const.i32 -1 : i32 + v11 = add v7, v10 : i32 + v12 = add v9, v6 : i32 + br block2(v12, v11, v6) + + block5: + ret v9 + } + + pub fn __main() -> i32 { + block0: + v1 = const.i32 25 : i32 + v2 = call noname::fib(v1) : i32 + br block1(v2) + + block1(v0: i32): + ret v0 + } + "#]], + ); +} + +#[test] +fn rust_enum() { + check_ir( + include_str!("rust_source/enum.rs"), + expect![[r#" + (module + (type (;0;) (func (param i32 i32 i32) (result i32))) + (type (;1;) (func (result i32))) + (func $match_enum (;0;) (type 0) (param i32 i32 i32) (result i32) + block ;; label = @1 + block ;; label = @2 + block ;; label = @3 + local.get 2 + i32.const 255 + i32.and + br_table 0 (;@3;) 1 (;@2;) 2 (;@1;) 0 (;@3;) + end + local.get 1 + local.get 0 + i32.add + return + end + local.get 0 + local.get 1 + i32.sub + return + end + local.get 1 + local.get 0 + i32.mul + ) + (func $__main (;1;) (type 1) (result i32) + i32.const 3 + i32.const 5 + i32.const 0 + call $match_enum + i32.const 3 + i32.const 5 + i32.const 1 + call $match_enum + i32.add + i32.const 3 + i32.const 5 + i32.const 2 + call $match_enum + i32.add + ) + (memory (;0;) 16) + (global $__stack_pointer (;0;) (mut i32) i32.const 1048576) + (global (;1;) i32 i32.const 1048576) + (global (;2;) i32 i32.const 1048576) + (export "memory" (memory 0)) + (export "match_enum" (func $match_enum)) + (export "__main" (func $__main)) + (export "__data_end" (global 1)) + (export "__heap_base" (global 2)) + )"#]], + expect![[r#" + module noname + global external __stack_pointer : i32 = 0x00100000 { id = gvar0 }; + global external gv1 : i32 = 0x00100000 { id = gvar1 }; + global external gv2 : i32 = 0x00100000 { id = gvar2 }; + + + pub fn match_enum(i32, i32, i32) -> i32 { + block0(v0: i32, v1: i32, v2: i32): + v4 = const.i32 255 : i32 + v5 = band v2, v4 : i32 + v6 = cast v5 : u32 + switch v6, 0 => block4, 1 => block3, 2 => block2, block4 + + block1(v3: i32): + ret v3 + + block2: + v9 = mul v1, v0 : i32 + br block1(v9) + + block3: + v8 = sub v0, v1 : i32 + ret v8 + + block4: + v7 = add v1, v0 : i32 + ret v7 + } + + pub fn __main() -> i32 { + block0: + v1 = const.i32 3 : i32 + v2 = const.i32 5 : i32 + v3 = const.i32 0 : i32 + v4 = call noname::match_enum(v1, v2, v3) : i32 + v5 = const.i32 3 : i32 + v6 = const.i32 5 : i32 
+ v7 = const.i32 1 : i32 + v8 = call noname::match_enum(v5, v6, v7) : i32 + v9 = add v4, v8 : i32 + v10 = const.i32 3 : i32 + v11 = const.i32 5 : i32 + v12 = const.i32 2 : i32 + v13 = call noname::match_enum(v10, v11, v12) : i32 + v14 = add v9, v13 : i32 + br block1(v14) + + block1(v0: i32): + ret v0 + } + "#]], + ) +} + +#[test] +fn rust_array() { + check_ir( + include_str!("rust_source/array.rs"), + expect![[r#" + (module + (type (;0;) (func (param i32 i32) (result i32))) + (type (;1;) (func (result i32))) + (func $sum_arr (;0;) (type 0) (param i32 i32) (result i32) + (local i32) + i32.const 0 + local.set 2 + block ;; label = @1 + local.get 1 + i32.eqz + br_if 0 (;@1;) + loop ;; label = @2 + local.get 0 + i32.load + local.get 2 + i32.add + local.set 2 + local.get 0 + i32.const 4 + i32.add + local.set 0 + local.get 1 + i32.const -1 + i32.add + local.tee 1 + br_if 0 (;@2;) + end + end + local.get 2 + ) + (func $__main (;1;) (type 1) (result i32) + i32.const 1048576 + i32.const 5 + call $sum_arr + i32.const 1048596 + i32.const 5 + call $sum_arr + i32.add + ) + (memory (;0;) 17) + (global $__stack_pointer (;0;) (mut i32) i32.const 1048576) + (global (;1;) i32 i32.const 1048616) + (global (;2;) i32 i32.const 1048624) + (export "memory" (memory 0)) + (export "sum_arr" (func $sum_arr)) + (export "__main" (func $__main)) + (export "__data_end" (global 1)) + (export "__heap_base" (global 2)) + (data $.rodata (;0;) (i32.const 1048576) "\01\00\00\00\02\00\00\00\03\00\00\00\04\00\00\00\05\00\00\00\06\00\00\00\07\00\00\00\08\00\00\00\09\00\00\00\0a\00\00\00") + )"#]], + expect![[r#" + module noname + + memory { + segment @0x100000 x 40 = 0x0000000a000000090000000800000007000000060000000500000004000000030000000200000001; + } + + global external __stack_pointer : i32 = 0x00100000 { id = gvar0 }; + global external gv1 : i32 = 0x00100028 { id = gvar1 }; + global external gv2 : i32 = 0x00100030 { id = gvar2 }; + + + pub fn sum_arr(i32, i32) -> i32 { + block0(v0: i32, v1: i32): + v3 = const.i32 0 : i32 + v4 = const.i32 0 : i32 + v5 = eq v1, 0 : i1 + v6 = cast v5 : i32 + v7 = neq v6, 0 : i1 + condbr v7, block2(v4), block3 + + block1(v2: i32): + ret v2 + + block2(v20: i32): + br block1(v20) + + block3: + br block4(v0, v4, v1) + + block4(v8: i32, v12: i32, v16: i32): + v9 = cast v8 : u32 + v10 = inttoptr v9 : *mut i32 + v11 = load v10 : i32 + v13 = add v11, v12 : i32 + v14 = const.i32 4 : i32 + v15 = add v8, v14 : i32 + v17 = const.i32 -1 : i32 + v18 = add v16, v17 : i32 + v19 = neq v18, 0 : i1 + condbr v19, block4(v15, v13, v18), block6 + + block5: + br block2(v13) + + block6: + br block5 + } + + pub fn __main() -> i32 { + block0: + v1 = const.i32 1048576 : i32 + v2 = const.i32 5 : i32 + v3 = call noname::sum_arr(v1, v2) : i32 + v4 = const.i32 1048596 : i32 + v5 = const.i32 5 : i32 + v6 = call noname::sum_arr(v4, v5) : i32 + v7 = add v3, v6 : i32 + br block1(v7) + + block1(v0: i32): + ret v0 + } + "#]], + ) +} + +#[test] +fn rust_static_mut() { + check_ir( + include_str!("rust_source/static_mut.rs"), + expect![[r#" + (module + (type (;0;) (func)) + (type (;1;) (func (result i32))) + (func $global_var_update (;0;) (type 0) + i32.const 0 + i32.const 0 + i32.load8_u offset=1048577 + i32.const 1 + i32.add + i32.store8 offset=1048576 + ) + (func $__main (;1;) (type 1) (result i32) + (local i32 i32 i32) + call $global_var_update + i32.const 0 + local.set 0 + i32.const -9 + local.set 1 + loop ;; label = @1 + local.get 1 + i32.const 1048585 + i32.add + i32.load8_u + local.get 0 + i32.add + local.set 0 + local.get 1 
+ i32.const 1 + i32.add + local.tee 2 + local.set 1 + local.get 2 + br_if 0 (;@1;) + end + local.get 0 + i32.const 255 + i32.and + ) + (memory (;0;) 17) + (global $__stack_pointer (;0;) (mut i32) i32.const 1048576) + (global (;1;) i32 i32.const 1048585) + (global (;2;) i32 i32.const 1048592) + (export "memory" (memory 0)) + (export "global_var_update" (func $global_var_update)) + (export "__main" (func $__main)) + (export "__data_end" (global 1)) + (export "__heap_base" (global 2)) + (data $.data (;0;) (i32.const 1048576) "\01\02\03\04\05\06\07\08\09") + )"#]], + expect![[r#" + module noname + + memory { + segment @0x100000 x 9 = 0x090807060504030201; + } + + global external __stack_pointer : i32 = 0x00100000 { id = gvar0 }; + global external gv1 : i32 = 0x00100009 { id = gvar1 }; + global external gv2 : i32 = 0x00100010 { id = gvar2 }; + + + pub fn global_var_update() { + block0: + v0 = const.i32 0 : i32 + v1 = const.i32 0 : i32 + v2 = cast v1 : u32 + v3 = add v2, 1048577 : u32 + v4 = inttoptr v3 : *mut u8 + v5 = load v4 : u8 + v6 = zext v5 : i32 + v7 = const.i32 1 : i32 + v8 = add v6, v7 : i32 + v9 = trunc v8 : u8 + v10 = cast v0 : u32 + v11 = add v10, 1048576 : u32 + v12 = inttoptr v11 : *mut u8 + store v12, v9 + br block1 + + block1: + ret + } + + pub fn __main() -> i32 { + block0: + v1 = const.i32 0 : i32 + call noname::global_var_update() + v2 = const.i32 0 : i32 + v3 = const.i32 -9 : i32 + br block2(v3, v2) + + block1(v0: i32): + ret v0 + + block2(v4: i32, v11: i32): + v5 = const.i32 1048585 : i32 + v6 = add v4, v5 : i32 + v7 = cast v6 : u32 + v8 = inttoptr v7 : *mut u8 + v9 = load v8 : u8 + v10 = zext v9 : i32 + v12 = add v10, v11 : i32 + v13 = const.i32 1 : i32 + v14 = add v4, v13 : i32 + v15 = neq v14, 0 : i1 + condbr v15, block2(v14, v12), block4 + + block3: + v16 = const.i32 255 : i32 + v17 = band v12, v16 : i32 + br block1(v17) + + block4: + br block3 + } + "#]], + ); +} + +#[ignore = "hash part in mangled function names is not stable enough"] +#[test] +fn dlmalloc() { + check_ir_files_cargo( + "dlmalloc_app", + expect_file!["./expected/dlmalloc.wat"], + expect_file!["./expected/dlmalloc.mir"], + ) +} + +#[test] +fn signed_arith() { + check_ir_files( + include_str!("rust_source/signed_arith.rs"), + expect_file!["./expected/signed_arith.wat"], + expect_file!["./expected/signed_arith.mir"], + ); +} diff --git a/hir/src/builder.rs b/hir/src/builder.rs index b93160e5..986a65c4 100644 --- a/hir/src/builder.rs +++ b/hir/src/builder.rs @@ -576,6 +576,16 @@ pub trait InstBuilder<'f>: InstBuilderBase<'f> { )) } + fn mem_grow(mut self, value: Value, span: SourceSpan) -> Value { + require_integer!(self, value, Type::U32); + let mut vlist = ValueList::default(); + { + let pool = &mut self.data_flow_graph_mut().value_lists; + vlist.push(value, pool); + } + into_first_result!(self.PrimOp(Opcode::MemGrow, Type::I32, vlist, span,)) + } + /// Get a [GlobalValue] which represents the address of a global variable whose symbol is `name` /// /// On it's own, this does nothing, you must use the resulting [GlobalValue] with a builder diff --git a/hir/src/dataflow.rs b/hir/src/dataflow.rs index f312dc0f..cdb7e6dc 100644 --- a/hir/src/dataflow.rs +++ b/hir/src/dataflow.rs @@ -540,6 +540,10 @@ impl DataFlowGraph { id } + pub fn append_block(&mut self, block: Block) { + self.blocks.append(block, BlockData::new(block)); + } + /// Creates a new block, inserted into the function layout just after `block` pub fn create_block_after(&mut self, block: Block) -> Block { let id = self.blocks.create(); @@ -613,6 
+617,79 @@ impl DataFlowGraph { false } } + + /// Removes `val` from `block`'s parameters by a standard linear time list removal which + /// preserves ordering. Also updates the values' data. + pub fn remove_block_param(&mut self, val: Value) { + let (block, num) = if let ValueData::Param { num, block, .. } = self.values[val] { + (block, num) + } else { + panic!("{} must be a block parameter", val); + }; + self.blocks[block] + .params + .remove(num as usize, &mut self.value_lists); + for index in num..(self.num_block_params(block) as u16) { + let value_data = &mut self.values[self.blocks[block] + .params + .get(index as usize, &self.value_lists) + .unwrap()]; + let mut value_data_clone = value_data.clone(); + match &mut value_data_clone { + ValueData::Param { ref mut num, .. } => { + *num -= 1; + *value_data = value_data_clone.into(); + } + _ => panic!( + "{} must be a block parameter", + self.blocks[block] + .params + .get(index as usize, &self.value_lists) + .unwrap() + ), + } + } + } + + /// Appends `value` as an argument to the `branch_inst` instruction arguments list if the + /// destination block of the `branch_inst` is `dest`. + /// Panics if `branch_inst` is not a branch instruction. + pub fn append_branch_destination_argument( + &mut self, + branch_inst: Inst, + dest: Block, + value: Value, + ) { + match self.insts[branch_inst].data.item { + Instruction::Br(Br { + destination, + ref mut args, + .. + }) if destination == dest => { + args.push(value, &mut self.value_lists); + } + Instruction::CondBr(CondBr { + then_dest: (then_dest, ref mut then_args), + else_dest: (else_dest, ref mut else_args), + .. + }) => { + if then_dest == dest { + then_args.push(value, &mut self.value_lists); + } else if else_dest == dest { + else_args.push(value, &mut self.value_lists); + } + } + Instruction::Switch(Switch { + op: _, + arg: _, + arms: _, + default: _, + }) => { + panic!("cannot append argument {value} to Switch destination block {dest}, since it has no block arguments support"); + } + _ => panic!("{} must be a branch instruction", branch_inst), + } + } } impl Index for DataFlowGraph { type Output = Instruction; diff --git a/hir/src/function.rs b/hir/src/function.rs index 08678654..87cac099 100644 --- a/hir/src/function.rs +++ b/hir/src/function.rs @@ -361,6 +361,10 @@ impl Function { pub fn imports<'a, 'b: 'a>(&'b self) -> impl Iterator + 'a { self.dfg.imports().filter(|ext| ext.id != self.id) } + + pub fn builder(&mut self) -> FunctionBuilder { + FunctionBuilder::new(self) + } } impl fmt::Debug for Function { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { diff --git a/hir/src/instruction.rs b/hir/src/instruction.rs index 5432c76f..8c74e0b2 100644 --- a/hir/src/instruction.rs +++ b/hir/src/instruction.rs @@ -616,7 +616,6 @@ impl Opcode { | Self::Assertz | Self::AssertEq | Self::Store - | Self::MemGrow | Self::MemCpy | Self::Br | Self::CondBr @@ -678,7 +677,8 @@ impl Opcode { | Self::Shl | Self::Shr | Self::Rotl - | Self::Rotr => { + | Self::Rotr + | Self::MemGrow => { smallvec![ctrl_ty] } // The result type of a load is derived from the pointee type diff --git a/hir/src/lib.rs b/hir/src/lib.rs index 93aa8fa0..f7a43ac9 100644 --- a/hir/src/lib.rs +++ b/hir/src/lib.rs @@ -173,7 +173,10 @@ pub use self::module::*; pub use self::program::{Linker, LinkerError, Program, ProgramBuilder}; pub use self::segments::{DataSegment, DataSegmentAdapter, DataSegmentError, DataSegmentTable}; pub use self::value::{Value, ValueData, ValueList, ValueListPool}; -pub use 
self::write::{write_external_function, write_function}; +pub use self::write::{write_external_function, write_function, write_instruction}; + +// Re-export cranelift_entity so that users don't have to hunt for the same version +pub use cranelift_entity; use core::fmt; diff --git a/hir/src/module.rs b/hir/src/module.rs index 45204c91..c29d24bc 100644 --- a/hir/src/module.rs +++ b/hir/src/module.rs @@ -549,6 +549,10 @@ impl ModuleBuilder { self } + pub fn name(&self) -> Ident { + self.module.name + } + pub fn declare_global_variable>( + &mut self, + name: S, @@ -626,6 +630,11 @@ impl<'m> ModuleFunctionBuilder<'m> { self.function.id } + /// Get the signature of the underlying function + pub fn signature(&self) -> &Signature { + &self.function.signature + } + pub fn module<'a, 'b: 'a>(&'b mut self) -> &'a mut ModuleBuilder { self.builder } diff --git a/hir/src/value.rs b/hir/src/value.rs index 3aeef88e..0436bca3 100644 --- a/hir/src/value.rs +++ b/hir/src/value.rs @@ -22,7 +22,7 @@ impl Default for Value { /// Data associated with a `Value`. /// -/// Values are either block arguments or instructions, and +/// Values are either block arguments, instructions or aliases, and /// in addition to being linked to a `Inst` or a `Block`, they /// have an associated type, position, and in some cases, a `SourceSpan`. #[derive(Debug, Clone)] diff --git a/hir/src/write.rs b/hir/src/write.rs index 5a6ab520..30b59464 100644 --- a/hir/src/write.rs +++ b/hir/src/write.rs @@ -114,7 +114,12 @@ pub fn write_block_header( writeln!(w, "):") } -fn write_instruction(w: &mut dyn Write, func: &Function, inst: Inst, indent: usize) -> fmt::Result { +pub fn write_instruction( + w: &mut dyn Write, + func: &Function, + inst: Inst, + indent: usize, +) -> fmt::Result { let s = String::with_capacity(16); write!(w, "{1:0$}", indent, s)?; diff --git a/tests/rust-wasm-tests/Cargo.toml b/tests/rust-wasm-tests/Cargo.toml new file mode 100644 index 00000000..e0154805 --- /dev/null +++ b/tests/rust-wasm-tests/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "rust-wasm-tests" +version = "0.1.0" +edition = "2021" + +[dependencies] +dlmalloc = { version = "0.2.4", features = ["global"] } + +[[bin]] +name = "dlmalloc_app" +path = "src/bin/dlmalloc_app.rs" +test = false +# Since we only need to build it in tests to get the compiled Wasm for testing, we +# hide it behind an artificial feature so that it only builds when this feature is specified +required-features = ["wasm-target"] + +[features] +wasm-target = [] + +[profile.release] +panic = 'abort' # Abort on panic +opt-level = 'z' # Optimize for size diff --git a/tests/rust-wasm-tests/src/bin/dlmalloc_app.rs b/tests/rust-wasm-tests/src/bin/dlmalloc_app.rs new file mode 100644 index 00000000..cfca3f8b --- /dev/null +++ b/tests/rust-wasm-tests/src/bin/dlmalloc_app.rs @@ -0,0 +1,19 @@ +#![no_main] +#![no_std] + +extern crate dlmalloc; + +use rust_wasm_tests::dlmalloc::vec_alloc; + +#[global_allocator] +static A: dlmalloc::GlobalDlmalloc = dlmalloc::GlobalDlmalloc; + +#[panic_handler] +fn my_panic(_info: &core::panic::PanicInfo) -> ! 
{ + loop {} +} + +#[no_mangle] +pub extern "C" fn __main() -> u32 { + vec_alloc() +} diff --git a/tests/rust-wasm-tests/src/dlmalloc.rs b/tests/rust-wasm-tests/src/dlmalloc.rs new file mode 100644 index 00000000..1c36b7b7 --- /dev/null +++ b/tests/rust-wasm-tests/src/dlmalloc.rs @@ -0,0 +1,9 @@ +extern crate alloc; +use alloc::vec::Vec; + +#[no_mangle] +pub fn vec_alloc() -> u32 { + let mut v = Vec::new(); + v.push(1); + v.pop().unwrap() +} diff --git a/tests/rust-wasm-tests/src/lib.rs b/tests/rust-wasm-tests/src/lib.rs new file mode 100644 index 00000000..57d6d200 --- /dev/null +++ b/tests/rust-wasm-tests/src/lib.rs @@ -0,0 +1,3 @@ +#![no_std] + +pub mod dlmalloc; diff --git a/tools/midenc/Cargo.toml b/tools/midenc/Cargo.toml index 1b605538..ae1a2941 100644 --- a/tools/midenc/Cargo.toml +++ b/tools/midenc/Cargo.toml @@ -21,3 +21,4 @@ env_logger.workspace = true human-panic = "1.0" log.workspace = true miden-diagnostics.workspace = true +miden-frontend-wasm.workspace = true diff --git a/tools/midenc/src/compiler/mod.rs b/tools/midenc/src/compiler/mod.rs index 6b97583b..fa7a356b 100644 --- a/tools/midenc/src/compiler/mod.rs +++ b/tools/midenc/src/compiler/mod.rs @@ -2,11 +2,13 @@ mod options; pub use self::options::Options; +use anyhow::anyhow; use std::sync::Arc; use std::time::Instant; use miden_diagnostics::term::termcolor::ColorChoice; use miden_diagnostics::*; +use miden_frontend_wasm::WasmTranslationConfig; use crate::utils::HumanDuration; @@ -30,12 +32,38 @@ pub fn compile( diagnostics.fatal("No inputs found!").raise(); } + if options.input_files.len() > 1 { + diagnostics + .fatal("Multiple Wasm files are not supported!") + .raise(); + } + // Track when compilation began let start = Instant::now(); - // let files = options.input_files.clone(); - // let artifacts = compile()?; - // diagnostics.abort_if_errors(); + let input_file = options.input_files.first().unwrap(); + let wasm_data = match input_file { + FileName::Real(path) => match std::fs::read(path) { + Ok(data) => data, + Err(e) => diagnostics + .fatal(format!( + "error reading file {}, with error {e}", + path.display() + )) + .raise(), + }, + FileName::Virtual(_) => todo!("virtual files are not yet supported"), + }; + let config = WasmTranslationConfig::default(); + let res = miden_frontend_wasm::translate_module(&wasm_data, &config, &diagnostics); + let _module = match res { + Ok(module) => module, + Err(e) => { + diagnostics.emit(e); + return Err(anyhow!("error translating module")); + } + }; + diagnostics.abort_if_errors(); // for x in artifacts { // let dir = options.get_output_dir();