From b4e1f3abf84c0c982b3a0ff83bb7568d2ba7ee88 Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Tue, 6 Aug 2024 08:58:05 +1200 Subject: [PATCH] refactor: add RowIndex(u32) newtype (#1408) --- CHANGELOG.md | 1 + Cargo.lock | 2 + air/Cargo.toml | 3 +- air/src/constraints/chiplets/bitwise/mod.rs | 13 - air/src/constraints/chiplets/bitwise/tests.rs | 72 +++-- air/src/constraints/chiplets/hasher/tests.rs | 26 +- air/src/lib.rs | 1 + air/src/trace/main_trace.rs | 167 +++++----- air/src/trace/mod.rs | 1 + air/src/trace/rows.rs | 301 ++++++++++++++++++ air/src/utils.rs | 3 +- miden/Cargo.toml | 2 +- miden/tests/integration/exec_iters.rs | 51 +-- .../operations/decorators/asmop.rs | 133 ++++---- .../tests/integration/operations/field_ops.rs | 10 +- .../integration/operations/io_ops/adv_ops.rs | 4 +- miden/tests/integration/operations/sys_ops.rs | 8 +- .../operations/u32_ops/arithmetic_ops.rs | 6 +- processor/src/chiplets/aux_trace/mod.rs | 98 +++--- processor/src/chiplets/hasher/tests.rs | 1 - processor/src/chiplets/kernel_rom/mod.rs | 6 +- processor/src/chiplets/memory/mod.rs | 21 +- processor/src/chiplets/memory/segment.rs | 9 +- processor/src/chiplets/memory/tests.rs | 120 +++---- processor/src/chiplets/mod.rs | 31 +- processor/src/debug.rs | 17 +- .../src/decoder/aux_trace/block_hash_table.rs | 22 +- .../decoder/aux_trace/block_stack_table.rs | 9 +- .../src/decoder/aux_trace/op_group_table.rs | 13 +- processor/src/decoder/mod.rs | 19 +- processor/src/errors.rs | 9 +- processor/src/host/debug.rs | 5 +- processor/src/lib.rs | 5 +- processor/src/range/aux_trace.rs | 13 +- processor/src/range/mod.rs | 5 +- processor/src/stack/aux_trace.rs | 6 +- processor/src/stack/mod.rs | 19 +- processor/src/stack/tests.rs | 6 +- processor/src/stack/trace.rs | 45 +-- processor/src/system/mod.rs | 31 +- processor/src/trace/mod.rs | 10 +- processor/src/trace/tests/chiplets/bitwise.rs | 24 +- processor/src/trace/tests/chiplets/hasher.rs | 93 +++--- processor/src/trace/tests/chiplets/memory.rs | 29 +- processor/src/trace/utils.rs | 24 +- stdlib/tests/crypto/falcon.rs | 2 +- stdlib/tests/crypto/native.rs | 4 +- test-utils/Cargo.toml | 1 + 48 files changed, 940 insertions(+), 561 deletions(-) create mode 100644 air/src/trace/rows.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 1d1545d354..6771afbd65 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ - Added source code management primitives in `miden-core` (#1419) - Added `make test-fast` and `make test-skip-proptests` Makefile targets for faster testing during local development - Added `ProgramFile::read_with` constructor that takes a `SourceManager` impl to use for source management +- Added `RowIndex(u32)` (#1408) #### Changed diff --git a/Cargo.lock b/Cargo.lock index 80b44aed47..1466997450 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -998,6 +998,7 @@ dependencies = [ "criterion", "miden-core", "proptest", + "thiserror", "winter-air", "winter-prover", "winter-rand-utils", @@ -1131,6 +1132,7 @@ dependencies = [ name = "miden-test-utils" version = "0.1.0" dependencies = [ + "miden-air", "miden-assembly", "miden-core", "miden-processor", diff --git a/air/Cargo.toml b/air/Cargo.toml index 87d2657df7..07cef453b6 100644 --- a/air/Cargo.toml +++ b/air/Cargo.toml @@ -27,13 +27,14 @@ harness = false [features] default = ["std"] -std = ["vm-core/std", "winter-air/std"] +std = ["vm-core/std", "winter-air/std", "thiserror/std"] testing = [] [dependencies] vm-core = { package = "miden-core", path = "../core", version 
= "0.10", default-features = false } winter-air = { package = "winter-air", version = "0.9", default-features = false } winter-prover = { package = "winter-prover", version = "0.9", default-features = false } +thiserror = { version = "1.0", git = "https://github.com/bitwalker/thiserror", branch = "no-std", default-features = false } [dev-dependencies] criterion = "0.5" diff --git a/air/src/constraints/chiplets/bitwise/mod.rs b/air/src/constraints/chiplets/bitwise/mod.rs index b5450e2066..9f34677c92 100644 --- a/air/src/constraints/chiplets/bitwise/mod.rs +++ b/air/src/constraints/chiplets/bitwise/mod.rs @@ -418,16 +418,3 @@ pub fn agg_bits(row: &[E], start_idx: usize) -> E { pub const BITWISE_K0_MASK: [Felt; OP_CYCLE_LEN] = [ONE, ZERO, ZERO, ZERO, ZERO, ZERO, ZERO, ZERO]; pub const BITWISE_K1_MASK: [Felt; OP_CYCLE_LEN] = [ONE, ONE, ONE, ONE, ONE, ONE, ONE, ZERO]; - -// TEST HELPERS -// ================================================================================================ - -/// Returns the values from the bitwise periodic columns for the specified cycle row. -#[cfg(test)] -fn get_periodic_values(cycle_row: usize) -> [Felt; 2] { - match cycle_row { - 0 => [ONE, ONE], - 8 => [ZERO, ZERO], - _ => [ZERO, ONE], - } -} diff --git a/air/src/constraints/chiplets/bitwise/tests.rs b/air/src/constraints/chiplets/bitwise/tests.rs index 3609f34d08..c824377946 100644 --- a/air/src/constraints/chiplets/bitwise/tests.rs +++ b/air/src/constraints/chiplets/bitwise/tests.rs @@ -1,8 +1,7 @@ use super::{ - enforce_constraints, get_periodic_values, EvaluationFrame, BITWISE_A_COL_IDX, - BITWISE_A_COL_RANGE, BITWISE_B_COL_IDX, BITWISE_B_COL_RANGE, BITWISE_OUTPUT_COL_IDX, - BITWISE_PREV_OUTPUT_COL_IDX, BITWISE_SELECTOR_COL_IDX, NUM_CONSTRAINTS, NUM_DECOMP_BITS, ONE, - OP_CYCLE_LEN, ZERO, + enforce_constraints, EvaluationFrame, BITWISE_A_COL_IDX, BITWISE_A_COL_RANGE, + BITWISE_B_COL_IDX, BITWISE_B_COL_RANGE, BITWISE_OUTPUT_COL_IDX, BITWISE_PREV_OUTPUT_COL_IDX, + BITWISE_SELECTOR_COL_IDX, NUM_CONSTRAINTS, NUM_DECOMP_BITS, ONE, OP_CYCLE_LEN, ZERO, }; use crate::{ trace::{ @@ -12,7 +11,7 @@ use crate::{ }, TRACE_WIDTH, }, - Felt, + Felt, RowIndex, }; use rand_utils::rand_value; @@ -29,7 +28,7 @@ fn test_bitwise_change_ops_fail() { let a = rand_value::(); let b = rand_value::(); - let cycle_row: usize = rand_value::() as usize % (OP_CYCLE_LEN - 1); + let cycle_row: RowIndex = (rand_value::() as usize % (OP_CYCLE_LEN - 1)).into(); let frame = get_test_frame_with_two_ops(BITWISE_XOR, BITWISE_AND, a, b, cycle_row); let result = get_constraint_evaluation(frame, cycle_row); @@ -45,7 +44,7 @@ fn test_bitwise_change_ops_fail() { /// cycle when the low limb of a is one. #[test] fn output_aggregation_and() { - let cycle_row = 0; + let cycle_row: RowIndex = 0.into(); // create a valid test frame manually let mut current = vec![ZERO; TRACE_WIDTH]; @@ -116,8 +115,8 @@ proptest! { #[test] fn test_bitwise_and(a in any::(), b in any::(), cycle_row in 0..(OP_CYCLE_LEN - 1)) { let expected = [ZERO; NUM_CONSTRAINTS]; - let frame = get_test_frame(BITWISE_AND, a, b, cycle_row); - let result = get_constraint_evaluation(frame, cycle_row); + let frame = get_test_frame(BITWISE_AND, a, b, cycle_row.into()); + let result = get_constraint_evaluation(frame, cycle_row.into()); assert_eq!(expected, result); } @@ -126,8 +125,8 @@ proptest! 
{ #[test] fn test_bitwise_xor(a in any::(), b in any::(), cycle_row in 0..(OP_CYCLE_LEN - 1)) { let expected = [ZERO; NUM_CONSTRAINTS]; - let frame = get_test_frame(BITWISE_XOR, a, b, cycle_row); - let result = get_constraint_evaluation(frame, cycle_row); + let frame = get_test_frame(BITWISE_XOR, a, b, cycle_row.into()); + let result = get_constraint_evaluation(frame, cycle_row.into()); assert_eq!(expected, result); } } @@ -137,7 +136,10 @@ proptest! { /// Returns the result of Bitwise constraint evaluations on the provided frame starting at the /// specified row. -fn get_constraint_evaluation(frame: EvaluationFrame, row: usize) -> [Felt; NUM_CONSTRAINTS] { +fn get_constraint_evaluation( + frame: EvaluationFrame, + row: RowIndex, +) -> [Felt; NUM_CONSTRAINTS] { let periodic_values = get_periodic_values(row); let mut result = [ZERO; NUM_CONSTRAINTS]; @@ -151,16 +153,16 @@ fn get_constraint_evaluation(frame: EvaluationFrame, row: usize) -> [Felt; /// cycle. /// /// # Errors -/// It expects the specified `cycle_row_num` for the current row to be such that the next row will +/// It expects the specified `cycle_row` for the current row to be such that the next row will /// still be in the same cycle. It will fail if the row number input is >= OP_CYCLE_LEN - 1. pub fn get_test_frame( operation: Felt, a: u32, b: u32, - cycle_row_num: usize, + cycle_row: RowIndex, ) -> EvaluationFrame { assert!( - cycle_row_num < OP_CYCLE_LEN - 1, + cycle_row < OP_CYCLE_LEN - 1, "Failed to build test EvaluationFrame for bitwise operation. The next row would be in a new cycle." ); @@ -173,16 +175,16 @@ pub fn get_test_frame( next[BITWISE_SELECTOR_COL_IDX] = operation; // Set the input aggregation and decomposition values. - set_frame_inputs(&mut current, &mut next, a, b, cycle_row_num); + set_frame_inputs(&mut current, &mut next, a, b, cycle_row); // Compute the output for the specified operation and inputs and shift it for each row. - let (previous_shift, current_shift, next_shift) = get_row_shifts(cycle_row_num); + let (previous_shift, current_shift, next_shift) = get_row_shifts(cycle_row); let result = get_output(operation, a, b); let output_current = result >> current_shift; let output_next = result >> next_shift; // Set the previous output. - let output_prev = if cycle_row_num == 0 { + let output_prev = if cycle_row == 0 { ZERO } else { Felt::new((result >> previous_shift) as u64) @@ -202,17 +204,17 @@ pub fn get_test_frame( /// frames within a cycle. /// /// # Errors -/// It expects the specified `cycle_row_num` for the current row to be such that the next row will +/// It expects the specified `cycle_row` for the current row to be such that the next row will /// still be in the same cycle. It will fail if the row number input is >= OP_CYCLE_LEN - 1. pub fn get_test_frame_with_two_ops( op_current: Felt, op_next: Felt, a: u32, b: u32, - cycle_row_num: usize, + cycle_row: RowIndex, ) -> EvaluationFrame { assert!( - cycle_row_num < OP_CYCLE_LEN - 1, + cycle_row < OP_CYCLE_LEN - 1, "Failed to build test EvaluationFrame for bitwise operation. The next row would be in a new cycle." ); @@ -225,16 +227,16 @@ pub fn get_test_frame_with_two_ops( next[BITWISE_SELECTOR_COL_IDX] = op_next; // Set the input aggregation and decomposition values. - set_frame_inputs(&mut current, &mut next, a, b, cycle_row_num); + set_frame_inputs(&mut current, &mut next, a, b, cycle_row); // Compute the outputs for the specified operations and inputs and shift them for each row. 
- let (previous_shift, current_shift, next_shift) = get_row_shifts(cycle_row_num); + let (previous_shift, current_shift, next_shift) = get_row_shifts(cycle_row); let result_op_current = get_output(op_current, a, b); let output_current = result_op_current >> current_shift; let output_next = get_output(op_next, a, b) >> next_shift; // Set the previous output. - let output_prev = if cycle_row_num == 0 { + let output_prev = if cycle_row == 0 { ZERO } else { Felt::new((result_op_current >> previous_shift) as u64) @@ -249,11 +251,11 @@ pub fn get_test_frame_with_two_ops( EvaluationFrame::::from_rows(current, next) } -/// Returns the shift amount for the previous, current, and next rows, based on the `cycle_row_num`, +/// Returns the shift amount for the previous, current, and next rows, based on the `cycle_row`, /// which is the number of the `current` row within the operation cycle. -fn get_row_shifts(cycle_row_num: usize) -> (usize, usize, usize) { +fn get_row_shifts(cycle_row: RowIndex) -> (usize, usize, usize) { // Define the shift amount for output in this row and the next row. - let current_shift = NUM_DECOMP_BITS * (OP_CYCLE_LEN - cycle_row_num - 1); + let current_shift = NUM_DECOMP_BITS * (OP_CYCLE_LEN - usize::from(cycle_row) - 1); let previous_shift = current_shift + NUM_DECOMP_BITS; let next_shift = current_shift - NUM_DECOMP_BITS; @@ -262,10 +264,10 @@ fn get_row_shifts(cycle_row_num: usize) -> (usize, usize, usize) { /// Sets the input aggregation and decomposition columns in the provided current and next rows with /// the correct values corresponding to the provided inputs `a` and `b` and the specified -/// `cycle_row_num`, which is the number of the `current` row within the operation cycle. -fn set_frame_inputs(current: &mut [Felt], next: &mut [Felt], a: u32, b: u32, cycle_row_num: usize) { +/// `cycle_row`, which is the number of the `current` row within the operation cycle. +fn set_frame_inputs(current: &mut [Felt], next: &mut [Felt], a: u32, b: u32, cycle_row: RowIndex) { // Get the shift amounts for the specified rows. - let (_, current_shift, next_shift) = get_row_shifts(cycle_row_num); + let (_, current_shift, next_shift) = get_row_shifts(cycle_row); // Set the input aggregation values. let current_a = (a >> current_shift) as u64; @@ -297,3 +299,13 @@ fn get_output(operation: Felt, a: u32, b: u32) -> u32 { panic!("Test bitwise EvaluationFrame requested for unrecognized operation."); } } + +/// Returns the values from the bitwise periodic columns for the specified cycle row. 
+#[cfg(test)] +fn get_periodic_values(cycle_row: crate::RowIndex) -> [Felt; 2] { + match cycle_row.into() { + 0u32 => [ONE, ONE], + 8u32 => [ZERO, ZERO], + _ => [ZERO, ONE], + } +} diff --git a/air/src/constraints/chiplets/hasher/tests.rs b/air/src/constraints/chiplets/hasher/tests.rs index c36219a655..1473bd9e19 100644 --- a/air/src/constraints/chiplets/hasher/tests.rs +++ b/air/src/constraints/chiplets/hasher/tests.rs @@ -4,7 +4,7 @@ use super::{ }; use crate::{ trace::chiplets::hasher::{Selectors, LINEAR_HASH, STATE_WIDTH}, - Felt, TRACE_WIDTH, + Felt, RowIndex, TRACE_WIDTH, }; use alloc::vec::Vec; use rand_utils::rand_array; @@ -20,12 +20,12 @@ use winter_air::EvaluationFrame; fn hash_round() { let expected = [ZERO; NUM_CONSTRAINTS]; - let cycle_row_num: usize = 3; + let cycle_row = 3.into(); let current_selectors = [ZERO, LINEAR_HASH[1], LINEAR_HASH[2]]; let next_selectors = current_selectors; - let frame = get_test_hashing_frame(current_selectors, next_selectors, cycle_row_num); - let result = get_constraint_evaluation(frame, cycle_row_num); + let frame = get_test_hashing_frame(current_selectors, next_selectors, cycle_row); + let result = get_constraint_evaluation(frame, cycle_row); assert_eq!(expected, result); } @@ -36,10 +36,10 @@ fn hash_round() { /// the specified row. fn get_constraint_evaluation( frame: EvaluationFrame, - cycle_row_num: usize, + cycle_row: RowIndex, ) -> [Felt; NUM_CONSTRAINTS] { let mut result = [ZERO; NUM_CONSTRAINTS]; - let periodic_values = get_test_periodic_values(cycle_row_num); + let periodic_values = get_test_periodic_values(cycle_row); enforce_constraints(&frame, &periodic_values, &mut result, ONE); @@ -47,12 +47,12 @@ fn get_constraint_evaluation( } /// Returns the values from the periodic columns for the specified cycle row. -fn get_test_periodic_values(cycle_row: usize) -> Vec { +fn get_test_periodic_values(cycle_row: RowIndex) -> Vec { // Set the periodic column values. - let mut periodic_values = match cycle_row { - 0 => vec![ZERO, ZERO, ONE], - 7 => vec![ZERO, ONE, ZERO], - 8 => vec![ONE, ZERO, ZERO], + let mut periodic_values = match cycle_row.into() { + 0u32 => vec![ZERO, ZERO, ONE], + 7u32 => vec![ZERO, ONE, ZERO], + 8u32 => vec![ONE, ZERO, ZERO], _ => vec![ZERO, ZERO, ZERO], }; @@ -70,7 +70,7 @@ fn get_test_periodic_values(cycle_row: usize) -> Vec { fn get_test_hashing_frame( current_selectors: Selectors, next_selectors: Selectors, - cycle_row_num: usize, + cycle_row: RowIndex, ) -> EvaluationFrame { let mut current = vec![ZERO; TRACE_WIDTH]; let mut next = vec![ZERO; TRACE_WIDTH]; @@ -84,7 +84,7 @@ fn get_test_hashing_frame( current[HASHER_STATE_COL_RANGE].copy_from_slice(&state); // Set the hasher state after a single permutation. - apply_round(&mut state, cycle_row_num); + apply_round(&mut state, cycle_row.into()); next[HASHER_STATE_COL_RANGE].copy_from_slice(&state); // Set the node index values to zero for hash computations. 
diff --git a/air/src/lib.rs b/air/src/lib.rs index 7076ac5767..c6ec61c90a 100644 --- a/air/src/lib.rs +++ b/air/src/lib.rs @@ -23,6 +23,7 @@ pub use constraints::stack; use constraints::{chiplets, range}; pub mod trace; +pub use trace::rows::RowIndex; use trace::*; mod errors; diff --git a/air/src/trace/main_trace.rs b/air/src/trace/main_trace.rs index d51c3fcec9..ff0b4e8f0c 100644 --- a/air/src/trace/main_trace.rs +++ b/air/src/trace/main_trace.rs @@ -1,3 +1,5 @@ +use crate::RowIndex; + use super::{ super::ColMatrix, chiplets::{ @@ -66,17 +68,17 @@ impl MainTrace { // -------------------------------------------------------------------------------------------- /// Returns the value of the clk column at row i. - pub fn clk(&self, i: usize) -> Felt { + pub fn clk(&self, i: RowIndex) -> Felt { self.columns.get_column(CLK_COL_IDX)[i] } /// Returns the value of the fmp column at row i. - pub fn fmp(&self, i: usize) -> Felt { + pub fn fmp(&self, i: RowIndex) -> Felt { self.columns.get_column(FMP_COL_IDX)[i] } /// Returns the value of the ctx column at row i. - pub fn ctx(&self, i: usize) -> Felt { + pub fn ctx(&self, i: RowIndex) -> Felt { self.columns.get_column(CTX_COL_IDX)[i] } @@ -84,22 +86,22 @@ impl MainTrace { // -------------------------------------------------------------------------------------------- /// Returns the value in the block address column at the row i. - pub fn addr(&self, i: usize) -> Felt { + pub fn addr(&self, i: RowIndex) -> Felt { self.columns.get_column(DECODER_TRACE_OFFSET)[i] } /// Helper method to detect change of address. - pub fn is_addr_change(&self, i: usize) -> bool { + pub fn is_addr_change(&self, i: RowIndex) -> bool { self.addr(i) != self.addr(i + 1) } /// The i-th decoder helper register at `row`. - pub fn helper_register(&self, i: usize, row: usize) -> Felt { + pub fn helper_register(&self, i: usize, row: RowIndex) -> Felt { self.columns.get_column(DECODER_TRACE_OFFSET + USER_OP_HELPERS_OFFSET + i)[row] } /// Returns the hasher state at row i. - pub fn decoder_hasher_state(&self, i: usize) -> [Felt; NUM_HASHER_COLUMNS] { + pub fn decoder_hasher_state(&self, i: RowIndex) -> [Felt; NUM_HASHER_COLUMNS] { let mut state = [ZERO; NUM_HASHER_COLUMNS]; for (idx, col_idx) in DECODER_HASHER_RANGE.enumerate() { let column = self.columns.get_column(col_idx); @@ -109,7 +111,7 @@ impl MainTrace { } /// Returns the first half of the hasher state at row i. - pub fn decoder_hasher_state_first_half(&self, i: usize) -> Word { + pub fn decoder_hasher_state_first_half(&self, i: RowIndex) -> Word { let mut state = [ZERO; DIGEST_LEN]; for (col, s) in state.iter_mut().enumerate() { *s = self.columns.get_column(DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET + col)[i]; @@ -118,7 +120,7 @@ impl MainTrace { } /// Returns the second half of the hasher state at row i. - pub fn decoder_hasher_state_second_half(&self, i: usize) -> Word { + pub fn decoder_hasher_state_second_half(&self, i: RowIndex) -> Word { const SECOND_WORD_OFFSET: usize = 4; let mut state = [ZERO; DIGEST_LEN]; for (col, s) in state.iter_mut().enumerate() { @@ -131,12 +133,12 @@ impl MainTrace { } /// Returns a specific element from the hasher state at row i. - pub fn decoder_hasher_state_element(&self, element: usize, i: usize) -> Felt { + pub fn decoder_hasher_state_element(&self, element: usize, i: RowIndex) -> Felt { self.columns.get_column(DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET + element)[i + 1] } /// Returns the current function hash (i.e., root) at row i. 
- pub fn fn_hash(&self, i: usize) -> [Felt; DIGEST_LEN] { + pub fn fn_hash(&self, i: RowIndex) -> [Felt; DIGEST_LEN] { let mut state = [ZERO; DIGEST_LEN]; for (col, s) in state.iter_mut().enumerate() { *s = self.columns.get_column(FN_HASH_OFFSET + col)[i]; @@ -145,53 +147,53 @@ impl MainTrace { } /// Returns the `is_loop_body` flag at row i. - pub fn is_loop_body_flag(&self, i: usize) -> Felt { + pub fn is_loop_body_flag(&self, i: RowIndex) -> Felt { self.columns.get_column(DECODER_TRACE_OFFSET + IS_LOOP_BODY_FLAG_COL_IDX)[i] } /// Returns the `is_loop` flag at row i. - pub fn is_loop_flag(&self, i: usize) -> Felt { + pub fn is_loop_flag(&self, i: RowIndex) -> Felt { self.columns.get_column(DECODER_TRACE_OFFSET + IS_LOOP_FLAG_COL_IDX)[i] } /// Returns the `is_call` flag at row i. - pub fn is_call_flag(&self, i: usize) -> Felt { + pub fn is_call_flag(&self, i: RowIndex) -> Felt { self.columns.get_column(DECODER_TRACE_OFFSET + IS_CALL_FLAG_COL_IDX)[i] } /// Returns the `is_syscall` flag at row i. - pub fn is_syscall_flag(&self, i: usize) -> Felt { + pub fn is_syscall_flag(&self, i: RowIndex) -> Felt { self.columns.get_column(DECODER_TRACE_OFFSET + IS_SYSCALL_FLAG_COL_IDX)[i] } /// Returns the operation batch flags at row i. This indicates the number of op groups in /// the current batch that is being processed. - pub fn op_batch_flag(&self, i: usize) -> [Felt; NUM_OP_BATCH_FLAGS] { + pub fn op_batch_flag(&self, i: RowIndex) -> [Felt; NUM_OP_BATCH_FLAGS] { [ - self.columns.get(DECODER_TRACE_OFFSET + OP_BATCH_FLAGS_OFFSET, i), - self.columns.get(DECODER_TRACE_OFFSET + OP_BATCH_FLAGS_OFFSET + 1, i), - self.columns.get(DECODER_TRACE_OFFSET + OP_BATCH_FLAGS_OFFSET + 2, i), + self.columns.get(DECODER_TRACE_OFFSET + OP_BATCH_FLAGS_OFFSET, i.into()), + self.columns.get(DECODER_TRACE_OFFSET + OP_BATCH_FLAGS_OFFSET + 1, i.into()), + self.columns.get(DECODER_TRACE_OFFSET + OP_BATCH_FLAGS_OFFSET + 2, i.into()), ] } /// Returns the operation group count. This indicates the number of operation that remain /// to be executed in the current span block. - pub fn group_count(&self, i: usize) -> Felt { + pub fn group_count(&self, i: RowIndex) -> Felt { self.columns.get_column(DECODER_TRACE_OFFSET + GROUP_COUNT_COL_IDX)[i] } /// Returns the delta between the current and next group counts. - pub fn delta_group_count(&self, i: usize) -> Felt { + pub fn delta_group_count(&self, i: RowIndex) -> Felt { self.group_count(i) - self.group_count(i + 1) } /// Returns the `in_span` flag at row i. - pub fn is_in_span(&self, i: usize) -> Felt { + pub fn is_in_span(&self, i: RowIndex) -> Felt { self.columns.get_column(DECODER_TRACE_OFFSET + IN_SPAN_COL_IDX)[i] } /// Constructs the i-th op code value from its individual bits. - pub fn get_op_code(&self, i: usize) -> Felt { + pub fn get_op_code(&self, i: RowIndex) -> Felt { let col_b0 = self.columns.get_column(DECODER_TRACE_OFFSET + 1); let col_b1 = self.columns.get_column(DECODER_TRACE_OFFSET + 2); let col_b2 = self.columns.get_column(DECODER_TRACE_OFFSET + 3); @@ -209,18 +211,23 @@ impl MainTrace { + b6.mul_small(64) } + /// Returns an iterator of [`RowIndex`] values over the row indices of this trace. + pub fn row_iter(&self) -> impl Iterator { + (0..self.num_rows()).map(RowIndex::from) + } + /// Returns a flag indicating whether the current operation induces a left shift of the operand /// stack. 
- pub fn is_left_shift(&self, i: usize) -> bool { - let b0 = self.columns.get(DECODER_TRACE_OFFSET + 1, i); - let b1 = self.columns.get(DECODER_TRACE_OFFSET + 2, i); - let b2 = self.columns.get(DECODER_TRACE_OFFSET + 3, i); - let b3 = self.columns.get(DECODER_TRACE_OFFSET + 4, i); - let b4 = self.columns.get(DECODER_TRACE_OFFSET + 5, i); - let b5 = self.columns.get(DECODER_TRACE_OFFSET + 6, i); - let b6 = self.columns.get(DECODER_TRACE_OFFSET + 7, i); - let e0 = self.columns.get(DECODER_TRACE_OFFSET + OP_BITS_EXTRA_COLS_OFFSET, i); - let h5 = self.columns.get(DECODER_TRACE_OFFSET + IS_LOOP_FLAG_COL_IDX, i); + pub fn is_left_shift(&self, i: RowIndex) -> bool { + let b0 = self.columns.get(DECODER_TRACE_OFFSET + 1, i.into()); + let b1 = self.columns.get(DECODER_TRACE_OFFSET + 2, i.into()); + let b2 = self.columns.get(DECODER_TRACE_OFFSET + 3, i.into()); + let b3 = self.columns.get(DECODER_TRACE_OFFSET + 4, i.into()); + let b4 = self.columns.get(DECODER_TRACE_OFFSET + 5, i.into()); + let b5 = self.columns.get(DECODER_TRACE_OFFSET + 6, i.into()); + let b6 = self.columns.get(DECODER_TRACE_OFFSET + 7, i.into()); + let e0 = self.columns.get(DECODER_TRACE_OFFSET + OP_BITS_EXTRA_COLS_OFFSET, i.into()); + let h5 = self.columns.get(DECODER_TRACE_OFFSET + IS_LOOP_FLAG_COL_IDX, i.into()); // group with left shift effect grouped by a common prefix ([b6, b5, b4] == [ZERO, ONE, ZERO])|| @@ -236,14 +243,14 @@ impl MainTrace { /// Returns a flag indicating whether the current operation induces a right shift of the operand /// stack. - pub fn is_right_shift(&self, i: usize) -> bool { - let b0 = self.columns.get(DECODER_TRACE_OFFSET + 1, i); - let b1 = self.columns.get(DECODER_TRACE_OFFSET + 2, i); - let b2 = self.columns.get(DECODER_TRACE_OFFSET + 3, i); - let b3 = self.columns.get(DECODER_TRACE_OFFSET + 4, i); - let b4 = self.columns.get(DECODER_TRACE_OFFSET + 5, i); - let b5 = self.columns.get(DECODER_TRACE_OFFSET + 6, i); - let b6 = self.columns.get(DECODER_TRACE_OFFSET + 7, i); + pub fn is_right_shift(&self, i: RowIndex) -> bool { + let b0 = self.columns.get(DECODER_TRACE_OFFSET + 1, i.into()); + let b1 = self.columns.get(DECODER_TRACE_OFFSET + 2, i.into()); + let b2 = self.columns.get(DECODER_TRACE_OFFSET + 3, i.into()); + let b3 = self.columns.get(DECODER_TRACE_OFFSET + 4, i.into()); + let b4 = self.columns.get(DECODER_TRACE_OFFSET + 5, i.into()); + let b5 = self.columns.get(DECODER_TRACE_OFFSET + 6, i.into()); + let b6 = self.columns.get(DECODER_TRACE_OFFSET + 7, i.into()); // group with right shift effect grouped by a common prefix [b6, b5, b4] == [ZERO, ONE, ONE]|| @@ -257,22 +264,22 @@ impl MainTrace { // -------------------------------------------------------------------------------------------- /// Returns the value of the stack depth column at row i. - pub fn stack_depth(&self, i: usize) -> Felt { + pub fn stack_depth(&self, i: RowIndex) -> Felt { self.columns.get_column(STACK_TRACE_OFFSET + B0_COL_IDX)[i] } /// Returns the element at row i in a given stack trace column. - pub fn stack_element(&self, column: usize, i: usize) -> Felt { + pub fn stack_element(&self, column: usize, i: RowIndex) -> Felt { self.columns.get_column(STACK_TRACE_OFFSET + column)[i] } /// Returns the address of the top element in the stack overflow table at row i. - pub fn parent_overflow_address(&self, i: usize) -> Felt { + pub fn parent_overflow_address(&self, i: RowIndex) -> Felt { self.columns.get_column(STACK_TRACE_OFFSET + B1_COL_IDX)[i] } /// Returns a flag indicating whether the overflow stack is non-empty. 
- pub fn is_non_empty_overflow(&self, i: usize) -> bool { + pub fn is_non_empty_overflow(&self, i: RowIndex) -> bool { let b0 = self.columns.get_column(STACK_TRACE_OFFSET + B0_COL_IDX)[i]; let h0 = self.columns.get_column(STACK_TRACE_OFFSET + H0_COL_IDX)[i]; (b0 - Felt::new(16)) * h0 == ONE @@ -282,37 +289,37 @@ impl MainTrace { // -------------------------------------------------------------------------------------------- /// Returns chiplet column number 0 at row i. - pub fn chiplet_selector_0(&self, i: usize) -> Felt { + pub fn chiplet_selector_0(&self, i: RowIndex) -> Felt { self.columns.get_column(CHIPLETS_OFFSET)[i] } /// Returns chiplet column number 1 at row i. - pub fn chiplet_selector_1(&self, i: usize) -> Felt { + pub fn chiplet_selector_1(&self, i: RowIndex) -> Felt { self.columns.get_column(CHIPLETS_OFFSET + 1)[i] } /// Returns chiplet column number 2 at row i. - pub fn chiplet_selector_2(&self, i: usize) -> Felt { + pub fn chiplet_selector_2(&self, i: RowIndex) -> Felt { self.columns.get_column(CHIPLETS_OFFSET + 2)[i] } /// Returns chiplet column number 3 at row i. - pub fn chiplet_selector_3(&self, i: usize) -> Felt { + pub fn chiplet_selector_3(&self, i: RowIndex) -> Felt { self.columns.get_column(CHIPLETS_OFFSET + 3)[i] } /// Returns chiplet column number 4 at row i. - pub fn chiplet_selector_4(&self, i: usize) -> Felt { + pub fn chiplet_selector_4(&self, i: RowIndex) -> Felt { self.columns.get_column(CHIPLETS_OFFSET + 4)[i] } /// Returns `true` if a row is part of the hash chiplet. - pub fn is_hash_row(&self, i: usize) -> bool { + pub fn is_hash_row(&self, i: RowIndex) -> bool { self.chiplet_selector_0(i) == ZERO } /// Returns the (full) state of the hasher chiplet at row i. - pub fn chiplet_hasher_state(&self, i: usize) -> [Felt; STATE_WIDTH] { + pub fn chiplet_hasher_state(&self, i: RowIndex) -> [Felt; STATE_WIDTH] { let mut state = [ZERO; STATE_WIDTH]; for (idx, col_idx) in HASHER_STATE_COL_RANGE.enumerate() { let column = self.columns.get_column(col_idx); @@ -322,74 +329,74 @@ impl MainTrace { } /// Returns the hasher's node index column at row i - pub fn chiplet_node_index(&self, i: usize) -> Felt { - self.columns.get(HASHER_NODE_INDEX_COL_IDX, i) + pub fn chiplet_node_index(&self, i: RowIndex) -> Felt { + self.columns.get(HASHER_NODE_INDEX_COL_IDX, i.into()) } /// Returns `true` if a row is part of the bitwise chiplet. - pub fn is_bitwise_row(&self, i: usize) -> bool { + pub fn is_bitwise_row(&self, i: RowIndex) -> bool { self.chiplet_selector_0(i) == ONE && self.chiplet_selector_1(i) == ZERO } /// Returns the bitwise column holding the aggregated value of input `a` at row i. - pub fn chiplet_bitwise_a(&self, i: usize) -> Felt { + pub fn chiplet_bitwise_a(&self, i: RowIndex) -> Felt { self.columns.get_column(BITWISE_A_COL_IDX)[i] } /// Returns the bitwise column holding the aggregated value of input `b` at row i. - pub fn chiplet_bitwise_b(&self, i: usize) -> Felt { + pub fn chiplet_bitwise_b(&self, i: RowIndex) -> Felt { self.columns.get_column(BITWISE_B_COL_IDX)[i] } /// Returns the bitwise column holding the aggregated value of the output at row i. - pub fn chiplet_bitwise_z(&self, i: usize) -> Felt { + pub fn chiplet_bitwise_z(&self, i: RowIndex) -> Felt { self.columns.get_column(BITWISE_OUTPUT_COL_IDX)[i] } /// Returns `true` if a row is part of the memory chiplet. 
- pub fn is_memory_row(&self, i: usize) -> bool { + pub fn is_memory_row(&self, i: RowIndex) -> bool { self.chiplet_selector_0(i) == ONE && self.chiplet_selector_1(i) == ONE && self.chiplet_selector_2(i) == ZERO } /// Returns the i-th row of the chiplet column containing memory context. - pub fn chiplet_memory_ctx(&self, i: usize) -> Felt { + pub fn chiplet_memory_ctx(&self, i: RowIndex) -> Felt { self.columns.get_column(MEMORY_CTX_COL_IDX)[i] } /// Returns the i-th row of the chiplet column containing memory address. - pub fn chiplet_memory_addr(&self, i: usize) -> Felt { + pub fn chiplet_memory_addr(&self, i: RowIndex) -> Felt { self.columns.get_column(MEMORY_ADDR_COL_IDX)[i] } /// Returns the i-th row of the chiplet column containing clock cycle. - pub fn chiplet_memory_clk(&self, i: usize) -> Felt { + pub fn chiplet_memory_clk(&self, i: RowIndex) -> Felt { self.columns.get_column(MEMORY_CLK_COL_IDX)[i] } /// Returns the i-th row of the chiplet column containing the zeroth memory value element. - pub fn chiplet_memory_value_0(&self, i: usize) -> Felt { + pub fn chiplet_memory_value_0(&self, i: RowIndex) -> Felt { self.columns.get_column(MEMORY_V_COL_RANGE.start)[i] } /// Returns the i-th row of the chiplet column containing the first memory value element. - pub fn chiplet_memory_value_1(&self, i: usize) -> Felt { + pub fn chiplet_memory_value_1(&self, i: RowIndex) -> Felt { self.columns.get_column(MEMORY_V_COL_RANGE.start + 1)[i] } /// Returns the i-th row of the chiplet column containing the second memory value element. - pub fn chiplet_memory_value_2(&self, i: usize) -> Felt { + pub fn chiplet_memory_value_2(&self, i: RowIndex) -> Felt { self.columns.get_column(MEMORY_V_COL_RANGE.start + 2)[i] } /// Returns the i-th row of the chiplet column containing the third memory value element. - pub fn chiplet_memory_value_3(&self, i: usize) -> Felt { + pub fn chiplet_memory_value_3(&self, i: RowIndex) -> Felt { self.columns.get_column(MEMORY_V_COL_RANGE.start + 3)[i] } /// Returns `true` if a row is part of the kernel chiplet. - pub fn is_kernel_row(&self, i: usize) -> bool { + pub fn is_kernel_row(&self, i: RowIndex) -> bool { self.chiplet_selector_0(i) == ONE && self.chiplet_selector_1(i) == ONE && self.chiplet_selector_2(i) == ONE @@ -397,31 +404,31 @@ impl MainTrace { } /// Returns the i-th row of the kernel chiplet `addr` column. - pub fn chiplet_kernel_addr(&self, i: usize) -> Felt { + pub fn chiplet_kernel_addr(&self, i: RowIndex) -> Felt { self.columns.get_column(CHIPLETS_OFFSET + 5)[i] } /// Returns the i-th row of the chiplet column containing the zeroth element of the kernel /// procedure root. - pub fn chiplet_kernel_root_0(&self, i: usize) -> Felt { + pub fn chiplet_kernel_root_0(&self, i: RowIndex) -> Felt { self.columns.get_column(CHIPLETS_OFFSET + 6)[i] } /// Returns the i-th row of the chiplet column containing the first element of the kernel /// procedure root. - pub fn chiplet_kernel_root_1(&self, i: usize) -> Felt { + pub fn chiplet_kernel_root_1(&self, i: RowIndex) -> Felt { self.columns.get_column(CHIPLETS_OFFSET + 7)[i] } /// Returns the i-th row of the chiplet column containing the second element of the kernel /// procedure root. - pub fn chiplet_kernel_root_2(&self, i: usize) -> Felt { + pub fn chiplet_kernel_root_2(&self, i: RowIndex) -> Felt { self.columns.get_column(CHIPLETS_OFFSET + 8)[i] } /// Returns the i-th row of the chiplet column containing the third element of the kernel /// procedure root. 
-    pub fn chiplet_kernel_root_3(&self, i: usize) -> Felt {
+    pub fn chiplet_kernel_root_3(&self, i: RowIndex) -> Felt {
         self.columns.get_column(CHIPLETS_OFFSET + 9)[i]
     }
 
@@ -430,8 +437,8 @@ impl MainTrace {
 
     /// Returns `true` if the hasher chiplet flags indicate the initialization of verifying
     /// a Merkle path to an old node during Merkle root update procedure (MRUPDATE).
-    pub fn f_mv(&self, i: usize) -> bool {
-        (i % HASH_CYCLE_LEN == 0)
+    pub fn f_mv(&self, i: RowIndex) -> bool {
+        (usize::from(i) % HASH_CYCLE_LEN == 0)
             && self.chiplet_selector_0(i) == ZERO
             && self.chiplet_selector_1(i) == ONE
             && self.chiplet_selector_2(i) == ONE
@@ -440,8 +447,8 @@ impl MainTrace {
 
     /// Returns `true` if the hasher chiplet flags indicate the continuation of verifying
     /// a Merkle path to an old node during Merkle root update procedure (MRUPDATE).
-    pub fn f_mva(&self, i: usize) -> bool {
-        (i % HASH_CYCLE_LEN == HASH_CYCLE_LEN - 1)
+    pub fn f_mva(&self, i: RowIndex) -> bool {
+        (usize::from(i) % HASH_CYCLE_LEN == HASH_CYCLE_LEN - 1)
             && self.chiplet_selector_0(i) == ZERO
             && self.chiplet_selector_1(i) == ONE
             && self.chiplet_selector_2(i) == ONE
@@ -450,8 +457,8 @@ impl MainTrace {
 
     /// Returns `true` if the hasher chiplet flags indicate the initialization of verifying
     /// a Merkle path to a new node during Merkle root update procedure (MRUPDATE).
-    pub fn f_mu(&self, i: usize) -> bool {
-        (i % HASH_CYCLE_LEN == 0)
+    pub fn f_mu(&self, i: RowIndex) -> bool {
+        (usize::from(i) % HASH_CYCLE_LEN == 0)
             && self.chiplet_selector_0(i) == ZERO
             && self.chiplet_selector_1(i) == ONE
             && self.chiplet_selector_2(i) == ONE
@@ -460,8 +467,8 @@ impl MainTrace {
 
     /// Returns `true` if the hasher chiplet flags indicate the continuation of verifying
     /// a Merkle path to a new node during Merkle root update procedure (MRUPDATE).
-    pub fn f_mua(&self, i: usize) -> bool {
-        (i % HASH_CYCLE_LEN == HASH_CYCLE_LEN - 1)
+    pub fn f_mua(&self, i: RowIndex) -> bool {
+        (usize::from(i) % HASH_CYCLE_LEN == HASH_CYCLE_LEN - 1)
             && self.chiplet_selector_0(i) == ZERO
             && self.chiplet_selector_1(i) == ONE
             && self.chiplet_selector_2(i) == ONE
diff --git a/air/src/trace/mod.rs b/air/src/trace/mod.rs
index f61e1cae05..001a225511 100644
--- a/air/src/trace/mod.rs
+++ b/air/src/trace/mod.rs
@@ -5,6 +5,7 @@ pub mod chiplets;
 pub mod decoder;
 pub mod main_trace;
 pub mod range;
+pub mod rows;
 pub mod stack;
 
 // CONSTANTS
diff --git a/air/src/trace/rows.rs b/air/src/trace/rows.rs
new file mode 100644
index 0000000000..831fa04f31
--- /dev/null
+++ b/air/src/trace/rows.rs
@@ -0,0 +1,301 @@
+use core::{
+    fmt::{Display, Formatter},
+    ops::{Add, AddAssign, Bound, Index, IndexMut, Mul, RangeBounds, Sub, SubAssign},
+};
+
+use vm_core::Felt;
+
+/// Represents the types of errors that can occur when converting from and into [`RowIndex`] and
+/// using its operations.
+#[derive(Debug, thiserror::Error, PartialEq, Eq)]
+pub enum RowIndexError<T> {
+    #[error("value is too large to be converted into RowIndex: {0}")]
+    InvalidSize(T),
+}
+
+// ROW INDEX
+// ================================================================================================
+
+/// A newtype wrapper around a u32 value representing a step in the execution trace.
+#[derive(Debug, Copy, Clone, Eq, Ord, PartialOrd)]
+pub struct RowIndex(u32);
+
+impl Display for RowIndex {
+    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
+        write!(f, "{}", self.0)
+    }
+}
+
+// FROM ROW INDEX
+// ================================================================================================
+
+impl From<RowIndex> for u32 {
+    fn from(step: RowIndex) -> u32 {
+        step.0
+    }
+}
+
+impl From<RowIndex> for u64 {
+    fn from(step: RowIndex) -> u64 {
+        step.0 as u64
+    }
+}
+
+impl From<RowIndex> for usize {
+    fn from(step: RowIndex) -> usize {
+        step.0 as usize
+    }
+}
+
+impl From<RowIndex> for Felt {
+    fn from(step: RowIndex) -> Felt {
+        Felt::from(step.0)
+    }
+}
+
+// INTO ROW INDEX
+// ================================================================================================
+
+/// Converts a usize value into a [`RowIndex`].
+///
+/// # Panics
+///
+/// This function will panic if the number represented by the usize is greater than the maximum
+/// [`RowIndex`] value, `u32::MAX`.
+impl From<usize> for RowIndex {
+    fn from(value: usize) -> Self {
+        let value = u32::try_from(value).map_err(|_| RowIndexError::InvalidSize(value)).unwrap();
+        value.into()
+    }
+}
+
+/// Converts a u64 value into a [`RowIndex`].
+///
+/// # Errors
+///
+/// This function returns an error if the number represented by the u64 is greater than the
+/// maximum [`RowIndex`] value, `u32::MAX`.
+impl TryFrom<u64> for RowIndex {
+    type Error = RowIndexError<u64>;
+
+    fn try_from(value: u64) -> Result<Self, Self::Error> {
+        let value = u32::try_from(value).map_err(|_| RowIndexError::InvalidSize(value))?;
+        Ok(RowIndex::from(value))
+    }
+}
+
+impl From<u32> for RowIndex {
+    fn from(value: u32) -> Self {
+        Self(value)
+    }
+}
+
+/// Converts an i32 value into a [`RowIndex`].
+///
+/// # Panics
+///
+/// This function will panic if the number represented by the i32 is less than 0.
+impl From<i32> for RowIndex {
+    fn from(value: i32) -> Self {
+        let value = u32::try_from(value).map_err(|_| RowIndexError::InvalidSize(value)).unwrap();
+        RowIndex(value)
+    }
+}
+
+// ROW INDEX OPS
+// ================================================================================================
+
+/// Subtracts a usize from a [`RowIndex`].
+///
+/// # Panics
+///
+/// This function will panic if the number represented by the usize is greater than the maximum
+/// [`RowIndex`] value, `u32::MAX`.
+impl Sub<usize> for RowIndex {
+    type Output = RowIndex;
+
+    fn sub(self, rhs: usize) -> Self::Output {
+        let rhs = u32::try_from(rhs).map_err(|_| RowIndexError::InvalidSize(rhs)).unwrap();
+        RowIndex(self.0 - rhs)
+    }
+}
+
+impl SubAssign<u32> for RowIndex {
+    fn sub_assign(&mut self, rhs: u32) {
+        self.0 -= rhs;
+    }
+}
+
+impl RowIndex {
+    pub fn saturating_sub(self, rhs: u32) -> Self {
+        RowIndex(self.0.saturating_sub(rhs))
+    }
+
+    pub fn max(self, other: u32) -> Self {
+        RowIndex(self.0.max(other))
+    }
+}
+
+/// Adds a usize to a [`RowIndex`].
+///
+/// # Panics
+///
+/// This function will panic if the number represented by the usize is greater than the maximum
+/// [`RowIndex`] value, `u32::MAX`.
+impl Add<usize> for RowIndex {
+    type Output = RowIndex;
+
+    fn add(self, rhs: usize) -> Self::Output {
+        let rhs = u32::try_from(rhs).map_err(|_| RowIndexError::InvalidSize(rhs)).unwrap();
+        RowIndex(self.0 + rhs)
+    }
+}
+
+impl Add<RowIndex> for u32 {
+    type Output = RowIndex;
+
+    fn add(self, rhs: RowIndex) -> Self::Output {
+        RowIndex(self + rhs.0)
+    }
+}
+
+/// Adds a usize value to a RowIndex in place.
+///
+/// # Panics
+///
+/// This function will panic if the number represented by the usize is greater than the maximum
+/// [`RowIndex`] value, `u32::MAX`.
+impl AddAssign<usize> for RowIndex {
+    fn add_assign(&mut self, rhs: usize) {
+        let rhs: RowIndex = rhs.into();
+        self.0 += rhs.0;
+    }
+}
+
+impl Mul<RowIndex> for usize {
+    type Output = RowIndex;
+
+    fn mul(self, rhs: RowIndex) -> Self::Output {
+        (self * rhs.0 as usize).into()
+    }
+}
+
+// ROW INDEX EQUALITY AND ORDERING
+// ================================================================================================
+
+impl PartialEq for RowIndex {
+    fn eq(&self, rhs: &RowIndex) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl PartialEq<usize> for RowIndex {
+    fn eq(&self, rhs: &usize) -> bool {
+        self.0 == u32::try_from(*rhs).map_err(|_| RowIndexError::InvalidSize(*rhs)).unwrap()
+    }
+}
+
+impl PartialEq<RowIndex> for i32 {
+    fn eq(&self, rhs: &RowIndex) -> bool {
+        *self as u32 == u32::from(*rhs)
+    }
+}
+
+impl PartialOrd<usize> for RowIndex {
+    fn partial_cmp(&self, rhs: &usize) -> Option<core::cmp::Ordering> {
+        let rhs = u32::try_from(*rhs).map_err(|_| RowIndexError::InvalidSize(*rhs)).unwrap();
+        self.0.partial_cmp(&rhs)
+    }
+}
+
+impl<T> Index<RowIndex> for [T] {
+    type Output = T;
+
+    fn index(&self, i: RowIndex) -> &Self::Output {
+        &self[i.0 as usize]
+    }
+}
+
+impl<T> IndexMut<RowIndex> for [T] {
+    fn index_mut(&mut self, i: RowIndex) -> &mut Self::Output {
+        &mut self[i.0 as usize]
+    }
+}
+
+impl RangeBounds<RowIndex> for RowIndex {
+    fn start_bound(&self) -> Bound<&Self> {
+        Bound::Included(self)
+    }
+    fn end_bound(&self) -> Bound<&Self> {
+        Bound::Included(self)
+    }
+}
+
+// TESTS
+// ================================================================================================
+#[cfg(test)]
+mod tests {
+    use alloc::collections::BTreeMap;
+
+    #[test]
+    fn row_index_conversions() {
+        use super::RowIndex;
+        // Into
+        let _: RowIndex = 5.into();
+        let _: RowIndex = 5u32.into();
+        let _: RowIndex = (5usize).into();
+
+        // From
+        let _: u32 = RowIndex(5).into();
+        let _: u64 = RowIndex(5).into();
+        let _: usize = RowIndex(5).into();
+    }
+
+    #[test]
+    fn row_index_ops() {
+        use super::RowIndex;
+
+        // Equality
+        assert_eq!(RowIndex(5), 5);
+        assert_eq!(RowIndex(5), RowIndex(5));
+        assert!(RowIndex(5) == RowIndex(5));
+        assert!(RowIndex(5) >= RowIndex(5));
+        assert!(RowIndex(6) >= RowIndex(5));
+        assert!(RowIndex(5) > RowIndex(4));
+        assert!(RowIndex(5) <= RowIndex(5));
+        assert!(RowIndex(4) <= RowIndex(5));
+        assert!(RowIndex(5) < RowIndex(6));
+
+        // Arithmetic
+        assert_eq!(RowIndex(5) + 3, 8);
+        assert_eq!(RowIndex(5) - 3, 2);
+        assert_eq!(3 + RowIndex(5), 8);
+        assert_eq!(2 * RowIndex(5), 10);
+
+        // Add assign
+        let mut step = RowIndex(5);
+        step += 5;
+        assert_eq!(step, 10);
+    }
+
+    #[test]
+    fn row_index_range() {
+        use super::RowIndex;
+        let mut tree: BTreeMap<RowIndex, usize> = BTreeMap::new();
+        tree.insert(RowIndex(0), 0);
+        tree.insert(RowIndex(1), 1);
+        tree.insert(RowIndex(2), 2);
+        let acc = tree
+            .range(RowIndex::from(0)..RowIndex::from(tree.len()))
+            .fold(0, |acc, (key, val)| {
+                assert_eq!(*key, RowIndex::from(acc));
+                assert_eq!(*val, acc);
+                acc + 1
+            });
+        assert_eq!(acc, 3);
+    }
+
+    #[test]
+    fn row_index_display() {
+        assert_eq!(format!("{}", super::RowIndex(5)), "5");
+    }
+}
diff --git a/air/src/utils.rs b/air/src/utils.rs
index 70a29a9d38..53506af1e3 100644
--- a/air/src/utils.rs
+++ b/air/src/utils.rs
@@ -1,6 +1,7 @@
-use alloc::vec::Vec;
 use core::ops::Range;
 
+use alloc::vec::Vec;
+
 use super::FieldElement;
 use vm_core::utils::range as create_range;
 
diff --git a/miden/Cargo.toml b/miden/Cargo.toml
index 
82256a1a79..be07ffb21f 100644 --- a/miden/Cargo.toml +++ b/miden/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "miden-vm" version = "0.10.0" -description="Miden virtual machine" +description = "Miden virtual machine" documentation = "https://docs.rs/miden-vm/0.10.0" categories = ["cryptography", "emulators", "no-std"] keywords = ["miden", "stark", "virtual-machine", "zkp"] diff --git a/miden/tests/integration/exec_iters.rs b/miden/tests/integration/exec_iters.rs index 624025301c..142d47d9a8 100644 --- a/miden/tests/integration/exec_iters.rs +++ b/miden/tests/integration/exec_iters.rs @@ -1,3 +1,4 @@ +use processor::RowIndex; use processor::{AsmOpInfo, ContextId, VmState}; use test_utils::{assert_eq, build_debug_test, Felt, ToElements, ONE}; use vm_core::{debuginfo::Location, AssemblyOp, Operation}; @@ -39,7 +40,7 @@ fn test_exec_iter() { }); let expected_states = vec![ VmState { - clk: 0, + clk: RowIndex::from(0), ctx: ContextId::root(), op: None, asmop: None, @@ -48,7 +49,7 @@ fn test_exec_iter() { memory: Vec::new(), }, VmState { - clk: 1, + clk: RowIndex::from(1), ctx: ContextId::root(), op: Some(Operation::Join), asmop: None, @@ -57,7 +58,7 @@ fn test_exec_iter() { memory: Vec::new(), }, VmState { - clk: 2, + clk: RowIndex::from(2), ctx: ContextId::root(), op: Some(Operation::Span), asmop: None, @@ -66,7 +67,7 @@ fn test_exec_iter() { memory: Vec::new(), }, VmState { - clk: 3, + clk: RowIndex::from(3), ctx: ContextId::root(), op: Some(Operation::Pad), asmop: Some(AsmOpInfo::new( @@ -84,7 +85,7 @@ fn test_exec_iter() { memory: Vec::new(), }, VmState { - clk: 4, + clk: RowIndex::from(4), ctx: ContextId::root(), op: Some(Operation::Incr), asmop: Some(AsmOpInfo::new( @@ -102,7 +103,7 @@ fn test_exec_iter() { memory: Vec::new(), }, VmState { - clk: 5, + clk: RowIndex::from(5), ctx: ContextId::root(), op: Some(Operation::MStoreW), asmop: Some(AsmOpInfo::new( @@ -120,7 +121,7 @@ fn test_exec_iter() { memory: mem.clone(), }, VmState { - clk: 6, + clk: RowIndex::from(6), ctx: ContextId::root(), op: Some(Operation::Drop), asmop: Some(AsmOpInfo::new( @@ -138,7 +139,7 @@ fn test_exec_iter() { memory: mem.clone(), }, VmState { - clk: 7, + clk: RowIndex::from(7), ctx: ContextId::root(), op: Some(Operation::Drop), asmop: Some(AsmOpInfo::new( @@ -156,7 +157,7 @@ fn test_exec_iter() { memory: mem.clone(), }, VmState { - clk: 8, + clk: RowIndex::from(8), ctx: ContextId::root(), op: Some(Operation::Drop), asmop: Some(AsmOpInfo::new( @@ -174,7 +175,7 @@ fn test_exec_iter() { memory: mem.clone(), }, VmState { - clk: 9, + clk: RowIndex::from(9), ctx: ContextId::root(), op: Some(Operation::Drop), asmop: Some(AsmOpInfo::new( @@ -192,7 +193,7 @@ fn test_exec_iter() { memory: mem.clone(), }, VmState { - clk: 10, + clk: RowIndex::from(10), ctx: ContextId::root(), op: Some(Operation::Push(Felt::new(17))), asmop: Some(AsmOpInfo::new( @@ -210,7 +211,7 @@ fn test_exec_iter() { memory: mem.clone(), }, VmState { - clk: 11, + clk: RowIndex::from(11), ctx: ContextId::root(), op: Some(Operation::Noop), asmop: None, @@ -219,7 +220,7 @@ fn test_exec_iter() { memory: mem.clone(), }, VmState { - clk: 12, + clk: RowIndex::from(12), ctx: ContextId::root(), op: Some(Operation::End), asmop: None, @@ -228,7 +229,7 @@ fn test_exec_iter() { memory: mem.clone(), }, VmState { - clk: 13, + clk: RowIndex::from(13), ctx: ContextId::root(), op: Some(Operation::Span), asmop: None, @@ -237,7 +238,7 @@ fn test_exec_iter() { memory: mem.clone(), }, VmState { - clk: 14, + clk: RowIndex::from(14), ctx: ContextId::root(), op: 
Some(Operation::Push(ONE)), asmop: None, @@ -246,7 +247,7 @@ fn test_exec_iter() { memory: mem.clone(), }, VmState { - clk: 15, + clk: RowIndex::from(15), ctx: ContextId::root(), op: Some(Operation::FmpUpdate), asmop: None, @@ -255,7 +256,7 @@ fn test_exec_iter() { memory: mem.clone(), }, VmState { - clk: 16, + clk: RowIndex::from(16), ctx: ContextId::root(), op: Some(Operation::Pad), asmop: Some(AsmOpInfo::new( @@ -273,7 +274,7 @@ fn test_exec_iter() { memory: mem.clone(), }, VmState { - clk: 17, + clk: RowIndex::from(17), ctx: ContextId::root(), op: Some(Operation::FmpAdd), asmop: Some(AsmOpInfo::new( @@ -292,7 +293,7 @@ fn test_exec_iter() { memory: mem, }, VmState { - clk: 18, + clk: RowIndex::from(18), ctx: ContextId::root(), op: Some(Operation::MStore), asmop: Some(AsmOpInfo::new( @@ -313,7 +314,7 @@ fn test_exec_iter() { ], }, VmState { - clk: 19, + clk: RowIndex::from(19), ctx: ContextId::root(), op: Some(Operation::Drop), asmop: Some(AsmOpInfo::new( @@ -334,7 +335,7 @@ fn test_exec_iter() { ], }, VmState { - clk: 20, + clk: RowIndex::from(20), ctx: ContextId::root(), op: Some(Operation::Push(Felt::new(18446744069414584320))), asmop: None, @@ -347,7 +348,7 @@ fn test_exec_iter() { ], }, VmState { - clk: 21, + clk: RowIndex::from(21), ctx: ContextId::root(), op: Some(Operation::FmpUpdate), asmop: None, @@ -359,7 +360,7 @@ fn test_exec_iter() { ], }, VmState { - clk: 22, + clk: RowIndex::from(22), ctx: ContextId::root(), op: Some(Operation::Noop), asmop: None, @@ -371,7 +372,7 @@ fn test_exec_iter() { ], }, VmState { - clk: 23, + clk: RowIndex::from(23), ctx: ContextId::root(), op: Some(Operation::End), asmop: None, @@ -383,7 +384,7 @@ fn test_exec_iter() { ], }, VmState { - clk: 24, + clk: RowIndex::from(24), ctx: ContextId::root(), op: Some(Operation::End), asmop: None, diff --git a/miden/tests/integration/operations/decorators/asmop.rs b/miden/tests/integration/operations/decorators/asmop.rs index 0270427f86..df6bae7991 100644 --- a/miden/tests/integration/operations/decorators/asmop.rs +++ b/miden/tests/integration/operations/decorators/asmop.rs @@ -1,3 +1,4 @@ +use processor::RowIndex; use processor::{AsmOpInfo, VmStateIterator}; use test_utils::{assert_eq, build_debug_test}; use vm_core::{debuginfo::Location, AssemblyOp, Felt, Operation}; @@ -25,17 +26,17 @@ fn asmop_one_span_block_test() { let vm_state_iterator = test.execute_iter(); let expected_vm_state = vec![ VmStatePartial { - clk: 0, + clk: RowIndex::from(0), asmop: None, op: None, }, VmStatePartial { - clk: 1, + clk: RowIndex::from(1), asmop: None, op: Some(Operation::Span), }, VmStatePartial { - clk: 2, + clk: RowIndex::from(2), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push1_loc.clone(), @@ -49,7 +50,7 @@ fn asmop_one_span_block_test() { op: Some(Operation::Pad), }, VmStatePartial { - clk: 3, + clk: RowIndex::from(3), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push1_loc, @@ -63,7 +64,7 @@ fn asmop_one_span_block_test() { op: Some(Operation::Incr), }, VmStatePartial { - clk: 4, + clk: RowIndex::from(4), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push2_loc, @@ -77,7 +78,7 @@ fn asmop_one_span_block_test() { op: Some(Operation::Push(Felt::new(2))), }, VmStatePartial { - clk: 5, + clk: RowIndex::from(5), asmop: Some(AsmOpInfo::new( AssemblyOp::new(add_loc, "#exec::#main".to_string(), 1, "add".to_string(), false), 1, @@ -85,7 +86,7 @@ fn asmop_one_span_block_test() { op: Some(Operation::Add), }, VmStatePartial { - clk: 6, + clk: RowIndex::from(6), asmop: None, op: Some(Operation::End), }, @@ -117,17 +118,17 @@ fn 
asmop_with_one_procedure() { let vm_state_iterator = test.execute_iter(); let expected_vm_state = vec![ VmStatePartial { - clk: 0, + clk: RowIndex::from(0), asmop: None, op: None, }, VmStatePartial { - clk: 1, + clk: RowIndex::from(1), asmop: None, op: Some(Operation::Span), }, VmStatePartial { - clk: 2, + clk: RowIndex::from(2), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push1_loc.clone(), @@ -141,7 +142,7 @@ fn asmop_with_one_procedure() { op: Some(Operation::Pad), }, VmStatePartial { - clk: 3, + clk: RowIndex::from(3), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push1_loc, @@ -155,7 +156,7 @@ fn asmop_with_one_procedure() { op: Some(Operation::Incr), }, VmStatePartial { - clk: 4, + clk: RowIndex::from(4), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push2_loc, @@ -169,7 +170,7 @@ fn asmop_with_one_procedure() { op: Some(Operation::Push(Felt::new(2))), }, VmStatePartial { - clk: 5, + clk: RowIndex::from(5), asmop: Some(AsmOpInfo::new( AssemblyOp::new(add_loc, "#exec::foo".to_string(), 1, "add".to_string(), false), 1, @@ -177,7 +178,7 @@ fn asmop_with_one_procedure() { op: Some(Operation::Add), }, VmStatePartial { - clk: 6, + clk: RowIndex::from(6), asmop: None, op: Some(Operation::End), }, @@ -213,27 +214,27 @@ fn asmop_repeat_test() { let vm_state_iterator = test.execute_iter(); let expected_vm_state = vec![ VmStatePartial { - clk: 0, + clk: RowIndex::from(0), asmop: None, op: None, }, VmStatePartial { - clk: 1, + clk: RowIndex::from(1), asmop: None, op: Some(Operation::Join), }, VmStatePartial { - clk: 2, + clk: RowIndex::from(2), asmop: None, op: Some(Operation::Join), }, VmStatePartial { - clk: 3, + clk: RowIndex::from(3), asmop: None, op: Some(Operation::Span), }, VmStatePartial { - clk: 4, + clk: RowIndex::from(4), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push1_loc.clone(), @@ -247,7 +248,7 @@ fn asmop_repeat_test() { op: Some(Operation::Pad), }, VmStatePartial { - clk: 5, + clk: RowIndex::from(5), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push1_loc.clone(), @@ -261,7 +262,7 @@ fn asmop_repeat_test() { op: Some(Operation::Incr), }, VmStatePartial { - clk: 6, + clk: RowIndex::from(6), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push2_loc.clone(), @@ -275,7 +276,7 @@ fn asmop_repeat_test() { op: Some(Operation::Push(Felt::new(2))), }, VmStatePartial { - clk: 7, + clk: RowIndex::from(7), asmop: Some(AsmOpInfo::new( AssemblyOp::new( add_loc.clone(), @@ -290,17 +291,17 @@ fn asmop_repeat_test() { }, // End first Span VmStatePartial { - clk: 8, + clk: RowIndex::from(8), asmop: None, op: Some(Operation::End), }, VmStatePartial { - clk: 9, + clk: RowIndex::from(9), asmop: None, op: Some(Operation::Span), }, VmStatePartial { - clk: 10, + clk: RowIndex::from(10), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push1_loc.clone(), @@ -314,7 +315,7 @@ fn asmop_repeat_test() { op: Some(Operation::Pad), }, VmStatePartial { - clk: 11, + clk: RowIndex::from(11), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push1_loc.clone(), @@ -328,7 +329,7 @@ fn asmop_repeat_test() { op: Some(Operation::Incr), }, VmStatePartial { - clk: 12, + clk: RowIndex::from(12), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push2_loc.clone(), @@ -342,7 +343,7 @@ fn asmop_repeat_test() { op: Some(Operation::Push(Felt::new(2))), }, VmStatePartial { - clk: 13, + clk: RowIndex::from(13), asmop: Some(AsmOpInfo::new( AssemblyOp::new( add_loc.clone(), @@ -357,23 +358,23 @@ fn asmop_repeat_test() { }, // End second Span VmStatePartial { - clk: 14, + clk: RowIndex::from(14), asmop: None, op: Some(Operation::End), }, // End 
first Join VmStatePartial { - clk: 15, + clk: RowIndex::from(15), asmop: None, op: Some(Operation::End), }, VmStatePartial { - clk: 16, + clk: RowIndex::from(16), asmop: None, op: Some(Operation::Span), }, VmStatePartial { - clk: 17, + clk: RowIndex::from(17), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push1_loc.clone(), @@ -387,7 +388,7 @@ fn asmop_repeat_test() { op: Some(Operation::Pad), }, VmStatePartial { - clk: 18, + clk: RowIndex::from(18), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push1_loc, @@ -401,7 +402,7 @@ fn asmop_repeat_test() { op: Some(Operation::Incr), }, VmStatePartial { - clk: 19, + clk: RowIndex::from(19), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push2_loc, @@ -415,7 +416,7 @@ fn asmop_repeat_test() { op: Some(Operation::Push(Felt::new(2))), }, VmStatePartial { - clk: 20, + clk: RowIndex::from(20), asmop: Some(AsmOpInfo::new( AssemblyOp::new(add_loc, "#exec::#main".to_string(), 1, "add".to_string(), false), 1, @@ -424,13 +425,13 @@ fn asmop_repeat_test() { }, // End Span VmStatePartial { - clk: 21, + clk: RowIndex::from(21), asmop: None, op: Some(Operation::End), }, // End second Join VmStatePartial { - clk: 22, + clk: RowIndex::from(22), asmop: None, op: Some(Operation::End), }, @@ -476,22 +477,22 @@ fn asmop_conditional_execution_test() { let vm_state_iterator = test.execute_iter(); let expected_vm_state = vec![ VmStatePartial { - clk: 0, + clk: RowIndex::from(0), asmop: None, op: None, }, VmStatePartial { - clk: 1, + clk: RowIndex::from(1), asmop: None, op: Some(Operation::Join), }, VmStatePartial { - clk: 2, + clk: RowIndex::from(2), asmop: None, op: Some(Operation::Span), }, VmStatePartial { - clk: 3, + clk: RowIndex::from(3), asmop: Some(AsmOpInfo::new( AssemblyOp::new(eq_loc, "#exec::#main".to_string(), 1, "eq".to_string(), false), 1, @@ -499,22 +500,22 @@ fn asmop_conditional_execution_test() { op: Some(Operation::Eq), }, VmStatePartial { - clk: 4, + clk: RowIndex::from(4), asmop: None, op: Some(Operation::End), }, VmStatePartial { - clk: 5, + clk: RowIndex::from(5), asmop: None, op: Some(Operation::Split), }, VmStatePartial { - clk: 6, + clk: RowIndex::from(6), asmop: None, op: Some(Operation::Span), }, VmStatePartial { - clk: 7, + clk: RowIndex::from(7), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push1_loc.clone(), @@ -528,7 +529,7 @@ fn asmop_conditional_execution_test() { op: Some(Operation::Pad), }, VmStatePartial { - clk: 8, + clk: RowIndex::from(8), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push1_loc, @@ -542,7 +543,7 @@ fn asmop_conditional_execution_test() { op: Some(Operation::Incr), }, VmStatePartial { - clk: 9, + clk: RowIndex::from(9), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push2_loc, @@ -556,7 +557,7 @@ fn asmop_conditional_execution_test() { op: Some(Operation::Push(Felt::new(2))), }, VmStatePartial { - clk: 10, + clk: RowIndex::from(10), asmop: Some(AsmOpInfo::new( AssemblyOp::new(add_loc, "#exec::#main".to_string(), 1, "add".to_string(), false), 1, @@ -564,17 +565,17 @@ fn asmop_conditional_execution_test() { op: Some(Operation::Add), }, VmStatePartial { - clk: 11, + clk: RowIndex::from(11), asmop: None, op: Some(Operation::End), }, VmStatePartial { - clk: 12, + clk: RowIndex::from(12), asmop: None, op: Some(Operation::End), }, VmStatePartial { - clk: 13, + clk: RowIndex::from(13), asmop: None, op: Some(Operation::End), }, @@ -608,22 +609,22 @@ fn asmop_conditional_execution_test() { let vm_state_iterator = test.execute_iter(); let expected_vm_state = vec![ VmStatePartial { - clk: 0, + clk: RowIndex::from(0), asmop: None, 
op: None, }, VmStatePartial { - clk: 1, + clk: RowIndex::from(1), asmop: None, op: Some(Operation::Join), }, VmStatePartial { - clk: 2, + clk: RowIndex::from(2), asmop: None, op: Some(Operation::Span), }, VmStatePartial { - clk: 3, + clk: RowIndex::from(3), asmop: Some(AsmOpInfo::new( AssemblyOp::new(eq_loc, "#exec::#main".to_string(), 1, "eq".to_string(), false), 1, @@ -631,22 +632,22 @@ fn asmop_conditional_execution_test() { op: Some(Operation::Eq), }, VmStatePartial { - clk: 4, + clk: RowIndex::from(4), asmop: None, op: Some(Operation::End), }, VmStatePartial { - clk: 5, + clk: RowIndex::from(5), asmop: None, op: Some(Operation::Split), }, VmStatePartial { - clk: 6, + clk: RowIndex::from(6), asmop: None, op: Some(Operation::Span), }, VmStatePartial { - clk: 7, + clk: RowIndex::from(7), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push3_loc, @@ -660,7 +661,7 @@ fn asmop_conditional_execution_test() { op: Some(Operation::Push(Felt::new(3))), }, VmStatePartial { - clk: 8, + clk: RowIndex::from(8), asmop: Some(AsmOpInfo::new( AssemblyOp::new( push4_loc, @@ -674,7 +675,7 @@ fn asmop_conditional_execution_test() { op: Some(Operation::Push(Felt::new(4))), }, VmStatePartial { - clk: 9, + clk: RowIndex::from(9), asmop: Some(AsmOpInfo::new( AssemblyOp::new(add_loc, "#exec::#main".to_string(), 1, "add".to_string(), false), 1, @@ -682,22 +683,22 @@ fn asmop_conditional_execution_test() { op: Some(Operation::Add), }, VmStatePartial { - clk: 10, + clk: RowIndex::from(10), asmop: None, op: Some(Operation::Noop), }, VmStatePartial { - clk: 11, + clk: RowIndex::from(11), asmop: None, op: Some(Operation::End), }, VmStatePartial { - clk: 12, + clk: RowIndex::from(12), asmop: None, op: Some(Operation::End), }, VmStatePartial { - clk: 13, + clk: RowIndex::from(13), asmop: None, op: Some(Operation::End), }, @@ -726,7 +727,7 @@ fn build_vm_state(vm_state_iterator: VmStateIterator) -> Vec { /// * op: Operation executed at the specific clock cycle #[derive(Clone, Debug, Eq, PartialEq)] struct VmStatePartial { - clk: u32, + clk: RowIndex, asmop: Option, op: Option, } diff --git a/miden/tests/integration/operations/field_ops.rs b/miden/tests/integration/operations/field_ops.rs index 13d3c54998..9070d72516 100644 --- a/miden/tests/integration/operations/field_ops.rs +++ b/miden/tests/integration/operations/field_ops.rs @@ -212,7 +212,7 @@ fn div_fail() { // --- test divide by zero -------------------------------------------------------------------- let test = build_op_test!(asm_op, &[1, 0]); - expect_exec_error!(test, ExecutionError::DivideByZero(1)); + expect_exec_error!(test, ExecutionError::DivideByZero(1.into())); } #[test] @@ -277,7 +277,7 @@ fn inv_fail() { // --- test no inv on 0 ----------------------------------------------------------------------- let test = build_op_test!(asm_op, &[0]); - expect_exec_error!(test, ExecutionError::DivideByZero(1)); + expect_exec_error!(test, ExecutionError::DivideByZero(1.into())); let asm_op = "inv.1"; @@ -318,7 +318,7 @@ fn pow2_fail() { expect_exec_error!( test, ExecutionError::FailedAssertion { - clk: 16, + clk: 16.into(), err_code: 0, err_msg: None, } @@ -353,7 +353,7 @@ fn exp_bits_length_fail() { expect_exec_error!( test, ExecutionError::FailedAssertion { - clk: 18, + clk: 18.into(), err_code: 0, err_msg: None } @@ -402,7 +402,7 @@ fn ilog2_fail() { let asm_op = "ilog2"; let test = build_op_test!(asm_op, &[0]); - expect_exec_error!(test, ExecutionError::LogArgumentZero(1)); + expect_exec_error!(test, ExecutionError::LogArgumentZero(1.into())); } // FIELD OPS 
BOOLEAN - MANUAL TESTS diff --git a/miden/tests/integration/operations/io_ops/adv_ops.rs b/miden/tests/integration/operations/io_ops/adv_ops.rs index 0f5f6bfc97..c3dac567aa 100644 --- a/miden/tests/integration/operations/io_ops/adv_ops.rs +++ b/miden/tests/integration/operations/io_ops/adv_ops.rs @@ -32,7 +32,7 @@ fn adv_push() { fn adv_push_invalid() { // attempting to read from empty advice stack should throw an error let test = build_op_test!("adv_push.1"); - expect_exec_error!(test, ExecutionError::AdviceStackReadFailed(1)); + expect_exec_error!(test, ExecutionError::AdviceStackReadFailed(1.into())); } // OVERWRITING VALUES ON THE STACK (LOAD) @@ -53,7 +53,7 @@ fn adv_loadw() { fn adv_loadw_invalid() { // attempting to read from empty advice stack should throw an error let test = build_op_test!("adv_loadw", &[0, 0, 0, 0]); - expect_exec_error!(test, AdviceStackReadFailed(1)); + expect_exec_error!(test, AdviceStackReadFailed(1.into())); } // MOVING ELEMENTS TO MEMORY VIA THE STACK (PIPE) diff --git a/miden/tests/integration/operations/sys_ops.rs b/miden/tests/integration/operations/sys_ops.rs index 5457a630f2..366173d0fb 100644 --- a/miden/tests/integration/operations/sys_ops.rs +++ b/miden/tests/integration/operations/sys_ops.rs @@ -24,7 +24,7 @@ fn assert_with_code() { expect_exec_error!( test, ExecutionError::FailedAssertion { - clk: 1, + clk: 1.into(), err_code: 123, err_msg: None, } @@ -39,7 +39,7 @@ fn assert_fail() { expect_exec_error!( test, ExecutionError::FailedAssertion { - clk: 1, + clk: 1.into(), err_code: 0, err_msg: None, } @@ -65,7 +65,7 @@ fn assert_eq_fail() { expect_exec_error!( test, ExecutionError::FailedAssertion { - clk: 2, + clk: 2.into(), err_code: 0, err_msg: None, } @@ -75,7 +75,7 @@ fn assert_eq_fail() { expect_exec_error!( test, ExecutionError::FailedAssertion { - clk: 2, + clk: 2.into(), err_code: 0, err_msg: None, } diff --git a/miden/tests/integration/operations/u32_ops/arithmetic_ops.rs b/miden/tests/integration/operations/u32_ops/arithmetic_ops.rs index 495d61a8bb..80c4fad282 100644 --- a/miden/tests/integration/operations/u32_ops/arithmetic_ops.rs +++ b/miden/tests/integration/operations/u32_ops/arithmetic_ops.rs @@ -470,7 +470,7 @@ fn u32div_fail() { // should fail if b == 0. let test = build_op_test!(asm_op, &[1, 0]); - expect_exec_error!(test, ExecutionError::DivideByZero(1)); + expect_exec_error!(test, ExecutionError::DivideByZero(1.into())); } #[test] @@ -511,7 +511,7 @@ fn u32mod_fail() { // should fail if b == 0 let test = build_op_test!(asm_op, &[1, 0]); - expect_exec_error!(test, ExecutionError::DivideByZero(1)); + expect_exec_error!(test, ExecutionError::DivideByZero(1.into())); } #[test] @@ -557,7 +557,7 @@ fn u32divmod_fail() { // should fail if b == 0. 
let test = build_op_test!(asm_op, &[1, 0]); - expect_exec_error!(test, ExecutionError::DivideByZero(1)); + expect_exec_error!(test, ExecutionError::DivideByZero(1.into())); } // U32 OPERATIONS TESTS - RANDOMIZED - ARITHMETIC OPERATIONS diff --git a/processor/src/chiplets/aux_trace/mod.rs b/processor/src/chiplets/aux_trace/mod.rs index fece6d7be2..3cb5a3c846 100644 --- a/processor/src/chiplets/aux_trace/mod.rs +++ b/processor/src/chiplets/aux_trace/mod.rs @@ -1,16 +1,19 @@ use alloc::vec::Vec; -use miden_air::trace::{ - chiplets::{ - bitwise::OP_CYCLE_LEN as BITWISE_OP_CYCLE_LEN, - hasher::{ - CAPACITY_LEN, DIGEST_RANGE, HASH_CYCLE_LEN, LINEAR_HASH_LABEL, MP_VERIFY_LABEL, - MR_UPDATE_NEW_LABEL, MR_UPDATE_OLD_LABEL, NUM_ROUNDS, RETURN_HASH_LABEL, - RETURN_STATE_LABEL, STATE_WIDTH, +use miden_air::{ + trace::{ + chiplets::{ + bitwise::OP_CYCLE_LEN as BITWISE_OP_CYCLE_LEN, + hasher::{ + CAPACITY_LEN, DIGEST_RANGE, HASH_CYCLE_LEN, LINEAR_HASH_LABEL, MP_VERIFY_LABEL, + MR_UPDATE_NEW_LABEL, MR_UPDATE_OLD_LABEL, NUM_ROUNDS, RETURN_HASH_LABEL, + RETURN_STATE_LABEL, STATE_WIDTH, + }, + kernel_rom::KERNEL_PROC_LABEL, + memory::{MEMORY_READ_LABEL, MEMORY_WRITE_LABEL}, }, - kernel_rom::KERNEL_PROC_LABEL, - memory::{MEMORY_READ_LABEL, MEMORY_WRITE_LABEL}, + main_trace::MainTrace, }, - main_trace::MainTrace, + RowIndex, }; use vm_core::{ Word, ONE, OPCODE_CALL, OPCODE_DYN, OPCODE_END, OPCODE_HPERM, OPCODE_JOIN, OPCODE_LOOP, @@ -62,11 +65,11 @@ impl AuxTraceBuilder { pub struct ChipletsVTableColBuilder {} impl> AuxColumnBuilder for ChipletsVTableColBuilder { - fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], row: usize) -> E { + fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], row: RowIndex) -> E { chiplets_vtable_remove_sibling(main_trace, alphas, row) } - fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], row: usize) -> E { + fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], row: RowIndex) -> E { chiplets_vtable_add_sibling(main_trace, alphas, row) * build_kernel_procedure_table_inclusions(main_trace, alphas, row) } @@ -77,7 +80,7 @@ impl> AuxColumnBuilder for ChipletsVTableCo /// Constructs the removals from the table when the hasher absorbs a new sibling node while /// computing the new Merkle root. -fn chiplets_vtable_remove_sibling(main_trace: &MainTrace, alphas: &[E], row: usize) -> E +fn chiplets_vtable_remove_sibling(main_trace: &MainTrace, alphas: &[E], row: RowIndex) -> E where E: FieldElement, { @@ -118,7 +121,7 @@ where /// Constructs the inclusions to the table when the hasher absorbs a new sibling node while /// computing the old Merkle root. -fn chiplets_vtable_add_sibling(main_trace: &MainTrace, alphas: &[E], row: usize) -> E +fn chiplets_vtable_add_sibling(main_trace: &MainTrace, alphas: &[E], row: RowIndex) -> E where E: FieldElement, { @@ -155,7 +158,11 @@ where } /// Builds the inclusions to the kernel procedure table at `row`. -fn build_kernel_procedure_table_inclusions(main_trace: &MainTrace, alphas: &[E], row: usize) -> E +fn build_kernel_procedure_table_inclusions( + main_trace: &MainTrace, + alphas: &[E], + row: RowIndex, +) -> E where E: FieldElement, { @@ -190,7 +197,7 @@ pub struct BusColumnBuilder {} impl> AuxColumnBuilder for BusColumnBuilder { /// Constructs the requests made by the VM-components to the chiplets at `row`. 
- fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], row: usize) -> E + fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], row: RowIndex) -> E where E: FieldElement, { @@ -221,7 +228,7 @@ impl> AuxColumnBuilder for BusColumnBuilder } /// Constructs the responses from the chiplets to the other VM-components at `row`. - fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], row: usize) -> E + fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], row: RowIndex) -> E where E: FieldElement, { @@ -247,7 +254,7 @@ fn build_control_block_request>( main_trace: &MainTrace, op_code_felt: Felt, alphas: &[E], - row: usize, + row: RowIndex, ) -> E { let op_label = LINEAR_HASH_LABEL; let addr_nxt = main_trace.addr(row + 1); @@ -267,7 +274,7 @@ fn build_syscall_block_request>( main_trace: &MainTrace, op_code_felt: Felt, alphas: &[E], - row: usize, + row: RowIndex, ) -> E { let factor1 = build_control_block_request(main_trace, op_code_felt, alphas, row); @@ -287,7 +294,7 @@ fn build_syscall_block_request>( fn build_span_block_request>( main_trace: &MainTrace, alphas: &[E], - row: usize, + row: RowIndex, ) -> E { let op_label = LINEAR_HASH_LABEL; let addr_nxt = main_trace.addr(row + 1); @@ -306,7 +313,7 @@ fn build_span_block_request>( fn build_respan_block_request>( main_trace: &MainTrace, alphas: &[E], - row: usize, + row: RowIndex, ) -> E { let op_label = LINEAR_HASH_LABEL; let addr_nxt = main_trace.addr(row + 1); @@ -329,7 +336,7 @@ fn build_respan_block_request>( fn build_end_block_request>( main_trace: &MainTrace, alphas: &[E], - row: usize, + row: RowIndex, ) -> E { let op_label = RETURN_HASH_LABEL; let addr = main_trace.addr(row) + Felt::from(NUM_ROUNDS as u8); @@ -352,7 +359,7 @@ fn build_bitwise_request>( main_trace: &MainTrace, is_xor: Felt, alphas: &[E], - row: usize, + row: RowIndex, ) -> E { let op_label = get_op_label(ONE, ZERO, is_xor, ZERO); let a = main_trace.stack_element(1, row); @@ -371,7 +378,7 @@ fn build_mem_request_element>( main_trace: &MainTrace, op_label: u8, alphas: &[E], - row: usize, + row: RowIndex, ) -> E { let word = [ main_trace.stack_element(0, row + 1), @@ -389,7 +396,7 @@ fn build_mem_request_word>( main_trace: &MainTrace, op_label: u8, alphas: &[E], - row: usize, + row: RowIndex, ) -> E { let word = [ main_trace.stack_element(3, row + 1), @@ -406,7 +413,7 @@ fn build_mem_request_word>( fn build_mstream_request>( main_trace: &MainTrace, alphas: &[E], - row: usize, + row: RowIndex, ) -> E { let word1 = [ main_trace.stack_element(7, row + 1), @@ -433,7 +440,7 @@ fn build_mstream_request>( fn build_rcomb_base_request>( main_trace: &MainTrace, alphas: &[E], - row: usize, + row: RowIndex, ) -> E { let tz0 = main_trace.helper_register(0, row); let tz1 = main_trace.helper_register(1, row); @@ -457,7 +464,7 @@ fn build_rcomb_base_request>( fn build_hperm_request>( main_trace: &MainTrace, alphas: &[E], - row: usize, + row: RowIndex, ) -> E { let helper_0 = main_trace.helper_register(0, row); @@ -532,7 +539,7 @@ fn build_hperm_request>( fn build_mpverify_request>( main_trace: &MainTrace, alphas: &[E], - row: usize, + row: RowIndex, ) -> E { let helper_0 = main_trace.helper_register(0, row); @@ -594,7 +601,7 @@ fn build_mpverify_request>( fn build_mrupdate_request>( main_trace: &MainTrace, alphas: &[E], - row: usize, + row: RowIndex, ) -> E { let helper_0 = main_trace.helper_register(0, row); @@ -701,12 +708,7 @@ fn build_mrupdate_request>( // 
================================================================================================ /// Builds the response from the hasher chiplet at `row`. -fn build_hasher_chiplet_responses( - main_trace: &MainTrace, - // TODO: change type of the `row` variable to `u32` - row: usize, - alphas: &[E], -) -> E +fn build_hasher_chiplet_responses(main_trace: &MainTrace, row: RowIndex, alphas: &[E]) -> E where E: FieldElement, { @@ -718,7 +720,7 @@ where let op_label = get_op_label(selector0, selector1, selector2, selector3); // f_bp, f_mp, f_mv or f_mu == 1 - if row % HASH_CYCLE_LEN == 0 { + if usize::from(row) % HASH_CYCLE_LEN == 0 { let state = main_trace.chiplet_hasher_state(row); let alphas_state = &alphas[NUM_HEADER_ALPHAS..(NUM_HEADER_ALPHAS + STATE_WIDTH)]; let node_index = main_trace.chiplet_node_index(row); @@ -729,7 +731,7 @@ where if selector1 == ONE && selector2 == ZERO && selector3 == ZERO { let header = alphas[0] + alphas[1].mul_base(transition_label) - + alphas[2].mul_base(Felt::from((row + 1) as u32)) + + alphas[2].mul_base(Felt::from(row + 1)) + alphas[3].mul_base(node_index); multiplicand = header + build_value(alphas_state, &state); @@ -740,7 +742,7 @@ where if selector1 == ONE && !(selector2 == ZERO && selector3 == ZERO) { let header = alphas[0] + alphas[1].mul_base(transition_label) - + alphas[2].mul_base(Felt::from((row + 1) as u32)) + + alphas[2].mul_base(Felt::from(row + 1)) + alphas[3].mul_base(node_index); let bit = (node_index.as_int() & 1) as u8; @@ -752,7 +754,7 @@ where } // f_hout, f_sout, f_abp == 1 - if row % HASH_CYCLE_LEN == HASH_CYCLE_LEN - 1 { + if usize::from(row) % HASH_CYCLE_LEN == HASH_CYCLE_LEN - 1 { let state = main_trace.chiplet_hasher_state(row); let alphas_state = &alphas[NUM_HEADER_ALPHAS..(NUM_HEADER_ALPHAS + STATE_WIDTH)]; let node_index = main_trace.chiplet_node_index(row); @@ -763,7 +765,7 @@ where if selector1 == ZERO && selector2 == ZERO && selector3 == ZERO { let header = alphas[0] + alphas[1].mul_base(transition_label) - + alphas[2].mul_base(Felt::from((row + 1) as u32)) + + alphas[2].mul_base(Felt::from(row + 1)) + alphas[3].mul_base(node_index); multiplicand = header + build_value(&alphas_state[DIGEST_RANGE], &state[DIGEST_RANGE]); @@ -774,7 +776,7 @@ where if selector1 == ZERO && selector2 == ZERO && selector3 == ONE { let header = alphas[0] + alphas[1].mul_base(transition_label) - + alphas[2].mul_base(Felt::from((row + 1) as u32)) + + alphas[2].mul_base(Felt::from(row + 1)) + alphas[3].mul_base(node_index); multiplicand = header + build_value(alphas_state, &state); @@ -785,7 +787,7 @@ where if selector1 == ONE && selector2 == ZERO && selector3 == ZERO { let header = alphas[0] + alphas[1].mul_base(transition_label) - + alphas[2].mul_base(Felt::from((row + 1) as u32)) + + alphas[2].mul_base(Felt::from(row + 1)) + alphas[3].mul_base(node_index); let state_nxt = main_trace.chiplet_hasher_state(row + 1); @@ -803,12 +805,12 @@ where } /// Builds the response from the bitwise chiplet at `row`. 
-fn build_bitwise_chiplet_responses(main_trace: &MainTrace, row: usize, alphas: &[E]) -> E +fn build_bitwise_chiplet_responses(main_trace: &MainTrace, row: RowIndex, alphas: &[E]) -> E where E: FieldElement, { let is_xor = main_trace.chiplet_selector_2(row); - if row % BITWISE_OP_CYCLE_LEN == BITWISE_OP_CYCLE_LEN - 1 { + if usize::from(row) % BITWISE_OP_CYCLE_LEN == BITWISE_OP_CYCLE_LEN - 1 { let op_label = get_op_label(ONE, ZERO, is_xor, ZERO); let a = main_trace.chiplet_bitwise_a(row); @@ -826,7 +828,7 @@ where } /// Builds the response from the memory chiplet at `row`. -fn build_memory_chiplet_responses(main_trace: &MainTrace, row: usize, alphas: &[E]) -> E +fn build_memory_chiplet_responses(main_trace: &MainTrace, row: RowIndex, alphas: &[E]) -> E where E: FieldElement, { @@ -853,7 +855,7 @@ where } /// Builds the response from the kernel chiplet at `row`. -fn build_kernel_chiplet_responses(main_trace: &MainTrace, row: usize, alphas: &[E]) -> E +fn build_kernel_chiplet_responses(main_trace: &MainTrace, row: RowIndex, alphas: &[E]) -> E where E: FieldElement, { @@ -915,7 +917,7 @@ fn compute_memory_request>( main_trace: &MainTrace, op_label: u8, alphas: &[E], - row: usize, + row: RowIndex, addr: Felt, value: Word, ) -> E { diff --git a/processor/src/chiplets/hasher/tests.rs b/processor/src/chiplets/hasher/tests.rs index 713816ebdb..6dd790110b 100644 --- a/processor/src/chiplets/hasher/tests.rs +++ b/processor/src/chiplets/hasher/tests.rs @@ -608,7 +608,6 @@ fn check_hasher_state_trace(trace: &[Vec], row_idx: usize, init_state: Has } } -/// Makes sure that the trace is copied correctly on memoization fn check_memoized_trace( trace: &[Vec], start_row: usize, diff --git a/processor/src/chiplets/kernel_rom/mod.rs b/processor/src/chiplets/kernel_rom/mod.rs index f52451ab46..1cd0fd9efb 100644 --- a/processor/src/chiplets/kernel_rom/mod.rs +++ b/processor/src/chiplets/kernel_rom/mod.rs @@ -1,6 +1,6 @@ use super::{Digest, ExecutionError, Felt, Kernel, TraceFragment, Word, ONE, ZERO}; use alloc::collections::BTreeMap; -use miden_air::trace::chiplets::kernel_rom::TRACE_WIDTH; +use miden_air::{trace::chiplets::kernel_rom::TRACE_WIDTH, RowIndex}; #[cfg(test)] mod tests; @@ -99,7 +99,7 @@ impl KernelRom { /// Populates the provided execution trace fragment with execution trace of this kernel ROM. pub fn fill_trace(self, trace: &mut TraceFragment) { debug_assert_eq!(TRACE_WIDTH, trace.width(), "inconsistent trace fragment width"); - let mut row = 0; + let mut row: RowIndex = 0.into(); for (idx, access_info) in self.access_map.values().enumerate() { let idx = Felt::from(idx as u16); @@ -145,7 +145,7 @@ impl ProcAccessInfo { } /// Writes a single row into the provided trace fragment for this procedure access entry. 
- pub fn write_into_trace(&self, trace: &mut TraceFragment, row: usize, idx: Felt) { + pub fn write_into_trace(&self, trace: &mut TraceFragment, row: RowIndex, idx: Felt) { let s0 = if self.num_accesses == 0 { ZERO } else { ONE }; trace.set(row, 0, s0); trace.set(row, 1, idx); diff --git a/processor/src/chiplets/memory/mod.rs b/processor/src/chiplets/memory/mod.rs index bfa49756c9..2b1c86b229 100644 --- a/processor/src/chiplets/memory/mod.rs +++ b/processor/src/chiplets/memory/mod.rs @@ -4,8 +4,11 @@ use super::{ }; use crate::system::ContextId; use alloc::{collections::BTreeMap, vec::Vec}; -use miden_air::trace::chiplets::memory::{ - ADDR_COL_IDX, CLK_COL_IDX, CTX_COL_IDX, D0_COL_IDX, D1_COL_IDX, D_INV_COL_IDX, V_COL_RANGE, +use miden_air::{ + trace::chiplets::memory::{ + ADDR_COL_IDX, CLK_COL_IDX, CTX_COL_IDX, D0_COL_IDX, D1_COL_IDX, D_INV_COL_IDX, V_COL_RANGE, + }, + RowIndex, }; mod segment; @@ -114,7 +117,7 @@ impl Memory { /// Returns the entire memory state for the specified execution context at the specified cycle. /// The state is returned as a vector of (address, value) tuples, and includes addresses which /// have been accessed at least once. - pub fn get_state_at(&self, ctx: ContextId, clk: u32) -> Vec<(u64, Word)> { + pub fn get_state_at(&self, ctx: ContextId, clk: RowIndex) -> Vec<(u64, Word)> { if clk == 0 { return vec![]; } @@ -132,13 +135,13 @@ impl Memory { /// /// If the specified address hasn't been previously written to, four ZERO elements are /// returned. This effectively implies that memory is initialized to ZERO. - pub fn read(&mut self, ctx: ContextId, addr: u32, clk: u32) -> Word { + pub fn read(&mut self, ctx: ContextId, addr: u32, clk: RowIndex) -> Word { self.num_trace_rows += 1; self.trace.entry(ctx).or_default().read(addr, Felt::from(clk)) } /// Writes the provided word at the specified context/address. - pub fn write(&mut self, ctx: ContextId, addr: u32, clk: u32, value: Word) { + pub fn write(&mut self, ctx: ContextId, addr: u32, clk: RowIndex, value: Word) { self.num_trace_rows += 1; self.trace.entry(ctx).or_default().write(addr, Felt::from(clk), value); } @@ -148,7 +151,7 @@ impl Memory { /// Adds all of the range checks required by the [Memory] chiplet to the provided /// [RangeChecker] chiplet instance, along with their row in the finalized execution trace. - pub fn append_range_checks(&self, memory_start_row: usize, range: &mut RangeChecker) { + pub fn append_range_checks(&self, memory_start_row: RowIndex, range: &mut RangeChecker) { // set the previous address and clock cycle to the first address and clock cycle of the // trace; we also adjust the clock cycle so that delta value for the first row would end // up being ZERO. if the trace is empty, return without any further processing. @@ -158,7 +161,7 @@ impl Memory { }; // op range check index - let mut row = memory_start_row as u32; + let mut row = memory_start_row; for (&ctx, segment) in self.trace.iter() { for (&addr, addr_trace) in segment.inner().iter() { @@ -171,7 +174,7 @@ impl Memory { let delta = if prev_ctx != ctx { (u32::from(ctx) - u32::from(prev_ctx)).into() } else if prev_addr != addr { - (addr - prev_addr) as u64 + u64::from(addr - prev_addr) } else { clk - prev_clk - 1 }; @@ -203,7 +206,7 @@ impl Memory { // iterate through addresses in ascending order, and write trace row for each memory access // into the trace. we expect the trace to be 14 columns wide. 
- let mut row = 0; + let mut row: RowIndex = 0.into(); for (ctx, segment) in self.trace { let ctx = Felt::from(ctx); diff --git a/processor/src/chiplets/memory/segment.rs b/processor/src/chiplets/memory/segment.rs index b3137d3185..b5d04225a3 100644 --- a/processor/src/chiplets/memory/segment.rs +++ b/processor/src/chiplets/memory/segment.rs @@ -1,6 +1,7 @@ use alloc::{collections::BTreeMap, vec::Vec}; -use miden_air::trace::chiplets::memory::{ - Selectors, MEMORY_COPY_READ, MEMORY_INIT_READ, MEMORY_WRITE, +use miden_air::{ + trace::chiplets::memory::{Selectors, MEMORY_COPY_READ, MEMORY_INIT_READ, MEMORY_WRITE}, + RowIndex, }; use super::{Felt, Word, INIT_MEM_VALUE}; @@ -33,7 +34,7 @@ impl MemorySegmentTrace { } /// Returns the entire memory state at the beginning of the specified cycle. - pub fn get_state_at(&self, clk: u32) -> Vec<(u64, Word)> { + pub fn get_state_at(&self, clk: RowIndex) -> Vec<(u64, Word)> { let mut result: Vec<(u64, Word)> = Vec::new(); if clk == 0 { @@ -43,7 +44,7 @@ impl MemorySegmentTrace { // since we record memory state at the end of a given cycle, to get memory state at the end // of a cycle, we need to look at the previous cycle. that is, memory state at the end of // the previous cycle is the same as memory state the the beginning of the current cycle. - let search_clk = (clk - 1) as u64; + let search_clk: u64 = (clk - 1).into(); for (&addr, addr_trace) in self.0.iter() { match addr_trace.binary_search_by(|access| access.clk().as_int().cmp(&search_clk)) { diff --git a/processor/src/chiplets/memory/tests.rs b/processor/src/chiplets/memory/tests.rs index a99d59bb5e..b0aa7f2b2a 100644 --- a/processor/src/chiplets/memory/tests.rs +++ b/processor/src/chiplets/memory/tests.rs @@ -4,8 +4,12 @@ use super::{ }; use crate::ContextId; use alloc::vec::Vec; -use miden_air::trace::chiplets::memory::{ - Selectors, MEMORY_COPY_READ, MEMORY_INIT_READ, MEMORY_WRITE, TRACE_WIDTH as MEMORY_TRACE_WIDTH, +use miden_air::{ + trace::chiplets::memory::{ + Selectors, MEMORY_COPY_READ, MEMORY_INIT_READ, MEMORY_WRITE, + TRACE_WIDTH as MEMORY_TRACE_WIDTH, + }, + RowIndex, }; use vm_core::Word; @@ -22,27 +26,27 @@ fn mem_read() { // read a value from address 0; clk = 1 let addr0 = 0; - let value = mem.read(ContextId::root(), addr0, 1); + let value = mem.read(ContextId::root(), addr0, 1.into()); assert_eq!(EMPTY_WORD, value); assert_eq!(1, mem.size()); assert_eq!(1, mem.trace_len()); // read a value from address 3; clk = 2 let addr3 = 3; - let value = mem.read(ContextId::root(), addr3, 2); + let value = mem.read(ContextId::root(), addr3, 2.into()); assert_eq!(EMPTY_WORD, value); assert_eq!(2, mem.size()); assert_eq!(2, mem.trace_len()); // read a value from address 0 again; clk = 3 - let value = mem.read(ContextId::root(), addr0, 3); + let value = mem.read(ContextId::root(), addr0, 3.into()); assert_eq!(EMPTY_WORD, value); assert_eq!(2, mem.size()); assert_eq!(3, mem.trace_len()); // read a value from address 2; clk = 4 let addr2 = 2; - let value = mem.read(ContextId::root(), addr2, 4); + let value = mem.read(ContextId::root(), addr2, 4.into()); assert_eq!(EMPTY_WORD, value); assert_eq!(3, mem.size()); assert_eq!(4, mem.trace_len()); @@ -53,18 +57,18 @@ fn mem_read() { // address 0 let mut prev_row = [ZERO; MEMORY_TRACE_WIDTH]; - let memory_access = MemoryAccess::new(ContextId::root(), addr0, 1, EMPTY_WORD); + let memory_access = MemoryAccess::new(ContextId::root(), addr0, 1.into(), EMPTY_WORD); prev_row = verify_memory_access(&trace, 0, MEMORY_INIT_READ, &memory_access, prev_row); - let 
memory_access = MemoryAccess::new(ContextId::root(), addr0, 3, EMPTY_WORD); + let memory_access = MemoryAccess::new(ContextId::root(), addr0, 3.into(), EMPTY_WORD); prev_row = verify_memory_access(&trace, 1, MEMORY_COPY_READ, &memory_access, prev_row); // address 2 - let memory_access = MemoryAccess::new(ContextId::root(), addr2, 4, EMPTY_WORD); + let memory_access = MemoryAccess::new(ContextId::root(), addr2, 4.into(), EMPTY_WORD); prev_row = verify_memory_access(&trace, 2, MEMORY_INIT_READ, &memory_access, prev_row); // address 3 - let memory_access = MemoryAccess::new(ContextId::root(), addr3, 2, EMPTY_WORD); + let memory_access = MemoryAccess::new(ContextId::root(), addr3, 2.into(), EMPTY_WORD); verify_memory_access(&trace, 3, MEMORY_INIT_READ, &memory_access, prev_row); } @@ -75,7 +79,7 @@ fn mem_write() { // write a value into address 0; clk = 1 let addr0 = 0; let value1 = [ONE, ZERO, ZERO, ZERO]; - mem.write(ContextId::root(), addr0, 1, value1); + mem.write(ContextId::root(), addr0, 1.into(), value1); assert_eq!(value1, mem.get_value(ContextId::root(), addr0).unwrap()); assert_eq!(1, mem.size()); assert_eq!(1, mem.trace_len()); @@ -83,7 +87,7 @@ fn mem_write() { // write a value into address 2; clk = 2 let addr2 = 2; let value5 = [Felt::new(5), ZERO, ZERO, ZERO]; - mem.write(ContextId::root(), addr2, 2, value5); + mem.write(ContextId::root(), addr2, 2.into(), value5); assert_eq!(value5, mem.get_value(ContextId::root(), addr2).unwrap()); assert_eq!(2, mem.size()); assert_eq!(2, mem.trace_len()); @@ -91,14 +95,14 @@ fn mem_write() { // write a value into address 1; clk = 3 let addr1 = 1; let value7 = [Felt::new(7), ZERO, ZERO, ZERO]; - mem.write(ContextId::root(), addr1, 3, value7); + mem.write(ContextId::root(), addr1, 3.into(), value7); assert_eq!(value7, mem.get_value(ContextId::root(), addr1).unwrap()); assert_eq!(3, mem.size()); assert_eq!(3, mem.trace_len()); // write a value into address 0; clk = 4 let value9 = [Felt::new(9), ZERO, ZERO, ZERO]; - mem.write(ContextId::root(), addr0, 4, value9); + mem.write(ContextId::root(), addr0, 4.into(), value9); assert_eq!(value7, mem.get_value(ContextId::root(), addr1).unwrap()); assert_eq!(3, mem.size()); assert_eq!(4, mem.trace_len()); @@ -109,18 +113,18 @@ fn mem_write() { // address 0 let mut prev_row = [ZERO; MEMORY_TRACE_WIDTH]; - let memory_access = MemoryAccess::new(ContextId::root(), addr0, 1, value1); + let memory_access = MemoryAccess::new(ContextId::root(), addr0, 1.into(), value1); prev_row = verify_memory_access(&trace, 0, MEMORY_WRITE, &memory_access, prev_row); - let memory_access = MemoryAccess::new(ContextId::root(), addr0, 4, value9); + let memory_access = MemoryAccess::new(ContextId::root(), addr0, 4.into(), value9); prev_row = verify_memory_access(&trace, 1, MEMORY_WRITE, &memory_access, prev_row); // address 1 - let memory_access = MemoryAccess::new(ContextId::root(), addr1, 3, value7); + let memory_access = MemoryAccess::new(ContextId::root(), addr1, 3.into(), value7); prev_row = verify_memory_access(&trace, 2, MEMORY_WRITE, &memory_access, prev_row); // address 2 - let memory_access = MemoryAccess::new(ContextId::root(), addr2, 2, value5); + let memory_access = MemoryAccess::new(ContextId::root(), addr2, 2.into(), value5); verify_memory_access(&trace, 3, MEMORY_WRITE, &memory_access, prev_row); } @@ -131,35 +135,35 @@ fn mem_write_read() { // write 1 into address 5; clk = 1 let addr5 = 5; let value1 = [ONE, ZERO, ZERO, ZERO]; - mem.write(ContextId::root(), addr5, 1, value1); + mem.write(ContextId::root(), addr5, 
1.into(), value1); // write 4 into address 2; clk = 2 let addr2 = 2; let value4 = [Felt::new(4), ZERO, ZERO, ZERO]; - mem.write(ContextId::root(), addr2, 2, value4); + mem.write(ContextId::root(), addr2, 2.into(), value4); // read a value from address 5; clk = 3 - mem.read(ContextId::root(), addr5, 3); + mem.read(ContextId::root(), addr5, 3.into()); // write 2 into address 5; clk = 4 let value2 = [Felt::new(2), ZERO, ZERO, ZERO]; - mem.write(ContextId::root(), addr5, 4, value2); + mem.write(ContextId::root(), addr5, 4.into(), value2); // read a value from address 2; clk = 5 - mem.read(ContextId::root(), addr2, 5); + mem.read(ContextId::root(), addr2, 5.into()); // write 7 into address 2; clk = 6 let value7 = [Felt::new(7), ZERO, ZERO, ZERO]; - mem.write(ContextId::root(), addr2, 6, value7); + mem.write(ContextId::root(), addr2, 6.into(), value7); // read a value from address 5; clk = 7 - mem.read(ContextId::root(), addr5, 7); + mem.read(ContextId::root(), addr5, 7.into()); // read a value from address 2; clk = 8 - mem.read(ContextId::root(), addr2, 8); + mem.read(ContextId::root(), addr2, 8.into()); // read a value from address 5; clk = 9 - mem.read(ContextId::root(), addr5, 9); + mem.read(ContextId::root(), addr5, 9.into()); // check generated trace and memory data provided to the ChipletsBus; rows should be sorted by // address and then clock cycle @@ -167,32 +171,32 @@ fn mem_write_read() { // address 2 let mut prev_row = [ZERO; MEMORY_TRACE_WIDTH]; - let memory_access = MemoryAccess::new(ContextId::root(), addr2, 2, value4); + let memory_access = MemoryAccess::new(ContextId::root(), addr2, 2.into(), value4); prev_row = verify_memory_access(&trace, 0, MEMORY_WRITE, &memory_access, prev_row); - let memory_access = MemoryAccess::new(ContextId::root(), addr2, 5, value4); + let memory_access = MemoryAccess::new(ContextId::root(), addr2, 5.into(), value4); prev_row = verify_memory_access(&trace, 1, MEMORY_COPY_READ, &memory_access, prev_row); - let memory_access = MemoryAccess::new(ContextId::root(), addr2, 6, value7); + let memory_access = MemoryAccess::new(ContextId::root(), addr2, 6.into(), value7); prev_row = verify_memory_access(&trace, 2, MEMORY_WRITE, &memory_access, prev_row); - let memory_access = MemoryAccess::new(ContextId::root(), addr2, 8, value7); + let memory_access = MemoryAccess::new(ContextId::root(), addr2, 8.into(), value7); prev_row = verify_memory_access(&trace, 3, MEMORY_COPY_READ, &memory_access, prev_row); // address 5 - let memory_access = MemoryAccess::new(ContextId::root(), addr5, 1, value1); + let memory_access = MemoryAccess::new(ContextId::root(), addr5, 1.into(), value1); prev_row = verify_memory_access(&trace, 4, MEMORY_WRITE, &memory_access, prev_row); - let memory_access = MemoryAccess::new(ContextId::root(), addr5, 3, value1); + let memory_access = MemoryAccess::new(ContextId::root(), addr5, 3.into(), value1); prev_row = verify_memory_access(&trace, 5, MEMORY_COPY_READ, &memory_access, prev_row); - let memory_access = MemoryAccess::new(ContextId::root(), addr5, 4, value2); + let memory_access = MemoryAccess::new(ContextId::root(), addr5, 4.into(), value2); prev_row = verify_memory_access(&trace, 6, MEMORY_WRITE, &memory_access, prev_row); - let memory_access = MemoryAccess::new(ContextId::root(), addr5, 7, value2); + let memory_access = MemoryAccess::new(ContextId::root(), addr5, 7.into(), value2); prev_row = verify_memory_access(&trace, 7, MEMORY_COPY_READ, &memory_access, prev_row); - let memory_access = MemoryAccess::new(ContextId::root(), addr5, 9, 
value2); + let memory_access = MemoryAccess::new(ContextId::root(), addr5, 9.into(), value2); verify_memory_access(&trace, 8, MEMORY_COPY_READ, &memory_access, prev_row); } @@ -202,33 +206,33 @@ fn mem_multi_context() { // write a value into ctx = ContextId::root(), addr = 0; clk = 1 let value1 = [ONE, ZERO, ZERO, ZERO]; - mem.write(ContextId::root(), 0, 1, value1); + mem.write(ContextId::root(), 0, 1.into(), value1); assert_eq!(value1, mem.get_value(ContextId::root(), 0).unwrap()); assert_eq!(1, mem.size()); assert_eq!(1, mem.trace_len()); // write a value into ctx = 3, addr = 1; clk = 4 let value2 = [ZERO, ONE, ZERO, ZERO]; - mem.write(3.into(), 1, 4, value2); + mem.write(3.into(), 1, 4.into(), value2); assert_eq!(value2, mem.get_value(3.into(), 1).unwrap()); assert_eq!(2, mem.size()); assert_eq!(2, mem.trace_len()); // read a value from ctx = 3, addr = 1; clk = 6 - let value = mem.read(3.into(), 1, 6); + let value = mem.read(3.into(), 1, 6.into()); assert_eq!(value2, value); assert_eq!(2, mem.size()); assert_eq!(3, mem.trace_len()); // write a value into ctx = 3, addr = 0; clk = 7 let value3 = [ZERO, ZERO, ONE, ZERO]; - mem.write(3.into(), 0, 7, value3); + mem.write(3.into(), 0, 7.into(), value3); assert_eq!(value3, mem.get_value(3.into(), 0).unwrap()); assert_eq!(3, mem.size()); assert_eq!(4, mem.trace_len()); // read a value from ctx = 0, addr = 0; clk = 9 - let value = mem.read(ContextId::root(), 0, 9); + let value = mem.read(ContextId::root(), 0, 9.into()); assert_eq!(value1, value); assert_eq!(3, mem.size()); assert_eq!(5, mem.trace_len()); @@ -239,21 +243,21 @@ fn mem_multi_context() { // ctx = 0, addr = 0 let mut prev_row = [ZERO; MEMORY_TRACE_WIDTH]; - let memory_access = MemoryAccess::new(ContextId::root(), 0, 1, value1); + let memory_access = MemoryAccess::new(ContextId::root(), 0, 1.into(), value1); prev_row = verify_memory_access(&trace, 0, MEMORY_WRITE, &memory_access, prev_row); - let memory_access = MemoryAccess::new(ContextId::root(), 0, 9, value1); + let memory_access = MemoryAccess::new(ContextId::root(), 0, 9.into(), value1); prev_row = verify_memory_access(&trace, 1, MEMORY_COPY_READ, &memory_access, prev_row); // ctx = 3, addr = 0 - let memory_access = MemoryAccess::new(3.into(), 0, 7, value3); + let memory_access = MemoryAccess::new(3.into(), 0, 7.into(), value3); prev_row = verify_memory_access(&trace, 2, MEMORY_WRITE, &memory_access, prev_row); // ctx = 3, addr = 1 - let memory_access = MemoryAccess::new(3.into(), 1, 4, value2); + let memory_access = MemoryAccess::new(3.into(), 1, 4.into(), value2); prev_row = verify_memory_access(&trace, 3, MEMORY_WRITE, &memory_access, prev_row); - let memory_access = MemoryAccess::new(3.into(), 1, 6, value2); + let memory_access = MemoryAccess::new(3.into(), 1, 6.into(), value2); verify_memory_access(&trace, 4, MEMORY_COPY_READ, &memory_access, prev_row); } @@ -264,33 +268,33 @@ fn mem_get_state_at() { // Write 1 into (ctx = 0, addr = 5) at clk = 1. // This means that mem[5] = 1 at the beginning of clk = 2 let value1 = [ONE, ZERO, ZERO, ZERO]; - mem.write(ContextId::root(), 5, 1, value1); + mem.write(ContextId::root(), 5, 1.into(), value1); // Write 4 into (ctx = 0, addr = 2) at clk = 2. 
// This means that mem[2] = 4 at the beginning of clk = 3 let value4 = [Felt::new(4), ZERO, ZERO, ZERO]; - mem.write(ContextId::root(), 2, 2, value4); + mem.write(ContextId::root(), 2, 2.into(), value4); // write 7 into (ctx = 3, addr = 3) at clk = 4 // This means that mem[3] = 7 at the beginning of clk = 4 let value7 = [Felt::new(7), ZERO, ZERO, ZERO]; - mem.write(3.into(), 3, 4, value7); + mem.write(3.into(), 3, 4.into(), value7); // Check memory state at clk = 2 - assert_eq!(mem.get_state_at(ContextId::root(), 2), vec![(5, value1)]); - assert_eq!(mem.get_state_at(3.into(), 2), vec![]); + assert_eq!(mem.get_state_at(ContextId::root(), 2.into()), vec![(5, value1)]); + assert_eq!(mem.get_state_at(3.into(), 2.into()), vec![]); // Check memory state at clk = 3 - assert_eq!(mem.get_state_at(ContextId::root(), 3), vec![(2, value4), (5, value1)]); - assert_eq!(mem.get_state_at(3.into(), 3), vec![]); + assert_eq!(mem.get_state_at(ContextId::root(), 3.into()), vec![(2, value4), (5, value1)]); + assert_eq!(mem.get_state_at(3.into(), 3.into()), vec![]); // Check memory state at clk = 4 - assert_eq!(mem.get_state_at(ContextId::root(), 4), vec![(2, value4), (5, value1)]); - assert_eq!(mem.get_state_at(3.into(), 4), vec![]); + assert_eq!(mem.get_state_at(ContextId::root(), 4.into()), vec![(2, value4), (5, value1)]); + assert_eq!(mem.get_state_at(3.into(), 4.into()), vec![]); // Check memory state at clk = 5 - assert_eq!(mem.get_state_at(ContextId::root(), 5), vec![(2, value4), (5, value1)]); - assert_eq!(mem.get_state_at(3.into(), 5), vec![(3, value7)]); + assert_eq!(mem.get_state_at(ContextId::root(), 5.into()), vec![(2, value4), (5, value1)]); + assert_eq!(mem.get_state_at(3.into(), 5.into()), vec![(3, value7)]); } // HELPER STRUCT & FUNCTIONS @@ -305,7 +309,7 @@ pub struct MemoryAccess { } impl MemoryAccess { - pub fn new(ctx: ContextId, addr: u32, clk: u32, word: Word) -> Self { + pub fn new(ctx: ContextId, addr: u32, clk: RowIndex, word: Word) -> Self { Self { ctx, addr: Felt::from(addr), diff --git a/processor/src/chiplets/mod.rs b/processor/src/chiplets/mod.rs index dad7b0b04d..a298e1271e 100644 --- a/processor/src/chiplets/mod.rs +++ b/processor/src/chiplets/mod.rs @@ -5,7 +5,10 @@ use super::{ TraceFragment, Word, CHIPLETS_WIDTH, EMPTY_WORD, ONE, ZERO, }; use alloc::vec::Vec; -use miden_air::trace::chiplets::hasher::{Digest, HasherState}; +use miden_air::{ + trace::chiplets::hasher::{Digest, HasherState}, + RowIndex, +}; use vm_core::{mast::OpBatch, Kernel}; mod bitwise; @@ -113,7 +116,7 @@ mod tests; /// +---+---+---+---+---------------------------------------------------------+ pub struct Chiplets { /// Current clock cycle of the VM. - clk: u32, + clk: RowIndex, hasher: Hasher, bitwise: Bitwise, memory: Memory, @@ -126,7 +129,7 @@ impl Chiplets { /// Returns a new [Chiplets] component instantiated with the provided Kernel. pub fn new(kernel: Kernel) -> Self { Self { - clk: 0, + clk: RowIndex::from(0), hasher: Hasher::default(), bitwise: Bitwise::default(), memory: Memory::default(), @@ -149,22 +152,22 @@ impl Chiplets { } /// Returns the index of the first row of [Bitwise] execution trace. - pub fn bitwise_start(&self) -> usize { - self.hasher.trace_len() + pub fn bitwise_start(&self) -> RowIndex { + self.hasher.trace_len().into() } /// Returns the index of the first row of the [Memory] execution trace. 
- pub fn memory_start(&self) -> usize { + pub fn memory_start(&self) -> RowIndex { self.bitwise_start() + self.bitwise.trace_len() } /// Returns the index of the first row of [KernelRom] execution trace. - pub fn kernel_rom_start(&self) -> usize { + pub fn kernel_rom_start(&self) -> RowIndex { self.memory_start() + self.memory.trace_len() } /// Returns the index of the first row of the padding section of the execution trace. - pub fn padding_start(&self) -> usize { + pub fn padding_start(&self) -> RowIndex { self.kernel_rom_start() + self.kernel_rom.trace_len() } @@ -341,7 +344,7 @@ impl Chiplets { /// Returns the entire memory state for the specified execution context at the specified cycle. /// The state is returned as a vector of (address, value) tuples, and includes addresses which /// have been accessed at least once. - pub fn get_mem_state_at(&self, ctx: ContextId, clk: u32) -> Vec<(u64, Word)> { + pub fn get_mem_state_at(&self, ctx: ContextId, clk: RowIndex) -> Vec<(u64, Word)> { self.memory.get_state_at(ctx, clk) } @@ -415,11 +418,11 @@ impl Chiplets { /// It returns the auxiliary trace builders for generating auxiliary trace columns that depend /// on data from [Chiplets]. fn fill_trace(self, trace: &mut [Vec; CHIPLETS_WIDTH]) { - // get the rows where chiplets begin. - let bitwise_start = self.bitwise_start(); - let memory_start = self.memory_start(); - let kernel_rom_start = self.kernel_rom_start(); - let padding_start = self.padding_start(); + // get the rows where chiplets begin. + let bitwise_start: usize = self.bitwise_start().into(); + let memory_start: usize = self.memory_start().into(); + let kernel_rom_start: usize = self.kernel_rom_start().into(); + let padding_start: usize = self.padding_start().into(); let Chiplets { clk: _, diff --git a/processor/src/debug.rs b/processor/src/debug.rs index c891dce2ae..31ac8eebf5 100644 --- a/processor/src/debug.rs +++ b/processor/src/debug.rs @@ -3,6 +3,7 @@ use alloc::{ vec::Vec, }; use core::fmt; +use miden_air::RowIndex; use vm_core::{AssemblyOp, Operation, StackOutputs, Word}; @@ -14,7 +15,7 @@ use crate::{ /// VmState holds a current process state information at a specific clock cycle. #[derive(Clone, Debug, Eq, PartialEq)] pub struct VmState { - pub clk: u32, + pub clk: RowIndex, pub ctx: ContextId, pub op: Option, pub asmop: Option, @@ -56,7 +57,7 @@ pub struct VmStateIterator { stack: Stack, system: System, error: Option, - clk: u32, + clk: RowIndex, asmop_idx: usize, forward: bool, trace_len_summary: TraceLenSummary, @@ -76,7 +77,7 @@ impl VmStateIterator { stack, system, error: result.err(), - clk: 0, + clk: RowIndex::from(0), asmop_idx: 0, forward: true, trace_len_summary, @@ -109,7 +110,7 @@ impl VmStateIterator { &assembly_ops[self.asmop_idx - 1], // difference between current clock cycle and start clock cycle of the current // asmop - (a.max(b) - a.min(b)) as u8, + u32::from(a.max(b) - usize::from(a.min(b.into()))) as u8, ) } else { (next_asmop, 0) //dummy value, never used. @@ -117,7 +118,7 @@ impl VmStateIterator { // if this is the first op in the sequence corresponding to the next asmop, returns a new // instance of [AsmOp] instantiated with next asmop, num_cycles and cycle_idx of 1. 
- if next_asmop.0 as u32 == self.clk - 1 { + if next_asmop.0 as u32 == u32::from(self.clk - 1) { // cycle_idx starts at 1 instead of 0 to remove ambiguity let cycle_idx = 1; let asmop = AsmOpInfo::new(next_asmop.1.clone(), cycle_idx); @@ -154,7 +155,7 @@ impl VmStateIterator { let op = if self.clk == 0 { None } else { - Some(self.decoder.debug_info().operations()[self.clk as usize - 1]) + Some(self.decoder.debug_info().operations()[self.clk - 1]) }; let (asmop, is_start) = self.get_asmop(); @@ -195,7 +196,7 @@ impl VmStateIterator { let range_table_len = range.get_number_range_checker_rows(); chiplets.append_range_checks(range); - TraceLenSummary::new(clk as usize, range_table_len, ChipletsLengths::new(chiplets)) + TraceLenSummary::new(clk.into(), range_table_len, ChipletsLengths::new(chiplets)) } } @@ -224,7 +225,7 @@ impl Iterator for VmStateIterator { let op = if self.clk == 0 { None } else { - Some(self.decoder.debug_info().operations()[self.clk as usize - 1]) + Some(self.decoder.debug_info().operations()[usize::from(self.clk) - 1]) }; let (asmop, is_start) = self.get_asmop(); diff --git a/processor/src/decoder/aux_trace/block_hash_table.rs b/processor/src/decoder/aux_trace/block_hash_table.rs index 21d2d7a9da..d59c267394 100644 --- a/processor/src/decoder/aux_trace/block_hash_table.rs +++ b/processor/src/decoder/aux_trace/block_hash_table.rs @@ -1,3 +1,4 @@ +use miden_air::RowIndex; use vm_core::{ Word, OPCODE_DYN, OPCODE_END, OPCODE_HALT, OPCODE_JOIN, OPCODE_LOOP, OPCODE_REPEAT, OPCODE_SPLIT, ZERO, @@ -28,7 +29,7 @@ impl> AuxColumnBuilder for BlockHashTableCo } /// Removes a row from the block hash table. - fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], row: usize) -> E { + fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], row: RowIndex) -> E { let op_code = main_trace.get_op_code(row).as_int() as u8; match op_code { @@ -38,7 +39,7 @@ impl> AuxColumnBuilder for BlockHashTableCo } /// Adds a row to the block hash table. - fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], row: usize) -> E { + fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], row: RowIndex) -> E { let op_code = main_trace.get_op_code(row).as_int() as u8; match op_code { @@ -88,8 +89,9 @@ impl BlockHashTableRow { // Computes the initial row in the block hash table. pub fn table_init(main_trace: &MainTrace) -> Self { let program_hash = { - let row_with_halt = (0..main_trace.num_rows()) - .find(|row| main_trace.get_op_code(*row) == Felt::from(OPCODE_HALT)) + let row_with_halt = main_trace + .row_iter() + .find(|&row| main_trace.get_op_code(row) == Felt::from(OPCODE_HALT)) .expect("execution trace must include at least one occurrence of HALT"); main_trace.decoder_hasher_state_first_half(row_with_halt) @@ -105,7 +107,7 @@ impl BlockHashTableRow { /// Computes the row to be removed from the block hash table when encountering an `END` /// operation. - pub fn from_end(main_trace: &MainTrace, row: usize) -> Self { + pub fn from_end(main_trace: &MainTrace, row: RowIndex) -> Self { let op_code_next = main_trace.get_op_code(row + 1).as_int() as u8; let parent_block_id = main_trace.addr(row + 1); let child_block_hash = main_trace.decoder_hasher_state_first_half(row); @@ -137,7 +139,7 @@ impl BlockHashTableRow { /// Computes the row corresponding to the left or right child to add to the block hash table /// when encountering a `JOIN` operation. 
- pub fn from_join(main_trace: &MainTrace, row: usize, is_first_child: bool) -> Self { + pub fn from_join(main_trace: &MainTrace, row: RowIndex, is_first_child: bool) -> Self { let child_block_hash = if is_first_child { main_trace.decoder_hasher_state_first_half(row) } else { @@ -153,7 +155,7 @@ impl BlockHashTableRow { } /// Computes the row to add to the block hash table when encountering a `SPLIT` operation. - pub fn from_split(main_trace: &MainTrace, row: usize) -> Self { + pub fn from_split(main_trace: &MainTrace, row: RowIndex) -> Self { let stack_top = main_trace.stack_element(0, row); let parent_block_id = main_trace.addr(row + 1); // Note: only one child of a split block is executed. Hence, `is_first_child` is always @@ -183,7 +185,7 @@ impl BlockHashTableRow { /// Computes the row (optionally) to add to the block hash table when encountering a `LOOP` /// operation. That is, a loop will have a child to execute when the top of the stack is 1. - pub fn from_loop(main_trace: &MainTrace, row: usize) -> Option { + pub fn from_loop(main_trace: &MainTrace, row: RowIndex) -> Option { let stack_top = main_trace.stack_element(0, row); if stack_top == ONE { @@ -202,7 +204,7 @@ impl BlockHashTableRow { /// `REPEAT` marks the start of a new loop iteration, and hence the loop's child block needs to /// be added to the block hash table once again (since it was removed in the previous `END` /// instruction). - pub fn from_repeat(main_trace: &MainTrace, row: usize) -> Self { + pub fn from_repeat(main_trace: &MainTrace, row: RowIndex) -> Self { Self { parent_block_id: main_trace.addr(row + 1), child_block_hash: main_trace.decoder_hasher_state_first_half(row), @@ -212,7 +214,7 @@ impl BlockHashTableRow { } /// Computes the row to add to the block hash table when encountering a `DYN` operation. - pub fn from_dyn(main_trace: &MainTrace, row: usize) -> Self { + pub fn from_dyn(main_trace: &MainTrace, row: RowIndex) -> Self { let child_block_hash = { // Note: the child block hash is found on the stack, and hence in reverse order. let s0 = main_trace.stack_element(0, row); diff --git a/processor/src/decoder/aux_trace/block_stack_table.rs b/processor/src/decoder/aux_trace/block_stack_table.rs index ed838bdb97..e030f9b171 100644 --- a/processor/src/decoder/aux_trace/block_stack_table.rs +++ b/processor/src/decoder/aux_trace/block_stack_table.rs @@ -1,3 +1,4 @@ +use miden_air::RowIndex; use vm_core::{ OPCODE_CALL, OPCODE_DYN, OPCODE_END, OPCODE_JOIN, OPCODE_LOOP, OPCODE_RESPAN, OPCODE_SPAN, OPCODE_SPLIT, OPCODE_SYSCALL, @@ -15,7 +16,7 @@ pub struct BlockStackColumnBuilder {} impl> AuxColumnBuilder for BlockStackColumnBuilder { /// Removes a row from the block stack table. - fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], i: usize) -> E { + fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], i: RowIndex) -> E { let op_code_felt = main_trace.get_op_code(i); let op_code = op_code_felt.as_int() as u8; @@ -29,7 +30,7 @@ impl> AuxColumnBuilder for BlockStackColumn } /// Adds a row to the block stack table. - fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], i: usize) -> E { + fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], i: RowIndex) -> E { let op_code_felt = main_trace.get_op_code(i); let op_code = op_code_felt.as_int() as u8; @@ -49,7 +50,7 @@ impl> AuxColumnBuilder for BlockStackColumn /// Computes the multiplicand representing the removal of a row from the block stack table. 
fn get_block_stack_table_removal_multiplicand>( main_trace: &MainTrace, - i: usize, + i: RowIndex, is_respan: bool, alphas: &[E], ) -> E { @@ -102,7 +103,7 @@ fn get_block_stack_table_removal_multiplicand> /// Computes the multiplicand representing the inclusion of a new row to the block stack table. fn get_block_stack_table_inclusion_multiplicand>( main_trace: &MainTrace, - i: usize, + i: RowIndex, alphas: &[E], op_code: u8, ) -> E { diff --git a/processor/src/decoder/aux_trace/op_group_table.rs b/processor/src/decoder/aux_trace/op_group_table.rs index b921481900..c124aa71b3 100644 --- a/processor/src/decoder/aux_trace/op_group_table.rs +++ b/processor/src/decoder/aux_trace/op_group_table.rs @@ -1,5 +1,8 @@ use super::{AuxColumnBuilder, Felt, FieldElement, MainTrace, ONE}; -use miden_air::trace::decoder::{OP_BATCH_2_GROUPS, OP_BATCH_4_GROUPS, OP_BATCH_8_GROUPS}; +use miden_air::{ + trace::decoder::{OP_BATCH_2_GROUPS, OP_BATCH_4_GROUPS, OP_BATCH_8_GROUPS}, + RowIndex, +}; use vm_core::{OPCODE_PUSH, OPCODE_RESPAN, OPCODE_SPAN}; // OP GROUP TABLE COLUMN @@ -12,7 +15,7 @@ pub struct OpGroupTableColumnBuilder {} impl> AuxColumnBuilder for OpGroupTableColumnBuilder { /// Removes a row from the block hash table. - fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], i: usize) -> E { + fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], i: RowIndex) -> E { let delete_group_flag = main_trace.delta_group_count(i) * main_trace.is_in_span(i); if delete_group_flag == ONE { @@ -23,7 +26,7 @@ impl> AuxColumnBuilder for OpGroupTableColu } /// Adds a row to the block hash table. - fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], i: usize) -> E { + fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], i: RowIndex) -> E { let op_code_felt = main_trace.get_op_code(i); let op_code = op_code_felt.as_int() as u8; @@ -42,7 +45,7 @@ impl> AuxColumnBuilder for OpGroupTableColu /// Computes the multiplicand representing the inclusion of a new row to the op group table. fn get_op_group_table_inclusion_multiplicand>( main_trace: &MainTrace, - i: usize, + i: RowIndex, alphas: &[E], ) -> E { let block_id = main_trace.addr(i + 1); @@ -79,7 +82,7 @@ fn get_op_group_table_inclusion_multiplicand>( /// Computes the multiplicand representing the removal of a row from the op group table. fn get_op_group_table_removal_multiplicand>( main_trace: &MainTrace, - i: usize, + i: RowIndex, alphas: &[E], ) -> E { let group_count = main_trace.group_count(i); diff --git a/processor/src/decoder/mod.rs b/processor/src/decoder/mod.rs index 36e4a6b19b..88a7e65774 100644 --- a/processor/src/decoder/mod.rs +++ b/processor/src/decoder/mod.rs @@ -3,12 +3,15 @@ use super::{ ZERO, }; use alloc::vec::Vec; -use miden_air::trace::{ - chiplets::hasher::DIGEST_LEN, - decoder::{ - NUM_HASHER_COLUMNS, NUM_OP_BATCH_FLAGS, NUM_OP_BITS, NUM_OP_BITS_EXTRA_COLS, - OP_BATCH_1_GROUPS, OP_BATCH_2_GROUPS, OP_BATCH_4_GROUPS, OP_BATCH_8_GROUPS, +use miden_air::{ + trace::{ + chiplets::hasher::DIGEST_LEN, + decoder::{ + NUM_HASHER_COLUMNS, NUM_OP_BATCH_FLAGS, NUM_OP_BITS, NUM_OP_BITS_EXTRA_COLS, + OP_BATCH_1_GROUPS, OP_BATCH_2_GROUPS, OP_BATCH_4_GROUPS, OP_BATCH_8_GROUPS, + }, }, + RowIndex, }; use vm_core::{ mast::{ @@ -700,7 +703,7 @@ impl Decoder { // -------------------------------------------------------------------------------------------- /// Appends an asmop decorator at the specified clock cycle to the asmop list in debug mode. 
- pub fn append_asmop(&mut self, clk: u32, asmop: AssemblyOp) { + pub fn append_asmop(&mut self, clk: RowIndex, asmop: AssemblyOp) { self.debug_info.append_asmop(clk, asmop); } @@ -821,7 +824,7 @@ impl DebugInfo { } /// Appends an asmop decorator at the specified clock cycle to the asmop list in debug mode. - pub fn append_asmop(&mut self, clk: u32, asmop: AssemblyOp) { - self.assembly_ops.push((clk as usize, asmop)); + pub fn append_asmop(&mut self, clk: RowIndex, asmop: AssemblyOp) { + self.assembly_ops.push((clk.into(), asmop)); } } diff --git a/processor/src/errors.rs b/processor/src/errors.rs index 203384367f..b030cd07c7 100644 --- a/processor/src/errors.rs +++ b/processor/src/errors.rs @@ -5,6 +5,7 @@ use super::{ }; use alloc::string::String; use core::fmt::{Display, Formatter}; +use miden_air::RowIndex; use vm_core::{mast::MastNodeId, stack::STACK_TOP_SIZE, utils::to_hex}; use winter_prover::{math::FieldElement, ProverError}; @@ -17,15 +18,15 @@ use std::error::Error; #[derive(Debug, Clone, PartialEq, Eq)] pub enum ExecutionError { AdviceMapKeyNotFound(Word), - AdviceStackReadFailed(u32), + AdviceStackReadFailed(RowIndex), CallerNotInSyscall, CycleLimitExceeded(u32), - DivideByZero(u32), + DivideByZero(RowIndex), DynamicNodeNotFound(Digest), EventError(String), Ext2InttError(Ext2InttError), FailedAssertion { - clk: u32, + clk: RowIndex, err_code: u32, err_msg: Option, }, @@ -46,7 +47,7 @@ pub enum ExecutionError { depth: Felt, value: Felt, }, - LogArgumentZero(u32), + LogArgumentZero(RowIndex), MalformedSignatureKey(&'static str), MalformedMastForestInHost { root_digest: Digest, diff --git a/processor/src/host/debug.rs b/processor/src/host/debug.rs index 8cb2b31abd..56d56b60e7 100644 --- a/processor/src/host/debug.rs +++ b/processor/src/host/debug.rs @@ -1,4 +1,5 @@ use alloc::vec::Vec; +use miden_air::RowIndex; use std::{print, println}; use vm_core::{DebugOptions, Word}; @@ -35,13 +36,13 @@ pub fn print_debug_info(process: &S, options: &DebugOptions) { // ================================================================================================ struct Printer { - clk: u32, + clk: RowIndex, ctx: ContextId, fmp: u32, } impl Printer { - fn new(clk: u32, ctx: ContextId, fmp: u64) -> Self { + fn new(clk: RowIndex, ctx: ContextId, fmp: u64) -> Self { Self { clk, ctx, diff --git a/processor/src/lib.rs b/processor/src/lib.rs index 3f5648c4ea..149d9f275c 100644 --- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -13,6 +13,7 @@ use miden_air::trace::{ CHIPLETS_WIDTH, DECODER_TRACE_WIDTH, MIN_TRACE_LEN, RANGE_CHECK_TRACE_WIDTH, STACK_TRACE_WIDTH, SYS_TRACE_WIDTH, }; +pub use miden_air::RowIndex; pub use miden_air::{ExecutionOptions, ExecutionOptionsError}; pub use vm_core::{ chiplets::hasher::Digest, @@ -597,7 +598,7 @@ where /// A trait that defines a set of methods which allow access to the state of the process. pub trait ProcessState { /// Returns the current clock cycle of a process. - fn clk(&self) -> u32; + fn clk(&self) -> RowIndex; /// Returns the current execution context ID. 
fn ctx(&self) -> ContextId; @@ -637,7 +638,7 @@ pub trait ProcessState { } impl ProcessState for Process { - fn clk(&self) -> u32 { + fn clk(&self) -> RowIndex { self.system.clk() } diff --git a/processor/src/range/aux_trace.rs b/processor/src/range/aux_trace.rs index 1e6c2ff62c..a8b55b2560 100644 --- a/processor/src/range/aux_trace.rs +++ b/processor/src/range/aux_trace.rs @@ -2,6 +2,7 @@ use alloc::{collections::BTreeMap, vec::Vec}; use miden_air::trace::main_trace::MainTrace; use miden_air::trace::range::{M_COL_IDX, V_COL_IDX}; +use miden_air::RowIndex; use super::{uninit_vector, Felt, FieldElement, NUM_RAND_ROWS}; @@ -15,7 +16,7 @@ pub struct AuxTraceBuilder { lookup_values: Vec, /// Range check lookups performed by all user operations, grouped and sorted by the clock cycle /// at which they are requested. - cycle_lookups: BTreeMap>, + cycle_lookups: BTreeMap>, // The index of the first row of Range Checker's trace when the padded rows end and values to // be range checked start. values_start: usize, @@ -26,7 +27,7 @@ impl AuxTraceBuilder { // -------------------------------------------------------------------------------------------- pub fn new( lookup_values: Vec, - cycle_lookups: BTreeMap>, + cycle_lookups: BTreeMap>, values_start: usize, ) -> Self { Self { @@ -71,8 +72,10 @@ impl AuxTraceBuilder { let mut b_range_idx = 0_usize; // the first half of the trace only includes values from the operations. - for (clk, range_checks) in self.cycle_lookups.range(0..self.values_start as u32) { - let clk = *clk as usize; + for (clk, range_checks) in + self.cycle_lookups.range(RowIndex::from(0)..RowIndex::from(self.values_start)) + { + let clk: usize = (*clk).into(); // if we skipped some cycles since the last update was processed, values in the last // updated row should be copied over until the current cycle. @@ -121,7 +124,7 @@ impl AuxTraceBuilder { } // subtract the range checks requested by operations - if let Some(range_checks) = self.cycle_lookups.get(&(row_idx as u32)) { + if let Some(range_checks) = self.cycle_lookups.get(&(row_idx as u32).into()) { for lookup in range_checks.iter() { let value = divisors.get(lookup).expect("invalid lookup value"); b_range[b_range_idx] -= *value; diff --git a/processor/src/range/mod.rs b/processor/src/range/mod.rs index 99e3be3a23..4756ba8cf4 100644 --- a/processor/src/range/mod.rs +++ b/processor/src/range/mod.rs @@ -1,4 +1,5 @@ use alloc::{collections::BTreeMap, vec::Vec}; +use miden_air::RowIndex; use super::{trace::NUM_RAND_ROWS, Felt, FieldElement, RangeCheckTrace, ZERO}; use crate::utils::uninit_vector; @@ -43,7 +44,7 @@ pub struct RangeChecker { /// Range check lookups performed by all user operations, grouped and sorted by clock cycle. /// Each cycle is mapped to a vector of the range checks requested at that cycle, which can /// come from the stack, memory, or both. - cycle_lookups: BTreeMap>, + cycle_lookups: BTreeMap>, } impl RangeChecker { @@ -71,7 +72,7 @@ impl RangeChecker { } /// Adds range check lookups from the stack or memory to this [RangeChecker] instance. - pub fn add_range_checks(&mut self, clk: u32, values: &[u16]) { + pub fn add_range_checks(&mut self, clk: RowIndex, values: &[u16]) { // range checks requests only come from memory or from the stack, which always request 2 or // 4 lookups respectively. 
debug_assert!(values.len() == 2 || values.len() == 4); diff --git a/processor/src/stack/aux_trace.rs b/processor/src/stack/aux_trace.rs index 2f42ca816a..a3ac35574e 100644 --- a/processor/src/stack/aux_trace.rs +++ b/processor/src/stack/aux_trace.rs @@ -1,7 +1,7 @@ use super::{Felt, FieldElement, OverflowTableRow}; use crate::trace::AuxColumnBuilder; use alloc::vec::Vec; -use miden_air::trace::main_trace::MainTrace; +use miden_air::{trace::main_trace::MainTrace, RowIndex}; // AUXILIARY TRACE BUILDER // ================================================================================================ @@ -40,7 +40,7 @@ impl> AuxColumnBuilder for AuxTraceBuilder } /// Removes a row from the stack overflow table. - fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], i: usize) -> E { + fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], i: RowIndex) -> E { let is_left_shift = main_trace.is_left_shift(i); let is_non_empty_overflow = main_trace.is_non_empty_overflow(i); @@ -57,7 +57,7 @@ impl> AuxColumnBuilder for AuxTraceBuilder } /// Adds a row to the stack overflow table. - fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], i: usize) -> E { + fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], i: RowIndex) -> E { let is_right_shift = main_trace.is_right_shift(i); if is_right_shift { diff --git a/processor/src/stack/mod.rs b/processor/src/stack/mod.rs index cd65cc74a0..c218eb16c8 100644 --- a/processor/src/stack/mod.rs +++ b/processor/src/stack/mod.rs @@ -1,6 +1,7 @@ use super::{Felt, FieldElement, StackInputs, StackOutputs, ONE, STACK_TRACE_WIDTH, ZERO}; use alloc::vec::Vec; use core::cmp; +use miden_air::RowIndex; use vm_core::{stack::STACK_TOP_SIZE, Word, WORD_SIZE}; mod trace; @@ -52,7 +53,7 @@ const MAX_TOP_IDX: usize = STACK_TOP_SIZE - 1; /// column are set by the prover non-deterministically to 1 / (b0−16) when b0 != 16, and to any /// other value otherwise. pub struct Stack { - clk: u32, + clk: RowIndex, trace: StackTrace, overflow: OverflowTable, active_depth: usize, @@ -86,7 +87,7 @@ impl Stack { }; Self { - clk: 0, + clk: RowIndex::from(0), trace, overflow, active_depth: depth, @@ -103,7 +104,7 @@ impl Stack { } /// Returns the current clock cycle of the execution trace. - pub fn current_clk(&self) -> u32 { + pub fn current_clk(&self) -> RowIndex { self.clk } @@ -111,7 +112,7 @@ impl Stack { /// /// Trace length of the stack is equal to the number of cycles executed by the VM. pub fn trace_len(&self) -> usize { - self.clk as usize + self.clk.into() } /// Returns a copy of the item currently at the top of the stack. @@ -125,13 +126,13 @@ impl Stack { /// # Panics /// Panics if invoked for non-last clock cycle on a stack instantiated with /// `keep_overflow_trace` set to false. - pub fn get_state_at(&self, clk: u32) -> Vec { + pub fn get_state_at(&self, clk: RowIndex) -> Vec { let mut result = Vec::with_capacity(self.active_depth); self.trace.append_state_into(&mut result, clk); if clk == self.clk { self.overflow.append_into(&mut result); } else { - self.overflow.append_state_into(&mut result, clk as u64); + self.overflow.append_state_into(&mut result, clk.into()); } result @@ -186,7 +187,7 @@ impl Stack { /// same position at the next clock cycle. pub fn copy_state(&mut self, start_pos: usize) { self.trace.copy_stack_state_at( - self.clk, + self.clk.into(), start_pos, // TODO: change type of `active_depth` to `u32` Felt::try_from(self.active_depth as u64) @@ -213,7 +214,7 @@ impl Stack { } _ => { // Update the stack & overflow table. 
- let from_overflow = self.overflow.pop(self.clk as u64); + let from_overflow = self.overflow.pop(u64::from(self.clk)); self.trace.stack_shift_left_at( self.clk, start_pos, @@ -287,7 +288,7 @@ impl Stack { /// overwritten with random values. This parameter is unused because last rows are just /// duplicates of the prior rows and thus can be safely overwritten. pub fn into_trace(self, trace_len: usize, num_rand_rows: usize) -> super::StackTrace { - let clk = self.current_clk() as usize; + let clk = usize::from(self.current_clk()); // make sure that only the duplicate rows will be overwritten with random values assert!(clk + num_rand_rows <= trace_len, "target trace length too small"); diff --git a/processor/src/stack/tests.rs b/processor/src/stack/tests.rs index c7a9a06a07..05d3592975 100644 --- a/processor/src/stack/tests.rs +++ b/processor/src/stack/tests.rs @@ -97,7 +97,7 @@ fn shift_left() { // Shift right twice to add 2 items to the overflow table. stack.shift_right(0); - let prev_overflow_addr = stack.current_clk() as usize; + let prev_overflow_addr: usize = stack.current_clk().into(); stack.advance_clock(); stack.shift_right(0); stack.advance_clock(); @@ -137,7 +137,7 @@ fn shift_right() { // ---- right shift an entire stack of minimum depth ------------------------------------------ let expected_stack = build_stack(&[0, 4, 3, 2, 1]); - let expected_helpers = build_helpers_partial(1, stack.current_clk() as usize); + let expected_helpers = build_helpers_partial(1, stack.current_clk().into()); stack.shift_right(0); stack.advance_clock(); @@ -150,7 +150,7 @@ fn shift_right() { // ---- right shift when the overflow table is non-empty -------------------------------------- let expected_stack = build_stack(&[0, 0, 4, 3, 2, 1]); - let expected_helpers = build_helpers_partial(2, stack.current_clk() as usize); + let expected_helpers = build_helpers_partial(2, stack.current_clk().into()); stack.shift_right(0); stack.advance_clock(); diff --git a/processor/src/stack/trace.rs b/processor/src/stack/trace.rs index c8534df24f..54d977db41 100644 --- a/processor/src/stack/trace.rs +++ b/processor/src/stack/trace.rs @@ -1,7 +1,10 @@ use super::{super::utils::get_trace_len, Felt, MAX_TOP_IDX, ONE, STACK_TRACE_WIDTH, ZERO}; use crate::utils::math::batch_inversion; use alloc::vec::Vec; -use miden_air::trace::stack::{H0_COL_IDX, NUM_STACK_HELPER_COLS, STACK_TOP_SIZE}; +use miden_air::{ + trace::stack::{H0_COL_IDX, NUM_STACK_HELPER_COLS, STACK_TOP_SIZE}, + RowIndex, +}; use vm_core::FieldElement; // STACK TRACE @@ -43,21 +46,21 @@ impl StackTrace { /// Returns a copy of the item at the top of the stack at the specified clock cycle. #[inline(always)] - pub fn peek_at(&self, clk: u32) -> Felt { - self.stack[0][clk as usize] + pub fn peek_at(&self, clk: RowIndex) -> Felt { + self.stack[0][usize::from(clk)] } /// Returns the value located at the specified position on the stack at the specified clock /// cycle. #[inline(always)] - pub fn get_stack_value_at(&self, clk: u32, pos: usize) -> Felt { - self.stack[pos][clk as usize] + pub fn get_stack_value_at(&self, clk: RowIndex, pos: usize) -> Felt { + self.stack[pos][usize::from(clk)] } /// Sets the value at the specified position on the stack at the specified cycle. 
#[inline(always)] - pub fn set_stack_value_at(&mut self, clk: u32, pos: usize, value: Felt) { - self.stack[pos][clk as usize] = value; + pub fn set_stack_value_at(&mut self, clk: RowIndex, pos: usize, value: Felt) { + self.stack[pos][usize::from(clk)] = value; } /// Copies the stack values starting at the specified position at the specified clock cycle to @@ -67,13 +70,11 @@ impl StackTrace { /// stack depth and overflow address. pub fn copy_stack_state_at( &mut self, - clk: u32, + clk: usize, start_pos: usize, stack_depth: Felt, next_overflow_addr: Felt, ) { - let clk = clk as usize; - // copy over stack top columns for i in start_pos..STACK_TOP_SIZE { self.stack[i][clk + 1] = self.stack[i][clk]; @@ -98,12 +99,12 @@ impl StackTrace { /// after the entire trace is constructed. pub fn stack_shift_left_at( &mut self, - clk: u32, + clk: RowIndex, start_pos: usize, last_value: Felt, next_overflow_addr: Option, ) { - let clk = clk as usize; + let clk = usize::from(clk); // update stack top columns for i in start_pos..=MAX_TOP_IDX { @@ -133,8 +134,8 @@ impl StackTrace { /// cycle. /// - Set h0 to (depth - 16). Inverses of these values will be computed in into_array() method /// after the entire trace is constructed. - pub fn stack_shift_right_at(&mut self, clk: u32, start_pos: usize) { - let clk = clk as usize; + pub fn stack_shift_right_at(&mut self, clk: RowIndex, start_pos: usize) { + let clk = usize::from(clk); // update stack top columns for i in start_pos..MAX_TOP_IDX { @@ -152,10 +153,10 @@ impl StackTrace { /// Makes sure there is enough memory allocated for the trace to accommodate a new row. /// /// Trace length is doubled every time it needs to be increased. - pub fn ensure_trace_capacity(&mut self, clk: u32) { + pub fn ensure_trace_capacity(&mut self, clk: RowIndex) { let current_capacity = get_trace_len(&self.stack); // current_capacity as trace_length can not be bigger than clk, so it is safe to cast to u32 - if clk + 1 >= current_capacity as u32 { + if u32::from(clk + 1) >= current_capacity as u32 { let new_length = current_capacity * 2; for column in self.stack.iter_mut().chain(self.helpers.iter_mut()) { column.resize(new_length, ZERO); @@ -164,9 +165,9 @@ impl StackTrace { } /// Appends stack top state (16 items) at the specified clock cycle into the provided vector. - pub fn append_state_into(&self, result: &mut Vec, clk: u32) { + pub fn append_state_into(&self, result: &mut Vec, clk: RowIndex) { for column in self.stack.iter() { - result.push(column[clk as usize]); + result.push(column[usize::from(clk)]); } } @@ -201,20 +202,20 @@ impl StackTrace { /// Returns the stack trace state at the specified clock cycle. #[cfg(any(test, feature = "testing"))] - pub fn get_stack_state_at(&self, clk: u32) -> [Felt; STACK_TOP_SIZE] { + pub fn get_stack_state_at(&self, clk: RowIndex) -> [Felt; STACK_TOP_SIZE] { let mut result = [ZERO; STACK_TOP_SIZE]; for (result, column) in result.iter_mut().zip(self.stack.iter()) { - *result = column[clk as usize]; + *result = column[usize::from(clk)]; } result } /// Returns the trace state of the stack helper columns at the specified clock cycle. 
#[cfg(test)] - pub fn get_helpers_state_at(&self, clk: u32) -> [Felt; NUM_STACK_HELPER_COLS] { + pub fn get_helpers_state_at(&self, clk: RowIndex) -> [Felt; NUM_STACK_HELPER_COLS] { let mut result = [ZERO; NUM_STACK_HELPER_COLS]; for (result, column) in result.iter_mut().zip(self.helpers.iter()) { - *result = column[clk as usize]; + *result = column[usize::from(clk)]; } result } diff --git a/processor/src/system/mod.rs b/processor/src/system/mod.rs index 539bc00229..98e43ecb2c 100644 --- a/processor/src/system/mod.rs +++ b/processor/src/system/mod.rs @@ -1,6 +1,7 @@ use super::{ExecutionError, Felt, FieldElement, SysTrace, Word, EMPTY_WORD, ONE, ZERO}; use alloc::vec::Vec; use core::fmt::{self, Display}; +use miden_air::RowIndex; #[cfg(test)] mod tests; @@ -38,7 +39,7 @@ pub const FMP_MAX: u64 = 3 * 2_u64.pow(30) - 1; /// - hash of the function which initiated the current execution context. if the context was /// initiated from the root context, this will be set to ZEROs. pub struct System { - clk: u32, + clk: RowIndex, ctx: ContextId, fmp: Felt, in_syscall: bool, @@ -63,7 +64,7 @@ impl System { fmp_trace[0] = fmp; Self { - clk: 0, + clk: RowIndex::from(0), ctx: ContextId::root(), fmp, in_syscall: false, @@ -86,7 +87,7 @@ impl System { /// Returns the current clock cycle of a process. #[inline(always)] - pub fn clk(&self) -> u32 { + pub fn clk(&self) -> RowIndex { self.clk } @@ -118,19 +119,19 @@ impl System { /// Trace length of the system columns is equal to the number of cycles executed by the VM. #[inline(always)] pub fn trace_len(&self) -> usize { - self.clk as usize + self.clk.into() } /// Returns execution context ID at the specified clock cycle. #[inline(always)] - pub fn get_ctx_at(&self, clk: u32) -> ContextId { - (self.ctx_trace[clk as usize].as_int() as u32).into() + pub fn get_ctx_at(&self, clk: RowIndex) -> ContextId { + (self.ctx_trace[usize::from(clk)].as_int() as u32).into() } /// Returns free memory pointer at the specified clock cycle. #[inline(always)] - pub fn get_fmp_at(&self, clk: u32) -> Felt { - self.fmp_trace[clk as usize] + pub fn get_fmp_at(&self, clk: RowIndex) -> Felt { + self.fmp_trace[usize::from(clk)] } // STATE MUTATORS @@ -141,11 +142,11 @@ impl System { self.clk += 1; // Check that maximum number of cycles is not exceeded. - if self.clk > max_cycles { + if u32::from(self.clk) > max_cycles { return Err(ExecutionError::CycleLimitExceeded(max_cycles)); } - let clk = self.clk as usize; + let clk: usize = self.clk.into(); self.clk_trace[clk] = Felt::from(self.clk); self.fmp_trace[clk] = self.fmp; @@ -235,7 +236,7 @@ impl System { /// overwritten with random values. This parameter is unused because last rows are just /// duplicates of the prior rows and thus can be safely overwritten. pub fn into_trace(mut self, trace_len: usize, num_rand_rows: usize) -> SysTrace { - let clk = self.clk() as usize; + let clk: usize = self.clk().into(); // make sure that only the duplicate rows will be overwritten with random values assert!(clk + num_rand_rows <= trace_len, "target trace length too small"); @@ -284,7 +285,7 @@ impl System { /// Trace length is doubled every time it needs to be increased. 
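// NOTE (illustration, not part of this patch): capacity checks now compare the clock against a
// column length by lifting the usize length into a `RowIndex`, exactly as `ensure_trace_capacity`
// below does. A hypothetical standalone version, assuming `RowIndex` supports `+ 1`, `PartialOrd`,
// and `From<usize>` as the call sites in this patch imply:

use miden_air::RowIndex;

fn trace_needs_resize(clk: RowIndex, current_capacity: usize) -> bool {
    // resize once the next row to be written would fall outside the allocated columns
    clk + 1 >= RowIndex::from(current_capacity)
}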
pub fn ensure_trace_capacity(&mut self) { let current_capacity = self.clk_trace.len(); - if self.clk + 1 >= current_capacity as u32 { + if self.clk + 1 >= RowIndex::from(current_capacity) { let new_length = current_capacity * 2; self.clk_trace.resize(new_length, ZERO); self.ctx_trace.resize(new_length, ZERO); @@ -316,6 +317,12 @@ impl ContextId { } } +impl From for ContextId { + fn from(value: RowIndex) -> Self { + Self(u32::from(value)) + } +} + impl From for ContextId { fn from(value: u32) -> Self { Self(value) diff --git a/processor/src/trace/mod.rs b/processor/src/trace/mod.rs index 15b1a06b4f..cf4036aa59 100644 --- a/processor/src/trace/mod.rs +++ b/processor/src/trace/mod.rs @@ -285,9 +285,9 @@ where let clk = system.clk(); // trace lengths of system and stack components must be equal to the number of executed cycles - assert_eq!(clk as usize, system.trace_len(), "inconsistent system trace lengths"); - assert_eq!(clk as usize, decoder.trace_len(), "inconsistent decoder trace length"); - assert_eq!(clk as usize, stack.trace_len(), "inconsistent stack trace lengths"); + assert_eq!(usize::from(clk), system.trace_len(), "inconsistent system trace lengths"); + assert_eq!(usize::from(clk), decoder.trace_len(), "inconsistent decoder trace length"); + assert_eq!(usize::from(clk), stack.trace_len(), "inconsistent stack trace lengths"); // Add the range checks required by the chiplets to the range checker. chiplets.append_range_checks(&mut range); @@ -296,7 +296,7 @@ where let range_table_len = range.get_number_range_checker_rows(); // Get the trace length required to hold all execution trace steps. - let max_len = range_table_len.max(clk as usize).max(chiplets.trace_len()); + let max_len = range_table_len.max(clk.into()).max(chiplets.trace_len()); // pad the trace length to the next power of two and ensure that there is space for the // rows to hold random values @@ -308,7 +308,7 @@ where // get the lengths of the traces: main, range, and chiplets let trace_len_summary = - TraceLenSummary::new(clk as usize, range_table_len, ChipletsLengths::new(&chiplets)); + TraceLenSummary::new(clk.into(), range_table_len, ChipletsLengths::new(&chiplets)); // combine all trace segments into the main trace let system_trace = system.into_trace(trace_len, NUM_RAND_ROWS); diff --git a/processor/src/trace/tests/chiplets/bitwise.rs b/processor/src/trace/tests/chiplets/bitwise.rs index 218f86bb16..d37a5bd5d6 100644 --- a/processor/src/trace/tests/chiplets/bitwise.rs +++ b/processor/src/trace/tests/chiplets/bitwise.rs @@ -2,9 +2,12 @@ use super::{ build_trace_from_ops, rand_array, rand_value, ExecutionTrace, Felt, FieldElement, Operation, Trace, AUX_TRACE_RAND_ELEMENTS, CHIPLETS_AUX_TRACE_OFFSET, HASH_CYCLE_LEN, NUM_RAND_ROWS, ONE, }; -use miden_air::trace::chiplets::{ - bitwise::{BITWISE_AND, BITWISE_AND_LABEL, BITWISE_XOR, BITWISE_XOR_LABEL, OP_CYCLE_LEN}, - BITWISE_A_COL_IDX, BITWISE_B_COL_IDX, BITWISE_OUTPUT_COL_IDX, BITWISE_TRACE_OFFSET, +use miden_air::{ + trace::chiplets::{ + bitwise::{BITWISE_AND, BITWISE_AND_LABEL, BITWISE_XOR, BITWISE_XOR_LABEL, OP_CYCLE_LEN}, + BITWISE_A_COL_IDX, BITWISE_B_COL_IDX, BITWISE_OUTPUT_COL_IDX, BITWISE_TRACE_OFFSET, + }, + RowIndex, }; /// Tests the generation of the `b_chip` bus column when only bitwise lookups are included. 
It @@ -121,7 +124,8 @@ fn b_chip_trace_bitwise() { Felt::from(a ^ b), ); expected *= value.inv(); - expected *= build_expected_bitwise_from_trace(&trace, &rand_elements, response_1_row - 1); + expected *= + build_expected_bitwise_from_trace(&trace, &rand_elements, (response_1_row - 1).into()); assert_eq!(expected, b_chip[response_1_row]); // Nothing changes until the decoder requests the result of the `SPAN` hash at cycle 21. @@ -142,7 +146,8 @@ fn b_chip_trace_bitwise() { // At the end of the next bitwise cycle, the response for `U32and` is provided by the Bitwise // chiplet. - expected *= build_expected_bitwise_from_trace(&trace, &rand_elements, response_2_row - 1); + expected *= + build_expected_bitwise_from_trace(&trace, &rand_elements, (response_2_row - 1).into()); assert_eq!(expected, b_chip[response_2_row]); // Nothing changes until the next time the Bitwise chiplet responds. @@ -152,7 +157,8 @@ fn b_chip_trace_bitwise() { // At the end of the next bitwise cycle, the response for `U32and` is provided by the Bitwise // chiplet. - expected *= build_expected_bitwise_from_trace(&trace, &rand_elements, response_3_row - 1); + expected *= + build_expected_bitwise_from_trace(&trace, &rand_elements, (response_3_row - 1).into()); assert_eq!(expected, b_chip[response_3_row]); // The value in b_chip should be ONE now and for the rest of the trace. @@ -168,7 +174,11 @@ fn build_expected_bitwise(alphas: &[Felt], label: Felt, a: Felt, b: Felt, result alphas[0] + alphas[1] * label + alphas[2] * a + alphas[3] * b + alphas[4] * result } -fn build_expected_bitwise_from_trace(trace: &ExecutionTrace, alphas: &[Felt], row: usize) -> Felt { +fn build_expected_bitwise_from_trace( + trace: &ExecutionTrace, + alphas: &[Felt], + row: RowIndex, +) -> Felt { let selector = trace.main_trace.get_column(BITWISE_TRACE_OFFSET)[row]; let op_id = if selector == BITWISE_AND { diff --git a/processor/src/trace/tests/chiplets/hasher.rs b/processor/src/trace/tests/chiplets/hasher.rs index 38be61b933..866c3c1aea 100644 --- a/processor/src/trace/tests/chiplets/hasher.rs +++ b/processor/src/trace/tests/chiplets/hasher.rs @@ -6,18 +6,21 @@ use super::{ use crate::StackInputs; use alloc::vec::Vec; use core::ops::Range; -use miden_air::trace::{ - chiplets::{ - hasher::{ - HasherState, Selectors, CAPACITY_DOMAIN_IDX, CAPACITY_LEN, DIGEST_RANGE, - HASH_CYCLE_LEN, LINEAR_HASH, LINEAR_HASH_LABEL, MP_VERIFY, MP_VERIFY_LABEL, - MR_UPDATE_NEW, MR_UPDATE_NEW_LABEL, MR_UPDATE_OLD, MR_UPDATE_OLD_LABEL, RETURN_HASH, - RETURN_HASH_LABEL, RETURN_STATE, RETURN_STATE_LABEL, STATE_WIDTH, +use miden_air::{ + trace::{ + chiplets::{ + hasher::{ + HasherState, Selectors, CAPACITY_DOMAIN_IDX, CAPACITY_LEN, DIGEST_RANGE, + HASH_CYCLE_LEN, LINEAR_HASH, LINEAR_HASH_LABEL, MP_VERIFY, MP_VERIFY_LABEL, + MR_UPDATE_NEW, MR_UPDATE_NEW_LABEL, MR_UPDATE_OLD, MR_UPDATE_OLD_LABEL, + RETURN_HASH, RETURN_HASH_LABEL, RETURN_STATE, RETURN_STATE_LABEL, STATE_WIDTH, + }, + HASHER_NODE_INDEX_COL_IDX, HASHER_STATE_COL_RANGE, HASHER_TRACE_OFFSET, }, - HASHER_NODE_INDEX_COL_IDX, HASHER_STATE_COL_RANGE, HASHER_TRACE_OFFSET, + decoder::{NUM_OP_BITS, OP_BITS_OFFSET}, + CLK_COL_IDX, DECODER_TRACE_OFFSET, }, - decoder::{NUM_OP_BITS, OP_BITS_OFFSET}, - CLK_COL_IDX, DECODER_TRACE_OFFSET, + RowIndex, }; use vm_core::{ chiplets::hasher::apply_permutation, @@ -71,14 +74,14 @@ pub fn b_chip_span() { // initialize the request state. 
let mut state = [ZERO; STATE_WIDTH]; - fill_state_from_decoder_with_domain(&trace, &mut state, 0); + fill_state_from_decoder_with_domain(&trace, &mut state, 0.into()); // request the initialization of the span hash let request_init = build_expected(&alphas, LINEAR_HASH_LABEL, state, [ZERO; STATE_WIDTH], ONE, ZERO); let mut expected = request_init.inv(); // provide the initialization of the span hash - expected *= build_expected_from_trace(&trace, &alphas, 0); + expected *= build_expected_from_trace(&trace, &alphas, 0.into()); assert_eq!(expected, b_chip[1]); // Nothing changes when there is no communication with the hash chiplet. @@ -105,7 +108,7 @@ pub fn b_chip_span() { } // At the end of the hash cycle, the result of the span hash is provided by the hasher - expected *= build_expected_from_trace(&trace, &alphas, HASH_CYCLE_LEN - 1); + expected *= build_expected_from_trace(&trace, &alphas, (HASH_CYCLE_LEN - 1).into()); assert_eq!(expected, b_chip[HASH_CYCLE_LEN]); // The value in b_chip should be ONE now and for the rest of the trace. @@ -142,14 +145,14 @@ pub fn b_chip_span_with_respan() { // initialize the request state. let mut state = [ZERO; STATE_WIDTH]; - fill_state_from_decoder_with_domain(&trace, &mut state, 0); + fill_state_from_decoder_with_domain(&trace, &mut state, 0.into()); // request the initialization of the span hash let request_init = build_expected(&alphas, LINEAR_HASH_LABEL, state, [ZERO; STATE_WIDTH], ONE, ZERO); let mut expected = request_init.inv(); // provide the initialization of the span hash - expected *= build_expected_from_trace(&trace, &alphas, 0); + expected *= build_expected_from_trace(&trace, &alphas, 0.into()); assert_eq!(expected, b_chip[1]); // Nothing changes when there is no communication with the hash chiplet. @@ -159,7 +162,7 @@ pub fn b_chip_span_with_respan() { // At the end of the first hash cycle at cycle 7, the absorption of the next operation batch is // provided by the hasher. - expected *= build_expected_from_trace(&trace, &alphas, 7); + expected *= build_expected_from_trace(&trace, &alphas, 7.into()); assert_eq!(expected, b_chip[8]); // Nothing changes when there is no communication with the hash chiplet. @@ -170,7 +173,7 @@ pub fn b_chip_span_with_respan() { apply_permutation(&mut state); let prev_state = state; // get the state with the next absorbed batch. - fill_state_from_decoder(&trace, &mut state, 9); + fill_state_from_decoder(&trace, &mut state, 9.into()); let request_respan = build_expected(&alphas, LINEAR_HASH_LABEL, prev_state, state, Felt::new(8), ZERO); @@ -184,7 +187,7 @@ pub fn b_chip_span_with_respan() { // At cycle 15 at the end of the second hash cycle, the result of the span hash is provided by // the hasher - expected *= build_expected_from_trace(&trace, &alphas, 15); + expected *= build_expected_from_trace(&trace, &alphas, 15.into()); assert_eq!(expected, b_chip[16]); // Nothing changes when there is no communication with the hash chiplet. @@ -238,20 +241,20 @@ pub fn b_chip_merge() { // initialize the request state. 
let mut split_state = [ZERO; STATE_WIDTH]; - fill_state_from_decoder_with_domain(&trace, &mut split_state, 0); + fill_state_from_decoder_with_domain(&trace, &mut split_state, 0.into()); // request the initialization of the span hash let split_init = build_expected(&alphas, LINEAR_HASH_LABEL, split_state, [ZERO; STATE_WIDTH], ONE, ZERO); let mut expected = split_init.inv(); // provide the initialization of the span hash - expected *= build_expected_from_trace(&trace, &alphas, 0); + expected *= build_expected_from_trace(&trace, &alphas, 0.into()); assert_eq!(expected, b_chip[1]); // at cycle 1 the initialization of the span block hash for the false branch is requested by the // decoder let mut f_branch_state = [ZERO; STATE_WIDTH]; - fill_state_from_decoder_with_domain(&trace, &mut f_branch_state, 1); + fill_state_from_decoder_with_domain(&trace, &mut f_branch_state, 1.into()); // request the initialization of the false branch hash let f_branch_init = build_expected( &alphas, @@ -299,12 +302,12 @@ pub fn b_chip_merge() { } // at cycle 7 the result of the merge is provided by the hasher - expected *= build_expected_from_trace(&trace, &alphas, 7); + expected *= build_expected_from_trace(&trace, &alphas, 7.into()); assert_eq!(expected, b_chip[8]); // at cycle 8 the initialization of the hash of the span block for the false branch is provided // by the hasher - expected *= build_expected_from_trace(&trace, &alphas, 8); + expected *= build_expected_from_trace(&trace, &alphas, 8.into()); assert_eq!(expected, b_chip[9]); // Nothing changes when there is no communication with the hash chiplet. @@ -313,7 +316,7 @@ pub fn b_chip_merge() { } // at cycle 15 the result of the span block for the false branch is provided by the hasher - expected *= build_expected_from_trace(&trace, &alphas, 15); + expected *= build_expected_from_trace(&trace, &alphas, 15.into()); assert_eq!(expected, b_chip[16]); // The value in b_chip should be ONE now and for the rest of the trace. @@ -356,13 +359,13 @@ pub fn b_chip_permutation() { // initialize the request state. let mut span_state = [ZERO; STATE_WIDTH]; - fill_state_from_decoder_with_domain(&trace, &mut span_state, 0); + fill_state_from_decoder_with_domain(&trace, &mut span_state, 0.into()); // request the initialization of the span hash let span_init = build_expected(&alphas, LINEAR_HASH_LABEL, span_state, [ZERO; STATE_WIDTH], ONE, ZERO); let mut expected = span_init.inv(); // provide the initialization of the span hash - expected *= build_expected_from_trace(&trace, &alphas, 0); + expected *= build_expected_from_trace(&trace, &alphas, 0.into()); assert_eq!(expected, b_chip[1]); // at cycle 1 hperm is executed and the initialization and result of the hash are both @@ -409,11 +412,11 @@ pub fn b_chip_permutation() { } // at cycle 7 the result of the span hash is provided by the hasher - expected *= build_expected_from_trace(&trace, &alphas, 7); + expected *= build_expected_from_trace(&trace, &alphas, 7.into()); assert_eq!(expected, b_chip[8]); // at cycle 8 the initialization of the hperm hash is provided by the hasher - expected *= build_expected_from_trace(&trace, &alphas, 8); + expected *= build_expected_from_trace(&trace, &alphas, 8.into()); assert_eq!(expected, b_chip[9]); // Nothing changes when there is no communication with the hash chiplet. 
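// NOTE (illustration, not part of this patch): in these bus-column tests the row argument of the
// trace helpers is now a `RowIndex`, while cycle counters such as `HASH_CYCLE_LEN` stay plain
// integers so they can keep indexing `b_chip` directly; the arithmetic is done first and only the
// final value is lifted via `.into()` or `RowIndex::from`. A hypothetical helper making that
// explicit (assuming `HASH_CYCLE_LEN` is a `usize` constant and `RowIndex: From<usize>`):

use miden_air::{trace::chiplets::hasher::HASH_CYCLE_LEN, RowIndex};

fn last_row_of_first_hash_cycle() -> RowIndex {
    // subtraction happens on usize; only the result becomes a RowIndex
    RowIndex::from(HASH_CYCLE_LEN - 1)
}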
@@ -422,7 +425,7 @@ pub fn b_chip_permutation() { } // at cycle 15 the result of the hperm hash is provided by the hasher - expected *= build_expected_from_trace(&trace, &alphas, 15); + expected *= build_expected_from_trace(&trace, &alphas, 15.into()); assert_eq!(expected, b_chip[16]); // The value in b_chip should be ONE now and for the rest of the trace. @@ -471,13 +474,13 @@ fn b_chip_mpverify() { // initialize the request state. let mut span_state = [ZERO; STATE_WIDTH]; - fill_state_from_decoder_with_domain(&trace, &mut span_state, 0); + fill_state_from_decoder_with_domain(&trace, &mut span_state, 0.into()); // request the initialization of the span hash let span_init = build_expected(&alphas, LINEAR_HASH_LABEL, span_state, [ZERO; STATE_WIDTH], ONE, ZERO); let mut expected = span_init.inv(); // provide the initialization of the span hash - expected *= build_expected_from_trace(&trace, &alphas, 0); + expected *= build_expected_from_trace(&trace, &alphas, 0.into()); assert_eq!(expected, b_chip[1]); // at cycle 1 a merkle path verification is executed and the initialization and result of the @@ -546,11 +549,11 @@ fn b_chip_mpverify() { } // at cycle 7 the result of the span hash is provided by the hasher - expected *= build_expected_from_trace(&trace, &alphas, 7); + expected *= build_expected_from_trace(&trace, &alphas, 7.into()); assert_eq!(expected, b_chip[8]); // at cycle 8 the initialization of the merkle path is provided by the hasher - expected *= build_expected_from_trace(&trace, &alphas, 8); + expected *= build_expected_from_trace(&trace, &alphas, 8.into()); assert_eq!(expected, b_chip[9]); // Nothing changes when there is no communication with the hash chiplet. @@ -559,7 +562,7 @@ fn b_chip_mpverify() { } // when the merkle path verification has been completed the hasher provides the result - expected *= build_expected_from_trace(&trace, &alphas, mp_verify_complete - 1); + expected *= build_expected_from_trace(&trace, &alphas, (mp_verify_complete - 1).into()); assert_eq!(expected, b_chip[mp_verify_complete]); // The value in b_chip should be ONE now and for the rest of the trace. @@ -617,13 +620,13 @@ fn b_chip_mrupdate() { // initialize the request state. let mut span_state = [ZERO; STATE_WIDTH]; - fill_state_from_decoder_with_domain(&trace, &mut span_state, 0); + fill_state_from_decoder_with_domain(&trace, &mut span_state, 0.into()); // request the initialization of the span hash let span_init = build_expected(&alphas, LINEAR_HASH_LABEL, span_state, [ZERO; STATE_WIDTH], ONE, ZERO); let mut expected = span_init.inv(); // provide the initialization of the span hash - expected *= build_expected_from_trace(&trace, &alphas, 0); + expected *= build_expected_from_trace(&trace, &alphas, 0.into()); assert_eq!(expected, b_chip[1]); // at cycle 1 a merkle path verification is executed and the initialization and result of the @@ -746,11 +749,11 @@ fn b_chip_mrupdate() { } // at cycle 7 the result of the span hash is provided by the hasher - expected *= build_expected_from_trace(&trace, &alphas, 7); + expected *= build_expected_from_trace(&trace, &alphas, 7.into()); assert_eq!(expected, b_chip[8]); // at cycle 8 the initialization of the first merkle path is provided by the hasher - expected *= build_expected_from_trace(&trace, &alphas, 8); + expected *= build_expected_from_trace(&trace, &alphas, 8.into()); assert_eq!(expected, b_chip[9]); // Nothing changes when there is no communication with the hash chiplet. 
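// NOTE (illustration, not part of this patch): helpers such as `build_expected_from_trace` index
// the columns returned by `main_trace.get_column(..)` directly with a `RowIndex`, which implies
// the air crate provides an indexing impl for its column type. A self-contained sketch of that
// pattern on a hypothetical wrapper (u64 stands in for a field element; this is not the real
// `MainTrace` API):

use core::ops::Index;

use miden_air::RowIndex;

struct Column(Vec<u64>);

impl Index<RowIndex> for Column {
    type Output = u64;

    fn index(&self, row: RowIndex) -> &Self::Output {
        // delegate to ordinary usize indexing after an explicit conversion
        &self.0[usize::from(row)]
    }
}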
@@ -759,11 +762,11 @@ fn b_chip_mrupdate() { } // when the first merkle path verification has been completed the hasher provides the result - expected *= build_expected_from_trace(&trace, &alphas, mp_old_verify_complete - 1); + expected *= build_expected_from_trace(&trace, &alphas, (mp_old_verify_complete - 1).into()); assert_eq!(expected, b_chip[mp_old_verify_complete]); // at cycle 32 the initialization of the second merkle path is provided by the hasher - expected *= build_expected_from_trace(&trace, &alphas, mp_old_verify_complete); + expected *= build_expected_from_trace(&trace, &alphas, mp_old_verify_complete.into()); assert_eq!(expected, b_chip[mp_old_verify_complete + 1]); // Nothing changes when there is no communication with the hash chiplet. @@ -772,7 +775,7 @@ fn b_chip_mrupdate() { } // when the merkle path verification has been completed the hasher provides the result - expected *= build_expected_from_trace(&trace, &alphas, mp_new_verify_complete - 1); + expected *= build_expected_from_trace(&trace, &alphas, (mp_new_verify_complete - 1).into()); assert_eq!(expected, b_chip[mp_new_verify_complete]); // The value in b_chip should be ONE now and for the rest of the trace. @@ -827,7 +830,7 @@ fn build_expected( /// Reduces the specified row in the execution trace to an expected value representing a hash /// operation lookup. -fn build_expected_from_trace(trace: &ExecutionTrace, alphas: &[Felt], row: usize) -> Felt { +fn build_expected_from_trace(trace: &ExecutionTrace, alphas: &[Felt], row: RowIndex) -> Felt { let s0 = trace.main_trace.get_column(HASHER_TRACE_OFFSET)[row]; let s1 = trace.main_trace.get_column(HASHER_TRACE_OFFSET + 1)[row]; let s2 = trace.main_trace.get_column(HASHER_TRACE_OFFSET + 2)[row]; @@ -888,7 +891,7 @@ fn get_label_from_selectors(selectors: Selectors) -> Option { fn fill_state_from_decoder_with_domain( trace: &ExecutionTrace, state: &mut HasherState, - row: usize, + row: RowIndex, ) { let domain = extract_control_block_domain_from_trace(trace, row); state[CAPACITY_DOMAIN_IDX] = domain; @@ -898,7 +901,7 @@ fn fill_state_from_decoder_with_domain( /// Populates the provided HasherState with the state stored in the decoder's execution trace at the /// specified row. -fn fill_state_from_decoder(trace: &ExecutionTrace, state: &mut HasherState, row: usize) { +fn fill_state_from_decoder(trace: &ExecutionTrace, state: &mut HasherState, row: RowIndex) { for (i, col_idx) in DECODER_HASHER_STATE_RANGE.enumerate() { state[CAPACITY_LEN + i] = trace.main_trace.get_column(col_idx)[row]; } @@ -906,7 +909,7 @@ fn fill_state_from_decoder(trace: &ExecutionTrace, state: &mut HasherState, row: /// Extract the control block domain from the execution trace. This is achieved /// by calculating the op code as [bit_0 * 2**0 + bit_1 * 2**1 + ... 
+ bit_6 * 2**6] -fn extract_control_block_domain_from_trace(trace: &ExecutionTrace, row: usize) -> Felt { +fn extract_control_block_domain_from_trace(trace: &ExecutionTrace, row: RowIndex) -> Felt { // calculate the op code let opcode_value = DECODER_OP_BITS_RANGE.rev().fold(0u8, |result, bit_index| { let op_bit = trace.main_trace.get_column(bit_index)[row].as_int() as u8; diff --git a/processor/src/trace/tests/chiplets/memory.rs b/processor/src/trace/tests/chiplets/memory.rs index f4049e8011..2c7a750833 100644 --- a/processor/src/trace/tests/chiplets/memory.rs +++ b/processor/src/trace/tests/chiplets/memory.rs @@ -2,10 +2,13 @@ use super::{ build_trace_from_ops, rand_array, ExecutionTrace, Felt, FieldElement, Operation, Trace, Word, AUX_TRACE_RAND_ELEMENTS, CHIPLETS_AUX_TRACE_OFFSET, NUM_RAND_ROWS, ONE, ZERO, }; -use miden_air::trace::chiplets::{ - memory::{MEMORY_READ_LABEL, MEMORY_WRITE, MEMORY_WRITE_LABEL, NUM_ELEMENTS}, - MEMORY_ADDR_COL_IDX, MEMORY_CLK_COL_IDX, MEMORY_CTX_COL_IDX, MEMORY_SELECTORS_COL_IDX, - MEMORY_V_COL_RANGE, +use miden_air::{ + trace::chiplets::{ + memory::{MEMORY_READ_LABEL, MEMORY_WRITE, MEMORY_WRITE_LABEL, NUM_ELEMENTS}, + MEMORY_ADDR_COL_IDX, MEMORY_CLK_COL_IDX, MEMORY_CTX_COL_IDX, MEMORY_SELECTORS_COL_IDX, + MEMORY_V_COL_RANGE, + }, + RowIndex, }; /// Tests the generation of the `b_chip` bus column when only memory lookups are included. It @@ -83,15 +86,15 @@ fn b_chip_trace_mem() { let value = build_expected_memory(&rand_elements, MEMORY_READ_LABEL, ZERO, ZERO, Felt::new(8), word); expected *= value.inv(); - expected *= build_expected_memory_from_trace(&trace, &rand_elements, 8); + expected *= build_expected_memory_from_trace(&trace, &rand_elements, 8.into()); assert_eq!(expected, b_chip[9]); // At cycle 9, `MLoad` is provided by memory. - expected *= build_expected_memory_from_trace(&trace, &rand_elements, 9); + expected *= build_expected_memory_from_trace(&trace, &rand_elements, 9.into()); assert_eq!(expected, b_chip[10]); // At cycle 10, `MLoadW` is provided by memory. - expected *= build_expected_memory_from_trace(&trace, &rand_elements, 10); + expected *= build_expected_memory_from_trace(&trace, &rand_elements, 10.into()); assert_eq!(expected, b_chip[11]); // At cycle 11, `MStore` is requested by the stack and the first read of `MStream` is provided @@ -105,11 +108,11 @@ fn b_chip_trace_mem() { [ONE, ZERO, ZERO, ZERO], ); expected *= value.inv(); - expected *= build_expected_memory_from_trace(&trace, &rand_elements, 11); + expected *= build_expected_memory_from_trace(&trace, &rand_elements, 11.into()); assert_eq!(expected, b_chip[12]); // At cycle 12, `MStore` is provided by the memory - expected *= build_expected_memory_from_trace(&trace, &rand_elements, 12); + expected *= build_expected_memory_from_trace(&trace, &rand_elements, 12.into()); assert_eq!(expected, b_chip[13]); // At cycle 13, `MStream` is requested by the stack, and the second read of `MStream` is @@ -125,7 +128,7 @@ fn b_chip_trace_mem() { [ONE, ZERO, ZERO, ZERO], ); expected *= (value1 * value2).inv(); - expected *= build_expected_memory_from_trace(&trace, &rand_elements, 13); + expected *= build_expected_memory_from_trace(&trace, &rand_elements, 13.into()); assert_eq!(expected, b_chip[14]); // At cycle 14 the decoder requests the span hash. 
We set this as the inverse of the previously @@ -164,7 +167,11 @@ fn build_expected_memory( + word_value } -fn build_expected_memory_from_trace(trace: &ExecutionTrace, alphas: &[Felt], row: usize) -> Felt { +fn build_expected_memory_from_trace( + trace: &ExecutionTrace, + alphas: &[Felt], + row: RowIndex, +) -> Felt { // get the memory access operation let s0 = trace.main_trace.get_column(MEMORY_SELECTORS_COL_IDX)[row]; let s1 = trace.main_trace.get_column(MEMORY_SELECTORS_COL_IDX + 1)[row]; diff --git a/processor/src/trace/utils.rs b/processor/src/trace/utils.rs index a3f706c874..3919182ac6 100644 --- a/processor/src/trace/utils.rs +++ b/processor/src/trace/utils.rs @@ -2,7 +2,7 @@ use super::{Felt, FieldElement, NUM_RAND_ROWS}; use crate::{chiplets::Chiplets, utils::uninit_vector}; use alloc::vec::Vec; use core::slice; -use miden_air::trace::main_trace::MainTrace; +use miden_air::{trace::main_trace::MainTrace, RowIndex}; #[cfg(test)] use vm_core::{utils::ToElements, Operation}; @@ -41,7 +41,7 @@ impl<'a> TraceFragment<'a> { /// Updates a single cell in this fragment with provided value. #[inline(always)] - pub fn set(&mut self, row_idx: usize, col_idx: usize, value: Felt) { + pub fn set(&mut self, row_idx: RowIndex, col_idx: usize, value: Felt) { self.data[col_idx][row_idx] = value; } @@ -148,10 +148,13 @@ pub struct ChipletsLengths { impl ChipletsLengths { pub fn new(chiplets: &Chiplets) -> Self { ChipletsLengths { - hash_chiplet_len: chiplets.bitwise_start(), - bitwise_chiplet_len: chiplets.memory_start() - chiplets.bitwise_start(), - memory_chiplet_len: chiplets.kernel_rom_start() - chiplets.memory_start(), - kernel_rom_len: chiplets.padding_start() - chiplets.kernel_rom_start(), + hash_chiplet_len: chiplets.bitwise_start().into(), + bitwise_chiplet_len: usize::from(chiplets.memory_start()) + - usize::from(chiplets.bitwise_start()), + memory_chiplet_len: usize::from(chiplets.kernel_rom_start()) + - usize::from(chiplets.memory_start()), + kernel_rom_len: usize::from(chiplets.padding_start()) + - usize::from(chiplets.kernel_rom_start()), } } @@ -210,9 +213,9 @@ pub trait AuxColumnBuilder> { // REQUIRED METHODS // -------------------------------------------------------------------------------------------- - fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], row_idx: usize) -> E; + fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], row: RowIndex) -> E; - fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], row_idx: usize) -> E; + fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], row: RowIndex) -> E; // PROVIDED METHODS // -------------------------------------------------------------------------------------------- @@ -235,9 +238,10 @@ pub trait AuxColumnBuilder> { let mut requests_running_prod = E::ONE; for row_idx in 0..main_trace.num_rows() - 1 { + let row = row_idx.into(); responses_prod[row_idx + 1] = - responses_prod[row_idx] * self.get_responses_at(main_trace, alphas, row_idx); - requests[row_idx + 1] = self.get_requests_at(main_trace, alphas, row_idx); + responses_prod[row_idx] * self.get_responses_at(main_trace, alphas, row); + requests[row_idx + 1] = self.get_requests_at(main_trace, alphas, row); requests_running_prod *= requests[row_idx + 1]; } diff --git a/stdlib/tests/crypto/falcon.rs b/stdlib/tests/crypto/falcon.rs index 2cbf89b6a8..010f917e72 100644 --- a/stdlib/tests/crypto/falcon.rs +++ b/stdlib/tests/crypto/falcon.rs @@ -173,7 +173,7 @@ fn test_falcon512_probabilistic_product_failure() { expect_exec_error!( test, 
ExecutionError::FailedAssertion { - clk: 31615, + clk: 31615.into(), err_code: 0, err_msg: None, } diff --git a/stdlib/tests/crypto/native.rs b/stdlib/tests/crypto/native.rs index 3c96997aa3..e63c77a247 100644 --- a/stdlib/tests/crypto/native.rs +++ b/stdlib/tests/crypto/native.rs @@ -18,7 +18,7 @@ fn test_invalid_end_addr() { expect_exec_error!( test, ExecutionError::FailedAssertion { - clk: 18, + clk: 18.into(), err_code: 0, err_msg: None, } @@ -39,7 +39,7 @@ fn test_invalid_end_addr() { expect_exec_error!( test, ExecutionError::FailedAssertion { - clk: 18, + clk: 18.into(), err_code: 0, err_msg: None, } diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index 92137a5233..ef67bdb011 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -24,6 +24,7 @@ std = [ ] [dependencies] +air = { package = "miden-air", path = "../air", version = "0.10", default-features = false } assembly = { package = "miden-assembly", path = "../assembly", version = "0.10", default-features = false, features = [ "testing", ] }
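Note: the new air/src/trace/rows.rs module created by this patch is not shown in the hunks above. Judging only from the call sites in this diff (RowIndex::from(0), u32::from(clk), usize::from(clk), u64::from(clk), clk + 1, clk += 1, ordering comparisons, and use as a BTreeMap key), a minimal newtype consistent with those uses could look roughly like the sketch below. The actual file almost certainly covers more (for example Display, Felt conversions, and checked arithmetic), so treat this purely as an orientation aid rather than the real implementation.

use core::ops::{Add, AddAssign};

/// Sketch of a strongly typed trace-row index; not the actual contents of air/src/trace/rows.rs.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RowIndex(u32);

impl From<u32> for RowIndex {
    fn from(value: u32) -> Self {
        Self(value)
    }
}

impl From<usize> for RowIndex {
    fn from(value: usize) -> Self {
        // assumes trace lengths fit into u32; the real type may handle overflow differently
        Self(u32::try_from(value).expect("row index does not fit into u32"))
    }
}

impl From<RowIndex> for u32 {
    fn from(value: RowIndex) -> Self {
        value.0
    }
}

impl From<RowIndex> for u64 {
    fn from(value: RowIndex) -> Self {
        value.0.into()
    }
}

impl From<RowIndex> for usize {
    fn from(value: RowIndex) -> Self {
        value.0 as usize
    }
}

// enables expressions like `clk + 1` and `self.clk += 1` seen throughout the processor changes
impl Add<u32> for RowIndex {
    type Output = RowIndex;

    fn add(self, rhs: u32) -> Self::Output {
        RowIndex(self.0 + rhs)
    }
}

impl AddAssign<u32> for RowIndex {
    fn add_assign(&mut self, rhs: u32) {
        self.0 += rhs;
    }
}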