Assert word alignment for LW and SW
moodlezoup committed Nov 22, 2024
1 parent ace2f72 commit b97a1df
Showing 6 changed files with 86 additions and 75 deletions.
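For orientation, here is a minimal standalone sketch (not code from this commit) of the check that LW and SW now perform through their instruction lookup: the effective address rs1 + sign-extended imm must be a multiple of 4.

fn word_access_is_aligned(rs1: u32, imm: i32) -> bool {
    // LW/SW are 4-byte accesses, so the low two address bits must be zero.
    rs1.wrapping_add(imm as u32) % 4 == 0
}

fn main() {
    assert!(word_access_is_aligned(0x1000, -4)); // 0x0FFC is word-aligned
    assert!(!word_access_is_aligned(0x1000, 2)); // 0x1002 is not
}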
34 changes: 18 additions & 16 deletions common/src/rv_trace.rs
@@ -95,8 +95,7 @@ impl From<&RVTraceRow> for [MemoryOp; MEMORY_OPS_PER_INSTRUCTION] {
MemoryOp::noop_read(),
],
RV32InstructionFormat::I => match val.instruction.opcode {
RV32IM::VIRTUAL_ASSERT_HALFWORD_ALIGNMENT
| RV32IM::VIRTUAL_ASSERT_WORD_ALIGNMENT => [
RV32IM::VIRTUAL_ASSERT_HALFWORD_ALIGNMENT => [
rs1_read(),
MemoryOp::noop_read(),
MemoryOp::noop_write(),
@@ -230,7 +229,10 @@ impl ELFInstruction {
| RV32IM::SLTIU
| RV32IM::AUIPC
| RV32IM::JAL
| RV32IM::JALR,
| RV32IM::JALR
| RV32IM::SW
| RV32IM::LW
| RV32IM::VIRTUAL_ASSERT_HALFWORD_ALIGNMENT,
);

flags[CircuitFlags::Load as usize] = matches!(
@@ -273,7 +275,6 @@ impl ELFInstruction {
| RV32IM::VIRTUAL_ASSERT_VALID_SIGNED_REMAINDER
| RV32IM::VIRTUAL_ASSERT_VALID_UNSIGNED_REMAINDER
| RV32IM::VIRTUAL_ASSERT_HALFWORD_ALIGNMENT
| RV32IM::VIRTUAL_ASSERT_WORD_ALIGNMENT
);

flags[CircuitFlags::ConcatLookupQueryChunks as usize] = matches!(
@@ -313,10 +314,12 @@ impl ELFInstruction {
RV32IM::VIRTUAL_ASSERT_EQ |
RV32IM::VIRTUAL_ASSERT_LTE |
RV32IM::VIRTUAL_ASSERT_HALFWORD_ALIGNMENT |
RV32IM::VIRTUAL_ASSERT_WORD_ALIGNMENT |
RV32IM::VIRTUAL_ASSERT_VALID_SIGNED_REMAINDER |
RV32IM::VIRTUAL_ASSERT_VALID_UNSIGNED_REMAINDER |
RV32IM::VIRTUAL_ASSERT_VALID_DIV0,
RV32IM::VIRTUAL_ASSERT_VALID_DIV0 |
// SW and LW perform an `AssertAlignedMemoryAccessInstruction` lookup
RV32IM::SW |
RV32IM::LW
);

// All instructions in virtual sequence are mapped from the same
@@ -364,14 +367,15 @@ impl RVTraceRow {
}

pub fn imm_u32(&self) -> u32 {
match self.instruction.opcode.instruction_type() {
RV32InstructionFormat::R => unimplemented!("R type does not use imm u32"),
RV32InstructionFormat::I => self.instruction.imm.unwrap() as u64 as u32,
RV32InstructionFormat::U => self.instruction.imm.unwrap() as u64 as u32,
RV32InstructionFormat::S => unimplemented!("S type does not use imm u32"),
RV32InstructionFormat::UJ => self.instruction.imm.unwrap() as u64 as u32,
_ => unimplemented!(),
}
self.instruction.imm.unwrap() as u64 as u32
// match self.instruction.opcode.instruction_type() {
// RV32InstructionFormat::R => unimplemented!("R type does not use imm u32"),
// RV32InstructionFormat::I => self.instruction.imm.unwrap() as u64 as u32,
// RV32InstructionFormat::U => self.instruction.imm.unwrap() as u64 as u32,
// RV32InstructionFormat::S => unimplemented!("S type does not use imm u32"),
// RV32InstructionFormat::UJ => self.instruction.imm.unwrap() as u64 as u32,
// _ => unimplemented!(),
// }
}
}

@@ -440,7 +444,6 @@ pub enum RV32IM {
VIRTUAL_ASSERT_EQ,
VIRTUAL_ASSERT_VALID_DIV0,
VIRTUAL_ASSERT_HALFWORD_ALIGNMENT,
VIRTUAL_ASSERT_WORD_ALIGNMENT,
}

impl FromStr for RV32IM {
@@ -549,7 +552,6 @@ impl RV32IM {
RV32IM::SLTIU |
RV32IM::VIRTUAL_MOVE |
RV32IM::VIRTUAL_ASSERT_HALFWORD_ALIGNMENT |
RV32IM::VIRTUAL_ASSERT_WORD_ALIGNMENT |
RV32IM::VIRTUAL_MOVSIGN => RV32InstructionFormat::I,

RV32IM::LB |
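The collapsed `imm_u32` works because the stored immediate is already sign-extended, so casting through u64 to u32 recovers the 32-bit two's-complement pattern for every instruction format. A hedged, standalone illustration (assuming the immediate is held as a signed 64-bit value, as the `as u64` cast above suggests):

fn imm_u32(imm: i64) -> u32 {
    // Truncate the sign-extended immediate to its 32-bit bit pattern.
    imm as u64 as u32
}

fn main() {
    assert_eq!(imm_u32(-8), 0xFFFF_FFF8);   // negative I/S-type offset
    assert_eq!(imm_u32(2048), 0x0000_0800); // positive immediate
}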
@@ -6,19 +6,21 @@ use super::{JoltInstruction, SubtableIndices};
use crate::{
field::JoltField,
jolt::subtable::{low_bit::LowBitSubtable, LassoSubtable},
utils::instruction_utils::chunk_operand_usize,
utils::instruction_utils::{add_and_chunk_operands, assert_valid_parameters},
};

/// (address, offset)
#[derive(Copy, Clone, Default, Debug, Serialize, Deserialize, PartialEq)]
pub struct AssertAlignedMemoryAccessInstruction<const WORD_SIZE: usize, const ALIGN: usize>(
pub u64,
pub u64,
);

impl<const WORD_SIZE: usize, const ALIGN: usize> JoltInstruction
for AssertAlignedMemoryAccessInstruction<WORD_SIZE, ALIGN>
{
fn operands(&self) -> (u64, u64) {
(self.0, 0)
(self.0, self.1)
}

fn combine_lookups<F: JoltField>(&self, vals: &[F], _: usize, _: usize) -> F {
@@ -71,18 +73,25 @@ impl<const WORD_SIZE: usize, const ALIGN: usize> JoltInstruction
}

fn to_indices(&self, C: usize, log_M: usize) -> Vec<usize> {
chunk_operand_usize(self.0, C, log_M)
assert_valid_parameters(WORD_SIZE, C, log_M);
add_and_chunk_operands(self.0 as u128, self.1 as u128, C, log_M)
}

fn lookup_entry(&self) -> u64 {
(self.0 % ALIGN as u64 == 0) as u64
if WORD_SIZE == 32 {
((self.0 as u32 as i32 + self.1 as u32 as i32) % ALIGN as i32 == 0) as u64
} else if WORD_SIZE == 64 {
((self.0 as i64 + self.1 as i64) % ALIGN as i64 == 0) as u64
} else {
panic!("Only 32-bit and 64-bit word sizes are supported");
}
}

fn random(&self, rng: &mut StdRng) -> Self {
if WORD_SIZE == 32 {
Self(rng.next_u32() as u64)
Self(rng.next_u32() as u64, (rng.next_u32() % (1 << 12)) as u64)
} else if WORD_SIZE == 64 {
Self(rng.next_u64())
Self(rng.next_u64(), rng.next_u64() % (1 << 12))
} else {
panic!("Only 32-bit and 64-bit word sizes are supported");
}
@@ -109,13 +118,15 @@ mod test {
// ALIGN = 2
for _ in 0..256 {
let x = rng.next_u64();
let instruction = AssertAlignedMemoryAccessInstruction::<WORD_SIZE, 2>(x);
let imm = rng.next_u64() as i64 % (1 << 12);
let instruction = AssertAlignedMemoryAccessInstruction::<WORD_SIZE, 2>(x, imm as u64);
jolt_instruction_test!(instruction);
}
// ALIGN = 4
for _ in 0..256 {
let x = rng.next_u64();
let instruction = AssertAlignedMemoryAccessInstruction::<WORD_SIZE, 4>(x);
let imm = rng.next_u64() as i64 % (1 << 12);
let instruction = AssertAlignedMemoryAccessInstruction::<WORD_SIZE, 4>(x, imm as u64);
jolt_instruction_test!(instruction);
}
}
@@ -130,13 +141,15 @@ mod test {
// ALIGN = 2
for _ in 0..256 {
let x = rng.next_u64();
let instruction = AssertAlignedMemoryAccessInstruction::<WORD_SIZE, 2>(x);
let imm = rng.next_u64() as i64 % (1 << 12);
let instruction = AssertAlignedMemoryAccessInstruction::<WORD_SIZE, 2>(x, imm as u64);
jolt_instruction_test!(instruction);
}
// ALIGN = 4
for _ in 0..256 {
let x = rng.next_u64();
let instruction = AssertAlignedMemoryAccessInstruction::<WORD_SIZE, 4>(x);
let imm = rng.next_u64() as i64 % (1 << 12);
let instruction = AssertAlignedMemoryAccessInstruction::<WORD_SIZE, 4>(x, imm as u64);
jolt_instruction_test!(instruction);
}
}
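A standalone sketch of the updated two-operand semantics (it mirrors the 32-bit branch of `lookup_entry` but does not use the crate's types; the values are illustrative): the lookup entry is 1 exactly when address + offset is ALIGN-byte aligned.

fn aligned_lookup_entry(address: u64, offset: u64, align: i32) -> u64 {
    // Same arithmetic as the 32-bit branch of `lookup_entry` above.
    let sum = (address as u32 as i32).wrapping_add(offset as u32 as i32);
    (sum % align == 0) as u64
}

fn main() {
    // LW/SW case (ALIGN = 4): 0x2000 + (-4) = 0x1FFC is word-aligned.
    assert_eq!(aligned_lookup_entry(0x2000, (-4i64) as u64, 4), 1);
    // LH/LHU case (ALIGN = 2): 0x2000 + 3 = 0x2003 is not halfword-aligned.
    assert_eq!(aligned_lookup_entry(0x2000, 3, 2), 0);
}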
27 changes: 14 additions & 13 deletions jolt-core/src/jolt/instruction/virtual_lh.rs
@@ -36,43 +36,44 @@ impl<const WORD_SIZE: usize> VirtualInstructionSequence for VirtualLHInstruction
_ => panic!("Unsupported WORD_SIZE: {}", WORD_SIZE),
};

let ram_address = ADDInstruction::<WORD_SIZE>(rs1_val, offset_unsigned).lookup_entry();
let is_aligned =
AssertAlignedMemoryAccessInstruction::<WORD_SIZE, 2>(rs1_val, offset_unsigned)
.lookup_entry();
debug_assert_eq!(is_aligned, 1);
virtual_trace.push(RVTraceRow {
instruction: ELFInstruction {
address: trace_row.instruction.address,
opcode: RV32IM::ADDI,
opcode: RV32IM::VIRTUAL_ASSERT_HALFWORD_ALIGNMENT,
rs1,
rs2: None,
rd: v_address,
rd: None,
imm: Some(offset),
virtual_sequence_remaining: Some(Self::SEQUENCE_LENGTH - virtual_trace.len() - 1),
},
register_state: RegisterState {
rs1_val: Some(rs1_val),
rs2_val: None,
rd_post_val: Some(ram_address),
rd_post_val: None,
},
memory_state: None,
advice_value: None,
});

let is_aligned =
AssertAlignedMemoryAccessInstruction::<WORD_SIZE, 2>(ram_address).lookup_entry();
debug_assert_eq!(is_aligned, 1);
let ram_address = ADDInstruction::<WORD_SIZE>(rs1_val, offset_unsigned).lookup_entry();
virtual_trace.push(RVTraceRow {
instruction: ELFInstruction {
address: trace_row.instruction.address,
opcode: RV32IM::VIRTUAL_ASSERT_HALFWORD_ALIGNMENT,
rs1: v_address,
opcode: RV32IM::ADDI,
rs1,
rs2: None,
rd: None,
imm: None,
rd: v_address,
imm: Some(offset),
virtual_sequence_remaining: Some(Self::SEQUENCE_LENGTH - virtual_trace.len() - 1),
},
register_state: RegisterState {
rs1_val: Some(ram_address),
rs1_val: Some(rs1_val),
rs2_val: None,
rd_post_val: None,
rd_post_val: Some(ram_address),
},
memory_state: None,
advice_value: None,
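In both the LH and LHU virtual sequences (this file and virtual_lhu.rs below), the alignment assert is hoisted above the ADDI and now checks (rs1, imm) directly rather than the intermediate v_address register; only the two rows shown in each hunk change. Sketched ordering of that prefix, with the remaining rows of the sequences omitted:

    before: ADDI v_address = rs1 + imm; then VIRTUAL_ASSERT_HALFWORD_ALIGNMENT(v_address)
    after:  VIRTUAL_ASSERT_HALFWORD_ALIGNMENT(rs1, imm); then ADDI v_address = rs1 + imm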
27 changes: 14 additions & 13 deletions jolt-core/src/jolt/instruction/virtual_lhu.rs
@@ -36,43 +36,44 @@ impl<const WORD_SIZE: usize> VirtualInstructionSequence for VirtualLHUInstruction
_ => panic!("Unsupported WORD_SIZE: {}", WORD_SIZE),
};

let ram_address = ADDInstruction::<WORD_SIZE>(rs1_val, offset_unsigned).lookup_entry();
let is_aligned =
AssertAlignedMemoryAccessInstruction::<WORD_SIZE, 2>(rs1_val, offset_unsigned)
.lookup_entry();
debug_assert_eq!(is_aligned, 1);
virtual_trace.push(RVTraceRow {
instruction: ELFInstruction {
address: trace_row.instruction.address,
opcode: RV32IM::ADDI,
opcode: RV32IM::VIRTUAL_ASSERT_HALFWORD_ALIGNMENT,
rs1,
rs2: None,
rd: v_address,
rd: None,
imm: Some(offset),
virtual_sequence_remaining: Some(Self::SEQUENCE_LENGTH - virtual_trace.len() - 1),
},
register_state: RegisterState {
rs1_val: Some(rs1_val),
rs2_val: None,
rd_post_val: Some(ram_address),
rd_post_val: None,
},
memory_state: None,
advice_value: None,
});

let is_aligned =
AssertAlignedMemoryAccessInstruction::<WORD_SIZE, 2>(ram_address).lookup_entry();
debug_assert_eq!(is_aligned, 1);
let ram_address = ADDInstruction::<WORD_SIZE>(rs1_val, offset_unsigned).lookup_entry();
virtual_trace.push(RVTraceRow {
instruction: ELFInstruction {
address: trace_row.instruction.address,
opcode: RV32IM::VIRTUAL_ASSERT_HALFWORD_ALIGNMENT,
rs1: v_address,
opcode: RV32IM::ADDI,
rs1,
rs2: None,
rd: None,
imm: None,
rd: v_address,
imm: Some(offset),
virtual_sequence_remaining: Some(Self::SEQUENCE_LENGTH - virtual_trace.len() - 1),
},
register_state: RegisterState {
rs1_val: Some(ram_address),
rs1_val: Some(rs1_val),
rs2_val: None,
rd_post_val: None,
rd_post_val: Some(ram_address),
},
memory_state: None,
advice_value: None,
19 changes: 8 additions & 11 deletions jolt-core/src/jolt/trace/rv.rs
@@ -23,7 +23,7 @@ use crate::jolt::instruction::virtual_move::MOVEInstruction;
use crate::jolt::instruction::xor::XORInstruction;
use crate::jolt::instruction::{add::ADDInstruction, virtual_movsign::MOVSIGNInstruction};
use crate::jolt::vm::rv32i_vm::RV32I;
use common::rv_trace::{ELFInstruction, MemoryState, RVTraceRow, RV32IM};
use common::rv_trace::{ELFInstruction, RVTraceRow, RV32IM};

impl TryFrom<&ELFInstruction> for RV32I {
type Error = &'static str;
@@ -76,7 +76,9 @@ impl TryFrom<&ELFInstruction> for RV32I {
RV32IM::VIRTUAL_ASSERT_VALID_SIGNED_REMAINDER => Ok(AssertValidSignedRemainderInstruction::default().into()),
RV32IM::VIRTUAL_ASSERT_VALID_DIV0 => Ok(AssertValidDiv0Instruction::default().into()),
RV32IM::VIRTUAL_ASSERT_HALFWORD_ALIGNMENT => Ok(AssertAlignedMemoryAccessInstruction::<32, 2>::default().into()),
RV32IM::VIRTUAL_ASSERT_WORD_ALIGNMENT => Ok(AssertAlignedMemoryAccessInstruction::<32, 4>::default().into()),

RV32IM::LW => Ok(AssertAlignedMemoryAccessInstruction::<32, 4>::default().into()),
RV32IM::SW => Ok(AssertAlignedMemoryAccessInstruction::<32, 4>::default().into()),

_ => Err("No corresponding RV32I instruction")
}
@@ -133,17 +135,12 @@ impl TryFrom<&RVTraceRow> for RV32I {
RV32IM::VIRTUAL_ASSERT_VALID_UNSIGNED_REMAINDER => Ok(AssertValidUnsignedRemainderInstruction(row.register_state.rs1_val.unwrap(), row.register_state.rs2_val.unwrap()).into()),
RV32IM::VIRTUAL_ASSERT_VALID_SIGNED_REMAINDER => Ok(AssertValidSignedRemainderInstruction(row.register_state.rs1_val.unwrap(), row.register_state.rs2_val.unwrap()).into()),
RV32IM::VIRTUAL_ASSERT_VALID_DIV0 => Ok(AssertValidDiv0Instruction(row.register_state.rs1_val.unwrap(), row.register_state.rs2_val.unwrap()).into()),
RV32IM::VIRTUAL_ASSERT_HALFWORD_ALIGNMENT => Ok(AssertAlignedMemoryAccessInstruction::<32, 2>(row.register_state.rs1_val.unwrap()).into()),
RV32IM::VIRTUAL_ASSERT_WORD_ALIGNMENT => Ok(AssertAlignedMemoryAccessInstruction::<32, 4>(row.register_state.rs1_val.unwrap()).into()),
RV32IM::VIRTUAL_ASSERT_HALFWORD_ALIGNMENT => Ok(AssertAlignedMemoryAccessInstruction::<32, 2>(row.register_state.rs1_val.unwrap(), row.imm_u32() as u64).into()),

RV32IM::LW => Ok(AssertAlignedMemoryAccessInstruction::<32, 4>(row.register_state.rs1_val.unwrap(), row.imm_u32() as u64).into()),
RV32IM::SW => Ok(AssertAlignedMemoryAccessInstruction::<32, 4>(row.register_state.rs1_val.unwrap(), row.imm_u32() as u64).into()),

_ => Err("No corresponding RV32I instruction")
}
}
}

fn load_value(row: &RVTraceRow) -> u64 {
match row.memory_state.as_ref().unwrap() {
MemoryState::Read { address: _, value } => *value,
_ => panic!("Unexpected Write"),
}
}
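With the new TryFrom arms, the instruction-lookup slot for an LW or SW trace row is filled by the word-alignment assert on (rs1_val, imm). A simplified standalone mirror of that conversion (the Row type and helper below are illustrative stand-ins, not the crate's API):

struct Row {
    rs1_val: u64,
    imm: i64,
}

fn lw_sw_lookup_operands(row: &Row) -> (u64, u64) {
    // Mirrors `(row.register_state.rs1_val.unwrap(), row.imm_u32() as u64)`.
    (row.rs1_val, row.imm as u64 as u32 as u64)
}

fn main() {
    let row = Row { rs1_val: 0x1000, imm: -4 };
    assert_eq!(lw_sw_lookup_operands(&row), (0x1000, 0xFFFF_FFFC));
}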
21 changes: 9 additions & 12 deletions jolt-core/src/r1cs/constraints.rs
@@ -119,11 +119,14 @@ impl<const C: usize, F: JoltField> R1CSConstraints<C, F> for JoltRV32IMConstraints
let packed_query =
R1CSBuilder::<C, F, JoltR1CSInputs>::pack_be(query_chunks.clone(), LOG_M);

cs.constrain_eq_conditional(
JoltR1CSInputs::InstructionFlags(ADDInstruction::default().into()),
packed_query.clone(),
x + y,
);
let add_operands = JoltR1CSInputs::InstructionFlags(ADDInstruction::default().into())
+ JoltR1CSInputs::InstructionFlags(
AssertAlignedMemoryAccessInstruction::<32, 2>::default().into(),
)
+ JoltR1CSInputs::InstructionFlags(
AssertAlignedMemoryAccessInstruction::<32, 4>::default().into(),
);
cs.constrain_eq_conditional(add_operands, packed_query.clone(), x + y);
// Converts from unsigned to twos-complement representation
cs.constrain_eq_conditional(
JoltR1CSInputs::InstructionFlags(SUBInstruction::default().into()),
@@ -137,13 +140,7 @@ cs.constrain_eq_conditional(
cs.constrain_eq_conditional(is_mul, packed_query.clone(), product);
cs.constrain_eq_conditional(
JoltR1CSInputs::InstructionFlags(MOVSIGNInstruction::default().into())
+ JoltR1CSInputs::InstructionFlags(MOVEInstruction::default().into())
+ JoltR1CSInputs::InstructionFlags(
AssertAlignedMemoryAccessInstruction::<32, 2>::default().into(),
)
+ JoltR1CSInputs::InstructionFlags(
AssertAlignedMemoryAccessInstruction::<32, 4>::default().into(),
),
+ JoltR1CSInputs::InstructionFlags(MOVEInstruction::default().into()),
packed_query.clone(),
x,
);
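The moved flags change which conditional constraint binds the alignment asserts' packed lookup query: it must now equal x + y (the address operands rs1 and imm) rather than x alone, matching the instruction's new two-operand form. A hedged sketch of the algebraic shape of such a conditional equality, with plain integers standing in for field elements (this is not the Jolt constraint-builder API):

fn conditional_eq_holds(flag: i128, packed_query: i128, x: i128, y: i128) -> bool {
    // flag * (packed_query - (x + y)) == 0: when flag == 1 the query must
    // equal x + y; when flag == 0 the constraint is vacuously satisfied.
    flag * (packed_query - (x + y)) == 0
}

fn main() {
    assert!(conditional_eq_holds(1, 0x1000 + 0x20, 0x1000, 0x20));
    assert!(conditional_eq_holds(0, 0xdead, 0x1000, 0x20));
}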
