Merge pull request risc0#67 from taikoxyz/guest-blob-decoding
Move blob decoding to the guest as well
Brechtpd authored Mar 23, 2024
2 parents 9b31f9d + 0a92f96 commit 98284f8
Showing 3 changed files with 122 additions and 106 deletions.
lib/src/builder/execute.rs (1 change: 1 addition & 0 deletions)
@@ -58,6 +58,7 @@ impl TxExecStrategy for TkoTxExecStrategy {

// generate the transactions from the tx list
let mut transactions = generate_transactions(
block_builder.input.taiko.block_proposed.meta.blobUsed,
&block_builder.input.taiko.tx_list,
serde_json::from_str(&block_builder.input.taiko.anchor_tx.clone()).unwrap(),
);
lib/src/taiko_utils.rs (116 changes: 115 additions & 1 deletion)
@@ -28,7 +28,18 @@ pub fn decode_transactions(tx_list: &[u8]) -> Vec<TxEnvelope> {
alloy_rlp::Decodable::decode(&mut &tx_list.to_owned()[..]).unwrap_or_default()
}

-pub fn generate_transactions(tx_list: &[u8], anchor_tx: AlloyTransaction) -> Vec<TxEnvelope> {
pub fn generate_transactions(
is_blob_data: bool,
tx_list: &[u8],
anchor_tx: AlloyTransaction,
) -> Vec<TxEnvelope> {
// Decode the tx list from the raw data posted onchain
let tx_list = &if is_blob_data {
decode_blob_data(tx_list)
} else {
tx_list.to_owned()
};

// Decode the transactions from the tx list
let mut transactions = decode_transactions(tx_list);
// Create a tx from the anchor tx that has the same type as the transactions encoded from
@@ -62,6 +73,109 @@ pub fn generate_transactions(tx_list: &[u8], anchor_tx: AlloyTransaction) -> Vec<TxEnvelope> {
transactions
}

const BLOB_FIELD_ELEMENT_NUM: usize = 4096;
const BLOB_FIELD_ELEMENT_BYTES: usize = 32;
const BLOB_DATA_CAPACITY: usize = BLOB_FIELD_ELEMENT_NUM * BLOB_FIELD_ELEMENT_BYTES;
const BLOB_VERSION_OFFSET: usize = 1;
const BLOB_ENCODING_VERSION: u8 = 0;
const MAX_BLOB_DATA_SIZE: usize = (4 * 31 + 3) * 1024 - 4;
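
// Editor's sketch (not part of this commit): each decoding round consumes 4
// field elements (4 * 32 = 128 input bytes) and yields 4 * 31 payload bytes
// plus 3 bytes reassembled from the four 6-bit chunks, i.e. 127 output bytes.
// Over 1024 rounds that is 130048 bytes; the 4-byte header (a version byte
// plus a 3-byte big-endian length) brings the maximum payload down to 130044.
const _: () = assert!(MAX_BLOB_DATA_SIZE == 130_044);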

// decoding based on https://github.com/ethereum-optimism/optimism/blob/develop/op-service/eth/blob.go
fn decode_blob_data(blob_buf: &[u8]) -> Vec<u8> {
// check the version
if blob_buf[BLOB_VERSION_OFFSET] != BLOB_ENCODING_VERSION {
return Vec::new();
}

// decode the 3-byte big-endian length value into a 4-byte integer
let output_len =
((blob_buf[2] as u32) << 16 | (blob_buf[3] as u32) << 8 | (blob_buf[4] as u32)) as usize;
if output_len > MAX_BLOB_DATA_SIZE {
return Vec::new();
}

// round 0 is special cased to copy only the remaining 27 bytes of the first field element
// into the output due to version/length encoding already occupying its first 5 bytes.
let mut output = [0; MAX_BLOB_DATA_SIZE];
output[0..27].copy_from_slice(&blob_buf[5..32]);

// now process remaining 3 field elements to complete round 0
let mut opos: usize = 28; // current position into output buffer
let mut ipos: usize = 32; // current position into the input blob
let mut encoded_byte: [u8; 4] = [0; 4]; // buffer for the 4 6-bit chunks
encoded_byte[0] = blob_buf[0];
for encoded_byte_i in encoded_byte.iter_mut().skip(1) {
(*encoded_byte_i, opos, ipos) =
match decode_field_element(&blob_buf, opos, ipos, &mut output) {
Ok(res) => res,
Err(_) => return Vec::new(),
}
}
opos = reassemble_bytes(opos, &encoded_byte, &mut output);

// in each remaining round we decode 4 field elements (128 bytes) of the input into 127
// bytes of output
for _ in 1..1024 {
if opos < output_len {
for encoded_byte_j in &mut encoded_byte {
// save the first byte of each field element for later re-assembly
(*encoded_byte_j, opos, ipos) =
match decode_field_element(&blob_buf, opos, ipos, &mut output) {
Ok(res) => res,
Err(_) => return Vec::new(),
}
}
opos = reassemble_bytes(opos, &encoded_byte, &mut output)
}
}
for otailing in output.iter().skip(output_len) {
if *otailing != 0 {
return Vec::new();
}
}
for itailing in blob_buf.iter().take(BLOB_DATA_CAPACITY).skip(ipos) {
if *itailing != 0 {
return Vec::new();
}
}
output[0..output_len].to_vec()
}
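
// Usage sketch (editor's addition, not part of this commit): a zero-filled
// blob with encoding version 0, a big-endian length of 5 in bytes 2..5, and
// the payload starting at byte 5 decodes back to exactly that payload.
#[cfg(test)]
mod decode_blob_data_sketch {
    use super::*;

    #[test]
    fn decodes_round_zero_payload() {
        let mut blob = vec![0u8; BLOB_DATA_CAPACITY];
        blob[4] = 5; // declared output length
        blob[5..10].copy_from_slice(b"hello");
        assert_eq!(decode_blob_data(&blob), b"hello".to_vec());
    }
}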

fn decode_field_element(
b: &[u8],
opos: usize,
ipos: usize,
output: &mut [u8],
) -> Result<(u8, usize, usize)> {
// two highest order bits of the first byte of each field element should always be 0
if b[ipos] & 0b1100_0000 != 0 {
return Err(anyhow::anyhow!(
"ErrBlobInvalidFieldElement: field element: {}",
ipos
));
}
// copy(output[opos:], b[ipos+1:ipos+32])
output[opos..opos + 31].copy_from_slice(&b[ipos + 1..ipos + 32]);
Ok((b[ipos], opos + 32, ipos + 32))
}
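
// Sketch (editor's addition, not part of this commit): any field element
// whose first byte has one of the two high bits set is rejected, since a
// canonical encoding only uses the low 6 bits of that byte.
#[cfg(test)]
mod decode_field_element_sketch {
    use super::*;

    #[test]
    fn rejects_non_canonical_first_byte() {
        let blob = [0b0100_0000u8; 32];
        let mut output = [0u8; 31];
        assert!(decode_field_element(&blob, 0, 0, &mut output).is_err());
    }
}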

fn reassemble_bytes(
opos: usize,
encoded_byte: &[u8; 4],
output: &mut [u8; MAX_BLOB_DATA_SIZE],
) -> usize {
// account for the fact that we don't output a 128th byte
let opos = opos - 1;
let x = (encoded_byte[0] & 0b0011_1111) | ((encoded_byte[1] & 0b0011_0000) << 2);
let y = (encoded_byte[1] & 0b0000_1111) | ((encoded_byte[3] & 0b0000_1111) << 4);
let z = (encoded_byte[2] & 0b0011_1111) | ((encoded_byte[3] & 0b0011_0000) << 2);
// put the re-assembled bytes in their appropriate output locations
output[opos - 32] = z;
output[opos - (32 * 2)] = y;
output[opos - (32 * 3)] = x;
opos
}
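
// Worked example (editor's addition, not part of this commit): four all-ones
// 6-bit chunks reassemble into three 0xFF bytes, written 96, 64 and 32
// positions behind the decremented output cursor.
#[cfg(test)]
mod reassemble_bytes_sketch {
    use super::*;

    #[test]
    fn reassembles_all_ones_chunks() {
        let mut output = [0u8; MAX_BLOB_DATA_SIZE];
        let opos = reassemble_bytes(128, &[0x3F; 4], &mut output);
        assert_eq!(opos, 127);
        assert_eq!((output[31], output[63], output[95]), (0xFF, 0xFF, 0xFF));
    }
}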

const GX1: Lazy<U256> =
Lazy::new(|| uint!(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798_U256));
const N: Lazy<U256> =
raiko-host/src/host/host.rs (111 changes: 6 additions & 105 deletions)
@@ -118,15 +118,16 @@ pub fn preflight(
.cloned()
.collect::<Vec<GetBlobData>>();
assert!(!tx_blobs.is_empty());
-(decode_blob_data(&tx_blobs[0].blob), Some(blob_hash))
(blob_to_bytes(&tx_blobs[0].blob), Some(blob_hash))
} else {
// Get the tx list data directly from the propose transaction data
let proposal_call = proposeBlockCall::abi_decode(&proposal_tx.input, false).unwrap();
(proposal_call.txList.clone(), None)
};

// Create the transactions from the proposed tx list
-let transactions = generate_transactions(&tx_list, anchor_tx.clone());
let transactions =
generate_transactions(proposal_event.meta.blobUsed, &tx_list, anchor_tx.clone());
// Do a sanity check using the transactions returned by the node
println!("Block transactions: {:?}", block.transactions.len());
assert!(
@@ -202,112 +203,11 @@ pub fn preflight(
})
}

-const BLOB_FIELD_ELEMENT_NUM: usize = 4096;
-const BLOB_FIELD_ELEMENT_BYTES: usize = 32;
-const BLOB_DATA_CAPACITY: usize = BLOB_FIELD_ELEMENT_NUM * BLOB_FIELD_ELEMENT_BYTES;
-const BLOB_VERSION_OFFSET: usize = 1;
-const BLOB_ENCODING_VERSION: u8 = 0;
-const MAX_BLOB_DATA_SIZE: usize = (4 * 31 + 3) * 1024 - 4;

-// decoding https://github.com/ethereum-optimism/optimism/blob/develop/op-service/eth/blob.go
-fn decode_blob_data(blob_str: &str) -> Vec<u8> {
-let blob_buf: Vec<u8> = match hex::decode(blob_str.to_lowercase().trim_start_matches("0x")) {
fn blob_to_bytes(blob_str: &str) -> Vec<u8> {
match hex::decode(blob_str.to_lowercase().trim_start_matches("0x")) {
Ok(b) => b,
Err(_) => return Vec::new(),
-};

[The remaining 93 deleted lines, the body of `decode_blob_data` and the `decode_field_element` and `reassemble_bytes` helpers, are identical to the code added to `lib/src/taiko_utils.rs` above.]

fn calc_blob_versioned_hash(blob_str: &str) -> [u8; 32] {
@@ -319,6 +219,7 @@ fn calc_blob_versioned_hash(blob_str: &str) -> [u8; 32] {
let version_hash: [u8; 32] = kzg_to_versioned_hash(kzg_commit).0;
version_hash
}
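
// For reference (editor's sketch, not part of this commit): EIP-4844 defines
// the versioned hash as sha256(commitment) with the first byte replaced by
// the version tag 0x01, which is what `kzg_to_versioned_hash` computes here.
// A standalone equivalent, assuming the `sha2` crate is available:
#[allow(dead_code)]
fn versioned_hash_sketch(commitment: &[u8; 48]) -> [u8; 32] {
    use sha2::{Digest, Sha256};
    let mut hash: [u8; 32] = Sha256::digest(commitment).into();
    hash[0] = 0x01; // VERSIONED_HASH_VERSION_KZG
    hash
}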

fn get_blob_data(beacon_rpc_url: &str, block_id: u64) -> Result<GetBlobsResponse> {
let tokio_handle = tokio::runtime::Handle::current();
tokio_handle.block_on(async {
