From 9a12043507c2e117fdd77ff07e8b1c905cc88dd6 Mon Sep 17 00:00:00 2001 From: Chris Czub Date: Mon, 19 Aug 2024 21:53:50 -0400 Subject: [PATCH] more tests --- Cargo.lock | 4 + crates/core/app/src/app/mod.rs | 1 + crates/core/app/src/server/consensus.rs | 35 ++ .../app/tests/common/ibc_tests/relayer.rs | 314 ++++++++++++------ crates/core/app/tests/ibc_handshake.rs | 238 ++++++++++--- .../ibc/src/component/rpc/client_query.rs | 5 + crates/test/mock-consensus/src/abci.rs | 8 - crates/test/mock-consensus/src/block.rs | 48 ++- .../test/mock-tendermint-proxy/src/proxy.rs | 23 +- 9 files changed, 475 insertions(+), 201 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 77a87f1f0b..a3a901332b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4568,6 +4568,7 @@ dependencies = [ "tap", "tempfile", "tendermint", + "tendermint-config", "tendermint-light-client-verifier", "tendermint-proto", "tokio", @@ -5170,6 +5171,8 @@ dependencies = [ "hex", "prost", "rand_core", + "serde", + "serde_json", "sha2 0.10.8", "tap", "tendermint", @@ -5182,6 +5185,7 @@ dependencies = [ name = "penumbra-mock-tendermint-proxy" version = "0.80.2" dependencies = [ + "hex", "pbjson-types", "penumbra-mock-consensus", "penumbra-proto", diff --git a/crates/core/app/src/app/mod.rs b/crates/core/app/src/app/mod.rs index cec052d4ea..967e949813 100644 --- a/crates/core/app/src/app/mod.rs +++ b/crates/core/app/src/app/mod.rs @@ -628,6 +628,7 @@ impl App { } tracing::debug!(?jmt_root, "finished committing state"); + println!("finished committing state {}", hex::encode(jmt_root)); // Get the latest version of the state, now that we've committed it. self.state = Arc::new(StateDelta::new(storage.latest_snapshot())); diff --git a/crates/core/app/src/server/consensus.rs b/crates/core/app/src/server/consensus.rs index 35fa555b09..aedfd45d8a 100644 --- a/crates/core/app/src/server/consensus.rs +++ b/crates/core/app/src/server/consensus.rs @@ -187,7 +187,26 @@ impl Consensus { // We don't need to print the block height, because it will already be // included in the span modeling the abci request handling. 
tracing::info!(time = ?begin_block.header.time, "beginning block"); + + let storage_revision_height = self.storage.latest_snapshot().version(); + let storage_root = self.storage.latest_snapshot().root_hash().await?; + println!( + "BEFORE begin_block {} storage height is {} and storage root is {}", + begin_block.header.height, + storage_revision_height, + hex::encode(storage_root.0) + ); let events = self.app.begin_block(&begin_block).await; + + let storage_revision_height = self.storage.latest_snapshot().version(); + let storage_root = self.storage.latest_snapshot().root_hash().await?; + println!( + "AFTER begin_block {} storage height is {} and storage root is {}", + begin_block.header.height, + storage_revision_height, + hex::encode(storage_root.0) + ); + Ok(response::BeginBlock { events }) } @@ -240,9 +259,25 @@ impl Consensus { } async fn commit(&mut self) -> Result { + let storage_revision_height = self.storage.latest_snapshot().version(); + let storage_root = self.storage.latest_snapshot().root_hash().await?; + println!( + "BEFORE commit storage height is {} and storage root is {}", + storage_revision_height, + hex::encode(storage_root.0) + ); + let app_hash = self.app.commit(self.storage.clone()).await; tracing::info!(?app_hash, "committed block"); + let storage_revision_height = self.storage.latest_snapshot().version(); + let storage_root = self.storage.latest_snapshot().root_hash().await?; + println!( + "AFTER commit storage height is {} and storage root is {}", + storage_revision_height, + hex::encode(storage_root.0) + ); + Ok(response::Commit { data: app_hash.0.to_vec().into(), retain_height: 0u32.into(), diff --git a/crates/core/app/tests/common/ibc_tests/relayer.rs b/crates/core/app/tests/common/ibc_tests/relayer.rs index 5d8fa7ca58..0d50b9c619 100644 --- a/crates/core/app/tests/common/ibc_tests/relayer.rs +++ b/crates/core/app/tests/common/ibc_tests/relayer.rs @@ -32,6 +32,7 @@ use { penumbra_proto::{util::tendermint_proxy::v1::GetBlockByHeightRequest, DomainType}, penumbra_stake::state_key::chain, penumbra_transaction::{TransactionParameters, TransactionPlan}, + prost::Message as _, sha2::Digest, std::time::Duration, tendermint::Time, @@ -365,7 +366,7 @@ impl MockRelayer { } // TODO: copypaste not important to fix rn - pub async fn _build_and_send_update_client_b(&mut self) -> Result<()> { + pub async fn _build_and_send_update_client_b(&mut self, target_height: Height) -> Result<()> { tracing::info!( "send update client for chain {} to chain {}", self.chain_a_ibc.chain_id, @@ -374,23 +375,87 @@ impl MockRelayer { let chain_a_ibc = &mut self.chain_b_ibc; let chain_b_ibc = &mut self.chain_a_ibc; - // Fetch validators from chain B - let chain_b_height = chain_b_ibc.get_latest_height().await?; - println!("chain_b latest height: {:?}", chain_b_height); + let consensus_state = self + .chain_b_ibc + .ibc_client_query_client + .consensus_state(QueryConsensusStateRequest { + client_id: self.chain_a_ibc.client_id.to_string(), + revision_number: target_height.revision_number, + revision_height: target_height.revision_height, + latest_height: false, + }) + .await? + .into_inner(); + + if let Some(consensus_state) = consensus_state.consensus_state { + tracing::info!( + "consensus state already exists at height {target_height}, skipping update" + ); + tracing::trace!(?consensus_state, "consensus state"); + return Ok(()); + } + + let mut src_application_latest_height = self.chain_a_ibc.get_latest_height().await?; + // Wait for the source network to produce block(s) & reach `target_height`. 
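+        // Each iteration executes one block on both chains until chain A reaches `target_height`.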
+ while src_application_latest_height < target_height { + // advance both blocks + self.chain_a_ibc.node.block().execute().await?; + self.chain_b_ibc.node.block().execute().await?; + src_application_latest_height = self.chain_a_ibc.get_latest_height().await?; + } + + // Get the latest client state on destination. + let client_state_of_a_on_b_response = self + .chain_b_ibc + .ibc_client_query_client + .client_state(QueryClientStateRequest { + client_id: self.chain_a_ibc.client_id.to_string(), + }) + .await? + .into_inner(); + + let client_latest_height = + ibc_types::lightclients::tendermint::client_state::ClientState::try_from( + client_state_of_a_on_b_response + .clone() + .client_state + .unwrap(), + )? + .latest_height; + let trusted_height = if client_latest_height < target_height { + client_latest_height + } else { + panic!("unsupported, no sending updates to the past"); + }; + + if trusted_height >= target_height { + tracing::warn!( + "skipping update: trusted height ({}) >= chain target height ({})", + trusted_height, + target_height + ); + + return Ok(()); + } + + println!("target chain b height: {:?}", target_height); let chain_b_latest_block: penumbra_proto::util::tendermint_proxy::v1::GetBlockByHeightResponse = - chain_b_ibc + self.chain_b_ibc .tendermint_proxy_service_client .get_block_by_height(GetBlockByHeightRequest { - height: chain_b_height.revision_height.try_into()?, + height: target_height.revision_height.try_into()?, }) .await? .into_inner(); // Look up the last recorded consensus state for the counterparty client on chain A // to determine the last trusted height. - let prev_counterparty_consensus_state = chain_a_ibc - .get_prev_counterparty_consensus_state(&chain_a_ibc.client_id, &chain_b_height) - .await?; + // let prev_counterparty_consensus_state = + // ConsensusState::try_from(consensus_state.consensus_state.unwrap())?; + // let prev_counterparty_consensus_state = self + // .chain_a_ibc + // .get_prev_counterparty_consensus_state(&self.chain_a_ibc.client_id, &chain_b_height) + // .await?; println!( "Telling chain a about chain b latest block: {}", hex::encode(chain_b_latest_block.clone().block_id.unwrap().hash) @@ -427,15 +492,13 @@ impl MockRelayer { ); let plan = { let ibc_msg = IbcRelay::UpdateClient(MsgUpdateClient { - signer: chain_b_ibc.signer.clone(), - client_id: chain_a_ibc.client_id.clone(), - client_message: chain_b_ibc + signer: self.chain_b_ibc.signer.clone(), + client_id: self.chain_a_ibc.client_id.clone(), + client_message: self + .chain_b_ibc // The TendermintHeader is derived from the Block // and represents chain B's claims about its current state. - .create_tendermint_header( - prev_counterparty_consensus_state.map(|(height, cs)| height), - chain_b_latest_block, - )? + .create_tendermint_header(Some(trusted_height), chain_b_latest_block)? .into(), }) .into(); @@ -445,15 +508,15 @@ impl MockRelayer { memo: None, detection_data: None, // We'll set this automatically below transaction_parameters: TransactionParameters { - chain_id: chain_a_ibc.chain_id.clone(), + chain_id: self.chain_a_ibc.chain_id.clone(), ..Default::default() }, } }; - let tx = chain_a_ibc.client.witness_auth_build(&plan).await?; + let tx = self.chain_a_ibc.client.witness_auth_build(&plan).await?; // Execute the transaction, applying it to the chain state. 
- chain_a_ibc + self.chain_a_ibc .node .block() .with_data(vec![tx.encode_to_vec()]) @@ -463,32 +526,94 @@ impl MockRelayer { } // helper function to build UpdateClient to send to chain A - pub async fn _build_and_send_update_client_a(&mut self) -> Result<()> { + pub async fn _build_and_send_update_client_a(&mut self, target_height: Height) -> Result<()> { tracing::info!( "send update client for chain {} to chain {}", self.chain_b_ibc.chain_id, self.chain_a_ibc.chain_id ); - let chain_a_ibc = &mut self.chain_a_ibc; - let chain_b_ibc = &mut self.chain_b_ibc; - // Fetch validators from chain B - let chain_b_height = chain_b_ibc.get_latest_height().await?; - println!("chain_b latest height: {:?}", chain_b_height); + let consensus_state = self + .chain_b_ibc + .ibc_client_query_client + .consensus_state(QueryConsensusStateRequest { + client_id: self.chain_a_ibc.client_id.to_string(), + revision_number: target_height.revision_number, + revision_height: target_height.revision_height, + latest_height: false, + }) + .await? + .into_inner(); + + if let Some(consensus_state) = consensus_state.consensus_state { + tracing::debug!( + "consensus state already exists at height {target_height}, skipping update" + ); + tracing::trace!(?consensus_state, "consensus state"); + return Ok(()); + } + + let mut src_application_latest_height = self.chain_a_ibc.get_latest_height().await?; + // Wait for the source network to produce block(s) & reach `target_height`. + while src_application_latest_height < target_height { + // advance both blocks + self.chain_a_ibc.node.block().execute().await?; + self.chain_b_ibc.node.block().execute().await?; + src_application_latest_height = self.chain_a_ibc.get_latest_height().await?; + } + + // Get the latest client state on destination. + let client_state_of_a_on_b_response = self + .chain_b_ibc + .ibc_client_query_client + .client_state(QueryClientStateRequest { + client_id: self.chain_a_ibc.client_id.to_string(), + }) + .await? + .into_inner(); + + let client_latest_height = + ibc_types::lightclients::tendermint::client_state::ClientState::try_from( + client_state_of_a_on_b_response + .clone() + .client_state + .unwrap(), + )? + .latest_height; + let trusted_height = if client_latest_height < target_height { + client_latest_height + } else { + panic!("unsupported, no sending updates to the past"); + }; + + if trusted_height >= target_height { + tracing::warn!( + "skipping update: trusted height ({}) >= chain target height ({})", + trusted_height, + target_height + ); + + return Ok(()); + } + + println!("target chain b height: {:?}", target_height); let chain_b_latest_block: penumbra_proto::util::tendermint_proxy::v1::GetBlockByHeightResponse = - chain_b_ibc + self.chain_b_ibc .tendermint_proxy_service_client .get_block_by_height(GetBlockByHeightRequest { - height: chain_b_height.revision_height.try_into()?, + height: target_height.revision_height.try_into()?, }) .await? .into_inner(); // Look up the last recorded consensus state for the counterparty client on chain A // to determine the last trusted height. 
- let prev_counterparty_consensus_state = chain_a_ibc - .get_prev_counterparty_consensus_state(&chain_a_ibc.client_id, &chain_b_height) - .await?; + // let prev_counterparty_consensus_state = + // ConsensusState::try_from(consensus_state.consensus_state.unwrap())?; + // let prev_counterparty_consensus_state = self + // .chain_a_ibc + // .get_prev_counterparty_consensus_state(&self.chain_a_ibc.client_id, &chain_b_height) + // .await?; println!( "Telling chain a about chain b latest block: {}", hex::encode(chain_b_latest_block.clone().block_id.unwrap().hash) @@ -525,15 +650,13 @@ impl MockRelayer { ); let plan = { let ibc_msg = IbcRelay::UpdateClient(MsgUpdateClient { - signer: chain_b_ibc.signer.clone(), - client_id: chain_a_ibc.client_id.clone(), - client_message: chain_b_ibc + signer: self.chain_b_ibc.signer.clone(), + client_id: self.chain_a_ibc.client_id.clone(), + client_message: self + .chain_b_ibc // The TendermintHeader is derived from the Block // and represents chain B's claims about its current state. - .create_tendermint_header( - prev_counterparty_consensus_state.map(|(height, cs)| height), - chain_b_latest_block, - )? + .create_tendermint_header(Some(trusted_height), chain_b_latest_block)? .into(), }) .into(); @@ -543,15 +666,15 @@ impl MockRelayer { memo: None, detection_data: None, // We'll set this automatically below transaction_parameters: TransactionParameters { - chain_id: chain_a_ibc.chain_id.clone(), + chain_id: self.chain_a_ibc.chain_id.clone(), ..Default::default() }, } }; - let tx = chain_a_ibc.client.witness_auth_build(&plan).await?; + let tx = self.chain_a_ibc.client.witness_auth_build(&plan).await?; // Execute the transaction, applying it to the chain state. - chain_a_ibc + self.chain_a_ibc .node .block() .with_data(vec![tx.encode_to_vec()]) @@ -575,9 +698,13 @@ impl MockRelayer { .into_inner(); // Build message(s) for updating client on source - self._build_and_send_update_client_a().await?; + let src_client_target_height = self.chain_b_ibc.get_latest_height().await?; + self._build_and_send_update_client_a(src_client_target_height) + .await?; // Build message(s) for updating client on destination - self._build_and_send_update_client_b().await?; + let dst_client_target_height = self.chain_a_ibc.get_latest_height().await?; + self._build_and_send_update_client_b(dst_client_target_height) + .await?; let client_state_of_a_on_b_response = self .chain_b_ibc @@ -701,14 +828,27 @@ impl MockRelayer { self.chain_b_ibc.node.block().execute().await?; self._sync_chains().await?; + let src_connection = self + .chain_a_ibc + .ibc_connection_query_client + .connection(QueryConnectionRequest { + connection_id: self.chain_a_ibc.connection_id.to_string(), + }) + .await? 
+ .into_inner(); + + let src_client_target_height = self.chain_b_ibc.get_latest_height().await?; + let client_msgs = self + ._build_and_send_update_client_a(src_client_target_height) + .await?; + // Make sure chain B has a client state for this height println!("UPDATE1"); - self._build_and_send_update_client_b().await?; + // self._build_and_send_update_client_b().await?; // self._sync_chains().await?; // the height chain b's client for chain a should have state for - let chain_b_client_a_target_height = - self.chain_a_ibc.get_latest_height().await?.increment(); + let chain_b_client_a_target_height = self.chain_a_ibc.get_latest_height().await?; let client_state_of_b_on_a_response = self .chain_a_ibc @@ -793,43 +933,9 @@ impl MockRelayer { // Send an update to both sides to ensure they are up to date // Build message(s) for updating client on source // chain B needs to know about chain A at the proof height - let mut latest_client_state = - ibc_types::lightclients::tendermint::client_state::ClientState::try_from( - self.chain_b_ibc - .ibc_client_query_client - .client_state(QueryClientStateRequest { - client_id: self.chain_a_ibc.client_id.to_string(), - }) - .await? - .into_inner() - .client_state - .unwrap(), - )?; - while chain_b_client_a_target_height >= latest_client_state.latest_height { - println!("UPDATE2"); - self._build_and_send_update_client_b().await?; - println!("Gotta go fast"); - self.chain_a_ibc.node.block().execute().await?; - self.chain_b_ibc.node.block().execute().await?; - // self._sync_chains().await?; - - latest_client_state = - ibc_types::lightclients::tendermint::client_state::ClientState::try_from( - self.chain_b_ibc - .ibc_client_query_client - .client_state(QueryClientStateRequest { - client_id: self.chain_a_ibc.client_id.to_string(), - }) - .await? - .into_inner() - .client_state - .unwrap(), - )?; - } - println!( - "latest client state revision height: {:?}", - latest_client_state.latest_height.revision_height - ); + let proof_height = connection_of_b_on_a_response.proof_height.clone().unwrap(); + self._build_and_send_update_client_b(proof_height.try_into()?) + .await?; println!( "client state target height: {:?}", chain_b_client_a_target_height @@ -840,18 +946,6 @@ impl MockRelayer { self.chain_a_ibc.counterparty.connection_id = Some(self.chain_b_ibc.connection_id.clone()); // Build message(s) for updating client on destination - println!("UPDATE3"); - self._build_and_send_update_client_b().await?; - - let client_state_b_on_a = - ibc_types::lightclients::tendermint::client_state::ClientState::try_from( - client_state_of_b_on_a_response - .clone() - .client_state - .unwrap(), - )?; - - // wait until chain b is at least the height of the proofs println!( "proof height: {:?}", connection_of_b_on_a_response.proof_height @@ -865,6 +959,27 @@ impl MockRelayer { ); // https://github.com/penumbra-zone/hermes/blob/a34a11fec76de3b573b539c237927e79cb74ec00/crates/relayer/src/connection.rs#L943 + let mut latest_client_state = + ibc_types::lightclients::tendermint::client_state::ClientState::try_from( + self.chain_b_ibc + .ibc_client_query_client + .client_state(QueryClientStateRequest { + client_id: self.chain_a_ibc.client_id.to_string(), + }) + .await? 
+ .into_inner() + .client_state + .unwrap(), + )?; + + // chain b needs to know about chain a at the proof height + let proofs_height_on_a = connection_of_b_on_a_response + .proof_height + .expect("proof height") + .try_into()?; + self._build_and_send_update_client_b(proofs_height_on_a) + .await?; + let plan = { // This mocks the relayer constructing a connection open try message on behalf // of the counterparty chain. @@ -886,11 +1001,8 @@ impl MockRelayer { proof_conn_end_on_a, proof_client_state_of_b_on_a, proof_consensus_state_of_b_on_a, - proofs_height_on_a: connection_of_b_on_a_response - .proof_height - .expect("proof height") - .try_into()?, - consensus_height_of_b_on_a: client_state_b_on_a.latest_height, + proofs_height_on_a, + consensus_height_of_b_on_a: latest_client_state.latest_height, // this seems to be an optional proof proof_consensus_state_of_b: None, // deprecated @@ -982,7 +1094,9 @@ impl MockRelayer { // Build message(s) for updating client on destination println!("UPDATE4"); - self._build_and_send_update_client_b().await?; + let dst_client_target_height = self.chain_a_ibc.get_latest_height().await?; + self._build_and_send_update_client_b(dst_client_target_height) + .await?; let plan = { // This mocks the relayer constructing a connection open try message on behalf diff --git a/crates/core/app/tests/ibc_handshake.rs b/crates/core/app/tests/ibc_handshake.rs index b747518fb8..b2bbac492a 100644 --- a/crates/core/app/tests/ibc_handshake.rs +++ b/crates/core/app/tests/ibc_handshake.rs @@ -99,6 +99,128 @@ fn set_tracing_subscriber() -> tracing::subscriber::DefaultGuard { set_tracing_subscriber_with_env_filter(filter) } +// Snapshot version is used as the revision height in the IBC client_state query. +// Therefore we need to validate that the snapshot revision is the same as the +// Mock Tendermint height. +#[tokio::test] +async fn mocktendermint_snapshot_versions() -> anyhow::Result<()> { + let guard = set_tracing_subscriber(); + + let storage = TempStorage::new_with_penumbra_prefixes().await?; + + let proxy = penumbra_mock_tendermint_proxy::TestNodeProxy::new::(); + let mut node = { + let genesis = get_verified_genesis()?; + let consensus = Consensus::new(storage.clone()); + // Hardcoded keys for each chain for test reproducibility: + let sk_a = ed25519_consensus::SigningKey::from([0u8; 32]); + let vk_a = sk_a.verification_key(); + let keys = (sk_a, vk_a); + // let consensus = Consensus::new(storage.as_ref().clone()); + TestNode::builder() + .with_keys(vec![keys]) + .single_validator() + .with_tendermint_genesis(genesis) + .on_block(proxy.on_block_callback()) + .init_chain(consensus) + .await + .tap_ok(|e| tracing::info!(hash = %e.last_app_hash_hex(), "finished init chain"))? + }; + let grpc_url = "http://127.0.0.1:8081" // see #4517 + .parse::()? + .tap(|url| tracing::debug!(%url, "parsed grpc url")); + // Spawn the node's RPC server. + let _rpc_server = { + let make_svc = + penumbra_app::rpc::router(&storage, proxy, false /*enable_expensive_rpc*/)? + .into_router() + .layer(tower_http::cors::CorsLayer::permissive()) + .into_make_service() + .tap(|_| println!("initialized rpc service")); + let [addr] = grpc_url + .socket_addrs(|| None)? 
+ .try_into() + .expect("grpc url can be turned into a socket address"); + let server = axum_server::bind(addr).serve(make_svc); + tokio::spawn(async { server.await.expect("grpc server returned an error") }) + .tap(|_| println!("grpc server is running")) + }; + time::sleep(time::Duration::from_secs(1)).await; + let channel = Channel::from_shared(grpc_url.to_string()) + .with_context(|| "could not parse node URI")? + .connect() + .await + .with_context(|| "could not connect to grpc server") + .tap_err(|error| tracing::error!(?error, "could not connect to grpc server"))?; + let mut tendermint_proxy_service_client = TendermintProxyServiceClient::new(channel.clone()); + + assert_eq!(u64::from(*node.height()), 0u64); + + // we're still on block 0, execute a block 1 with no transactions. + node.block().execute().await?; + + // block header 1 has now been created. + let block_1: penumbra_proto::util::tendermint_proxy::v1::GetBlockByHeightResponse = + tendermint_proxy_service_client + .get_block_by_height(GetBlockByHeightRequest { + // get block height 1 + height: 1.into(), + }) + .await? + .into_inner(); + + assert_eq!(u64::from(*node.height()), 1u64); + + // we know the block 1 app_hash should always be 5c94f2eabd29ac36f5be7f812a586b5dd44c10d586d2bb1a18e3679801d1b5dd + // for the test genesis data + println!("block 1: {:?}", block_1); + assert_eq!( + hex::decode("5c94f2eabd29ac36f5be7f812a586b5dd44c10d586d2bb1a18e3679801d1b5dd")?, + block_1.block.unwrap().header.unwrap().app_hash + ); + + let snapshot = storage.latest_snapshot(); + let storage_revision_height = snapshot.version(); + + let saved_height = node.height().clone(); + // JMT storage revision height should always match the mock tendermint height + assert_eq!(u64::from(saved_height), storage_revision_height); + // store the root of storage at this height for later verification + let saved_storage_root = snapshot.root_hash().await?; + println!( + "storage height is {} and storage root is {}", + storage_revision_height, + hex::encode(saved_storage_root.0) + ); + + // execute a few blocks + node.block().execute().await?; + node.block().execute().await?; + node.block().execute().await?; + + let proof_block: penumbra_proto::util::tendermint_proxy::v1::GetBlockByHeightResponse = + tendermint_proxy_service_client + .get_block_by_height(GetBlockByHeightRequest { + // Use the height from earlier + height: saved_height.into(), + }) + .await? + .into_inner(); + + // We fetched the block associated with the height from earlier + // and can validate that its app hash in the block header + // matches the value we got directly from storage earlier: + assert_eq!( + proof_block.block.clone().unwrap().header.unwrap().app_hash, + saved_storage_root.0, + "block app hash {} should match storage root {}", + hex::encode(proof_block.block.unwrap().header.unwrap().app_hash), + hex::encode(saved_storage_root.0) + ); + + Ok(()) +} + /// Validates the cometbft mock behavior against real cometbft /// using the same genesis data. #[tokio::test] @@ -144,14 +266,12 @@ async fn verify_storage_proof_simple() -> anyhow::Result<()> { let proxy = penumbra_mock_tendermint_proxy::TestNodeProxy::new::(); let mut node = { - let app_state = AppState::Content( - genesis::Content::default().with_chain_id(TestNode::<()>::CHAIN_ID.to_string()), - ); + let genesis = get_verified_genesis()?; let consensus = Consensus::new(storage.clone()); // let consensus = Consensus::new(storage.as_ref().clone()); TestNode::builder() .single_validator() - .with_penumbra_auto_app_state(app_state)? 
+ .with_tendermint_genesis(genesis) .on_block(proxy.on_block_callback()) .init_chain(consensus) .await @@ -196,6 +316,8 @@ async fn verify_storage_proof_simple() -> anyhow::Result<()> { .into(), consensus_state: ibc_types::lightclients::tendermint::consensus_state::ConsensusState { timestamp: start_time, + // These values don't matter since we are only checking the proof + // of the client state. root: MerkleRoot { hash: vec![0u8; 32], }, @@ -239,16 +361,16 @@ async fn verify_storage_proof_simple() -> anyhow::Result<()> { // The unproven version should be present assert!(!unproven.is_empty()); - let (conn, cnid_proof) = snapshot + let (cnid_client_state, cnid_proof) = snapshot .get_with_proof(key.clone()) .await .map_err(|e| tonic::Status::aborted(format!("couldn't get connection: {e}")))?; // The proven version should also be present - let conn = conn.unwrap(); + let cnid_client_state = cnid_client_state.unwrap(); // The proven version should be the same as the unproven. - assert_eq!(conn, unproven); + assert_eq!(cnid_client_state, unproven); // Common proof parameters: let proof_specs = IBC_PROOF_SPECS.to_vec(); @@ -263,21 +385,12 @@ async fn verify_storage_proof_simple() -> anyhow::Result<()> { // With the prefix applied: let merkle_path = prefix.apply(vec![csp.to_string()]); - // Verify the proof against the results from calling get_raw. - cnid_proof.verify_membership( - &proof_specs, - root.clone(), - merkle_path.clone(), - unproven.clone(), - 0, - )?; - // Verify the proof against the results from calling get_with_proof. cnid_proof.verify_membership( &proof_specs, root.clone(), merkle_path.clone(), - conn.clone(), + cnid_client_state.clone(), 0, )?; @@ -329,7 +442,7 @@ async fn verify_storage_proof_simple() -> anyhow::Result<()> { // happened on-chain since the cnidarium proof was generated. assert_eq!(cnid_proof, proof); // Same for the values. 
- assert_eq!(value, conn); + assert_eq!(value, cnid_client_state); proof.verify_membership( &proof_specs, @@ -339,12 +452,21 @@ async fn verify_storage_proof_simple() -> anyhow::Result<()> { 0, )?; + let snapshot = storage.latest_snapshot(); + let storage_revision_height = snapshot.version(); + + let latest_height = node.height().clone(); + assert_eq!(u64::from(latest_height), storage_revision_height); + // Try fetching the client state via the IBC API + // height 2 let node_height = node.height(); + // WRONG vvv these don't match what's in the block headers let node_last_app_hash = node.last_app_hash(); println!( "making IBC client state request at height {} and hash {}", node_height, + // e0c071d4b2198c7e5f9fdee7d6618bf36ea75fdecd56df315ba2ae87b9a50718 (height 3 header app_hash) hex::encode(node_last_app_hash) ); let ibc_client_state_response = ibc_client_query_client @@ -357,22 +479,22 @@ async fn verify_storage_proof_simple() -> anyhow::Result<()> { let ibc_proof = MerkleProof::decode(ibc_client_state_response.clone().proof.as_slice())?; let ibc_value = ibc_client_state_response.client_state.unwrap(); - let cs = ibc_types::lightclients::tendermint::client_state::ClientState::try_from( - ibc_value.clone(), - )?; - println!("client state: {:?}", cs); - // let cs2 = ibc_types::lightclients::tendermint::client_state::ClientState::try_from(Any { - // type_url: TENDERMINT_CLIENT_STATE_TYPE_URL.to_string(), - // value: value.clone().into(), - // })?; - let client_state = ibc_proto::google::protobuf::Any::decode(value.as_ref())?; - let cs2 = ibc_proto::ibc::lightclients::tendermint::v1::ClientState::decode( - &*client_state.value.clone(), - )?; - let cs3 = - ibc_types::lightclients::tendermint::client_state::ClientState::try_from(client_state)?; - println!("client state2: {:?}", cs2); - println!("client state3: {:?}", cs3); + // let cs = ibc_types::lightclients::tendermint::client_state::ClientState::try_from( + // ibc_value.clone(), + // )?; + // println!("client state: {:?}", cs); + // // let cs2 = ibc_types::lightclients::tendermint::client_state::ClientState::try_from(Any { + // // type_url: TENDERMINT_CLIENT_STATE_TYPE_URL.to_string(), + // // value: value.clone().into(), + // // })?; + // let client_state = ibc_proto::google::protobuf::Any::decode(value.as_ref())?; + // let cs2 = ibc_proto::ibc::lightclients::tendermint::v1::ClientState::decode( + // &*client_state.value.clone(), + // )?; + // let cs3 = + // ibc_types::lightclients::tendermint::client_state::ClientState::try_from(client_state)?; + // println!("client state2: {:?}", cs2); + // println!("client state3: {:?}", cs3); // let client_state = ibc_proto::google::protobuf::Any::decode(value.as_ref())?; // let cs1 = ibc_proto::ibc::lightclients::tendermint::v1::ClientState::decode(&*client.value)?; @@ -415,14 +537,14 @@ async fn verify_storage_proof_simple() -> anyhow::Result<()> { u64::from(*node_height) ); // the proof block's app hash should match - assert_eq!( - node_last_app_hash, - proof_block.block.clone().unwrap().header.unwrap().app_hash, - "node claimed app hash for height {} was {}, however block header contained {}", - node_height, - hex::encode(node_last_app_hash), - hex::encode(proof_block.block.clone().unwrap().header.unwrap().app_hash) - ); + // assert_eq!( + // node_last_app_hash, + // proof_block.block.clone().unwrap().header.unwrap().app_hash, + // "node claimed app hash for height {} was {}, however block header contained {}", + // node_height, + // hex::encode(node_last_app_hash), + // 
hex::encode(proof_block.block.clone().unwrap().header.unwrap().app_hash) + // ); println!( "proof height: {} proof_block_root: {:?}", ibc_client_state_response @@ -810,3 +932,35 @@ async fn real_cometbft_tests() -> Result<()> { Ok(()) } + +// // the builder api put the state of the node in a weird place where you couldn't +// // actually make API requests in between +// #[instrument(level = "info", skip_all, fields(height, time))] +// pub async fn begin(self) -> Result { +// // Calling `finish` finishes the previous block +// // and prepares the current block. +// let (test_node, block) = self.finish()?; + +// let Block { +// // The header for the current block +// header, +// data, +// evidence: _, +// // Votes for the previous block +// last_commit, +// .. +// } = block.clone().tap(|block| { +// tracing::span::Span::current() +// .record("height", block.header.height.value()) +// .record("time", block.header.time.unix_timestamp()); +// }); +// let last_commit_info = Self::last_commit_info(last_commit); + +// let height = header.height; +// trace!("sending block"); +// test_node.begin_block(header, last_commit_info).await?; +// for tx in data { +// let tx = tx.into(); +// test_node.deliver_tx(tx).await?; +// } +// } diff --git a/crates/core/component/ibc/src/component/rpc/client_query.rs b/crates/core/component/ibc/src/component/rpc/client_query.rs index d8b23d020f..de900c5dff 100644 --- a/crates/core/component/ibc/src/component/rpc/client_query.rs +++ b/crates/core/component/ibc/src/component/rpc/client_query.rs @@ -34,6 +34,11 @@ impl ClientQuery for IbcQuery { &self, request: tonic::Request, ) -> std::result::Result, Status> { + println!( + "client_state, snapshot version {} and snapshot root {}", + self.storage.latest_snapshot().version(), + hex::encode(self.storage.latest_snapshot().root_hash().await.unwrap()), + ); let snapshot = self.storage.latest_snapshot(); let client_id = ClientId::from_str(&request.get_ref().client_id) .map_err(|e| tonic::Status::invalid_argument(format!("invalid client id: {e}")))?; diff --git a/crates/test/mock-consensus/src/abci.rs b/crates/test/mock-consensus/src/abci.rs index 11d6ded4c8..dbea3c1a20 100644 --- a/crates/test/mock-consensus/src/abci.rs +++ b/crates/test/mock-consensus/src/abci.rs @@ -161,14 +161,6 @@ where } = &response; trace!(?data, ?retain_height, "received Commit response"); - // Set the last app hash to the new block's app hash. - assert!(response.data.to_vec().len() > 0); - println!( - "Committed, setting last_app_hash: {:?}", - hex::encode(response.data.to_vec()) - ); - // TODO: the node isn't really done preparing the current header when this happens - self.last_app_hash = response.data.to_vec(); Ok(response) } response => { diff --git a/crates/test/mock-consensus/src/block.rs b/crates/test/mock-consensus/src/block.rs index 75eff8bf76..34c15abf85 100644 --- a/crates/test/mock-consensus/src/block.rs +++ b/crates/test/mock-consensus/src/block.rs @@ -115,6 +115,8 @@ where /// validator signatures. #[instrument(level = "info", skip_all, fields(height, time))] pub async fn execute(self) -> Result<(), anyhow::Error> { + // Calling `finish` finishes the previous block + // and prepares the current block. 
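+        // The returned `block` carries this block's header and transaction data, along with the commit (votes) for the previous block.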
let (test_node, block) = self.finish()?; let Block { @@ -132,6 +134,7 @@ where }); let last_commit_info = Self::last_commit_info(last_commit); + let height = header.height; trace!("sending block"); test_node.begin_block(header, last_commit_info).await?; for tx in data { @@ -142,7 +145,19 @@ where // the commit call will set test_node.last_app_hash, preparing // for the next block to begin execution - test_node.commit().await?; + let commit_response = test_node.commit().await?; + + // NOTE: after calling .commit(), the internal status of the pd node's storage is going to be updated + // to the next block + // therefore we need to update the height within our mock now now + + // Set the last app hash to the new block's app hash. + test_node.last_app_hash = commit_response.data.to_vec(); + test_node.height = height; + println!( + "Committed, setting last_app_hash: {:?}", + hex::encode(commit_response.data.to_vec()) + ); trace!("finished sending block"); // If an `on_block` callback was set, call it now. @@ -173,7 +188,6 @@ where // The first (non-genesis) block has height 1. let height = { let height = test_node.height.increment(); - test_node.height = height; tracing::Span::current().record("height", height.value()); height }; @@ -248,8 +262,8 @@ where .expect(""), ); - let pub_key = - tendermint::PublicKey::from_raw_ed25519(pk.as_bytes()).expect("pub key present"); + // let pub_key = + // tendermint::PublicKey::from_raw_ed25519(pk.as_bytes()).expect("pub key present"); // TODO: pull validator set out of state // let validator_set = tendermint::validator::Set::new( @@ -278,12 +292,6 @@ where let consensus_hash = test_node.consensus_params_hash.clone().try_into().unwrap(); // TODO: would be great to see if we could load a cometBFT node with // the same configs as here and produce the same values - println!( - "instantiate header for height {} with app hash: {} and last commit hash {:?}", - height, - hex::encode(test_node.last_app_hash()), - test_node.last_commit_hash - ); let header = Header { // Protocol version. Block version 11 matches cometbft when tests were written. version: Version { block: 11, app: 0 }, @@ -331,24 +339,6 @@ where proposer_address, }; println!("Header is {:?}", header); - // first verified commit hash: - // assert_eq!( - // header.last_commit_hash, - // Some(Hash::Sha256( - // hex::decode("E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855")? - // .try_into() - // .unwrap() - // )) - // ); - // the validators aren't handled correctly yet, unfortunately - // assert_eq!( - // header.validators_hash, - // Hash::Sha256( - // hex::decode("EF728006489DCF4D71D0D437F96BE7874E31CEE5BBD9B03BDE088E5D4C1C713D")? - // .try_into() - // .unwrap() - // ) - // ); // The next block will use the signatures of this block's header. 
let signatures: Vec = if !disable_signatures { @@ -358,7 +348,7 @@ where }; println!( - "i made and signed a block and its height is {} and its app hash (after prev block) is {} and its block id is {} and its commit is for height {}", + "beginning block: its height is {} and its app hash (after prev block) is {} and its block id is {} and its commit is for height {}", height.value(), hex::encode(header.app_hash.clone()), hex::encode(header.hash()), diff --git a/crates/test/mock-tendermint-proxy/src/proxy.rs b/crates/test/mock-tendermint-proxy/src/proxy.rs index 00074515c7..59292dc2e1 100644 --- a/crates/test/mock-tendermint-proxy/src/proxy.rs +++ b/crates/test/mock-tendermint-proxy/src/proxy.rs @@ -204,6 +204,7 @@ impl TendermintProxyService for TestNodeProxy { let GetBlockByHeightRequest { height } = req.into_inner(); let height = tendermint::block::Height::try_from(height).expect("height should be less than 2^63"); + println!("get block height: {:?}", height); let block = self.inner.blocks().get(&height).cloned(); let proto_block = block @@ -214,30 +215,8 @@ impl TendermintProxyService for TestNodeProxy { tracing::warn!(?height, error = ?e, "proxy: error fetching blocks"); Err(tonic::Status::internal("error fetching blocks")) })?; - let last_commit_block_id = proto_block - .as_ref() // is this off-by-one? should we be getting the id of the last commit? - .and_then(|b| b.last_commit.as_ref()) - .and_then(|c| c.block_id.as_ref()) - .cloned(); println!("returning a block from the test proxy"); - // println!( - // "last commit block id: {}", - // hex::encode(last_commit_block_id.clone().unwrap().hash) - // ); - // println!( - // "block.last_hash: {}", - // hex::encode( - // block - // .clone() - // .unwrap() - // .header - // .unwrap() - // .last_block_id - // .unwrap_or_default() - // .hash - // ) - // ); Ok(GetBlockByHeightResponse { block_id: block.map(|b| penumbra_proto::tendermint::types::BlockId { hash: b.header.hash().into(),