Skip to content

Commit

Permalink
Merge branch 'base-token-gas-oracle' into gas_oracle_move_gas_to_u256
Browse files Browse the repository at this point in the history
  • Loading branch information
juan518munoz committed May 21, 2024
2 parents b4849b8 + c0af449 commit 9d1eb9b
Show file tree
Hide file tree
Showing 12 changed files with 349 additions and 58 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/build-contract-verifier-template.yml
Original file line number Diff line number Diff line change
Expand Up @@ -143,8 +143,8 @@ jobs:
- name: Show sccache stats
if: always()
run: |
ci_run sccache --show-stats
ci_run cat /tmp/sccache_log.txt
ci_run sccache --show-stats || true
ci_run cat /tmp/sccache_log.txt || true
create_manifest:
name: Create release manifest
Expand Down
4 changes: 2 additions & 2 deletions .github/workflows/build-core-template.yml
Original file line number Diff line number Diff line change
Expand Up @@ -152,8 +152,8 @@ jobs:
- name: Show sccache stats
if: always()
run: |
ci_run sccache --show-stats
ci_run cat /tmp/sccache_log.txt
ci_run sccache --show-stats || true
ci_run cat /tmp/sccache_log.txt || true
create_manifest:
name: Create release manifest
Expand Down
4 changes: 2 additions & 2 deletions .github/workflows/build-prover-template.yml
Original file line number Diff line number Diff line change
Expand Up @@ -145,8 +145,8 @@ jobs:
- name: Show sccache stats
if: always()
run: |
ci_run sccache --show-stats
ci_run cat /tmp/sccache_log.txt
ci_run sccache --show-stats || true
ci_run cat /tmp/sccache_log.txt || true
copy-images:
name: Copy images between docker registries
Expand Down
12 changes: 6 additions & 6 deletions .github/workflows/ci-core-reusable.yml
Original file line number Diff line number Diff line change
Expand Up @@ -120,8 +120,8 @@ jobs:
- name: Show sccache logs
if: always()
run: |
ci_run sccache --show-stats
ci_run cat /tmp/sccache_log.txt
ci_run sccache --show-stats || true
ci_run cat /tmp/sccache_log.txt || true
integration:
name: Integration (consensus=${{ matrix.consensus }}, base_token=${{ matrix.base_token }}, deployment_mode=${{ matrix.deployment_mode }})
Expand Down Expand Up @@ -268,8 +268,8 @@ jobs:
- name: Show sccache logs
if: always()
run: |
ci_run sccache --show-stats
ci_run cat /tmp/sccache_log.txt
ci_run sccache --show-stats || true
ci_run cat /tmp/sccache_log.txt || true
external-node:
name: External node (consensus=${{ matrix.consensus }}, base_token=${{ matrix.base_token }}, deployment_mode=${{ matrix.deployment_mode }})
Expand Down Expand Up @@ -389,5 +389,5 @@ jobs:
- name: Show sccache logs
if: always()
run: |
ci_run sccache --show-stats
ci_run cat /tmp/sccache_log.txt
ci_run sccache --show-stats || true
ci_run cat /tmp/sccache_log.txt || true

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

184 changes: 157 additions & 27 deletions core/lib/dal/src/blocks_dal.rs
Original file line number Diff line number Diff line change
Expand Up @@ -407,64 +407,101 @@ impl BlocksDal<'_, '_> {
) -> DalResult<()> {
match aggregation_type {
AggregatedActionType::Commit => {
sqlx::query!(
let instrumentation = Instrumented::new("set_eth_tx_id#commit")
.with_arg("number_range", &number_range)
.with_arg("eth_tx_id", &eth_tx_id);

let query = sqlx::query!(
r#"
UPDATE l1_batches
SET
eth_commit_tx_id = $1,
updated_at = NOW()
WHERE
number BETWEEN $2 AND $3
AND eth_commit_tx_id IS NULL
"#,
eth_tx_id as i32,
i64::from(number_range.start().0),
i64::from(number_range.end().0)
)
.instrument("set_eth_tx_id#commit")
.with_arg("number_range", &number_range)
.with_arg("eth_tx_id", &eth_tx_id)
.execute(self.storage)
.await?;
);
let result = instrumentation
.clone()
.with(query)
.execute(self.storage)
.await?;

if result.rows_affected() == 0 {
let err = instrumentation.constraint_error(anyhow::anyhow!(
"Update eth_commit_tx_id that is is not null is not allowed"
));
return Err(err);
}
}
AggregatedActionType::PublishProofOnchain => {
sqlx::query!(
let instrumentation = Instrumented::new("set_eth_tx_id#prove")
.with_arg("number_range", &number_range)
.with_arg("eth_tx_id", &eth_tx_id);
let query = sqlx::query!(
r#"
UPDATE l1_batches
SET
eth_prove_tx_id = $1,
updated_at = NOW()
WHERE
number BETWEEN $2 AND $3
AND eth_prove_tx_id IS NULL
"#,
eth_tx_id as i32,
i64::from(number_range.start().0),
i64::from(number_range.end().0)
)
.instrument("set_eth_tx_id#prove")
.with_arg("number_range", &number_range)
.with_arg("eth_tx_id", &eth_tx_id)
.execute(self.storage)
.await?;
);

let result = instrumentation
.clone()
.with(query)
.execute(self.storage)
.await?;

if result.rows_affected() == 0 {
let err = instrumentation.constraint_error(anyhow::anyhow!(
"Update eth_prove_tx_id that is is not null is not allowed"
));
return Err(err);
}
}
AggregatedActionType::Execute => {
sqlx::query!(
let instrumentation = Instrumented::new("set_eth_tx_id#execute")
.with_arg("number_range", &number_range)
.with_arg("eth_tx_id", &eth_tx_id);

let query = sqlx::query!(
r#"
UPDATE l1_batches
SET
eth_execute_tx_id = $1,
updated_at = NOW()
WHERE
number BETWEEN $2 AND $3
AND eth_execute_tx_id IS NULL
"#,
eth_tx_id as i32,
i64::from(number_range.start().0),
i64::from(number_range.end().0)
)
.instrument("set_eth_tx_id#execute")
.with_arg("number_range", &number_range)
.with_arg("eth_tx_id", &eth_tx_id)
.execute(self.storage)
.await?;
);

let result = instrumentation
.clone()
.with(query)
.execute(self.storage)
.await?;

if result.rows_affected() == 0 {
let err = instrumentation.constraint_error(anyhow::anyhow!(
"Update eth_execute_tx_id that is is not null is not allowed"
));
return Err(err);
}
}
}
Ok(())
Expand Down Expand Up @@ -2234,15 +2271,14 @@ mod tests {
use super::*;
use crate::{ConnectionPool, Core, CoreDal};

#[tokio::test]
async fn loading_l1_batch_header() {
let pool = ConnectionPool::<Core>::test_pool().await;
let mut conn = pool.connection().await.unwrap();
conn.protocol_versions_dal()
.save_protocol_version_with_tx(&ProtocolVersion::default())
/// Inserts a stub `eth_txs` row of the given aggregated action type so that
/// tests exercising `set_eth_tx_id` have a transaction id to reference.
async fn save_mock_eth_tx(action_type: AggregatedActionType, conn: &mut Connection<'_, Core>) {
    let mut eth_sender = conn.eth_sender_dal();
    let saved = eth_sender
        .save_eth_tx(1, vec![], action_type, Address::default(), 1, None, None)
        .await;
    saved.unwrap();
}

fn mock_l1_batch_header() -> L1BatchHeader {
let mut header = L1BatchHeader::new(
L1BatchNumber(1),
100,
Expand All @@ -2265,6 +2301,100 @@ mod tests {
header.l2_to_l1_messages.push(vec![22; 22]);
header.l2_to_l1_messages.push(vec![33; 33]);

header
}

#[tokio::test]
async fn set_tx_id_works_correctly() {
    // Contract under test: the first `set_eth_tx_id` per aggregated action
    // type succeeds, while a second call for the same batch range must be
    // rejected — overwriting an already-set eth tx id is not allowed.
    let pool = ConnectionPool::<Core>::test_pool().await;
    let mut conn = pool.connection().await.unwrap();

    conn.protocol_versions_dal()
        .save_protocol_version_with_tx(&ProtocolVersion::default())
        .await
        .unwrap();

    conn.blocks_dal()
        .insert_mock_l1_batch(&mock_l1_batch_header())
        .await
        .unwrap();

    // One mock eth tx per action type; the assertions below always point at
    // tx id 1 for the accepted update and tx id 2 for the rejected one.
    save_mock_eth_tx(AggregatedActionType::Commit, &mut conn).await;
    save_mock_eth_tx(AggregatedActionType::PublishProofOnchain, &mut conn).await;
    save_mock_eth_tx(AggregatedActionType::Execute, &mut conn).await;

    let batch_range = L1BatchNumber(1)..=L1BatchNumber(1);
    for action_type in [
        AggregatedActionType::Commit,
        AggregatedActionType::PublishProofOnchain,
        AggregatedActionType::Execute,
    ] {
        // First assignment for this action type goes through…
        let first_attempt = conn
            .blocks_dal()
            .set_eth_tx_id(batch_range.clone(), 1, action_type)
            .await;
        assert!(first_attempt.is_ok());

        // …but re-assigning over the already-set id must fail.
        let second_attempt = conn
            .blocks_dal()
            .set_eth_tx_id(batch_range.clone(), 2, action_type)
            .await;
        assert!(second_attempt.is_err());
    }
}

#[tokio::test]
async fn loading_l1_batch_header() {
let pool = ConnectionPool::<Core>::test_pool().await;
let mut conn = pool.connection().await.unwrap();
conn.protocol_versions_dal()
.save_protocol_version_with_tx(&ProtocolVersion::default())
.await
.unwrap();

let header = mock_l1_batch_header();

conn.blocks_dal()
.insert_mock_l1_batch(&header)
.await
Expand Down
6 changes: 3 additions & 3 deletions core/lib/db_connection/src/instrument.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ use crate::{
type ThreadSafeDebug<'a> = dyn fmt::Debug + Send + Sync + 'a;

/// Logged arguments for an SQL query.
#[derive(Debug, Default)]
#[derive(Debug, Clone, Default)]
struct QueryArgs<'a> {
inner: Vec<(&'static str, &'a ThreadSafeDebug<'a>)>,
}
Expand Down Expand Up @@ -180,7 +180,7 @@ impl ActiveCopy<'_> {
}
}

#[derive(Debug)]
#[derive(Debug, Clone)]
struct InstrumentedData<'a> {
name: &'static str,
location: &'static Location<'static>,
Expand Down Expand Up @@ -278,7 +278,7 @@ impl<'a> InstrumentedData<'a> {
/// included in the case of a slow query, plus the error info.
/// - Slow and erroneous queries are also reported using metrics (`dal.request.slow` and `dal.request.error`,
/// respectively). The query name is included as a metric label; args are not included for obvious reasons.
#[derive(Debug)]
#[derive(Debug, Clone)]
pub struct Instrumented<'a, Q> {
query: Q,
data: InstrumentedData<'a>,
Expand Down
Loading

0 comments on commit 9d1eb9b

Please sign in to comment.