Skip to content

Commit

Permalink
contracts: Fix printing the Schedule (paritytech#3021)
Browse files Browse the repository at this point in the history
Printing the `Schedule` is a useful debugging tool and general sanity
check. It is much easier to interpret than the raw weights.

The printing relied on using `println` and hence was only available from
the native runtime. This is no longer available. This is why in this PR
we switch to using `log` which works from Wasm.

I made sure that the `WeightDebug` is only derived when
`runtime-benchmarks` is set so that we don't increase the size of the
binary.

Some other changes were necessary to make this actually work inside the
runtime. For example, I needed to remove `format!` and usage of floats.

Please note that this removes the decimal from the number, because
truncating the fraction without using floats would not be easy and would
require custom code. I think the precision here is sufficient.

This is what the output looks like now:
```
Schedule {
    limits: Limits {
        event_topics: 4,
        globals: 256,
        locals: 1024,
        parameters: 128,
        memory_pages: 16,
        table_size: 4096,
        br_table_size: 256,
        subject_len: 32,
        payload_len: 16384,
        runtime_memory: 134217728,
    },
    instruction_weights: InstructionWeights {
        base: 2565,
        _phantom: PhantomData<kitchensink_runtime::Runtime>,
    },
    host_fn_weights: HostFnWeights {
        caller: 322 ns, 6 bytes,
        is_contract: 28 µs, 2684 bytes,
        code_hash: 29 µs, 2688 bytes,
        own_code_hash: 400 ns, 6 bytes,
        caller_is_origin: 176 ns, 3 bytes,
        caller_is_root: 158 ns, 3 bytes,
        address: 315 ns, 6 bytes,
        gas_left: 355 ns, 6 bytes,
        balance: 1 µs, 6 bytes,
        value_transferred: 314 ns, 6 bytes,
        minimum_balance: 318 ns, 6 bytes,
        block_number: 313 ns, 6 bytes,
        now: 325 ns, 6 bytes,
        weight_to_fee: 1 µs, 14 bytes,
        input: 263 ns, 6 bytes,
        input_per_byte: 989 ps, 0 bytes,
        r#return: 0 ps, 45 bytes,
        return_per_byte: 320 ps, 0 bytes,
        terminate: 1 ms, 5266 bytes,
        random: 1 µs, 10 bytes,
        deposit_event: 1 µs, 10 bytes,
        deposit_event_per_topic: 127 µs, 2508 bytes,
        deposit_event_per_byte: 501 ps, 0 bytes,
        debug_message: 226 ns, 7 bytes,
        debug_message_per_byte: 1 ns, 0 bytes,
        set_storage: 131 µs, 293 bytes,
        set_storage_per_new_byte: 576 ps, 0 bytes,
        set_storage_per_old_byte: 184 ps, 1 bytes,
        set_code_hash: 297 µs, 3090 bytes,
        clear_storage: 131 µs, 289 bytes,
        clear_storage_per_byte: 92 ps, 1 bytes,
        contains_storage: 29 µs, 289 bytes,
        contains_storage_per_byte: 213 ps, 1 bytes,
        get_storage: 29 µs, 297 bytes,
        get_storage_per_byte: 980 ps, 1 bytes,
        take_storage: 131 µs, 297 bytes,
        take_storage_per_byte: 921 ps, 1 bytes,
        transfer: 156 µs, 2520 bytes,
        call: 484 µs, 2721 bytes,
        delegate_call: 406 µs, 2637 bytes,
        call_transfer_surcharge: 607 µs, 5227 bytes,
        call_per_cloned_byte: 970 ps, 0 bytes,
        instantiate: 1 ms, 2731 bytes,
        instantiate_transfer_surcharge: 131 µs, 2549 bytes,
        instantiate_per_input_byte: 1 ns, 0 bytes,
        instantiate_per_salt_byte: 1 ns, 0 bytes,
        hash_sha2_256: 377 ns, 8 bytes,
        hash_sha2_256_per_byte: 1 ns, 0 bytes,
        hash_keccak_256: 767 ns, 8 bytes,
        hash_keccak_256_per_byte: 3 ns, 0 bytes,
        hash_blake2_256: 443 ns, 8 bytes,
        hash_blake2_256_per_byte: 1 ns, 0 bytes,
        hash_blake2_128: 440 ns, 8 bytes,
        hash_blake2_128_per_byte: 1 ns, 0 bytes,
        ecdsa_recover: 45 µs, 77 bytes,
        ecdsa_to_eth_address: 11 µs, 42 bytes,
        sr25519_verify: 41 µs, 112 bytes,
        sr25519_verify_per_byte: 5 ns, 1 bytes,
        reentrance_count: 174 ns, 3 bytes,
        account_reentrance_count: 248 ns, 40 bytes,
        instantiation_nonce: 154 ns, 3 bytes,
        add_delegate_dependency: 131 µs, 2606 bytes,
        remove_delegate_dependency: 130 µs, 2568 bytes,
    },
}
###############################################
Lazy deletion weight per key: Weight(ref_time: 126109302, proof_size: 70)
Lazy deletion keys per block: 15859
```
  • Loading branch information
athei authored Jan 26, 2024
1 parent d72fb58 commit 5e5341d
Show file tree
Hide file tree
Showing 7 changed files with 80 additions and 128 deletions.
1 change: 0 additions & 1 deletion substrate/frame/contracts/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,6 @@ std = [
"frame-system/std",
"log/std",
"pallet-balances?/std",
"pallet-contracts-proc-macro/full",
"pallet-insecure-randomness-collective-flip/std",
"pallet-proxy/std",
"pallet-timestamp/std",
Expand Down
13 changes: 13 additions & 0 deletions substrate/frame/contracts/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,19 @@ calls are reverted. Assuming correct error handling by contract A, A's other cal

One `ref_time` `Weight` is defined as one picosecond of execution time on the runtime's reference machine.

#### Schedule

The `Schedule` is where, among other things, the cost of every action a contract can do is defined. These costs are derived
from the benchmarks of this pallet. Instead of looking at the raw benchmark results it is advised to look at the `Schedule`
if one wants to manually inspect the performance characteristics. The `Schedule` can be printed like this:

```sh
RUST_LOG=runtime::contracts=info cargo run --features runtime-benchmarks --bin substrate-node -- benchmark pallet --extra -p pallet_contracts -e print_schedule
```

Please note that the `Schedule` will be printed multiple times. This is because we are (ab)using a benchmark to print
the struct.

### Revert Behaviour

Contract call failures are not cascading. When failures occur in a sub-call, they do not "bubble up", and the call will
Expand Down
1 change: 0 additions & 1 deletion substrate/frame/contracts/mock-network/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,6 @@ std = [
"frame-support/std",
"frame-system/std",
"pallet-balances/std",
"pallet-contracts-proc-macro/full",
"pallet-contracts/std",
"pallet-insecure-randomness-collective-flip/std",
"pallet-proxy/std",
Expand Down
6 changes: 0 additions & 6 deletions substrate/frame/contracts/proc-macro/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,3 @@ proc-macro = true
proc-macro2 = "1.0.56"
quote = "1.0.28"
syn = { version = "2.0.48", features = ["full"] }

[dev-dependencies]

[features]
# If set the full output is generated. Do NOT set when generating for wasm runtime.
full = []
135 changes: 49 additions & 86 deletions substrate/frame/contracts/proc-macro/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,41 +20,20 @@
//! Most likely you should use the [`#[define_env]`][`macro@define_env`] attribute macro which hides
//! boilerplate of defining external environment for a wasm module.
#![no_std]

extern crate alloc;

use alloc::{
collections::BTreeMap,
format,
string::{String, ToString},
vec::Vec,
};
use core::cmp::Reverse;
use proc_macro::TokenStream;
use proc_macro2::{Span, TokenStream as TokenStream2};
use quote::{quote, quote_spanned, ToTokens};
use syn::{
parse_macro_input, punctuated::Punctuated, spanned::Spanned, token::Comma, Data, DeriveInput,
FnArg, Ident,
Fields, FnArg, Ident,
};

/// This derives `Debug` for a struct where each field must be of some numeric type.
/// It interprets each field as its represents some weight and formats it as times so that
/// it is readable by humans.
#[proc_macro_derive(WeightDebug)]
pub fn derive_weight_debug(input: TokenStream) -> TokenStream {
derive_debug(input, format_weight)
}

/// This is basically identical to the std libs Debug derive but without adding any
/// bounds to existing generics.
#[proc_macro_derive(ScheduleDebug)]
pub fn derive_schedule_debug(input: TokenStream) -> TokenStream {
derive_debug(input, format_default)
}

fn derive_debug(input: TokenStream, fmt: impl Fn(&Ident) -> TokenStream2) -> TokenStream {
let input = parse_macro_input!(input as DeriveInput);
let name = &input.ident;
let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl();
Expand All @@ -68,45 +47,15 @@ fn derive_debug(input: TokenStream, fmt: impl Fn(&Ident) -> TokenStream2) -> Tok
.into()
};

#[cfg(feature = "full")]
let fields = iterate_fields(data, fmt);

#[cfg(not(feature = "full"))]
let fields = {
drop(fmt);
let _ = data;
TokenStream2::new()
};

let tokens = quote! {
impl #impl_generics core::fmt::Debug for #name #ty_generics #where_clause {
fn fmt(&self, formatter: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
use ::sp_runtime::{FixedPointNumber, FixedU128 as Fixed};
let mut formatter = formatter.debug_struct(stringify!(#name));
#fields
formatter.finish()
}
}
};

tokens.into()
}

/// This is only used then the `full` feature is activated.
#[cfg(feature = "full")]
fn iterate_fields(data: &syn::DataStruct, fmt: impl Fn(&Ident) -> TokenStream2) -> TokenStream2 {
use syn::Fields;

match &data.fields {
let fields = match &data.fields {
Fields::Named(fields) => {
let recurse = fields.named.iter().filter_map(|f| {
let name = f.ident.as_ref()?;
if name.to_string().starts_with('_') {
return None
}
let value = fmt(name);
let ret = quote_spanned! { f.span() =>
formatter.field(stringify!(#name), #value);
formatter.field(stringify!(#name), &HumanWeight(self.#name));
};
Some(ret)
});
Expand All @@ -119,39 +68,53 @@ fn iterate_fields(data: &syn::DataStruct, fmt: impl Fn(&Ident) -> TokenStream2)
compile_error!("Unnamed fields are not supported")
},
Fields::Unit => quote!(),
}
}
};

fn format_weight(field: &Ident) -> TokenStream2 {
quote_spanned! { field.span() =>
&if self.#field.ref_time() > 1_000_000_000 {
format!(
"{:.1?} ms, {} bytes",
Fixed::saturating_from_rational(self.#field.ref_time(), 1_000_000_000).to_float(),
self.#field.proof_size()
)
} else if self.#field.ref_time() > 1_000_000 {
format!(
"{:.1?} µs, {} bytes",
Fixed::saturating_from_rational(self.#field.ref_time(), 1_000_000).to_float(),
self.#field.proof_size()
)
} else if self.#field.ref_time() > 1_000 {
format!(
"{:.1?} ns, {} bytes",
Fixed::saturating_from_rational(self.#field.ref_time(), 1_000).to_float(),
self.#field.proof_size()
)
} else {
format!("{} ps, {} bytes", self.#field.ref_time(), self.#field.proof_size())
let tokens = quote! {
impl #impl_generics ::core::fmt::Debug for #name #ty_generics #where_clause {
fn fmt(&self, formatter: &mut ::core::fmt::Formatter<'_>) -> core::fmt::Result {
use ::sp_runtime::{FixedPointNumber, FixedU128 as Fixed};
use ::core::{fmt, write};

struct HumanWeight(Weight);

impl fmt::Debug for HumanWeight {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.0.ref_time() > 1_000_000_000 {
write!(
formatter,
"{} ms, {} bytes",
Fixed::saturating_from_rational(self.0.ref_time(), 1_000_000_000).into_inner() / Fixed::accuracy(),
self.0.proof_size()
)
} else if self.0.ref_time() > 1_000_000 {
write!(
formatter,
"{} µs, {} bytes",
Fixed::saturating_from_rational(self.0.ref_time(), 1_000_000).into_inner() / Fixed::accuracy(),
self.0.proof_size()
)
} else if self.0.ref_time() > 1_000 {
write!(
formatter,
"{} ns, {} bytes",
Fixed::saturating_from_rational(self.0.ref_time(), 1_000).into_inner() / Fixed::accuracy(),
self.0.proof_size()
)
} else {
write!(formatter, "{} ps, {} bytes", self.0.ref_time(), self.0.proof_size())
}
}
}

let mut formatter = formatter.debug_struct(stringify!(#name));
#fields
formatter.finish()
}
}
}
}
};

fn format_default(field: &Ident) -> TokenStream2 {
quote_spanned! { field.span() =>
&self.#field
}
tokens.into()
}

/// Parsed environment definition.
Expand Down Expand Up @@ -480,7 +443,7 @@ fn expand_func_doc(func: &HostFn) -> TokenStream2 {
fn expand_docs(def: &EnvDef) -> TokenStream2 {
// Create the `Current` trait with only the newest versions
// we sort so that only the newest versions make it into `docs`
let mut current_docs = BTreeMap::new();
let mut current_docs = std::collections::HashMap::new();
let mut funcs: Vec<_> = def.host_funcs.iter().filter(|f| f.alias_to.is_none()).collect();
funcs.sort_unstable_by_key(|func| Reverse(func.version));
for func in funcs {
Expand All @@ -493,7 +456,7 @@ fn expand_docs(def: &EnvDef) -> TokenStream2 {

// Create the `legacy` module with all functions
// Maps from version to list of functions that have this version
let mut legacy_doc = BTreeMap::<u8, Vec<TokenStream2>>::new();
let mut legacy_doc = std::collections::BTreeMap::<u8, Vec<TokenStream2>>::new();
for func in def.host_funcs.iter() {
legacy_doc.entry(func.version).or_default().push(expand_func_doc(&func));
}
Expand Down
26 changes: 10 additions & 16 deletions substrate/frame/contracts/src/benchmarking/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2628,25 +2628,19 @@ benchmarks! {
}
// This is no benchmark. It merely exist to have an easy way to pretty print the currently
// configured `Schedule` during benchmark development.
// It can be outputted using the following command:
// cargo run --manifest-path=bin/node/cli/Cargo.toml \
// --features runtime-benchmarks -- benchmark pallet --extra --dev --execution=native \
// -p pallet_contracts -e print_schedule --no-median-slopes --no-min-squares
// configured `Schedule` during benchmark development. Check the README on how to print this.
#[extra]
#[pov_mode = Ignored]
print_schedule {
#[cfg(feature = "std")]
{
let max_weight = <T as frame_system::Config>::BlockWeights::get().max_block;
let (weight_per_key, key_budget) = ContractInfo::<T>::deletion_budget(max_weight);
println!("{:#?}", Schedule::<T>::default());
println!("###############################################");
println!("Lazy deletion weight per key: {weight_per_key}");
println!("Lazy deletion throughput per block: {key_budget}");
}
#[cfg(not(feature = "std"))]
Err("Run this bench with a native runtime in order to see the schedule.")?;
let max_weight = <T as frame_system::Config>::BlockWeights::get().max_block;
let (weight_per_key, key_budget) = ContractInfo::<T>::deletion_budget(max_weight);
let schedule = T::Schedule::get();
log::info!(target: LOG_TARGET, "
{schedule:#?}
###############################################
Lazy deletion weight per key: {weight_per_key}
Lazy deletion keys per block: {key_budget}
");
}: {}
// Execute one erc20 transfer using the ink! erc20 example contract.
Expand Down
26 changes: 8 additions & 18 deletions substrate/frame/contracts/src/schedule.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,11 +22,9 @@ use crate::{weights::WeightInfo, Config};

use codec::{Decode, Encode};
use frame_support::{weights::Weight, DefaultNoBound};
use pallet_contracts_proc_macro::{ScheduleDebug, WeightDebug};
use scale_info::TypeInfo;
#[cfg(feature = "std")]
use serde::{Deserialize, Serialize};
use sp_runtime::RuntimeDebug;
use sp_std::marker::PhantomData;

/// Definition of the cost schedule and other parameterizations for the wasm vm.
Expand Down Expand Up @@ -57,7 +55,8 @@ use sp_std::marker::PhantomData;
/// ```
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "std", serde(bound(serialize = "", deserialize = "")))]
#[derive(Clone, Encode, Decode, PartialEq, Eq, ScheduleDebug, DefaultNoBound, TypeInfo)]
#[cfg_attr(feature = "runtime-benchmarks", derive(frame_support::DebugNoBound))]
#[derive(Clone, Encode, Decode, PartialEq, Eq, DefaultNoBound, TypeInfo)]
#[scale_info(skip_type_params(T))]
pub struct Schedule<T: Config> {
/// Describes the upper limits on various metrics.
Expand All @@ -72,7 +71,8 @@ pub struct Schedule<T: Config> {

/// Describes the upper limits on various metrics.
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug, TypeInfo)]
#[cfg_attr(feature = "runtime-benchmarks", derive(Debug))]
#[derive(Clone, Encode, Decode, PartialEq, Eq, TypeInfo)]
pub struct Limits {
/// The maximum number of topics supported by an event.
pub event_topics: u32,
Expand Down Expand Up @@ -130,7 +130,8 @@ impl Limits {
/// Gas metering of Wasm executed instructions is being done on the engine side.
/// This struct holds a reference value used to gas units scaling between host and engine.
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Clone, Encode, Decode, PartialEq, Eq, ScheduleDebug, TypeInfo)]
#[cfg_attr(feature = "runtime-benchmarks", derive(frame_support::DebugNoBound))]
#[derive(Clone, Encode, Decode, PartialEq, Eq, TypeInfo)]
#[scale_info(skip_type_params(T))]
pub struct InstructionWeights<T: Config> {
/// Base instruction `ref_time` Weight.
Expand All @@ -143,7 +144,8 @@ pub struct InstructionWeights<T: Config> {

/// Describes the weight for each imported function that a contract is allowed to call.
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Clone, Encode, Decode, PartialEq, Eq, WeightDebug, TypeInfo)]
#[cfg_attr(feature = "runtime-benchmarks", derive(pallet_contracts_proc_macro::WeightDebug))]
#[derive(Clone, Encode, Decode, PartialEq, Eq, TypeInfo)]
#[scale_info(skip_type_params(T))]
pub struct HostFnWeights<T: Config> {
/// Weight of calling `seal_caller`.
Expand Down Expand Up @@ -478,15 +480,3 @@ impl<T: Config> Default for HostFnWeights<T> {
}
}
}

#[cfg(test)]
mod test {
use super::*;
use crate::tests::Test;

#[test]
fn print_test_schedule() {
let schedule = Schedule::<Test>::default();
println!("{:#?}", schedule);
}
}

0 comments on commit 5e5341d

Please sign in to comment.