Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 4 additions & 3 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

3 changes: 3 additions & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,8 @@ apache-avro = { version = "0.19.0", default-features = false }
rand = "0.9.0"
parking_lot = "0.12.1"
lazy_static = { version = "1.5", features = ["spin_no_std"] }
multibase = { version = "0.9", default-features = false }
cid = { version = "0.11", default-features = false, features = ["alloc"] }

# substrate wasm
parity-scale-codec = { version = "3.7.4", default-features = false }
Expand Down Expand Up @@ -65,6 +67,7 @@ pallet-authorship = { git = "https://github.com/paritytech/polkadot-sdk", tag =
pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk", tag = "polkadot-stable2503-7", default-features = false }
pallet-collective = { git = "https://github.com/paritytech/polkadot-sdk", tag = "polkadot-stable2503-7", default-features = false }
pallet-democracy = { git = "https://github.com/paritytech/polkadot-sdk", tag = "polkadot-stable2503-7", default-features = false }
pallet-migrations = { git = "https://github.com/paritytech/polkadot-sdk", tag = "polkadot-stable2503-7", default-features = false }
pallet-multisig = { git = "https://github.com/paritytech/polkadot-sdk", tag = "polkadot-stable2503-7", default-features = false }
pallet-preimage = { git = "https://github.com/paritytech/polkadot-sdk", tag = "polkadot-stable2503-7", default-features = false }
pallet-scheduler = { git = "https://github.com/paritytech/polkadot-sdk", tag = "polkadot-stable2503-7", default-features = false }
Expand Down
19 changes: 13 additions & 6 deletions Makefile
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Made the following enhancements for try-runtime-* targets:

  • Only build the runtime, not the entire node; speeds up the build
  • Consolidate targets using patterns to avoid duplicated targets/commands
  • Fetch state for a minimal set of system pallets so that multi-block migrations can be properly tested
  • Allow the use of the PALLETS env var to specify additional pallets for dumping/restoring state

Original file line number Diff line number Diff line change
Expand Up @@ -419,6 +419,7 @@ LOCAL_URI=ws://localhost:9944
WASM_PATH=./target/release/wbuild/frequency-runtime/frequency_runtime.wasm
# Without the state from this minimal set of pallets, try-runtime panics when trying to validate multi-block migrations
MINIMAL_PALLETS=ParachainSystem ParachainInfo System Timestamp Aura Authorship
TRY_RUNTIME_BUILD_TYPE=release

.PHONY: check-onfinality-api-key
check-onfinality-api-key:
Expand All @@ -440,25 +441,30 @@ try-runtime-%-mainnet: URI := $(MAINNET_URI)
try-runtime-%-mainnet: CHAIN := mainnet
try-runtime-%-local: URI := $(LOCAL_URI)
try-runtime-%-local: CHAIN := local
try-runtime-%-local: WASM_PATH=./target/debug/wbuild/frequency-runtime/frequency_runtime.wasm


build-runtime-paseo-testnet: FEATURES := frequency-testnet
build-runtime-bridging-testnet: FEATURES := frequency-testnet,frequency-bridging
build-runtime-mainnet: FEATURES := frequency
build-runtime-westend-testnet: FEATURES := frequency-westend,frequency-bridging
build-runtime-local: FEATURES := frequency-no-relay,frequency-bridging
build-runtime-local: FEATURES := frequency-no-relay
build-runtime-local: TRY_RUNTIME_BUILD_TYPE := dev

.PHONY: build-runtime-paseo-testnet build-runtime-westend-testnet build-runtime-mainnet build-runtime-local
build-runtime-local \
build-runtime-paseo-testnet \
build-runtime-westend-testnet \
build-runtime-mainnet:
cargo build --package frequency-runtime --release --features $(FEATURES),try-runtime --locked
cargo build --package frequency-runtime --profile ${TRY_RUNTIME_BUILD_TYPE} --features $(FEATURES),try-runtime --locked

#
# The 'try-runtime' targets can optionally be constrained to fetch state for only specific pallets. This is useful to
# avoid unnecessarily fetching large state trees for pallets not under test. The list of pallets is:
# Msa Messages StatefulStorage Capacity FrequencyTxPayment Handles Passkey Schemas

.PHONY: try-runtime-create-snapshot-paseo-testnet try-runtime-create-snapshot-westend-testnet try-runtime-create-snapshot-mainnet
.PHONY: try-runtime-create-snapshot-paseo-testnet try-runtime-create-snapshot-westend-testnet try-runtime-create-snapshot-mainnet try-runtime-create-snapshot-local
try-runtime-create-snapshot-local \
try-runtime-create-snapshot-paseo-testnet \
try-runtime-create-snapshot-westend-testnet \
try-runtime-create-snapshot-mainnet: check-try-runtime-installed check-onfinality-api-key
Expand All @@ -470,7 +476,8 @@ try-runtime-upgrade-paseo-testnet \
try-runtime-upgrade-mainnet: try-runtime-upgrade-%: check-try-runtime-installed build-runtime-%
try-runtime --runtime $(WASM_PATH) on-runtime-upgrade --blocktime=6000 live --uri $(URI)

.PHONY: try-runtime-use-snapshot-paseo-testnet try-runtime-use-snapshot-mainnet
.PHONY: try-runtime-use-snapshot-paseo-testnet try-runtime-use-snapshot-mainnet try-runtime-use-snapshot-local
try-runtime-use-snapshot-local \
try-runtime-use-snapshot-paseo-testnet \
try-runtime-use-snapshot-mainnet: try-runtime-use-snapshot-%: check-try-runtime-installed build-runtime-%
try-runtime --runtime $(WASM_PATH) on-runtime-upgrade --blocktime=6000 snap --path $(CHAIN)-$(SNAPSHOT_PALLETS).state
Expand All @@ -483,11 +490,11 @@ try-runtime-check-migrations-westend-testnet: try-runtime-check-migrations-%: ch

.PHONY: try-runtime-check-migrations-local
try-runtime-check-migrations-local: check-try-runtime-installed build-runtime-local
try-runtime --runtime $(WASM_PATH) on-runtime-upgrade --blocktime=6000 --checks="pre-and-post" --disable-spec-version-check --disable-mbm-checks --no-weight-warnings live --uri $(URI) $(PALLET_FLAGS)
try-runtime --runtime $(WASM_PATH) on-runtime-upgrade --blocktime=6000 --checks="pre-and-post" --disable-spec-version-check live --uri $(URI) $(PALLET_FLAGS)

.PHONY: try-runtime-check-migrations-none-local
try-runtime-check-migrations-none-local: check-try-runtime-installed build-runtime-local
try-runtime --runtime $(WASM_PATH) on-runtime-upgrade --blocktime=6000 --checks="none" --disable-spec-version-check --disable-mbm-checks --no-weight-warnings live --uri $(URI) $(PALLET_FLAGS)
try-runtime --runtime $(WASM_PATH) on-runtime-upgrade --blocktime=6000 --checks="none" --disable-spec-version-check live --uri $(URI) $(PALLET_FLAGS)

# Pull the Polkadot version from the polkadot-cli package in the Cargo.lock file.
# This will break if the lock file format changes
Expand Down
1 change: 1 addition & 0 deletions common/primitives/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ sp-runtime-interface = { workspace = true }
libsecp256k1 = { workspace = true, features = ["hmac"] }
log = "0.4.22"
lazy_static = { workspace = true }
cid = { workspace = true }

[features]
default = ['std']
Expand Down
158 changes: 158 additions & 0 deletions common/primitives/src/cid.rs
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This file is not really net-new; I just haven't merged from main in a while. I've added some additional consts, and moved the CID validation function and tests from the messages pallet.

Original file line number Diff line number Diff line change
@@ -0,0 +1,158 @@
#[cfg(test)]
use cid::multibase::Base;
use cid::{multibase, Cid};
#[cfg(test)]
use frame_support::assert_ok;
use frame_support::ensure;
use sp_io::hashing::sha2_256;
use sp_runtime::Vec;

/// Multihash type for wrapping digests (supports digests up to 64 bytes).
/// The 64-byte capacity lets larger digests (e.g. SHA2-512) parse successfully so that
/// `validate_cid` can reject them with `UnsupportedCidMultihash` rather than `InvalidCid`.
type Multihash = cid::multihash::Multihash<64>;

/// SHA2-256 multihash code (0x12 in the multiformats multicodec table)
const SHA2_256: u64 = 0x12;
/// BLAKE3 multihash code (0x1e in the multiformats multicodec table)
const BLAKE3: u64 = 0x1e;

/// List of hash algorithms supported by DSNP; any CID whose multihash code is not
/// in this list is rejected by `validate_cid`
const DSNP_HASH_ALGORITHMS: &[u64] = &[SHA2_256, BLAKE3];

/// Raw (unstructured bytes) codec for CIDv1 (0x55 in the multicodec table)
const RAW: u64 = 0x55;

/// Error enum for CID validation
#[derive(Debug, PartialEq)]
pub enum CidError {
/// Unsupported CID version
UnsupportedCidVersion,
/// Unsupported CID hash algorithm
UnsupportedCidMultihash,
/// Multibase decoding error
MultibaseDecodeError,
/// UTF-8 decoding error
Utf8DecodeError,
/// CID parsing error
InvalidCid,
}

/// Computes a CIDv1 (RAW + SHA2-256 multihash)
pub fn compute_cid_v1(bytes: &[u8]) -> Option<Vec<u8>> {
let digest = sha2_256(bytes);
let mh = Multihash::wrap(SHA2_256, &digest).ok()?;
let cid = Cid::new_v1(RAW, mh);
Some(cid.to_bytes())
}

/// Validates a CID to conform to IPFS CIDv1 (or higher) formatting and allowed multihashes (does not validate decoded CID fields)
/// Validates a CID to conform to IPFS CIDv1 (or higher) formatting and allowed multihashes (does not validate decoded CID fields)
///
/// On success, returns the multibase-decoded binary CID bytes.
///
/// # Errors
/// * `CidError::Utf8DecodeError` - input bytes are not valid UTF-8
/// * `CidError::InvalidCid` - input is too short, or the decoded bytes do not parse as a CID
/// * `CidError::UnsupportedCidVersion` - input looks like a CIDv0 (begins with "Qm")
/// * `CidError::MultibaseDecodeError` - input is not a valid multibase-encoded string
/// * `CidError::UnsupportedCidMultihash` - CID's hash algorithm is not DSNP-supported
pub fn validate_cid(in_cid: &[u8]) -> Result<Vec<u8>, CidError> {
	// Interpret the raw input bytes as a UTF-8 string slice; a multibase-encoded CID is text
	let cid_str: &str = core::str::from_utf8(in_cid).map_err(|_| CidError::Utf8DecodeError)?;
	ensure!(cid_str.len() > 2, CidError::InvalidCid);
	// starts_with handles Unicode multibyte characters safely.
	// A CIDv0 is a base58btc string beginning with "Qm"; reject it before attempting to decode.
	ensure!(!cid_str.starts_with("Qm"), CidError::UnsupportedCidVersion);

	// Assume it's a multibase-encoded string. Decode it to a byte array so we can parse the CID.
	// `.1` discards the detected base; only the decoded payload is needed.
	let cid_b = multibase::decode(cid_str).map_err(|_| CidError::MultibaseDecodeError)?.1;
	let cid = Cid::read_bytes(&cid_b[..]).map_err(|_| CidError::InvalidCid)?;
	ensure!(DSNP_HASH_ALGORITHMS.contains(&cid.hash().code()), CidError::UnsupportedCidMultihash);

	Ok(cid_b)
}

// Test fixtures: multibase-encoded CIDv1 strings using different hash algorithms.
// SHA512 is used to exercise the unsupported-multihash rejection path.
#[cfg(test)]
const DUMMY_CID_SHA512: &str = "bafkrgqb76pscorjihsk77zpyst3p364zlti6aojlu4nga34vhp7t5orzwbwwytvp7ej44r5yhjzneanqwb5arcnvuvfwo2d4qgzyx5hymvto4";
#[cfg(test)]
const DUMMY_CID_SHA256: &str = "bagaaierasords4njcts6vs7qvdjfcvgnume4hqohf65zsfguprqphs3icwea";
#[cfg(test)]
const DUMMY_CID_BLAKE3: &str = "bafkr4ihn4xalcdzoyslzy2nvf5q6il7vwqjvdhhatpqpctijrxh6l5xzru";

#[test]
fn validate_cid_invalid_utf8_errors() {
	// 0xfc is not a valid UTF-8 leading byte for this sequence
	let bad_cid = vec![0xfc, 0xa1, 0xa1, 0xa1, 0xa1, 0xa1];
	assert_eq!(
		validate_cid(&bad_cid).expect_err("Expected Utf8DecodeError"),
		CidError::Utf8DecodeError
	);
}

#[test]
fn validate_cid_too_short_errors() {
	// Fails the minimum-length (> 2 chars) check
	let bad_cid = "a".as_bytes().to_vec();
	assert_eq!(validate_cid(&bad_cid).expect_err("Expected InvalidCid"), CidError::InvalidCid);
}

#[test]
fn validate_cid_v0_errors() {
	// "Qm" prefix marks a CIDv0, which is explicitly unsupported
	let bad_cid = "Qmxxx".as_bytes().to_vec();
	assert_eq!(
		validate_cid(&bad_cid).expect_err("Expected UnsupportedCidVersion"),
		CidError::UnsupportedCidVersion
	);
}

#[test]
fn validate_cid_invalid_multibase_errors() {
	// Valid UTF-8, long enough, but not a decodable multibase string
	let bad_cid = "aaaa".as_bytes().to_vec();
	assert_eq!(
		validate_cid(&bad_cid).expect_err("Expected MultibaseDecodeError"),
		CidError::MultibaseDecodeError
	);
}

#[test]
fn validate_cid_invalid_cid_errors() {
	// Decodes as multibase but the payload is not a parseable CID
	let bad_cid = multibase::encode(Base::Base32Lower, "foo").as_bytes().to_vec();
	assert_eq!(validate_cid(&bad_cid).expect_err("Expected InvalidCid"), CidError::InvalidCid);
}

#[test]
fn validate_cid_valid_cid_sha2_256_succeeds() {
	let cid = DUMMY_CID_SHA256.as_bytes().to_vec();
	assert_ok!(validate_cid(&cid));
}

#[test]
fn validate_cid_valid_cid_blake3_succeeds() {
	let cid = DUMMY_CID_BLAKE3.as_bytes().to_vec();
	assert_ok!(validate_cid(&cid));
}

#[test]
fn validate_cid_invalid_hash_function_errors() {
	// SHA512 parses as a CID (64-byte multihash capacity) but is not DSNP-supported
	let bad_cid = DUMMY_CID_SHA512.as_bytes().to_vec();
	assert_eq!(
		validate_cid(&bad_cid).expect_err("Expected UnsupportedCidMultihash"),
		CidError::UnsupportedCidMultihash
	);
}
#[test]
fn validate_cid_not_valid_multibase() {
	// This should not panic, but should return an error.
	let bad_cid = vec![55, 197, 136, 0, 0, 0, 0, 0, 0, 0, 0];
	assert_eq!(
		validate_cid(&bad_cid).expect_err("Expected MultibaseDecodeError"),
		CidError::MultibaseDecodeError
	);
}

#[test]
fn validate_cid_not_correct_format_errors() {
	// This should not panic, but should return an error.
	let bad_cid = vec![0, 1, 0, 1, 203, 155, 0, 0, 0, 5, 67];
	assert_eq!(validate_cid(&bad_cid).expect_err("Expected InvalidCid"), CidError::InvalidCid);

	// This should not panic, but should return an error.
	let another_bad_cid = vec![241, 0, 0, 0, 0, 0, 128, 132, 132, 132, 58];
	assert_eq!(
		validate_cid(&another_bad_cid).expect_err("Expected Utf8DecodeError"),
		CidError::Utf8DecodeError
	);
}

#[test]
fn validate_cid_unwrap_errors() {
	// This should not panic, but should return an error.
	let bad_cid = vec![102, 70, 70, 70, 70, 70, 70, 70, 70, 48, 48, 48, 54, 53, 53, 48, 48];
	assert_eq!(validate_cid(&bad_cid).expect_err("Expected InvalidCid"), CidError::InvalidCid);
}
2 changes: 2 additions & 0 deletions common/primitives/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,5 +42,7 @@ pub mod offchain;
/// Benchmarking helper trait
pub mod benchmarks;

/// CID support
pub mod cid;
/// Signature support for ethereum
pub mod signatures;
60 changes: 56 additions & 4 deletions common/primitives/src/messages.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
#[cfg(feature = "std")]
use crate::utils;
use crate::{msa::MessageSourceId, node::BlockNumber};
use crate::{msa::MessageSourceId, node::BlockNumber, schema::SchemaId};
use parity_scale_codec::{Decode, Encode};
use scale_info::TypeInfo;
#[cfg(feature = "std")]
Expand All @@ -11,7 +11,7 @@ use alloc::{vec, vec::Vec};
#[cfg(feature = "std")]
use utils::*;

/// A type for responding with an single Message in an RPC-call dependent on schema model
/// A type for responding with a single Message in an RPC-call dependent on schema model
/// IPFS, Parquet: { index, block_number, provider_msa_id, cid, payload_length }
/// Avro, OnChain: { index, block_number, provider_msa_id, msa_id, payload }
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
Expand All @@ -20,7 +20,7 @@ pub struct MessageResponse {
/// Message source account id of the Provider. This may be the same id as contained in `msa_id`,
/// indicating that the original source MSA is acting as its own provider. An id differing from that
/// of `msa_id` indicates that `provider_msa_id` was delegated by `msa_id` to send this message on
/// its behalf .
/// its behalf.
pub provider_msa_id: MessageSourceId,
/// Index in block to get total order.
pub index: u16,
Expand All @@ -45,6 +45,58 @@ pub struct MessageResponse {
#[cfg_attr(feature = "std", serde(skip_serializing_if = "Option::is_none", default))]
pub payload_length: Option<u32>,
}

/// A type for responding with a single Message in an RPC-call dependent on schema model
/// IPFS, Parquet: { index, block_number, provider_msa_id, cid, payload_length }
/// Avro, OnChain: { index, block_number, provider_msa_id, msa_id, payload }
/// A type for responding with a single Message in an RPC-call dependent on schema model.
/// Extends the V1 response with the `schema_id` of the message's schema.
/// IPFS, Parquet: { index, block_number, provider_msa_id, cid, payload_length, schema_id }
/// Avro, OnChain: { index, block_number, provider_msa_id, msa_id, payload, schema_id }
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Default, Clone, Encode, Decode, PartialEq, Debug, TypeInfo, Eq)]
pub struct MessageResponseV2 {
	/// Message source account id of the Provider. This may be the same id as contained in `msa_id`,
	/// indicating that the original source MSA is acting as its own provider. An id differing from that
	/// of `msa_id` indicates that `provider_msa_id` was delegated by `msa_id` to send this message on
	/// its behalf.
	pub provider_msa_id: MessageSourceId,
	/// Index in block to get total order.
	pub index: u16,
	/// Block-number for which the message was stored.
	pub block_number: BlockNumber,
	/// Message source account id (the original source).
	#[cfg_attr(feature = "std", serde(skip_serializing_if = "Option::is_none", default))]
	pub msa_id: Option<MessageSourceId>,
	/// Serialized data in one of the schemas (Avro, OnChain schema models).
	#[cfg_attr(
		feature = "std",
		serde(with = "as_hex_option", skip_serializing_if = "Option::is_none", default)
	)]
	pub payload: Option<Vec<u8>>,
	/// The content address for an IPFS payload in Base32. Will always be CIDv1.
	#[cfg_attr(
		feature = "std",
		serde(with = "as_string_option", skip_serializing_if = "Option::is_none", default)
	)]
	pub cid: Option<Vec<u8>>,
	/// Offchain payload length (IPFS).
	#[cfg_attr(feature = "std", serde(skip_serializing_if = "Option::is_none", default))]
	pub payload_length: Option<u32>,
	/// The SchemaId of the schema that defines the payload format
	pub schema_id: SchemaId,
}

/// Lossy downgrade to the V1 response type: every V1 field is carried over
/// unchanged; the V2-only `schema_id` field is dropped.
///
/// Implemented as `From` rather than `Into` (clippy `from_over_into`): the
/// standard-library blanket impl provides `Into<MessageResponse> for
/// MessageResponseV2` automatically, so existing `.into()` call sites are
/// unaffected, and callers additionally gain `MessageResponse::from(v2)`.
impl From<MessageResponseV2> for MessageResponse {
	fn from(v2: MessageResponseV2) -> Self {
		MessageResponse {
			provider_msa_id: v2.provider_msa_id,
			index: v2.index,
			block_number: v2.block_number,
			msa_id: v2.msa_id,
			payload: v2.payload,
			cid: v2.cid,
			payload_length: v2.payload_length,
		}
	}
}

/// A type for requesting paginated messages.
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
#[derive(Default, Clone, Encode, Decode, PartialEq, Debug, TypeInfo, Eq)]
Expand Down Expand Up @@ -93,7 +145,7 @@ pub struct BlockPaginationResponse<T> {
}

impl<T> BlockPaginationResponse<T> {
/// Generates a new empty Pagination request
/// Generates a new empty Pagination response
pub const fn new() -> BlockPaginationResponse<T> {
BlockPaginationResponse {
content: vec![],
Expand Down
2 changes: 1 addition & 1 deletion designdocs/schemas_protocols_intents.md
Original file line number Diff line number Diff line change
Expand Up @@ -175,7 +175,7 @@ it's difficult or impossible to bifurcate the storage in the same way as the `me
requiring a complete storage migration, new pages/items that are written can include a _storage version magic number_ in
either the page or the item header. For `Paginated` storage, this value would precede the `PageNonce`; for `Itemized`
storage the value would precede `payload_len`. The 'magic number' would be designed to be the same byte length as the
value currently a byte offset zero within the page/item, and to be a value such that conflict with a valid `nonce` or
value currently at byte offset zero within the page/item, and to be a value such that conflict with a valid `nonce` or
`payload_len` would be highly unlikely, if not impossible.

New structures would be defined, ie `PageV2` and `ItemizedItemV2`, and decoding values read from storage would need to
Expand Down
Loading
Loading