diff --git a/Cargo.lock b/Cargo.lock index cbd618fa1..305421f6e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7636,6 +7636,21 @@ dependencies = [ "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?tag=polkadot-v1.13.0)", ] +[[package]] +name = "pallet-storage-provider" +version = "0.0.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", +] + [[package]] name = "pallet-sudo" version = "28.0.0" @@ -8340,6 +8355,7 @@ dependencies = [ "pallet-market", "pallet-message-queue", "pallet-session", + "pallet-storage-provider", "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", diff --git a/Cargo.toml b/Cargo.toml index 22bc0e144..2cb378633 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,6 +10,7 @@ members = [ "cli/polka-storage-provider", "node", "pallets/market", + "pallets/storage-provider", "primitives/cli", "primitives/proofs", "runtime", @@ -91,8 +92,10 @@ url = "2.5.0" uuid = "1.8.0" # Local + cli-primitives = { path = "primitives/cli" } pallet-market = { path = "pallets/market", default-features = false } +pallet-storage-provider = { path = "pallets/storage-provider", default-features = false } polka-storage-runtime = { path = "runtime" } primitives-proofs = { path = "primitives/proofs", default-features = false } diff --git a/pallets/storage-provider/.gitkeep b/pallets/storage-provider/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/pallets/storage-provider/Cargo.toml b/pallets/storage-provider/Cargo.toml new file mode 100644 index 000000000..1aed701eb --- /dev/null +++ b/pallets/storage-provider/Cargo.toml @@ -0,0 +1,49 @@ +[package] +authors.workspace = true +edition.workspace = true +homepage.workspace = true +license-file.workspace = true +name = "pallet-storage-provider" +repository.workspace = true +version = "0.0.0" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { workspace = true, default-features = false, features = ["derive"] } +log.workspace = true +scale-info = { workspace = true, default-features = false, features = ["derive"] } + +# Frame deps +frame-benchmarking = { workspace = true, default-features = false, optional = true } +frame-support = { workspace = true, default-features = false } +frame-system = { workspace = true, default-features = false } + +[dev-dependencies] +sp-core = { workspace = true, default-features = false } +sp-io = { workspace = true } +sp-runtime = { workspace = true, default-features = false } + +[features] +default = ["std"] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", +] +try-runtime = ["frame-support/try-runtime", "frame-system/try-runtime", "sp-runtime/try-runtime"] diff --git a/pallets/storage-provider/DESIGN.md b/pallets/storage-provider/DESIGN.md new file mode 100644 index 000000000..227844e70 --- /dev/null +++ b/pallets/storage-provider/DESIGN.md @@ -0,0 +1,214 @@ +# Storage Provider Pallet + +- [Storage Provider Pallet](#storage-provider-pallet) + - [Overview](#overview) + - [Constants \& Terminology](#constants--terminology) + - [Usage](#usage) + - [Registering storage 
providers](#registering-storage-providers) +    - [Modifying storage provider information](#modifying-storage-provider-information) +    - [Declaring storage faults](#declaring-storage-faults) +    - [Declaring storage faults recovered](#declaring-storage-faults-recovered) +  - [Storage fault slashing](#storage-fault-slashing) +    - [Fault Fee (FF)](#fault-fee-ff) +    - [Sector Penalty (SP)](#sector-penalty-sp) +    - [Termination Penalty (TP)](#termination-penalty-tp) +  - [State management for Storage Providers](#state-management-for-storage-providers) +    - [Static information about a Storage Provider](#static-information-about-a-storage-provider) +  - [Sector sealing](#sector-sealing) +  - [Data structures](#data-structures) +    - [Proof of Spacetime](#proof-of-spacetime) +    - [Proof of Replication](#proof-of-replication) +  - [Storage Provider Flow](#storage-provider-flow) +    - [Registration](#registration) +    - [Commit](#commit) +    - [Proof of Spacetime submission](#proof-of-spacetime-submission) +  - [Storage provider pallet hooks](#storage-provider-pallet-hooks) + +## Overview + +The `Storage Provider Pallet` handles the creation of storage providers and facilitates storage providers and clients in creating storage deals. Storage providers must provide Proof of Spacetime and Proof of Replication to the `Storage Provider Pallet`, otherwise the pallet imposes penalties on the storage providers through [slashing](#storage-fault-slashing). + +### Constants & Terminology + +- **Sector**: The sector is the default unit of storage that providers put in the network. A sector is a contiguous array of bytes that a storage provider puts together, seals, and performs Proofs of Spacetime on. Storage providers store data on the network in fixed-size sectors. +- **Partition**: A group of 2349 sectors proven simultaneously. +- **Proving Period**: The average period for proving all sectors maintained by a provider (default set to 24 hours). +- **Deadline**: One of the multiple points during a proving period when proofs for some partitions are due. +- **Challenge Window**: The period immediately before a deadline during which a challenge can be generated by the chain and the requisite proofs computed. +- **Provider Size**: The amount of proven storage maintained by a single storage provider. + +## Usage + +### Registering storage providers + +A storage provider registers itself in the storage provider pallet when it starts up by calling the `create_storage_provider` extrinsic with its `PeerId` as an argument. The public key will be extracted from the origin and is used to modify on-chain information and receive rewards. The `PeerId` is given by the storage provider so that clients can use it to connect to the storage provider. + +### Modifying storage provider information + +The `Storage Provider Pallet` allows storage providers to modify their information, such as changing the peer id through `change_peer_id` and changing owners through `change_owner_address`. + +### Declaring storage faults + +A storage provider can declare sectors as faulty through the `declare_faults` extrinsic for any sectors for which it cannot generate `WindowPoSt` proofs. A storage provider has to declare the sector as faulty **before** the challenge window. Until the sectors are recovered, they will be masked from proofs in subsequent proving periods. + +### Declaring storage faults recovered + +After a storage provider has declared some sectors as faulty, it can recover those sectors.
The storage provider can use the `declare_faults_recovered` method to set the sectors it previously declared as faulty to recovering. + +## Storage fault slashing + +Storage Fault Slashing refers to a set of penalties that storage providers may incur if they fail to maintain sector reliability or choose to voluntarily exit the network. These penalties include Fault Fees, Sector Penalties, and Termination Fees. Below is a detailed explanation of each type of penalty. + +### Fault Fee (FF) + +- **Description**: A penalty incurred by a storage provider for each day that a sector is offline. +- **Rationale**: Ensures that storage providers maintain high availability and reliability of their committed data. + +### Sector Penalty (SP) + +- **Description**: A penalty incurred by a storage provider for a sector that becomes faulty without being declared as such before a WindowPoSt (Proof-of-Spacetime) check. +- **Rationale**: Encourages storage providers to promptly declare any faults to avoid more severe penalties. +- **Details**: If a fault is detected during a WindowPoSt check, the sector will incur an SP and will continue to incur an FF until the fault is resolved. + +### Termination Penalty (TP) + +- **Description**: A penalty incurred when a sector is either voluntarily or involuntarily terminated and removed from the network. +- **Rationale**: Discourages storage providers from arbitrarily terminating sectors and ensures they fulfill their storage commitments. + +By implementing these penalties, storage providers are incentivised to maintain the reliability and availability of the data they store. This system of Storage Fault Slashing helps maintain the integrity and reliability of our decentralized storage network. + +### State management for Storage Providers + +In our parachain, the state management for all storage providers is handled collectively, unlike Filecoin, which manages the state for individual storage providers. + +### Static information about a Storage Provider + +The struct below and its fields ensure that all necessary static information about a Storage Provider is encapsulated, allowing for efficient management and interaction within the parachain. + +```rust +pub struct StorageProviderInfo { +    /// Libp2p identity that should be used when connecting to this Storage Provider +    pub peer_id: PeerId, + +    /// The proof type used by this Storage provider for sealing sectors. +    /// Rationale: Different StorageProviders may use different proof types for sealing sectors. By storing +    /// the `window_post_proof_type`, we can ensure that the correct proof mechanisms are applied and verified +    /// according to the provider's chosen method. This enhances compatibility and integrity in the proof-of-storage +    /// processes. +    pub window_post_proof_type: RegisteredPoStProof, + +    /// Amount of space in each sector committed to the network by this Storage Provider +    /// +    /// Rationale: The `sector_size` indicates the amount of data each sector can hold. This information is crucial +    /// for calculating storage capacity, economic incentives, and the validation process. It ensures that the storage +    /// commitments made by the provider are transparent and verifiable. +    pub sector_size: SectorSize, + +    /// The number of sectors in each Window PoSt partition (proof). +    /// This is computed from the proof type and represented here redundantly. +    /// +    /// Rationale: The `window_post_partition_sectors` field specifies the number of sectors included in each +    /// Window PoSt proof partition.
This redundancy ensures that partition calculations are consistent and +    /// simplifies the process of generating and verifying proofs. By storing this value, we enhance the efficiency +    /// of proof operations and reduce computational overhead during runtime. +    pub window_post_partition_sectors: u64, +} +``` + +## Sector sealing + +Before a sector can be used, the storage provider must seal the sector, which involves encoding the data in the sector to prepare it for the proving process. + +- **Unsealed Sector**: An unsealed sector is a sector containing raw data that has not yet been sealed. +- **UnsealedCID (CommD)**: The root hash of the unsealed sector’s Merkle tree, also referred to as CommD or "data commitment." +- **Sealed Sector**: A sector that has been encoded and prepared for the proving process. +- **SealedCID (CommR)**: The root hash of the sealed sector’s Merkle tree, also referred to as CommR or "replica commitment." + +By sealing sectors, storage providers ensure that data is properly encoded and ready for the proof-of-storage process, maintaining the integrity and security of the stored data in the network. + +Sealing a sector using Proof-of-Replication (PoRep) is a computation-intensive process that results in a unique encoding of the sector. Once the data is sealed, storage providers follow these steps: + +- **Generate a Proof**: Create a proof that the data has been correctly sealed. +- **Run a SNARK on the Proof**: Compress the proof using a Succinct Non-interactive Argument of Knowledge (SNARK). +- **Submit the Compressed Proof**: Submit the result of the compression to the blockchain as certification of the storage commitment. + +## Data structures + +### Proof of Spacetime + +> [!NOTE] +> For more information about proofs, check out the [proof of storage docs](./PROOF-OF-STORAGE.md) + +Proof of Spacetime indicates the version and the sector size of the proof. This type is used by the Storage Provider when initially starting up to indicate what PoSt version it will use to submit Window PoSt proofs. + +```rust +pub enum RegisteredPoStProof { +    StackedDRGWindow2KiBV1P1, +} +``` + +The `SectorSize` indicates one of a set of possible sizes in the network. + +```rust +#[repr(u64)] +pub enum SectorSize { +    _2KiB, +} +``` + +The `PoStProof` is the proof of spacetime data that is stored on chain. + +```rust +pub struct PoStProof { +    pub post_proof: RegisteredPoStProof, +    pub proof_bytes: Vec<u8>, +} +``` + +### Proof of Replication + +> [!NOTE] +> For more information about proofs, check out the [proof of storage docs](./PROOF-OF-STORAGE.md) + +Proof of Replication is used when a Storage Provider wants to store data on behalf of a client and receives a piece of client data. The data will first be placed in a sector, after which that sector is sealed by the storage provider. Then a unique encoding, which serves as proof that the Storage Provider has replicated a copy of the data they agreed to store, is generated. Finally, the proof is compressed and submitted to the network as certification of storage. + +```rust +/// This type indicates the seal proof type which defines the version and the sector size +pub enum RegisteredSealProof { +    StackedDRG2KiBV1P1, +} +``` + +The unique encoding created during the sealing process is generated using the sealed data, the storage provider who seals the data, and the time at which the data was sealed.
+ +```rust +/// This type is passed into the pre commit function on the storage provider pallet +pub struct SectorPreCommitInfo { +    pub seal_proof: RegisteredSealProof, +    pub sector_number: SectorNumber, +    pub sealed_cid: Cid, +    pub expiration: u64, +} +``` + +## Storage Provider Flow + +### Registration + +The first thing a storage provider must do is register itself by calling `storage_provider.create_storage_provider(peer_id: PeerId, window_post_proof_type: RegisteredPoStProof)`. At this point there are no funds locked in the storage provider pallet. The next step is to place storage market asks on the market; this is done through the market pallet. After that, the storage provider needs to make deals with clients and begin filling up sectors with data. When they have a full sector, they should seal the sector. + +### Commit + +When the storage provider has completed their first seal, they should post it to the storage provider pallet by calling `storage_provider.pre_commit_sector(sectors: SectorPreCommitInfo)`. If the storage provider had zero committed sectors before this call, this begins their proving period. The proving period is a fixed amount of time in which the storage provider must submit a Proof of Spacetime to the network. +During this period, the storage provider may also commit to new sectors, but they will not be included in proofs of spacetime until the next proving period starts. During the prove commit call, the storage provider pledges some collateral in case they fail to submit their PoSt on time. + +### Proof of Spacetime submission + +When the storage provider has completed their PoSt, they must submit it to the network by calling `storage_provider.submit_windowed_post(deadline: u64, partitions: Vec<u64>, proofs: Vec<PoStProof>)`. There are two different types of submissions: + +- **Standard Submission**: A standard submission is one that makes it on-chain before the end of the proving period. +- **Penalized Submission**: A penalized submission is one that makes it on-chain after the end of the proving period, but before the generation attack threshold. These submissions count as valid PoSt submissions, but the storage provider must pay a penalty for their late submission. See [storage fault slashing](#storage-fault-slashing). + +## Storage provider pallet hooks + +Substrate pallet hooks execute some actions when certain conditions are met. We use these hooks, when a block finalizes, to check whether storage providers are up to date with their proofs. If a proof needs to be submitted but isn't, the storage provider pallet will penalize the storage provider accordingly by [slashing](#storage-fault-slashing) the collateral that they locked up during the [pre-commit step](#commit). \ No newline at end of file diff --git a/pallets/storage-provider/PROOF-OF-STORAGE.md b/pallets/storage-provider/PROOF-OF-STORAGE.md new file mode 100644 index 000000000..1e43ea2e2 --- /dev/null +++ b/pallets/storage-provider/PROOF-OF-STORAGE.md @@ -0,0 +1,83 @@ +# Proof of Storage + +> [!NOTE] +> Some terms used in this document are described in the [design document](./DESIGN.md#constants--terminology) + +In our parachain within the Polkadot ecosystem, storage providers are required to prove that they hold a copy of the data they have committed to storing at any given point in time. This proof is achieved through a mechanism known as 'challenges'. The process involves the system posing specific questions to the storage providers, who must then provide correct answers to prove they are maintaining the data as promised.
+ +To ensure the integrity and reliability of these proofs, the challenges must: + +1. Target a Random Part of the Data: The challenge must be directed at a randomly selected portion of the stored data. +2. Be Timed Appropriately: Challenges must occur at intervals that make it infeasible, unprofitable, or irrational for the storage provider to discard the data and retrieve it only when challenged. + +General Proof-of-Storage (PoS) schemes are designed to allow users to verify that a storage provider is indeed storing the outsourced data at the time a challenge is issued. However, proving that data has been stored continuously over a period of time poses additional challenges. One method to address this is to require repeated challenges to the storage provider. However, this approach can lead to high communication complexity, which becomes a bottleneck, especially when storage providers must frequently submit proofs to the network. + +To overcome the limitations of continuous Proof-of-Storage, there is a proof called Proof-of-Spacetime (PoSt). PoSt allows a verifier to check whether a storage provider has consistently stored the committed data over Space (the storage capacity) and Time (the duration). This method provides a more efficient and reliable means of proving data storage over extended periods, reducing the need for constant interaction and lowering the overall communication overhead. + +By implementing PoSt, our parachain ensures that storage providers maintain the integrity of the data they store, providing a robust and scalable solution for decentralized storage within the Polkadot ecosystem. + +## Proof of Replication + +To register a storage sector with our parachain, the sector must undergo a sealing process. Sealing is a computationally intensive procedure that generates a unique proof called Proof-of-Replication (PoRep), which attests to the unique representation of the stored data. + +The PoRep proof links together: + +1. The data itself. +2. The storage provider who performs the sealing. +3. The time when the specific data was sealed by the specific storage provider. + +If the same storage provider attempts to seal the same data at a later time, a different PoRep proof will be produced. The time is recorded as the blockchain height at which sealing took place, with the corresponding chain reference termed [SealRandomness](https://spec.filecoin.io/systems/filecoin_mining/sector/sealing/#section-systems.filecoin_mining.sector.sealing.randomness). + +## Generating and Submitting PoRep Proofs + +Once the proof is generated, the storage provider compresses it using a SNARK (Succinct Non-interactive Argument of Knowledge) and submits the result to the blockchain. This submission certifies that the storage provider has indeed replicated a copy of the data they committed to store. +### Phases of the PoRep Process + +The PoRep process is divided into two main phases: + +1. Sealing preCommit Phase 1: In this phase, the PoRep encoding and replication take place, ensuring that the data is uniquely tied to the storage provider and timestamp. +2. Sealing preCommit Phase 2: This phase involves the generation of Merkle proofs and trees using the Poseidon hashing algorithm, providing a secure and verifiable method of proof generation. + +By implementing PoRep within our parachain, we ensure that storage providers are accountable for the data they store, enhancing the integrity and reliability of our decentralized storage solution in the Polkadot ecosystem.
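+
+As a rough illustration of what this on-chain submission carries in our pallet, here is a minimal sketch built around the `SectorPreCommitInfo` type from the [design document](./DESIGN.md#proof-of-replication). All values are made up, and the snippet assumes the pallet's types are in scope; it is not the pallet's actual extrinsic interface.
+
+```rust
+// Sketch only: `SectorPreCommitInfo`, `RegisteredSealProof`, `SectorNumber` and `Cid`
+// are the types defined in the storage provider pallet; every value below is illustrative.
+let pre_commit = SectorPreCommitInfo {
+    seal_proof: RegisteredSealProof::StackedDRG2KiBV1P1,
+    sector_number: 1,
+    // CommR (the sealed CID) produced by the sealing process; placeholder value.
+    sealed_cid: Cid::from("bafy2bza-example"),
+    // Block number after which the sector commitment expires (illustrative).
+    expiration: 100_000,
+};
+```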
+ +## Proof of Spacetime + +From the point of committing to store data, storage providers must continuously prove that they maintain the data they pledged to store. Proof-of-Spacetime (PoSt) is a procedure during which storage providers are given cryptographic challenges that can only be correctly answered if they are actually storing a copy of the sealed data. + +There are two types of challenges (and their corresponding mechanisms) within the PoSt process: WinningPoSt and WindowPoSt, each serving a different purpose. + +- WinningPoSt: Proves that the storage provider has a replica of the data at the specific time they are challenged. A WinningPoSt challenge is issued to a storage provider only if they are selected through the [Secret Leader Election algorithm](https://eprint.iacr.org/2020/025.pdf) to validate the next block. The answer to the WinningPoSt challenge must be submitted within a short [deadline](./DESIGN.md#constants--terminology), making it impractical for the provider to reseal and find the answer on demand. This ensures that the provider maintains a copy of the data at the time of the challenge. +- WindowPoSt: Proves that a copy of the data has been continuously maintained over time. Providers must submit proofs regularly, making it irrational for them to reseal the data every time a WindowPoSt challenge is issued. + +### WinningPoSt + +> [!NOTE] +> This is not relevant for our implementation as block rewards are earned by Collators. + +At the beginning of each block, a small number of storage providers are elected to validate new blocks through the Expected Consensus algorithm. Each elected provider must submit proof that they maintain a sealed copy of the data included in their proposed block before the end of the current block. This proof submission is known as WinningPoSt. Successfully submitting a WinningPoSt proof grants the provider a block reward and the opportunity to charge fees for including transactions in the block. Failing to meet the [deadline](./DESIGN.md#constants--terminology) results in the provider missing the opportunity to validate a block and earn rewards. + +### WindowPoSt + +WindowPoSt audits the commitments made by storage providers. Every 24-hour period, known as a [proving period](./DESIGN.md#constants--terminology), is divided into 30-minute, non-overlapping [deadline](./DESIGN.md#constants--terminology)s, totalling 48 [deadline](./DESIGN.md#constants--terminology)s per period. Providers must demonstrate the availability of all claimed [sectors](./DESIGN.md#constants--terminology) within this time frame. Each proof is limited to 2349 [sectors](./DESIGN.md#constants--terminology) (a partition), with 10 challenges per partition. +[Sectors](./DESIGN.md#constants--terminology) are assigned to [deadline](./DESIGN.md#constants--terminology)s and grouped into partitions. At each [deadline](./DESIGN.md#constants--terminology), providers must prove an entire partition rather than individual [sectors](./DESIGN.md#constants--terminology). For each partition, the provider generates a SNARK-compressed proof and publishes it to the blockchain. This process ensures that each sector is audited at least once every 24 hours, creating a permanent, verifiable record of the provider's commitment. +The more [sectors](./DESIGN.md#constants--terminology) a provider has pledged to store, the more partitions they must prove per [deadline](./DESIGN.md#constants--terminology). 
This setup necessitates ready access to sealed copies of each challenged sector, making it impractical for the provider to reseal data each time a WindowPoSt proof is required. + +### Design of Proof-of-Spacetime + +Each storage provider is allocated a 24-hour [proving period](./DESIGN.md#constants--terminology) upon creation, divided into 48 non-overlapping half-hour [deadline](./DESIGN.md#constants--terminology)s. Each sector is assigned to a specific [deadline](./DESIGN.md#constants--terminology) when proven to the chain and remains assigned to that [deadline](./DESIGN.md#constants--terminology) throughout its lifetime. [Sectors](./DESIGN.md#constants--terminology) are proven in partitions, and the set of [sectors](./DESIGN.md#constants--terminology) due at each [deadline](./DESIGN.md#constants--terminology) is recorded in a collection of 48 bitfields. + +- Open: BlockNumber from which a PoSt Proof for this [deadline](./DESIGN.md#constants--terminology) can be submitted. +- Close: BlockNumber after which a PoSt Proof for this [deadline](./DESIGN.md#constants--terminology) will be rejected. +- FaultCutoff: BlockNumber after which fault declarations for [sectors](./DESIGN.md#constants--terminology) in the upcoming [deadline](./DESIGN.md#constants--terminology) are rejected. +- Challenge: BlockNumber at which the randomness for the challenges is available. + +### PoSt Summary + +- Storage providers maintain their [sectors](./DESIGN.md#constants--terminology) by generating Proofs-of-Spacetime (PoSt) and submitting WindowPoSt proofs for their [sectors](./DESIGN.md#constants--terminology) on time. +- WindowPoSt ensures that [sectors](./DESIGN.md#constants--terminology) are persistently stored over time. +- Each provider proves all their [sectors](./DESIGN.md#constants--terminology) once per [proving period](./DESIGN.md#constants--terminology), with each sector proven by a specific [deadline](./DESIGN.md#constants--terminology). +- The [proving period](./DESIGN.md#constants--terminology) is a 24-hour cycle divided into [deadline](./DESIGN.md#constants--terminology)s, each assigned to specific [sectors](./DESIGN.md#constants--terminology). +- To prove continuous storage of a sector, providers must submit a WindowPoSt for each [deadline](./DESIGN.md#constants--terminology). +- [Sectors](./DESIGN.md#constants--terminology) are grouped into partitions, with each partition proven in a single SNARK proof. + +By implementing PoSt within our parachain, we ensure that storage providers are consistently accountable for the data they store, enhancing the integrity and reliability of our decentralized storage solution in the Polkadot ecosystem. diff --git a/pallets/storage-provider/README.md b/pallets/storage-provider/README.md new file mode 100644 index 000000000..124b30447 --- /dev/null +++ b/pallets/storage-provider/README.md @@ -0,0 +1,3 @@ +# Storage Provider Pallet + +TODO(aidan46, no-ref, 2024-06-04): Add README docs for storage provider diff --git a/pallets/storage-provider/src/benchmarks.rs b/pallets/storage-provider/src/benchmarks.rs new file mode 100644 index 000000000..aa70b0c67 --- /dev/null +++ b/pallets/storage-provider/src/benchmarks.rs @@ -0,0 +1,7 @@ +//! Benchmarking setup for pallet-storage-provider +#![cfg(feature = "runtime-benchmarks")] + +#[benchmarks] +mod benchmarks { + // TODO(aidan46, no-ref, 2024-06-04): Add benchmarks for storage provider pallet. 
+} diff --git a/pallets/storage-provider/src/lib.rs b/pallets/storage-provider/src/lib.rs new file mode 100644 index 000000000..ed4dd7465 --- /dev/null +++ b/pallets/storage-provider/src/lib.rs @@ -0,0 +1,62 @@ +//! # Storage Provider Pallet +//! +//! This pallet is responsible for: +//! - Storage proving operations +//! - Used by the storage provider to generate and submit Proof-of-Replication (PoRep) and Proof-of-Spacetime (PoSt). +//! - Managing and handling collateral for storage deals, penalties, and rewards related to storage deal performance. +//! +//! This pallet holds information about storage providers and provides an interface to modify that information. +//! +//! The Storage Provider Pallet is the source of truth for anything storage provider related. + +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarks; + +mod types; + +pub use pallet::{Config, Pallet}; + +#[frame_support::pallet(dev_mode)] +pub mod pallet { +    use core::fmt::Debug; + +    use codec::{Decode, Encode}; +    use frame_support::pallet_prelude::{IsType, StorageMap}; +    use scale_info::TypeInfo; + +    use crate::types::StorageProviderInfo; + +    #[pallet::pallet] +    #[pallet::without_storage_info] // Allows defining storage items without a fixed size +    pub struct Pallet<T>(_); + +    #[pallet::config] +    pub trait Config: frame_system::Config { +        /// Because this pallet emits events, it depends on the runtime's definition of an event. +        type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>; + +        /// Peer ID is derived by hashing an encoded public key. +        /// Usually represented in bytes. +        /// https://github.com/libp2p/specs/blob/2ea41e8c769f1bead8e637a9d4ebf8c791976e8a/peer-ids/peer-ids.md#peer-ids +        /// More information about libp2p peer ids: https://docs.libp2p.io/concepts/fundamentals/peers/ +        type PeerId: Clone + Debug + Decode + Encode + Eq + TypeInfo; +    } + +    // Need some storage type that keeps track of sectors, deadlines and terminations. +    // Could be added to this type maybe? +    #[pallet::storage] +    #[pallet::getter(fn storage_providers)] +    pub type StorageProviders<T: Config> = +        StorageMap<_, _, T::AccountId, StorageProviderInfo<T::AccountId, T::PeerId>>; + +    #[pallet::event] +    pub enum Event<T: Config> {} + +    #[pallet::error] +    pub enum Error<T> {} + +    #[pallet::call] +    impl<T: Config> Pallet<T> {} +} diff --git a/pallets/storage-provider/src/types.rs b/pallets/storage-provider/src/types.rs new file mode 100644 index 000000000..a9c966177 --- /dev/null +++ b/pallets/storage-provider/src/types.rs @@ -0,0 +1,150 @@ +use codec::{Decode, Encode}; +use scale_info::{ +    prelude::{string::String, vec::Vec}, +    TypeInfo, +}; + +/// SectorNumber is a numeric identifier for a sector. +pub type SectorNumber = u64; + +/// Content identifier +pub type Cid = String; + +#[derive(Decode, Encode, TypeInfo)] +pub struct StorageProviderInfo< +    AccountId: Encode + Decode + Eq + PartialEq, +    PeerId: Encode + Decode + Eq + PartialEq, +> { +    /// Account that owns this StorageProvider +    /// - Income and returned collateral are paid to this address +    /// +    /// Rationale: The owner account is essential for economic transactions and permissions management. +    /// By tying the income and collateral to this address, we ensure that the economic benefits and responsibilities +    /// are correctly attributed. +    pub owner: AccountId, + +    /// Libp2p identity that should be used when connecting to this Storage Provider +    pub peer_id: PeerId, + +    /// The proof type used by this Storage provider for sealing sectors.
+    /// Rationale: Different StorageProviders may use different proof types for sealing sectors. By storing +    /// the `window_post_proof_type`, we can ensure that the correct proof mechanisms are applied and verified +    /// according to the provider's chosen method. This enhances compatibility and integrity in the proof-of-storage +    /// processes. +    pub window_post_proof_type: RegisteredPoStProof, + +    /// Amount of space in each sector committed to the network by this Storage Provider +    /// +    /// Rationale: The `sector_size` indicates the amount of data each sector can hold. This information is crucial +    /// for calculating storage capacity, economic incentives, and the validation process. It ensures that the storage +    /// commitments made by the provider are transparent and verifiable. +    pub sector_size: SectorSize, + +    /// The number of sectors in each Window PoSt partition (proof). +    /// This is computed from the proof type and represented here redundantly. +    /// +    /// Rationale: The `window_post_partition_sectors` field specifies the number of sectors included in each +    /// Window PoSt proof partition. This redundancy ensures that partition calculations are consistent and +    /// simplifies the process of generating and verifying proofs. By storing this value, we enhance the efficiency +    /// of proof operations and reduce computational overhead during runtime. +    pub window_post_partition_sectors: u64, +} + +impl<AccountId, PeerId> StorageProviderInfo<AccountId, PeerId> +where +    AccountId: Encode + Decode + Eq + PartialEq, +    PeerId: Encode + Decode + Eq + PartialEq + Clone, +{ +    /// Create a new instance of StorageProviderInfo +    pub fn new( +        owner: AccountId, +        peer_id: PeerId, +        window_post_proof_type: RegisteredPoStProof, +    ) -> Result { +        let sector_size = window_post_proof_type.sector_size(); + +        let window_post_partition_sectors = window_post_proof_type.window_post_partitions_sector(); + +        Ok(Self { +            owner, +            peer_id, +            window_post_proof_type, +            sector_size, +            window_post_partition_sectors, +        }) +    } + +    /// Updates the owner address. +    pub fn change_owner(&self, owner: AccountId) -> Self { +        Self { +            owner, +            peer_id: self.peer_id.clone(), +            window_post_proof_type: self.window_post_proof_type, +            sector_size: self.sector_size, +            window_post_partition_sectors: self.window_post_partition_sectors, +        } +    } +} + +/// SectorSize indicates one of a set of possible sizes in the network. +#[derive(Encode, Decode, TypeInfo, Clone, Debug, PartialEq, Eq, Copy)] +pub enum SectorSize { +    _2KiB, +} + +/// Proof of Spacetime type, indicating version and sector size of the proof. +#[derive(Debug, Decode, Encode, TypeInfo, PartialEq, Eq, Clone, Copy)] +pub enum RegisteredPoStProof { +    StackedDRGWindow2KiBV1P1, +} + +impl RegisteredPoStProof { +    /// Returns the sector size of the proof type, which is measured in bytes. +    pub fn sector_size(self) -> SectorSize { +        use RegisteredPoStProof::*; +        match self { +            StackedDRGWindow2KiBV1P1 => SectorSize::_2KiB, +        } +    } + +    /// Proof size for each PoStProof type +    #[allow(unused)] +    pub fn proof_size(self) -> usize { +        use RegisteredPoStProof::*; +        match self { +            StackedDRGWindow2KiBV1P1 => 192, +        } +    } +    /// Returns the partition size, in sectors, associated with a proof type. +    /// The partition size is the number of sectors proven in a single PoSt proof. +    pub fn window_post_partitions_sector(self) -> u64 { +        // Resolve to post proof and then compute size from that.
+        use RegisteredPoStProof::*; +        match self { +            StackedDRGWindow2KiBV1P1 => 2, +        } +    } +} + +/// Proof of Spacetime data stored on chain. +#[derive(Debug, Decode, Encode, TypeInfo, PartialEq, Eq, Clone)] +pub struct PoStProof { +    pub post_proof: RegisteredPoStProof, +    pub proof_bytes: Vec<u8>, +} + +/// Seal proof type which defines the version and sector size. +#[allow(non_camel_case_types)] +#[derive(Debug, Decode, Encode, TypeInfo, Eq, PartialEq, Clone)] +pub enum RegisteredSealProof { +    StackedDRG2KiBV1P1, +} + +/// This type is passed into the pre commit function on the storage provider pallet +#[derive(Debug, Decode, Encode, TypeInfo, Eq, PartialEq, Clone)] +pub struct SectorPreCommitInfo { +    pub seal_proof: RegisteredSealProof, +    pub sector_number: SectorNumber, +    pub sealed_cid: Cid, +    pub expiration: u64, +} diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index ec4673222..b5ccf00b5 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -19,6 +19,9 @@ targets = ["x86_64-unknown-linux-gnu"] substrate-wasm-builder = { workspace = true, optional = true } [dependencies] +# Pallets +pallet-storage-provider = { workspace = true, default-features = false } + codec = { workspace = true, default-features = false, features = ["derive"] } hex-literal = { workspace = true, optional = true } log = { workspace = true } @@ -110,6 +113,7 @@ std = [ "pallet-market/std", "pallet-message-queue/std", "pallet-session/std", + "pallet-storage-provider/std", "pallet-sudo/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", @@ -153,6 +157,7 @@ runtime-benchmarks = [ "pallet-collator-selection/runtime-benchmarks", "pallet-market/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", + "pallet-storage-provider/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", @@ -180,6 +185,7 @@ try-runtime = [ "pallet-market/try-runtime", "pallet-message-queue/try-runtime", "pallet-session/try-runtime", + "pallet-storage-provider/try-runtime", "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", diff --git a/runtime/src/configs/mod.rs b/runtime/src/configs/mod.rs index 78ad16f09..013733c17 100644 --- a/runtime/src/configs/mod.rs +++ b/runtime/src/configs/mod.rs @@ -45,6 +45,7 @@ use parachains_common::message_queue::{NarrowOriginToSibling, ParaIdToSibling}; use polkadot_runtime_common::{ xcm_sender::NoPriceForMessageDelivery, BlockHashCount, SlowAdjustingFeeUpdate, }; +use scale_info::prelude::vec::Vec; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_runtime::{traits::Verify, MultiSignature, Perbill}; use sp_version::RuntimeVersion; @@ -306,6 +307,11 @@ impl pallet_collator_selection::Config for Runtime { type WeightInfo = (); } +impl pallet_storage_provider::Config for Runtime { +    type RuntimeEvent = RuntimeEvent; +    type PeerId = Vec<u8>; +} + parameter_types! { /// PalletId of Market Pallet, used to convert it to AccountId which holds the Market funds pub const MarketPalletId: PalletId = PalletId(*b"spMarket"); diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index cbc825eec..e5294e7c0 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -246,6 +246,7 @@ construct_runtime!( CumulusXcm: cumulus_pallet_xcm = 32, MessageQueue: pallet_message_queue = 33, + StorageProvider: pallet_storage_provider::pallet = 34, Market: pallet_market = 35, } );