diff --git a/docs/berkeley-upgrade/appendix.mdx b/docs/berkeley-upgrade/appendix.mdx new file mode 100644 index 000000000..42da28d6d --- /dev/null +++ b/docs/berkeley-upgrade/appendix.mdx @@ -0,0 +1,82 @@ +--- +title: Appendix +sidebar_label: Appendix +hide_title: true +description: Berkeley Upgrade Appendix +keywords: + - Berkeley + - upgrade + - appendix +--- + +# Appendix + +## Migration from o1labs/client-sdk to mina-signer + +The signing library o1labs/client-sdk was deprecated some time ago and will stop working after the upgrade. All users should migrate to the mina-signer library: https://www.npmjs.com/package/mina-signer. + +Below you will find an example of how to use the mina-signer library. Please keep in mind the following: + +1. Make sure to adjust the nonce to the correct nonce on the account you want to use +2. Use fee 1 MINA to get the TX submitted quickly +3. Update the `url` variable with an existing Mina Node GraphQL + +```javascript +import Client from 'mina-signer'; + +// create testnet client and define hard-coded keypair + +const client = new Client({ network: 'testnet' }); + +let privateKey = 'Your private key'; +let publicKey = client.derivePublicKey(privateKey); // or paste the public key directly + +// define and sign payment + +let payment = { + from: publicKey, + to: 'to public key', + amount: 100, + nonce: 1, + fee: 1000000, +}; + +const signedPayment = client.signPayment(payment, privateKey); + +// send payment to graphql endpoint + +let url = 'https://qanet.minaprotocol.network/graphql'; + +let query = `mutation { + sendPayment( + input: ${objectToGraphqlQuery(signedPayment.data)}, + signature: ${objectToGraphqlQuery(signedPayment.signature)} + ) { + payment { id } + } + }`; + +console.log('======================='); +console.log(query); +console.log('======================='); + +let response = await fetch(url, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: 
JSON.stringify({ operationName: null, query, variables: {} }), +}); +if (response.status == 200) { +  let json = await response.json(); +  console.dir(json, { depth: null }); +} else { +  let text = await response.text(); +  console.log('Error:\n', text); +} + +function objectToGraphqlQuery(obj) { +  let json = JSON.stringify(obj, null, 2); +  // removes the quotes on JSON keys +  return json.replace(/\"(\S+)\"\s*:/gm, '$1:'); +} + +``` diff --git a/docs/berkeley-upgrade/archive-migration/appendix.mdx b/docs/berkeley-upgrade/archive-migration/appendix.mdx new file mode 100644 index 000000000..a6c745bb2 --- /dev/null +++ b/docs/berkeley-upgrade/archive-migration/appendix.mdx @@ -0,0 +1,137 @@ +--- +title: Appendix +sidebar_label: Appendix +hide_title: true +description: archive node schema changes between Mainnet and Berkeley +keywords: + - Berkeley + - upgrade + - archive migration + - appendix + - mina archive node + - archive node +--- + +# Appendix + +## Archive node schema changes + +If you are using the Archive Node database directly for your system integrations, then you should understand all the changes that might impact your applications. The most important change is that the `balances` table in the Devnet schema will no longer exist. In the new schema, it is replaced with the table `accounts_accessed` - from an application semantics point of view, the data in `accounts_accessed` is still the same. + +In the Berkeley protocol, accounts can now have the same public key but a different token_id. This means accounts are identified by both their public key and token_id, not just the public key. Consequently, the foreign key for the account in all tables is account_identifier_id instead of public_key_id. + +### Schema differences +- **Removed Types** + - The options `create_token`, `create_account`, and `mint_tokens` have been removed from the user_command_type enumeration. 
+- **Indexes Dropped** + - We've removed several indexes from tables; this may affect how you search and organize data: + - `idx_public_keys_id` + - `idx_public_keys_value` + - `idx_snarked_ledger_hashes_value` + - `idx_blocks_id` + - `idx_blocks_state_hash` +- **Table Removed** + - The `balances` table is no longer available. +- **New Tables Added** + - We've introduced the following new tables: + - `tokens` + - `token_symbols` + - `account_identifiers` + - `voting_for` + - `protocol_versions` + - `accounts_accessed` + - `accounts_created` + - `zkapp_commands` + - `blocks_zkapp_commands` + - `zkapp_field` + - `zkapp_field_array` + - `zkapp_states_nullable` + - `zkapp_states` + - `zkapp_action_states` + - `zkapp_events` + - `zkapp_verification_key_hashes` + - `zkapp_verification_keys` + - `zkapp_permissions` + - `zkapp_timing_info` + - `zkapp_uris` + - `zkapp_updates` + - `zkapp_balance_bounds` + - `zkapp_nonce_bounds` + - `zkapp_account_precondition` + - `zkapp_accounts` + - `zkapp_token_id_bounds` + - `zkapp_length_bounds` + - `zkapp_amount_bounds` + - `zkapp_global_slot_bounds` + - `zkapp_epoch_ledger` + - `zkapp_epoch_data` + - `zkapp_network_precondition` + - `zkapp_fee_payer_body` + - `zkapp_account_update_body` + - `zkapp_account_update` + - `zkapp_account_update_failures` +- **Updated Tables** + - The following tables have been updated: + - `timing_info` + - `user_commands` + - `internal_commands` + - `epoch_data` + - `blocks` + - `blocks_user_commands` + - `blocks_internal_commands` + +### Differences per table +- **`timing_info`** + - Removed columns: + - `token` + - `initial_balance` +- **`user_commands`** + - Removed columns: + - `fee_token` + - `token` +- **`internal_commands`** + - Removed columns: + - `token` + - Renamed column + - `command_type` to `type` +- **`epoch_data`** + - Added columns: + - `total_currency` + - `start_checkpoint` + - `lock_checkpoint` + - `epoch_length` +- **`blocks`** + - Added columns: + - `last_vrf_output` + - 
`min_window_density` + - `sub_window_densities` + - `total_currency` + - `global_slot_since_hard_fork` + - `global_slot_since_genesis` + - `protocol_version_id` + - `proposed_protocol_version_id` + - Removed column: + - `global_slot` +- **`blocks_user_commands`** + - Removed columns: + - `fee_payer_account_creation_fee_paid` + - `receiver_account_creation_fee_paid` + - `created_token` + - `fee_payer_balance` + - `source_balance` + - `receiver_balance` + - Added index: + - `idx_blocks_user_commands_sequence_no` +- **`blocks_internal_commands`** + - Removed columns: + - `receiver_account_creation_fee_paid` + - `receiver_balance` + - Added indexes: + - `idx_blocks_internal_commands_sequence_no` + - `idx_blocks_internal_commands_secondary_sequence_no` + +### Rosetta API new operations + +The Berkeley upgrade introduces two new operation types: +- `zkapp_fee_payer_dec` +- `zkapp_balance_change` diff --git a/docs/berkeley-upgrade/archive-migration-installation.mdx b/docs/berkeley-upgrade/archive-migration/archive-migration-installation.mdx similarity index 87% rename from docs/berkeley-upgrade/archive-migration-installation.mdx rename to docs/berkeley-upgrade/archive-migration/archive-migration-installation.mdx index cadcc2d22..0a9398a87 100644 --- a/docs/berkeley-upgrade/archive-migration-installation.mdx +++ b/docs/berkeley-upgrade/archive-migration/archive-migration-installation.mdx @@ -7,14 +7,14 @@ keywords: - Berkeley - upgrade - archive migration - - installing + - installing - prerequisites - mina archive node - archive node --- The archive node Berkeley migration package is sufficient for satisfying the migration from Devnet/Mainnet to Berkeley. -However, it has some limitations. For example, the migration package does not migrate a non-canonical chain and it skips orphaned blocks that are not part of a canonical chain. +However, it has some limitations. 
For example, the migration package does not migrate a non-canonical chain and it skips orphaned blocks that are not part of a canonical chain. To mitigate these limitations, the archive node maintenance package is available for use by archive node operators who want to maintain a copy of their Devnet and Mainnet databases for historical reasons. @@ -35,23 +35,23 @@ We strongly encourage you to perform the migration on your own data to preserve 1. Download the Devnet/Mainnet archive data using cURL or gsutil: - cURL: - + For Devnet: ```sh curl https://storage.googleapis.com/mina-archive-dumps/devnet-archive-dump-{date}_0000.sql.tar.gz ``` - - For Mainnet: + + For Mainnet: ```sh curl https://storage.googleapis.com/mina-archive-dumps/mainnet-archive-dump-{date}_0000.sql.tar.gz ``` - + To filter the dumps by date, replace `{date}` using the required `yyyy-mm-dd` format. For example, for March 15, 2024, use `2024-03-15`. - + :warning: The majority of backups have the `0000` suffix. If a download with that name suffix is not available, try incrementing it. For example, `0001`, `0002`, and so on. - gsutil: - + ```sh gsutil cp gs://mina-archive-dumps/mainnet-archive-dump-2024-01-15* . ``` @@ -65,13 +65,13 @@ We strongly encourage you to perform the migration on your own data to preserve 3. Import the Devnet/Mainnet archive dump into the Berkeley database. Run this command at the database server: - + ```sh psql -U {user} -f {network}-archive-dump-{date}_0000.sql ``` - + The database in the dump **archive_balances_migrated** is created with the Devnet/Mainnet archive schema. - + Note: This database does not have any Berkeley changes. ## Ensure the location of Google Cloud bucket with the Devnet/Mainnet precomputed blocks @@ -84,17 +84,17 @@ The recommended method is to perform migration on your own data to preserve the ## Validate the Devnet/Mainnet database -The correct Devnet/Mainnet database state is crucial for a successful migration. 
+The correct Devnet/Mainnet database state is crucial for a successful migration. -[Missing blocks](/berkeley-upgrade/mainnet-database-maintenance#missing-blocks) is one the most frequent issues when dealing with the Devnet/Mainnet archive. Although this step is optional, it is strongly recommended that you verify the archive condition before you start the migration process. +[Missing blocks](/berkeley-upgrade/archive-migration/mainnet-database-maintenance#missing-blocks) is one of the most frequent issues when dealing with the Devnet/Mainnet archive. Although this step is optional, it is strongly recommended that you verify the archive condition before you start the migration process. -To learn how to maintain archive data, see [Devnet/Mainnet database maintenance](/berkeley-upgrade/mainnet-database-maintenance). +To learn how to maintain archive data, see [Devnet/Mainnet database maintenance](/berkeley-upgrade/archive-migration/mainnet-database-maintenance). ## Download the migration applications Migration applications are distributed as part of the archive migration Docker and Debian packages. -Choose the packages that are appropriate for your environment. +Choose the packages that are appropriate for your environment. ### Debian packages @@ -118,7 +118,7 @@ To get the Docker image: docker pull gcr.io/o1labs-192920/mina-archive-migration:3.0.1-e848ecb-{codename} ``` -Where supported codenames are: +Where supported codenames are: - bullseye - focal - buster @@ -132,9 +132,9 @@ The Mina Devnet/Mainnet genesis ledger is stored in GitHub in the `mina` reposit You can get the Berkeley schema files from different locations: -- GitHub repository from the `berkeley` branch. +- GitHub repository from the `berkeley` branch. - Note: The `berkeley` branch can contain new updates regarding schema files, so always get the latest schema files instead of using an already downloaded schema. 
+ Note: The `berkeley` branch can contain new updates regarding schema files, so always get the latest schema files instead of using an already downloaded schema. - Archive/Rosetta Docker from `berkeley` version @@ -148,4 +148,4 @@ You can get the Berkeley schema files from different locations: ## Next steps -Congratulations on completing the essential preparation and verification steps. You are now ready to perform the migration steps in [Migrating Devnet/Mainnet Archive to Berkeley Archive](/berkeley-upgrade/migrating-archive-database-to-berkeley). +Congratulations on completing the essential preparation and verification steps. You are now ready to perform the migration steps in [Migrating Devnet/Mainnet Archive to Berkeley Archive](/berkeley-upgrade/archive-migration/migrating-archive-database-to-berkeley). diff --git a/docs/berkeley-upgrade/archive-migration-prerequisites.mdx b/docs/berkeley-upgrade/archive-migration/archive-migration-prerequisites.mdx similarity index 100% rename from docs/berkeley-upgrade/archive-migration-prerequisites.mdx rename to docs/berkeley-upgrade/archive-migration/archive-migration-prerequisites.mdx diff --git a/docs/berkeley-upgrade/worked-archive-example.mdx b/docs/berkeley-upgrade/archive-migration/debian-example.mdx similarity index 93% rename from docs/berkeley-upgrade/worked-archive-example.mdx rename to docs/berkeley-upgrade/archive-migration/debian-example.mdx index dbaa83989..e571373ba 100644 --- a/docs/berkeley-upgrade/worked-archive-example.mdx +++ b/docs/berkeley-upgrade/archive-migration/debian-example.mdx @@ -1,6 +1,6 @@ --- -title: Worked example of Devnet Archive Migration -sidebar_label: Worked example (devnet 2024-03-22) +title: Example of Devnet Archive Migration (Debian) +sidebar_label: Debian example (Devnet) hide_title: true description: A copy-paste example of how to do a Devnet migration. 
keywords: @@ -11,9 +11,9 @@ keywords: - archive node --- -You can follow these steps that can be copy-pasted directly into a fresh Debian 11. +You can follow these steps that can be copy-pasted directly into a fresh Debian 11. -This example uses an altered two-step version of the [full simplified workflow](/berkeley-upgrade/migrating-archive-database-to-berkeley#simplified-approach). +This example uses an altered two-step version of the [full simplified workflow](/berkeley-upgrade/archive-migration/migrating-archive-database-to-berkeley#simplified-approach). ```sh apt update && apt install lsb-release sudo postgresql curl wget gpg # debian:11 is surprisingly light @@ -54,7 +54,6 @@ mina-berkeley-migration-script initial \ --blocks-batch-size 100 --blocks-bucket mina_network_block_data \ --network devnet - # now, do a final migration gsutil cp gs://mina-archive-dumps/devnet-archive-dump-2024-03-22_2050.sql.tar.gz . diff --git a/docs/berkeley-upgrade/archive-migration/docker-example.mdx b/docs/berkeley-upgrade/archive-migration/docker-example.mdx new file mode 100644 index 000000000..15fd47c7c --- /dev/null +++ b/docs/berkeley-upgrade/archive-migration/docker-example.mdx @@ -0,0 +1,73 @@ +--- +title: Example of Mainnet Archive Migration (Docker) +sidebar_label: Docker example (Mainnet) +hide_title: true +description: A copy-paste example of how to do a Mainnet migration. +keywords: + - Berkeley + - upgrade + - archive migration + - mina archive node + - archive node +--- + +You can follow these steps that can be copy-pasted directly into an OS running Docker. 
+ +This example performs a Mainnet initial migration following the [debian-example](/berkeley-upgrade/archive-migration/debian-example) + +```sh + +# Create a new directory for the migration data +mkdir $(pwd)/mainnet-migration && cd $(pwd)/mainnet-migration + +# Create Network +docker network create mainnet + +# Launch Local Postgres Database +docker run --name postgres -d -p 5432:5432 --network mainnet -v $(pwd)/mainnet-migration/postgresql/data:/var/lib/postgresql/data -e POSTGRES_USER=mina -e POSTGRES_PASSWORD=minamina -d postgres:13-bullseye + +export PGHOST="localhost" +export PGPORT=5432 +export PGUSER="mina" +export PGPASSWORD="minamina" + +# Drop DBs if they exist +psql -c "DROP DATABASE IF EXISTS mainnet_balances_migrated;" +psql -c "DROP DATABASE IF EXISTS mainnet_really_migrated;" + +# Create DBs +psql -c "CREATE DATABASE mainnet_balances_migrated;" +psql -c "CREATE DATABASE mainnet_really_migrated;" + +# Retrieve Archive Node Backup +wget https://673156464838-mina-archive-node-backups.s3.us-west-2.amazonaws.com/mainnet/mainnet-archive-dump-2024-04-29_0000.sql.tar.gz +tar -xf mainnet-archive-dump-2024-04-29_0000.sql.tar.gz + +# Replace the database name in the dump +sed -i -e s/archive_balances_migrated/mainnet_balances_migrated/g mainnet-archive-dump-2024-04-29_0000.sql +psql mainnet_balances_migrated -f mainnet-archive-dump-2024-04-29_0000.sql + +# Prepare target +wget https://raw.githubusercontent.com/MinaProtocol/mina/berkeley/src/app/archive/create_schema.sql +wget https://raw.githubusercontent.com/MinaProtocol/mina/berkeley/src/app/archive/zkapp_tables.sql +psql mainnet_really_migrated -f create_schema.sql + +# Start migration +docker create --name mainnet-db-migration \ + -v $(pwd)/mainnet-migration:/data \ + --network mainnet gcr.io/o1labs-192920/mina-archive-migration:3.0.1-e848ecb-bullseye -- bash -c ' + wget http://673156464838-mina-genesis-ledgers.s3-website-us-west-2.amazonaws.com/mainnet/genesis_ledger.json; mina-berkeley-migration-script 
initial \ + --genesis-ledger genesis_ledger.json \ + --source-db postgres://mina:minamina@postgres:5432/mainnet_balances_migrated \ + --target-db postgres://mina:minamina@postgres:5432/mainnet_really_migrated \ + --blocks-batch-size 5000 \ + --blocks-bucket mina_network_block_data \ + --checkpoint-output-path /data/checkpoints/. \ + --precomputed-blocks-local-path /data/precomputed_blocks/. \ + --network mainnet' + +docker start mainnet-db-migration + +docker logs -f mainnet-db-migration + +``` diff --git a/docs/berkeley-upgrade/index.mdx b/docs/berkeley-upgrade/archive-migration/index.mdx similarity index 74% rename from docs/berkeley-upgrade/index.mdx rename to docs/berkeley-upgrade/archive-migration/index.mdx index 60cc07d02..71134142b 100644 --- a/docs/berkeley-upgrade/index.mdx +++ b/docs/berkeley-upgrade/archive-migration/index.mdx @@ -1,6 +1,6 @@ --- -title: Berkeley Upgrade -sidebar_label: Berkeley Upgrade +title: Archive Migration +sidebar_label: Archive Migration hide_title: true description: Berkeley upgrade is a major upgrade that requires all nodes in a network to upgrade to a newer version. It is not backward compatible. keywords: @@ -11,26 +11,27 @@ keywords: - archive node --- -# Berkeley Upgrade +# Archive Migration -The Berkeley upgrade is a major upgrade that requires all nodes in a network to upgrade to a newer version. It is not backward compatible. +The Berkeley upgrade is a major upgrade that requires all nodes in a network to upgrade to a newer version. It is not backward compatible. A major upgrade occurs when there are major changes to the core protocol that require all nodes on the network to update to the latest software. ## How to prepare for the Berkeley upgrade -The Berkeley upgrade requires upgrading all nodes, including archive nodes. One of the required steps is to migrate archive databases from the current Mainnet format to Berkeley. This migration requires actions and efforts from node operators and exchanges. 
+The Berkeley upgrade requires upgrading all nodes, including archive nodes. One of the required steps is to migrate archive databases from the current Mainnet format to Berkeley. This migration requires actions and efforts from node operators and exchanges. Learn about the archive data migration: -- [Understanding the migration process](/berkeley-upgrade/understanding-archive-migration) -- [Prerequisites before migration](/berkeley-upgrade/archive-migration-prerequisites) -- [Suggested installation procedure](/berkeley-upgrade/archive-migration-installation) -- [How to perform archive migration](/berkeley-upgrade/migrating-archive-database-to-berkeley) +- [Understanding the migration process](/berkeley-upgrade/archive-migration/understanding-archive-migration) +- [Prerequisites before migration](/berkeley-upgrade/archive-migration/archive-migration-prerequisites) +- [Suggested installation procedure](/berkeley-upgrade/archive-migration/archive-migration-installation) +- [How to perform archive migration](/berkeley-upgrade/archive-migration/migrating-archive-database-to-berkeley) Finally, see the shell script example that is compatible with a stock Debian 11 container: -- [Worked example using March 22 data](/berkeley-upgrade/worked-archive-example) +- [Worked Devnet Debian example using March 22 data](/berkeley-upgrade/archive-migration/debian-example) +- [Worked Mainnet Docker example using April 29 data](/berkeley-upgrade/archive-migration/docker-example) ## What will happen with original Devnet/Mainnet data @@ -44,4 +44,4 @@ After the migration, you will have two databases: There is no requirement to preserve the original Devnet/Mainnet database after migration. However, if for some reason you want to keep the Mainnet orphaned or non-canonical pending blocks, you can download the archive maintenance package for the Devnet/Mainnet database. 
-To learn about maintaining archive data, see [Devnet/Mainnet database maintenance](/berkeley-upgrade/mainnet-database-maintenance). +To learn about maintaining archive data, see [Devnet/Mainnet database maintenance](/berkeley-upgrade/archive-migration/mainnet-database-maintenance). diff --git a/docs/berkeley-upgrade/mainnet-database-maintenance.mdx b/docs/berkeley-upgrade/archive-migration/mainnet-database-maintenance.mdx similarity index 96% rename from docs/berkeley-upgrade/mainnet-database-maintenance.mdx rename to docs/berkeley-upgrade/archive-migration/mainnet-database-maintenance.mdx index f53366d05..531aa9e5a 100644 --- a/docs/berkeley-upgrade/mainnet-database-maintenance.mdx +++ b/docs/berkeley-upgrade/archive-migration/mainnet-database-maintenance.mdx @@ -7,7 +7,7 @@ keywords: - Berkeley - upgrade - archive migration - - planning + - planning - prerequisites - mina archive node - archive node @@ -18,7 +18,7 @@ keywords: # Devnet/Mainnet database maintenance -After the Berkeley migration, the original Devnet/Mainnet database is not required unless you are interested in +After the Berkeley migration, the original Devnet/Mainnet database is not required unless you are interested in preserving some aspect of the database that is lost during the migration process. Two databases exist after the successful migration: @@ -31,13 +31,13 @@ Two databases exist after the successful migration: - Without pending blocks that are not in the canonical chain - With all pending blocks on the canonical chain converted to canonical blocks -The o1Labs and Mina Foundation teams have consistently prioritized rigorous testing and the delivery of high-quality software products. +The o1Labs and Mina Foundation teams have consistently prioritized rigorous testing and the delivery of high-quality software products. -However, being human entails the possibility of making mistakes. +However, being human entails the possibility of making mistakes. 
## Known issues -Recently, a few mistakes were identified while working on a version of Mina used on Mainnet. These issues were promptly addressed; however, within the decentralized environment, archive nodes can retain historical issues despite our best efforts. +Recently, a few mistakes were identified while working on a version of Mina used on Mainnet. These issues were promptly addressed; however, within the decentralized environment, archive nodes can retain historical issues despite our best efforts. Fixes are available for the following known issues: @@ -98,7 +98,7 @@ mina-replayer \ where: -- `archive-uri` - connection string to the archive database +- `archive-uri` - connection string to the archive database - `input-file` - JSON file that holds the archive database - `output-file` - JSON file that will hold the ledger with auxiliary information, like global slot and blockchain height, which will be dumped on the last block - `checkpoint-interval` - frequency of checkpoints expressed in blocks count @@ -131,12 +131,12 @@ mina-replayer --archive-uri {db_connection_string} --input-file reference_replay where: -- `archive-uri` - connection string to the archive database +- `archive-uri` - connection string to the archive database - `input-file` - JSON file that holds the archive database - `output-file` - JSON file that will hold the ledger with auxiliary information, like global slot and blockchain height, which will be dumped on the last block - `checkpoint-interval` - frequency of checkpoints expressed in blocks count - `replayer_input_file.json` - JSON file constructed from the Devnet/Mainnet genesis ledger: - + ``` jq '.ledger.accounts' genesis_ledger.json | jq '{genesis_ledger: {accounts: .}}' > replayer_input_config.json ``` @@ -149,9 +149,9 @@ where: The daemon node unavailability can cause the archive node to miss some of the blocks. This recurring missing blocks issue consistently poses challenges. 
To address this issue, you can reapply missing blocks. -If you uploaded the missing blocks to Google Cloud, the missing blocks can be reapplied from precomputed blocks to preserve chain continuity. +If you uploaded the missing blocks to Google Cloud, the missing blocks can be reapplied from precomputed blocks to preserve chain continuity. -1. To automatically verify and patch missing blocks, use the [download_missing_blocks.sh](https://raw.githubusercontent.com/MinaProtocol/mina/2.0.0berkeley_rc1/src/app/rosetta/download-missing-blocks.sh) script. +1. To automatically verify and patch missing blocks, use the [download_missing_blocks.sh](https://raw.githubusercontent.com/MinaProtocol/mina/2.0.0berkeley_rc1/src/app/rosetta/download-missing-blocks.sh) script. The `download-missing-blocks` script uses `localhost` as the database host so the script assumes that psql is running on localhost on port 5432. Modify `PG_CONN` in `download_missing_block.sh` for your environment. @@ -164,15 +164,15 @@ If you uploaded the missing blocks to Google Cloud, the missing blocks can be re ``` 1. Run the `mina-missing-blocks-auditor` script from the database host: - + For Devnet: - + ```sh download-missing-blocks.sh devnet {db_user} {db_password} ``` For Mainnet: - + ```sh download-missing-blocks.sh mainnet {db_user} {db_password} ``` @@ -193,4 +193,4 @@ Note: It's important to highlight that precomputed blocks for **Devnet** between ## Next steps -Now that you have completed the steps to properly maintain the correctness of the archive database, you are ready to perform the archive [migration process](/berkeley-upgrade/migrating-archive-database-to-berkeley). +Now that you have completed the steps to properly maintain the correctness of the archive database, you are ready to perform the archive [migration process](/berkeley-upgrade/archive-migration/migrating-archive-database-to-berkeley). 
diff --git a/docs/berkeley-upgrade/migrating-archive-database-to-berkeley.mdx b/docs/berkeley-upgrade/archive-migration/migrating-archive-database-to-berkeley.mdx similarity index 94% rename from docs/berkeley-upgrade/migrating-archive-database-to-berkeley.mdx rename to docs/berkeley-upgrade/archive-migration/migrating-archive-database-to-berkeley.mdx index cc4590da6..cfe727772 100644 --- a/docs/berkeley-upgrade/migrating-archive-database-to-berkeley.mdx +++ b/docs/berkeley-upgrade/archive-migration/migrating-archive-database-to-berkeley.mdx @@ -1,5 +1,5 @@ --- -title: Migrating Devnet/Mainnet Archive to Berkeley Archive +title: Migrating Devnet/Mainnet Archive to Berkeley Archive sidebar_label: Performing archive migration hide_title: true description: Steps to properly migrate archives from Devnet/Mainnet to Berkeley. @@ -7,7 +7,7 @@ keywords: - Berkeley - upgrade - archive migration - - planning + - planning - prerequisites - mina archive node - archive node @@ -15,15 +15,15 @@ keywords: # Migrating Devnet/Mainnet Archive to Berkeley Archive -Before you start the process to migrate your archive database from the current Mainnet or Devnet format to Berkeley, be sure that you: +Before you start the process to migrate your archive database from the current Mainnet or Devnet format to Berkeley, be sure that you: -- [Understand the Archive Migration](/berkeley-upgrade/understanding-archive-migration) -- Meet the foundational requirements in [Archive migration prerequisites](/berkeley-upgrade/archive-migration-prerequisites) -- Have successfully installed the [archive migration package](/berkeley-upgrade/archive-migration-installation) +- [Understand the Archive Migration](/berkeley-upgrade/archive-migration/understanding-archive-migration) +- Meet the foundational requirements in [Archive migration prerequisites](/berkeley-upgrade/archive-migration/archive-migration-prerequisites) +- Have successfully installed the [archive migration 
package](/berkeley-upgrade/archive-migration/archive-migration-installation) ## Migration process -The Devnet/Mainnet migration can take up to a couple of days. +The Devnet/Mainnet migration can take up to a couple of days. Therefore, you can achieve a successful migration by using three stages: - **Stage 1:** Initial migration @@ -49,7 +49,7 @@ For convenience, use the `mina-berkeley-migration-script` app if you do not need ### Stage 1: Initial migration ``` -mina-berkeley-migration-script \ +mina-berkeley-migration-script \ initial \ --genesis-ledger ledger.json \ --source-db postgres://postgres:postgres@localhost:5432/source \ @@ -62,7 +62,7 @@ mina-berkeley-migration-script \ --network NETWORK ``` -where: +where: `-g | --genesis-ledger`: path to the genesis ledger file @@ -141,7 +141,7 @@ mina-berkeley-migration-script \ -fc fork-genesis-config.json ``` -where: +where: `-g | --genesis-ledger`: path to the genesis ledger file @@ -175,7 +175,7 @@ This first stage requires only the initial Berkeley schema, which is the foundat - Inputs - Unmigrated Devnet/Mainnet database - - Devnet/Mainnet genesis ledger + - Devnet/Mainnet genesis ledger - Empty target Berkeley database with the schema created, but without any content - Outputs @@ -196,7 +196,7 @@ mina-berkeley-migration \ --network NETWORK ``` -where: +where: `--batch-size`: number of precomputed blocks to be fetched at one time from Google Cloud. A larger number, like 1000, can help speed up migration process. 
@@ -244,7 +244,7 @@ where: `--input-file`: path to the replayer input file, see below on how's created -`replayer_input_config.json`: is a file constructed out of network genesis ledger: +`replayer_input_config.json`: is a file constructed out of network genesis ledger: ``` jq '.ledger.accounts' genesis_ledger.json | jq '{genesis_ledger: {accounts: .}}' > replayer_input_config.json ``` @@ -255,13 +255,13 @@ where: #### Phase 3: Validations -Use the **berkeley_migration_verifier** app to perform checks for both the fully migrated and partially migrated databases. +Use the **berkeley_migration_verifier** app to perform checks for both the fully migrated and partially migrated databases. ``` mina-berkeley-migration-verifier \ pre-fork \ --mainnet-archive-uri postgres://postgres:postgres@localhost:5432/source \ - --migrated-archive-uri postgres://postgres:postgres@localhost:5432/migrated + --migrated-archive-uri postgres://postgres:postgres@localhost:5432/migrated ``` where: @@ -272,7 +272,7 @@ where: ### Stage 2: Incremental migration -After the initial migration, the data is migrated data up to the last canonical block. However, Devnet/Mainnet data is progressing with new blocks that must also be migrated again and again until the fork block is announced. +After the initial migration, the data is migrated data up to the last canonical block. However, Devnet/Mainnet data is progressing with new blocks that must also be migrated again and again until the fork block is announced. :info: Incremental migration can, and probably must, be repeated a couple of times until the fork block is announced by Mina Foundation. Run the incremental migration multiple times with the latest Devnet/Mainnet database and the latest replayer checkpoint file. 
@@ -280,8 +280,8 @@ Run the incremental migration multiple times with the latest Devnet/Mainnet data - Inputs - Latest Devnet/Mainnet database - Devnet/Mainnet genesis ledger - - Replayer checkpoint from last run - - Migrated berkeley database from initial migration + - Replayer checkpoint from last run + - Migrated berkeley database from initial migration - Outputs - Migrated Devnet/Mainnet database to the Berkeley format up to the last canonical block @@ -348,7 +348,7 @@ Incremental migration can be run continuously on top of the initial migration or #### Phase 3: Validations -Use the **berkeley_migration_verifier** app to perform checks for both the fully migrated and partially migrated database. +Use the **berkeley_migration_verifier** app to perform checks for both the fully migrated and partially migrated database. ``` mina-berkeley-migration-verifier \ @@ -363,18 +363,18 @@ where: `--migrated-archive-uri`: connection string to the database that will hold the migrated data -Note that: you can run incremental migration continuously on top of the initial migration or the last incremental until the fork block is announced. +Note that: you can run incremental migration continuously on top of the initial migration or the last incremental until the fork block is announced. ### Stage 3: Remainder migration -When the fork block is announced, you must tackle the remainder migration. This is the last migration run -you need to perform. In this stage, you close the migration cycle with the last migration of the remainder blocks between the current last canonical block and the fork block (which can be pending, so you don't need to wait 290 blocks until it would become canonical). +When the fork block is announced, you must tackle the remainder migration. This is the last migration run +you need to perform. 
In this stage, you close the migration cycle with the last migration of the remainder blocks between the current last canonical block and the fork block (which can be pending, so you don't need to wait 290 blocks until it would become canonical). You must use `--fork-state-hash` as an additional parameter to the **berkeley-migration** app. - Inputs - Latest Devnet/Mainnet database - Devnet/Mainnet genesis ledger - - Replayer checkpoint from last run + - Replayer checkpoint from last run - Migrated Berkeley database from last run - Fork block state hash @@ -451,7 +451,7 @@ where: #### Phase 3: Validations -Use the **berkeley_migration_verifier** app to perform checks for both the fully migrated and partially migrated databases. +Use the **berkeley_migration_verifier** app to perform checks for both the fully migrated and partially migrated databases. ``` mina-berkeley-migration-verifier \ @@ -472,13 +472,17 @@ where: `--fork-config`: fork genesis config file is the new genesis config that is distributed with the new daemon and is published after the fork block is announced -### Example migration steps using Mina Foundation data for Devnet +### Example migration steps using Mina Foundation data for Devnet using Debian -See: [Worked example using March 22 data](/berkeley-upgrade/worked-archive-example) +See: [Worked example using March 22 data](/berkeley-upgrade/archive-migration/debian-example) + +### Example migration steps using Mina Foundation data for Mainnet using Docker + +See: [Worked example using March 22 data](/berkeley-upgrade/archive-migration/docker-example) ## How to verify a successful migration -o1Labs and Mina Foundation make every effort to provide reliable tools of high quality. However, it is not possible to eliminate all errors and test all possible Mainnet archive variations. +o1Labs and Mina Foundation make every effort to provide reliable tools of high quality. 
However, it is not possible to eliminate all errors and test all possible Mainnet archive variations. All important checks are implemented in the `mina-berkeley-migration-verifier` application. However, you can use the following checklist if you want to perform the checks manually: @@ -496,17 +500,17 @@ However, you can use the following checklist if you want to perform the checks m ## Tips and tricks -We are aware that the migration process can be very long (a couple of days). Therefore, we encourage you to use cron jobs that migrate data incrementally. +We are aware that the migration process can be very long (a couple of days). Therefore, we encourage you to use cron jobs that migrate data incrementally. The cron job requires access to Google Cloud buckets (or other storage): - A bucket to store migrated-so-far database dumps - A bucket to store checkpoint files We are tightly coupled with Google Cloud infrastructure due to the precomputed block upload mechanism. -This is why we are using also buckets for storing dumps and checkpoint. However, you do not have to use Google Cloud for other things than +This is why we are using also buckets for storing dumps and checkpoint. However, you do not have to use Google Cloud for other things than precomputed blocks. With configuration, you can use any gsutil-compatible storage backend (for example, S3). -Before running the cron job, upload an initial database dump and an initial checkpoint file. +Before running the cron job, upload an initial database dump and an initial checkpoint file. 
To create the files, run these steps locally: @@ -546,8 +550,8 @@ After solving any of below issues you can rerun process and migration will continue form last position #### Async was unable to add a file descriptor to its table of open file descriptors -For example: - +For example: + ``` ("Async was unable to add a file descriptor to its table of open file descriptors" (file_descr 18) @@ -590,7 +594,7 @@ You provided the migrated schema as source one when invoking script or berkeley- #### Poor performance of migration when accessing remote database -We conducted migration tests with both a local database and a distant database (RDS). +We conducted migration tests with both a local database and a distant database (RDS). The migration using the local database appears to process significantly faster. We strongly suggest to use offline database installed locally #### ERROR: out of shared memory @@ -600,14 +604,14 @@ The migration using the local database appears to process significantly faster. \nHINT: You might need to increase max_pred_locks_per_transaction ``` -Solution is either to increase `max_pred_locks_per_transaction` setting in postgres database. +Solution is either to increase `max_pred_locks_per_transaction` setting in postgres database. Alternative is to isolate database from mainnet traffic (for example by exporting dump from live database and import it on isolated environment) #### Berkeley migration app is consuming all of my resources When running a full migration, you can stumble on memory leaks that prevent you from cleanly performing the migration in one pass. A machine with 64 GB of RAM can be frozen after ~40k migrated blocks. Each 200 blocks inserted into the database increases the memory leak by 4-10 MB. -A potential workaround is to split the migration into smaller parts using cron jobs or automation scripts. +A potential workaround is to split the migration into smaller parts using cron jobs or automation scripts. 
## FAQ diff --git a/docs/berkeley-upgrade/understanding-archive-migration.mdx b/docs/berkeley-upgrade/archive-migration/understanding-archive-migration.mdx similarity index 100% rename from docs/berkeley-upgrade/understanding-archive-migration.mdx rename to docs/berkeley-upgrade/archive-migration/understanding-archive-migration.mdx diff --git a/docs/berkeley-upgrade/flags-configs.mdx b/docs/berkeley-upgrade/flags-configs.mdx new file mode 100644 index 000000000..c319afd1b --- /dev/null +++ b/docs/berkeley-upgrade/flags-configs.mdx @@ -0,0 +1,138 @@ +--- +title: Post-Upgrade Flags and Configurations for Mainnet +sidebar_label: Post-Upgrade Flags and Configurations +hide_title: true +description: Post-Upgrade Flags and Configurations for Mainnet +keywords: + - Berkeley + - upgrade + - flags + - configurations +--- + +# Post-Upgrade Flags and Configurations for Mainnet + +Please refer to the Berkeley node release notes [here](https://github.com/MinaProtocol/mina/releases/tag/3.0.0devnet) **_[NEEDS-UPDATE]_**. + +### Network details +**_[NEEDS-UPDATE]_** + +``` +Chain ID +29936104443aaf264a7f0192ac64b1c7173198c1ed404c1bcff5e562e05eb7f6 + +Git SHA-1 +dc6bf78b8ddbbca3a1a248971b76af1514bf05aa + +Seed List +https://bootnodes.minaprotocol.com/networks/mainnet.txt + +Node build +https://github.com/MinaProtocol/mina/releases/tag/3.0.0devnet +``` + +### Block Producer​s + +Start your node in the Devnet with the flags and environment variables listed below. + +``` +mina daemon +--block-producer-key +--config-directory +--file-log-rotations 500 +--generate-genesis-proof true +--libp2p-keypair +--log-json +--peer-list-url https://bootnodes.minaprotocol.com/networks/mainnet.txt + +ENVIRONMENT VARIABLES +RAYON_NUM_THREADS=6 +MINA_LIBP2P_PASS +MINA_PRIVKEY_PASS +``` + +### SNARK Coordinator +Configure your node in the Devnet with specific flags and environment variables as listed. 
+ +``` +mina daemon +--config-directory +--enable-peer-exchange true +--file-log-rotations 500 +--libp2p-keypair +--log-json +--peer-list-url https://bootnodes.minaprotocol.com/networks/mainnet.txt +--run-snark-coordinator +--snark-worker-fee 0.001 +--work-selection [seq|rand] + +ENVIRONMENT VARIABLES +MINA_LIBP2P_PASS +``` + +### SNARK Workers +Connect to SNARK Coordinator node if required and run the following flags. +``` +mina internal snark-worker +--proof-level full +--shutdown-on-disconnect false +--daemon-address + +ENVIRONMENT VARIABLES +RAYON_NUM_THREADS:8 +``` + +### Archive Node +Running an Archive Node involves setting up a non-block-producing node and a PostgreSQL database configured with specific flags and environment variables. + +For more information about running archive nodes, see [Archive Node](/node-operators/archive-node). + +The PostgreSQL database requires two schemas: +1. The PostgreSQL schema used by the Mina archive database: in the [release notes](https://github.com/MinaProtocol/mina/releases/tag/3.0.0devnet) **_[NEEDS-UPDATE]_** +2. The PostgreSQL schema extensions to support zkApp commands: in the [release notes](https://github.com/MinaProtocol/mina/releases/tag/3.0.0devnet) **_[NEEDS-UPDATE]_** + +The non-block-producing node must be configured with the following flags: +``` +mina daemon +--archive-address : +--config-directory +--enable-peer-exchange true +--file-log-rotations 500 +--generate-genesis-proof true +--libp2p-keypair +--log-json +--peer-list-url https://bootnodes.minaprotocol.com/networks/mainnet.txt + +ENVIRONMENT VARIABLES +MINA_LIBP2P_PASS +``` + +This non-block-producing node connects to the archive node with the addresses and port specified in the `--archive-address` flag. + +The **archive node** command looks like this: + +``` +mina-archive run +--metrics-port +--postgres-uri postgres://:@
:/ +--server-port 3086 +--log-json +--log-level DEBUG +``` + +### Rosetta API +Once you have the Archive Node stack up and running, start the Rosetta API Docker image with the following command: + +``` +docker run +--name rosetta --rm \ +-p 3088:3088 \ +--entrypoint '' \ +gcr.io/o1labs-192920/mina-rosetta: \ +/usr/local/bin/mina-rosetta \ +--archive-uri "${PG_CONNECTION_STRING}" \ +--graphql-uri "${GRAPHQL_URL}" \ +--log-json \ +--log-level ${LOG_LEVEL} \ +--port 3088 +``` diff --git a/docs/berkeley-upgrade/requirements.mdx b/docs/berkeley-upgrade/requirements.mdx new file mode 100644 index 000000000..43eeb7115 --- /dev/null +++ b/docs/berkeley-upgrade/requirements.mdx @@ -0,0 +1,52 @@ +--- +title: Requirements +sidebar_label: Requirements +hide_title: true +description: Berkeley upgrade is a major upgrade that requires all nodes in a network to upgrade to a newer version. It is not backward compatible. +keywords: + - Berkeley + - upgrade + - hardware requirements +--- + +# Requirements + +## Hardware Requirements + +Please note the following are the hardware requirements for each node type after the upgrade: + +| Node Type | Memory | CPU | Storage | Network | +|--|--|--|--|--| +| Mina Daemon Node | 32 GB RAM | 8 core processor with BMI2 and AVX CPU instruction set are required | 64 GB | 1 Mbps Internet Connection | +| SNARK Coordinator | 32 GB RAM | 8 core processor | 64 GB | 1 Mbps Internet Connection | +| SNARK Worker | 32 GB RAM | 4 core/8 threads per worker with BMI2 and AVX CPU instruction set are required | 64 GB | 1 Mbps Internet Connection | +| Archive Node | 32 GB RAM | 8 core processor | 64 GB | 1 Mbps Internet Connection | +| Rosetta API standalone Docker image | 32 GB RAM | 8 core processor | 64 GB | 1 Mbps Internet Connection | + +## Mina Daemon Requirements + +### IP and Port configuration​ + +**IP:** + +By default the Mina Daemon will attempt to retrieve its public IP address from the system. 
If you are running the node behind a NAT or firewall, you can set the `--external-ip` flag to specify the public IP address. + +**Port:** + +Nodes must expose a port publicly to communicate with other peers. +Mina uses by default the port `8302` which is the default libp2p port. + +You can use a different port by setting the `--external-port` flag. + +### Generation of libp2p keypair​ + +Each node within the network must possess its own distinct libp2p key pair, although the same libp2p keys can be reused from before the upgrade, in case you need to manually generate new libp2p keys use the following command: + +``` +mina libp2p generate-keypair -privkey-path +``` +Further information on [generating key pairs](/node-operators/generating-a-keypair) on Mina Protocol. + +### Node Auto-restart + +Ensure your nodes are set to restart automatically after a crash. For guidance, refer to the [auto-restart instructions](/node-operators/block-producer-node/connecting-to-the-network#start-a-mina-node-with-auto-restart-flows-using-systemd) diff --git a/docs/berkeley-upgrade/upgrade-steps.mdx b/docs/berkeley-upgrade/upgrade-steps.mdx new file mode 100644 index 000000000..dc95dd5ed --- /dev/null +++ b/docs/berkeley-upgrade/upgrade-steps.mdx @@ -0,0 +1,124 @@ +--- +title: Upgrade Steps +sidebar_label: Upgrade Steps +hide_title: true +description: Detailed upgrade steps and operators' tasks +keywords: + - Berkeley + - upgrade + - Detailed upgrade steps and operators' tasks +--- + +# Upgrade Steps + + Mainnet Upgrade steps + +## Pre-Upgrade + +- During the Pre-Upgrade phase, node operators shall prepare for the upcoming upgrade. The most important steps are: + - Review the [upgrade readiness checklist](https://docs.google.com/document/d/1rTmJvyaK33dWjJXMOSiUIGgf8z7turxolGHUpVHNxEU/edit#heading=h.2hqz0ixwjk3f) to confirm they have covered the required steps. 
+ - Upgrade their nodes to the 1.4.1 stable version + - Ensure servers are provisioned to run Berkeley nodes, meeting the new hardware requirements + - Upgrade their nodes to the node version 1.5.0, with stop-slots, when this version becomes available + - Start the archive node initial migration if they run archive nodes and wish to perform the migration in a decentralized manner + +**Please note: ** a simplified Node Status service will be part of the upgrade tooling and enabled by default in Pre-Upgrade release with the stop-slots (1.5.0). This feature will allow for a safe upgrade by monitoring the amount of upgraded active stake. Only non-sensitive data will be reported. If operators are not comfortable sharing their node version, they will have the option to disable the node version reports by using the node flag `--node-stats-type none` + +### Block Producers and SNARK Workers +1. Review the [upgrade readiness checklist](https://docs.google.com/document/d/1rTmJvyaK33dWjJXMOSiUIGgf8z7turxolGHUpVHNxEU). +1. Provision servers that meet the minimum hardware requirements, mainly the new 32Gb RAM requirement and the support for AVX and BMI2 CPU instructions. +1. Upgrade nodes to node version 1.5.0 when available (1.5.0 has built-in stop slots). + +### Archive Node Operators and Rosetta Operators +- Two migration processes will be available to archive node operators: trustless and trustful. If the archive node operator wants to perform the trustless migration, follow these steps; otherwise, go to the Upgrade phase- the trustful migration will rely on o1Labs database exports and docker images to migrate the archive node database and doesn’t require any actions at this stage. + +1. Trustless migration: + - Perform the initial archive node migration, since Devnet is a long-lived network, the initial migration process can take up to 48 hours, depending on your server specification and infrastructure. 
+ - If your Mina Daemon, archive node, or PostgreSQL database runs on different machines, the migration performance will be greatly impacted. + - For more information on the archive node migration process, please refer to the [Archive Migration](/berkeley-upgrade/archive-migration) section. +2. Upgrade all nodes to the latest stable version [1.4.1](https://github.com/MinaProtocol/mina/releases/tag/1.4.1). +3. Provision servers that meet the minimum hardware requirements, mainly the new 32Gb RAM requirement. +4. Upgrade their nodes to the version that has the built-in stop slots before the pre-defined stop-transaction-slot. + +### Exchanges +1. Make sure to test your system integration with Berkeley's new features. Pay special attention to: + - If you use the **o1labs/client-sdk** library to sign transactions, you should switch to **mina-signer** https://www.npmjs.com/package/mina-signer. **o1labs/client-sdk was deprecated some time ago and will be unusable** once the network has been upgraded. Please review the migration instructions in Appendix 2 of this document + - If you rely on the archive node SQL database tables, please review the schema changes in Appendix 1 of this document. +2. Upgrade all nodes to the latest stable version [1.4.1](https://github.com/MinaProtocol/mina/releases/tag/1.4.1). +3. Provision servers that meet the minimum hardware requirements, mainly the new 32Gb RAM requirement. +4. Upgrade their nodes to the version that has the built-in stop slots before the pre-defined stop-transaction-slot. + +## State Finalization +- Between the predefined stop-transaction-slot and stop-network-slot, a stabilization period of 100 slots will occur. During this phase, the network consensus will not accept new blocks with transactions on them, including coinbase transactions. The state finalization period safeguards all nodes to reach a consensus on the latest network state before the upgrade. 
+- During the state finalization slots, it is crucial to maintain a high block density, so block producers and SNARK workers shall continue running their nodes to support the network's stability and security. +- Archive nodes should also continue to execute to ensure finalized blocks are in the database and can be migrated, preserving the integrity and accessibility of the network's history. + +### Block Producers and SNARK Workers +1. It is crucial for the network's successful upgrade that all block producers and SNARK workers maintain their block-producing nodes up and running throughout the state finalization phase. +2. If you are running multiple daemons like is common with many operators, you can run one single node at this stage. +3. If you are a Delegation Program operator, remember that your uptime data will continue to be tracked during the state finalization phase and will be considered for the delegation grant in the following epoch. + +### Archive Node Operators and Rosetta Operators +**If you plan to do the trustful migration, you can skip this step.** +If you are doing the trustless migration, then: +1. Continue to execute the archive node to ensure finalized blocks are in the database and can be migrated. +2. Continue to run incremental archive node migrations until after the network stops at the stop-network slot. +3. For more information on the archive node migration process, please refer to the [Archive Migration](/berkeley-upgrade/archive-migration) section + +### Exchanges + +Exchanges shall disable MINA deposits and withdrawals during the state finalization period (the period between stop-transaction-slot and stop-network-slot) since any transactions after the stop-transaction-slot will not be part of the upgraded chain. + +Remember that although you might be able to submit transactions, the majority of the block producers will be running a node that discards any blocks with transactions. 
+ +## Upgrade + +- Starting at the stop-network-slot the network will not produce nor accept new blocks, resulting in halting the network. During the upgrade period, o1Labs will use automated tooling to export the network state based on the block at the slot just before the stop-transaction-slot. The exported state will then be baked in the new Berkeley build that will be used to initiate the upgraded network. It is during the upgrade windows that the Berkeley network infrastructure will be bootstrapped and seed nodes will become available. o1Labs will also finalize the archive node migration and publish the PostSQL database dumps for import by the archive node operators that wish to bootstrap their archives in a trustful manner. +- There is a tool to validate that the Berkeley node was built from the pre upgrade network state. To validate follow the instructions in this [location](https://github.com/MinaProtocol/mina/blob/berkeley/docs/upgrading-to-berkeley.md) + +### Block Producers and SNARK Workers +1. During the upgrade phase (between stop-network-slot and the publishing of the Berkeley release), block producers can shut down their nodes. +2. After the publication of the Berkeley node release, block producers and SNARK workers should upgrade their nodes and be ready for block production at the genesis timestamp, meaning the slot when the first Berkeley block will be produced. +3. It is possible to continue using the same libp2p key after the upgrade. Remember to adjust the new flag to pass the libp2p key to the node. + +### Archive Node Operators and Rosetta Operators +1. Upon publishing the archive node Berkeley release, archive node operators and Rosetta operators shall upgrade their systems. +There will be both Docker images and archive node releases to choose from. +2. Depending on the chosen migration method: + - Trustless + - Operators should point their Berkeley archive process to the previously migrated database. 
+ - Trustful + - Operators shall import the SQL dump file provided by o1Labs to a freshly created database. + - Operators should point their Berkeley archive process to the newly created database. + +**Please note:** both the trustless and trustful migration processes will discard all Devnet blocks that are not canonical. If you wish to preserve the entire block history, i.e. including non-canonical blocks, you should maintain the Devnet archive node database for posterior querying needs. + +### Exchanges +1. Exchanges shall disable MINA deposits and withdrawals during the entirety of the upgrade downtime, since the stop-transaction-slot until the Devnet Berkeley network is operational. +2. After the Berkeley releases are published, exchanges shall upgrade their nodes and prepare for the new network to start block production. + +## Post-Upgrade +- At approximately 1 hour after the publishing of the Berkeley node release, at a predefined slot (Berkeley genesis timestamp), block production will start, and the network is successfully upgraded. +- Node operators can monitor their nodes and provide feedback to the technical team in case of any issues. Builders can start deploying zkApps. +- **Please note:** The Node Status service will not be enabled by default in the Berkeley release. If you wish to provide Node Status and Error metrics and reports to Mina Foundation, helping monitor the network in the initial phase, please use the following flags when running your nodes: + - `--node-stats-type [full|simple]` + - `--node-status-url https://nodestats-itn.minaprotocol.tools/submit/stats` + - `--node-error-url https://nodestats-itn.minaprotocol.tools/submit/stats` + - The error collection service tries to report any node crashes before the node process is terminated + +### Block Producers and SNARK Workers +1. Have all the systems upgraded and prepared for the start of block production. +2. 
Monitor nodes and network health, and provide feedback to the engineering team in case of any issues. + +### Archive Node Operators and Rosetta Operators +1. Have all the systems upgraded and prepared for the start of block production. +2. Monitor nodes and network health, and provide feedback to the engineering team in case of any issues. + +### Exchange and Builders +1. After the predefined Berkeley genesis timestamp, block production will commence. MINA deposits and withdrawals can be resumed. +2. Have all the systems upgraded and prepared for the start of block production. +3. Monitor nodes and network health, and provide feedback to the engineering team in case of any issues. diff --git a/docs/welcome.mdx b/docs/welcome.mdx index 8dfa880d1..d6fd0ae3e 100644 --- a/docs/welcome.mdx +++ b/docs/welcome.mdx @@ -1,24 +1,33 @@ --- id: welcome -title: Welcome +title: "Important: Mainnet Upgrade" description: Mina is building a gateway between the real world and crypto — and the infrastructure for the secure, democratic future we all deserve. slug: / --- import HomepageFeatures from "@site/src/components/features/HomepageFeatures"; -# Welcome +# Mainnet (Berkeley) Major Upgrade -:::info +:::caution May 21st 2024 -[Mina Major Upgrade](https://minaprotocol.com/blog/mina-protocols-upcoming-major-upgrade-everything-you-need-to-know) +Please make sure to upgrade your mina nodes to **1.5.x** -### Mina’s Major Upgrade is scheduled for
June 4, 2024. - -In order to prepare for the Berkeley upgrade, please refer to the [Berkeley Upgrade Guide](/berkeley-upgrade) for more information. +=> [Release notes](https://github.com/MinaProtocol/mina/discussions/15333) <= **_[NEEDS-UPDATE]_** ::: -
+After a series of private and public testing rounds, the Mina Protocol developers and community are confident about the software and processes that will upgrade the Mina Protocol Mainnet network. + +This [Berkeley Upgrade section](/berkeley-upgrade/requirements) describe the upgrade processes and operations of the different node types. Please refer to [Mina’s Discord channels](https://discord.gg/minaprotocol) for further support. + +### Mainnet Upgrade Timeline + + + +### Feedback and Questions​ +Thank you for participating in the Berkeley upgrade. + +If you have any questions or feedback related to the Berkeley upgrade, please use the dedicated Discord #mainnet channel. **_[NEEDS-UPDATE]_** - +### Next: [How to upgrade your Mina node](/berkeley-upgrade/requirements) diff --git a/sidebars.js b/sidebars.js index 0743350c9..70d3b9d38 100644 --- a/sidebars.js +++ b/sidebars.js @@ -1,6 +1,34 @@ module.exports = { docs: [ 'welcome', + { + type: 'category', + label: 'Berkeley Upgrade', + items: [ + 'berkeley-upgrade/requirements', + { + type: 'category', + label: 'Archive Migration', + link: { + type: 'doc', + id: 'berkeley-upgrade/archive-migration/index' + }, + items: [ + 'berkeley-upgrade/archive-migration/understanding-archive-migration', + 'berkeley-upgrade/archive-migration/archive-migration-prerequisites', + 'berkeley-upgrade/archive-migration/archive-migration-installation', + 'berkeley-upgrade/archive-migration/migrating-archive-database-to-berkeley', + `berkeley-upgrade/archive-migration/mainnet-database-maintenance`, + 'berkeley-upgrade/archive-migration/debian-example', + 'berkeley-upgrade/archive-migration/docker-example', + 'berkeley-upgrade/archive-migration/appendix', + ], + }, + 'berkeley-upgrade/upgrade-steps', + 'berkeley-upgrade/flags-configs', + 'berkeley-upgrade/appendix', + ], + }, { type: 'category', label: 'About Mina', @@ -483,7 +511,7 @@ module.exports = { id: 'node-operators/snark-workers/index' }, items: [ - 
'node-operators/snark-workers/getting-started', + 'node-operators/snark-workers/getting-started', ], }, { @@ -496,7 +524,7 @@ module.exports = { items: [ 'node-operators/archive-node/getting-started', 'node-operators/archive-node/archive-redundancy', - + ], }, { @@ -507,7 +535,7 @@ module.exports = { id: 'node-operators/seed-peers/index' }, items: [ - 'node-operators/seed-peers/getting-started', + 'node-operators/seed-peers/getting-started', ], }, { @@ -563,22 +591,6 @@ module.exports = { 'node-developers/contributing' ], }, - { - type: 'category', - label: 'Berkeley Upgrade', - link: { - type: 'doc', - id: 'berkeley-upgrade/index' - }, - items: [ - 'berkeley-upgrade/understanding-archive-migration', - 'berkeley-upgrade/archive-migration-prerequisites', - 'berkeley-upgrade/archive-migration-installation', - 'berkeley-upgrade/migrating-archive-database-to-berkeley', - `berkeley-upgrade/mainnet-database-maintenance`, - 'berkeley-upgrade/worked-archive-example', - ], - }, { type: 'category', label: 'Exchange Operators', diff --git a/static/img/11_Mina_Berkeley_Upgrade.png b/static/img/11_Mina_Berkeley_Upgrade.png new file mode 100644 index 000000000..7470fb3f7 Binary files /dev/null and b/static/img/11_Mina_Berkeley_Upgrade.png differ diff --git a/static/img/homepage/major_upgrade.jpg b/static/img/homepage/major_upgrade.jpg deleted file mode 100644 index d34158a56..000000000 Binary files a/static/img/homepage/major_upgrade.jpg and /dev/null differ