From 4ed59b25767cd4d637be2e550e89f279e1fa6cca Mon Sep 17 00:00:00 2001 From: Ignasi Date: Wed, 22 May 2024 19:00:35 +0800 Subject: [PATCH 1/4] Fix all tests except rollup manager --- test/contractsv2/PolygonValidiumEtrog.test.ts | 61 +++++++++++++-- test/contractsv2/PolygonZkEVMEtrog.test.ts | 76 ++++++++++++++++--- 2 files changed, 123 insertions(+), 14 deletions(-) diff --git a/test/contractsv2/PolygonValidiumEtrog.test.ts b/test/contractsv2/PolygonValidiumEtrog.test.ts index d134fc650..6b7ea22cf 100644 --- a/test/contractsv2/PolygonValidiumEtrog.test.ts +++ b/test/contractsv2/PolygonValidiumEtrog.test.ts @@ -579,8 +579,59 @@ describe("PolygonZkEVMEtrog", () => { ).to.emit(polTokenContract, "Approval"); // Sequence Batches - const currentLastBatchSequenced = 1; - const indexL1InfoRoot = 0; // No bridges in sequence + const indexL1InfoRoot = 1; + // Do one bridge to have first leaf of l1InfoTree with value + const depositCount = await polygonZkEVMBridgeContract.depositCount(); + const originNetwork = networkIDMainnet; + const originAddress = deployer.address; + const amount = ethers.parseEther("10"); + const destinationNetwork = networkIDRollup; + const destinationAddress = deployer.address; + const tokenName = "Matic Token"; + const tokenSymbol = "MATIC"; + const decimals = 18; + const metadataToken = ethers.AbiCoder.defaultAbiCoder().encode( + ["string", "string", "uint8"], + [tokenName, tokenSymbol, decimals] + ); + const metadata = metadataToken; + const metadataHash = ethers.solidityPackedKeccak256(["bytes"], [metadata]); + + // create a new deposit + await expect(polTokenContract.approve(polygonZkEVMBridgeContract.target, amount)) + .to.emit(polTokenContract, "Approval") + .withArgs(deployer.address, polygonZkEVMBridgeContract.target, amount); + + // pre compute root merkle tree in Js + const height = 32; + const merkleTree = new MerkleTreeBridge(height); + const leafValue = getLeafValue( + LEAF_TYPE_ASSET, + originNetwork, + originAddress, + destinationNetwork, + destinationAddress, + amount, + metadataHash + ); + merkleTree.add(leafValue); + + await expect( + polygonZkEVMBridgeContract.bridgeMessage(destinationNetwork, destinationAddress, true, metadata, { + value: amount, + }) + ) + .to.emit(polygonZkEVMBridgeContract, "BridgeEvent") + .withArgs( + LEAF_TYPE_MESSAGE, + originNetwork, + originAddress, + destinationNetwork, + destinationAddress, + amount, + metadata, + depositCount + ); await expect( PolygonZKEVMV2Contract.connect(trustedSequencer).sequenceBatches( [sequence], @@ -682,11 +733,11 @@ describe("PolygonZkEVMEtrog", () => { trustedSequencer.address ) ).to.be.revertedWithCustomError(PolygonZKEVMV2Contract, "ForcedDataDoesNotMatch"); - + const l1InfoRoot = await polygonZkEVMGlobalExitRoot.l1InfoRootMap(indexL1InfoRoot); const expectedAccInputHash2 = calculateAccInputHashetrog( - expectedAccInputHash, + await PolygonZKEVMV2Contract.lastAccInputHash(), ethers.keccak256(l2txData), - ethers.ZeroHash, + l1InfoRoot, currentTime, trustedSequencer.address, ethers.ZeroHash diff --git a/test/contractsv2/PolygonZkEVMEtrog.test.ts b/test/contractsv2/PolygonZkEVMEtrog.test.ts index 68cea0dd8..a08adace0 100644 --- a/test/contractsv2/PolygonZkEVMEtrog.test.ts +++ b/test/contractsv2/PolygonZkEVMEtrog.test.ts @@ -382,7 +382,8 @@ describe("PolygonZkEVMEtrog", () => { expect(tx.nonce).to.be.equal(0); expect(tx.chainId).to.be.equal(0); - const expectedAccInputHash = calculateAccInputHashetrog( + // calculate accINputHash + const initExpectedAccInputHash = calculateAccInputHashetrog( 
ethers.ZeroHash, ethers.keccak256(transaction), await polygonZkEVMGlobalExitRoot.getLastGlobalExitRoot(), @@ -390,9 +391,7 @@ describe("PolygonZkEVMEtrog", () => { trustedSequencer.address, blockCreatedRollup?.parentHash ); - - // calculate accINputHash - expect(await PolygonZKEVMV2Contract.lastAccInputHash()).to.be.equal(expectedAccInputHash); + expect(await PolygonZKEVMV2Contract.lastAccInputHash()).to.be.equal(initExpectedAccInputHash); // try verify batches const l2txData = "0x123456"; @@ -411,9 +410,68 @@ describe("PolygonZkEVMEtrog", () => { ).to.emit(polTokenContract, "Approval"); // Sequence Batches - const currentTime = Number((await ethers.provider.getBlock("latest"))?.timestamp); - const indexL1InfoRoot = 0; // No bridges in sequence + const indexL1InfoRoot = 1; + // Do one bridge to have first leaf of l1InfoTree with value + const depositCount = await polygonZkEVMBridgeContract.depositCount(); + const originNetwork = networkIDMainnet; + const originAddress = deployer.address; + const amount = ethers.parseEther("10"); + const destinationNetwork = networkIDRollup; + const destinationAddress = deployer.address; + const tokenName = "Matic Token"; + const tokenSymbol = "MATIC"; + const decimals = 18; + const metadataToken = ethers.AbiCoder.defaultAbiCoder().encode( + ["string", "string", "uint8"], + [tokenName, tokenSymbol, decimals] + ); + const metadata = metadataToken; + const metadataHash = ethers.solidityPackedKeccak256(["bytes"], [metadata]); + + // create a new deposit + await expect(polTokenContract.approve(polygonZkEVMBridgeContract.target, amount)) + .to.emit(polTokenContract, "Approval") + .withArgs(deployer.address, polygonZkEVMBridgeContract.target, amount); + + // pre compute root merkle tree in Js + const height = 32; + const merkleTree = new MerkleTreeBridge(height); + const leafValue = getLeafValue( + LEAF_TYPE_ASSET, + originNetwork, + originAddress, + destinationNetwork, + destinationAddress, + amount, + metadataHash + ); + merkleTree.add(leafValue); + await expect( + polygonZkEVMBridgeContract.bridgeMessage(destinationNetwork, destinationAddress, true, metadata, { + value: amount, + }) + ) + .to.emit(polygonZkEVMBridgeContract, "BridgeEvent") + .withArgs( + LEAF_TYPE_MESSAGE, + originNetwork, + originAddress, + destinationNetwork, + destinationAddress, + amount, + metadata, + depositCount + ); + const currentTime = Number((await ethers.provider.getBlock("latest"))?.timestamp); + const expectedAccInputHash = calculateAccInputHashetrog( + await PolygonZKEVMV2Contract.lastAccInputHash(), + ethers.keccak256(l2txData), + await polygonZkEVMGlobalExitRoot.getRoot(), + currentTime + 10, + trustedSequencer.address, + ethers.ZeroHash + ); await expect( PolygonZKEVMV2Contract.connect(trustedSequencer).sequenceBatches( [sequence], @@ -427,7 +485,7 @@ describe("PolygonZkEVMEtrog", () => { await expect( PolygonZKEVMV2Contract.connect(trustedSequencer).sequenceBatches( [sequence], - 1, + 5, currentTime + 10, expectedAccInputHash, trustedSequencer.address @@ -439,7 +497,7 @@ describe("PolygonZkEVMEtrog", () => { [sequence], indexL1InfoRoot, currentTime + 10, - expectedAccInputHash, + ethers.keccak256(ethers.ZeroHash), // Random expectedAccInputHash trustedSequencer.address ) ).to.be.revertedWithCustomError(PolygonZKEVMV2Contract, "FinalAccInputHashDoesNotMatch"); @@ -518,7 +576,7 @@ describe("PolygonZkEVMEtrog", () => { ).to.be.revertedWithCustomError(PolygonZKEVMV2Contract, "ForcedDataDoesNotMatch"); const expectedAccInputHash2 = calculateAccInputHashetrog( - 
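For reference, the expected accumulated input hash that the updated sequenceBatches calls now take as an argument can be chained off-chain with a small helper. The sketch below is not part of the patch: it assumes the packing order PolygonRollupBaseEtrog uses for a non-forced batch (previous acc input hash, keccak256 of the batch transactions, l1InfoRoot, max sequence timestamp, l2Coinbase, zero forced block hash), and the helper name is illustrative rather than the repository's calculateAccInputHashetrog.

import {ethers} from "hardhat";

// Chain one batch onto the previous accumulated input hash, mirroring the
// keccak256(abi.encodePacked(...)) computation done on-chain for non-forced batches.
function chainAccInputHashSketch(
    previousAccInputHash: string, // bytes32: lastAccInputHash before this batch
    transactionsHash: string, // bytes32: keccak256 of the batch L2 transactions
    l1InfoRoot: string, // bytes32: l1InfoRootMap[indexL1InfoRoot]
    maxSequenceTimestamp: number, // uint64: timestamp passed to sequenceBatches
    l2Coinbase: string, // address: sequencer / fee recipient
    forcedBlockHashL1: string // bytes32: ethers.ZeroHash for non-forced batches
): string {
    return ethers.solidityPackedKeccak256(
        ["bytes32", "bytes32", "bytes32", "uint64", "address", "bytes32"],
        [previousAccInputHash, transactionsHash, l1InfoRoot, maxSequenceTimestamp, l2Coinbase, forcedBlockHashL1]
    );
}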
expectedAccInputHash, + await PolygonZKEVMV2Contract.lastAccInputHash(), ethers.keccak256(l2txData), await polygonZkEVMGlobalExitRoot.l1InfoRootMap(indexL1InfoRoot), currentTime, From 98011e820aa28fd45a91e04bd7f02bbfa8453528 Mon Sep 17 00:00:00 2001 From: Ignasi Date: Thu, 13 Jun 2024 18:28:04 +0200 Subject: [PATCH 2/4] Fixed all tests except upgrade --- .../PolygonGlobalExitRootV2.test.ts | 2 +- test/contractsv2/PolygonRollupManager.test.ts | 287 +++++++----------- .../PolygonRollupManagerUpgrade.test.ts | 2 +- test/contractsv2/PolygonValidiumEtrog.test.ts | 127 +++----- 4 files changed, 155 insertions(+), 263 deletions(-) diff --git a/test/contractsv2/PolygonGlobalExitRootV2.test.ts b/test/contractsv2/PolygonGlobalExitRootV2.test.ts index 7efef8097..eec51ac17 100644 --- a/test/contractsv2/PolygonGlobalExitRootV2.test.ts +++ b/test/contractsv2/PolygonGlobalExitRootV2.test.ts @@ -42,7 +42,7 @@ function calculateGlobalExitRootLeaf(newGlobalExitRoot: any, lastBlockHash: any, [newGlobalExitRoot, lastBlockHash, timestamp] ); } -describe("Polygon Globlal exit root v2", () => { +describe("Polygon Global exit root v2", () => { let deployer: any; let rollupManager: any; let bridge: any; diff --git a/test/contractsv2/PolygonRollupManager.test.ts b/test/contractsv2/PolygonRollupManager.test.ts index 5db8f9de3..d06e511ec 100644 --- a/test/contractsv2/PolygonRollupManager.test.ts +++ b/test/contractsv2/PolygonRollupManager.test.ts @@ -11,9 +11,7 @@ import { PolygonRollupBaseEtrog, TokenWrapped, Address, - PolygonValidiumStorageMigration, PolygonDataCommittee, - PolygonValidiumEtrogPrevious, } from "../../typechain-types"; import {takeSnapshot, time} from "@nomicfoundation/hardhat-network-helpers"; import {processorUtils, contractUtils, MTBridge, mtBridgeUtils} from "@0xpolygonhermez/zkevm-commonjs"; @@ -593,7 +591,7 @@ describe("Polygon Rollup Manager", () => { const lastBlockHash = lastBlock?.parentHash; const lastGlobalExitRootS = await polygonZkEVMGlobalExitRoot.getLastGlobalExitRoot(); - // calcualte accINputHash + // calculate accInputHash expect(await newZkEVMContract.lastAccInputHash()).to.be.equal(expectedAccInputHash2); // Create a new local exit root mocking some bridge @@ -1352,17 +1350,20 @@ describe("Polygon Rollup Manager", () => { "Approval" ); - // Sequence Batches const currentTime = Number((await ethers.provider.getBlock("latest"))?.timestamp); - let currentLastBatchSequenced = 1; - + const indexL1InfoRoot = 0; + const expectedAccInputHash1 = calculateAccInputHashetrog( + await newZkEVMContract.lastAccInputHash(), + ethers.keccak256(l2txData), + await polygonZkEVMGlobalExitRoot.l1InfoRootMap(indexL1InfoRoot), + currentTime, + trustedSequencer.address, + ethers.ZeroHash + ); + // Sequence Batches const txSequenceBatches = await newZkEVMContract .connect(trustedSequencer) - .sequenceBatches([sequence], currentTime, currentLastBatchSequenced++, trustedSequencer.address); - - const lastBlock = await ethers.provider.getBlock("latest"); - const lastBlockHash = lastBlock?.parentHash; - const lastGlobalExitRootS = await polygonZkEVMGlobalExitRoot.getLastGlobalExitRoot(); + .sequenceBatches([sequence], indexL1InfoRoot, currentTime, expectedAccInputHash1, trustedSequencer.address); const receipt = await txSequenceBatches.wait(); const logs = receipt.logs; @@ -1478,11 +1479,23 @@ describe("Polygon Rollup Manager", () => { ) ).to.be.revertedWithCustomError(rollupManagerContract, "NewAccInputHashDoesNotExist"); - // Calcualte new globalExitroot + // Calculate new globalExitRoot const 
merkleTreeRollups = new MerkleTreeBridge(height); - merkleTreeRollups.add(newLocalExitRoot); + merkleTreeRollups.add(merkleTreezkEVM.getRoot()); const rootRollups = merkleTreeRollups.getRoot(); + const lastGlobalExitRootS2 = calculateGlobalExitRoot(ethers.ZeroHash, rootRollups); + const lastBlock2 = await ethers.provider.getBlock("latest"); + const lastBlockHash2 = lastBlock2?.hash; + const leafValueUpdateGER2 = calculateGlobalExitRootLeaf( + lastGlobalExitRootS2, + lastBlockHash2, + lastBlock2?.timestamp + 5 + ); + merkleTreeGLobalExitRoot.add(leafValueUpdateGER2); + const currentL1InfoRoot = merkleTreeGLobalExitRoot.getRoot(); + await ethers.provider.send("evm_setNextBlockTimestamp", [lastBlock2?.timestamp + 5]); + // Verify batch await expect( rollupManagerContract @@ -1501,7 +1514,7 @@ describe("Polygon Rollup Manager", () => { .to.emit(rollupManagerContract, "VerifyBatchesTrustedAggregator") .withArgs(newCreatedRollupID, newVerifiedBatch, newStateRoot, newLocalExitRoot, trustedAggregator.address) .to.emit(polygonZkEVMGlobalExitRoot, "UpdateL1InfoTree") - .withArgs(ethers.ZeroHash, rootRollups); + .withArgs(ethers.ZeroHash, rootRollups, currentL1InfoRoot); const finalAggregatorMatic = await polTokenContract.balanceOf(beneficiary.address); @@ -1653,7 +1666,7 @@ describe("Polygon Rollup Manager", () => { // In order to create a new rollup type, create an implementation of the contract // Create zkEVM implementation - const PolygonZKEVMV2Factory = await ethers.getContractFactory("PolygonZkEVMEtrogPrevious"); + const PolygonZKEVMV2Factory = await ethers.getContractFactory("PolygonZkEVMEtrog"); const PolygonZKEVMV2Contract = await PolygonZKEVMV2Factory.deploy( polygonZkEVMGlobalExitRoot.target, polTokenContract.target, @@ -1940,10 +1953,7 @@ describe("Polygon Rollup Manager", () => { // Sequence Batches const currentTime = Number((await ethers.provider.getBlock("latest"))?.timestamp); - let currentLastBatchSequenced = 0; - await expect( - newZkEVMContract.connect(trustedSequencer).sequenceBatches([sequence], trustedSequencer.address) - ).to.emit(newZkEVMContract, "SequenceBatches"); + const indexL1InfoRoot = 0; const lastBlock = await ethers.provider.getBlock("latest"); @@ -1958,6 +1968,18 @@ describe("Polygon Rollup Manager", () => { ethers.ZeroHash ); + await expect( + newZkEVMContract + .connect(trustedSequencer) + .sequenceBatches( + [sequence], + indexL1InfoRoot, + currentTime, + expectedAccInputHash2, + trustedSequencer.address + ) + ).to.emit(newZkEVMContract, "SequenceBatches"); + // calcualte accINputHash expect(await newZkEVMContract.lastAccInputHash()).to.be.equal(expectedAccInputHash2); @@ -2057,6 +2079,18 @@ describe("Polygon Rollup Manager", () => { merkleTreeRollups.add(newLocalExitRoot); const rootRollups = merkleTreeRollups.getRoot(); + const lastGlobalExitRootS2 = calculateGlobalExitRoot(ethers.ZeroHash, rootRollups); + const lastBlock2 = await ethers.provider.getBlock("latest"); + const lastBlockHash2 = lastBlock2?.hash; + const leafValueUpdateGER2 = calculateGlobalExitRootLeaf( + lastGlobalExitRootS2, + lastBlockHash2, + lastBlock2?.timestamp + 5 + ); + const merkleTreeGLobalExitRoot = new MerkleTreeBridge(height); + merkleTreeGLobalExitRoot.add(leafValueUpdateGER2); + const currentL1InfoRoot = merkleTreeGLobalExitRoot.getRoot(); + await ethers.provider.send("evm_setNextBlockTimestamp", [lastBlock2?.timestamp + 5]); // Verify batch await expect( rollupManagerContract @@ -2075,7 +2109,7 @@ describe("Polygon Rollup Manager", () => { .to.emit(rollupManagerContract, 
"VerifyBatchesTrustedAggregator") .withArgs(newCreatedRollupID, newVerifiedBatch, newStateRoot, newLocalExitRoot, trustedAggregator.address) .to.emit(polygonZkEVMGlobalExitRoot, "UpdateL1InfoTree") - .withArgs(ethers.ZeroHash, rootRollups); + .withArgs(ethers.ZeroHash, rootRollups, currentL1InfoRoot); const finalAggregatorMatic = await polTokenContract.balanceOf(beneficiary.address); @@ -2380,7 +2414,7 @@ describe("Polygon Rollup Manager", () => { // In order to create a new rollup type, create an implementation of the contract // Create zkEVM implementation - const PolygonValidiumPreviousVersion = await ethers.getContractFactory("PolygonValidiumEtrogPrevious"); + const PolygonValidiumPreviousVersion = await ethers.getContractFactory("PolygonValidiumEtrog"); const PolygonZKEVMV2Contract = await PolygonValidiumPreviousVersion.deploy( polygonZkEVMGlobalExitRoot.target, polTokenContract.target, @@ -2568,25 +2602,30 @@ describe("Polygon Rollup Manager", () => { expect(await newZkEVMContract.dataAvailabilityProtocol()).to.be.equal(PolygonDataCommitee.target); await PolygonDataCommitee.setupCommittee(0, [], "0x"); - - await expect( - newZkEVMContract - .connect(trustedSequencer) - .sequenceBatchesValidium([sequence], trustedSequencer.address, "0x") - ).to.emit(newZkEVMContract, "SequenceBatches"); - + const indexL1InfoRoot = 0; const lastBlock = await ethers.provider.getBlock("latest"); const rootSC = await polygonZkEVMGlobalExitRoot.getRoot(); - const expectedAccInputHash2 = calculateAccInputHashetrog( - expectedAccInputHash, + await newZkEVMContract.lastAccInputHash(), ethers.keccak256(l2txData), rootSC, - lastBlock?.timestamp, + currentTime, trustedSequencer.address, ethers.ZeroHash ); + await expect( + newZkEVMContract + .connect(trustedSequencer) + .sequenceBatchesValidium( + [sequence], + indexL1InfoRoot, + currentTime, + expectedAccInputHash2, + trustedSequencer.address, + "0x" + ) + ).to.emit(newZkEVMContract, "SequenceBatches"); // calcualte accINputHash expect(await newZkEVMContract.lastAccInputHash()).to.be.equal(expectedAccInputHash2); @@ -2605,6 +2644,19 @@ describe("Polygon Rollup Manager", () => { const merkleTreeRollups = new MerkleTreeBridge(32); const rootRollups = merkleTreeRollups.getRoot(); + const lastGlobalExitRootS2 = calculateGlobalExitRoot(ethers.ZeroHash, rootRollups); + const lastBlock2 = await ethers.provider.getBlock("latest"); + const lastBlockHash2 = lastBlock2?.hash; + const leafValueUpdateGER2 = calculateGlobalExitRootLeaf( + lastGlobalExitRootS2, + lastBlockHash2, + lastBlock2?.timestamp + 5 + ); + const height = 32; + const merkleTreeGLobalExitRoot = new MerkleTreeBridge(height); + merkleTreeGLobalExitRoot.add(leafValueUpdateGER2); + const currentL1InfoRoot = merkleTreeGLobalExitRoot.getRoot(); + await ethers.provider.send("evm_setNextBlockTimestamp", [lastBlock2?.timestamp + 5]); // Verify batch await expect( rollupManagerContract @@ -2623,7 +2675,7 @@ describe("Polygon Rollup Manager", () => { .to.emit(rollupManagerContract, "VerifyBatchesTrustedAggregator") .withArgs(newCreatedRollupID, newVerifiedBatch, newStateRoot, newLocalExitRoot, trustedAggregator.address) .to.emit(polygonZkEVMGlobalExitRoot, "UpdateL1InfoTree") - .withArgs(ethers.ZeroHash, rootRollups); + .withArgs(ethers.ZeroHash, rootRollups, currentL1InfoRoot); const finalAggregatorMatic = await polTokenContract.balanceOf(beneficiary.address); @@ -2636,150 +2688,6 @@ describe("Polygon Rollup Manager", () => { expect(await polygonZkEVMGlobalExitRoot.getLastGlobalExitRoot()).to.be.equal( 
calculateGlobalExitRoot(ethers.ZeroHash, rootRollups) ); - - // Upgrade rollup - // In order to update a new rollup type, create an implementation of the contract - - // Create zkEVM implementation - const PolygonValidiumStorageMigration = await ethers.getContractFactory("PolygonValidiumStorageMigration"); - const PolygonValidiumMigrationContract = await PolygonValidiumStorageMigration.deploy( - polygonZkEVMGlobalExitRoot.target, - polTokenContract.target, - polygonZkEVMBridgeContract.target, - rollupManagerContract.target - ); - await PolygonValidiumMigrationContract.waitForDeployment(); - - // Add a new rollup type with timelock - const etrogRollupType = 2; - await expect( - rollupManagerContract - .connect(timelock) - .addNewRollupType( - PolygonValidiumMigrationContract.target, - verifierContract.target, - forkID, - rollupCompatibilityID, - genesisRandom, - descirption - ) - ) - .to.emit(rollupManagerContract, "AddNewRollupType") - .withArgs( - etrogRollupType, - PolygonValidiumMigrationContract.target, - verifierContract.target, - forkID, - rollupCompatibilityID, - genesisRandom, - descirption - ); - - // Add a new rollup type with timelock - const randomType = 3; - await expect( - rollupManagerContract - .connect(timelock) - .addNewRollupType( - PolygonValidiumMigrationContract.target, - verifierContract.target, - forkID, - randomType, - genesisRandom, - descirption - ) - ) - .to.emit(rollupManagerContract, "AddNewRollupType") - .withArgs( - randomType, - PolygonValidiumMigrationContract.target, - verifierContract.target, - forkID, - randomType, - genesisRandom, - descirption - ); - - // assert new rollup type - const createdEtrogRollupType = await rollupManagerContract.rollupTypeMap(etrogRollupType); - - const expectedEtrogRollupType = [ - PolygonValidiumMigrationContract.target, - verifierContract.target, - forkID, - rollupCompatibilityID, - false, - genesisRandom, - ]; - expect(createdEtrogRollupType).to.be.deep.equal(expectedEtrogRollupType); - - // Validate upgrade OZ - - await upgrades.validateUpgrade(PolygonValidiumPreviousVersion, PolygonValidiumStorageMigration, { - constructorArgs: [ - polygonZkEVMGlobalExitRoot.target, - polTokenContract.target, - polygonZkEVMBridgeContract.target, - rollupManagerContract.target, - ], - unsafeAllow: ["constructor", "state-variable-immutable"], - } as any); - - expect(await upgrades.erc1967.getImplementationAddress(newZKEVMAddress as string)).to.be.equal( - PolygonZKEVMV2Contract.target - ); - - await expect( - rollupManagerContract - .connect(timelock) - .updateRollup( - newZKEVMAddress, - etrogRollupType, - PolygonValidiumStorageMigration.interface.encodeFunctionData("initializeMigration", []) - ) - ) - .to.emit(rollupManagerContract, "UpdateRollup") - .withArgs(newRollupTypeID, etrogRollupType, newVerifiedBatch); - - // Check mapping on rollup Manager - const rollupDataFinal = await rollupManagerContract.rollupIDToRollupData(newCreatedRollupID); - expect(rollupDataFinal.rollupContract).to.be.equal(newZKEVMAddress); - expect(rollupDataFinal.chainID).to.be.equal(chainID); - expect(rollupDataFinal.verifier).to.be.equal(verifierContract.target); - expect(rollupDataFinal.forkID).to.be.equal(forkID); - expect(rollupDataFinal.lastLocalExitRoot).to.be.equal(newLocalExitRoot); - expect(rollupDataFinal.lastBatchSequenced).to.be.equal(newVerifiedBatch); - expect(rollupDataFinal.lastVerifiedBatch).to.be.equal(newVerifiedBatch); - expect(rollupDataFinal.lastPendingState).to.be.equal(0); - 
expect(rollupDataFinal.lastPendingStateConsolidated).to.be.equal(0); - expect(rollupDataFinal.lastVerifiedBatchBeforeUpgrade).to.be.equal(newVerifiedBatch); - expect(rollupDataFinal.rollupTypeID).to.be.equal(etrogRollupType); - expect(rollupDataFinal.rollupCompatibilityID).to.be.equal(0); - - expect(await upgrades.erc1967.getImplementationAddress(newZKEVMAddress as string)).to.be.equal( - PolygonValidiumMigrationContract.target - ); - - expect(await newZkEVMContract.dataAvailabilityProtocol()).to.be.equal(PolygonDataCommitee.target); - - // // Finally check compatibility with current ROllups: - // const PolygonCurrentValidium = await ethers.getContractFactory("PolygonValidiumEtrog"); - // const PolygonCurrentValidiumContract = await PolygonCurrentValidium.deploy( - // polygonZkEVMGlobalExitRoot.target, - // polTokenContract.target, - // polygonZkEVMBridgeContract.target, - // rollupManagerContract.target - // ); - // await PolygonCurrentValidiumContract.waitForDeployment(); - // await upgrades.validateUpgrade(PolygonValidiumStorageMigration, PolygonCurrentValidium, { - // constructorArgs: [ - // polygonZkEVMGlobalExitRoot.target, - // polTokenContract.target, - // polygonZkEVMBridgeContract.target, - // rollupManagerContract.target, - // ], - // unsafeAllow: ["constructor", "state-variable-immutable"], - // } as any); }); it("should add existing rollup and test full flow", async () => { @@ -2956,12 +2864,21 @@ describe("Polygon Rollup Manager", () => { // Sequence Batches const currentTime = Number((await ethers.provider.getBlock("latest"))?.timestamp); - let currentLastBatchSequenced = 1; + const indexL1InfoRoot = 0; + const expectedAccInputHash = calculateAccInputHashetrog( + await PolygonZKEVMV2Contract.lastAccInputHash(), + ethers.keccak256(l2txData), + await polygonZkEVMGlobalExitRoot.getRoot(), + currentTime, + trustedSequencer.address, + ethers.ZeroHash + ); await expect( PolygonZKEVMV2Contract.connect(trustedSequencer).sequenceBatches( [sequence], + indexL1InfoRoot, currentTime, - currentLastBatchSequenced++, + expectedAccInputHash, trustedSequencer.address ) ).to.emit(PolygonZKEVMV2Contract, "SequenceBatches"); @@ -3076,6 +2993,18 @@ describe("Polygon Rollup Manager", () => { merkleTreeRollups.add(newLocalExitRoot); const rootRollups = merkleTreeRollups.getRoot(); + const merkleTreeGLobalExitRoot = new MerkleTreeBridge(height); + const lastGlobalExitRootS2 = calculateGlobalExitRoot(ethers.ZeroHash, rootRollups); + const lastBlock2 = await ethers.provider.getBlock("latest"); + const lastBlockHash2 = lastBlock2?.hash; + const leafValueUpdateGER2 = calculateGlobalExitRootLeaf( + lastGlobalExitRootS2, + lastBlockHash2, + lastBlock2?.timestamp + 5 + ); + merkleTreeGLobalExitRoot.add(leafValueUpdateGER2); + const currentL1InfoRoot = merkleTreeGLobalExitRoot.getRoot(); + await ethers.provider.send("evm_setNextBlockTimestamp", [lastBlock2?.timestamp + 5]); // Verify batch await expect( rollupManagerContract @@ -3094,7 +3023,7 @@ describe("Polygon Rollup Manager", () => { .to.emit(rollupManagerContract, "VerifyBatchesTrustedAggregator") .withArgs(RollupID, newVerifiedBatch, newStateRoot, newLocalExitRoot, trustedAggregator.address) .to.emit(polygonZkEVMGlobalExitRoot, "UpdateL1InfoTree") - .withArgs(ethers.ZeroHash, rootRollups); + .withArgs(ethers.ZeroHash, rootRollups, currentL1InfoRoot); const finalAggregatorMatic = await polTokenContract.balanceOf(beneficiary.address); diff --git a/test/contractsv2/PolygonRollupManagerUpgrade.test.ts 
b/test/contractsv2/PolygonRollupManagerUpgrade.test.ts index 9f9e8cd75..d4f666cc1 100644 --- a/test/contractsv2/PolygonRollupManagerUpgrade.test.ts +++ b/test/contractsv2/PolygonRollupManagerUpgrade.test.ts @@ -245,7 +245,7 @@ describe("Polygon Rollup manager upgraded", () => { }); }); - it("Cannot initialzie again", async () => { + it("Cannot initialize again", async () => { await expect( rollupManagerContract.initialize( trustedAggregator.address, diff --git a/test/contractsv2/PolygonValidiumEtrog.test.ts b/test/contractsv2/PolygonValidiumEtrog.test.ts index 6b7ea22cf..d91dbd55a 100644 --- a/test/contractsv2/PolygonValidiumEtrog.test.ts +++ b/test/contractsv2/PolygonValidiumEtrog.test.ts @@ -129,7 +129,6 @@ describe("PolygonZkEVMEtrog", () => { // deploy globalExitRoot const PolygonZkEVMGlobalExitRootFactory = await ethers.getContractFactory("PolygonZkEVMGlobalExitRootV2"); polygonZkEVMGlobalExitRoot = await upgrades.deployProxy(PolygonZkEVMGlobalExitRootFactory, [], { - initializer: false, constructorArgs: [precalculateRollupManagerAddress, precalculateBridgeAddress], unsafeAllow: ["constructor", "state-variable-immutable"], }); @@ -579,59 +578,7 @@ describe("PolygonZkEVMEtrog", () => { ).to.emit(polTokenContract, "Approval"); // Sequence Batches - const indexL1InfoRoot = 1; - // Do one bridge to have first leaf of l1InfoTree with value - const depositCount = await polygonZkEVMBridgeContract.depositCount(); - const originNetwork = networkIDMainnet; - const originAddress = deployer.address; - const amount = ethers.parseEther("10"); - const destinationNetwork = networkIDRollup; - const destinationAddress = deployer.address; - const tokenName = "Matic Token"; - const tokenSymbol = "MATIC"; - const decimals = 18; - const metadataToken = ethers.AbiCoder.defaultAbiCoder().encode( - ["string", "string", "uint8"], - [tokenName, tokenSymbol, decimals] - ); - const metadata = metadataToken; - const metadataHash = ethers.solidityPackedKeccak256(["bytes"], [metadata]); - - // create a new deposit - await expect(polTokenContract.approve(polygonZkEVMBridgeContract.target, amount)) - .to.emit(polTokenContract, "Approval") - .withArgs(deployer.address, polygonZkEVMBridgeContract.target, amount); - - // pre compute root merkle tree in Js - const height = 32; - const merkleTree = new MerkleTreeBridge(height); - const leafValue = getLeafValue( - LEAF_TYPE_ASSET, - originNetwork, - originAddress, - destinationNetwork, - destinationAddress, - amount, - metadataHash - ); - merkleTree.add(leafValue); - - await expect( - polygonZkEVMBridgeContract.bridgeMessage(destinationNetwork, destinationAddress, true, metadata, { - value: amount, - }) - ) - .to.emit(polygonZkEVMBridgeContract, "BridgeEvent") - .withArgs( - LEAF_TYPE_MESSAGE, - originNetwork, - originAddress, - destinationNetwork, - destinationAddress, - amount, - metadata, - depositCount - ); + const indexL1InfoRoot = 0; // No bridges in sequence await expect( PolygonZKEVMV2Contract.connect(trustedSequencer).sequenceBatches( [sequence], @@ -691,8 +638,8 @@ describe("PolygonZkEVMEtrog", () => { await expect( PolygonZKEVMV2Contract.connect(trustedSequencer).sequenceBatches( hugeBatchArray, - currentTime, indexL1InfoRoot, + currentTime, expectedAccInputHash, trustedSequencer.address ) @@ -709,8 +656,8 @@ describe("PolygonZkEVMEtrog", () => { forcedBlockHashL1: ethers.ZeroHash, }, ], - currentTime, indexL1InfoRoot, + currentTime, expectedAccInputHash, trustedSequencer.address ) @@ -727,17 +674,17 @@ describe("PolygonZkEVMEtrog", () => { forcedBlockHashL1: 
ethers.ZeroHash, }, ], - currentTime, indexL1InfoRoot, + currentTime, expectedAccInputHash, trustedSequencer.address ) ).to.be.revertedWithCustomError(PolygonZKEVMV2Contract, "ForcedDataDoesNotMatch"); - const l1InfoRoot = await polygonZkEVMGlobalExitRoot.l1InfoRootMap(indexL1InfoRoot); + const expectedAccInputHash2 = calculateAccInputHashetrog( await PolygonZKEVMV2Contract.lastAccInputHash(), ethers.keccak256(l2txData), - l1InfoRoot, + await polygonZkEVMGlobalExitRoot.getRoot(), currentTime, trustedSequencer.address, ethers.ZeroHash @@ -745,8 +692,8 @@ describe("PolygonZkEVMEtrog", () => { await expect( PolygonZKEVMV2Contract.connect(trustedSequencer).sequenceBatches( [sequence], - currentTime, indexL1InfoRoot, + currentTime, expectedAccInputHash2, trustedSequencer.address ) @@ -844,12 +791,13 @@ describe("PolygonZkEVMEtrog", () => { // Sequence Batches const currentTime = Number((await ethers.provider.getBlock("latest"))?.timestamp); - let currentLastBatchSequenced = 1; + const indexL1InfoRoot = 0; await expect( PolygonZKEVMV2Contract.sequenceBatchesValidium( [sequenceValidium], + indexL1InfoRoot, currentTime, - currentLastBatchSequenced, + expectedAccInputHash, trustedSequencer.address, "0x1233" ) @@ -858,8 +806,9 @@ describe("PolygonZkEVMEtrog", () => { await expect( PolygonZKEVMV2Contract.connect(trustedSequencer).sequenceBatchesValidium( [], + indexL1InfoRoot, currentTime, - currentLastBatchSequenced, + expectedAccInputHash, trustedSequencer.address, "0x1233" ) @@ -875,8 +824,9 @@ describe("PolygonZkEVMEtrog", () => { await expect( PolygonZKEVMV2Contract.connect(trustedSequencer).sequenceBatchesValidium( hugeBatchArray, + indexL1InfoRoot, currentTime, - currentLastBatchSequenced, + expectedAccInputHash, trustedSequencer.address, "0x" ) @@ -893,8 +843,9 @@ describe("PolygonZkEVMEtrog", () => { forcedBlockHashL1: ethers.ZeroHash, }, ], + indexL1InfoRoot, currentTime, - currentLastBatchSequenced, + expectedAccInputHash, trustedSequencer.address, "0x" ) @@ -903,8 +854,9 @@ describe("PolygonZkEVMEtrog", () => { await expect( PolygonZKEVMV2Contract.connect(trustedSequencer).sequenceBatchesValidium( [sequenceValidium], + indexL1InfoRoot, currentTime, - currentLastBatchSequenced, + expectedAccInputHash, trustedSequencer.address, "0x1233" ) @@ -940,8 +892,16 @@ describe("PolygonZkEVMEtrog", () => { addrBytes = addrBytes + walletsDataCommitee[i].address.slice(2); } + const expectedAccInputHash2 = calculateAccInputHashetrog( + await PolygonZKEVMV2Contract.lastAccInputHash(), + hashedData, + await polygonZkEVMGlobalExitRoot.getRoot(), + currentTime, + trustedSequencer.address, + ethers.ZeroHash + ); const commiteeHash = ethers.keccak256(addrBytes); - const signedData = ethers.solidityPackedKeccak256(["bytes32", "bytes32"], [ethers.ZeroHash, hashedData]); + const signedData = expectedAccInputHash2; let message = "0x"; for (let i = 0; i < walletsDataCommitee.length; i++) { const newSignature = walletsDataCommitee[i].signingKey.sign(signedData); @@ -956,8 +916,9 @@ describe("PolygonZkEVMEtrog", () => { await expect( PolygonZKEVMV2Contract.connect(trustedSequencer).sequenceBatchesValidium( [sequenceValidium], + indexL1InfoRoot, currentTime, - currentLastBatchSequenced, + expectedAccInputHash, trustedSequencer.address, badDataAvMessage ) @@ -966,8 +927,9 @@ describe("PolygonZkEVMEtrog", () => { await expect( PolygonZKEVMV2Contract.connect(trustedSequencer).sequenceBatchesValidium( [sequenceValidium], + indexL1InfoRoot, currentTime, - currentLastBatchSequenced, + expectedAccInputHash, 
trustedSequencer.address, badDataAvMessage.slice(0, -2) ) @@ -976,23 +938,15 @@ describe("PolygonZkEVMEtrog", () => { await expect( PolygonZKEVMV2Contract.connect(trustedSequencer).sequenceBatchesValidium( [sequenceValidium], + indexL1InfoRoot, currentTime, - currentLastBatchSequenced, + expectedAccInputHash2, trustedSequencer.address, dataAvailabilityMessage ) ).to.emit(PolygonZKEVMV2Contract, "SequenceBatches"); - const expectedAccInputHash2 = calculateAccInputHashetrog( - expectedAccInputHash, - hashedData, - await polygonZkEVMGlobalExitRoot.getRoot(), - currentTime, - trustedSequencer.address, - ethers.ZeroHash - ); - - // calcualte accINputHash + // calculate accINputHash expect(await PolygonZKEVMV2Contract.lastAccInputHash()).to.be.equal(expectedAccInputHash2); }); @@ -1363,12 +1317,21 @@ describe("PolygonZkEVMEtrog", () => { .withArgs(commiteeHash); const currentTime = Number((await ethers.provider.getBlock("latest"))?.timestamp); - const currentLastBatchSequenced = 1; + const indexL1InfoRoot = 0; + const expectedAccInputHash2 = calculateAccInputHashetrog( + await PolygonZKEVMV2Contract.lastAccInputHash(), + ethers.keccak256(l2txData), + await polygonZkEVMGlobalExitRoot.getLastGlobalExitRoot(), + timestampForceBatch, + trustedSequencer.address, + blockForced?.parentHash + ); await expect( PolygonZKEVMV2Contract.connect(trustedSequencer).sequenceBatchesValidium( [sequenceForced], + indexL1InfoRoot, currentTime, - currentLastBatchSequenced, + expectedAccInputHash2, trustedSequencer.address, "0x12" ) From 57f3184b2d305994611ee0e64df76d6febee3b58 Mon Sep 17 00:00:00 2001 From: invocamanman Date: Mon, 17 Jun 2024 15:18:23 +0200 Subject: [PATCH 3/4] finish test, add audit fixe --- contracts/v2/PolygonRollupManager.sol | 36 +- .../v2/interfaces/IPolygonRollupBase.sol | 3 +- contracts/v2/lib/PolygonRollupBaseEtrog.sol | 8 +- .../IPolygonRollupBasePrevious.sol | 22 + .../PolygonRollupBaseEtrogPrevious.sol | 1879 ++++++++-------- .../previousVersions/PolygonRollupManager.sol | 1911 +++++++++++++++++ .../PolygonValidiumEtrogPrevious.sol | 596 ++--- .../PolygonZkEVMEtrogPrevious.sol | 64 +- test/contractsv2/PolygonRollupManager.test.ts | 22 +- .../PolygonRollupManagerUpgrade.test.ts | 115 +- 10 files changed, 3344 insertions(+), 1312 deletions(-) create mode 100644 contracts/v2/previousVersions/IPolygonRollupBasePrevious.sol create mode 100644 contracts/v2/previousVersions/PolygonRollupManager.sol diff --git a/contracts/v2/PolygonRollupManager.sol b/contracts/v2/PolygonRollupManager.sol index 19f897c3b..4b04307d1 100644 --- a/contracts/v2/PolygonRollupManager.sol +++ b/contracts/v2/PolygonRollupManager.sol @@ -327,7 +327,7 @@ contract PolygonRollupManager is */ event RollbackBatches( uint32 indexed rollupID, - uint64 indexed batchToRollback, + uint64 indexed targetBatch, bytes32 accInputHashToRollback ); @@ -637,7 +637,7 @@ contract PolygonRollupManager is rollupAddressToID[address(rollupContract)] ]; - // If rollupID does not exist (rollupID = 0), will revert afterwards + // Check all sequenced batches are verified if (rollup.lastBatchSequenced != rollup.lastVerifiedBatch) { revert AllSequencedMustBeVerified(); } @@ -732,11 +732,11 @@ contract PolygonRollupManager is /** * @notice Rollback batches of the target rollup * @param rollupContract Rollup consensus proxy address - * @param batchToRollback Batch to rollback + * @param targetBatch Batch to rollback up to but not including this batch */ function rollbackBatches( IPolygonRollupBase rollupContract, - uint64 batchToRollback + uint64 
targetBatch ) external { // Check msg.sender has _UPDATE_ROLLUP_ROLE rol or is the admin of the network if ( @@ -758,23 +758,23 @@ contract PolygonRollupManager is // Batch to rollback should be already sequenced if ( - batchToRollback >= lastBatchSequenced || - batchToRollback < rollup.lastVerifiedBatch + targetBatch >= lastBatchSequenced || + targetBatch < rollup.lastVerifiedBatch ) { revert RollbackBatchIsNotValid(); } uint64 currentBatch = lastBatchSequenced; - // delete sequence batches structs until the batchToRollback - while (currentBatch != batchToRollback) { + // delete sequence batches structs until the targetBatch + while (currentBatch != targetBatch) { // Load previous end of sequence batch uint64 previousBatch = rollup .sequencedBatches[currentBatch] .previousLastBatchSequenced; // Batch to rollback must be end of a sequence - if (previousBatch < batchToRollback) { + if (previousBatch < targetBatch) { revert RollbackBatchIsNotEndOfSequence(); } @@ -786,21 +786,27 @@ contract PolygonRollupManager is } // Update last batch sequenced on rollup data - rollup.lastBatchSequenced = batchToRollback; + rollup.lastBatchSequenced = targetBatch; // Update totalSequencedBatches - totalSequencedBatches -= lastBatchSequenced - batchToRollback; + totalSequencedBatches -= lastBatchSequenced - targetBatch; + + // Clean pending state if any + if (rollup.lastPendingState > 0) { + rollup.lastPendingState = 0; + rollup.lastPendingStateConsolidated = 0; + } // Callback the consensus contract rollupContract.rollbackBatches( - batchToRollback, - rollup.sequencedBatches[batchToRollback].accInputHash + targetBatch, + rollup.sequencedBatches[targetBatch].accInputHash ); emit RollbackBatches( rollupID, - batchToRollback, - rollup.sequencedBatches[batchToRollback].accInputHash + targetBatch, + rollup.sequencedBatches[targetBatch].accInputHash ); } diff --git a/contracts/v2/interfaces/IPolygonRollupBase.sol b/contracts/v2/interfaces/IPolygonRollupBase.sol index 1c7df4dc5..023828f52 100644 --- a/contracts/v2/interfaces/IPolygonRollupBase.sol +++ b/contracts/v2/interfaces/IPolygonRollupBase.sol @@ -21,8 +21,7 @@ interface IPolygonRollupBase { function admin() external returns (address); function rollbackBatches( - uint64 batchToRollback, + uint64 targetBatch, bytes32 accInputHashToRollback ) external; - } diff --git a/contracts/v2/lib/PolygonRollupBaseEtrog.sol b/contracts/v2/lib/PolygonRollupBaseEtrog.sol index e75c268bc..49b1777e0 100644 --- a/contracts/v2/lib/PolygonRollupBaseEtrog.sol +++ b/contracts/v2/lib/PolygonRollupBaseEtrog.sol @@ -239,7 +239,7 @@ abstract contract PolygonRollupBaseEtrog is * @dev Emitted when a aggregator verifies batches */ event RollbackBatches( - uint64 indexed batchToRollback, + uint64 indexed targetBatch, bytes32 accInputHashToRollback ); @@ -583,17 +583,17 @@ abstract contract PolygonRollupBaseEtrog is /** * @notice Callback on rollback batches, can only be called by the rollup manager - * @param batchToRollback Batch to rollback + * @param targetBatch Batch to rollback up to but not including this batch * @param accInputHashToRollback Acc input hash to rollback */ function rollbackBatches( - uint64 batchToRollback, + uint64 targetBatch, bytes32 accInputHashToRollback ) public virtual override onlyRollupManager { // Rollback the accumulated input hash lastAccInputHash = accInputHashToRollback; - emit RollbackBatches(batchToRollback, accInputHashToRollback); + emit RollbackBatches(targetBatch, accInputHashToRollback); } //////////////////////////// diff --git 
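The renamed targetBatch parameter keeps the rollback semantics of "roll back up to, but not including, this batch", and the manager now also clears any pending state. Below is a hedged usage sketch of the new entry point from a Hardhat test; it assumes fixtures such as rollupManagerContract, admin (the network admin), the consensus contract and its rollupID are already set up as in these tests, and the matcher import and concrete batch number are illustrative only.

import {expect} from "chai";
import {anyValue} from "@nomicfoundation/hardhat-chai-matchers/withArgs";

// Roll the rollup back so that `targetBatch` becomes the last sequenced batch.
// It must already be sequenced, not yet verified, and be the end of a stored sequence.
const targetBatch = 1; // illustrative: keep only the first sequenced batch
await expect(rollupManagerContract.connect(admin).rollbackBatches(newZkEVMContract.target, targetBatch))
    .to.emit(rollupManagerContract, "RollbackBatches")
    .withArgs(rollupID, targetBatch, anyValue) // accInputHashToRollback is read from storage
    .to.emit(newZkEVMContract, "RollbackBatches")
    .withArgs(targetBatch, anyValue); // consensus contract callback rolls back lastAccInputHash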
a/contracts/v2/previousVersions/IPolygonRollupBasePrevious.sol b/contracts/v2/previousVersions/IPolygonRollupBasePrevious.sol new file mode 100644 index 000000000..31ca166d9 --- /dev/null +++ b/contracts/v2/previousVersions/IPolygonRollupBasePrevious.sol @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: AGPL-3.0 + +pragma solidity ^0.8.20; + +interface IPolygonRollupBasePrevious { + function initialize( + address _admin, + address sequencer, + uint32 networkID, + address gasTokenAddress, + string memory sequencerURL, + string memory _networkName + ) external; + + function onVerifyBatches( + uint64 lastVerifiedBatch, + bytes32 newStateRoot, + address aggregator + ) external; + + function admin() external returns (address); +} diff --git a/contracts/v2/previousVersions/PolygonRollupBaseEtrogPrevious.sol b/contracts/v2/previousVersions/PolygonRollupBaseEtrogPrevious.sol index 903f2961a..f9422cec1 100644 --- a/contracts/v2/previousVersions/PolygonRollupBaseEtrogPrevious.sol +++ b/contracts/v2/previousVersions/PolygonRollupBaseEtrogPrevious.sol @@ -1,923 +1,956 @@ -// // SPDX-License-Identifier: AGPL-3.0 -// pragma solidity ^0.8.20; - -// import "@openzeppelin/contracts-upgradeable/token/ERC20/utils/SafeERC20Upgradeable.sol"; -// import "../interfaces/IPolygonZkEVMGlobalExitRootV2.sol"; -// import "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; -// import "../../interfaces/IPolygonZkEVMErrors.sol"; -// import "../interfaces/IPolygonZkEVMVEtrogErrors.sol"; -// import "../PolygonRollupManager.sol"; -// import "../interfaces/IPolygonRollupBase.sol"; -// import "../interfaces/IPolygonZkEVMBridgeV2.sol"; -// import "@openzeppelin/contracts-upgradeable/token/ERC20/extensions/IERC20MetadataUpgradeable.sol"; -// import "../lib/PolygonConstantsBase.sol"; - -// /** -// * Contract responsible for managing the states and the updates of L2 network. -// * There will be a trusted sequencer, which is able to send transactions. -// * Any user can force some transaction and the sequencer will have a timeout to add them in the queue. -// * The sequenced state is deterministic and can be precalculated before it's actually verified by a zkProof. -// * The aggregators will be able to verify the sequenced state with zkProofs and therefore make available the withdrawals from L2 network. -// * To enter and exit of the L2 network will be used a PolygonZkEVMBridge smart contract that will be deployed in both networks. 
-// */ -// contract PolygonRollupBaseEtrogPrevious is -// Initializable, -// PolygonConstantsBase, -// IPolygonZkEVMVEtrogErrors, -// IPolygonRollupBase -// { -// using SafeERC20Upgradeable for IERC20Upgradeable; - -// /** -// * @notice Struct which will be used to call sequenceBatches -// * @param transactions L2 ethereum transactions EIP-155 or pre-EIP-155 with signature: -// * EIP-155: rlp(nonce, gasprice, gasLimit, to, value, data, chainid, 0, 0,) || v || r || s -// * pre-EIP-155: rlp(nonce, gasprice, gasLimit, to, value, data) || v || r || s -// * @param forcedGlobalExitRoot Global exit root, empty when sequencing a non forced batch -// * @param forcedTimestamp Minimum timestamp of the force batch data, empty when sequencing a non forced batch -// * @param forcedBlockHashL1 blockHash snapshot of the force batch data, empty when sequencing a non forced batch -// */ -// struct BatchData { -// bytes transactions; -// bytes32 forcedGlobalExitRoot; -// uint64 forcedTimestamp; -// bytes32 forcedBlockHashL1; -// } - -// // Max transactions bytes that can be added in a single batch -// // Max keccaks circuit = (2**23 / 155286) * 44 = 2376 -// // Bytes per keccak = 136 -// // Minimum Static keccaks batch = 2 -// // Max bytes allowed = (2376 - 2) * 136 = 322864 bytes - 1 byte padding -// // Rounded to 300000 bytes -// // In order to process the transaction, the data is approximately hashed twice for ecrecover: -// // 300000 bytes / 2 = 150000 bytes -// // Since geth pool currently only accepts at maximum 128kb transactions: -// // https://github.com/ethereum/go-ethereum/blob/master/core/txpool/txpool.go#L54 -// // We will limit this length to be compliant with the geth restrictions since our node will use it -// // We let 8kb as a sanity margin -// uint256 internal constant _MAX_TRANSACTIONS_BYTE_LENGTH = 120000; - -// // Max force batch transaction length -// // This is used to avoid huge calldata attacks, where the attacker call force batches from another contract -// uint256 internal constant _MAX_FORCE_BATCH_BYTE_LENGTH = 5000; - -// // In order to encode the initialize transaction of the bridge there's have a constant part and the metadata which is variable -// // Note the total transaction will be constrained to 65535 to avoid attacks and simplify the implementation - -// // List rlp: 1 listLenLen "0xf9" (0xf7 + 2), + listLen 2 (32 bytes + txData bytes) (do not accept more than 65535 bytes) - -// // First byte of the initialize bridge tx, indicates a list with a lengt of 2 bytes -// // Since the minimum constant bytes will be: 259 (tx data empty) + 31 (tx parameters) = 259 (0x103) will always take 2 bytes to express the lenght of the rlp -// // Note that more than 2 bytes of list len is not supported, since it's constrained to 65535 -// uint8 public constant INITIALIZE_TX_BRIDGE_LIST_LEN_LEN = 0xf9; - -// // Tx parameters until the bridge address -// bytes public constant INITIALIZE_TX_BRIDGE_PARAMS = hex"80808401c9c38094"; - -// // RLP encoded metadata (non empty) - -// // TxData bytes: 164 bytes data ( signature 4 bytes + 5 parameters*32bytes + -// // (abi encoded metadata: 32 bytes position + 32 bytes len + 32 bytes position name + 32 bytes length name + 32 bytes position Symbol + 32 bytes length Symbol -// //+ 32 bytes decimal )) min 7*32 bytes = -// // = 164 bytes + 224 bytes = 388 (0x0184) minimum -// // Extra data: nameLen padded to 32 bytes + symbol len padded to 32 bytes - -// // Constant bytes: 1 nonce "0x80" + 1 gasPrice "0x80" + 5 gasLimit "0x8401c9c380" (30M gas) -// // + 21 
to ("0x94" + bridgeAddress") + 1 value "0x80" + 1 stringLenLen "0xb9" (0xb7 + 2) + -// // stringLen (0x0184 + nameLen padded to 32 bytes + symbol len padded to 32 bytes) + txData bytes = 32 bytes + txData bytes -// uint16 public constant INITIALIZE_TX_CONSTANT_BYTES = 32; - -// // Tx parameters after the bridge address -// bytes public constant INITIALIZE_TX_BRIDGE_PARAMS_AFTER_BRIDGE_ADDRESS = -// hex"80b9"; - -// // RLP empty metadata - -// // TxData empty metadata bytes: 164 bytes data ( signature 4 bytes + 5 parameters*32bytes + -// // (abi encoded metadata: 32 bytes position + 32 bytes len = 2*32 bytes = -// // = 164 bytes + 64 bytes = 228 (0xe4) - -// // Constant bytes empty metadata : 1 nonce "0x80" + 1 gasPrice "0x80" + 5 gasLimit "0x8401c9c380" (30M gas) -// // + 21 to ("0x94" + bridgeAddress") + 1 value "0x80" + 1 stringLenLen "0xb8" (0xb7 + 1) + -// // 1 stringLen (0xe4) + txData bytes = 31 bytes + txData bytes empty metadata 228 = 259 -// uint16 public constant INITIALIZE_TX_CONSTANT_BYTES_EMPTY_METADATA = 31; - -// uint8 public constant INITIALIZE_TX_DATA_LEN_EMPTY_METADATA = 228; // 0xe4 - -// // Tx parameters after the bridge address -// bytes -// public constant INITIALIZE_TX_BRIDGE_PARAMS_AFTER_BRIDGE_ADDRESS_EMPTY_METADATA = -// hex"80b8"; - -// // Signature used to initialize the bridge - -// // V parameter of the initialize signature -// uint8 public constant SIGNATURE_INITIALIZE_TX_V = 27; - -// // R parameter of the initialize signature -// bytes32 public constant SIGNATURE_INITIALIZE_TX_R = -// 0x00000000000000000000000000000000000000000000000000000005ca1ab1e0; - -// // S parameter of the initialize signature -// bytes32 public constant SIGNATURE_INITIALIZE_TX_S = -// 0x000000000000000000000000000000000000000000000000000000005ca1ab1e; - -// // Effective percentage of the initalize transaction -// bytes1 public constant INITIALIZE_TX_EFFECTIVE_PERCENTAGE = 0xFF; - -// // Global Exit Root address L2 -// IBasePolygonZkEVMGlobalExitRoot -// public constant GLOBAL_EXIT_ROOT_MANAGER_L2 = -// IBasePolygonZkEVMGlobalExitRoot( -// 0xa40D5f56745a118D0906a34E69aeC8C0Db1cB8fA -// ); - -// // POL token address -// IERC20Upgradeable public immutable pol; - -// // Global Exit Root interface -// IPolygonZkEVMGlobalExitRootV2 public immutable globalExitRootManager; - -// // PolygonZkEVM Bridge Address -// IPolygonZkEVMBridgeV2 public immutable bridgeAddress; - -// // Rollup manager -// PolygonRollupManager public immutable rollupManager; - -// // Address that will be able to adjust contract parameters -// address public admin; - -// // This account will be able to accept the admin role -// address public pendingAdmin; - -// // Trusted sequencer address -// address public trustedSequencer; - -// // Trusted sequencer URL -// string public trustedSequencerURL; - -// // L2 network name -// string public networkName; - -// // Current accumulate input hash -// bytes32 public lastAccInputHash; - -// // Queue of forced batches with their associated data -// // ForceBatchNum --> hashedForcedBatchData -// // hashedForcedBatchData: hash containing the necessary information to force a batch: -// // keccak256(keccak256(bytes transactions), bytes32 forcedGlobalExitRoot, unint64 forcedTimestamp, bytes32 forcedBlockHashL1) -// mapping(uint64 => bytes32) public forcedBatches; - -// // Last forced batch -// uint64 public lastForceBatch; - -// // Last forced batch included in the sequence -// uint64 public lastForceBatchSequenced; - -// // Force batch timeout -// uint64 public forceBatchTimeout; - -// 
// Indicates what address is able to do forced batches -// // If the address is set to 0, forced batches are open to everyone -// address public forceBatchAddress; - -// // Token address that will be used to pay gas fees in this rollup. This variable it's just for read purposes -// address public gasTokenAddress; - -// // Native network of the token address of the gas tokena address. This variable it's just for read purposes -// uint32 public gasTokenNetwork; - -// /** -// * @dev Emitted when the trusted sequencer sends a new batch of transactions -// */ -// event SequenceBatches(uint64 indexed numBatch, bytes32 l1InfoRoot); - -// /** -// * @dev Emitted when a batch is forced -// */ -// event ForceBatch( -// uint64 indexed forceBatchNum, -// bytes32 lastGlobalExitRoot, -// address sequencer, -// bytes transactions -// ); - -// /** -// * @dev Emitted when forced batches are sequenced by not the trusted sequencer -// */ -// event SequenceForceBatches(uint64 indexed numBatch); - -// /** -// * @dev Emitted when the contract is initialized, contain the first sequenced transaction -// */ -// event InitialSequenceBatches( -// bytes transactions, -// bytes32 lastGlobalExitRoot, -// address sequencer -// ); - -// /** -// * @dev Emitted when a aggregator verifies batches -// */ -// event VerifyBatches( -// uint64 indexed numBatch, -// bytes32 stateRoot, -// address indexed aggregator -// ); - -// /** -// * @dev Emitted when the admin updates the trusted sequencer address -// */ -// event SetTrustedSequencer(address newTrustedSequencer); - -// /** -// * @dev Emitted when the admin updates the sequencer URL -// */ -// event SetTrustedSequencerURL(string newTrustedSequencerURL); - -// /** -// * @dev Emitted when the admin update the force batch timeout -// */ -// event SetForceBatchTimeout(uint64 newforceBatchTimeout); - -// /** -// * @dev Emitted when the admin update the force batch address -// */ -// event SetForceBatchAddress(address newForceBatchAddress); - -// /** -// * @dev Emitted when the admin starts the two-step transfer role setting a new pending admin -// */ -// event TransferAdminRole(address newPendingAdmin); - -// /** -// * @dev Emitted when the pending admin accepts the admin role -// */ -// event AcceptAdminRole(address newAdmin); - -// // General parameters that will have in common all networks that deploys rollup manager - -// /** -// * @param _globalExitRootManager Global exit root manager address -// * @param _pol POL token address -// * @param _bridgeAddress Bridge address -// * @param _rollupManager Global exit root manager address -// */ -// constructor( -// IPolygonZkEVMGlobalExitRootV2 _globalExitRootManager, -// IERC20Upgradeable _pol, -// IPolygonZkEVMBridgeV2 _bridgeAddress, -// PolygonRollupManager _rollupManager -// ) { -// globalExitRootManager = _globalExitRootManager; -// pol = _pol; -// bridgeAddress = _bridgeAddress; -// rollupManager = _rollupManager; -// } - -// /** -// * @param _admin Admin address -// * @param sequencer Trusted sequencer address -// * @param networkID Indicates the network identifier that will be used in the bridge -// * @param _gasTokenAddress Indicates the token address in mainnet that will be used as a gas token -// * Note if a wrapped token of the bridge is used, the original network and address of this wrapped are used instead -// * @param sequencerURL Trusted sequencer URL -// * @param _networkName L2 network name -// */ -// function initialize( -// address _admin, -// address sequencer, -// uint32 networkID, -// address _gasTokenAddress, 
-// string memory sequencerURL, -// string memory _networkName -// ) external virtual onlyRollupManager initializer { -// bytes memory gasTokenMetadata; - -// if (_gasTokenAddress != address(0)) { -// // Ask for token metadata, the same way is enconded in the bridge -// // Note that this function will revert if the token is not in this network -// // Note that this could be a possible reentrant call, but cannot make changes on the state since are static call -// gasTokenMetadata = bridgeAddress.getTokenMetadata(_gasTokenAddress); - -// // Check gas token address on the bridge -// ( -// uint32 originWrappedNetwork, -// address originWrappedAddress -// ) = bridgeAddress.wrappedTokenToTokenInfo(_gasTokenAddress); - -// if (originWrappedNetwork != 0) { -// // It's a wrapped token, get the wrapped parameters -// gasTokenAddress = originWrappedAddress; -// gasTokenNetwork = originWrappedNetwork; -// } else { -// // gasTokenNetwork will be mainnet, for instance 0 -// gasTokenAddress = _gasTokenAddress; -// } -// } -// // Sequence transaction to initilize the bridge - -// // Calculate transaction to initialize the bridge -// bytes memory transaction = generateInitializeTransaction( -// networkID, -// gasTokenAddress, -// gasTokenNetwork, -// gasTokenMetadata -// ); - -// bytes32 currentTransactionsHash = keccak256(transaction); - -// // Get current timestamp and global exit root -// uint64 currentTimestamp = uint64(block.timestamp); -// bytes32 lastGlobalExitRoot = globalExitRootManager -// .getLastGlobalExitRoot(); - -// // Add the transaction to the sequence as if it was a force transaction -// bytes32 newAccInputHash = keccak256( -// abi.encodePacked( -// bytes32(0), // Current acc Input hash -// currentTransactionsHash, -// lastGlobalExitRoot, // Global exit root -// currentTimestamp, -// sequencer, -// blockhash(block.number - 1) -// ) -// ); - -// lastAccInputHash = newAccInputHash; - -// rollupManager.onSequenceBatches( -// uint64(1), // num total batches -// newAccInputHash -// ); - -// // Set initialize variables -// admin = _admin; -// trustedSequencer = sequencer; - -// trustedSequencerURL = sequencerURL; -// networkName = _networkName; - -// forceBatchAddress = _admin; - -// // Constant deployment variables -// forceBatchTimeout = 5 days; - -// emit InitialSequenceBatches(transaction, lastGlobalExitRoot, sequencer); -// } - -// modifier onlyAdmin() { -// if (admin != msg.sender) { -// revert OnlyAdmin(); -// } -// _; -// } - -// modifier onlyTrustedSequencer() { -// if (trustedSequencer != msg.sender) { -// revert OnlyTrustedSequencer(); -// } -// _; -// } - -// modifier isSenderAllowedToForceBatches() { -// address cacheForceBatchAddress = forceBatchAddress; -// if ( -// cacheForceBatchAddress != address(0) && -// cacheForceBatchAddress != msg.sender -// ) { -// revert ForceBatchNotAllowed(); -// } -// _; -// } - -// modifier onlyRollupManager() { -// if (address(rollupManager) != msg.sender) { -// revert OnlyRollupManager(); -// } -// _; -// } - -// ///////////////////////////////////// -// // Sequence/Verify batches functions -// //////////////////////////////////// - -// /** -// * @notice Allows a sequencer to send multiple batches -// * @param batches Struct array which holds the necessary data to append new batches to the sequence -// * @param l2Coinbase Address that will receive the fees from L2 -// * note Pol is not a reentrant token -// */ -// function sequenceBatches( -// BatchData[] calldata batches, -// address l2Coinbase -// ) public virtual onlyTrustedSequencer { -// 
uint256 batchesNum = batches.length; -// if (batchesNum == 0) { -// revert SequenceZeroBatches(); -// } - -// if (batchesNum > _MAX_VERIFY_BATCHES) { -// revert ExceedMaxVerifyBatches(); -// } - -// // Update global exit root if there are new deposits -// bridgeAddress.updateGlobalExitRoot(); - -// // Get global batch variables -// bytes32 l1InfoRoot = globalExitRootManager.getRoot(); -// uint64 currentTimestamp = uint64(block.timestamp); - -// // Store storage variables in memory, to save gas, because will be overrided multiple times -// uint64 currentLastForceBatchSequenced = lastForceBatchSequenced; -// bytes32 currentAccInputHash = lastAccInputHash; - -// // Store in a temporal variable, for avoid access again the storage slot -// uint64 initLastForceBatchSequenced = currentLastForceBatchSequenced; - -// for (uint256 i = 0; i < batchesNum; i++) { -// // Load current sequence -// BatchData memory currentBatch = batches[i]; - -// // Store the current transactions hash since can be used more than once for gas saving -// bytes32 currentTransactionsHash = keccak256( -// currentBatch.transactions -// ); - -// // Check if it's a forced batch -// if (currentBatch.forcedTimestamp > 0) { -// currentLastForceBatchSequenced++; - -// // Check forced data matches -// bytes32 hashedForcedBatchData = keccak256( -// abi.encodePacked( -// currentTransactionsHash, -// currentBatch.forcedGlobalExitRoot, -// currentBatch.forcedTimestamp, -// currentBatch.forcedBlockHashL1 -// ) -// ); - -// if ( -// hashedForcedBatchData != -// forcedBatches[currentLastForceBatchSequenced] -// ) { -// revert ForcedDataDoesNotMatch(); -// } - -// // Calculate next accumulated input hash -// currentAccInputHash = keccak256( -// abi.encodePacked( -// currentAccInputHash, -// currentTransactionsHash, -// currentBatch.forcedGlobalExitRoot, -// currentBatch.forcedTimestamp, -// l2Coinbase, -// currentBatch.forcedBlockHashL1 -// ) -// ); - -// // Delete forceBatch data since won't be used anymore -// delete forcedBatches[currentLastForceBatchSequenced]; -// } else { -// // Note that forcedGlobalExitRoot and forcedBlockHashL1 remain unused and unchecked in this path -// // The synchronizer should be aware of that -// if ( -// currentBatch.transactions.length > -// _MAX_TRANSACTIONS_BYTE_LENGTH -// ) { -// revert TransactionsLengthAboveMax(); -// } - -// // Calculate next accumulated input hash -// currentAccInputHash = keccak256( -// abi.encodePacked( -// currentAccInputHash, -// currentTransactionsHash, -// l1InfoRoot, -// currentTimestamp, -// l2Coinbase, -// bytes32(0) -// ) -// ); -// } -// } - -// // Sanity check, should be unreachable -// if (currentLastForceBatchSequenced > lastForceBatch) { -// revert ForceBatchesOverflow(); -// } - -// // Store back the storage variables -// lastAccInputHash = currentAccInputHash; - -// uint256 nonForcedBatchesSequenced = batchesNum; - -// // Check if there has been forced batches -// if (currentLastForceBatchSequenced != initLastForceBatchSequenced) { -// uint64 forcedBatchesSequenced = currentLastForceBatchSequenced - -// initLastForceBatchSequenced; -// // substract forced batches -// nonForcedBatchesSequenced -= forcedBatchesSequenced; - -// // Transfer pol for every forced batch submitted -// pol.safeTransfer( -// address(rollupManager), -// calculatePolPerForceBatch() * (forcedBatchesSequenced) -// ); - -// // Store new last force batch sequenced -// lastForceBatchSequenced = currentLastForceBatchSequenced; -// } - -// // Pay collateral for every non-forced batch submitted -// 
pol.safeTransferFrom( -// msg.sender, -// address(rollupManager), -// rollupManager.getBatchFee() * nonForcedBatchesSequenced -// ); - -// uint64 currentBatchSequenced = rollupManager.onSequenceBatches( -// uint64(batchesNum), -// currentAccInputHash -// ); - -// emit SequenceBatches(currentBatchSequenced, l1InfoRoot); -// } - -// /** -// * @notice Callback on verify batches, can only be called by the rollup manager -// * @param lastVerifiedBatch Last verified batch -// * @param newStateRoot new state root -// * @param aggregator Aggregator address -// */ -// function onVerifyBatches( -// uint64 lastVerifiedBatch, -// bytes32 newStateRoot, -// address aggregator -// ) public virtual override onlyRollupManager { -// emit VerifyBatches(lastVerifiedBatch, newStateRoot, aggregator); -// } - -// //////////////////////////// -// // Force batches functions -// //////////////////////////// - -// /** -// * @notice Allows a sequencer/user to force a batch of L2 transactions. -// * This should be used only in extreme cases where the trusted sequencer does not work as expected -// * Note The sequencer has certain degree of control on how non-forced and forced batches are ordered -// * In order to assure that users force transactions will be processed properly, user must not sign any other transaction -// * with the same nonce -// * @param transactions L2 ethereum transactions EIP-155 or pre-EIP-155 with signature: -// * @param polAmount Max amount of pol tokens that the sender is willing to pay -// */ -// function forceBatch( -// bytes calldata transactions, -// uint256 polAmount -// ) public virtual isSenderAllowedToForceBatches { -// // Check if rollup manager is on emergency state -// if (rollupManager.isEmergencyState()) { -// revert ForceBatchesNotAllowedOnEmergencyState(); -// } - -// // Calculate pol collateral -// uint256 polFee = rollupManager.getForcedBatchFee(); - -// if (polFee > polAmount) { -// revert NotEnoughPOLAmount(); -// } - -// if (transactions.length > _MAX_FORCE_BATCH_BYTE_LENGTH) { -// revert TransactionsLengthAboveMax(); -// } - -// // keep the pol fees on this contract until forced it's sequenced -// pol.safeTransferFrom(msg.sender, address(this), polFee); - -// // Get globalExitRoot global exit root -// bytes32 lastGlobalExitRoot = globalExitRootManager -// .getLastGlobalExitRoot(); - -// // Update forcedBatches mapping -// lastForceBatch++; - -// forcedBatches[lastForceBatch] = keccak256( -// abi.encodePacked( -// keccak256(transactions), -// lastGlobalExitRoot, -// uint64(block.timestamp), -// blockhash(block.number - 1) -// ) -// ); - -// if (msg.sender == tx.origin) { -// // Getting the calldata from an EOA is easy so no need to put the `transactions` in the event -// emit ForceBatch(lastForceBatch, lastGlobalExitRoot, msg.sender, ""); -// } else { -// // Getting internal transaction calldata is complicated (because it requires an archive node) -// // Therefore it's worth it to put the `transactions` in the event, which is easy to query -// emit ForceBatch( -// lastForceBatch, -// lastGlobalExitRoot, -// msg.sender, -// transactions -// ); -// } -// } - -// /** -// * @notice Allows anyone to sequence forced Batches if the trusted sequencer has not done so in the timeout period -// * @param batches Struct array which holds the necessary data to append force batches -// */ -// function sequenceForceBatches( -// BatchData[] calldata batches -// ) external virtual isSenderAllowedToForceBatches { -// // Check if rollup manager is on emergency state -// if ( -// 
rollupManager.lastDeactivatedEmergencyStateTimestamp() + -// _HALT_AGGREGATION_TIMEOUT > -// block.timestamp -// ) { -// revert HaltTimeoutNotExpiredAfterEmergencyState(); -// } - -// uint256 batchesNum = batches.length; - -// if (batchesNum == 0) { -// revert SequenceZeroBatches(); -// } - -// if (batchesNum > _MAX_VERIFY_BATCHES) { -// revert ExceedMaxVerifyBatches(); -// } - -// if ( -// uint256(lastForceBatchSequenced) + batchesNum > -// uint256(lastForceBatch) -// ) { -// revert ForceBatchesOverflow(); -// } - -// // Store storage variables in memory, to save gas, because will be overrided multiple times -// uint64 currentLastForceBatchSequenced = lastForceBatchSequenced; -// bytes32 currentAccInputHash = lastAccInputHash; - -// // Sequence force batches -// for (uint256 i = 0; i < batchesNum; i++) { -// // Load current sequence -// BatchData memory currentBatch = batches[i]; -// currentLastForceBatchSequenced++; - -// // Store the current transactions hash since it's used more than once for gas saving -// bytes32 currentTransactionsHash = keccak256( -// currentBatch.transactions -// ); - -// // Check forced data matches -// bytes32 hashedForcedBatchData = keccak256( -// abi.encodePacked( -// currentTransactionsHash, -// currentBatch.forcedGlobalExitRoot, -// currentBatch.forcedTimestamp, -// currentBatch.forcedBlockHashL1 -// ) -// ); - -// if ( -// hashedForcedBatchData != -// forcedBatches[currentLastForceBatchSequenced] -// ) { -// revert ForcedDataDoesNotMatch(); -// } - -// // Delete forceBatch data since won't be used anymore -// delete forcedBatches[currentLastForceBatchSequenced]; - -// if (i == (batchesNum - 1)) { -// // The last batch will have the most restrictive timestamp -// if ( -// currentBatch.forcedTimestamp + forceBatchTimeout > -// block.timestamp -// ) { -// revert ForceBatchTimeoutNotExpired(); -// } -// } -// // Calculate next acc input hash -// currentAccInputHash = keccak256( -// abi.encodePacked( -// currentAccInputHash, -// currentTransactionsHash, -// currentBatch.forcedGlobalExitRoot, -// currentBatch.forcedTimestamp, -// msg.sender, -// currentBatch.forcedBlockHashL1 -// ) -// ); -// } - -// // Transfer pol for every forced batch submitted -// pol.safeTransfer( -// address(rollupManager), -// calculatePolPerForceBatch() * (batchesNum) -// ); - -// // Store back the storage variables -// lastAccInputHash = currentAccInputHash; -// lastForceBatchSequenced = currentLastForceBatchSequenced; - -// uint64 currentBatchSequenced = rollupManager.onSequenceBatches( -// uint64(batchesNum), -// currentAccInputHash -// ); - -// emit SequenceForceBatches(currentBatchSequenced); -// } - -// ////////////////// -// // admin functions -// ////////////////// - -// /** -// * @notice Allow the admin to set a new trusted sequencer -// * @param newTrustedSequencer Address of the new trusted sequencer -// */ -// function setTrustedSequencer( -// address newTrustedSequencer -// ) external onlyAdmin { -// trustedSequencer = newTrustedSequencer; - -// emit SetTrustedSequencer(newTrustedSequencer); -// } - -// /** -// * @notice Allow the admin to set the trusted sequencer URL -// * @param newTrustedSequencerURL URL of trusted sequencer -// */ -// function setTrustedSequencerURL( -// string memory newTrustedSequencerURL -// ) external onlyAdmin { -// trustedSequencerURL = newTrustedSequencerURL; - -// emit SetTrustedSequencerURL(newTrustedSequencerURL); -// } - -// /** -// * @notice Allow the admin to change the force batch address, that will be allowed to force batches -// * If 
address 0 is set, then everyone is able to force batches, this action is irreversible -// * @param newForceBatchAddress New force batch address -// */ -// function setForceBatchAddress( -// address newForceBatchAddress -// ) external onlyAdmin { -// if (forceBatchAddress == address(0)) { -// revert ForceBatchesDecentralized(); -// } -// forceBatchAddress = newForceBatchAddress; - -// emit SetForceBatchAddress(newForceBatchAddress); -// } - -// /** -// * @notice Allow the admin to set the forcedBatchTimeout -// * The new value can only be lower, except if emergency state is active -// * @param newforceBatchTimeout New force batch timeout -// */ -// function setForceBatchTimeout( -// uint64 newforceBatchTimeout -// ) external onlyAdmin { -// if (newforceBatchTimeout > _HALT_AGGREGATION_TIMEOUT) { -// revert InvalidRangeForceBatchTimeout(); -// } - -// if (!rollupManager.isEmergencyState()) { -// if (newforceBatchTimeout >= forceBatchTimeout) { -// revert InvalidRangeForceBatchTimeout(); -// } -// } - -// forceBatchTimeout = newforceBatchTimeout; -// emit SetForceBatchTimeout(newforceBatchTimeout); -// } - -// /** -// * @notice Starts the admin role transfer -// * This is a two step process, the pending admin must accepted to finalize the process -// * @param newPendingAdmin Address of the new pending admin -// */ -// function transferAdminRole(address newPendingAdmin) external onlyAdmin { -// pendingAdmin = newPendingAdmin; -// emit TransferAdminRole(newPendingAdmin); -// } - -// /** -// * @notice Allow the current pending admin to accept the admin role -// */ -// function acceptAdminRole() external { -// if (pendingAdmin != msg.sender) { -// revert OnlyPendingAdmin(); -// } - -// admin = pendingAdmin; -// emit AcceptAdminRole(pendingAdmin); -// } - -// ////////////////// -// // view/pure functions -// ////////////////// - -// /** -// * @notice Function to calculate the reward for a forced batch -// */ -// function calculatePolPerForceBatch() public view returns (uint256) { -// uint256 currentBalance = pol.balanceOf(address(this)); - -// // Pending forced Batches = last forced batch added - last forced batch sequenced -// uint256 pendingForcedBatches = lastForceBatch - lastForceBatchSequenced; - -// if (pendingForcedBatches == 0) return 0; -// return currentBalance / pendingForcedBatches; -// } - -// /** -// * @notice Generate Initialize transaction for hte bridge on L2 -// * @param networkID Indicates the network identifier that will be used in the bridge -// * @param _gasTokenAddress Indicates the token address that will be used to pay gas fees in the new rollup -// * @param _gasTokenNetwork Indicates the native network of the token address -// * @param _gasTokenMetadata Abi encoded gas token metadata -// */ -// function generateInitializeTransaction( -// uint32 networkID, -// address _gasTokenAddress, -// uint32 _gasTokenNetwork, -// bytes memory _gasTokenMetadata -// ) public view returns (bytes memory) { -// bytes memory initializeBrigeData = abi.encodeCall( -// IPolygonZkEVMBridgeV2.initialize, -// ( -// networkID, -// _gasTokenAddress, -// _gasTokenNetwork, -// GLOBAL_EXIT_ROOT_MANAGER_L2, -// address(0), // Rollup manager on L2 does not exist -// _gasTokenMetadata -// ) -// ); - -// bytes memory bytesToSign; - -// if (_gasTokenMetadata.length == 0) { -// bytesToSign = abi.encodePacked( -// INITIALIZE_TX_BRIDGE_LIST_LEN_LEN, -// uint16(initializeBrigeData.length) + -// INITIALIZE_TX_CONSTANT_BYTES_EMPTY_METADATA, // do not support more than 2 bytes of length, intended to revert on 
overflow -// INITIALIZE_TX_BRIDGE_PARAMS, -// bridgeAddress, -// INITIALIZE_TX_BRIDGE_PARAMS_AFTER_BRIDGE_ADDRESS_EMPTY_METADATA, -// INITIALIZE_TX_DATA_LEN_EMPTY_METADATA, -// initializeBrigeData -// ); -// } else { -// // Do not support more than 65535 bytes -// if (initializeBrigeData.length > type(uint16).max) { -// revert HugeTokenMetadataNotSupported(); -// } -// uint16 initializeBrigeDataLen = uint16(initializeBrigeData.length); - -// bytesToSign = abi.encodePacked( -// INITIALIZE_TX_BRIDGE_LIST_LEN_LEN, -// uint16(initializeBrigeData.length) + -// INITIALIZE_TX_CONSTANT_BYTES, // do not support more than 2 bytes of length, intended to revert on overflow -// INITIALIZE_TX_BRIDGE_PARAMS, -// bridgeAddress, -// INITIALIZE_TX_BRIDGE_PARAMS_AFTER_BRIDGE_ADDRESS, -// initializeBrigeDataLen, -// initializeBrigeData -// ); -// } - -// // Sanity check that the ecrecover will work -// // Should never happen that giving a valid signature, ecrecover "breaks" -// address signer = ecrecover( -// keccak256(bytesToSign), -// SIGNATURE_INITIALIZE_TX_V, -// SIGNATURE_INITIALIZE_TX_R, -// SIGNATURE_INITIALIZE_TX_S -// ); - -// if (signer == address(0)) { -// revert InvalidInitializeTransaction(); -// } - -// bytes memory transaction = abi.encodePacked( -// bytesToSign, -// SIGNATURE_INITIALIZE_TX_R, -// SIGNATURE_INITIALIZE_TX_S, -// SIGNATURE_INITIALIZE_TX_V, -// INITIALIZE_TX_EFFECTIVE_PERCENTAGE -// ); - -// return transaction; -// } -// } +// SPDX-License-Identifier: AGPL-3.0 +pragma solidity ^0.8.20; + +import "@openzeppelin/contracts-upgradeable/token/ERC20/utils/SafeERC20Upgradeable.sol"; +import "../interfaces/IPolygonZkEVMGlobalExitRootV2.sol"; +import "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; +import "../../interfaces/IPolygonZkEVMErrors.sol"; +import "../interfaces/IPolygonZkEVMVEtrogErrors.sol"; +import "../PolygonRollupManager.sol"; +import "./IPolygonRollupBasePrevious.sol"; +import "../interfaces/IPolygonZkEVMBridgeV2.sol"; +import "@openzeppelin/contracts-upgradeable/token/ERC20/extensions/IERC20MetadataUpgradeable.sol"; +import "../lib/PolygonConstantsBase.sol"; + +/** + * Contract responsible for managing the states and the updates of L2 network. + * There will be a trusted sequencer, which is able to send transactions. + * Any user can force some transaction and the sequencer will have a timeout to add them in the queue. + * The sequenced state is deterministic and can be precalculated before it's actually verified by a zkProof. + * The aggregators will be able to verify the sequenced state with zkProofs and therefore make available the withdrawals from L2 network. + * To enter and exit of the L2 network will be used a PolygonZkEVMBridge smart contract that will be deployed in both networks. 
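+ * Every sequenced batch is chained into an accumulated input hash so the virtual state can be recomputed off-chain:
+ * accInputHash_n = keccak256(accInputHash_n-1 || keccak256(transactions) || l1InfoRoot (or forcedGlobalExitRoot) || timestamp || l2Coinbase || bytes32(0) (or forcedBlockHashL1)).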
+ */ +abstract contract PolygonRollupBaseEtrogPrevious is + Initializable, + PolygonConstantsBase, + IPolygonZkEVMVEtrogErrors, + IPolygonRollupBasePrevious +{ + using SafeERC20Upgradeable for IERC20Upgradeable; + + /** + * @notice Struct which will be used to call sequenceBatches + * @param transactions L2 ethereum transactions EIP-155 or pre-EIP-155 with signature: + * EIP-155: rlp(nonce, gasprice, gasLimit, to, value, data, chainid, 0, 0,) || v || r || s + * pre-EIP-155: rlp(nonce, gasprice, gasLimit, to, value, data) || v || r || s + * @param forcedGlobalExitRoot Global exit root, empty when sequencing a non forced batch + * @param forcedTimestamp Minimum timestamp of the force batch data, empty when sequencing a non forced batch + * @param forcedBlockHashL1 blockHash snapshot of the force batch data, empty when sequencing a non forced batch + */ + struct BatchData { + bytes transactions; + bytes32 forcedGlobalExitRoot; + uint64 forcedTimestamp; + bytes32 forcedBlockHashL1; + } + + // Max transactions bytes that can be added in a single batch + // Max keccaks circuit = (2**23 / 155286) * 44 = 2376 + // Bytes per keccak = 136 + // Minimum Static keccaks batch = 2 + // Max bytes allowed = (2376 - 2) * 136 = 322864 bytes - 1 byte padding + // Rounded to 300000 bytes + // In order to process the transaction, the data is approximately hashed twice for ecrecover: + // 300000 bytes / 2 = 150000 bytes + // Since geth pool currently only accepts at maximum 128kb transactions: + // https://github.com/ethereum/go-ethereum/blob/master/core/txpool/txpool.go#L54 + // We will limit this length to be compliant with the geth restrictions since our node will use it + // We let 8kb as a sanity margin + uint256 internal constant _MAX_TRANSACTIONS_BYTE_LENGTH = 120000; + + // Max force batch transaction length + // This is used to avoid huge calldata attacks, where the attacker call force batches from another contract + uint256 internal constant _MAX_FORCE_BATCH_BYTE_LENGTH = 5000; + + // In order to encode the initialize transaction of the bridge there's have a constant part and the metadata which is variable + // Note the total transaction will be constrained to 65535 to avoid attacks and simplify the implementation + + // List rlp: 1 listLenLen "0xf9" (0xf7 + 2), + listLen 2 (32 bytes + txData bytes) (do not accept more than 65535 bytes) + + // First byte of the initialize bridge tx, indicates a list with a lengt of 2 bytes + // Since the minimum constant bytes will be: 259 (tx data empty) + 31 (tx parameters) = 259 (0x103) will always take 2 bytes to express the lenght of the rlp + // Note that more than 2 bytes of list len is not supported, since it's constrained to 65535 + uint8 public constant INITIALIZE_TX_BRIDGE_LIST_LEN_LEN = 0xf9; + + // Tx parameters until the bridge address + bytes public constant INITIALIZE_TX_BRIDGE_PARAMS = hex"80808401c9c38094"; + + // RLP encoded metadata (non empty) + + // TxData bytes: 164 bytes data ( signature 4 bytes + 5 parameters*32bytes + + // (abi encoded metadata: 32 bytes position + 32 bytes len + 32 bytes position name + 32 bytes length name + 32 bytes position Symbol + 32 bytes length Symbol + //+ 32 bytes decimal )) min 7*32 bytes = + // = 164 bytes + 224 bytes = 388 (0x0184) minimum + // Extra data: nameLen padded to 32 bytes + symbol len padded to 32 bytes + + // Constant bytes: 1 nonce "0x80" + 1 gasPrice "0x80" + 5 gasLimit "0x8401c9c380" (30M gas) + // + 21 to ("0x94" + bridgeAddress") + 1 value "0x80" + 1 stringLenLen "0xb9" (0xb7 + 2) + + // stringLen 
(0x0184 + nameLen padded to 32 bytes + symbol len padded to 32 bytes) + txData bytes = 32 bytes + txData bytes + uint16 public constant INITIALIZE_TX_CONSTANT_BYTES = 32; + + // Tx parameters after the bridge address + bytes public constant INITIALIZE_TX_BRIDGE_PARAMS_AFTER_BRIDGE_ADDRESS = + hex"80b9"; + + // RLP empty metadata + + // TxData empty metadata bytes: 164 bytes data ( signature 4 bytes + 5 parameters*32bytes + + // (abi encoded metadata: 32 bytes position + 32 bytes len = 2*32 bytes = + // = 164 bytes + 64 bytes = 228 (0xe4) + + // Constant bytes empty metadata : 1 nonce "0x80" + 1 gasPrice "0x80" + 5 gasLimit "0x8401c9c380" (30M gas) + // + 21 to ("0x94" + bridgeAddress") + 1 value "0x80" + 1 stringLenLen "0xb8" (0xb7 + 1) + + // 1 stringLen (0xe4) + txData bytes = 31 bytes + txData bytes empty metadata 228 = 259 + uint16 public constant INITIALIZE_TX_CONSTANT_BYTES_EMPTY_METADATA = 31; + + uint8 public constant INITIALIZE_TX_DATA_LEN_EMPTY_METADATA = 228; // 0xe4 + + // Tx parameters after the bridge address + bytes + public constant INITIALIZE_TX_BRIDGE_PARAMS_AFTER_BRIDGE_ADDRESS_EMPTY_METADATA = + hex"80b8"; + + // Signature used to initialize the bridge + + // V parameter of the initialize signature + uint8 public constant SIGNATURE_INITIALIZE_TX_V = 27; + + // R parameter of the initialize signature + bytes32 public constant SIGNATURE_INITIALIZE_TX_R = + 0x00000000000000000000000000000000000000000000000000000005ca1ab1e0; + + // S parameter of the initialize signature + bytes32 public constant SIGNATURE_INITIALIZE_TX_S = + 0x000000000000000000000000000000000000000000000000000000005ca1ab1e; + + // Effective percentage of the initalize transaction + bytes1 public constant INITIALIZE_TX_EFFECTIVE_PERCENTAGE = 0xFF; + + // Global Exit Root address L2 + IBasePolygonZkEVMGlobalExitRoot + public constant GLOBAL_EXIT_ROOT_MANAGER_L2 = + IBasePolygonZkEVMGlobalExitRoot( + 0xa40D5f56745a118D0906a34E69aeC8C0Db1cB8fA + ); + + // Timestamp range that's given to the sequencer as a safety measure to avoid reverts if the transaction is mined to quickly + uint256 public constant TIMESTAMP_RANGE = 36; + + // POL token address + IERC20Upgradeable public immutable pol; + + // Global Exit Root interface + IPolygonZkEVMGlobalExitRootV2 public immutable globalExitRootManager; + + // PolygonZkEVM Bridge Address + IPolygonZkEVMBridgeV2 public immutable bridgeAddress; + + // Rollup manager + PolygonRollupManager public immutable rollupManager; + + // Address that will be able to adjust contract parameters + address public admin; + + // This account will be able to accept the admin role + address public pendingAdmin; + + // Trusted sequencer address + address public trustedSequencer; + + // Trusted sequencer URL + string public trustedSequencerURL; + + // L2 network name + string public networkName; + + // Current accumulate input hash + bytes32 public lastAccInputHash; + + // Queue of forced batches with their associated data + // ForceBatchNum --> hashedForcedBatchData + // hashedForcedBatchData: hash containing the necessary information to force a batch: + // keccak256(keccak256(bytes transactions), bytes32 forcedGlobalExitRoot, unint64 forcedTimestamp, bytes32 forcedBlockHashL1) + mapping(uint64 => bytes32) public forcedBatches; + + // Last forced batch + uint64 public lastForceBatch; + + // Last forced batch included in the sequence + uint64 public lastForceBatchSequenced; + + // Force batch timeout + uint64 public forceBatchTimeout; + + // Indicates what address is able to do forced batches 
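+    // Initially set to the admin address in initialize()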
+ // If the address is set to 0, forced batches are open to everyone + address public forceBatchAddress; + + // Token address that will be used to pay gas fees in this rollup. This variable it's just for read purposes + address public gasTokenAddress; + + // Native network of the token address of the gas tokena address. This variable it's just for read purposes + uint32 public gasTokenNetwork; + + /** + * @dev This empty reserved space is put in place to allow future versions to add new + * variables without shifting down storage in the inheritance chain. + */ + uint256[50] private _gap; + + /** + * @dev Emitted when the trusted sequencer sends a new batch of transactions + */ + event SequenceBatches(uint64 indexed numBatch, bytes32 l1InfoRoot); + + /** + * @dev Emitted when a batch is forced + */ + event ForceBatch( + uint64 indexed forceBatchNum, + bytes32 lastGlobalExitRoot, + address sequencer, + bytes transactions + ); + + /** + * @dev Emitted when forced batches are sequenced by not the trusted sequencer + */ + event SequenceForceBatches(uint64 indexed numBatch); + + /** + * @dev Emitted when the contract is initialized, contain the first sequenced transaction + */ + event InitialSequenceBatches( + bytes transactions, + bytes32 lastGlobalExitRoot, + address sequencer + ); + + /** + * @dev Emitted when a aggregator verifies batches + */ + event VerifyBatches( + uint64 indexed numBatch, + bytes32 stateRoot, + address indexed aggregator + ); + + /** + * @dev Emitted when the admin updates the trusted sequencer address + */ + event SetTrustedSequencer(address newTrustedSequencer); + + /** + * @dev Emitted when the admin updates the sequencer URL + */ + event SetTrustedSequencerURL(string newTrustedSequencerURL); + + /** + * @dev Emitted when the admin update the force batch timeout + */ + event SetForceBatchTimeout(uint64 newforceBatchTimeout); + + /** + * @dev Emitted when the admin update the force batch address + */ + event SetForceBatchAddress(address newForceBatchAddress); + + /** + * @dev Emitted when the admin starts the two-step transfer role setting a new pending admin + */ + event TransferAdminRole(address newPendingAdmin); + + /** + * @dev Emitted when the pending admin accepts the admin role + */ + event AcceptAdminRole(address newAdmin); + + // General parameters that will have in common all networks that deploys rollup manager + + /** + * @param _globalExitRootManager Global exit root manager address + * @param _pol POL token address + * @param _bridgeAddress Bridge address + * @param _rollupManager Global exit root manager address + */ + constructor( + IPolygonZkEVMGlobalExitRootV2 _globalExitRootManager, + IERC20Upgradeable _pol, + IPolygonZkEVMBridgeV2 _bridgeAddress, + PolygonRollupManager _rollupManager + ) { + globalExitRootManager = _globalExitRootManager; + pol = _pol; + bridgeAddress = _bridgeAddress; + rollupManager = _rollupManager; + } + + /** + * @param _admin Admin address + * @param sequencer Trusted sequencer address + * @param networkID Indicates the network identifier that will be used in the bridge + * @param _gasTokenAddress Indicates the token address in mainnet that will be used as a gas token + * Note if a wrapped token of the bridge is used, the original network and address of this wrapped are used instead + * @param sequencerURL Trusted sequencer URL + * @param _networkName L2 network name + */ + function initialize( + address _admin, + address sequencer, + uint32 networkID, + address _gasTokenAddress, + string memory sequencerURL, + string memory 
_networkName + ) external virtual onlyRollupManager initializer { + bytes memory gasTokenMetadata = _verifyOrigin(_gasTokenAddress); + + // Sequence transaction to initilize the bridge + + // Calculate transaction to initialize the bridge + bytes memory transaction = generateInitializeTransaction( + networkID, + gasTokenAddress, + gasTokenNetwork, + gasTokenMetadata + ); + + bytes32 currentTransactionsHash = keccak256(transaction); + + // Get current timestamp and global exit root + uint64 currentTimestamp = uint64(block.timestamp); + bytes32 lastGlobalExitRoot = globalExitRootManager + .getLastGlobalExitRoot(); + + // Add the transaction to the sequence as if it was a force transaction + bytes32 newAccInputHash = keccak256( + abi.encodePacked( + bytes32(0), // Current acc Input hash + currentTransactionsHash, + lastGlobalExitRoot, // Global exit root + currentTimestamp, + sequencer, + blockhash(block.number - 1) + ) + ); + + lastAccInputHash = newAccInputHash; + + rollupManager.onSequenceBatches( + uint64(1), // num total batches + newAccInputHash + ); + + // Set initialize variables + admin = _admin; + trustedSequencer = sequencer; + + trustedSequencerURL = sequencerURL; + networkName = _networkName; + + forceBatchAddress = _admin; + + // Constant deployment variables + forceBatchTimeout = 5 days; + + emit InitialSequenceBatches(transaction, lastGlobalExitRoot, sequencer); + } + + modifier onlyAdmin() { + if (admin != msg.sender) { + revert OnlyAdmin(); + } + _; + } + + modifier onlyTrustedSequencer() { + if (trustedSequencer != msg.sender) { + revert OnlyTrustedSequencer(); + } + _; + } + + modifier isSenderAllowedToForceBatches() { + address cacheForceBatchAddress = forceBatchAddress; + if ( + cacheForceBatchAddress != address(0) && + cacheForceBatchAddress != msg.sender + ) { + revert ForceBatchNotAllowed(); + } + _; + } + + modifier onlyRollupManager() { + if (address(rollupManager) != msg.sender) { + revert OnlyRollupManager(); + } + _; + } + + ///////////////////////////////////// + // Sequence/Verify batches functions + //////////////////////////////////// + + /** + * @notice Allows a sequencer to send multiple batches + * @param batches Struct array which holds the necessary data to append new batches to the sequence + * @param maxSequenceTimestamp Max timestamp of the sequence. This timestamp must be inside a safety range (actual + 36 seconds). + * This timestamp should be equal or higher of the last block inside the sequence, otherwise this batch will be invalidated by circuit. + * @param initSequencedBatch This parameter must match the current last batch sequenced. 
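+     * (it is checked against the value returned by rollupManager.onSequenceBatches minus the number of batches sent in this call).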
+ * This will be a protection for the sequencer to avoid sending undesired data + * @param l2Coinbase Address that will receive the fees from L2 + * note Pol is not a reentrant token + */ + function sequenceBatches( + BatchData[] calldata batches, + uint64 maxSequenceTimestamp, + uint64 initSequencedBatch, + address l2Coinbase + ) public virtual onlyTrustedSequencer { + uint256 batchesNum = batches.length; + if (batchesNum == 0) { + revert SequenceZeroBatches(); + } + + if (batchesNum > _MAX_VERIFY_BATCHES) { + revert ExceedMaxVerifyBatches(); + } + + // Check max sequence timestamp inside of range + if ( + uint256(maxSequenceTimestamp) > (block.timestamp + TIMESTAMP_RANGE) + ) { + revert MaxTimestampSequenceInvalid(); + } + + // Update global exit root if there are new deposits + bridgeAddress.updateGlobalExitRoot(); + + // Get global batch variables + bytes32 l1InfoRoot = globalExitRootManager.getRoot(); + + // Store storage variables in memory, to save gas, because will be overrided multiple times + uint64 currentLastForceBatchSequenced = lastForceBatchSequenced; + bytes32 currentAccInputHash = lastAccInputHash; + + // Store in a temporal variable, for avoid access again the storage slot + uint64 initLastForceBatchSequenced = currentLastForceBatchSequenced; + + for (uint256 i = 0; i < batchesNum; i++) { + // Load current sequence + BatchData memory currentBatch = batches[i]; + + // Store the current transactions hash since can be used more than once for gas saving + bytes32 currentTransactionsHash = keccak256( + currentBatch.transactions + ); + + // Check if it's a forced batch + if (currentBatch.forcedTimestamp > 0) { + currentLastForceBatchSequenced++; + + // Check forced data matches + bytes32 hashedForcedBatchData = keccak256( + abi.encodePacked( + currentTransactionsHash, + currentBatch.forcedGlobalExitRoot, + currentBatch.forcedTimestamp, + currentBatch.forcedBlockHashL1 + ) + ); + + if ( + hashedForcedBatchData != + forcedBatches[currentLastForceBatchSequenced] + ) { + revert ForcedDataDoesNotMatch(); + } + + // Calculate next accumulated input hash + currentAccInputHash = keccak256( + abi.encodePacked( + currentAccInputHash, + currentTransactionsHash, + currentBatch.forcedGlobalExitRoot, + currentBatch.forcedTimestamp, + l2Coinbase, + currentBatch.forcedBlockHashL1 + ) + ); + + // Delete forceBatch data since won't be used anymore + delete forcedBatches[currentLastForceBatchSequenced]; + } else { + // Note that forcedGlobalExitRoot and forcedBlockHashL1 remain unused and unchecked in this path + // The synchronizer should be aware of that + if ( + currentBatch.transactions.length > + _MAX_TRANSACTIONS_BYTE_LENGTH + ) { + revert TransactionsLengthAboveMax(); + } + + // Calculate next accumulated input hash + currentAccInputHash = keccak256( + abi.encodePacked( + currentAccInputHash, + currentTransactionsHash, + l1InfoRoot, + maxSequenceTimestamp, + l2Coinbase, + bytes32(0) + ) + ); + } + } + + // Sanity check, should be unreachable + if (currentLastForceBatchSequenced > lastForceBatch) { + revert ForceBatchesOverflow(); + } + + // Store back the storage variables + lastAccInputHash = currentAccInputHash; + + uint256 nonForcedBatchesSequenced = batchesNum; + + // Check if there has been forced batches + if (currentLastForceBatchSequenced != initLastForceBatchSequenced) { + uint64 forcedBatchesSequenced = currentLastForceBatchSequenced - + initLastForceBatchSequenced; + // substract forced batches + nonForcedBatchesSequenced -= forcedBatchesSequenced; + + // Transfer pol for every 
forced batch submitted + pol.safeTransfer( + address(rollupManager), + calculatePolPerForceBatch() * (forcedBatchesSequenced) + ); + + // Store new last force batch sequenced + lastForceBatchSequenced = currentLastForceBatchSequenced; + } + + // Pay collateral for every non-forced batch submitted + pol.safeTransferFrom( + msg.sender, + address(rollupManager), + rollupManager.getBatchFee() * nonForcedBatchesSequenced + ); + + uint64 currentBatchSequenced = rollupManager.onSequenceBatches( + uint64(batchesNum), + currentAccInputHash + ); + + // Check init sequenced batch + if ( + initSequencedBatch != (currentBatchSequenced - uint64(batchesNum)) + ) { + revert InitSequencedBatchDoesNotMatch(); + } + + emit SequenceBatches(currentBatchSequenced, l1InfoRoot); + } + + /** + * @notice Callback on verify batches, can only be called by the rollup manager + * @param lastVerifiedBatch Last verified batch + * @param newStateRoot new state root + * @param aggregator Aggregator address + */ + function onVerifyBatches( + uint64 lastVerifiedBatch, + bytes32 newStateRoot, + address aggregator + ) public virtual override onlyRollupManager { + emit VerifyBatches(lastVerifiedBatch, newStateRoot, aggregator); + } + + //////////////////////////// + // Force batches functions + //////////////////////////// + + /** + * @notice Allows a sequencer/user to force a batch of L2 transactions. + * This should be used only in extreme cases where the trusted sequencer does not work as expected + * Note The sequencer has certain degree of control on how non-forced and forced batches are ordered + * In order to assure that users force transactions will be processed properly, user must not sign any other transaction + * with the same nonce + * @param transactions L2 ethereum transactions EIP-155 or pre-EIP-155 with signature: + * @param polAmount Max amount of pol tokens that the sender is willing to pay + */ + function forceBatch( + bytes calldata transactions, + uint256 polAmount + ) public virtual isSenderAllowedToForceBatches { + // Check if rollup manager is on emergency state + if (rollupManager.isEmergencyState()) { + revert ForceBatchesNotAllowedOnEmergencyState(); + } + + // Calculate pol collateral + uint256 polFee = rollupManager.getForcedBatchFee(); + + if (polFee > polAmount) { + revert NotEnoughPOLAmount(); + } + + if (transactions.length > _MAX_FORCE_BATCH_BYTE_LENGTH) { + revert TransactionsLengthAboveMax(); + } + + // keep the pol fees on this contract until forced it's sequenced + pol.safeTransferFrom(msg.sender, address(this), polFee); + + // Get globalExitRoot global exit root + bytes32 lastGlobalExitRoot = globalExitRootManager + .getLastGlobalExitRoot(); + + // Update forcedBatches mapping + lastForceBatch++; + + forcedBatches[lastForceBatch] = keccak256( + abi.encodePacked( + keccak256(transactions), + lastGlobalExitRoot, + uint64(block.timestamp), + blockhash(block.number - 1) + ) + ); + + if (msg.sender == tx.origin) { + // Getting the calldata from an EOA is easy so no need to put the `transactions` in the event + emit ForceBatch(lastForceBatch, lastGlobalExitRoot, msg.sender, ""); + } else { + // Getting internal transaction calldata is complicated (because it requires an archive node) + // Therefore it's worth it to put the `transactions` in the event, which is easy to query + emit ForceBatch( + lastForceBatch, + lastGlobalExitRoot, + msg.sender, + transactions + ); + } + } + + /** + * @notice Allows anyone to sequence forced Batches if the trusted sequencer has not done so in the timeout period 
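+     * Note the timeout is only checked against the forcedTimestamp of the last batch in the array (the most restrictive one),
+     * and msg.sender is used as the coinbase when chaining the accumulated input hash of each forced batch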
+ * @param batches Struct array which holds the necessary data to append force batches + */ + function sequenceForceBatches( + BatchData[] calldata batches + ) external virtual isSenderAllowedToForceBatches { + // Check if rollup manager is on emergency state + if ( + rollupManager.lastDeactivatedEmergencyStateTimestamp() + + _HALT_AGGREGATION_TIMEOUT > + block.timestamp + ) { + revert HaltTimeoutNotExpiredAfterEmergencyState(); + } + + uint256 batchesNum = batches.length; + + if (batchesNum == 0) { + revert SequenceZeroBatches(); + } + + if (batchesNum > _MAX_VERIFY_BATCHES) { + revert ExceedMaxVerifyBatches(); + } + + if ( + uint256(lastForceBatchSequenced) + batchesNum > + uint256(lastForceBatch) + ) { + revert ForceBatchesOverflow(); + } + + // Store storage variables in memory, to save gas, because will be overrided multiple times + uint64 currentLastForceBatchSequenced = lastForceBatchSequenced; + bytes32 currentAccInputHash = lastAccInputHash; + + // Sequence force batches + for (uint256 i = 0; i < batchesNum; i++) { + // Load current sequence + BatchData memory currentBatch = batches[i]; + currentLastForceBatchSequenced++; + + // Store the current transactions hash since it's used more than once for gas saving + bytes32 currentTransactionsHash = keccak256( + currentBatch.transactions + ); + + // Check forced data matches + bytes32 hashedForcedBatchData = keccak256( + abi.encodePacked( + currentTransactionsHash, + currentBatch.forcedGlobalExitRoot, + currentBatch.forcedTimestamp, + currentBatch.forcedBlockHashL1 + ) + ); + + if ( + hashedForcedBatchData != + forcedBatches[currentLastForceBatchSequenced] + ) { + revert ForcedDataDoesNotMatch(); + } + + // Delete forceBatch data since won't be used anymore + delete forcedBatches[currentLastForceBatchSequenced]; + + if (i == (batchesNum - 1)) { + // The last batch will have the most restrictive timestamp + if ( + currentBatch.forcedTimestamp + forceBatchTimeout > + block.timestamp + ) { + revert ForceBatchTimeoutNotExpired(); + } + } + // Calculate next acc input hash + currentAccInputHash = keccak256( + abi.encodePacked( + currentAccInputHash, + currentTransactionsHash, + currentBatch.forcedGlobalExitRoot, + currentBatch.forcedTimestamp, + msg.sender, + currentBatch.forcedBlockHashL1 + ) + ); + } + + // Transfer pol for every forced batch submitted + pol.safeTransfer( + address(rollupManager), + calculatePolPerForceBatch() * (batchesNum) + ); + + // Store back the storage variables + lastAccInputHash = currentAccInputHash; + lastForceBatchSequenced = currentLastForceBatchSequenced; + + uint64 currentBatchSequenced = rollupManager.onSequenceBatches( + uint64(batchesNum), + currentAccInputHash + ); + + emit SequenceForceBatches(currentBatchSequenced); + } + + ////////////////// + // admin functions + ////////////////// + + /** + * @notice Allow the admin to set a new trusted sequencer + * @param newTrustedSequencer Address of the new trusted sequencer + */ + function setTrustedSequencer( + address newTrustedSequencer + ) external onlyAdmin { + trustedSequencer = newTrustedSequencer; + + emit SetTrustedSequencer(newTrustedSequencer); + } + + /** + * @notice Allow the admin to set the trusted sequencer URL + * @param newTrustedSequencerURL URL of trusted sequencer + */ + function setTrustedSequencerURL( + string memory newTrustedSequencerURL + ) external onlyAdmin { + trustedSequencerURL = newTrustedSequencerURL; + + emit SetTrustedSequencerURL(newTrustedSequencerURL); + } + + /** + * @notice Allow the admin to change the force batch 
address, that will be allowed to force batches + * If address 0 is set, then everyone is able to force batches, this action is irreversible + * @param newForceBatchAddress New force batch address + */ + function setForceBatchAddress( + address newForceBatchAddress + ) external onlyAdmin { + if (forceBatchAddress == address(0)) { + revert ForceBatchesDecentralized(); + } + forceBatchAddress = newForceBatchAddress; + + emit SetForceBatchAddress(newForceBatchAddress); + } + + /** + * @notice Allow the admin to set the forcedBatchTimeout + * The new value can only be lower, except if emergency state is active + * @param newforceBatchTimeout New force batch timeout + */ + function setForceBatchTimeout( + uint64 newforceBatchTimeout + ) external onlyAdmin { + if (newforceBatchTimeout > _HALT_AGGREGATION_TIMEOUT) { + revert InvalidRangeForceBatchTimeout(); + } + + if (!rollupManager.isEmergencyState()) { + if (newforceBatchTimeout >= forceBatchTimeout) { + revert InvalidRangeForceBatchTimeout(); + } + } + + forceBatchTimeout = newforceBatchTimeout; + emit SetForceBatchTimeout(newforceBatchTimeout); + } + + /** + * @notice Starts the admin role transfer + * This is a two step process, the pending admin must accepted to finalize the process + * @param newPendingAdmin Address of the new pending admin + */ + function transferAdminRole(address newPendingAdmin) external onlyAdmin { + pendingAdmin = newPendingAdmin; + emit TransferAdminRole(newPendingAdmin); + } + + /** + * @notice Allow the current pending admin to accept the admin role + */ + function acceptAdminRole() external { + if (pendingAdmin != msg.sender) { + revert OnlyPendingAdmin(); + } + + admin = pendingAdmin; + emit AcceptAdminRole(pendingAdmin); + } + + ////////////////// + // view/pure functions + ////////////////// + + /** + * @notice Function to calculate the reward for a forced batch + */ + function calculatePolPerForceBatch() public view returns (uint256) { + uint256 currentBalance = pol.balanceOf(address(this)); + + // Pending forced Batches = last forced batch added - last forced batch sequenced + uint256 pendingForcedBatches = lastForceBatch - lastForceBatchSequenced; + + if (pendingForcedBatches == 0) return 0; + return currentBalance / pendingForcedBatches; + } + + /** + * @notice Generate Initialize transaction for hte bridge on L2 + * @param networkID Indicates the network identifier that will be used in the bridge + * @param _gasTokenAddress Indicates the token address that will be used to pay gas fees in the new rollup + * @param _gasTokenNetwork Indicates the native network of the token address + * @param _gasTokenMetadata Abi encoded gas token metadata + */ + function generateInitializeTransaction( + uint32 networkID, + address _gasTokenAddress, + uint32 _gasTokenNetwork, + bytes memory _gasTokenMetadata + ) public view returns (bytes memory) { + bytes memory initializeBrigeData = abi.encodeCall( + IPolygonZkEVMBridgeV2.initialize, + ( + networkID, + _gasTokenAddress, + _gasTokenNetwork, + GLOBAL_EXIT_ROOT_MANAGER_L2, + address(0), // Rollup manager on L2 does not exist + _gasTokenMetadata + ) + ); + + bytes memory bytesToSign; + + if (_gasTokenMetadata.length == 0) { + bytesToSign = abi.encodePacked( + INITIALIZE_TX_BRIDGE_LIST_LEN_LEN, + uint16(initializeBrigeData.length) + + INITIALIZE_TX_CONSTANT_BYTES_EMPTY_METADATA, // do not support more than 2 bytes of length, intended to revert on overflow + INITIALIZE_TX_BRIDGE_PARAMS, + bridgeAddress, + INITIALIZE_TX_BRIDGE_PARAMS_AFTER_BRIDGE_ADDRESS_EMPTY_METADATA, + 
INITIALIZE_TX_DATA_LEN_EMPTY_METADATA, + initializeBrigeData + ); + } else { + // Do not support more than 65535 bytes + if (initializeBrigeData.length > type(uint16).max) { + revert HugeTokenMetadataNotSupported(); + } + uint16 initializeBrigeDataLen = uint16(initializeBrigeData.length); + + bytesToSign = abi.encodePacked( + INITIALIZE_TX_BRIDGE_LIST_LEN_LEN, + uint16(initializeBrigeData.length) + + INITIALIZE_TX_CONSTANT_BYTES, // do not support more than 2 bytes of length, intended to revert on overflow + INITIALIZE_TX_BRIDGE_PARAMS, + bridgeAddress, + INITIALIZE_TX_BRIDGE_PARAMS_AFTER_BRIDGE_ADDRESS, + initializeBrigeDataLen, + initializeBrigeData + ); + } + + // Sanity check that the ecrecover will work + // Should never happen that giving a valid signature, ecrecover "breaks" + address signer = ecrecover( + keccak256(bytesToSign), + SIGNATURE_INITIALIZE_TX_V, + SIGNATURE_INITIALIZE_TX_R, + SIGNATURE_INITIALIZE_TX_S + ); + + if (signer == address(0)) { + revert InvalidInitializeTransaction(); + } + + bytes memory transaction = abi.encodePacked( + bytesToSign, + SIGNATURE_INITIALIZE_TX_R, + SIGNATURE_INITIALIZE_TX_S, + SIGNATURE_INITIALIZE_TX_V, + INITIALIZE_TX_EFFECTIVE_PERCENTAGE + ); + + return transaction; + } + + function _verifyOrigin( + address _gasTokenAddress + ) internal virtual returns (bytes memory gasTokenMetadata) { + if (_gasTokenAddress != address(0)) { + // Ask for token metadata, the same way is enconded in the bridge + // Note that this function will revert if the token is not in this network + // Note that this could be a possible reentrant call, but cannot make changes on the state since are static call + gasTokenMetadata = bridgeAddress.getTokenMetadata(_gasTokenAddress); + + // Check gas token address on the bridge + ( + uint32 originWrappedNetwork, + address originWrappedAddress + ) = bridgeAddress.wrappedTokenToTokenInfo(_gasTokenAddress); + + if (originWrappedNetwork != 0) { + // It's a wrapped token, get the wrapped parameters + gasTokenAddress = originWrappedAddress; + gasTokenNetwork = originWrappedNetwork; + } else { + // gasTokenNetwork will be mainnet, for instance 0 + gasTokenAddress = _gasTokenAddress; + } + } + } +} diff --git a/contracts/v2/previousVersions/PolygonRollupManager.sol b/contracts/v2/previousVersions/PolygonRollupManager.sol new file mode 100644 index 000000000..c18c2c67b --- /dev/null +++ b/contracts/v2/previousVersions/PolygonRollupManager.sol @@ -0,0 +1,1911 @@ +// SPDX-License-Identifier: AGPL-3.0 + +pragma solidity 0.8.20; + +import "../interfaces/IPolygonRollupManager.sol"; +import "../interfaces/IPolygonZkEVMGlobalExitRootV2.sol"; +import "../../interfaces/IPolygonZkEVMBridge.sol"; +import "../interfaces/IPolygonRollupBase.sol"; +import "../../interfaces/IVerifierRollup.sol"; +import "../../lib/EmergencyManager.sol"; +import "@openzeppelin/contracts-upgradeable/token/ERC20/utils/SafeERC20Upgradeable.sol"; +import "../lib/PolygonTransparentProxy.sol"; +import "../lib/PolygonAccessControlUpgradeable.sol"; +import "../lib/LegacyZKEVMStateVariables.sol"; +import "../consensus/zkEVM/PolygonZkEVMExistentEtrog.sol"; +import "../lib/PolygonConstantsBase.sol"; + +/** + * Contract responsible for managing rollups and the verification of their batches. + * This contract will create and update rollups and store all the hashed sequenced data from them. + * The logic for sequence batches is moved to the `consensus` contracts, while the verification of all of + * them will be done in this one. 
In this way, the proof aggregation of the rollups will be easier on a close future. + */ +contract PolygonRollupManagerPrevious is + PolygonAccessControlUpgradeable, + EmergencyManager, + LegacyZKEVMStateVariables, + PolygonConstantsBase, + IPolygonRollupManager +{ + using SafeERC20Upgradeable for IERC20Upgradeable; + + /** + * @notice Struct which to store the rollup type data + * @param consensusImplementation Consensus implementation ( contains the consensus logic for the transaparent proxy) + * @param verifier verifier + * @param forkID fork ID + * @param rollupCompatibilityID Rollup compatibility ID, to check upgradability between rollup types + * @param obsolete Indicates if the rollup type is obsolete + * @param genesis Genesis block of the rollup, note that will only be used on creating new rollups, not upgrade them + */ + struct RollupType { + address consensusImplementation; + IVerifierRollup verifier; + uint64 forkID; + uint8 rollupCompatibilityID; + bool obsolete; + bytes32 genesis; + } + + /** + * @notice Struct which to store the rollup data of each chain + * @param rollupContract Rollup consensus contract, which manages everything + * related to sequencing transactions + * @param chainID Chain ID of the rollup + * @param verifier Verifier contract + * @param forkID ForkID of the rollup + * @param batchNumToStateRoot State root mapping + * @param sequencedBatches Queue of batches that defines the virtual state + * @param pendingStateTransitions Pending state mapping + * @param lastLocalExitRoot Last exit root verified, used for compute the rollupExitRoot + * @param lastBatchSequenced Last batch sent by the consensus contract + * @param lastVerifiedBatch Last batch verified + * @param lastPendingState Last pending state + * @param lastPendingStateConsolidated Last pending state consolidated + * @param lastVerifiedBatchBeforeUpgrade Last batch verified before the last upgrade + * @param rollupTypeID Rollup type ID, can be 0 if it was added as an existing rollup + * @param rollupCompatibilityID Rollup ID used for compatibility checks when upgrading + */ + struct RollupData { + IPolygonRollupBase rollupContract; + uint64 chainID; + IVerifierRollup verifier; + uint64 forkID; + mapping(uint64 batchNum => bytes32) batchNumToStateRoot; + mapping(uint64 batchNum => SequencedBatchData) sequencedBatches; + mapping(uint256 pendingStateNum => PendingState) pendingStateTransitions; + bytes32 lastLocalExitRoot; + uint64 lastBatchSequenced; + uint64 lastVerifiedBatch; + uint64 lastPendingState; + uint64 lastPendingStateConsolidated; + uint64 lastVerifiedBatchBeforeUpgrade; + uint64 rollupTypeID; + uint8 rollupCompatibilityID; + } + + // Modulus zkSNARK + uint256 internal constant _RFIELD = + 21888242871839275222246405745257275088548364400416034343698204186575808495617; + + // Max batch multiplier per verification + uint256 internal constant _MAX_BATCH_MULTIPLIER = 12; + + // Max batch fee value + uint256 internal constant _MAX_BATCH_FEE = 1000 ether; + + // Min value batch fee + uint256 internal constant _MIN_BATCH_FEE = 1 gwei; + + // Goldilocks prime field + uint256 internal constant _GOLDILOCKS_PRIME_FIELD = 0xFFFFFFFF00000001; // 2 ** 64 - 2 ** 32 + 1 + + // Max uint64 + uint256 internal constant _MAX_UINT_64 = type(uint64).max; // 0xFFFFFFFFFFFFFFFF + + // Exit merkle tree levels + uint256 internal constant _EXIT_TREE_DEPTH = 32; + + // Roles + + // Be able to add a new rollup type + bytes32 internal constant _ADD_ROLLUP_TYPE_ROLE = + keccak256("ADD_ROLLUP_TYPE_ROLE"); + + // Be able to 
obsolete a rollup type, which means that new rollups cannot use this type + bytes32 internal constant _OBSOLETE_ROLLUP_TYPE_ROLE = + keccak256("OBSOLETE_ROLLUP_TYPE_ROLE"); + + // Be able to create a new rollup using a rollup type + bytes32 internal constant _CREATE_ROLLUP_ROLE = + keccak256("CREATE_ROLLUP_ROLE"); + + // Be able to create a new rollup which does not have to follow any rollup type. + // Also sets the genesis block for that network + bytes32 internal constant _ADD_EXISTING_ROLLUP_ROLE = + keccak256("ADD_EXISTING_ROLLUP_ROLE"); + + // Be able to update a rollup to a new rollup type that it's compatible + bytes32 internal constant _UPDATE_ROLLUP_ROLE = + keccak256("UPDATE_ROLLUP_ROLE"); + + // Be able to that has priority to verify batches and consolidates the state instantly + bytes32 internal constant _TRUSTED_AGGREGATOR_ROLE = + keccak256("TRUSTED_AGGREGATOR_ROLE"); + + // Be able to set the trusted aggregator address + bytes32 internal constant _TRUSTED_AGGREGATOR_ROLE_ADMIN = + keccak256("TRUSTED_AGGREGATOR_ROLE_ADMIN"); + + // Be able to tweak parameters + bytes32 internal constant _TWEAK_PARAMETERS_ROLE = + keccak256("TWEAK_PARAMETERS_ROLE"); + + // Be able to set the current batch fee + bytes32 internal constant _SET_FEE_ROLE = keccak256("SET_FEE_ROLE"); + + // Be able to stop the emergency state + bytes32 internal constant _STOP_EMERGENCY_ROLE = + keccak256("STOP_EMERGENCY_ROLE"); + + // Be able to activate the emergency state without any further condition + bytes32 internal constant _EMERGENCY_COUNCIL_ROLE = + keccak256("EMERGENCY_COUNCIL_ROLE"); + + // Be able to set the emergency council address + bytes32 internal constant _EMERGENCY_COUNCIL_ADMIN = + keccak256("EMERGENCY_COUNCIL_ADMIN"); + + // Global Exit Root address + IPolygonZkEVMGlobalExitRootV2 public immutable globalExitRootManager; + + // PolygonZkEVM Bridge Address + IPolygonZkEVMBridge public immutable bridgeAddress; + + // POL token address + IERC20Upgradeable public immutable pol; + + // Number of rollup types added, every new type will be assigned sequencially a new ID + uint32 public rollupTypeCount; + + // Rollup type mapping + mapping(uint32 rollupTypeID => RollupType) public rollupTypeMap; + + // Number of rollups added, every new rollup will be assigned sequencially a new ID + uint32 public rollupCount; + + // Rollups ID mapping + mapping(uint32 rollupID => RollupData) public rollupIDToRollupData; + + // Rollups address mapping + mapping(address rollupAddress => uint32 rollupID) public rollupAddressToID; + + // Chain ID mapping for nullifying + // note we will take care to avoid that current known chainIDs are not reused in our networks (example: 1) + mapping(uint64 chainID => uint32 rollupID) public chainIDToRollupID; + + // Total sequenced batches across all rollups + uint64 public totalSequencedBatches; + + // Total verified batches across all rollups + uint64 public totalVerifiedBatches; + + // Last timestamp when an aggregation happen + uint64 public lastAggregationTimestamp; + + // Trusted aggregator timeout, if a sequence is not verified in this time frame, + // everyone can verify that sequence + uint64 public trustedAggregatorTimeout; + + // Once a pending state exceeds this timeout it can be consolidated + uint64 public pendingStateTimeout; + + // Time target of the verification of a batch + // Adaptively the batchFee will be updated to achieve this target + uint64 public verifyBatchTimeTarget; + + // Batch fee multiplier with 3 decimals that goes from 1000 - 1023 + uint16 public 
multiplierBatchFee; + + // Current POL fee per batch sequenced + // note This variable is internal, since the view function getBatchFee is likely to be upgraded + uint256 internal _batchFee; + + // Timestamp when the last emergency state was deactivated + uint64 public lastDeactivatedEmergencyStateTimestamp; + + /** + * @dev Emitted when a new rollup type is added + */ + event AddNewRollupType( + uint32 indexed rollupTypeID, + address consensusImplementation, + address verifier, + uint64 forkID, + uint8 rollupCompatibilityID, + bytes32 genesis, + string description + ); + + /** + * @dev Emitted when a a rolup type is obsoleted + */ + event ObsoleteRollupType(uint32 indexed rollupTypeID); + + /** + * @dev Emitted when a new rollup is created based on a rollupType + */ + event CreateNewRollup( + uint32 indexed rollupID, + uint32 rollupTypeID, + address rollupAddress, + uint64 chainID, + address gasTokenAddress + ); + + /** + * @dev Emitted when an existing rollup is added + */ + event AddExistingRollup( + uint32 indexed rollupID, + uint64 forkID, + address rollupAddress, + uint64 chainID, + uint8 rollupCompatibilityID, + uint64 lastVerifiedBatchBeforeUpgrade + ); + + /** + * @dev Emitted when a rollup is udpated + */ + event UpdateRollup( + uint32 indexed rollupID, + uint32 newRollupTypeID, + uint64 lastVerifiedBatchBeforeUpgrade + ); + + /** + * @dev Emitted when a new verifier is added + */ + event OnSequenceBatches(uint32 indexed rollupID, uint64 lastBatchSequenced); + + /** + * @dev Emitted when an aggregator verifies batches + */ + event VerifyBatches( + uint32 indexed rollupID, + uint64 numBatch, + bytes32 stateRoot, + bytes32 exitRoot, + address indexed aggregator + ); + + /** + * @dev Emitted when the trusted aggregator verifies batches + */ + event VerifyBatchesTrustedAggregator( + uint32 indexed rollupID, + uint64 numBatch, + bytes32 stateRoot, + bytes32 exitRoot, + address indexed aggregator + ); + + /** + * @dev Emitted when pending state is consolidated + */ + event ConsolidatePendingState( + uint32 indexed rollupID, + uint64 numBatch, + bytes32 stateRoot, + bytes32 exitRoot, + uint64 pendingStateNum + ); + + /** + * @dev Emitted when is proved a different state given the same batches + */ + event ProveNonDeterministicPendingState( + bytes32 storedStateRoot, + bytes32 provedStateRoot + ); + + /** + * @dev Emitted when the trusted aggregator overrides pending state + */ + event OverridePendingState( + uint32 indexed rollupID, + uint64 numBatch, + bytes32 stateRoot, + bytes32 exitRoot, + address aggregator + ); + + /** + * @dev Emitted when is updated the trusted aggregator timeout + */ + event SetTrustedAggregatorTimeout(uint64 newTrustedAggregatorTimeout); + + /** + * @dev Emitted when is updated the pending state timeout + */ + event SetPendingStateTimeout(uint64 newPendingStateTimeout); + + /** + * @dev Emitted when is updated the multiplier batch fee + */ + event SetMultiplierBatchFee(uint16 newMultiplierBatchFee); + + /** + * @dev Emitted when is updated the verify batch timeout + */ + event SetVerifyBatchTimeTarget(uint64 newVerifyBatchTimeTarget); + + /** + * @dev Emitted when is updated the trusted aggregator address + */ + event SetTrustedAggregator(address newTrustedAggregator); + + /** + * @dev Emitted when is updated the batch fee + */ + event SetBatchFee(uint256 newBatchFee); + + /** + * @param _globalExitRootManager Global exit root manager address + * @param _pol POL token address + * @param _bridgeAddress Bridge address + */ + constructor( + 
IPolygonZkEVMGlobalExitRootV2 _globalExitRootManager, + IERC20Upgradeable _pol, + IPolygonZkEVMBridge _bridgeAddress + ) { + globalExitRootManager = _globalExitRootManager; + pol = _pol; + bridgeAddress = _bridgeAddress; + + // Disable initalizers on the implementation following the best practices + _disableInitializers(); + } + + /** + * @param trustedAggregator Trusted aggregator address + * @param _pendingStateTimeout Pending state timeout + * @param _trustedAggregatorTimeout Trusted aggregator timeout + * @param admin Admin of the rollup manager + * @param timelock Timelock address + * @param emergencyCouncil Emergency council address + * @param polygonZkEVM New deployed Polygon zkEVM which will be initialized wiht previous values + * @param zkEVMVerifier Verifier of the new zkEVM deployed + * @param zkEVMForkID Fork id of the new zkEVM deployed + * @param zkEVMChainID Chain id of the new zkEVM deployed + */ + function initialize( + address trustedAggregator, + uint64 _pendingStateTimeout, + uint64 _trustedAggregatorTimeout, + address admin, + address timelock, + address emergencyCouncil, + PolygonZkEVMExistentEtrog polygonZkEVM, + IVerifierRollup zkEVMVerifier, + uint64 zkEVMForkID, + uint64 zkEVMChainID + ) external virtual reinitializer(2) { + pendingStateTimeout = _pendingStateTimeout; + trustedAggregatorTimeout = _trustedAggregatorTimeout; + + // Constant deployment variables + _batchFee = 0.1 ether; // 0.1 POL + verifyBatchTimeTarget = 30 minutes; + multiplierBatchFee = 1002; + + // Initialize OZ contracts + __AccessControl_init(); + + // setup roles + + // trusted aggregator role + _setupRole(_TRUSTED_AGGREGATOR_ROLE, trustedAggregator); + + // Timelock roles + _setupRole(DEFAULT_ADMIN_ROLE, timelock); + _setupRole(_ADD_ROLLUP_TYPE_ROLE, timelock); + _setupRole(_ADD_EXISTING_ROLLUP_ROLE, timelock); + + // note even this role can only update to an already added verifier/consensus + // Could break the compatibility of them, changing the virtual state + _setupRole(_UPDATE_ROLLUP_ROLE, timelock); + + // admin roles + _setupRole(_OBSOLETE_ROLLUP_TYPE_ROLE, admin); + _setupRole(_CREATE_ROLLUP_ROLE, admin); + _setupRole(_STOP_EMERGENCY_ROLE, admin); + _setupRole(_TWEAK_PARAMETERS_ROLE, admin); + + // admin should be able to update the trusted aggregator address + _setRoleAdmin(_TRUSTED_AGGREGATOR_ROLE, _TRUSTED_AGGREGATOR_ROLE_ADMIN); + _setupRole(_TRUSTED_AGGREGATOR_ROLE_ADMIN, admin); + _setupRole(_SET_FEE_ROLE, admin); + + // Emergency council roles + _setRoleAdmin(_EMERGENCY_COUNCIL_ROLE, _EMERGENCY_COUNCIL_ADMIN); + _setupRole(_EMERGENCY_COUNCIL_ROLE, emergencyCouncil); + _setupRole(_EMERGENCY_COUNCIL_ADMIN, emergencyCouncil); + + // Check last verified batch + uint64 zkEVMLastBatchSequenced = _legacylastBatchSequenced; + uint64 zkEVMLastVerifiedBatch = _legacyLastVerifiedBatch; + if (zkEVMLastBatchSequenced != zkEVMLastVerifiedBatch) { + revert AllzkEVMSequencedBatchesMustBeVerified(); + } + + // Initialize current zkEVM + RollupData storage currentZkEVM = _addExistingRollup( + IPolygonRollupBase(polygonZkEVM), + zkEVMVerifier, + zkEVMForkID, + zkEVMChainID, + 0, // Rollup compatibility ID is 0 + _legacyLastVerifiedBatch + ); + + // Copy variables from legacy + currentZkEVM.batchNumToStateRoot[ + zkEVMLastVerifiedBatch + ] = _legacyBatchNumToStateRoot[zkEVMLastVerifiedBatch]; + + // note previousLastBatchSequenced of the SequencedBatchData will be inconsistent, + // since there will not be a previous sequence stored in the sequence mapping. 
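+        // (only the entry for zkEVMLastBatchSequenced is copied from the legacy sequencedBatches mapping)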
+ // However since lastVerifiedBatch is equal to the lastBatchSequenced + // won't affect in any case + currentZkEVM.sequencedBatches[ + zkEVMLastBatchSequenced + ] = _legacySequencedBatches[zkEVMLastBatchSequenced]; + + currentZkEVM.lastBatchSequenced = zkEVMLastBatchSequenced; + currentZkEVM.lastVerifiedBatch = zkEVMLastVerifiedBatch; + currentZkEVM.lastVerifiedBatchBeforeUpgrade = zkEVMLastVerifiedBatch; + // rollupType and rollupCompatibilityID will be both 0 + + // Initialize polygon zkevm + polygonZkEVM.initializeUpgrade( + _legacyAdmin, + _legacyTrustedSequencer, + _legacyTrustedSequencerURL, + _legacyNetworkName, + _legacySequencedBatches[zkEVMLastBatchSequenced].accInputHash + ); + } + + /////////////////////////////////////// + // Rollups management functions + /////////////////////////////////////// + + /** + * @notice Add a new rollup type + * @param consensusImplementation Consensus implementation + * @param verifier Verifier address + * @param forkID ForkID of the verifier + * @param genesis Genesis block of the rollup + * @param description Description of the rollup type + */ + function addNewRollupType( + address consensusImplementation, + IVerifierRollup verifier, + uint64 forkID, + uint8 rollupCompatibilityID, + bytes32 genesis, + string memory description + ) external onlyRole(_ADD_ROLLUP_TYPE_ROLE) { + uint32 rollupTypeID = ++rollupTypeCount; + + rollupTypeMap[rollupTypeID] = RollupType({ + consensusImplementation: consensusImplementation, + verifier: verifier, + forkID: forkID, + rollupCompatibilityID: rollupCompatibilityID, + obsolete: false, + genesis: genesis + }); + + emit AddNewRollupType( + rollupTypeID, + consensusImplementation, + address(verifier), + forkID, + rollupCompatibilityID, + genesis, + description + ); + } + + /** + * @notice Obsolete Rollup type + * @param rollupTypeID Rollup type to obsolete + */ + function obsoleteRollupType( + uint32 rollupTypeID + ) external onlyRole(_OBSOLETE_ROLLUP_TYPE_ROLE) { + // Check that rollup type exists + if (rollupTypeID == 0 || rollupTypeID > rollupTypeCount) { + revert RollupTypeDoesNotExist(); + } + + // Check rollup type is not obsolete + RollupType storage currentRollupType = rollupTypeMap[rollupTypeID]; + if (currentRollupType.obsolete == true) { + revert RollupTypeObsolete(); + } + + currentRollupType.obsolete = true; + + emit ObsoleteRollupType(rollupTypeID); + } + + /** + * @notice Create a new rollup + * @param rollupTypeID Rollup type to deploy + * @param chainID ChainID of the rollup, must be a new one + * @param admin Admin of the new created rollup + * @param sequencer Sequencer of the new created rollup + * @param gasTokenAddress Indicates the token address that will be used to pay gas fees in the new rollup + * Note if a wrapped token of the bridge is used, the original network and address of this wrapped will be used instead + * @param sequencerURL Sequencer URL of the new created rollup + * @param networkName Network name of the new created rollup + */ + function createNewRollup( + uint32 rollupTypeID, + uint64 chainID, + address admin, + address sequencer, + address gasTokenAddress, + string memory sequencerURL, + string memory networkName + ) external onlyRole(_CREATE_ROLLUP_ROLE) { + // Check that rollup type exists + if (rollupTypeID == 0 || rollupTypeID > rollupTypeCount) { + revert RollupTypeDoesNotExist(); + } + + // Check rollup type is not obsolete + RollupType storage rollupType = rollupTypeMap[rollupTypeID]; + if (rollupType.obsolete == true) { + revert RollupTypeObsolete(); + } + + // 
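As an illustration of the rollup type lifecycle defined by addNewRollupType and obsoleteRollupType, a hedged ethers-v6 sketch (timelock, admin, the deployed addresses and genesisRoot are assumed test fixtures, and rollupTypeCount is assumed to be publicly readable):

    // timelock is assumed to hold _ADD_ROLLUP_TYPE_ROLE, admin to hold _OBSOLETE_ROLLUP_TYPE_ROLE
    await rollupManagerContract.connect(timelock).addNewRollupType(
        consensusImplementationAddress, // consensus implementation behind the transparent proxies
        verifierAddress,                // IVerifierRollup
        9,                              // forkID (illustrative value)
        0,                              // rollupCompatibilityID
        genesisRoot,                    // bytes32 genesis state root
        "example rollup type"
    );

    // rollupTypeCount is pre-incremented, so the first registered type gets ID 1
    const rollupTypeID = await rollupManagerContract.rollupTypeCount();

    // Once obsoleted, the type can no longer be used by createNewRollup or updateRollup
    await rollupManagerContract.connect(admin).obsoleteRollupType(rollupTypeID);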
Check chainID nullifier + if (chainIDToRollupID[chainID] != 0) { + revert ChainIDAlreadyExist(); + } + + // Create a new Rollup, using a transparent proxy pattern + // Consensus will be the implementation, and this contract the admin + uint32 rollupID = ++rollupCount; + address rollupAddress = address( + new PolygonTransparentProxy( + rollupType.consensusImplementation, + address(this), + new bytes(0) + ) + ); + + // Set chainID nullifier + chainIDToRollupID[chainID] = rollupID; + + // Store rollup data + rollupAddressToID[rollupAddress] = rollupID; + + RollupData storage rollup = rollupIDToRollupData[rollupID]; + + rollup.rollupContract = IPolygonRollupBase(rollupAddress); + rollup.forkID = rollupType.forkID; + rollup.verifier = rollupType.verifier; + rollup.chainID = chainID; + rollup.batchNumToStateRoot[0] = rollupType.genesis; + rollup.rollupTypeID = rollupTypeID; + rollup.rollupCompatibilityID = rollupType.rollupCompatibilityID; + + emit CreateNewRollup( + rollupID, + rollupTypeID, + rollupAddress, + chainID, + gasTokenAddress + ); + + // Initialize new rollup + IPolygonRollupBase(rollupAddress).initialize( + admin, + sequencer, + rollupID, + gasTokenAddress, + sequencerURL, + networkName + ); + } + + /** + * @notice Add an already deployed rollup + * note that this rollup does not follow any rollupType + * @param rollupAddress Rollup address + * @param verifier Verifier address, must be added before + * @param forkID Fork id of the added rollup + * @param chainID Chain id of the added rollup + * @param genesis Genesis block for this rollup + * @param rollupCompatibilityID Compatibility ID for the added rollup + */ + function addExistingRollup( + IPolygonRollupBase rollupAddress, + IVerifierRollup verifier, + uint64 forkID, + uint64 chainID, + bytes32 genesis, + uint8 rollupCompatibilityID + ) external onlyRole(_ADD_EXISTING_ROLLUP_ROLE) { + // Check chainID nullifier + if (chainIDToRollupID[chainID] != 0) { + revert ChainIDAlreadyExist(); + } + + // Check if rollup address was already added + if (rollupAddressToID[address(rollupAddress)] != 0) { + revert RollupAddressAlreadyExist(); + } + + RollupData storage rollup = _addExistingRollup( + rollupAddress, + verifier, + forkID, + chainID, + rollupCompatibilityID, + 0 // last verified batch it's always 0 + ); + rollup.batchNumToStateRoot[0] = genesis; + } + + /** + * @notice Add an already deployed rollup + * note that this rollup does not follow any rollupType + * @param rollupAddress Rollup address + * @param verifier Verifier address, must be added before + * @param forkID Fork id of the added rollup + * @param chainID Chain id of the added rollup + * @param rollupCompatibilityID Compatibility ID for the added rollup + * @param lastVerifiedBatch Last verified batch before adding the rollup + */ + function _addExistingRollup( + IPolygonRollupBase rollupAddress, + IVerifierRollup verifier, + uint64 forkID, + uint64 chainID, + uint8 rollupCompatibilityID, + uint64 lastVerifiedBatch + ) internal returns (RollupData storage rollup) { + uint32 rollupID = ++rollupCount; + + // Set chainID nullifier + chainIDToRollupID[chainID] = rollupID; + + // Store rollup data + rollupAddressToID[address(rollupAddress)] = rollupID; + + rollup = rollupIDToRollupData[rollupID]; + rollup.rollupContract = rollupAddress; + rollup.forkID = forkID; + rollup.verifier = verifier; + rollup.chainID = chainID; + rollup.rollupCompatibilityID = rollupCompatibilityID; + // rollup type is 0, since it does not follow any rollup type + + emit AddExistingRollup( + 
rollupID, + forkID, + address(rollupAddress), + chainID, + rollupCompatibilityID, + lastVerifiedBatch + ); + } + + /** + * @notice Upgrade an existing rollup + * @param rollupContract Rollup consensus proxy address + * @param newRollupTypeID New rolluptypeID to upgrade to + * @param upgradeData Upgrade data + */ + function updateRollup( + ITransparentUpgradeableProxy rollupContract, + uint32 newRollupTypeID, + bytes calldata upgradeData + ) external onlyRole(_UPDATE_ROLLUP_ROLE) { + // Check that rollup type exists + if (newRollupTypeID == 0 || newRollupTypeID > rollupTypeCount) { + revert RollupTypeDoesNotExist(); + } + + // Check the rollup exists + uint32 rollupID = rollupAddressToID[address(rollupContract)]; + if (rollupID == 0) { + revert RollupMustExist(); + } + + RollupData storage rollup = rollupIDToRollupData[rollupID]; + + // The update must be to a new rollup type + if (rollup.rollupTypeID == newRollupTypeID) { + revert UpdateToSameRollupTypeID(); + } + + RollupType storage newRollupType = rollupTypeMap[newRollupTypeID]; + + // Check rollup type is not obsolete + if (newRollupType.obsolete == true) { + revert RollupTypeObsolete(); + } + + // Check compatibility of the rollups + if ( + rollup.rollupCompatibilityID != newRollupType.rollupCompatibilityID + ) { + revert UpdateNotCompatible(); + } + + // Update rollup parameters + rollup.verifier = newRollupType.verifier; + rollup.forkID = newRollupType.forkID; + rollup.rollupTypeID = newRollupTypeID; + + uint64 lastVerifiedBatch = getLastVerifiedBatch(rollupID); + rollup.lastVerifiedBatchBeforeUpgrade = lastVerifiedBatch; + + // Upgrade rollup + rollupContract.upgradeToAndCall( + newRollupType.consensusImplementation, + upgradeData + ); + + emit UpdateRollup(rollupID, newRollupTypeID, lastVerifiedBatch); + } + + ///////////////////////////////////// + // Sequence/Verify batches functions + //////////////////////////////////// + + /** + * @notice Sequence batches, callback called by one of the consensus managed by this contract + * @param newSequencedBatches Number of batches sequenced + * @param newAccInputHash New accumulate input hash + */ + function onSequenceBatches( + uint64 newSequencedBatches, + bytes32 newAccInputHash + ) external ifNotEmergencyState returns (uint64) { + // Check that the msg.sender is an added rollup + uint32 rollupID = rollupAddressToID[msg.sender]; + if (rollupID == 0) { + revert SenderMustBeRollup(); + } + + // This prevents overwritting sequencedBatches + if (newSequencedBatches == 0) { + revert MustSequenceSomeBatch(); + } + + RollupData storage rollup = rollupIDToRollupData[rollupID]; + + // Update total sequence parameters + totalSequencedBatches += newSequencedBatches; + + // Update sequenced batches of the current rollup + uint64 previousLastBatchSequenced = rollup.lastBatchSequenced; + uint64 newLastBatchSequenced = previousLastBatchSequenced + + newSequencedBatches; + + rollup.lastBatchSequenced = newLastBatchSequenced; + rollup.sequencedBatches[newLastBatchSequenced] = SequencedBatchData({ + accInputHash: newAccInputHash, + sequencedTimestamp: uint64(block.timestamp), + previousLastBatchSequenced: previousLastBatchSequenced + }); + + // Consolidate pending state if possible + _tryConsolidatePendingState(rollup); + + emit OnSequenceBatches(rollupID, newLastBatchSequenced); + + return newLastBatchSequenced; + } + + /** + * @notice Allows an aggregator to verify multiple batches + * @param rollupID Rollup identifier + * @param pendingStateNum Init pending state, 0 if consolidated state is used + * 
@param initNumBatch Batch which the aggregator starts the verification + * @param finalNewBatch Last batch aggregator intends to verify + * @param newLocalExitRoot New local exit root once the batch is processed + * @param newStateRoot New State root once the batch is processed + * @param beneficiary Address that will receive the verification reward + * @param proof Fflonk proof + */ + function verifyBatches( + uint32 rollupID, + uint64 pendingStateNum, + uint64 initNumBatch, + uint64 finalNewBatch, + bytes32 newLocalExitRoot, + bytes32 newStateRoot, + address beneficiary, + bytes32[24] calldata proof + ) external ifNotEmergencyState { + RollupData storage rollup = rollupIDToRollupData[rollupID]; + + // Check if the trusted aggregator timeout expired, + // Note that the sequencedBatches struct must exists for this finalNewBatch, if not newAccInputHash will be 0 + if ( + rollup.sequencedBatches[finalNewBatch].sequencedTimestamp + + trustedAggregatorTimeout > + block.timestamp + ) { + revert TrustedAggregatorTimeoutNotExpired(); + } + + if (finalNewBatch - initNumBatch > _MAX_VERIFY_BATCHES) { + revert ExceedMaxVerifyBatches(); + } + + _verifyAndRewardBatches( + rollup, + pendingStateNum, + initNumBatch, + finalNewBatch, + newLocalExitRoot, + newStateRoot, + beneficiary, + proof + ); + + // Update batch fees + _updateBatchFee(rollup, finalNewBatch); + + if (pendingStateTimeout == 0) { + // Consolidate state + rollup.lastVerifiedBatch = finalNewBatch; + rollup.batchNumToStateRoot[finalNewBatch] = newStateRoot; + rollup.lastLocalExitRoot = newLocalExitRoot; + + // Clean pending state if any + if (rollup.lastPendingState > 0) { + rollup.lastPendingState = 0; + rollup.lastPendingStateConsolidated = 0; + } + + // Interact with globalExitRootManager + globalExitRootManager.updateExitRoot(getRollupExitRoot()); + } else { + // Consolidate pending state if possible + _tryConsolidatePendingState(rollup); + + // Update pending state + rollup.lastPendingState++; + rollup.pendingStateTransitions[ + rollup.lastPendingState + ] = PendingState({ + timestamp: uint64(block.timestamp), + lastVerifiedBatch: finalNewBatch, + exitRoot: newLocalExitRoot, + stateRoot: newStateRoot + }); + } + + emit VerifyBatches( + rollupID, + finalNewBatch, + newStateRoot, + newLocalExitRoot, + msg.sender + ); + } + + /** + * @notice Allows a trusted aggregator to verify multiple batches + * @param rollupID Rollup identifier + * @param pendingStateNum Init pending state, 0 if consolidated state is used + * @param initNumBatch Batch which the aggregator starts the verification + * @param finalNewBatch Last batch aggregator intends to verify + * @param newLocalExitRoot New local exit root once the batch is processed + * @param newStateRoot New State root once the batch is processed + * @param beneficiary Address that will receive the verification reward + * @param proof Fflonk proof + */ + function verifyBatchesTrustedAggregator( + uint32 rollupID, + uint64 pendingStateNum, + uint64 initNumBatch, + uint64 finalNewBatch, + bytes32 newLocalExitRoot, + bytes32 newStateRoot, + address beneficiary, + bytes32[24] calldata proof + ) external onlyRole(_TRUSTED_AGGREGATOR_ROLE) { + RollupData storage rollup = rollupIDToRollupData[rollupID]; + + _verifyAndRewardBatches( + rollup, + pendingStateNum, + initNumBatch, + finalNewBatch, + newLocalExitRoot, + newStateRoot, + beneficiary, + proof + ); + + // Consolidate state + rollup.lastVerifiedBatch = finalNewBatch; + rollup.batchNumToStateRoot[finalNewBatch] = newStateRoot; + 
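For context on the two verification paths, a hedged ethers-v6 sketch (to be read as part of an async test; rollupID, finalNewBatch and the contract instance are assumed fixtures) of what the permissionless verifyBatches call above leaves behind when pendingStateTimeout is non-zero: the result is queued as a pending state instead of being consolidated immediately:

    import {expect} from "chai";
    import {ethers} from "hardhat";

    const pendingStateNum = 1; // first pending state of this rollup (illustrative)
    const pending = await rollupManagerContract.getRollupPendingStateTransitions(rollupID, pendingStateNum);

    // The queued transition records the proved range end and the proposed roots:
    // pending.lastVerifiedBatch == finalNewBatch, pending.stateRoot == newStateRoot

    // ...while the consolidated state root mapping is still empty for that batch
    expect(
        await rollupManagerContract.getRollupBatchNumToStateRoot(rollupID, finalNewBatch)
    ).to.be.equal(ethers.ZeroHash);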
rollup.lastLocalExitRoot = newLocalExitRoot; + + // Clean pending state if any + if (rollup.lastPendingState > 0) { + rollup.lastPendingState = 0; + rollup.lastPendingStateConsolidated = 0; + } + + // Interact with globalExitRootManager + globalExitRootManager.updateExitRoot(getRollupExitRoot()); + + emit VerifyBatchesTrustedAggregator( + rollupID, + finalNewBatch, + newStateRoot, + newLocalExitRoot, + msg.sender + ); + } + + /** + * @notice Verify and reward batches internal function + * @param rollup Rollup Data storage pointer that will be used to the verification + * @param pendingStateNum Init pending state, 0 if consolidated state is used + * @param initNumBatch Batch which the aggregator starts the verification + * @param finalNewBatch Last batch aggregator intends to verify + * @param newLocalExitRoot New local exit root once the batch is processed + * @param newStateRoot New State root once the batch is processed + * @param beneficiary Address that will receive the verification reward + * @param proof Fflonk proof + */ + function _verifyAndRewardBatches( + RollupData storage rollup, + uint64 pendingStateNum, + uint64 initNumBatch, + uint64 finalNewBatch, + bytes32 newLocalExitRoot, + bytes32 newStateRoot, + address beneficiary, + bytes32[24] calldata proof + ) internal virtual { + bytes32 oldStateRoot; + uint64 currentLastVerifiedBatch = _getLastVerifiedBatch(rollup); + + if (initNumBatch < rollup.lastVerifiedBatchBeforeUpgrade) { + revert InitBatchMustMatchCurrentForkID(); + } + + // Use pending state if specified, otherwise use consolidated state + if (pendingStateNum != 0) { + // Check that pending state exist + // Already consolidated pending states can be used aswell + if (pendingStateNum > rollup.lastPendingState) { + revert PendingStateDoesNotExist(); + } + + // Check choosen pending state + PendingState storage currentPendingState = rollup + .pendingStateTransitions[pendingStateNum]; + + // Get oldStateRoot from pending batch + oldStateRoot = currentPendingState.stateRoot; + + // Check initNumBatch matches the pending state + if (initNumBatch != currentPendingState.lastVerifiedBatch) { + revert InitNumBatchDoesNotMatchPendingState(); + } + } else { + // Use consolidated state + oldStateRoot = rollup.batchNumToStateRoot[initNumBatch]; + + if (oldStateRoot == bytes32(0)) { + revert OldStateRootDoesNotExist(); + } + + // Check initNumBatch is inside the range, sanity check + if (initNumBatch > currentLastVerifiedBatch) { + revert InitNumBatchAboveLastVerifiedBatch(); + } + } + + // Check final batch + if (finalNewBatch <= currentLastVerifiedBatch) { + revert FinalNumBatchBelowLastVerifiedBatch(); + } + + // Get snark bytes + bytes memory snarkHashBytes = _getInputSnarkBytes( + rollup, + initNumBatch, + finalNewBatch, + newLocalExitRoot, + oldStateRoot, + newStateRoot + ); + + // Calulate the snark input + uint256 inputSnark = uint256(sha256(snarkHashBytes)) % _RFIELD; + + // Verify proof + if (!rollup.verifier.verifyProof(proof, [inputSnark])) { + revert InvalidProof(); + } + + // Pay POL rewards + uint64 newVerifiedBatches = finalNewBatch - currentLastVerifiedBatch; + + pol.safeTransfer( + beneficiary, + calculateRewardPerBatch() * newVerifiedBatches + ); + + // Update aggregation parameters + totalVerifiedBatches += newVerifiedBatches; + lastAggregationTimestamp = uint64(block.timestamp); + + // Callback to the rollup address + rollup.rollupContract.onVerifyBatches( + finalNewBatch, + newStateRoot, + msg.sender + ); + } + + /** + * @notice Internal function to consolidate 
the state automatically once sequence or verify batches are called + * It tries to consolidate the first and the middle pending state in the queue + */ + function _tryConsolidatePendingState(RollupData storage rollup) internal { + // Check if there's any state to consolidate + if (rollup.lastPendingState > rollup.lastPendingStateConsolidated) { + // Check if it's possible to consolidate the next pending state + uint64 nextPendingState = rollup.lastPendingStateConsolidated + 1; + if (_isPendingStateConsolidable(rollup, nextPendingState)) { + // Check middle pending state ( binary search of 1 step) + uint64 middlePendingState = nextPendingState + + (rollup.lastPendingState - nextPendingState) / + 2; + + // Try to consolidate it, and if not, consolidate the nextPendingState + if (_isPendingStateConsolidable(rollup, middlePendingState)) { + _consolidatePendingState(rollup, middlePendingState); + } else { + _consolidatePendingState(rollup, nextPendingState); + } + } + } + } + + /** + * @notice Allows to consolidate any pending state that has already exceed the pendingStateTimeout + * Can be called by the trusted aggregator, which can consolidate any state without the timeout restrictions + * @param rollupID Rollup identifier + * @param pendingStateNum Pending state to consolidate + */ + function consolidatePendingState( + uint32 rollupID, + uint64 pendingStateNum + ) external { + RollupData storage rollup = rollupIDToRollupData[rollupID]; + // Check if pending state can be consolidated + // If trusted aggregator is the sender, do not check the timeout or the emergency state + if (!hasRole(_TRUSTED_AGGREGATOR_ROLE, msg.sender)) { + if (isEmergencyState) { + revert OnlyNotEmergencyState(); + } + + if (!_isPendingStateConsolidable(rollup, pendingStateNum)) { + revert PendingStateNotConsolidable(); + } + } + _consolidatePendingState(rollup, pendingStateNum); + } + + /** + * @notice Internal function to consolidate any pending state that has already exceed the pendingStateTimeout + * @param rollup Rollup data storage pointer + * @param pendingStateNum Pending state to consolidate + */ + function _consolidatePendingState( + RollupData storage rollup, + uint64 pendingStateNum + ) internal { + // Check if pendingStateNum is in correct range + // - not consolidated (implicity checks that is not 0) + // - exist ( has been added) + if ( + pendingStateNum <= rollup.lastPendingStateConsolidated || + pendingStateNum > rollup.lastPendingState + ) { + revert PendingStateInvalid(); + } + + PendingState storage currentPendingState = rollup + .pendingStateTransitions[pendingStateNum]; + + // Update state + uint64 newLastVerifiedBatch = currentPendingState.lastVerifiedBatch; + rollup.lastVerifiedBatch = newLastVerifiedBatch; + rollup.batchNumToStateRoot[newLastVerifiedBatch] = currentPendingState + .stateRoot; + rollup.lastLocalExitRoot = currentPendingState.exitRoot; + + // Update pending state + rollup.lastPendingStateConsolidated = pendingStateNum; + + // Interact with globalExitRootManager + globalExitRootManager.updateExitRoot(getRollupExitRoot()); + + emit ConsolidatePendingState( + rollupAddressToID[address(rollup.rollupContract)], + newLastVerifiedBatch, + currentPendingState.stateRoot, + currentPendingState.exitRoot, + pendingStateNum + ); + } + + ///////////////////////////////// + // Soundness protection functions + ///////////////////////////////// + + /** + * @notice Allows the trusted aggregator to override the pending state + * if it's possible to prove a different state root given the same batches 
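A short sketch of the timing rule these consolidation helpers rely on (ethers v6, inside an async test; rollupManagerContract and rollupID are assumed fixtures, and pendingStateTimeout is assumed to be exposed by a public getter):

    const pendingStateNum = 1;
    const pending = await rollupManagerContract.getRollupPendingStateTransitions(rollupID, pendingStateNum);
    const timeout = await rollupManagerContract.pendingStateTimeout();
    const latestBlock = await ethers.provider.getBlock("latest");

    // A pending state becomes consolidable once pendingStateTimeout seconds have elapsed since it was queued
    const expectedConsolidable =
        Number(pending.timestamp) + Number(timeout) <= Number(latestBlock?.timestamp);

    expect(
        await rollupManagerContract.isPendingStateConsolidable(rollupID, pendingStateNum)
    ).to.be.equal(expectedConsolidable);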
+ * @param rollupID Rollup identifier + * @param initPendingStateNum Init pending state, 0 if consolidated state is used + * @param finalPendingStateNum Final pending state, that will be used to compare with the newStateRoot + * @param initNumBatch Batch which the aggregator starts the verification + * @param finalNewBatch Last batch aggregator intends to verify + * @param newLocalExitRoot New local exit root once the batch is processed + * @param newStateRoot New State root once the batch is processed + * @param proof Fflonk proof + */ + function overridePendingState( + uint32 rollupID, + uint64 initPendingStateNum, + uint64 finalPendingStateNum, + uint64 initNumBatch, + uint64 finalNewBatch, + bytes32 newLocalExitRoot, + bytes32 newStateRoot, + bytes32[24] calldata proof + ) external onlyRole(_TRUSTED_AGGREGATOR_ROLE) { + RollupData storage rollup = rollupIDToRollupData[rollupID]; + + _proveDistinctPendingState( + rollup, + initPendingStateNum, + finalPendingStateNum, + initNumBatch, + finalNewBatch, + newLocalExitRoot, + newStateRoot, + proof + ); + + // Consolidate state + rollup.lastVerifiedBatch = finalNewBatch; + rollup.batchNumToStateRoot[finalNewBatch] = newStateRoot; + rollup.lastLocalExitRoot = newLocalExitRoot; + + // Clean pending state if any + if (rollup.lastPendingState > 0) { + rollup.lastPendingState = 0; + rollup.lastPendingStateConsolidated = 0; + } + + // Interact with globalExitRootManager + globalExitRootManager.updateExitRoot(getRollupExitRoot()); + + // Update trusted aggregator timeout to max + trustedAggregatorTimeout = _HALT_AGGREGATION_TIMEOUT; + + emit OverridePendingState( + rollupID, + finalNewBatch, + newStateRoot, + newLocalExitRoot, + msg.sender + ); + } + + /** + * @notice Allows activate the emergency state if its possible to prove a different state root given the same batches + * @param rollupID Rollup identifier + * @param initPendingStateNum Init pending state, 0 if consolidated state is used + * @param finalPendingStateNum Final pending state, that will be used to compare with the newStateRoot + * @param initNumBatch Batch which the aggregator starts the verification + * @param finalNewBatch Last batch aggregator intends to verify + * @param newLocalExitRoot New local exit root once the batch is processed + * @param newStateRoot New State root once the batch is processed + * @param proof Fflonk proof + */ + function proveNonDeterministicPendingState( + uint32 rollupID, + uint64 initPendingStateNum, + uint64 finalPendingStateNum, + uint64 initNumBatch, + uint64 finalNewBatch, + bytes32 newLocalExitRoot, + bytes32 newStateRoot, + bytes32[24] calldata proof + ) external ifNotEmergencyState { + RollupData storage rollup = rollupIDToRollupData[rollupID]; + + _proveDistinctPendingState( + rollup, + initPendingStateNum, + finalPendingStateNum, + initNumBatch, + finalNewBatch, + newLocalExitRoot, + newStateRoot, + proof + ); + + emit ProveNonDeterministicPendingState( + rollup.pendingStateTransitions[finalPendingStateNum].stateRoot, + newStateRoot + ); + + // Activate emergency state + _activateEmergencyState(); + } + + /** + * @notice Internal function that proves a different state root given the same batches to verify + * @param rollup Rollup Data struct that will be checked + * @param initPendingStateNum Init pending state, 0 if consolidated state is used + * @param finalPendingStateNum Final pending state, that will be used to compare with the newStateRoot + * @param initNumBatch Batch which the aggregator starts the verification + * @param 
finalNewBatch Last batch aggregator intends to verify + * @param newLocalExitRoot New local exit root once the batch is processed + * @param newStateRoot New State root once the batch is processed + * @param proof Fflonk proof + */ + function _proveDistinctPendingState( + RollupData storage rollup, + uint64 initPendingStateNum, + uint64 finalPendingStateNum, + uint64 initNumBatch, + uint64 finalNewBatch, + bytes32 newLocalExitRoot, + bytes32 newStateRoot, + bytes32[24] calldata proof + ) internal view virtual { + bytes32 oldStateRoot; + + if (initNumBatch < rollup.lastVerifiedBatchBeforeUpgrade) { + revert InitBatchMustMatchCurrentForkID(); + } + + // Use pending state if specified, otherwise use consolidated state + if (initPendingStateNum != 0) { + // Check that pending state exist + // Already consolidated pending states can be used aswell + if (initPendingStateNum > rollup.lastPendingState) { + revert PendingStateDoesNotExist(); + } + + // Check choosen pending state + PendingState storage initPendingState = rollup + .pendingStateTransitions[initPendingStateNum]; + + // Get oldStateRoot from init pending state + oldStateRoot = initPendingState.stateRoot; + + // Check initNumBatch matches the init pending state + if (initNumBatch != initPendingState.lastVerifiedBatch) { + revert InitNumBatchDoesNotMatchPendingState(); + } + } else { + // Use consolidated state + oldStateRoot = rollup.batchNumToStateRoot[initNumBatch]; + if (oldStateRoot == bytes32(0)) { + revert OldStateRootDoesNotExist(); + } + + // Check initNumBatch is inside the range, sanity check + if (initNumBatch > rollup.lastVerifiedBatch) { + revert InitNumBatchAboveLastVerifiedBatch(); + } + } + + // Assert final pending state num is in correct range + // - exist ( has been added) + // - bigger than the initPendingstate + // - not consolidated + if ( + finalPendingStateNum > rollup.lastPendingState || + finalPendingStateNum <= initPendingStateNum || + finalPendingStateNum <= rollup.lastPendingStateConsolidated + ) { + revert FinalPendingStateNumInvalid(); + } + + // Check final num batch + if ( + finalNewBatch != + rollup + .pendingStateTransitions[finalPendingStateNum] + .lastVerifiedBatch + ) { + revert FinalNumBatchDoesNotMatchPendingState(); + } + + // Get snark bytes + bytes memory snarkHashBytes = _getInputSnarkBytes( + rollup, + initNumBatch, + finalNewBatch, + newLocalExitRoot, + oldStateRoot, + newStateRoot + ); + + // Calulate the snark input + uint256 inputSnark = uint256(sha256(snarkHashBytes)) % _RFIELD; + + // Verify proof + if (!rollup.verifier.verifyProof(proof, [inputSnark])) { + revert InvalidProof(); + } + + if ( + rollup.pendingStateTransitions[finalPendingStateNum].stateRoot == + newStateRoot + ) { + revert StoredRootMustBeDifferentThanNewRoot(); + } + } + + /** + * @notice Function to update the batch fee based on the new verified batches + * The batch fee will not be updated when the trusted aggregator verifies batches + * @param newLastVerifiedBatch New last verified batch + */ + function _updateBatchFee( + RollupData storage rollup, + uint64 newLastVerifiedBatch + ) internal { + uint64 currentLastVerifiedBatch = _getLastVerifiedBatch(rollup); + uint64 currentBatch = newLastVerifiedBatch; + + uint256 totalBatchesAboveTarget; + uint256 newBatchesVerified = newLastVerifiedBatch - + currentLastVerifiedBatch; + + uint256 targetTimestamp = block.timestamp - verifyBatchTimeTarget; + + while (currentBatch != currentLastVerifiedBatch) { + // Load sequenced batchdata + SequencedBatchData storage 
currentSequencedBatchData = rollup + .sequencedBatches[currentBatch]; + + // Check if timestamp is below the verifyBatchTimeTarget + if ( + targetTimestamp < currentSequencedBatchData.sequencedTimestamp + ) { + // update currentBatch + currentBatch = currentSequencedBatchData + .previousLastBatchSequenced; + } else { + // The rest of batches will be above + totalBatchesAboveTarget = + currentBatch - + currentLastVerifiedBatch; + break; + } + } + + uint256 totalBatchesBelowTarget = newBatchesVerified - + totalBatchesAboveTarget; + + // _MAX_BATCH_FEE --> (< 70 bits) + // multiplierBatchFee --> (< 10 bits) + // _MAX_BATCH_MULTIPLIER = 12 + // multiplierBatchFee ** _MAX_BATCH_MULTIPLIER --> (< 128 bits) + // batchFee * (multiplierBatchFee ** _MAX_BATCH_MULTIPLIER)--> + // (< 70 bits) * (< 128 bits) = < 256 bits + + // Since all the following operations cannot overflow, we can optimize this operations with unchecked + unchecked { + if (totalBatchesBelowTarget < totalBatchesAboveTarget) { + // There are more batches above target, fee is multiplied + uint256 diffBatches = totalBatchesAboveTarget - + totalBatchesBelowTarget; + + diffBatches = diffBatches > _MAX_BATCH_MULTIPLIER + ? _MAX_BATCH_MULTIPLIER + : diffBatches; + + // For every multiplierBatchFee multiplication we must shift 3 zeroes since we have 3 decimals + _batchFee = + (_batchFee * (uint256(multiplierBatchFee) ** diffBatches)) / + (uint256(1000) ** diffBatches); + } else { + // There are more batches below target, fee is divided + uint256 diffBatches = totalBatchesBelowTarget - + totalBatchesAboveTarget; + + diffBatches = diffBatches > _MAX_BATCH_MULTIPLIER + ? _MAX_BATCH_MULTIPLIER + : diffBatches; + + // For every multiplierBatchFee multiplication we must shift 3 zeroes since we have 3 decimals + uint256 accDivisor = (uint256(1 ether) * + (uint256(multiplierBatchFee) ** diffBatches)) / + (uint256(1000) ** diffBatches); + + // multiplyFactor = multiplierBatchFee ** diffBatches / 10 ** (diffBatches * 3) + // accDivisor = 1E18 * multiplyFactor + // 1E18 * batchFee / accDivisor = batchFee / multiplyFactor + // < 60 bits * < 70 bits / ~60 bits --> overflow not possible + _batchFee = (uint256(1 ether) * _batchFee) / accDivisor; + } + } + + // Batch fee must remain inside a range + if (_batchFee > _MAX_BATCH_FEE) { + _batchFee = _MAX_BATCH_FEE; + } else if (_batchFee < _MIN_BATCH_FEE) { + _batchFee = _MIN_BATCH_FEE; + } + } + + //////////////////////// + // Emergency state functions + //////////////////////// + + /** + * @notice Function to activate emergency state, which also enables the emergency mode on both PolygonRollupManager and PolygonZkEVMBridge contracts + * If not called by the owner must not have been aggregated in a _HALT_AGGREGATION_TIMEOUT period and an emergency state was not happened in the same period + */ + function activateEmergencyState() external { + if (!hasRole(_EMERGENCY_COUNCIL_ROLE, msg.sender)) { + if ( + lastAggregationTimestamp == 0 || + lastAggregationTimestamp + _HALT_AGGREGATION_TIMEOUT > + block.timestamp || + lastDeactivatedEmergencyStateTimestamp + + _HALT_AGGREGATION_TIMEOUT > + block.timestamp + ) { + revert HaltTimeoutNotExpired(); + } + } + _activateEmergencyState(); + } + + /** + * @notice Function to deactivate emergency state on both PolygonRollupManager and PolygonZkEVMBridge contracts + */ + function deactivateEmergencyState() + external + onlyRole(_STOP_EMERGENCY_ROLE) + { + // Set last deactivated emergency state + lastDeactivatedEmergencyStateTimestamp = uint64(block.timestamp); + + // 
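As a worked example of the fee adjustment above, plain BigInt arithmetic mirroring the unchecked block (the concrete numbers are only illustrative):

    import {ethers} from "hardhat";

    let batchFee = ethers.parseEther("0.1"); // current fee, bigint
    const multiplierBatchFee = 1002n;        // 1.002 expressed with 3 decimals
    const diffBatches = 10n;                 // |above - below|, capped on-chain at _MAX_BATCH_MULTIPLIER = 12

    // More batches verified above the time target: fee is multiplied by 1.002^10 ≈ 1.0202
    batchFee = (batchFee * multiplierBatchFee ** diffBatches) / 1000n ** diffBatches;
    // batchFee ≈ 0.10202 POL

    // More batches below the target: fee is divided by the same factor via the 1e18 accDivisor trick
    const accDivisor = (10n ** 18n * multiplierBatchFee ** diffBatches) / 1000n ** diffBatches;
    batchFee = (10n ** 18n * batchFee) / accDivisor;
    // batchFee is back to ≈ 0.1 POL (up to rounding), before the final clamp to [_MIN_BATCH_FEE, _MAX_BATCH_FEE]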
Deactivate emergency state on PolygonZkEVMBridge + bridgeAddress.deactivateEmergencyState(); + + // Deactivate emergency state on this contract + super._deactivateEmergencyState(); + } + + /** + * @notice Internal function to activate emergency state on both PolygonRollupManager and PolygonZkEVMBridge contracts + */ + function _activateEmergencyState() internal override { + // Activate emergency state on PolygonZkEVM Bridge + bridgeAddress.activateEmergencyState(); + + // Activate emergency state on this contract + super._activateEmergencyState(); + } + + ////////////////// + // Setter functions + ////////////////// + + /** + * @notice Set a new trusted aggregator timeout + * The timeout can only be lowered, except if emergency state is active + * @param newTrustedAggregatorTimeout Trusted aggregator timeout + */ + function setTrustedAggregatorTimeout( + uint64 newTrustedAggregatorTimeout + ) external onlyRole(_TWEAK_PARAMETERS_ROLE) { + if (!isEmergencyState) { + if (newTrustedAggregatorTimeout >= trustedAggregatorTimeout) { + revert NewTrustedAggregatorTimeoutMustBeLower(); + } + } + + trustedAggregatorTimeout = newTrustedAggregatorTimeout; + emit SetTrustedAggregatorTimeout(newTrustedAggregatorTimeout); + } + + /** + * @notice Set a new pending state timeout + * The timeout can only be lowered, except if emergency state is active + * @param newPendingStateTimeout Pending state timeout + */ + function setPendingStateTimeout( + uint64 newPendingStateTimeout + ) external onlyRole(_TWEAK_PARAMETERS_ROLE) { + if (!isEmergencyState) { + if (newPendingStateTimeout >= pendingStateTimeout) { + revert NewPendingStateTimeoutMustBeLower(); + } + } + + pendingStateTimeout = newPendingStateTimeout; + emit SetPendingStateTimeout(newPendingStateTimeout); + } + + /** + * @notice Set a new multiplier batch fee + * @param newMultiplierBatchFee multiplier batch fee + */ + function setMultiplierBatchFee( + uint16 newMultiplierBatchFee + ) external onlyRole(_TWEAK_PARAMETERS_ROLE) { + if (newMultiplierBatchFee < 1000 || newMultiplierBatchFee > 1023) { + revert InvalidRangeMultiplierBatchFee(); + } + + multiplierBatchFee = newMultiplierBatchFee; + emit SetMultiplierBatchFee(newMultiplierBatchFee); + } + + /** + * @notice Set a new verify batch time target + * This value will only be relevant once the aggregation is decentralized, so + * the trustedAggregatorTimeout should be zero or very close to zero + * @param newVerifyBatchTimeTarget Verify batch time target + */ + function setVerifyBatchTimeTarget( + uint64 newVerifyBatchTimeTarget + ) external onlyRole(_TWEAK_PARAMETERS_ROLE) { + if (newVerifyBatchTimeTarget > 1 days) { + revert InvalidRangeBatchTimeTarget(); + } + verifyBatchTimeTarget = newVerifyBatchTimeTarget; + emit SetVerifyBatchTimeTarget(newVerifyBatchTimeTarget); + } + + /** + * @notice Set the current batch fee + * @param newBatchFee new batch fee + */ + function setBatchFee(uint256 newBatchFee) external onlyRole(_SET_FEE_ROLE) { + // check fees min and max + if (newBatchFee > _MAX_BATCH_FEE || newBatchFee < _MIN_BATCH_FEE) { + revert BatchFeeOutOfRange(); + } + _batchFee = newBatchFee; + emit SetBatchFee(newBatchFee); + } + + //////////////////////// + // view/pure functions + /////////////////////// + + /** + * @notice Get the current rollup exit root + * Computes the rollup exit root using the local exit roots of all rollups + * Since it's expected to have no more than 10 rollups in this first version, even if this approach + * has a gas consumption that scales linearly with the
rollups added, it's ok + * In a future versions this computation will be done inside the circuit + */ + function getRollupExitRoot() public view returns (bytes32) { + uint256 currentNodes = rollupCount; + + // If there are no nodes return 0 + if (currentNodes == 0) { + return bytes32(0); + } + + // This array will contain the nodes of the current iteration + bytes32[] memory tmpTree = new bytes32[](currentNodes); + + // In the first iteration the nodes will be the leafs which are the local exit roots of each network + for (uint256 i = 0; i < currentNodes; i++) { + // The first rollup ID starts on 1 + tmpTree[i] = rollupIDToRollupData[uint32(i + 1)].lastLocalExitRoot; + } + + // This variable will keep track of the zero hashes + bytes32 currentZeroHashHeight = 0; + + // This variable will keep track of the reamining levels to compute + uint256 remainingLevels = _EXIT_TREE_DEPTH; + + // Calculate the root of the sub-tree that contains all the localExitRoots + while (currentNodes != 1) { + uint256 nextIterationNodes = currentNodes / 2 + (currentNodes % 2); + bytes32[] memory nextTmpTree = new bytes32[](nextIterationNodes); + for (uint256 i = 0; i < nextIterationNodes; i++) { + // if we are on the last iteration of the current level and the nodes are odd + if (i == nextIterationNodes - 1 && (currentNodes % 2) == 1) { + nextTmpTree[i] = keccak256( + abi.encodePacked(tmpTree[i * 2], currentZeroHashHeight) + ); + } else { + nextTmpTree[i] = keccak256( + abi.encodePacked(tmpTree[i * 2], tmpTree[(i * 2) + 1]) + ); + } + } + + // Update tree variables + tmpTree = nextTmpTree; + currentNodes = nextIterationNodes; + currentZeroHashHeight = keccak256( + abi.encodePacked(currentZeroHashHeight, currentZeroHashHeight) + ); + remainingLevels--; + } + + bytes32 currentRoot = tmpTree[0]; + + // Calculate remaining levels, since it's a sequencial merkle tree, the rest of the tree are zeroes + for (uint256 i = 0; i < remainingLevels; i++) { + currentRoot = keccak256( + abi.encodePacked(currentRoot, currentZeroHashHeight) + ); + currentZeroHashHeight = keccak256( + abi.encodePacked(currentZeroHashHeight, currentZeroHashHeight) + ); + } + return currentRoot; + } + + /** + * @notice Get the last verified batch + */ + function getLastVerifiedBatch( + uint32 rollupID + ) public view returns (uint64) { + return _getLastVerifiedBatch(rollupIDToRollupData[rollupID]); + } + + /** + * @notice Get the last verified batch + */ + function _getLastVerifiedBatch( + RollupData storage rollup + ) internal view returns (uint64) { + if (rollup.lastPendingState > 0) { + return + rollup + .pendingStateTransitions[rollup.lastPendingState] + .lastVerifiedBatch; + } else { + return rollup.lastVerifiedBatch; + } + } + + /** + * @notice Returns a boolean that indicates if the pendingStateNum is or not consolidable + * @param rollupID Rollup id + * @param pendingStateNum Pending state number to check + * Note that his function does not check if the pending state currently exists, or if it's consolidated already + */ + function isPendingStateConsolidable( + uint32 rollupID, + uint64 pendingStateNum + ) public view returns (bool) { + return + _isPendingStateConsolidable( + rollupIDToRollupData[rollupID], + pendingStateNum + ); + } + + /** + * @notice Returns a boolean that indicates if the pendingStateNum is or not consolidable + * @param rollup Rollup data storage pointer + * @param pendingStateNum Pending state number to check + * Note that his function does not check if the pending state currently exists, or if it's consolidated 
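An off-chain reproduction of the same folding can help when asserting against getRollupExitRoot in tests; a hedged ethers-v6 sketch, assuming localExitRoots[i] holds the lastLocalExitRoot of rollup ID i + 1 and an exit tree depth of 32 for _EXIT_TREE_DEPTH:

    import {ethers} from "hardhat";

    function computeRollupExitRoot(localExitRoots: string[]): string {
        if (localExitRoots.length === 0) return ethers.ZeroHash;

        let nodes = [...localExitRoots];
        let zeroHash = ethers.ZeroHash;
        let remainingLevels = 32; // assumed _EXIT_TREE_DEPTH

        // Fold the local exit roots pairwise, padding an odd last node with the current zero hash
        while (nodes.length > 1) {
            const next: string[] = [];
            for (let i = 0; i < nodes.length; i += 2) {
                const right = i + 1 < nodes.length ? nodes[i + 1] : zeroHash;
                next.push(ethers.solidityPackedKeccak256(["bytes32", "bytes32"], [nodes[i], right]));
            }
            nodes = next;
            zeroHash = ethers.solidityPackedKeccak256(["bytes32", "bytes32"], [zeroHash, zeroHash]);
            remainingLevels--;
        }

        // The remaining levels of the sequential tree are all zero hashes
        let root = nodes[0];
        for (let i = 0; i < remainingLevels; i++) {
            root = ethers.solidityPackedKeccak256(["bytes32", "bytes32"], [root, zeroHash]);
            zeroHash = ethers.solidityPackedKeccak256(["bytes32", "bytes32"], [zeroHash, zeroHash]);
        }
        return root;
    }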
already + */ + function _isPendingStateConsolidable( + RollupData storage rollup, + uint64 pendingStateNum + ) internal view returns (bool) { + return (rollup.pendingStateTransitions[pendingStateNum].timestamp + + pendingStateTimeout <= + block.timestamp); + } + + /** + * @notice Function to calculate the reward to verify a single batch + */ + function calculateRewardPerBatch() public view returns (uint256) { + uint256 currentBalance = pol.balanceOf(address(this)); + + // Total Batches to be verified = total Sequenced Batches - total verified Batches + uint256 totalBatchesToVerify = totalSequencedBatches - + totalVerifiedBatches; + + if (totalBatchesToVerify == 0) return 0; + return currentBalance / totalBatchesToVerify; + } + + /** + * @notice Get batch fee + * This function is used instad of the automatic public view one, + * because in a future might change the behaviour and we will be able to mantain the interface + */ + function getBatchFee() public view returns (uint256) { + return _batchFee; + } + + /** + * @notice Get forced batch fee + */ + function getForcedBatchFee() public view returns (uint256) { + return _batchFee * 100; + } + + /** + * @notice Function to calculate the input snark bytes + * @param rollupID Rollup id used to calculate the input snark bytes + * @param initNumBatch Batch which the aggregator starts the verification + * @param finalNewBatch Last batch aggregator intends to verify + * @param newLocalExitRoot New local exit root once the batch is processed + * @param oldStateRoot State root before batch is processed + * @param newStateRoot New State root once the batch is processed + */ + function getInputSnarkBytes( + uint32 rollupID, + uint64 initNumBatch, + uint64 finalNewBatch, + bytes32 newLocalExitRoot, + bytes32 oldStateRoot, + bytes32 newStateRoot + ) public view returns (bytes memory) { + return + _getInputSnarkBytes( + rollupIDToRollupData[rollupID], + initNumBatch, + finalNewBatch, + newLocalExitRoot, + oldStateRoot, + newStateRoot + ); + } + + /** + * @notice Function to calculate the input snark bytes + * @param rollup Rollup data storage pointer + * @param initNumBatch Batch which the aggregator starts the verification + * @param finalNewBatch Last batch aggregator intends to verify + * @param newLocalExitRoot New local exit root once the batch is processed + * @param oldStateRoot State root before batch is processed + * @param newStateRoot New State root once the batch is processed + */ + function _getInputSnarkBytes( + RollupData storage rollup, + uint64 initNumBatch, + uint64 finalNewBatch, + bytes32 newLocalExitRoot, + bytes32 oldStateRoot, + bytes32 newStateRoot + ) internal view returns (bytes memory) { + // Sanity check + bytes32 oldAccInputHash = rollup + .sequencedBatches[initNumBatch] + .accInputHash; + + bytes32 newAccInputHash = rollup + .sequencedBatches[finalNewBatch] + .accInputHash; + + // Sanity check + if (initNumBatch != 0 && oldAccInputHash == bytes32(0)) { + revert OldAccInputHashDoesNotExist(); + } + + if (newAccInputHash == bytes32(0)) { + revert NewAccInputHashDoesNotExist(); + } + + // Check that new state root is inside goldilocks field + if (!_checkStateRootInsidePrime(uint256(newStateRoot))) { + revert NewStateRootNotInsidePrime(); + } + + return + abi.encodePacked( + msg.sender, + oldStateRoot, + oldAccInputHash, + initNumBatch, + rollup.chainID, + rollup.forkID, + newStateRoot, + newAccInputHash, + newLocalExitRoot, + finalNewBatch + ); + } + + /** + * @notice Function to check if the state root is inside of the prime 
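A hedged off-chain sketch of the public input derivation performed here (ethers v6; the aggregator address, roots and batch numbers are placeholders, and the modulus below is the assumed value of _RFIELD, the BN254 scalar field order):

    import {ethers} from "hardhat";

    const RFIELD = 21888242871839275222246405745257275088548364400416034343698204186575808495617n;

    // Mirrors abi.encodePacked(...) in _getInputSnarkBytes
    const snarkHashBytes = ethers.solidityPacked(
        ["address", "bytes32", "bytes32", "uint64", "uint64", "uint64", "bytes32", "bytes32", "bytes32", "uint64"],
        [
            aggregatorAddress, // msg.sender of the verify call
            oldStateRoot,
            oldAccInputHash,
            initNumBatch,
            chainID,
            forkID,
            newStateRoot,
            newAccInputHash,
            newLocalExitRoot,
            finalNewBatch,
        ]
    );

    // The fflonk proof is verified against this single field element
    const inputSnark = BigInt(ethers.sha256(snarkHashBytes)) % RFIELD;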
field + * @param newStateRoot New State root once the batch is processed + */ + function _checkStateRootInsidePrime( + uint256 newStateRoot + ) internal pure returns (bool) { + if ( + ((newStateRoot & _MAX_UINT_64) < _GOLDILOCKS_PRIME_FIELD) && + (((newStateRoot >> 64) & _MAX_UINT_64) < _GOLDILOCKS_PRIME_FIELD) && + (((newStateRoot >> 128) & _MAX_UINT_64) < + _GOLDILOCKS_PRIME_FIELD) && + ((newStateRoot >> 192) < _GOLDILOCKS_PRIME_FIELD) + ) { + return true; + } else { + return false; + } + } + + /** + * @notice Get rollup state root given a batch number + * @param rollupID Rollup identifier + * @param batchNum Batch number + */ + function getRollupBatchNumToStateRoot( + uint32 rollupID, + uint64 batchNum + ) public view returns (bytes32) { + return rollupIDToRollupData[rollupID].batchNumToStateRoot[batchNum]; + } + + /** + * @notice Get rollup sequence batches struct given a batch number + * @param rollupID Rollup identifier + * @param batchNum Batch number + */ + function getRollupSequencedBatches( + uint32 rollupID, + uint64 batchNum + ) public view returns (SequencedBatchData memory) { + return rollupIDToRollupData[rollupID].sequencedBatches[batchNum]; + } + + /** + * @notice Get rollup sequence pending state struct given a batch number + * @param rollupID Rollup identifier + * @param batchNum Batch number + */ + function getRollupPendingStateTransitions( + uint32 rollupID, + uint64 batchNum + ) public view returns (PendingState memory) { + return rollupIDToRollupData[rollupID].pendingStateTransitions[batchNum]; + } +} diff --git a/contracts/v2/previousVersions/PolygonValidiumEtrogPrevious.sol b/contracts/v2/previousVersions/PolygonValidiumEtrogPrevious.sol index 01a2d9f4b..73ae05fd2 100644 --- a/contracts/v2/previousVersions/PolygonValidiumEtrogPrevious.sol +++ b/contracts/v2/previousVersions/PolygonValidiumEtrogPrevious.sol @@ -1,282 +1,314 @@ -// // SPDX-License-Identifier: AGPL-3.0 -// pragma solidity 0.8.20; - -// import "./PolygonRollupBaseEtrogPrevious.sol"; -// import "../interfaces/IDataAvailabilityProtocol.sol"; -// import "../interfaces/IPolygonValidium.sol"; - -// /** -// * Contract responsible for managing the states and the updates of L2 network. -// * There will be a trusted sequencer, which is able to send transactions. -// * Any user can force some transaction and the sequencer will have a timeout to add them in the queue. -// * The sequenced state is deterministic and can be precalculated before it's actually verified by a zkProof. -// * The aggregators will be able to verify the sequenced state with zkProofs and therefore make available the withdrawals from L2 network. -// * To enter and exit of the L2 network will be used a PolygonZkEVMBridge smart contract that will be deployed in both networks. 
-// * It is advised to use timelocks for the admin address in case of Validium since if can change the dataAvailabilityProtocol -// */ -// contract PolygonValidiumEtrogPrevious is -// PolygonRollupBaseEtrogPrevious, -// IPolygonValidium -// { -// using SafeERC20Upgradeable for IERC20Upgradeable; - -// /** -// * @notice Struct which will be used to call sequenceBatches -// * @param transactionsHash keccak256 hash of the L2 ethereum transactions EIP-155 or pre-EIP-155 with signature: -// * EIP-155: rlp(nonce, gasprice, gasLimit, to, value, data, chainid, 0, 0,) || v || r || s -// * pre-EIP-155: rlp(nonce, gasprice, gasLimit, to, value, data) || v || r || s -// * @param forcedGlobalExitRoot Global exit root, empty when sequencing a non forced batch -// * @param forcedTimestamp Minimum timestamp of the force batch data, empty when sequencing a non forced batch -// * @param forcedBlockHashL1 blockHash snapshot of the force batch data, empty when sequencing a non forced batch -// */ -// struct ValidiumBatchData { -// bytes32 transactionsHash; -// bytes32 forcedGlobalExitRoot; -// uint64 forcedTimestamp; -// bytes32 forcedBlockHashL1; -// } - -// // Data Availability Protocol Address -// IDataAvailabilityProtocol public dataAvailabilityProtocol; - -// // Indicates if sequence with data avialability is allowed -// // This allow the sequencer to post the data and skip the Data comittee -// bool public isSequenceWithDataAvailabilityAllowed; - -// /** -// * @dev Emitted when the admin updates the data availability protocol -// */ -// event SetDataAvailabilityProtocol(address newDataAvailabilityProtocol); - -// /** -// * @dev Emitted when switch the ability to sequence with data availability -// */ -// event SwitchSequenceWithDataAvailability(); - -// /** -// * @param _globalExitRootManager Global exit root manager address -// * @param _pol POL token address -// * @param _bridgeAddress Bridge address -// * @param _rollupManager Global exit root manager address -// */ -// constructor( -// IPolygonZkEVMGlobalExitRootV2 _globalExitRootManager, -// IERC20Upgradeable _pol, -// IPolygonZkEVMBridgeV2 _bridgeAddress, -// PolygonRollupManager _rollupManager -// ) -// PolygonRollupBaseEtrogPrevious( -// _globalExitRootManager, -// _pol, -// _bridgeAddress, -// _rollupManager -// ) -// {} - -// ///////////////////////////////////// -// // Sequence/Verify batches functions -// //////////////////////////////////// - -// /** -// * @notice Allows a sequencer to send multiple batches -// * @param batches Struct array which holds the necessary data to append new batches to the sequence -// * @param l2Coinbase Address that will receive the fees from L2 -// * @param dataAvailabilityMessage Byte array containing the signatures and all the addresses of the committee in ascending order -// * [signature 0, ..., signature requiredAmountOfSignatures -1, address 0, ... 
address N] -// * note that each ECDSA signatures are used, therefore each one must be 65 bytes -// * note Pol is not a reentrant token -// */ -// function sequenceBatchesValidium( -// ValidiumBatchData[] calldata batches, -// address l2Coinbase, -// bytes calldata dataAvailabilityMessage -// ) external onlyTrustedSequencer { -// uint256 batchesNum = batches.length; -// if (batchesNum == 0) { -// revert SequenceZeroBatches(); -// } - -// if (batchesNum > _MAX_VERIFY_BATCHES) { -// revert ExceedMaxVerifyBatches(); -// } - -// // Update global exit root if there are new deposits -// bridgeAddress.updateGlobalExitRoot(); - -// // Get global batch variables -// bytes32 l1InfoRoot = globalExitRootManager.getRoot(); - -// // Store storage variables in memory, to save gas, because will be overrided multiple times -// uint64 currentLastForceBatchSequenced = lastForceBatchSequenced; -// bytes32 currentAccInputHash = lastAccInputHash; - -// // Store in a temporal variable, for avoid access again the storage slot -// uint64 initLastForceBatchSequenced = currentLastForceBatchSequenced; - -// // Accumulated sequenced transaction hash to verify them afterward against the dataAvailabilityProtocol -// bytes32 accumulatedNonForcedTransactionsHash = bytes32(0); - -// for (uint256 i = 0; i < batchesNum; i++) { -// // Load current sequence -// ValidiumBatchData memory currentBatch = batches[i]; - -// // Check if it's a forced batch -// if (currentBatch.forcedTimestamp > 0) { -// currentLastForceBatchSequenced++; - -// // Check forced data matches -// bytes32 hashedForcedBatchData = keccak256( -// abi.encodePacked( -// currentBatch.transactionsHash, -// currentBatch.forcedGlobalExitRoot, -// currentBatch.forcedTimestamp, -// currentBatch.forcedBlockHashL1 -// ) -// ); - -// if ( -// hashedForcedBatchData != -// forcedBatches[currentLastForceBatchSequenced] -// ) { -// revert ForcedDataDoesNotMatch(); -// } - -// // Calculate next accumulated input hash -// currentAccInputHash = keccak256( -// abi.encodePacked( -// currentAccInputHash, -// currentBatch.transactionsHash, -// currentBatch.forcedGlobalExitRoot, -// currentBatch.forcedTimestamp, -// l2Coinbase, -// currentBatch.forcedBlockHashL1 -// ) -// ); - -// // Delete forceBatch data since won't be used anymore -// delete forcedBatches[currentLastForceBatchSequenced]; -// } else { -// // Accumulate non forced transactions hash -// accumulatedNonForcedTransactionsHash = keccak256( -// abi.encodePacked( -// accumulatedNonForcedTransactionsHash, -// currentBatch.transactionsHash -// ) -// ); - -// // Note that forcedGlobalExitRoot and forcedBlockHashL1 remain unused and unchecked in this path -// // The synchronizer should be aware of that - -// // Calculate next accumulated input hash -// currentAccInputHash = keccak256( -// abi.encodePacked( -// currentAccInputHash, -// currentBatch.transactionsHash, -// l1InfoRoot, -// uint64(block.timestamp), -// l2Coinbase, -// bytes32(0) -// ) -// ); -// } -// } - -// // Sanity check, should be unreachable -// if (currentLastForceBatchSequenced > lastForceBatch) { -// revert ForceBatchesOverflow(); -// } - -// // Store back the storage variables -// lastAccInputHash = currentAccInputHash; - -// uint256 nonForcedBatchesSequenced = batchesNum; - -// // Check if there has been forced batches -// if (currentLastForceBatchSequenced != initLastForceBatchSequenced) { -// uint64 forcedBatchesSequenced = currentLastForceBatchSequenced - -// initLastForceBatchSequenced; -// // substract forced batches -// nonForcedBatchesSequenced -= 
forcedBatchesSequenced; - -// // Transfer pol for every forced batch submitted -// pol.safeTransfer( -// address(rollupManager), -// calculatePolPerForceBatch() * (forcedBatchesSequenced) -// ); - -// // Store new last force batch sequenced -// lastForceBatchSequenced = currentLastForceBatchSequenced; -// } - -// // Pay collateral for every non-forced batch submitted -// if (nonForcedBatchesSequenced != 0) { -// pol.safeTransferFrom( -// msg.sender, -// address(rollupManager), -// rollupManager.getBatchFee() * nonForcedBatchesSequenced -// ); - -// // Validate that the data availability protocol accepts the dataAvailabilityMessage -// // note This is a view function, so there's not much risk even if this contract was vulnerable to reentrant attacks -// dataAvailabilityProtocol.verifyMessage( -// accumulatedNonForcedTransactionsHash, -// dataAvailabilityMessage -// ); -// } - -// uint64 currentBatchSequenced = rollupManager.onSequenceBatches( -// uint64(batchesNum), -// currentAccInputHash -// ); - -// emit SequenceBatches(currentBatchSequenced, l1InfoRoot); -// } - -// /** -// * @notice Allows a sequencer to send multiple batches sending all the data, and without using the dataAvailabilityProtocol -// * @param batches Struct array which holds the necessary data to append new batches to the sequence -// * @param l2Coinbase Address that will receive the fees from L2 -// */ -// function sequenceBatches( -// BatchData[] calldata batches, -// address l2Coinbase -// ) public override { -// if (!isSequenceWithDataAvailabilityAllowed) { -// revert SequenceWithDataAvailabilityNotAllowed(); -// } -// super.sequenceBatches(batches, l2Coinbase); -// } - -// ////////////////// -// // admin functions -// ////////////////// - -// /** -// * @notice Allow the admin to set a new data availability protocol -// * @param newDataAvailabilityProtocol Address of the new data availability protocol -// */ -// function setDataAvailabilityProtocol( -// IDataAvailabilityProtocol newDataAvailabilityProtocol -// ) external onlyAdmin { -// dataAvailabilityProtocol = newDataAvailabilityProtocol; - -// emit SetDataAvailabilityProtocol(address(newDataAvailabilityProtocol)); -// } - -// /** -// * @notice Allow the admin to switch the sequence with data availability -// * @param newIsSequenceWithDataAvailabilityAllowed Boolean to switch -// */ -// function switchSequenceWithDataAvailability( -// bool newIsSequenceWithDataAvailabilityAllowed -// ) external onlyAdmin { -// if ( -// newIsSequenceWithDataAvailabilityAllowed == -// isSequenceWithDataAvailabilityAllowed -// ) { -// revert SwitchToSameValue(); -// } -// isSequenceWithDataAvailabilityAllowed = newIsSequenceWithDataAvailabilityAllowed; -// emit SwitchSequenceWithDataAvailability(); -// } -// } +// SPDX-License-Identifier: AGPL-3.0 +pragma solidity 0.8.20; + +import "./PolygonRollupBaseEtrogPrevious.sol"; +import "../interfaces/IDataAvailabilityProtocol.sol"; +import "../interfaces/IPolygonValidium.sol"; + +/** + * Contract responsible for managing the states and the updates of L2 network. + * There will be a trusted sequencer, which is able to send transactions. + * Any user can force some transaction and the sequencer will have a timeout to add them in the queue. + * The sequenced state is deterministic and can be precalculated before it's actually verified by a zkProof. + * The aggregators will be able to verify the sequenced state with zkProofs and therefore make available the withdrawals from L2 network. 
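To make the hashing inside sequenceBatchesValidium below easier to follow, a hedged ethers-v6 sketch of how a sequencer (or the data availability committee) can mirror the two accumulators off-chain; lastAccInputHash, l1InfoRoot, maxSequenceTimestamp, l2Coinbase and batchTransactionHashes are assumed inputs, and only non-forced batches are shown:

    import {ethers} from "hardhat";

    let accumulatedNonForcedTransactionsHash = ethers.ZeroHash;
    let currentAccInputHash = lastAccInputHash;

    for (const transactionsHash of batchTransactionHashes) {
        // Chained commitment that the committee attests to via dataAvailabilityMessage
        accumulatedNonForcedTransactionsHash = ethers.solidityPackedKeccak256(
            ["bytes32", "bytes32"],
            [accumulatedNonForcedTransactionsHash, transactionsHash]
        );

        // Accumulated input hash that is finally reported to rollupManager.onSequenceBatches
        currentAccInputHash = ethers.solidityPackedKeccak256(
            ["bytes32", "bytes32", "bytes32", "uint64", "address", "bytes32"],
            [currentAccInputHash, transactionsHash, l1InfoRoot, maxSequenceTimestamp, l2Coinbase, ethers.ZeroHash]
        );
    }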
+ * A PolygonZkEVMBridge smart contract, deployed on both networks, will be used to enter and exit the L2 network. + * It is advised to use timelocks for the admin address in case of Validium since it can change the dataAvailabilityProtocol + */ +contract PolygonValidiumEtrogPrevious is + PolygonRollupBaseEtrogPrevious, + IPolygonValidium +{ + using SafeERC20Upgradeable for IERC20Upgradeable; + + /** + * @notice Struct which will be used to call sequenceBatches + * @param transactionsHash keccak256 hash of the L2 ethereum transactions EIP-155 or pre-EIP-155 with signature: + * EIP-155: rlp(nonce, gasprice, gasLimit, to, value, data, chainid, 0, 0,) || v || r || s + * pre-EIP-155: rlp(nonce, gasprice, gasLimit, to, value, data) || v || r || s + * @param forcedGlobalExitRoot Global exit root, empty when sequencing a non forced batch + * @param forcedTimestamp Minimum timestamp of the force batch data, empty when sequencing a non forced batch + * @param forcedBlockHashL1 blockHash snapshot of the force batch data, empty when sequencing a non forced batch + */ + struct ValidiumBatchData { + bytes32 transactionsHash; + bytes32 forcedGlobalExitRoot; + uint64 forcedTimestamp; + bytes32 forcedBlockHashL1; + } + + // Data Availability Protocol Address + IDataAvailabilityProtocol public dataAvailabilityProtocol; + + // Indicates if sequencing with data availability is allowed + // This allows the sequencer to post the data and skip the data committee + bool public isSequenceWithDataAvailabilityAllowed; + + /** + * @dev Emitted when the admin updates the data availability protocol + */ + event SetDataAvailabilityProtocol(address newDataAvailabilityProtocol); + + /** + * @dev Emitted when the ability to sequence with data availability is switched + */ + event SwitchSequenceWithDataAvailability(); + + /** + * @param _globalExitRootManager Global exit root manager address + * @param _pol POL token address + * @param _bridgeAddress Bridge address + * @param _rollupManager Rollup manager address + */ + constructor( + IPolygonZkEVMGlobalExitRootV2 _globalExitRootManager, + IERC20Upgradeable _pol, + IPolygonZkEVMBridgeV2 _bridgeAddress, + PolygonRollupManager _rollupManager + ) + PolygonRollupBaseEtrogPrevious( + _globalExitRootManager, + _pol, + _bridgeAddress, + _rollupManager + ) + {} + + ///////////////////////////////////// + // Sequence/Verify batches functions + //////////////////////////////////// + + /** + * @notice Allows a sequencer to send multiple batches + * @param batches Struct array which holds the necessary data to append new batches to the sequence + * @param maxSequenceTimestamp Max timestamp of the sequence. This timestamp must be inside a safety range (actual + 36 seconds). + * This timestamp should be equal to or higher than the timestamp of the last block inside the sequence, otherwise the batch will be invalidated by the circuit. + * @param initSequencedBatch This parameter must match the current last batch sequenced. + * This will be a protection for the sequencer to avoid sending undesired data + * @param l2Coinbase Address that will receive the fees from L2 + * @param dataAvailabilityMessage Byte array containing the signatures and all the addresses of the committee in ascending order + * [signature 0, ..., signature requiredAmountOfSignatures -1, address 0, ...
address N] + * note that each ECDSA signatures are used, therefore each one must be 65 bytes + * note Pol is not a reentrant token + */ + function sequenceBatchesValidium( + ValidiumBatchData[] calldata batches, + uint64 maxSequenceTimestamp, + uint64 initSequencedBatch, + address l2Coinbase, + bytes calldata dataAvailabilityMessage + ) external onlyTrustedSequencer { + uint256 batchesNum = batches.length; + if (batchesNum == 0) { + revert SequenceZeroBatches(); + } + + if (batchesNum > _MAX_VERIFY_BATCHES) { + revert ExceedMaxVerifyBatches(); + } + + // Check max sequence timestamp inside of range + if ( + uint256(maxSequenceTimestamp) > (block.timestamp + TIMESTAMP_RANGE) + ) { + revert MaxTimestampSequenceInvalid(); + } + + // Update global exit root if there are new deposits + bridgeAddress.updateGlobalExitRoot(); + + // Get global batch variables + bytes32 l1InfoRoot = globalExitRootManager.getRoot(); + + // Store storage variables in memory, to save gas, because will be overrided multiple times + uint64 currentLastForceBatchSequenced = lastForceBatchSequenced; + bytes32 currentAccInputHash = lastAccInputHash; + + // Store in a temporal variable, for avoid access again the storage slot + uint64 initLastForceBatchSequenced = currentLastForceBatchSequenced; + + // Accumulated sequenced transaction hash to verify them afterward against the dataAvailabilityProtocol + bytes32 accumulatedNonForcedTransactionsHash = bytes32(0); + + for (uint256 i = 0; i < batchesNum; i++) { + // Load current sequence + ValidiumBatchData memory currentBatch = batches[i]; + + // Check if it's a forced batch + if (currentBatch.forcedTimestamp > 0) { + currentLastForceBatchSequenced++; + + // Check forced data matches + bytes32 hashedForcedBatchData = keccak256( + abi.encodePacked( + currentBatch.transactionsHash, + currentBatch.forcedGlobalExitRoot, + currentBatch.forcedTimestamp, + currentBatch.forcedBlockHashL1 + ) + ); + + if ( + hashedForcedBatchData != + forcedBatches[currentLastForceBatchSequenced] + ) { + revert ForcedDataDoesNotMatch(); + } + + // Calculate next accumulated input hash + currentAccInputHash = keccak256( + abi.encodePacked( + currentAccInputHash, + currentBatch.transactionsHash, + currentBatch.forcedGlobalExitRoot, + currentBatch.forcedTimestamp, + l2Coinbase, + currentBatch.forcedBlockHashL1 + ) + ); + + // Delete forceBatch data since won't be used anymore + delete forcedBatches[currentLastForceBatchSequenced]; + } else { + // Accumulate non forced transactions hash + accumulatedNonForcedTransactionsHash = keccak256( + abi.encodePacked( + accumulatedNonForcedTransactionsHash, + currentBatch.transactionsHash + ) + ); + + // Note that forcedGlobalExitRoot and forcedBlockHashL1 remain unused and unchecked in this path + // The synchronizer should be aware of that + + // Calculate next accumulated input hash + currentAccInputHash = keccak256( + abi.encodePacked( + currentAccInputHash, + currentBatch.transactionsHash, + l1InfoRoot, + maxSequenceTimestamp, + l2Coinbase, + bytes32(0) + ) + ); + } + } + + // Sanity check, should be unreachable + if (currentLastForceBatchSequenced > lastForceBatch) { + revert ForceBatchesOverflow(); + } + + // Store back the storage variables + lastAccInputHash = currentAccInputHash; + + uint256 nonForcedBatchesSequenced = batchesNum; + + // Check if there has been forced batches + if (currentLastForceBatchSequenced != initLastForceBatchSequenced) { + uint64 forcedBatchesSequenced = currentLastForceBatchSequenced - + initLastForceBatchSequenced; + // substract 
forced batches + nonForcedBatchesSequenced -= forcedBatchesSequenced; + + // Transfer pol for every forced batch submitted + pol.safeTransfer( + address(rollupManager), + calculatePolPerForceBatch() * (forcedBatchesSequenced) + ); + + // Store new last force batch sequenced + lastForceBatchSequenced = currentLastForceBatchSequenced; + } + + // Pay collateral for every non-forced batch submitted + if (nonForcedBatchesSequenced != 0) { + pol.safeTransferFrom( + msg.sender, + address(rollupManager), + rollupManager.getBatchFee() * nonForcedBatchesSequenced + ); + + // Validate that the data availability protocol accepts the dataAvailabilityMessage + // note This is a view function, so there's not much risk even if this contract was vulnerable to reentrant attacks + dataAvailabilityProtocol.verifyMessage( + accumulatedNonForcedTransactionsHash, + dataAvailabilityMessage + ); + } + + uint64 currentBatchSequenced = rollupManager.onSequenceBatches( + uint64(batchesNum), + currentAccInputHash + ); + + // Check init sequenced batch + if ( + initSequencedBatch != (currentBatchSequenced - uint64(batchesNum)) + ) { + revert InitSequencedBatchDoesNotMatch(); + } + + emit SequenceBatches(currentBatchSequenced, l1InfoRoot); + } + + /** + * @notice Allows a sequencer to send multiple batches + * @param batches Struct array which holds the necessary data to append new batches to the sequence + * @param maxSequenceTimestamp Max timestamp of the sequence. This timestamp must be inside a safety range (actual + 36 seconds). + * This timestamp should be equal or higher of the last block inside the sequence, otherwise this batch will be invalidated by circuit. + * @param initSequencedBatch This parameter must match the current last batch sequenced. + * This will be a protection for the sequencer to avoid sending undesired data + * @param l2Coinbase Address that will receive the fees from L2 + * note Pol is not a reentrant token + */ + function sequenceBatches( + BatchData[] calldata batches, + uint64 maxSequenceTimestamp, + uint64 initSequencedBatch, + address l2Coinbase + ) public override { + if (!isSequenceWithDataAvailabilityAllowed) { + revert SequenceWithDataAvailabilityNotAllowed(); + } + super.sequenceBatches( + batches, + maxSequenceTimestamp, + initSequencedBatch, + l2Coinbase + ); + } + + ////////////////// + // admin functions + ////////////////// + + /** + * @notice Allow the admin to set a new data availability protocol + * @param newDataAvailabilityProtocol Address of the new data availability protocol + */ + function setDataAvailabilityProtocol( + IDataAvailabilityProtocol newDataAvailabilityProtocol + ) external onlyAdmin { + dataAvailabilityProtocol = newDataAvailabilityProtocol; + + emit SetDataAvailabilityProtocol(address(newDataAvailabilityProtocol)); + } + + /** + * @notice Allow the admin to switch the sequence with data availability + * @param newIsSequenceWithDataAvailabilityAllowed Boolean to switch + */ + function switchSequenceWithDataAvailability( + bool newIsSequenceWithDataAvailabilityAllowed + ) external onlyAdmin { + if ( + newIsSequenceWithDataAvailabilityAllowed == + isSequenceWithDataAvailabilityAllowed + ) { + revert SwitchToSameValue(); + } + isSequenceWithDataAvailabilityAllowed = newIsSequenceWithDataAvailabilityAllowed; + emit SwitchSequenceWithDataAvailability(); + } +} diff --git a/contracts/v2/previousVersions/PolygonZkEVMEtrogPrevious.sol b/contracts/v2/previousVersions/PolygonZkEVMEtrogPrevious.sol index 5ec312518..95e4e16ad 100644 --- 
a/contracts/v2/previousVersions/PolygonZkEVMEtrogPrevious.sol +++ b/contracts/v2/previousVersions/PolygonZkEVMEtrogPrevious.sol @@ -1,34 +1,34 @@ -// // SPDX-License-Identifier: AGPL-3.0 -// pragma solidity 0.8.20; +// SPDX-License-Identifier: AGPL-3.0 +pragma solidity 0.8.20; -// import "./PolygonRollupBaseEtrogPrevious.sol"; +import "./PolygonRollupBaseEtrogPrevious.sol"; -// /** -// * Contract responsible for managing the states and the updates of L2 network. -// * There will be a trusted sequencer, which is able to send transactions. -// * Any user can force some transaction and the sequencer will have a timeout to add them in the queue. -// * The sequenced state is deterministic and can be precalculated before it's actually verified by a zkProof. -// * The aggregators will be able to verify the sequenced state with zkProofs and therefore make available the withdrawals from L2 network. -// * To enter and exit of the L2 network will be used a PolygonZkEVMBridge smart contract that will be deployed in both networks. -// */ -// contract PolygonZkEVMEtrogPrevious is PolygonRollupBaseEtrogPrevious { -// /** -// * @param _globalExitRootManager Global exit root manager address -// * @param _pol POL token address -// * @param _bridgeAddress Bridge address -// * @param _rollupManager Global exit root manager address -// */ -// constructor( -// IPolygonZkEVMGlobalExitRootV2 _globalExitRootManager, -// IERC20Upgradeable _pol, -// IPolygonZkEVMBridgeV2 _bridgeAddress, -// PolygonRollupManager _rollupManager -// ) -// PolygonRollupBaseEtrogPrevious( -// _globalExitRootManager, -// _pol, -// _bridgeAddress, -// _rollupManager -// ) -// {} -// } +/** + * Contract responsible for managing the states and the updates of L2 network. + * There will be a trusted sequencer, which is able to send transactions. + * Any user can force some transaction and the sequencer will have a timeout to add them in the queue. + * The sequenced state is deterministic and can be precalculated before it's actually verified by a zkProof. + * The aggregators will be able to verify the sequenced state with zkProofs and therefore make available the withdrawals from L2 network. + * To enter and exit of the L2 network will be used a PolygonZkEVMBridge smart contract that will be deployed in both networks. 
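Both sequencing entry points restored in this patch charge rollupManager.getBatchFee() in POL for every non-forced batch, pulled from the caller with safeTransferFrom, so the trusted sequencer needs a matching allowance on the consensus contract before sending a sequence. A sketch of that pre-flight approval, reusing contract handles that appear in the tests later in this series; the handle names and the single-batch count are assumptions of this sketch, not part of the patch:

    // The consensus contract is the spender: it transfers the collateral from the
    // sequencer to the rollup manager inside sequenceBatches / sequenceBatchesValidium.
    const nonForcedBatches = 1n; // batches in the sequence that are not forced
    const batchFee = await rollupManagerContract.getBatchFee();
    await polTokenContract
        .connect(trustedSequencer)
        .approve(newZkEVMContract.target, batchFee * nonForcedBatches);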
+ */ +contract PolygonZkEVMEtrogPrevious is PolygonRollupBaseEtrogPrevious { + /** + * @param _globalExitRootManager Global exit root manager address + * @param _pol POL token address + * @param _bridgeAddress Bridge address + * @param _rollupManager Global exit root manager address + */ + constructor( + IPolygonZkEVMGlobalExitRootV2 _globalExitRootManager, + IERC20Upgradeable _pol, + IPolygonZkEVMBridgeV2 _bridgeAddress, + PolygonRollupManager _rollupManager + ) + PolygonRollupBaseEtrogPrevious( + _globalExitRootManager, + _pol, + _bridgeAddress, + _rollupManager + ) + {} +} diff --git a/test/contractsv2/PolygonRollupManager.test.ts b/test/contractsv2/PolygonRollupManager.test.ts index d06e511ec..c2aa2a0de 100644 --- a/test/contractsv2/PolygonRollupManager.test.ts +++ b/test/contractsv2/PolygonRollupManager.test.ts @@ -2074,6 +2074,10 @@ describe("Polygon Rollup Manager", () => { ) ).to.be.revertedWithCustomError(rollupManagerContract, "NewAccInputHashDoesNotExist"); + await expect( + rollupManagerContract.connect(admin).updateRollupByRollupAdmin(newZKEVMAddress, 10) + ).to.be.revertedWithCustomError(rollupManagerContract, "AllSequencedMustBeVerified"); + // Calcualte new globalExitroot const merkleTreeRollups = new MerkleTreeBridge(height); merkleTreeRollups.add(newLocalExitRoot); @@ -2090,6 +2094,7 @@ describe("Polygon Rollup Manager", () => { const merkleTreeGLobalExitRoot = new MerkleTreeBridge(height); merkleTreeGLobalExitRoot.add(leafValueUpdateGER2); const currentL1InfoRoot = merkleTreeGLobalExitRoot.getRoot(); + await ethers.provider.send("evm_setNextBlockTimestamp", [lastBlock2?.timestamp + 5]); // Verify batch await expect( @@ -2302,7 +2307,9 @@ describe("Polygon Rollup Manager", () => { expect(createdEtrogRollupType).to.be.deep.equal(expectedEtrogRollupType); // Validate upgrade OZ - await upgrades.validateUpgrade(PolygonZKEVMV2Factory, PolygonZKEVMEtrogFactory, { + const PolygonPreviousFactory = await ethers.getContractFactory("PolygonZkEVMEtrogPrevious"); + + await upgrades.validateUpgrade(PolygonPreviousFactory, PolygonZKEVMEtrogFactory, { constructorArgs: [ polygonZkEVMGlobalExitRoot.target, polTokenContract.target, @@ -2323,6 +2330,15 @@ describe("Polygon Rollup Manager", () => { .updateRollup(polygonZkEVMGlobalExitRoot.target, etrogRollupType, "0x") ).to.be.revertedWithCustomError(rollupManagerContract, "RollupMustExist"); + // Try update random address + await expect( + rollupManagerContract.connect(timelock).updateRollupByRollupAdmin(newZKEVMAddress, etrogRollupType) + ).to.be.revertedWithCustomError(rollupManagerContract, "OnlyRollupAdmin"); + + await expect( + rollupManagerContract.connect(admin).updateRollupByRollupAdmin(newZKEVMAddress, 0) + ).to.be.revertedWithCustomError(rollupManagerContract, "UpdateToOldRollupTypeID"); + // Try update same type await expect( rollupManagerContract.connect(timelock).updateRollup(newZKEVMAddress, 1, "0x") @@ -2349,6 +2365,10 @@ describe("Polygon Rollup Manager", () => { rollupManagerContract.connect(timelock).updateRollup(newZKEVMAddress, etrogRollupType, "0x") ).to.be.revertedWithCustomError(rollupManagerContract, "RollupTypeObsolete"); + await expect( + rollupManagerContract.connect(admin).updateRollupByRollupAdmin(newZKEVMAddress, etrogRollupType) + ).to.be.revertedWithCustomError(rollupManagerContract, "RollupTypeObsolete"); + await snapshotUpdateRollup.restore(); expect(await upgrades.erc1967.getImplementationAddress(newZKEVMAddress as string)).to.be.equal( diff --git a/test/contractsv2/PolygonRollupManagerUpgrade.test.ts 
b/test/contractsv2/PolygonRollupManagerUpgrade.test.ts index d4f666cc1..745641356 100644 --- a/test/contractsv2/PolygonRollupManagerUpgrade.test.ts +++ b/test/contractsv2/PolygonRollupManagerUpgrade.test.ts @@ -145,7 +145,6 @@ describe("Polygon Rollup manager upgraded", () => { // deploy globalExitRoot const PolygonZkEVMGlobalExitRootFactory = await ethers.getContractFactory("PolygonZkEVMGlobalExitRootV2"); polygonZkEVMGlobalExitRoot = (await upgrades.deployProxy(PolygonZkEVMGlobalExitRootFactory, [], { - initializer: false, constructorArgs: [precalculatezkEVM, precalculateBridgeAddress], unsafeAllow: ["constructor", "state-variable-immutable"], })) as any; @@ -175,8 +174,9 @@ describe("Polygon Rollup manager upgraded", () => { expect(precalculateBridgeAddress).to.be.equal(polygonZkEVMBridgeContract.target); expect(precalculatezkEVM).to.be.equal(polygonZkEVMContract.target); - const PolygonRollupManagerFactory = await ethers.getContractFactory("PolygonRollupManagerMock"); - rollupManagerContract = PolygonRollupManagerFactory.attach(polygonZkEVMContract.target) as any; + const PolygonRollupManagerFactory = await ethers.getContractFactory("PolygonRollupManagerPrevious"); + const PolygonRollupManagerFactoryCurrent = await ethers.getContractFactory("PolygonRollupManagerMock"); + rollupManagerContract = PolygonRollupManagerFactoryCurrent.attach(polygonZkEVMContract.target) as any; await polygonZkEVMContract.initialize( { @@ -243,23 +243,20 @@ describe("Polygon Rollup manager upgraded", () => { ], }, }); - }); - it("Cannot initialize again", async () => { - await expect( - rollupManagerContract.initialize( - trustedAggregator.address, - pendingStateTimeoutDefault, - trustedAggregatorTimeout, - admin.address, - timelock.address, - emergencyCouncil.address, - timelock.address, - verifierContract.target, - forkID, - chainID - ) - ).to.be.revertedWith("Initializable: contract is already initialized"); + const txRollupManager2 = await upgrades.upgradeProxy( + polygonZkEVMContract.target, + PolygonRollupManagerFactoryCurrent, + { + constructorArgs: [ + polygonZkEVMGlobalExitRoot.target, + polTokenContract.target, + polygonZkEVMBridgeContract.target, + ], + unsafeAllow: ["constructor", "state-variable-immutable"], + unsafeAllowRenames: false, + } + ); }); it("should check the initalized parameters", async () => { @@ -636,20 +633,10 @@ describe("Polygon Rollup manager upgraded", () => { // Sequence Batches const currentTime = Number((await ethers.provider.getBlock("latest"))?.timestamp); let currentLastBatchSequenced = 1; - - await expect( - newZkEVMContract - .connect(trustedSequencer) - .sequenceBatches([sequence], currentTime, currentLastBatchSequenced++, trustedSequencer.address) - ).to.emit(newZkEVMContract, "SequenceBatches"); - - const lastBlock = await ethers.provider.getBlock("latest"); - const lastBlockHash = lastBlock?.parentHash; - const lastGlobalExitRootS = await polygonZkEVMGlobalExitRoot.getLastGlobalExitRoot(); + const indexL1infoRoot = 0; const height = 32; const merkleTreeGLobalExitRoot = new MerkleTreeBridge(height); - const leafValueJs = calculateGlobalExitRootLeaf(lastGlobalExitRootS, lastBlockHash, lastBlock?.timestamp); //merkleTreeGLobalExitRoot.add(leafValueJs); const rootSC = await polygonZkEVMGlobalExitRoot.getRoot(); @@ -665,6 +652,19 @@ describe("Polygon Rollup manager upgraded", () => { trustedSequencer.address, ethers.ZeroHash ); + + await expect( + newZkEVMContract + .connect(trustedSequencer) + .sequenceBatches( + [sequence], + indexL1infoRoot, + currentTime, + 
expectedAccInputHash2, + trustedSequencer.address + ) + ).to.emit(newZkEVMContract, "SequenceBatches"); + // calcualte accINputHash expect(await newZkEVMContract.lastAccInputHash()).to.be.equal(expectedAccInputHash2); @@ -780,9 +780,7 @@ describe("Polygon Rollup manager upgraded", () => { ) ) .to.emit(rollupManagerContract, "VerifyBatchesTrustedAggregator") - .withArgs(newCreatedRollupID, newVerifiedBatch, newStateRoot, newLocalExitRoot, trustedAggregator.address) - .to.emit(polygonZkEVMGlobalExitRoot, "UpdateL1InfoTree") - .withArgs(ethers.ZeroHash, rootRollups); + .withArgs(newCreatedRollupID, newVerifiedBatch, newStateRoot, newLocalExitRoot, trustedAggregator.address); const finalAggregatorMatic = await polTokenContract.balanceOf(beneficiary.address); @@ -981,13 +979,6 @@ describe("Polygon Rollup manager upgraded", () => { } as BatchDataStructEtrog; const snapshot3 = await takeSnapshot(); - // Sequence Batches - await expect( - newZkEVMContract - .connect(trustedSequencer) - .sequenceBatches([sequenceForced], currentTime, currentLastBatchSequenced++, trustedSequencer.address) - ).to.emit(newZkEVMContract, "SequenceBatches"); - const expectedAccInputHash3 = calculateAccInputHashetrog( expectedAccInputHash2, ethers.keccak256(l2txDataForceBatch), @@ -996,6 +987,20 @@ describe("Polygon Rollup manager upgraded", () => { trustedSequencer.address, forcedBlock?.parentHash ); + + // Sequence Batches + await expect( + newZkEVMContract + .connect(trustedSequencer) + .sequenceBatches( + [sequenceForced], + indexL1infoRoot, + currentTime, + expectedAccInputHash3, + trustedSequencer.address + ) + ).to.emit(newZkEVMContract, "SequenceBatches"); + // calcualte accINputHash expect(await newZkEVMContract.lastAccInputHash()).to.be.equal(expectedAccInputHash3); @@ -1350,24 +1355,12 @@ describe("Polygon Rollup manager upgraded", () => { const currentTime = Number((await ethers.provider.getBlock("latest"))?.timestamp); const currentLastBatchSequenced = 1; - await expect( - newZkEVMContract - .connect(trustedSequencer) - .sequenceBatches([sequence], currentTime, currentLastBatchSequenced, trustedSequencer.address) - ).to.emit(newZkEVMContract, "SequenceBatches"); - - const sequencedBatchData2 = await rollupManagerContract.getRollupSequencedBatches(newCreatedRollupID, 2); - - const currnetRollup = await rollupManagerContract.rollupIDToRollupData(newCreatedRollupID); - expect(currnetRollup.lastBatchSequenced).to.be.equal(2); - - const lastBlock = await ethers.provider.getBlock("latest"); const height = 32; - const merkleTreeGLobalExitRoot = new MerkleTreeBridge(height); const rootSC = await polygonZkEVMGlobalExitRoot.getRoot(); const rootJS = merkleTreeGLobalExitRoot.getRoot(); + const indexL1infoRoot = 0; expect(rootSC).to.be.equal(rootJS); @@ -1379,6 +1372,22 @@ describe("Polygon Rollup manager upgraded", () => { trustedSequencer.address, ethers.ZeroHash ); + + await expect( + newZkEVMContract + .connect(trustedSequencer) + .sequenceBatches( + [sequence], + indexL1infoRoot, + currentTime, + expectedAccInputHash2, + trustedSequencer.address + ) + ).to.emit(newZkEVMContract, "SequenceBatches"); + + const currnetRollup = await rollupManagerContract.rollupIDToRollupData(newCreatedRollupID); + expect(currnetRollup.lastBatchSequenced).to.be.equal(2); + // calcualte accINputHash expect(await newZkEVMContract.lastAccInputHash()).to.be.equal(expectedAccInputHash2); From 5184e5b8f01b04d9a7fbb9f7bb7c2ced31389fc5 Mon Sep 17 00:00:00 2001 From: invocamanman Date: Tue, 18 Jun 2024 14:13:28 +0200 Subject: [PATCH 4/4] fix 
update totalVerifiedBatches!

---
 contracts/v2/PolygonRollupManager.sol | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/contracts/v2/PolygonRollupManager.sol b/contracts/v2/PolygonRollupManager.sol
index 4b04307d1..33b4fe196 100644
--- a/contracts/v2/PolygonRollupManager.sol
+++ b/contracts/v2/PolygonRollupManager.sol
@@ -791,13 +791,19 @@ contract PolygonRollupManager is
         // Update totalSequencedBatches
         totalSequencedBatches -= lastBatchSequenced - targetBatch;
 
-        // Clean pending state if any
+        // Check pending state
         if (rollup.lastPendingState > 0) {
+            // Update total verified batches
+            uint64 currentLastVerifiedBatch = _getLastVerifiedBatch(rollup);
+            totalVerifiedBatches -=
+                currentLastVerifiedBatch -
+                rollup.lastVerifiedBatch;
+
             rollup.lastPendingState = 0;
             rollup.lastPendingStateConsolidated = 0;
         }
 
-        // Callback the consensus contract
+        // Callback the consensus contract to rollback the batches
         rollupContract.rollbackBatches(
             targetBatch,
             rollup.sequencedBatches[targetBatch].accInputHash
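The final hunk keeps totalVerifiedBatches consistent on rollback: if the rolled-back rollup still had a pending state, the batches that were counted as verified only inside that pending state are subtracted before the pending state is cleared. A purely illustrative numeric sketch of that accounting, with made-up values and an assumed record shape (pendingStateLastVerifiedBatch stands in for whatever _getLastVerifiedBatch reads when lastPendingState > 0):

    // Hypothetical figures, not taken from the tests in this series.
    let totalVerifiedBatches = 100n; // global counter across all rollups
    const rollup = {
        lastVerifiedBatch: 40n, // verified batches already consolidated
        lastPendingState: 2n, // a pending state is still open
        pendingStateLastVerifiedBatch: 45n, // verified only inside the pending state
    };
    const currentLastVerifiedBatch =
        rollup.lastPendingState > 0n ? rollup.pendingStateLastVerifiedBatch : rollup.lastVerifiedBatch;
    // Without the fix, the 5 pending-only batches would remain counted after the rollback.
    totalVerifiedBatches -= currentLastVerifiedBatch - rollup.lastVerifiedBatch; // 100n - 5n = 95n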