From ec3f63440e6e4f8d39966446c37e71ff6db4be75 Mon Sep 17 00:00:00 2001 From: Inphi Date: Tue, 24 Sep 2024 10:33:25 -0400 Subject: [PATCH 001/116] ci: Sanitize op-program for unsupported instructions (#12007) --- .circleci/config.yml | 3 +++ cannon/Makefile | 8 ++++++++ 2 files changed, 11 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index dccd91867d5b..6cab0308e927 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1048,6 +1048,9 @@ jobs: - restore_cache: name: Restore cannon prestate cache key: cannon-prestate-{{ checksum "./cannon/bin/cannon" }}-{{ checksum "op-program/bin/op-program-client.elf" }} + - run: + name: Sanitize op-program guest + command: make -f cannon/Makefile sanitize-program GUEST_PROGRAM=op-program/bin/op-program-client.elf - run: name: generate cannon prestate command: make cannon-prestate diff --git a/cannon/Makefile b/cannon/Makefile index 0f3836fb62fb..d6a1d85eff89 100644 --- a/cannon/Makefile +++ b/cannon/Makefile @@ -22,6 +22,14 @@ clean: elf: make -C ./testdata/example elf +sanitize-program: + @if ! 
{ mips-linux-gnu-objdump -d -j .text $$GUEST_PROGRAM | awk '{print $3}' | grep -Ew -m1 '(bgezal|bltzal)'; }; then \ + echo "guest program is sanitized for unsupported instructions"; \ + else \ + echo "found unsupported instructions in the guest program"; \ + exit 1; \ + fi + contract: cd ../packages/contracts-bedrock && forge build From 308ce74c7ab82a7a4a4fb5dd709224af316d2921 Mon Sep 17 00:00:00 2001 From: Maurelian Date: Tue, 24 Sep 2024 10:45:42 -0400 Subject: [PATCH 002/116] OPSM: Deploy Permissioned Game (#12064) * chore: fix semver lock * fix: no permissionless root, remove hash from 0xdead * fix: use 0xdead root properly * feat: add remaining fault proof support * chore: Update semver-lock * fix: Remove extra anchor root definition and restore aritfactsFs argument * feat: Add wip big blueprint code * Don't wrap input to deployBigBytecode with preamble * fix: off by one in deployBigBytecode * feat: more gas efficient blueprint deployment for permissioned game * Get the big deployments working * perf: more efficient preamble parsing * chore: snapshots + fix revert * test: skip FaultDisputeGameAddress since we don't deploy it yet * chore: cleanup --------- Co-authored-by: Matt Solomon Co-authored-by: Matthew Slipper --- .../deployer/integration_test/apply_test.go | 9 +- .../scripts/DeployImplementations.s.sol | 52 ++++++-- .../scripts/DeployOPChain.s.sol | 38 ++++-- packages/contracts-bedrock/semver-lock.json | 4 +- .../snapshots/abi/OPStackManager.json | 25 ++++ .../snapshots/abi/OPStackManagerInterop.json | 25 ++++ .../storageLayout/OPStackManager.json | 27 ++-- .../storageLayout/OPStackManagerInterop.json | 27 ++-- .../src/L1/OPStackManager.sol | 115 ++++++++++++++---- .../src/libraries/Blueprint.sol | 55 ++++++++- .../test/libraries/Blueprint.t.sol | 2 +- 11 files changed, 309 insertions(+), 70 deletions(-) diff --git a/op-chain-ops/deployer/integration_test/apply_test.go b/op-chain-ops/deployer/integration_test/apply_test.go index 
6d673ed03791..b69595b67761 100644 --- a/op-chain-ops/deployer/integration_test/apply_test.go +++ b/op-chain-ops/deployer/integration_test/apply_test.go @@ -27,6 +27,8 @@ import ( const TestParams = ` participants: - el_type: geth + el_extra_params: + - "--gcmode=archive" cl_type: lighthouse network_params: prefunded_accounts: '{ "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266": { "balance": "1000000ETH" } }' @@ -41,6 +43,7 @@ network_params: }' network_id: "77799777" seconds_per_slot: 3 + genesis_delay: 0 ` type deployerKey struct{} @@ -56,7 +59,7 @@ func (d *deployerKey) String() string { func TestEndToEndApply(t *testing.T) { kurtosisutil.Test(t) - lgr := testlog.Logger(t, slog.LevelInfo) + lgr := testlog.Logger(t, slog.LevelDebug) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -189,6 +192,10 @@ func TestEndToEndApply(t *testing.T) { {"DelayedWETHPermissionlessGameProxyAddress", chainState.DelayedWETHPermissionlessGameProxyAddress}, } for _, addr := range chainAddrs { + // TODO Delete this `if`` block once FaultDisputeGameAddress is deployed. 
+ if addr.name == "FaultDisputeGameAddress" { + continue + } t.Run(fmt.Sprintf("chain %s - %s", chainState.ID, addr.name), func(t *testing.T) { code, err := l1Client.CodeAt(ctx, addr.addr, nil) require.NoError(t, err) diff --git a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol index 433b1573efe4..81cafa89c272 100644 --- a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol +++ b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol @@ -10,6 +10,7 @@ import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; import { Constants } from "src/libraries/Constants.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; +import { Bytes } from "src/libraries/Bytes.sol"; import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; import { Proxy } from "src/universal/Proxy.sol"; @@ -23,6 +24,7 @@ import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; import { MIPS } from "src/cannon/MIPS.sol"; import { DisputeGameFactory } from "src/dispute/DisputeGameFactory.sol"; import { AnchorStateRegistry } from "src/dispute/AnchorStateRegistry.sol"; +import { PermissionedDisputeGame } from "src/dispute/PermissionedDisputeGame.sol"; import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; @@ -514,10 +516,11 @@ contract DeployImplementations is Script { blueprints.l1ChugSplashProxy = deployBytecode(Blueprint.blueprintDeployerBytecode(type(L1ChugSplashProxy).creationCode), salt); blueprints.resolvedDelegateProxy = deployBytecode(Blueprint.blueprintDeployerBytecode(type(ResolvedDelegateProxy).creationCode), salt); blueprints.anchorStateRegistry = deployBytecode(Blueprint.blueprintDeployerBytecode(type(AnchorStateRegistry).creationCode), salt); + (blueprints.permissionedDisputeGame1, blueprints.permissionedDisputeGame2) = 
deployBigBytecode(type(PermissionedDisputeGame).creationCode, salt); vm.stopBroadcast(); // forgefmt: disable-end - OPStackManager.ImplementationSetter[] memory setters = new OPStackManager.ImplementationSetter[](7); + OPStackManager.ImplementationSetter[] memory setters = new OPStackManager.ImplementationSetter[](9); setters[0] = OPStackManager.ImplementationSetter({ name: "L1ERC721Bridge", info: OPStackManager.Implementation(address(_dio.l1ERC721BridgeImpl()), L1ERC721Bridge.initialize.selector) @@ -543,13 +546,22 @@ contract DeployImplementations is Script { name: "L1StandardBridge", info: OPStackManager.Implementation(address(_dio.l1StandardBridgeImpl()), L1StandardBridge.initialize.selector) }); - setters[6] = OPStackManager.ImplementationSetter({ name: "DisputeGameFactory", info: OPStackManager.Implementation( address(_dio.disputeGameFactoryImpl()), DisputeGameFactory.initialize.selector ) }); + setters[7] = OPStackManager.ImplementationSetter({ + name: "DelayedWETH", + info: OPStackManager.Implementation(address(_dio.delayedWETHImpl()), DelayedWETH.initialize.selector) + }); + setters[8] = OPStackManager.ImplementationSetter({ + name: "MIPS", + // MIPS is a singleton for all chains, so it doesn't need to be initialized, so the + // selector is just `bytes4(0)`. + info: OPStackManager.Implementation(address(_dio.mipsSingleton()), bytes4(0)) + }); // This call contains a broadcast to deploy OPSM which is proxied. 
OPStackManager opsmProxy = createOPSMContract(_dii, _dio, blueprints, release, setters); @@ -617,14 +629,14 @@ contract DeployImplementations is Script { // The fault proofs contracts are configured as follows: // | Contract | Proxied | Deployment | MCP Ready | // |-------------------------|---------|-----------------------------------|------------| - // | DisputeGameFactory | Yes | Bespoke | Yes | X - // | AnchorStateRegistry | Yes | Bespoke | No | X - // | FaultDisputeGame | No | Bespoke | No | Todo - // | PermissionedDisputeGame | No | Bespoke | No | Todo - // | DelayedWETH | Yes | Two bespoke (one per DisputeGame) | No | Todo: Proxies. - // | PreimageOracle | No | Shared | N/A | X - // | MIPS | No | Shared | N/A | X - // | OptimismPortal2 | Yes | Shared | No | X + // | DisputeGameFactory | Yes | Bespoke | Yes | + // | AnchorStateRegistry | Yes | Bespoke | No | + // | FaultDisputeGame | No | Bespoke | No | Not yet supported by OPCM + // | PermissionedDisputeGame | No | Bespoke | No | + // | DelayedWETH | Yes | Two bespoke (one per DisputeGame) | No | + // | PreimageOracle | No | Shared | N/A | + // | MIPS | No | Shared | N/A | + // | OptimismPortal2 | Yes | Shared | No | // // This script only deploys the shared contracts. The bespoke contracts are deployed by // `DeployOPChain.s.sol`. When the shared contracts are proxied, the contracts deployed here are @@ -731,6 +743,26 @@ contract DeployImplementations is Script { } require(newContract_ != address(0), "DeployImplementations: create2 failed"); } + + function deployBigBytecode( + bytes memory _bytecode, + bytes32 _salt + ) + public + returns (address newContract1_, address newContract2_) + { + // Preamble needs 3 bytes. 
+ uint256 maxInitCodeSize = 24576 - 3; + require(_bytecode.length > maxInitCodeSize, "DeployImplementations: Use deployBytecode instead"); + + bytes memory part1Slice = Bytes.slice(_bytecode, 0, maxInitCodeSize); + bytes memory part1 = Blueprint.blueprintDeployerBytecode(part1Slice); + bytes memory part2Slice = Bytes.slice(_bytecode, maxInitCodeSize, _bytecode.length - maxInitCodeSize); + bytes memory part2 = Blueprint.blueprintDeployerBytecode(part2Slice); + + newContract1_ = deployBytecode(part1, _salt); + newContract2_ = deployBytecode(part2, _salt); + } } // Similar to how DeploySuperchain.s.sol contains a lot of comments to thoroughly document the script diff --git a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol index 50fd9060ae91..e0df48cc6029 100644 --- a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol @@ -11,6 +11,7 @@ import { BaseDeployIO } from "scripts/utils/BaseDeployIO.sol"; import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; import { Constants } from "src/libraries/Constants.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; @@ -23,7 +24,7 @@ import { DisputeGameFactory } from "src/dispute/DisputeGameFactory.sol"; import { AnchorStateRegistry } from "src/dispute/AnchorStateRegistry.sol"; import { FaultDisputeGame } from "src/dispute/FaultDisputeGame.sol"; import { PermissionedDisputeGame } from "src/dispute/PermissionedDisputeGame.sol"; -import { GameType, GameTypes, Hash, OutputRoot } from "src/dispute/lib/Types.sol"; +import { Claim, GameType, GameTypes, Hash, OutputRoot } from "src/dispute/lib/Types.sol"; import { OPStackManager } from "src/L1/OPStackManager.sol"; import { OptimismPortal2 } from "src/L1/OptimismPortal2.sol"; @@ 
-201,7 +202,7 @@ contract DeployOPChainOutput is BaseDeployIO { address(_disputeGameFactoryProxy), address(_anchorStateRegistryProxy), address(_anchorStateRegistryImpl), - address(_faultDisputeGame), + // address(_faultDisputeGame), address(_permissionedDisputeGame), address(_delayedWETHPermissionedGameProxy), address(_delayedWETHPermissionlessGameProxy) @@ -289,8 +290,8 @@ contract DeployOPChainOutput is BaseDeployIO { // -------- Deployment Assertions -------- function assertValidDeploy(DeployOPChainInput _doi) internal { - assertValidAnchorStateRegistryProxy(_doi); assertValidAnchorStateRegistryImpl(_doi); + assertValidAnchorStateRegistryProxy(_doi); assertValidDelayedWETHs(_doi); assertValidDisputeGameFactory(_doi); assertValidL1CrossDomainMessenger(_doi); @@ -298,9 +299,23 @@ contract DeployOPChainOutput is BaseDeployIO { assertValidL1StandardBridge(_doi); assertValidOptimismMintableERC20Factory(_doi); assertValidOptimismPortal(_doi); + assertValidPermissionedDisputeGame(_doi); assertValidSystemConfig(_doi); - // TODO Other FP assertions like the dispute games, anchor state registry, etc. 
- // TODO add initialization assertions + } + + function assertValidPermissionedDisputeGame(DeployOPChainInput _doi) internal view { + PermissionedDisputeGame game = permissionedDisputeGame(); + + require(GameType.unwrap(game.gameType()) == GameType.unwrap(GameTypes.PERMISSIONED_CANNON), "DPG-10"); + require(Claim.unwrap(game.absolutePrestate()) == bytes32(hex"dead"), "DPG-20"); + + OPStackManager opsm = _doi.opsmProxy(); + (address mips,) = opsm.implementations(opsm.latestRelease(), "MIPS"); + require(game.vm() == IBigStepper(mips), "DPG-30"); + + require(address(game.weth()) == address(delayedWETHPermissionedGameProxy()), "DPG-40"); + require(address(game.anchorStateRegistry()) == address(anchorStateRegistryProxy()), "DPG-50"); + require(game.l2ChainId() == _doi.l2ChainId(), "DPG-60"); } function assertValidAnchorStateRegistryProxy(DeployOPChainInput) internal { @@ -436,7 +451,14 @@ contract DeployOPChainOutput is BaseDeployIO { } function assertValidDisputeGameFactory(DeployOPChainInput) internal view { - // TODO add in once FP support is added. 
+ DisputeGameFactory factory = disputeGameFactoryProxy(); + + DeployUtils.assertInitialized({ _contractAddress: address(factory), _slot: 0, _offset: 0 }); + + require( + address(factory.gameImpls(GameTypes.PERMISSIONED_CANNON)) == address(permissionedDisputeGame()), "DF-10" + ); + require(factory.owner() == address(opChainProxyAdmin()), "DF-20"); } function assertValidDelayedWETHs(DeployOPChainInput) internal view { @@ -480,7 +502,7 @@ contract DeployOPChain is Script { vm.label(address(deployOutput.disputeGameFactoryProxy), "disputeGameFactoryProxy"); vm.label(address(deployOutput.anchorStateRegistryProxy), "anchorStateRegistryProxy"); vm.label(address(deployOutput.anchorStateRegistryImpl), "anchorStateRegistryImpl"); - vm.label(address(deployOutput.faultDisputeGame), "faultDisputeGame"); + // vm.label(address(deployOutput.faultDisputeGame), "faultDisputeGame"); vm.label(address(deployOutput.permissionedDisputeGame), "permissionedDisputeGame"); vm.label(address(deployOutput.delayedWETHPermissionedGameProxy), "delayedWETHPermissionedGameProxy"); vm.label(address(deployOutput.delayedWETHPermissionlessGameProxy), "delayedWETHPermissionlessGameProxy"); @@ -498,7 +520,7 @@ contract DeployOPChain is Script { _doo.set(_doo.disputeGameFactoryProxy.selector, address(deployOutput.disputeGameFactoryProxy)); _doo.set(_doo.anchorStateRegistryProxy.selector, address(deployOutput.anchorStateRegistryProxy)); _doo.set(_doo.anchorStateRegistryImpl.selector, address(deployOutput.anchorStateRegistryImpl)); - _doo.set(_doo.faultDisputeGame.selector, address(deployOutput.faultDisputeGame)); + // _doo.set(_doo.faultDisputeGame.selector, address(deployOutput.faultDisputeGame)); _doo.set(_doo.permissionedDisputeGame.selector, address(deployOutput.permissionedDisputeGame)); _doo.set(_doo.delayedWETHPermissionedGameProxy.selector, address(deployOutput.delayedWETHPermissionedGameProxy)); _doo.set( diff --git a/packages/contracts-bedrock/semver-lock.json 
b/packages/contracts-bedrock/semver-lock.json index db35c5b37429..7312208bee3e 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -32,8 +32,8 @@ "sourceCodeHash": "0xde4df0f9633dc0cdb1c9f634003ea5b0f7c5c1aebc407bc1b2f44c0ecf938649" }, "src/L1/OPStackManager.sol": { - "initCodeHash": "0x4bffecbd95e63f9bd04ab8e3c6a804cc25e0cd151ebeb7f8d6b9330332e6eb20", - "sourceCodeHash": "0x850f1eacc77f1a5c680625196618bc4b4332cb68924d9eddd57c749bedcd7c94" + "initCodeHash": "0x5b451782192b8429f6822c88270c4f0dbd10342518c5695ecf4dff7b5ebfb4e4", + "sourceCodeHash": "0x4a9c242ce96471437ec97662d2365a7bda376db765c630a41cbe238811f1df51" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xbe2c0c81b3459014f287d8c89cdc0d27dde5d1f44e5d024fa1e4773ddc47c190", diff --git a/packages/contracts-bedrock/snapshots/abi/OPStackManager.json b/packages/contracts-bedrock/snapshots/abi/OPStackManager.json index 2ad0a4d1dc2c..9654f8f084ab 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPStackManager.json +++ b/packages/contracts-bedrock/snapshots/abi/OPStackManager.json @@ -50,6 +50,16 @@ "internalType": "address", "name": "anchorStateRegistry", "type": "address" + }, + { + "internalType": "address", + "name": "permissionedDisputeGame1", + "type": "address" + }, + { + "internalType": "address", + "name": "permissionedDisputeGame2", + "type": "address" } ], "internalType": "struct OPStackManager.Blueprints", @@ -298,6 +308,16 @@ "internalType": "address", "name": "anchorStateRegistry", "type": "address" + }, + { + "internalType": "address", + "name": "permissionedDisputeGame1", + "type": "address" + }, + { + "internalType": "address", + "name": "permissionedDisputeGame2", + "type": "address" } ], "internalType": "struct OPStackManager.Blueprints", @@ -499,6 +519,11 @@ "name": "EmptyInitcode", "type": "error" }, + { + "inputs": [], + "name": "IdentityPrecompileCallFailed", + "type": "error" + }, { "inputs": [], "name": "InvalidChainId", diff 
--git a/packages/contracts-bedrock/snapshots/abi/OPStackManagerInterop.json b/packages/contracts-bedrock/snapshots/abi/OPStackManagerInterop.json index 2ad0a4d1dc2c..9654f8f084ab 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPStackManagerInterop.json +++ b/packages/contracts-bedrock/snapshots/abi/OPStackManagerInterop.json @@ -50,6 +50,16 @@ "internalType": "address", "name": "anchorStateRegistry", "type": "address" + }, + { + "internalType": "address", + "name": "permissionedDisputeGame1", + "type": "address" + }, + { + "internalType": "address", + "name": "permissionedDisputeGame2", + "type": "address" } ], "internalType": "struct OPStackManager.Blueprints", @@ -298,6 +308,16 @@ "internalType": "address", "name": "anchorStateRegistry", "type": "address" + }, + { + "internalType": "address", + "name": "permissionedDisputeGame1", + "type": "address" + }, + { + "internalType": "address", + "name": "permissionedDisputeGame2", + "type": "address" } ], "internalType": "struct OPStackManager.Blueprints", @@ -499,6 +519,11 @@ "name": "EmptyInitcode", "type": "error" }, + { + "inputs": [], + "name": "IdentityPrecompileCallFailed", + "type": "error" + }, { "inputs": [], "name": "InvalidChainId", diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OPStackManager.json b/packages/contracts-bedrock/snapshots/storageLayout/OPStackManager.json index 881871a50dd1..c22ed7c2c8da 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OPStackManager.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OPStackManager.json @@ -13,32 +13,39 @@ "slot": "0", "type": "bool" }, - { - "bytes": "192", - "label": "blueprint", - "offset": 0, - "slot": "1", - "type": "struct OPStackManager.Blueprints" - }, { "bytes": "32", "label": "latestRelease", "offset": 0, - "slot": "7", + "slot": "1", "type": "string" }, { "bytes": "32", "label": "implementations", "offset": 0, - "slot": "8", + "slot": "2", "type": "mapping(string => mapping(string => struct 
OPStackManager.Implementation))" }, { "bytes": "32", "label": "systemConfigs", "offset": 0, - "slot": "9", + "slot": "3", "type": "mapping(uint256 => contract SystemConfig)" + }, + { + "bytes": "256", + "label": "blueprint", + "offset": 0, + "slot": "4", + "type": "struct OPStackManager.Blueprints" + }, + { + "bytes": "1600", + "label": "__gap", + "offset": 0, + "slot": "12", + "type": "uint256[50]" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OPStackManagerInterop.json b/packages/contracts-bedrock/snapshots/storageLayout/OPStackManagerInterop.json index 881871a50dd1..c22ed7c2c8da 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OPStackManagerInterop.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OPStackManagerInterop.json @@ -13,32 +13,39 @@ "slot": "0", "type": "bool" }, - { - "bytes": "192", - "label": "blueprint", - "offset": 0, - "slot": "1", - "type": "struct OPStackManager.Blueprints" - }, { "bytes": "32", "label": "latestRelease", "offset": 0, - "slot": "7", + "slot": "1", "type": "string" }, { "bytes": "32", "label": "implementations", "offset": 0, - "slot": "8", + "slot": "2", "type": "mapping(string => mapping(string => struct OPStackManager.Implementation))" }, { "bytes": "32", "label": "systemConfigs", "offset": 0, - "slot": "9", + "slot": "3", "type": "mapping(uint256 => contract SystemConfig)" + }, + { + "bytes": "256", + "label": "blueprint", + "offset": 0, + "slot": "4", + "type": "struct OPStackManager.Blueprints" + }, + { + "bytes": "1600", + "label": "__gap", + "offset": 0, + "slot": "12", + "type": "uint256[50]" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/src/L1/OPStackManager.sol b/packages/contracts-bedrock/src/L1/OPStackManager.sol index f7d71233005e..1a81430ff1e6 100644 --- a/packages/contracts-bedrock/src/L1/OPStackManager.sol +++ b/packages/contracts-bedrock/src/L1/OPStackManager.sol @@ -8,6 +8,10 @@ import { Initializable } from 
"@openzeppelin/contracts/proxy/utils/Initializable import { ISemver } from "src/universal/interfaces/ISemver.sol"; import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; +import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; +import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; +import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; +import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; import { Proxy } from "src/universal/Proxy.sol"; import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; @@ -23,7 +27,7 @@ import { DisputeGameFactory } from "src/dispute/DisputeGameFactory.sol"; import { AnchorStateRegistry } from "src/dispute/AnchorStateRegistry.sol"; import { FaultDisputeGame } from "src/dispute/FaultDisputeGame.sol"; import { PermissionedDisputeGame } from "src/dispute/PermissionedDisputeGame.sol"; -import { GameTypes } from "src/dispute/lib/Types.sol"; +import { Claim, Duration, GameType, GameTypes } from "src/dispute/lib/Types.sol"; import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; @@ -105,6 +109,8 @@ contract OPStackManager is ISemver, Initializable { address l1ChugSplashProxy; address resolvedDelegateProxy; address anchorStateRegistry; + address permissionedDisputeGame1; + address permissionedDisputeGame2; } /// @notice Inputs required when initializing the OPStackManager. To avoid 'StackTooDeep' errors, @@ -118,8 +124,8 @@ contract OPStackManager is ISemver, Initializable { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.4 - string public constant version = "1.0.0-beta.4"; + /// @custom:semver 1.0.0-beta.5 + string public constant version = "1.0.0-beta.5"; /// @notice Address of the SuperchainConfig contract shared by all chains. 
SuperchainConfig public immutable superchainConfig; @@ -127,12 +133,6 @@ contract OPStackManager is ISemver, Initializable { /// @notice Address of the ProtocolVersions contract shared by all chains. ProtocolVersions public immutable protocolVersions; - /// @notice Addresses of the Blueprint contracts. - /// This is internal because if public the autogenerated getter method would return a tuple of - /// addresses, but we want it to return a struct. This is also set via `initialize` because - /// we can't make this an immutable variable as it is a non-value type. - Blueprints internal blueprint; - /// @notice The latest release of the OP Stack Manager, as a string of the format `op-contracts/vX.Y.Z`. string public latestRelease; @@ -142,6 +142,16 @@ contract OPStackManager is ISemver, Initializable { /// @notice Maps an L2 Chain ID to the SystemConfig for that chain. mapping(uint256 => SystemConfig) public systemConfigs; + /// @notice Addresses of the Blueprint contracts. + /// This is internal because if public the autogenerated getter method would return a tuple of + /// addresses, but we want it to return a struct. This is also set via `initialize` because + /// we can't make this an immutable variable as it is a non-value type. + Blueprints internal blueprint; + + /// @notice Storage gap for future modifications, so we can expand the number of blueprints + /// without affecting other storage variables. + uint256[50] private __gap; + // -------- Events -------- /// @notice Emitted when a new OP Stack chain is deployed. @@ -206,16 +216,6 @@ contract OPStackManager is ISemver, Initializable { bytes32 salt = bytes32(_input.l2ChainId); DeployOutput memory output; - // -------- TODO: Placeholders -------- - // For contracts we don't yet deploy, we set the outputs to dummy proxies so they have code to pass assertions. - // We do these first, that way the disputeGameFactoryProxy is set when passed to the SystemConfig input. 
- output.faultDisputeGame = FaultDisputeGame(deployProxy(l2ChainId, output.opChainProxyAdmin, "5")); - output.permissionedDisputeGame = PermissionedDisputeGame(deployProxy(l2ChainId, output.opChainProxyAdmin, "6")); - output.delayedWETHPermissionedGameProxy = - DelayedWETH(payable(deployProxy(l2ChainId, output.opChainProxyAdmin, "7"))); - output.delayedWETHPermissionlessGameProxy = - DelayedWETH(payable(deployProxy(l2ChainId, output.opChainProxyAdmin, "8"))); - // -------- Deploy Chain Singletons -------- // The ProxyAdmin is the owner of all proxies for the chain. We temporarily set the owner to @@ -266,6 +266,22 @@ contract OPStackManager is ISemver, Initializable { Blueprint.deployFrom(blueprint.anchorStateRegistry, salt, abi.encode(output.disputeGameFactoryProxy)) ); + // We have two delayed WETH contracts per chain, one for each of the permissioned and permissionless games. + output.delayedWETHPermissionlessGameProxy = + DelayedWETH(payable(deployProxy(l2ChainId, output.opChainProxyAdmin, "DelayedWETHPermissionlessGame"))); + output.delayedWETHPermissionedGameProxy = + DelayedWETH(payable(deployProxy(l2ChainId, output.opChainProxyAdmin, "DelayedWETHPermissionedGame"))); + + // While not a proxy, we deploy the PermissionedDisputeGame here as well because it's bespoke per chain. + output.permissionedDisputeGame = PermissionedDisputeGame( + Blueprint.deployFrom( + blueprint.permissionedDisputeGame1, + blueprint.permissionedDisputeGame2, + salt, + encodePermissionedDisputeGameConstructor(_input, output) + ) + ); + // -------- Set and Initialize Proxy Implementations -------- Implementation memory impl; bytes memory data; @@ -294,10 +310,20 @@ contract OPStackManager is ISemver, Initializable { data = encodeL1StandardBridgeInitializer(impl.initializer, output); upgradeAndCall(output.opChainProxyAdmin, address(output.l1StandardBridgeProxy), impl.logic, data); - // TODO: also call setImplementation() once the dispute games are deployed. 
+ impl = getLatestImplementation("DelayedWETH"); + data = encodeDelayedWETHInitializer(impl.initializer, _input); + upgradeAndCall(output.opChainProxyAdmin, address(output.delayedWETHPermissionedGameProxy), impl.logic, data); + upgradeAndCall(output.opChainProxyAdmin, address(output.delayedWETHPermissionlessGameProxy), impl.logic, data); + + // We set the initial owner to this contract, set game implementations, then transfer ownership. impl = getLatestImplementation("DisputeGameFactory"); data = encodeDisputeGameFactoryInitializer(impl.initializer, _input); upgradeAndCall(output.opChainProxyAdmin, address(output.disputeGameFactoryProxy), impl.logic, data); + output.disputeGameFactoryProxy.setImplementation( + GameTypes.PERMISSIONED_CANNON, IDisputeGame(address(output.permissionedDisputeGame)) + ); + output.disputeGameFactoryProxy.setInitBond(GameTypes.PERMISSIONED_CANNON, 0.08 ether); + output.disputeGameFactoryProxy.transferOwnership(address(output.opChainProxyAdmin)); impl.logic = address(output.anchorStateRegistryImpl); impl.initializer = AnchorStateRegistry.initialize.selector; @@ -387,7 +413,11 @@ contract OPStackManager is ISemver, Initializable { _output; // TODO make GameTypes.CANNON an input once FPs are supported return abi.encodeWithSelector( - _selector, _output.disputeGameFactoryProxy, _output.systemConfigProxy, superchainConfig, GameTypes.CANNON + _selector, + _output.disputeGameFactoryProxy, + _output.systemConfigProxy, + superchainConfig, + GameTypes.PERMISSIONED_CANNON ); } @@ -463,14 +493,16 @@ contract OPStackManager is ISemver, Initializable { function encodeDisputeGameFactoryInitializer( bytes4 _selector, - DeployInput memory _input + DeployInput memory ) internal view virtual returns (bytes memory) { - return abi.encodeWithSelector(_selector, _input.roles.opChainProxyAdminOwner); + // This contract must be the initial owner so we can set game implementations, then + // ownership is transferred after. 
+ return abi.encodeWithSelector(_selector, address(this)); } function encodeAnchorStateRegistryInitializer( @@ -488,6 +520,43 @@ contract OPStackManager is ISemver, Initializable { return abi.encodeWithSelector(_selector, startingAnchorRoots, superchainConfig); } + function encodeDelayedWETHInitializer( + bytes4 _selector, + DeployInput memory _input + ) + internal + view + virtual + returns (bytes memory) + { + return abi.encodeWithSelector(_selector, _input.roles.opChainProxyAdminOwner, superchainConfig); + } + + function encodePermissionedDisputeGameConstructor( + DeployInput memory _input, + DeployOutput memory _output + ) + internal + view + virtual + returns (bytes memory) + { + return abi.encode( + GameType.wrap(1), // Permissioned Cannon + Claim.wrap(bytes32(hex"dead")), // absolutePrestate + 73, // maxGameDepth + 30, // splitDepth + Duration.wrap(3 hours), // clockExtension + Duration.wrap(3.5 days), // maxClockDuration + IBigStepper(getLatestImplementation("MIPS").logic), + IDelayedWETH(payable(address(_output.delayedWETHPermissionedGameProxy))), + IAnchorStateRegistry(address(_output.anchorStateRegistryProxy)), + _input.l2ChainId, + _input.roles.proposer, + _input.roles.challenger + ); + } + /// @notice Returns default, standard config arguments for the SystemConfig initializer. /// This is used by subclasses to reduce code duplication. function defaultSystemConfigParams( diff --git a/packages/contracts-bedrock/src/libraries/Blueprint.sol b/packages/contracts-bedrock/src/libraries/Blueprint.sol index 2e0979e1c6bf..a7ddf1f9009b 100644 --- a/packages/contracts-bedrock/src/libraries/Blueprint.sol +++ b/packages/contracts-bedrock/src/libraries/Blueprint.sol @@ -20,6 +20,9 @@ library Blueprint { /// @notice Thrown when parsing a blueprint preamble and the resulting initcode is empty. error EmptyInitcode(); + /// @notice Thrown when call to the identity precompile fails. 
+ error IdentityPrecompileCallFailed(); + /// @notice Thrown when parsing a blueprint preamble and the bytecode does not contain the expected prefix bytes. error NotABlueprint(); @@ -56,7 +59,7 @@ library Blueprint { /// @notice Given bytecode as a sequence of bytes, parse the blueprint preamble and deconstruct /// the bytecode into the ERC version, preamble data and initcode. Reverts if the bytecode is /// not a valid blueprint contract according to ERC-5202. - function parseBlueprintPreamble(bytes memory _bytecode) internal pure returns (Preamble memory) { + function parseBlueprintPreamble(bytes memory _bytecode) internal view returns (Preamble memory) { if (_bytecode.length < 2 || _bytecode[0] != 0xFE || _bytecode[1] != 0x71) { revert NotABlueprint(); } @@ -77,18 +80,34 @@ library Blueprint { bytes memory preambleData = new bytes(dataLength); if (nLengthBytes != 0) { uint256 dataStart = 3 + nLengthBytes; + // This loop is very small, so not worth using the identity precompile like we do with initcode below. for (uint256 i = 0; i < dataLength; i++) { preambleData[i] = _bytecode[dataStart + i]; } } + // Parsing the initcode byte-by-byte is too costly for long initcode, so we perform a staticcall + // to the identity precompile at address(0x04) to copy the initcode. uint256 initcodeStart = 3 + nLengthBytes + dataLength; - bytes memory initcode = new bytes(_bytecode.length - initcodeStart); - for (uint256 i = 0; i < initcode.length; i++) { - initcode[i] = _bytecode[initcodeStart + i]; + uint256 initcodeLength = _bytecode.length - initcodeStart; + if (initcodeLength == 0) revert EmptyInitcode(); + + bytes memory initcode = new bytes(initcodeLength); + bool success; + assembly ("memory-safe") { + // Calculate the memory address of the input data (initcode) within _bytecode. + // - add(_bytecode, 32): Moves past the length field to the start of _bytecode's data. + // - add(..., initcodeStart): Adds the offset to reach the initcode within _bytecode. 
+ let inputData := add(add(_bytecode, 32), initcodeStart) + + // Calculate the memory address for the output data in initcode. + let outputData := add(initcode, 32) + + // Perform the staticcall to the identity precompile. + success := staticcall(gas(), 0x04, inputData, initcodeLength, outputData, initcodeLength) } - if (initcode.length == 0) revert EmptyInitcode(); + if (!success) revert IdentityPrecompileCallFailed(); return Preamble(ercVersion, preambleData, initcode); } @@ -112,6 +131,32 @@ library Blueprint { if (newContract_ == address(0)) revert DeploymentFailed(); } + /// @notice Parses the code at two target addresses as individual blueprints, concatentates them and then deploys + /// the resulting initcode with the given `_data` appended, i.e. `_data` is the ABI-encoded constructor arguments. + function deployFrom( + address _target1, + address _target2, + bytes32 _salt, + bytes memory _data + ) + internal + returns (address newContract_) + { + Preamble memory preamble1 = parseBlueprintPreamble(address(_target1).code); + if (preamble1.ercVersion != 0) revert UnsupportedERCVersion(preamble1.ercVersion); + if (preamble1.preambleData.length != 0) revert UnexpectedPreambleData(preamble1.preambleData); + + Preamble memory preamble2 = parseBlueprintPreamble(address(_target2).code); + if (preamble2.ercVersion != 0) revert UnsupportedERCVersion(preamble2.ercVersion); + if (preamble2.preambleData.length != 0) revert UnexpectedPreambleData(preamble2.preambleData); + + bytes memory initcode = bytes.concat(preamble1.initcode, preamble2.initcode, _data); + assembly ("memory-safe") { + newContract_ := create2(0, add(initcode, 0x20), mload(initcode), _salt) + } + if (newContract_ == address(0)) revert DeploymentFailed(); + } + /// @notice Convert a bytes array to a uint256. 
function bytesToUint(bytes memory _b) internal pure returns (uint256) { if (_b.length > 32) revert BytesArrayTooLong(); diff --git a/packages/contracts-bedrock/test/libraries/Blueprint.t.sol b/packages/contracts-bedrock/test/libraries/Blueprint.t.sol index 94a30ab99fb0..c94616a88e4f 100644 --- a/packages/contracts-bedrock/test/libraries/Blueprint.t.sol +++ b/packages/contracts-bedrock/test/libraries/Blueprint.t.sol @@ -22,7 +22,7 @@ contract BlueprintHarness { return Blueprint.blueprintDeployerBytecode(_initcode); } - function parseBlueprintPreamble(bytes memory _bytecode) public pure returns (Blueprint.Preamble memory) { + function parseBlueprintPreamble(bytes memory _bytecode) public view returns (Blueprint.Preamble memory) { return Blueprint.parseBlueprintPreamble(_bytecode); } From 43224ed7b8c8ad9d1f2fea3fe3fffd5d83a6943f Mon Sep 17 00:00:00 2001 From: Sam Stokes <35908605+bitwiseguy@users.noreply.github.com> Date: Tue, 24 Sep 2024 12:07:30 -0400 Subject: [PATCH 003/116] op-service: remove test print statement (#12083) --- op-service/client/lazy_dial.go | 1 - 1 file changed, 1 deletion(-) diff --git a/op-service/client/lazy_dial.go b/op-service/client/lazy_dial.go index 9a4e7cf6d872..9064fbe1fe09 100644 --- a/op-service/client/lazy_dial.go +++ b/op-service/client/lazy_dial.go @@ -66,7 +66,6 @@ func (l *LazyRPC) CallContext(ctx context.Context, result any, method string, ar if err := l.dial(ctx); err != nil { return err } - fmt.Println("checkpoin 1") return l.inner.CallContext(ctx, result, method, args...) } From 8b7693137fde7e31a350d6c794ab99d2d9e58910 Mon Sep 17 00:00:00 2001 From: Blaine Malone Date: Tue, 24 Sep 2024 13:39:50 -0400 Subject: [PATCH 004/116] feat: Implement release-based contract deployment (#12035) * forge install: superchain-registry v0.1.2 * fix: better clarity around when we're in a broadcast context. 
--- op-chain-ops/Makefile | 6 +- .../deployer/integration_test/apply_test.go | 1 + op-chain-ops/deployer/opsm/implementations.go | 2 + .../deployer/opsm/standard-versions.toml | 47 ++ op-chain-ops/deployer/opsm/standard.go | 8 + .../deployer/pipeline/implementations.go | 3 +- op-chain-ops/deployer/state/intent.go | 2 + op-chain-ops/interopgen/configs.go | 2 + op-chain-ops/interopgen/deploy.go | 1 + op-chain-ops/interopgen/deployments.go | 1 + op-chain-ops/interopgen/recipe.go | 7 +- op-chain-ops/script/cheatcodes_utilities.go | 75 +++ .../script/cheatcodes_utilities_test.go | 59 +++ packages/contracts-bedrock/.testdata/.gitkeep | 0 .../scripts/DeployImplementations.s.sol | 440 ++++++++++++++---- .../test/DeployImplementations.t.sol | 144 +++++- .../test/DeployOPChain.t.sol | 7 +- .../test/fixtures/standard-versions.toml | 47 ++ .../fixtures/test-deploy-auth-system-in.toml | 11 - .../fixtures/test-deploy-auth-system-out.toml | 1 - .../fixtures/test-deploy-superchain-in.toml | 8 - .../fixtures/test-deploy-superchain-out.toml | 5 - 22 files changed, 748 insertions(+), 129 deletions(-) create mode 100644 op-chain-ops/deployer/opsm/standard-versions.toml create mode 100644 op-chain-ops/deployer/opsm/standard.go create mode 100644 op-chain-ops/script/cheatcodes_utilities_test.go delete mode 100644 packages/contracts-bedrock/.testdata/.gitkeep create mode 100644 packages/contracts-bedrock/test/fixtures/standard-versions.toml delete mode 100644 packages/contracts-bedrock/test/fixtures/test-deploy-auth-system-in.toml delete mode 100644 packages/contracts-bedrock/test/fixtures/test-deploy-auth-system-out.toml delete mode 100644 packages/contracts-bedrock/test/fixtures/test-deploy-superchain-in.toml delete mode 100644 packages/contracts-bedrock/test/fixtures/test-deploy-superchain-out.toml diff --git a/op-chain-ops/Makefile b/op-chain-ops/Makefile index 630167f7b60e..262a989bc465 100644 --- a/op-chain-ops/Makefile +++ b/op-chain-ops/Makefile @@ -44,4 +44,8 @@ fuzz: go test 
$(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzAliasing ./crossdomain go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzVersionedNonce ./crossdomain -.PHONY: test fuzz op-deployer \ No newline at end of file + +sync-standard-version: + curl -Lo ./deployer/opsm/standard-versions.toml https://raw.githubusercontent.com/ethereum-optimism/superchain-registry/refs/heads/main/validation/standard/standard-versions.toml + +.PHONY: test fuzz op-deployer sync-standard-version \ No newline at end of file diff --git a/op-chain-ops/deployer/integration_test/apply_test.go b/op-chain-ops/deployer/integration_test/apply_test.go index b69595b67761..f3bcd5b3e4bd 100644 --- a/op-chain-ops/deployer/integration_test/apply_test.go +++ b/op-chain-ops/deployer/integration_test/apply_test.go @@ -115,6 +115,7 @@ func TestEndToEndApply(t *testing.T) { UseFaultProofs: true, FundDevAccounts: true, ContractArtifactsURL: (*state.ArtifactsURL)(artifactsURL), + ContractsRelease: "dev", Chains: []*state.ChainIntent{ { ID: id.Bytes32(), diff --git a/op-chain-ops/deployer/opsm/implementations.go b/op-chain-ops/deployer/opsm/implementations.go index ed20b55f1522..d60330440abc 100644 --- a/op-chain-ops/deployer/opsm/implementations.go +++ b/op-chain-ops/deployer/opsm/implementations.go @@ -23,6 +23,7 @@ type DeployImplementationsInput struct { UseInterop bool // if true, deploy Interop implementations SuperchainProxyAdmin common.Address + StandardVersionsToml string // contents of 'standard-versions.toml' file } func (input *DeployImplementationsInput) InputSet() bool { @@ -31,6 +32,7 @@ func (input *DeployImplementationsInput) InputSet() bool { type DeployImplementationsOutput struct { OpsmProxy common.Address + OpsmImpl common.Address DelayedWETHImpl common.Address OptimismPortalImpl common.Address PreimageOracleSingleton common.Address diff --git a/op-chain-ops/deployer/opsm/standard-versions.toml b/op-chain-ops/deployer/opsm/standard-versions.toml new file mode 100644 
index 000000000000..cb4d336a7336 --- /dev/null +++ b/op-chain-ops/deployer/opsm/standard-versions.toml @@ -0,0 +1,47 @@ +standard_release = "op-contracts/v1.6.0" + +[releases] + +# Contracts which are +# * unproxied singletons: specify a standard "address" +# * proxied : specify a standard "implementation_address" +# * neither : specify neither a standard "address" nor "implementation_address" + +# Fault Proofs https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.6.0 +[releases."op-contracts/v1.6.0"] +optimism_portal = { version = "3.10.0", implementation_address = "0xe2F826324b2faf99E513D16D266c3F80aE87832B" } +system_config = { version = "2.2.0", implementation_address = "0xF56D96B2535B932656d3c04Ebf51baBff241D886" } +anchor_state_registry = { version = "2.0.0" } +delayed_weth = { version = "1.1.0", implementation_address = "0x71e966Ae981d1ce531a7b6d23DC0f27B38409087" } +dispute_game_factory = { version = "1.0.0", implementation_address = "0xc641A33cab81C559F2bd4b21EA34C290E2440C2B" } +fault_dispute_game = { version = "1.3.0" } +permissioned_dispute_game = { version = "1.3.0" } +mips = { version = "1.1.0", address = "0x16e83cE5Ce29BF90AD9Da06D2fE6a15d5f344ce4" } +preimage_oracle = { version = "1.1.2", address = "0x9c065e11870B891D214Bc2Da7EF1f9DDFA1BE277" } +l1_cross_domain_messenger = { version = "2.3.0", implementation_address = "0xD3494713A5cfaD3F5359379DfA074E2Ac8C6Fd65" } +l1_erc721_bridge = { version = "2.1.0", implementation_address = "0xAE2AF01232a6c4a4d3012C5eC5b1b35059caF10d" } +l1_standard_bridge = { version = "2.1.0", implementation_address = "0x64B5a5Ed26DCb17370Ff4d33a8D503f0fbD06CfF" } +# l2_output_oracle -- This contract not used in fault proofs +optimism_mintable_erc20_factory = { version = "1.9.0", implementation_address = "0xE01efbeb1089D1d1dB9c6c8b135C934C0734c846" } + +# Fault Proofs https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.4.0 +[releases."op-contracts/v1.4.0"] +optimism_portal = { 
version = "3.10.0", implementation_address = "0xe2F826324b2faf99E513D16D266c3F80aE87832B" } +system_config = { version = "2.2.0", implementation_address = "0xF56D96B2535B932656d3c04Ebf51baBff241D886" } +anchor_state_registry = { version = "1.0.0" } +delayed_weth = { version = "1.0.0", implementation_address = "0x97988d5624F1ba266E1da305117BCf20713bee08" } +dispute_game_factory = { version = "1.0.0", implementation_address = "0xc641A33cab81C559F2bd4b21EA34C290E2440C2B" } +fault_dispute_game = { version = "1.2.0" } +permissioned_dispute_game = { version = "1.2.0" } +mips = { version = "1.0.1", address = "0x0f8EdFbDdD3c0256A80AD8C0F2560B1807873C9c" } +preimage_oracle = { version = "1.0.0", address = "0xD326E10B8186e90F4E2adc5c13a2d0C137ee8b34" } + +# MCP https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.3.0 +[releases."op-contracts/v1.3.0"] +l1_cross_domain_messenger = { version = "2.3.0", implementation_address = "0xD3494713A5cfaD3F5359379DfA074E2Ac8C6Fd65" } +l1_erc721_bridge = { version = "2.1.0", implementation_address = "0xAE2AF01232a6c4a4d3012C5eC5b1b35059caF10d" } +l1_standard_bridge = { version = "2.1.0", implementation_address = "0x64B5a5Ed26DCb17370Ff4d33a8D503f0fbD06CfF" } +l2_output_oracle = { version = "1.8.0", implementation_address = "0xF243BEd163251380e78068d317ae10f26042B292" } +optimism_mintable_erc20_factory = { version = "1.9.0", implementation_address = "0xE01efbeb1089D1d1dB9c6c8b135C934C0734c846" } +optimism_portal = { version = "2.5.0", implementation_address = "0x2D778797049FE9259d947D1ED8e5442226dFB589" } +system_config = { version = "1.12.0", implementation_address = "0xba2492e52F45651B60B8B38d4Ea5E2390C64Ffb1" } diff --git a/op-chain-ops/deployer/opsm/standard.go b/op-chain-ops/deployer/opsm/standard.go new file mode 100644 index 000000000000..56f0d7ada37b --- /dev/null +++ b/op-chain-ops/deployer/opsm/standard.go @@ -0,0 +1,8 @@ +package opsm + +import "embed" + +//go:embed standard-versions.toml +var 
StandardVersionsData string + +var _ embed.FS diff --git a/op-chain-ops/deployer/pipeline/implementations.go b/op-chain-ops/deployer/pipeline/implementations.go index f9e125e4150b..0dcda8feea19 100644 --- a/op-chain-ops/deployer/pipeline/implementations.go +++ b/op-chain-ops/deployer/pipeline/implementations.go @@ -46,10 +46,11 @@ func DeployImplementations(ctx context.Context, env *Env, artifactsFS foundry.St ChallengePeriodSeconds: big.NewInt(86400), ProofMaturityDelaySeconds: big.NewInt(604800), DisputeGameFinalityDelaySeconds: big.NewInt(302400), - Release: "op-contracts/v1.6.0", + Release: intent.ContractsRelease, SuperchainConfigProxy: st.SuperchainDeployment.SuperchainConfigProxyAddress, ProtocolVersionsProxy: st.SuperchainDeployment.ProtocolVersionsProxyAddress, SuperchainProxyAdmin: st.SuperchainDeployment.ProxyAdminAddress, + StandardVersionsToml: opsm.StandardVersionsData, UseInterop: false, }, ) diff --git a/op-chain-ops/deployer/state/intent.go b/op-chain-ops/deployer/state/intent.go index c737dab37dd0..17bedacd77b5 100644 --- a/op-chain-ops/deployer/state/intent.go +++ b/op-chain-ops/deployer/state/intent.go @@ -24,6 +24,8 @@ type Intent struct { ContractArtifactsURL *ArtifactsURL `json:"contractArtifactsURL" toml:"contractArtifactsURL"` + ContractsRelease string `json:"contractsVersion" toml:"contractsVersion"` + Chains []*ChainIntent `json:"chains" toml:"chains"` GlobalDeployOverrides map[string]any `json:"globalDeployOverrides" toml:"globalDeployOverrides"` diff --git a/op-chain-ops/interopgen/configs.go b/op-chain-ops/interopgen/configs.go index 0bc939ec0351..9abe9880fe8b 100644 --- a/op-chain-ops/interopgen/configs.go +++ b/op-chain-ops/interopgen/configs.go @@ -39,6 +39,8 @@ type OPSMImplementationsConfig struct { FaultProof SuperFaultProofConfig UseInterop bool // to deploy Interop implementation contracts, instead of the regular ones. 
+ + StandardVersionsToml string // serialized string of superchain-registry 'standard-versions.toml' file } type SuperchainConfig struct { diff --git a/op-chain-ops/interopgen/deploy.go b/op-chain-ops/interopgen/deploy.go index fde7485e04da..9da41e9894d8 100644 --- a/op-chain-ops/interopgen/deploy.go +++ b/op-chain-ops/interopgen/deploy.go @@ -172,6 +172,7 @@ func deploySuperchainToL1(l1Host *script.Host, superCfg *SuperchainConfig) (*Sup ProtocolVersionsProxy: superDeployment.ProtocolVersionsProxy, SuperchainProxyAdmin: superDeployment.SuperchainProxyAdmin, UseInterop: superCfg.Implementations.UseInterop, + StandardVersionsToml: opsm.StandardVersionsData, }) if err != nil { return nil, fmt.Errorf("failed to deploy Implementations contracts: %w", err) diff --git a/op-chain-ops/interopgen/deployments.go b/op-chain-ops/interopgen/deployments.go index 5b54c2286f9a..b6bb124d8e85 100644 --- a/op-chain-ops/interopgen/deployments.go +++ b/op-chain-ops/interopgen/deployments.go @@ -10,6 +10,7 @@ type L1Deployment struct { type Implementations struct { OpsmProxy common.Address `json:"OPSMProxy"` + OpsmImpl common.Address `json:"OPSMImpl"` DelayedWETHImpl common.Address `json:"DelayedWETHImpl"` OptimismPortalImpl common.Address `json:"OptimismPortalImpl"` PreimageOracleSingleton common.Address `json:"PreimageOracleSingleton"` diff --git a/op-chain-ops/interopgen/recipe.go b/op-chain-ops/interopgen/recipe.go index a1761f9f0dc8..4dbe58e4ca1f 100644 --- a/op-chain-ops/interopgen/recipe.go +++ b/op-chain-ops/interopgen/recipe.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opsm" "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" ) @@ -61,12 +62,13 @@ func (r *InteropDevRecipe) Build(addrs devkeys.Addresses) (*WorldConfig, error) l1Cfg.Prefund[superchainDeployer] = 
Ether(10_000_000) l1Cfg.Prefund[superchainProxyAdmin] = Ether(10_000_000) l1Cfg.Prefund[superchainConfigGuardian] = Ether(10_000_000) + superchainCfg := &SuperchainConfig{ ProxyAdminOwner: superchainProxyAdmin, ProtocolVersionsOwner: superchainProtocolVersionsOwner, Deployer: superchainDeployer, Implementations: OPSMImplementationsConfig{ - Release: "op-contracts/0.0.1", + Release: "dev", FaultProof: SuperFaultProofConfig{ WithdrawalDelaySeconds: big.NewInt(604800), MinProposalSizeBytes: big.NewInt(10000), @@ -74,7 +76,8 @@ func (r *InteropDevRecipe) Build(addrs devkeys.Addresses) (*WorldConfig, error) ProofMaturityDelaySeconds: big.NewInt(12), DisputeGameFinalityDelaySeconds: big.NewInt(6), }, - UseInterop: true, + UseInterop: true, + StandardVersionsToml: opsm.StandardVersionsData, }, SuperchainL1DeployConfig: genesis.SuperchainL1DeployConfig{ RequiredProtocolVersion: params.OPStackSupport, diff --git a/op-chain-ops/script/cheatcodes_utilities.go b/op-chain-ops/script/cheatcodes_utilities.go index 2ae53a52e910..022befa60627 100644 --- a/op-chain-ops/script/cheatcodes_utilities.go +++ b/op-chain-ops/script/cheatcodes_utilities.go @@ -3,9 +3,12 @@ package script import ( "fmt" "math/big" + "regexp" "strconv" "strings" + "github.com/BurntSushi/toml" + hdwallet "github.com/ethereum-optimism/go-ethereum-hdwallet" "github.com/ethereum/go-ethereum/accounts" @@ -188,5 +191,77 @@ func (c *CheatCodesPrecompile) Breakpoint_f7d39a8d(name string, v bool) { } } +// ParseTomlAddress_65e7c844 implements https://book.getfoundry.sh/cheatcodes/parse-toml. This +// method is not well optimized or implemented. It's optimized for quickly delivering OPCM. We +// can come back and clean it up more later. 
+func (c *CheatCodesPrecompile) ParseTomlAddress_65e7c844(tomlStr string, key string) (common.Address, error) { + var data map[string]any + if err := toml.Unmarshal([]byte(tomlStr), &data); err != nil { + return common.Address{}, fmt.Errorf("failed to parse TOML: %w", err) + } + + keys, err := SplitJSONPathKeys(key) + if err != nil { + return common.Address{}, fmt.Errorf("failed to split keys: %w", err) + } + + loc := data + for i, k := range keys { + value, ok := loc[k] + if !ok { + return common.Address{}, fmt.Errorf("key %q not found in TOML", k) + } + + if i == len(keys)-1 { + addrStr, ok := value.(string) + if !ok { + return common.Address{}, fmt.Errorf("key %q is not a string", key) + } + if !common.IsHexAddress(addrStr) { + return common.Address{}, fmt.Errorf("key %q is not a valid address", key) + } + return common.HexToAddress(addrStr), nil + } + + next, ok := value.(map[string]any) + if !ok { + return common.Address{}, fmt.Errorf("key %q is not a nested map", key) + } + loc = next + } + + panic("should never get here") +} + // unsupported //func (c *CheatCodesPrecompile) CreateWallet() {} + +// SplitJSONPathKeys splits a JSON path into keys. It supports bracket notation. There is a much +// better way to implement this, but I'm keeping this simple for now. 
+func SplitJSONPathKeys(path string) ([]string, error) { + var out []string + bracketSplit := regexp.MustCompile(`[\[\]]`).Split(path, -1) + for _, split := range bracketSplit { + if len(split) == 0 { + continue + } + + split = strings.ReplaceAll(split, "\"", "") + split = strings.ReplaceAll(split, " ", "") + + if !strings.HasPrefix(split, ".") { + out = append(out, split) + continue + } + + keys := strings.Split(split, ".") + for _, key := range keys { + if len(key) == 0 { + continue + } + out = append(out, key) + } + } + + return out, nil +} diff --git a/op-chain-ops/script/cheatcodes_utilities_test.go b/op-chain-ops/script/cheatcodes_utilities_test.go new file mode 100644 index 000000000000..23936a10e344 --- /dev/null +++ b/op-chain-ops/script/cheatcodes_utilities_test.go @@ -0,0 +1,59 @@ +package script + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +const tomlTest = ` +foo = "0x0d4CE7B6a91A35c31D7D62b327D19617c8da6F23" + +[foomap] +[foomap."bar.bump"] +baz = "0xff4ce7b6a91a35c31d7d62b327d19617c8da6f23" +` + +func TestSplitJSONPathKeys(t *testing.T) { + tests := []struct { + name string + path string + expected []string + }{ + { + "simple", + ".foo.bar", + []string{"foo", "bar"}, + }, + { + "bracket keys", + ".foo[\"hey\"].bar", + []string{"foo", "hey", "bar"}, + }, + { + "bracket keys with dots", + ".foo[\"hey.there\"].bar", + []string{"foo", "hey.there", "bar"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := SplitJSONPathKeys(tt.path) + require.NoError(t, err) + require.Equal(t, tt.expected, got) + }) + } +} + +func TestParseTomlAddress(t *testing.T) { + c := &CheatCodesPrecompile{} + + addr, err := c.ParseTomlAddress_65e7c844(tomlTest, "foo") + require.NoError(t, err) + require.Equal(t, common.HexToAddress("0x0d4ce7b6a91a35c31d7d62b327d19617c8da6f23"), addr) + + addr, err = c.ParseTomlAddress_65e7c844(tomlTest, "foomap[\"bar.bump\"].baz") + 
require.NoError(t, err) + require.Equal(t, common.HexToAddress("0xff4ce7b6a91a35c31d7d62b327d19617c8da6f23"), addr) +} diff --git a/packages/contracts-bedrock/.testdata/.gitkeep b/packages/contracts-bedrock/.testdata/.gitkeep deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol index 81cafa89c272..df950fed71f2 100644 --- a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol +++ b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol @@ -62,6 +62,8 @@ contract DeployImplementationsInput is BaseDeployIO { SuperchainConfig internal _superchainConfigProxy; ProtocolVersions internal _protocolVersionsProxy; + string internal _standardVersionsToml; + function set(bytes4 sel, uint256 _value) public { require(_value != 0, "DeployImplementationsInput: cannot set zero value"); @@ -84,6 +86,7 @@ contract DeployImplementationsInput is BaseDeployIO { function set(bytes4 sel, string memory _value) public { require(!LibString.eq(_value, ""), "DeployImplementationsInput: cannot set empty string"); if (sel == this.release.selector) _release = _value; + else if (sel == this.standardVersionsToml.selector) _standardVersionsToml = _value; else revert("DeployImplementationsInput: unknown selector"); } @@ -137,6 +140,11 @@ contract DeployImplementationsInput is BaseDeployIO { return _release; } + function standardVersionsToml() public view returns (string memory) { + require(!LibString.eq(_standardVersionsToml, ""), "DeployImplementationsInput: not set"); + return _standardVersionsToml; + } + function superchainConfigProxy() public view returns (SuperchainConfig) { require(address(_superchainConfigProxy) != address(0), "DeployImplementationsInput: not set"); return _superchainConfigProxy; @@ -159,6 +167,7 @@ contract DeployImplementationsInput is BaseDeployIO { contract DeployImplementationsOutput is BaseDeployIO { 
OPStackManager internal _opsmProxy; + OPStackManager internal _opsmImpl; DelayedWETH internal _delayedWETHImpl; OptimismPortal2 internal _optimismPortalImpl; PreimageOracle internal _preimageOracleSingleton; @@ -175,6 +184,7 @@ contract DeployImplementationsOutput is BaseDeployIO { // forgefmt: disable-start if (sel == this.opsmProxy.selector) _opsmProxy = OPStackManager(payable(_addr)); + else if (sel == this.opsmImpl.selector) _opsmImpl = OPStackManager(payable(_addr)); else if (sel == this.optimismPortalImpl.selector) _optimismPortalImpl = OptimismPortal2(payable(_addr)); else if (sel == this.delayedWETHImpl.selector) _delayedWETHImpl = DelayedWETH(payable(_addr)); else if (sel == this.preimageOracleSingleton.selector) _preimageOracleSingleton = PreimageOracle(_addr); @@ -190,12 +200,18 @@ contract DeployImplementationsOutput is BaseDeployIO { } function checkOutput(DeployImplementationsInput _dii) public { - address[] memory addrs = Solarray.addresses( + // With 12 addresses, we'd get a stack too deep error if we tried to do this inline as a + // single call to `Solarray.addresses`. So we split it into two calls. 
+ address[] memory addrs1 = Solarray.addresses( address(this.opsmProxy()), + address(this.opsmImpl()), address(this.optimismPortalImpl()), address(this.delayedWETHImpl()), address(this.preimageOracleSingleton()), - address(this.mipsSingleton()), + address(this.mipsSingleton()) + ); + + address[] memory addrs2 = Solarray.addresses( address(this.systemConfigImpl()), address(this.l1CrossDomainMessengerImpl()), address(this.l1ERC721BridgeImpl()), @@ -203,7 +219,8 @@ contract DeployImplementationsOutput is BaseDeployIO { address(this.optimismMintableERC20FactoryImpl()), address(this.disputeGameFactoryImpl()) ); - DeployUtils.assertValidContractAddresses(addrs); + + DeployUtils.assertValidContractAddresses(Solarray.extend(addrs1, addrs2)); assertValidDeploy(_dii); } @@ -214,6 +231,11 @@ contract DeployImplementationsOutput is BaseDeployIO { return _opsmProxy; } + function opsmImpl() public view returns (OPStackManager) { + DeployUtils.assertValidContractAddress(address(_opsmImpl)); + return _opsmImpl; + } + function optimismPortalImpl() public view returns (OptimismPortal2) { DeployUtils.assertValidContractAddress(address(_optimismPortalImpl)); return _optimismPortalImpl; @@ -474,26 +496,30 @@ contract DeployImplementations is Script { // Deploy and initialize a proxied OPStackManager. 
function createOPSMContract( DeployImplementationsInput _dii, - DeployImplementationsOutput, - OPStackManager.Blueprints memory blueprints, - string memory release, - OPStackManager.ImplementationSetter[] memory setters + DeployImplementationsOutput _dio, + OPStackManager.Blueprints memory _blueprints, + string memory _release, + OPStackManager.ImplementationSetter[] memory _setters ) internal virtual returns (OPStackManager opsmProxy_) { - SuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); - ProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); ProxyAdmin proxyAdmin = _dii.superchainProxyAdmin(); - vm.startBroadcast(msg.sender); + vm.broadcast(msg.sender); Proxy proxy = new Proxy(address(msg.sender)); - OPStackManager opsm = new OPStackManager(superchainConfigProxy, protocolVersionsProxy); + + deployOPContractsManagerImpl(_dii, _dio); + OPStackManager opsmImpl = _dio.opsmImpl(); OPStackManager.InitializerInputs memory initializerInputs = - OPStackManager.InitializerInputs(blueprints, setters, release, true); - proxy.upgradeToAndCall(address(opsm), abi.encodeWithSelector(opsm.initialize.selector, initializerInputs)); + OPStackManager.InitializerInputs(_blueprints, _setters, _release, true); + + vm.startBroadcast(msg.sender); + proxy.upgradeToAndCall( + address(opsmImpl), abi.encodeWithSelector(opsmImpl.initialize.selector, initializerInputs) + ); proxy.changeAdmin(address(proxyAdmin)); // transfer ownership of Proxy contract to the ProxyAdmin contract vm.stopBroadcast(); @@ -572,56 +598,148 @@ contract DeployImplementations is Script { // --- Core Contracts --- - function deploySystemConfigImpl(DeployImplementationsInput, DeployImplementationsOutput _dio) public virtual { - vm.broadcast(msg.sender); - SystemConfig systemConfigImpl = new SystemConfig(); + function deploySystemConfigImpl(DeployImplementationsInput _dii, DeployImplementationsOutput _dio) public virtual { + string memory release = _dii.release(); + string memory 
stdVerToml = _dii.standardVersionsToml(); + // Using snake case for contract name to match the TOML file in superchain-registry. + string memory contractName = "system_config"; + SystemConfig impl; + + address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); + if (existingImplementation != address(0)) { + impl = SystemConfig(existingImplementation); + } else if (isDevelopRelease(release)) { + // Deploy a new implementation for development builds. + vm.broadcast(msg.sender); + impl = new SystemConfig(); + } else { + revert(string.concat("DeployImplementations: failed to deploy release ", release)); + } - vm.label(address(systemConfigImpl), "SystemConfigImpl"); - _dio.set(_dio.systemConfigImpl.selector, address(systemConfigImpl)); + vm.label(address(impl), "SystemConfigImpl"); + _dio.set(_dio.systemConfigImpl.selector, address(impl)); } function deployL1CrossDomainMessengerImpl( - DeployImplementationsInput, + DeployImplementationsInput _dii, DeployImplementationsOutput _dio ) public virtual { - vm.broadcast(msg.sender); - L1CrossDomainMessenger l1CrossDomainMessengerImpl = new L1CrossDomainMessenger(); + string memory release = _dii.release(); + string memory stdVerToml = _dii.standardVersionsToml(); + string memory contractName = "l1_cross_domain_messenger"; + L1CrossDomainMessenger impl; + + address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); + if (existingImplementation != address(0)) { + impl = L1CrossDomainMessenger(existingImplementation); + } else if (isDevelopRelease(release)) { + vm.broadcast(msg.sender); + impl = new L1CrossDomainMessenger(); + } else { + revert(string.concat("DeployImplementations: failed to deploy release ", release)); + } - vm.label(address(l1CrossDomainMessengerImpl), "L1CrossDomainMessengerImpl"); - _dio.set(_dio.l1CrossDomainMessengerImpl.selector, address(l1CrossDomainMessengerImpl)); + vm.label(address(impl), "L1CrossDomainMessengerImpl"); + 
_dio.set(_dio.l1CrossDomainMessengerImpl.selector, address(impl)); } - function deployL1ERC721BridgeImpl(DeployImplementationsInput, DeployImplementationsOutput _dio) public virtual { - vm.broadcast(msg.sender); - L1ERC721Bridge l1ERC721BridgeImpl = new L1ERC721Bridge(); + function deployL1ERC721BridgeImpl( + DeployImplementationsInput _dii, + DeployImplementationsOutput _dio + ) + public + virtual + { + string memory release = _dii.release(); + string memory stdVerToml = _dii.standardVersionsToml(); + string memory contractName = "l1_erc721_bridge"; + L1ERC721Bridge impl; + + address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); + if (existingImplementation != address(0)) { + impl = L1ERC721Bridge(existingImplementation); + } else if (isDevelopRelease(release)) { + vm.broadcast(msg.sender); + impl = new L1ERC721Bridge(); + } else { + revert(string.concat("DeployImplementations: failed to deploy release ", release)); + } - vm.label(address(l1ERC721BridgeImpl), "L1ERC721BridgeImpl"); - _dio.set(_dio.l1ERC721BridgeImpl.selector, address(l1ERC721BridgeImpl)); + vm.label(address(impl), "L1ERC721BridgeImpl"); + _dio.set(_dio.l1ERC721BridgeImpl.selector, address(impl)); } - function deployL1StandardBridgeImpl(DeployImplementationsInput, DeployImplementationsOutput _dio) public virtual { - vm.broadcast(msg.sender); - L1StandardBridge l1StandardBridgeImpl = new L1StandardBridge(); + function deployL1StandardBridgeImpl( + DeployImplementationsInput _dii, + DeployImplementationsOutput _dio + ) + public + virtual + { + string memory release = _dii.release(); + string memory stdVerToml = _dii.standardVersionsToml(); + string memory contractName = "l1_standard_bridge"; + L1StandardBridge impl; + + address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); + if (existingImplementation != address(0)) { + impl = L1StandardBridge(payable(existingImplementation)); + } else if (isDevelopRelease(release)) { + 
vm.broadcast(msg.sender); + impl = new L1StandardBridge(); + } else { + revert(string.concat("DeployImplementations: failed to deploy release ", release)); + } - vm.label(address(l1StandardBridgeImpl), "L1StandardBridgeImpl"); - _dio.set(_dio.l1StandardBridgeImpl.selector, address(l1StandardBridgeImpl)); + vm.label(address(impl), "L1StandardBridgeImpl"); + _dio.set(_dio.l1StandardBridgeImpl.selector, address(impl)); } function deployOptimismMintableERC20FactoryImpl( - DeployImplementationsInput, + DeployImplementationsInput _dii, DeployImplementationsOutput _dio ) public virtual { + string memory release = _dii.release(); + string memory stdVerToml = _dii.standardVersionsToml(); + string memory contractName = "optimism_mintable_erc20_factory"; + OptimismMintableERC20Factory impl; + + address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); + if (existingImplementation != address(0)) { + impl = OptimismMintableERC20Factory(existingImplementation); + } else if (isDevelopRelease(release)) { + vm.broadcast(msg.sender); + impl = new OptimismMintableERC20Factory(); + } else { + revert(string.concat("DeployImplementations: failed to deploy release ", release)); + } + + vm.label(address(impl), "OptimismMintableERC20FactoryImpl"); + _dio.set(_dio.optimismMintableERC20FactoryImpl.selector, address(impl)); + } + + function deployOPContractsManagerImpl( + DeployImplementationsInput _dii, + DeployImplementationsOutput _dio + ) + public + virtual + { + SuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); + ProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); + vm.broadcast(msg.sender); - OptimismMintableERC20Factory optimismMintableERC20FactoryImpl = new OptimismMintableERC20Factory(); + // TODO: Eventually we will want to select the correct implementation based on the release. 
+ OPStackManager impl = new OPStackManager(superchainConfigProxy, protocolVersionsProxy); - vm.label(address(optimismMintableERC20FactoryImpl), "OptimismMintableERC20FactoryImpl"); - _dio.set(_dio.optimismMintableERC20FactoryImpl.selector, address(optimismMintableERC20FactoryImpl)); + vm.label(address(impl), "OPStackManagerImpl"); + _dio.set(_dio.opsmImpl.selector, address(impl)); } // --- Fault Proofs Contracts --- @@ -659,27 +777,46 @@ contract DeployImplementations is Script { public virtual { - uint256 proofMaturityDelaySeconds = _dii.proofMaturityDelaySeconds(); - uint256 disputeGameFinalityDelaySeconds = _dii.disputeGameFinalityDelaySeconds(); - - vm.broadcast(msg.sender); - OptimismPortal2 optimismPortalImpl = new OptimismPortal2({ - _proofMaturityDelaySeconds: proofMaturityDelaySeconds, - _disputeGameFinalityDelaySeconds: disputeGameFinalityDelaySeconds - }); + string memory release = _dii.release(); + string memory stdVerToml = _dii.standardVersionsToml(); + string memory contractName = "optimism_portal"; + OptimismPortal2 impl; + + address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); + if (existingImplementation != address(0)) { + impl = OptimismPortal2(payable(existingImplementation)); + } else if (isDevelopRelease(release)) { + uint256 proofMaturityDelaySeconds = _dii.proofMaturityDelaySeconds(); + uint256 disputeGameFinalityDelaySeconds = _dii.disputeGameFinalityDelaySeconds(); + vm.broadcast(msg.sender); + impl = new OptimismPortal2(proofMaturityDelaySeconds, disputeGameFinalityDelaySeconds); + } else { + revert(string.concat("DeployImplementations: failed to deploy release ", release)); + } - vm.label(address(optimismPortalImpl), "OptimismPortalImpl"); - _dio.set(_dio.optimismPortalImpl.selector, address(optimismPortalImpl)); + vm.label(address(impl), "OptimismPortalImpl"); + _dio.set(_dio.optimismPortalImpl.selector, address(impl)); } function deployDelayedWETHImpl(DeployImplementationsInput _dii, 
DeployImplementationsOutput _dio) public virtual { - uint256 withdrawalDelaySeconds = _dii.withdrawalDelaySeconds(); - - vm.broadcast(msg.sender); - DelayedWETH delayedWETHImpl = new DelayedWETH({ _delay: withdrawalDelaySeconds }); + string memory release = _dii.release(); + string memory stdVerToml = _dii.standardVersionsToml(); + string memory contractName = "delayed_weth"; + DelayedWETH impl; + + address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); + if (existingImplementation != address(0)) { + impl = DelayedWETH(payable(existingImplementation)); + } else if (isDevelopRelease(release)) { + uint256 withdrawalDelaySeconds = _dii.withdrawalDelaySeconds(); + vm.broadcast(msg.sender); + impl = new DelayedWETH(withdrawalDelaySeconds); + } else { + revert(string.concat("DeployImplementations: failed to deploy release ", release)); + } - vm.label(address(delayedWETHImpl), "DelayedWETHImpl"); - _dio.set(_dio.delayedWETHImpl.selector, address(delayedWETHImpl)); + vm.label(address(impl), "DelayedWETHImpl"); + _dio.set(_dio.delayedWETHImpl.selector, address(impl)); } function deployPreimageOracleSingleton( @@ -689,39 +826,72 @@ contract DeployImplementations is Script { public virtual { - uint256 minProposalSizeBytes = _dii.minProposalSizeBytes(); - uint256 challengePeriodSeconds = _dii.challengePeriodSeconds(); - - vm.broadcast(msg.sender); - PreimageOracle preimageOracleSingleton = - new PreimageOracle({ _minProposalSize: minProposalSizeBytes, _challengePeriod: challengePeriodSeconds }); + string memory release = _dii.release(); + string memory stdVerToml = _dii.standardVersionsToml(); + string memory contractName = "preimage_oracle"; + PreimageOracle singleton; + + address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); + if (existingImplementation != address(0)) { + singleton = PreimageOracle(payable(existingImplementation)); + } else if (isDevelopRelease(release)) { + uint256 minProposalSizeBytes = 
_dii.minProposalSizeBytes(); + uint256 challengePeriodSeconds = _dii.challengePeriodSeconds(); + vm.broadcast(msg.sender); + singleton = new PreimageOracle(minProposalSizeBytes, challengePeriodSeconds); + } else { + revert(string.concat("DeployImplementations: failed to deploy release ", release)); + } - vm.label(address(preimageOracleSingleton), "PreimageOracleSingleton"); - _dio.set(_dio.preimageOracleSingleton.selector, address(preimageOracleSingleton)); + vm.label(address(singleton), "PreimageOracleSingleton"); + _dio.set(_dio.preimageOracleSingleton.selector, address(singleton)); } - function deployMipsSingleton(DeployImplementationsInput, DeployImplementationsOutput _dio) public virtual { - IPreimageOracle preimageOracle = IPreimageOracle(_dio.preimageOracleSingleton()); - - vm.broadcast(msg.sender); - MIPS mipsSingleton = new MIPS(preimageOracle); + function deployMipsSingleton(DeployImplementationsInput _dii, DeployImplementationsOutput _dio) public virtual { + string memory release = _dii.release(); + string memory stdVerToml = _dii.standardVersionsToml(); + string memory contractName = "mips"; + MIPS singleton; + + address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); + if (existingImplementation != address(0)) { + singleton = MIPS(payable(existingImplementation)); + } else if (isDevelopRelease(release)) { + IPreimageOracle preimageOracle = IPreimageOracle(_dio.preimageOracleSingleton()); + vm.broadcast(msg.sender); + singleton = new MIPS(preimageOracle); + } else { + revert(string.concat("DeployImplementations: failed to deploy release ", release)); + } - vm.label(address(mipsSingleton), "MIPSSingleton"); - _dio.set(_dio.mipsSingleton.selector, address(mipsSingleton)); + vm.label(address(singleton), "MIPSSingleton"); + _dio.set(_dio.mipsSingleton.selector, address(singleton)); } function deployDisputeGameFactoryImpl( - DeployImplementationsInput, + DeployImplementationsInput _dii, DeployImplementationsOutput _dio ) public 
virtual { - vm.broadcast(msg.sender); - DisputeGameFactory disputeGameFactoryImpl = new DisputeGameFactory(); + string memory release = _dii.release(); + string memory stdVerToml = _dii.standardVersionsToml(); + string memory contractName = "dispute_game_factory"; + DisputeGameFactory impl; + + address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); + if (existingImplementation != address(0)) { + impl = DisputeGameFactory(payable(existingImplementation)); + } else if (isDevelopRelease(release)) { + vm.broadcast(msg.sender); + impl = new DisputeGameFactory(); + } else { + revert(string.concat("DeployImplementations: failed to deploy release ", release)); + } - vm.label(address(disputeGameFactoryImpl), "DisputeGameFactoryImpl"); - _dio.set(_dio.disputeGameFactoryImpl.selector, address(disputeGameFactoryImpl)); + vm.label(address(impl), "DisputeGameFactoryImpl"); + _dio.set(_dio.disputeGameFactoryImpl.selector, address(impl)); } // -------- Utilities -------- @@ -763,6 +933,35 @@ contract DeployImplementations is Script { newContract1_ = deployBytecode(part1, _salt); newContract2_ = deployBytecode(part2, _salt); } + + // Zero address is returned if the address is not found in '_standardVersionsToml'. 
+ function getReleaseAddress( + string memory _version, + string memory _contractName, + string memory _standardVersionsToml + ) + internal + pure + returns (address addr_) + { + string memory baseKey = string.concat('.releases["', _version, '"].', _contractName); + string memory implAddressKey = string.concat(baseKey, ".implementation_address"); + string memory addressKey = string.concat(baseKey, ".address"); + try vm.parseTomlAddress(_standardVersionsToml, implAddressKey) returns (address parsedAddr_) { + addr_ = parsedAddr_; + } catch { + try vm.parseTomlAddress(_standardVersionsToml, addressKey) returns (address parsedAddr_) { + addr_ = parsedAddr_; + } catch { + addr_ = address(0); + } + } + } + + // A release is considered a 'develop' release if it does not start with 'op-contracts'. + function isDevelopRelease(string memory _release) internal pure returns (bool) { + return !LibString.startsWith(_release, "op-contracts"); + } } // Similar to how DeploySuperchain.s.sol contains a lot of comments to thoroughly document the script @@ -800,26 +999,30 @@ contract DeployImplementations is Script { contract DeployImplementationsInterop is DeployImplementations { function createOPSMContract( DeployImplementationsInput _dii, - DeployImplementationsOutput, - OPStackManager.Blueprints memory blueprints, - string memory release, - OPStackManager.ImplementationSetter[] memory setters + DeployImplementationsOutput _dio, + OPStackManager.Blueprints memory _blueprints, + string memory _release, + OPStackManager.ImplementationSetter[] memory _setters ) internal override returns (OPStackManager opsmProxy_) { - SuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); - ProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); ProxyAdmin proxyAdmin = _dii.superchainProxyAdmin(); - vm.startBroadcast(msg.sender); + vm.broadcast(msg.sender); Proxy proxy = new Proxy(address(msg.sender)); - OPStackManager opsm = new 
OPStackManagerInterop(superchainConfigProxy, protocolVersionsProxy); + + deployOPContractsManagerImpl(_dii, _dio); // overriding function + OPStackManager opsmImpl = _dio.opsmImpl(); OPStackManager.InitializerInputs memory initializerInputs = - OPStackManager.InitializerInputs(blueprints, setters, release, true); - proxy.upgradeToAndCall(address(opsm), abi.encodeWithSelector(opsm.initialize.selector, initializerInputs)); + OPStackManager.InitializerInputs(_blueprints, _setters, _release, true); + + vm.startBroadcast(msg.sender); + proxy.upgradeToAndCall( + address(opsmImpl), abi.encodeWithSelector(opsmImpl.initialize.selector, initializerInputs) + ); proxy.changeAdmin(address(proxyAdmin)); // transfer ownership of Proxy contract to the ProxyAdmin contract vm.stopBroadcast(); @@ -834,25 +1037,70 @@ contract DeployImplementationsInterop is DeployImplementations { public override { - uint256 proofMaturityDelaySeconds = _dii.proofMaturityDelaySeconds(); - uint256 disputeGameFinalityDelaySeconds = _dii.disputeGameFinalityDelaySeconds(); + string memory release = _dii.release(); + string memory stdVerToml = _dii.standardVersionsToml(); + string memory contractName = "optimism_portal"; + OptimismPortal2 impl; + + address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); + if (existingImplementation != address(0)) { + impl = OptimismPortalInterop(payable(existingImplementation)); + } else if (isDevelopRelease(release)) { + uint256 proofMaturityDelaySeconds = _dii.proofMaturityDelaySeconds(); + uint256 disputeGameFinalityDelaySeconds = _dii.disputeGameFinalityDelaySeconds(); + vm.broadcast(msg.sender); + impl = new OptimismPortalInterop(proofMaturityDelaySeconds, disputeGameFinalityDelaySeconds); + } else { + revert(string.concat("DeployImplementations: failed to deploy release ", release)); + } - vm.broadcast(msg.sender); - OptimismPortalInterop optimismPortalImpl = new OptimismPortalInterop({ - _proofMaturityDelaySeconds: 
proofMaturityDelaySeconds, - _disputeGameFinalityDelaySeconds: disputeGameFinalityDelaySeconds - }); + vm.label(address(impl), "OptimismPortalImpl"); + _dio.set(_dio.optimismPortalImpl.selector, address(impl)); + } - vm.label(address(optimismPortalImpl), "OptimismPortalImpl"); - _dio.set(_dio.optimismPortalImpl.selector, address(optimismPortalImpl)); + function deploySystemConfigImpl( + DeployImplementationsInput _dii, + DeployImplementationsOutput _dio + ) + public + override + { + string memory release = _dii.release(); + string memory stdVerToml = _dii.standardVersionsToml(); + + string memory contractName = "system_config"; + SystemConfig impl; + + address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); + if (existingImplementation != address(0)) { + impl = SystemConfigInterop(existingImplementation); + } else if (isDevelopRelease(release)) { + vm.broadcast(msg.sender); + impl = new SystemConfigInterop(); + } else { + revert(string.concat("DeployImplementations: failed to deploy release ", release)); + } + + vm.label(address(impl), "SystemConfigImpl"); + _dio.set(_dio.systemConfigImpl.selector, address(impl)); } - function deploySystemConfigImpl(DeployImplementationsInput, DeployImplementationsOutput _dio) public override { + function deployOPContractsManagerImpl( + DeployImplementationsInput _dii, + DeployImplementationsOutput _dio + ) + public + override + { + SuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); + ProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); + vm.broadcast(msg.sender); - SystemConfigInterop systemConfigImpl = new SystemConfigInterop(); + // TODO: Eventually we will want to select the correct implementation based on the release. 
+ OPStackManager impl = new OPStackManagerInterop(superchainConfigProxy, protocolVersionsProxy); - vm.label(address(systemConfigImpl), "SystemConfigImpl"); - _dio.set(_dio.systemConfigImpl.selector, address(systemConfigImpl)); + vm.label(address(impl), "OPStackManagerImpl"); + _dio.set(_dio.opsmImpl.selector, address(impl)); } function opsmSystemConfigSetter( diff --git a/packages/contracts-bedrock/test/DeployImplementations.t.sol b/packages/contracts-bedrock/test/DeployImplementations.t.sol index 957eb2830435..1eac67c6fdf3 100644 --- a/packages/contracts-bedrock/test/DeployImplementations.t.sol +++ b/packages/contracts-bedrock/test/DeployImplementations.t.sol @@ -35,7 +35,7 @@ contract DeployImplementationsInput_Test is Test { uint256 challengePeriodSeconds = 300; uint256 proofMaturityDelaySeconds = 400; uint256 disputeGameFinalityDelaySeconds = 500; - string release = "op-contracts/latest"; + string release = "dev-release"; // this means implementation contracts will be deployed SuperchainConfig superchainConfigProxy = SuperchainConfig(makeAddr("superchainConfigProxy")); ProtocolVersions protocolVersionsProxy = ProtocolVersions(makeAddr("protocolVersionsProxy")); @@ -70,6 +70,9 @@ contract DeployImplementationsInput_Test is Test { vm.expectRevert("DeployImplementationsInput: not set"); dii.superchainProxyAdmin(); + + vm.expectRevert("DeployImplementationsInput: not set"); + dii.standardVersionsToml(); } function test_superchainProxyAdmin_whenNotSet_reverts() public { @@ -247,13 +250,18 @@ contract DeployImplementations_Test is Test { uint256 challengePeriodSeconds = 300; uint256 proofMaturityDelaySeconds = 400; uint256 disputeGameFinalityDelaySeconds = 500; - string release = "op-contracts/latest"; SuperchainConfig superchainConfigProxy = SuperchainConfig(makeAddr("superchainConfigProxy")); ProtocolVersions protocolVersionsProxy = ProtocolVersions(makeAddr("protocolVersionsProxy")); function setUp() public virtual { deployImplementations = new 
DeployImplementations(); (dii, dio) = deployImplementations.etchIOContracts(); + + // End users of the DeployImplementations contract will need to set the `standardVersionsToml`. + string memory standardVersionsTomlPath = + string.concat(vm.projectRoot(), "/test/fixtures/standard-versions.toml"); + string memory standardVersionsToml = vm.readFile(standardVersionsTomlPath); + dii.set(dii.standardVersionsToml.selector, standardVersionsToml); } // By deploying the `DeployImplementations` contract with this virtual function, we provide a @@ -267,13 +275,142 @@ contract DeployImplementations_Test is Test { return keccak256(abi.encode(_seed, _i)); } + function test_deployImplementation_succeeds() public { + string memory deployContractsRelease = "dev-release"; + dii.set(dii.release.selector, deployContractsRelease); + deployImplementations.deploySystemConfigImpl(dii, dio); + assertTrue(address(0) != address(dio.systemConfigImpl())); + } + + function test_reuseImplementation_succeeds() public { + // All hardcoded addresses below are taken from the superchain-registry config: + // https://github.com/ethereum-optimism/superchain-registry/blob/be65d22f8128cf0c4e5b4e1f677daf86843426bf/validation/standard/standard-versions.toml#L11 + string memory testRelease = "op-contracts/v1.6.0"; + dii.set(dii.release.selector, testRelease); + + deployImplementations.deploySystemConfigImpl(dii, dio); + address srSystemConfigImpl = address(0xF56D96B2535B932656d3c04Ebf51baBff241D886); + vm.etch(address(srSystemConfigImpl), hex"01"); + assertEq(srSystemConfigImpl, address(dio.systemConfigImpl())); + + address srL1CrossDomainMessengerImpl = address(0xD3494713A5cfaD3F5359379DfA074E2Ac8C6Fd65); + vm.etch(address(srL1CrossDomainMessengerImpl), hex"01"); + deployImplementations.deployL1CrossDomainMessengerImpl(dii, dio); + assertEq(srL1CrossDomainMessengerImpl, address(dio.l1CrossDomainMessengerImpl())); + + address srL1ERC721BridgeImpl = address(0xAE2AF01232a6c4a4d3012C5eC5b1b35059caF10d); + 
vm.etch(address(srL1ERC721BridgeImpl), hex"01"); + deployImplementations.deployL1ERC721BridgeImpl(dii, dio); + assertEq(srL1ERC721BridgeImpl, address(dio.l1ERC721BridgeImpl())); + + address srL1StandardBridgeImpl = address(0x64B5a5Ed26DCb17370Ff4d33a8D503f0fbD06CfF); + vm.etch(address(srL1StandardBridgeImpl), hex"01"); + deployImplementations.deployL1StandardBridgeImpl(dii, dio); + assertEq(srL1StandardBridgeImpl, address(dio.l1StandardBridgeImpl())); + + address srOptimismMintableERC20FactoryImpl = address(0xE01efbeb1089D1d1dB9c6c8b135C934C0734c846); + vm.etch(address(srOptimismMintableERC20FactoryImpl), hex"01"); + deployImplementations.deployOptimismMintableERC20FactoryImpl(dii, dio); + assertEq(srOptimismMintableERC20FactoryImpl, address(dio.optimismMintableERC20FactoryImpl())); + + address srOptimismPortalImpl = address(0xe2F826324b2faf99E513D16D266c3F80aE87832B); + vm.etch(address(srOptimismPortalImpl), hex"01"); + deployImplementations.deployOptimismPortalImpl(dii, dio); + assertEq(srOptimismPortalImpl, address(dio.optimismPortalImpl())); + + address srDelayedWETHImpl = address(0x71e966Ae981d1ce531a7b6d23DC0f27B38409087); + vm.etch(address(srDelayedWETHImpl), hex"01"); + deployImplementations.deployDelayedWETHImpl(dii, dio); + assertEq(srDelayedWETHImpl, address(dio.delayedWETHImpl())); + + address srPreimageOracleSingleton = address(0x9c065e11870B891D214Bc2Da7EF1f9DDFA1BE277); + vm.etch(address(srPreimageOracleSingleton), hex"01"); + deployImplementations.deployPreimageOracleSingleton(dii, dio); + assertEq(srPreimageOracleSingleton, address(dio.preimageOracleSingleton())); + + address srMipsSingleton = address(0x16e83cE5Ce29BF90AD9Da06D2fE6a15d5f344ce4); + vm.etch(address(srMipsSingleton), hex"01"); + deployImplementations.deployMipsSingleton(dii, dio); + assertEq(srMipsSingleton, address(dio.mipsSingleton())); + + address srDisputeGameFactoryImpl = address(0xc641A33cab81C559F2bd4b21EA34C290E2440C2B); + vm.etch(address(srDisputeGameFactoryImpl), hex"01"); + 
deployImplementations.deployDisputeGameFactoryImpl(dii, dio); + assertEq(srDisputeGameFactoryImpl, address(dio.disputeGameFactoryImpl())); + } + + function test_deployAtNonExistentRelease_reverts() public { + string memory unknownRelease = "op-contracts/v0.0.0"; + dii.set(dii.release.selector, unknownRelease); + + bytes memory expectedErr = + bytes(string.concat("DeployImplementations: failed to deploy release ", unknownRelease)); + + vm.expectRevert(expectedErr); + deployImplementations.deploySystemConfigImpl(dii, dio); + + vm.expectRevert(expectedErr); + deployImplementations.deployL1CrossDomainMessengerImpl(dii, dio); + + vm.expectRevert(expectedErr); + deployImplementations.deployL1ERC721BridgeImpl(dii, dio); + + vm.expectRevert(expectedErr); + deployImplementations.deployL1StandardBridgeImpl(dii, dio); + + vm.expectRevert(expectedErr); + deployImplementations.deployOptimismMintableERC20FactoryImpl(dii, dio); + + // TODO: Uncomment the code below when OPContractsManager is deployed based on release. Superchain-registry + // doesn't contain OPContractsManager yet. 
+ // dii.set(dii.superchainConfigProxy.selector, address(superchainConfigProxy)); + // dii.set(dii.protocolVersionsProxy.selector, address(protocolVersionsProxy)); + // vm.etch(address(superchainConfigProxy), hex"01"); + // vm.etch(address(protocolVersionsProxy), hex"01"); + // vm.expectRevert(expectedErr); + // deployImplementations.deployOPContractsManagerImpl(dii, dio); + + dii.set(dii.proofMaturityDelaySeconds.selector, 1); + dii.set(dii.disputeGameFinalityDelaySeconds.selector, 2); + vm.expectRevert(expectedErr); + deployImplementations.deployOptimismPortalImpl(dii, dio); + + dii.set(dii.withdrawalDelaySeconds.selector, 1); + vm.expectRevert(expectedErr); + deployImplementations.deployDelayedWETHImpl(dii, dio); + + dii.set(dii.minProposalSizeBytes.selector, 1); + dii.set(dii.challengePeriodSeconds.selector, 2); + vm.expectRevert(expectedErr); + deployImplementations.deployPreimageOracleSingleton(dii, dio); + + address preImageOracleSingleton = makeAddr("preImageOracleSingleton"); + vm.etch(address(preImageOracleSingleton), hex"01"); + dio.set(dio.preimageOracleSingleton.selector, preImageOracleSingleton); + vm.expectRevert(expectedErr); + deployImplementations.deployMipsSingleton(dii, dio); + + vm.expectRevert(expectedErr); // fault proof contracts don't exist at this release + deployImplementations.deployDisputeGameFactoryImpl(dii, dio); + } + + function test_noContractExistsAtRelease_reverts() public { + string memory unknownRelease = "op-contracts/v1.3.0"; + dii.set(dii.release.selector, unknownRelease); + bytes memory expectedErr = + bytes(string.concat("DeployImplementations: failed to deploy release ", unknownRelease)); + + vm.expectRevert(expectedErr); // fault proof contracts don't exist at this release + deployImplementations.deployDisputeGameFactoryImpl(dii, dio); + } + function testFuzz_run_memory_succeeds(bytes32 _seed) public { withdrawalDelaySeconds = uint256(hash(_seed, 0)); minProposalSizeBytes = uint256(hash(_seed, 1)); challengePeriodSeconds 
= bound(uint256(hash(_seed, 2)), 0, type(uint64).max); proofMaturityDelaySeconds = uint256(hash(_seed, 3)); disputeGameFinalityDelaySeconds = uint256(hash(_seed, 4)); - release = string(bytes.concat(hash(_seed, 5))); + string memory release = string(bytes.concat(hash(_seed, 5))); protocolVersionsProxy = ProtocolVersions(address(uint160(uint256(hash(_seed, 7))))); // Must configure the ProxyAdmin contract which is used to upgrade the OPSM's proxy contract. @@ -325,6 +462,7 @@ contract DeployImplementations_Test is Test { dii.set(dii.challengePeriodSeconds.selector, challengePeriodSeconds); dii.set(dii.proofMaturityDelaySeconds.selector, proofMaturityDelaySeconds); dii.set(dii.disputeGameFinalityDelaySeconds.selector, disputeGameFinalityDelaySeconds); + string memory release = "dev-release"; dii.set(dii.release.selector, release); dii.set(dii.superchainConfigProxy.selector, address(superchainConfigProxy)); dii.set(dii.protocolVersionsProxy.selector, address(protocolVersionsProxy)); diff --git a/packages/contracts-bedrock/test/DeployOPChain.t.sol b/packages/contracts-bedrock/test/DeployOPChain.t.sol index ef8fc06cc626..5f4525fde158 100644 --- a/packages/contracts-bedrock/test/DeployOPChain.t.sol +++ b/packages/contracts-bedrock/test/DeployOPChain.t.sol @@ -323,7 +323,7 @@ contract DeployOPChain_TestBase is Test { uint256 challengePeriodSeconds = 300; uint256 proofMaturityDelaySeconds = 400; uint256 disputeGameFinalityDelaySeconds = 500; - string release = "op-contracts/latest"; + string release = "dev-release"; // this means implementation contracts will be deployed SuperchainConfig superchainConfigProxy; ProtocolVersions protocolVersionsProxy; @@ -393,6 +393,11 @@ contract DeployOPChain_TestBase is Test { dii.set(dii.release.selector, release); dii.set(dii.superchainConfigProxy.selector, address(superchainConfigProxy)); dii.set(dii.protocolVersionsProxy.selector, address(protocolVersionsProxy)); + // End users of the DeployImplementations contract will need to set 
the `standardVersionsToml`. + string memory standardVersionsTomlPath = + string.concat(vm.projectRoot(), "/test/fixtures/standard-versions.toml"); + string memory standardVersionsToml = vm.readFile(standardVersionsTomlPath); + dii.set(dii.standardVersionsToml.selector, standardVersionsToml); deployImplementations.run(dii, dio); // Set the OPStackManager input for DeployOPChain. diff --git a/packages/contracts-bedrock/test/fixtures/standard-versions.toml b/packages/contracts-bedrock/test/fixtures/standard-versions.toml new file mode 100644 index 000000000000..cb4d336a7336 --- /dev/null +++ b/packages/contracts-bedrock/test/fixtures/standard-versions.toml @@ -0,0 +1,47 @@ +standard_release = "op-contracts/v1.6.0" + +[releases] + +# Contracts which are +# * unproxied singletons: specify a standard "address" +# * proxied : specify a standard "implementation_address" +# * neither : specify neither a standard "address" nor "implementation_address" + +# Fault Proofs https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.6.0 +[releases."op-contracts/v1.6.0"] +optimism_portal = { version = "3.10.0", implementation_address = "0xe2F826324b2faf99E513D16D266c3F80aE87832B" } +system_config = { version = "2.2.0", implementation_address = "0xF56D96B2535B932656d3c04Ebf51baBff241D886" } +anchor_state_registry = { version = "2.0.0" } +delayed_weth = { version = "1.1.0", implementation_address = "0x71e966Ae981d1ce531a7b6d23DC0f27B38409087" } +dispute_game_factory = { version = "1.0.0", implementation_address = "0xc641A33cab81C559F2bd4b21EA34C290E2440C2B" } +fault_dispute_game = { version = "1.3.0" } +permissioned_dispute_game = { version = "1.3.0" } +mips = { version = "1.1.0", address = "0x16e83cE5Ce29BF90AD9Da06D2fE6a15d5f344ce4" } +preimage_oracle = { version = "1.1.2", address = "0x9c065e11870B891D214Bc2Da7EF1f9DDFA1BE277" } +l1_cross_domain_messenger = { version = "2.3.0", implementation_address = "0xD3494713A5cfaD3F5359379DfA074E2Ac8C6Fd65" } 
+l1_erc721_bridge = { version = "2.1.0", implementation_address = "0xAE2AF01232a6c4a4d3012C5eC5b1b35059caF10d" } +l1_standard_bridge = { version = "2.1.0", implementation_address = "0x64B5a5Ed26DCb17370Ff4d33a8D503f0fbD06CfF" } +# l2_output_oracle -- This contract not used in fault proofs +optimism_mintable_erc20_factory = { version = "1.9.0", implementation_address = "0xE01efbeb1089D1d1dB9c6c8b135C934C0734c846" } + +# Fault Proofs https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.4.0 +[releases."op-contracts/v1.4.0"] +optimism_portal = { version = "3.10.0", implementation_address = "0xe2F826324b2faf99E513D16D266c3F80aE87832B" } +system_config = { version = "2.2.0", implementation_address = "0xF56D96B2535B932656d3c04Ebf51baBff241D886" } +anchor_state_registry = { version = "1.0.0" } +delayed_weth = { version = "1.0.0", implementation_address = "0x97988d5624F1ba266E1da305117BCf20713bee08" } +dispute_game_factory = { version = "1.0.0", implementation_address = "0xc641A33cab81C559F2bd4b21EA34C290E2440C2B" } +fault_dispute_game = { version = "1.2.0" } +permissioned_dispute_game = { version = "1.2.0" } +mips = { version = "1.0.1", address = "0x0f8EdFbDdD3c0256A80AD8C0F2560B1807873C9c" } +preimage_oracle = { version = "1.0.0", address = "0xD326E10B8186e90F4E2adc5c13a2d0C137ee8b34" } + +# MCP https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.3.0 +[releases."op-contracts/v1.3.0"] +l1_cross_domain_messenger = { version = "2.3.0", implementation_address = "0xD3494713A5cfaD3F5359379DfA074E2Ac8C6Fd65" } +l1_erc721_bridge = { version = "2.1.0", implementation_address = "0xAE2AF01232a6c4a4d3012C5eC5b1b35059caF10d" } +l1_standard_bridge = { version = "2.1.0", implementation_address = "0x64B5a5Ed26DCb17370Ff4d33a8D503f0fbD06CfF" } +l2_output_oracle = { version = "1.8.0", implementation_address = "0xF243BEd163251380e78068d317ae10f26042B292" } +optimism_mintable_erc20_factory = { version = "1.9.0", implementation_address = 
"0xE01efbeb1089D1d1dB9c6c8b135C934C0734c846" } +optimism_portal = { version = "2.5.0", implementation_address = "0x2D778797049FE9259d947D1ED8e5442226dFB589" } +system_config = { version = "1.12.0", implementation_address = "0xba2492e52F45651B60B8B38d4Ea5E2390C64Ffb1" } diff --git a/packages/contracts-bedrock/test/fixtures/test-deploy-auth-system-in.toml b/packages/contracts-bedrock/test/fixtures/test-deploy-auth-system-in.toml deleted file mode 100644 index 4f0df83e1af2..000000000000 --- a/packages/contracts-bedrock/test/fixtures/test-deploy-auth-system-in.toml +++ /dev/null @@ -1,11 +0,0 @@ -[safe] -threshold = 5 -owners = [ - "0x1111111111111111111111111111111111111111", - "0x2222222222222222222222222222222222222222", - "0x3333333333333333333333333333333333333333", - "0x4444444444444444444444444444444444444444", - "0x5555555555555555555555555555555555555555", - "0x6666666666666666666666666666666666666666", - "0x7777777777777777777777777777777777777777" -] diff --git a/packages/contracts-bedrock/test/fixtures/test-deploy-auth-system-out.toml b/packages/contracts-bedrock/test/fixtures/test-deploy-auth-system-out.toml deleted file mode 100644 index 35465cae1942..000000000000 --- a/packages/contracts-bedrock/test/fixtures/test-deploy-auth-system-out.toml +++ /dev/null @@ -1 +0,0 @@ -safe = "0xDC93f9959c0F9c3849461B6468B4592a19567E09" diff --git a/packages/contracts-bedrock/test/fixtures/test-deploy-superchain-in.toml b/packages/contracts-bedrock/test/fixtures/test-deploy-superchain-in.toml deleted file mode 100644 index 0900e71635d7..000000000000 --- a/packages/contracts-bedrock/test/fixtures/test-deploy-superchain-in.toml +++ /dev/null @@ -1,8 +0,0 @@ -paused = false -requiredProtocolVersion = 1 -recommendedProtocolVersion = 2 - -[roles] -proxyAdminOwner = "0x51f0348a9fA2aAbaB45E82825Fbd13d406e04497" -protocolVersionsOwner = "0xeEB4cc05dC0dE43c465f97cfc703D165418CA93A" -guardian = "0xE5DbA98c65F4B9EB0aeEBb3674fE64f88509a1eC" diff --git 
a/packages/contracts-bedrock/test/fixtures/test-deploy-superchain-out.toml b/packages/contracts-bedrock/test/fixtures/test-deploy-superchain-out.toml deleted file mode 100644 index ceb558a79d5a..000000000000 --- a/packages/contracts-bedrock/test/fixtures/test-deploy-superchain-out.toml +++ /dev/null @@ -1,5 +0,0 @@ -protocolVersionsImpl = "0x5991A2dF15A8F6A256D3Ec51E99254Cd3fb576A9" -protocolVersionsProxy = "0x1d1499e622D69689cdf9004d05Ec547d650Ff211" -superchainConfigImpl = "0xF62849F9A0B5Bf2913b396098F7c7019b51A820a" -superchainConfigProxy = "0xc7183455a4C133Ae270771860664b6B7ec320bB1" -superchainProxyAdmin = "0x2e234DAe75C793f67A35089C9d99245E1C58470b" From 1ee7ea1feca356347e12dafff53d601270c71ed2 Mon Sep 17 00:00:00 2001 From: Blaine Malone Date: Tue, 24 Sep 2024 15:17:50 -0400 Subject: [PATCH 005/116] fix: justfile clean command (#12092) * fix: justfile clean command * fix: removed stale comment --- packages/contracts-bedrock/justfile | 2 -- 1 file changed, 2 deletions(-) diff --git a/packages/contracts-bedrock/justfile b/packages/contracts-bedrock/justfile index e660d7a1c7ba..901ce17daa68 100644 --- a/packages/contracts-bedrock/justfile +++ b/packages/contracts-bedrock/justfile @@ -28,10 +28,8 @@ build-go-ffi: cd scripts/go-ffi && go build # Cleans build artifacts and deployments. -# Removes everything inside of .testdata (except the .gitkeep file). 
clean: rm -rf ./artifacts ./forge-artifacts ./cache ./scripts/go-ffi/go-ffi ./deployments/hardhat/* - find ./.testdata -mindepth 1 -not -name '.gitkeep' -delete ######################################################## From 5b9b3b8c5bfc67c17123fb8a26ed8f2bc615bc9f Mon Sep 17 00:00:00 2001 From: Maurelian Date: Tue, 24 Sep 2024 15:24:53 -0400 Subject: [PATCH 006/116] Add permissioned game output assertions (#12093) * test: Add permissioned game output assertions * test: reorder DeployOpChainTest_Base for clarity * Update packages/contracts-bedrock/test/DeployOPChain.t.sol Co-authored-by: Matt Solomon --------- Co-authored-by: Matt Solomon --- .../test/DeployOPChain.t.sol | 29 +++++++++++-------- 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/packages/contracts-bedrock/test/DeployOPChain.t.sol b/packages/contracts-bedrock/test/DeployOPChain.t.sol index 5f4525fde158..732eea8b05bf 100644 --- a/packages/contracts-bedrock/test/DeployOPChain.t.sol +++ b/packages/contracts-bedrock/test/DeployOPChain.t.sol @@ -361,9 +361,10 @@ contract DeployOPChain_TestBase is Test { }) ); - // Initialize deploy scripts. 
+ // Configure and deploy Superchain contracts DeploySuperchain deploySuperchain = new DeploySuperchain(); (DeploySuperchainInput dsi, DeploySuperchainOutput dso) = deploySuperchain.etchIOContracts(); + dsi.set(dsi.proxyAdminOwner.selector, proxyAdminOwner); dsi.set(dsi.protocolVersionsOwner.selector, protocolVersionsOwner); dsi.set(dsi.guardian.selector, guardian); @@ -371,20 +372,16 @@ contract DeployOPChain_TestBase is Test { dsi.set(dsi.requiredProtocolVersion.selector, requiredProtocolVersion); dsi.set(dsi.recommendedProtocolVersion.selector, recommendedProtocolVersion); - DeployImplementations deployImplementations = createDeployImplementationsContract(); - (DeployImplementationsInput dii, DeployImplementationsOutput dio) = deployImplementations.etchIOContracts(); - - deployOPChain = new DeployOPChain(); - (doi, doo) = deployOPChain.etchIOContracts(); - - // Deploy the superchain contracts. deploySuperchain.run(dsi, dso); // Populate the inputs for DeployImplementations based on the output of DeploySuperchain. superchainConfigProxy = dso.superchainConfigProxy(); protocolVersionsProxy = dso.protocolVersionsProxy(); - // Deploy the implementations. + // Configure and deploy Implementation contracts + DeployImplementations deployImplementations = createDeployImplementationsContract(); + (DeployImplementationsInput dii, DeployImplementationsOutput dio) = deployImplementations.etchIOContracts(); + dii.set(dii.withdrawalDelaySeconds.selector, withdrawalDelaySeconds); dii.set(dii.minProposalSizeBytes.selector, minProposalSizeBytes); dii.set(dii.challengePeriodSeconds.selector, challengePeriodSeconds); @@ -400,7 +397,11 @@ contract DeployOPChain_TestBase is Test { dii.set(dii.standardVersionsToml.selector, standardVersionsToml); deployImplementations.run(dii, dio); - // Set the OPStackManager input for DeployOPChain. + // Deploy DeployOpChain, but defer populating the input values to the test suites inheriting this contract. 
+ deployOPChain = new DeployOPChain(); + (doi, doo) = deployOPChain.etchIOContracts(); + + // Set the OPStackManager address as input to DeployOPChain. opsm = dio.opsmProxy(); } @@ -478,8 +479,12 @@ contract DeployOPChain_Test is DeployOPChain_TestBase { address batcherActual = address(uint160(uint256(doo.systemConfigProxy().batcherHash()))); assertEq(batcherActual, batcher, "2300"); assertEq(address(doo.systemConfigProxy().unsafeBlockSigner()), unsafeBlockSigner, "2400"); - // assertEq(address(...proposer()), proposer, "2500"); // TODO once we deploy dispute games. - // assertEq(address(...challenger()), challenger, "2600"); // TODO once we deploy dispute games. + assertEq(address(doo.permissionedDisputeGame().proposer()), proposer, "2500"); + assertEq(address(doo.permissionedDisputeGame().challenger()), challenger, "2600"); + + // TODO once we deploy the Permissionless Dispute Game + // assertEq(address(doo.faultDisputeGame().proposer()), proposer, "2700"); + // assertEq(address(doo.faultDisputeGame().challenger()), challenger, "2800"); // Most architecture assertions are handled within the OP Stack Manager itself and therefore // we only assert on the things that are not visible onchain. 
From 712b760f7aa266864137cb55d450c2e3a300d799 Mon Sep 17 00:00:00 2001 From: mbaxter Date: Tue, 24 Sep 2024 16:09:21 -0400 Subject: [PATCH 007/116] cannon: Drop unnecessary wakeup field reset (#12095) * cannon: Cut unnecessary wakeup reset * cannon: Cut now-extraneous state argument * cannon: Run semver lock --- cannon/mipsevm/multithreaded/mips.go | 4 +--- packages/contracts-bedrock/semver-lock.json | 4 ++-- .../contracts-bedrock/src/cannon/MIPS2.sol | 19 ++++++------------- 3 files changed, 9 insertions(+), 18 deletions(-) diff --git a/cannon/mipsevm/multithreaded/mips.go b/cannon/mipsevm/multithreaded/mips.go index daa36d05c0ff..b06ad3917724 100644 --- a/cannon/mipsevm/multithreaded/mips.go +++ b/cannon/mipsevm/multithreaded/mips.go @@ -363,6 +363,7 @@ func (m *InstrumentedState) handleRMWOps(insn, opcode uint32) error { } func (m *InstrumentedState) onWaitComplete(thread *ThreadState, isTimedOut bool) { + // Note: no need to reset m.state.Wakeup. If we're here, the Wakeup field has already been reset // Clear the futex state thread.FutexAddr = exec.FutexEmptyAddr thread.FutexVal = 0 @@ -376,9 +377,6 @@ func (m *InstrumentedState) onWaitComplete(thread *ThreadState, isTimedOut bool) v1 = exec.MipsETIMEDOUT } exec.HandleSyscallUpdates(&thread.Cpu, &thread.Registers, v0, v1) - - // Clear wakeup signal - m.state.Wakeup = exec.FutexEmptyAddr } func (m *InstrumentedState) preemptThread(thread *ThreadState) bool { diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index 7312208bee3e..3a60a92d73e3 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -144,8 +144,8 @@ "sourceCodeHash": "0xba4674e1846afbbc708877332a38dfabd4b8d1e48ce07d8ebf0a45c9f27f16b0" }, "src/cannon/MIPS2.sol": { - "initCodeHash": "0xd9da47f735b7a655a25ae0e867b467620a2cb537eb65d184a361f5ea4174d384", - "sourceCodeHash": "0x3a6d83a7d46eb267f6778f8ae116383fe3c14ad553d90b6c761fafeef22ae29c" + 
"initCodeHash": "0x67fb4107e25561ffcb3a9b6653f695e125773408d626a92036ea4b0814797021", + "sourceCodeHash": "0x5f4851e04dc9369552c94fb23aee8e8ca4ea9a9602917f0abb3b5f1347460bd5" }, "src/cannon/PreimageOracle.sol": { "initCodeHash": "0x801e52f9c8439fcf7089575fa93272dfb874641dbfc7d82f36d979c987271c0b", diff --git a/packages/contracts-bedrock/src/cannon/MIPS2.sol b/packages/contracts-bedrock/src/cannon/MIPS2.sol index 45811d9b46c8..487ea0aac6df 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS2.sol +++ b/packages/contracts-bedrock/src/cannon/MIPS2.sol @@ -57,8 +57,8 @@ contract MIPS2 is ISemver { } /// @notice The semantic version of the MIPS2 contract. - /// @custom:semver 1.0.0-beta.10 - string public constant version = "1.0.0-beta.10"; + /// @custom:semver 1.0.0-beta.11 + string public constant version = "1.0.0-beta.11"; /// @notice The preimage oracle contract. IPreimageOracle internal immutable ORACLE; @@ -202,7 +202,7 @@ contract MIPS2 is ISemver { // check timeout first if (state.step > thread.futexTimeoutStep) { // timeout! Allow execution - return onWaitComplete(state, thread, true); + return onWaitComplete(thread, true); } else { uint32 mem = MIPSMemory.readMem( state.memRoot, thread.futexAddr & 0xFFffFFfc, MIPSMemory.memoryProofOffset(MEM_PROOF_OFFSET, 1) @@ -214,7 +214,7 @@ contract MIPS2 is ISemver { } else { // wake thread up, the value at its address changed! // Userspace can turn thread back to sleep if it was too sporadic. - return onWaitComplete(state, thread, false); + return onWaitComplete(thread, false); } } } @@ -690,14 +690,8 @@ contract MIPS2 is ISemver { } /// @notice Completes the FUTEX_WAIT syscall. - function onWaitComplete( - State memory _state, - ThreadState memory _thread, - bool _isTimedOut - ) - internal - returns (bytes32 out_) - { + function onWaitComplete(ThreadState memory _thread, bool _isTimedOut) internal returns (bytes32 out_) { + // Note: no need to reset State.wakeup. 
If we're here, the wakeup field has already been reset // Clear the futex state _thread.futexAddr = sys.FUTEX_EMPTY_ADDR; _thread.futexVal = 0; @@ -711,7 +705,6 @@ contract MIPS2 is ISemver { sys.handleSyscallUpdates(cpu, _thread.registers, v0, v1); setStateCpuScalars(_thread, cpu); - _state.wakeup = sys.FUTEX_EMPTY_ADDR; updateCurrentThreadRoot(); out_ = outputState(); } From 9c02f544ec32cf0a1c437773f444d2cfea65d8fa Mon Sep 17 00:00:00 2001 From: Matt Solomon Date: Tue, 24 Sep 2024 15:46:11 -0600 Subject: [PATCH 008/116] chore: emit event on deploy (#12090) --- packages/contracts-bedrock/semver-lock.json | 4 ++-- packages/contracts-bedrock/src/L1/OPStackManager.sol | 5 +++-- packages/contracts-bedrock/test/L1/OPStackManager.t.sol | 5 +++++ 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index 3a60a92d73e3..6d1b97c01f0d 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -32,8 +32,8 @@ "sourceCodeHash": "0xde4df0f9633dc0cdb1c9f634003ea5b0f7c5c1aebc407bc1b2f44c0ecf938649" }, "src/L1/OPStackManager.sol": { - "initCodeHash": "0x5b451782192b8429f6822c88270c4f0dbd10342518c5695ecf4dff7b5ebfb4e4", - "sourceCodeHash": "0x4a9c242ce96471437ec97662d2365a7bda376db765c630a41cbe238811f1df51" + "initCodeHash": "0x92c72b75206e756742df25d67d295e4479e65db1473948b8f53cb4ca642025d5", + "sourceCodeHash": "0x3cbd30c68cad0dd18d49165bd21d94422b7403174f91a733e2398539dadf8656" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xbe2c0c81b3459014f287d8c89cdc0d27dde5d1f44e5d024fa1e4773ddc47c190", diff --git a/packages/contracts-bedrock/src/L1/OPStackManager.sol b/packages/contracts-bedrock/src/L1/OPStackManager.sol index 1a81430ff1e6..12e9a6f5cbdb 100644 --- a/packages/contracts-bedrock/src/L1/OPStackManager.sol +++ b/packages/contracts-bedrock/src/L1/OPStackManager.sol @@ -124,8 +124,8 @@ contract OPStackManager is ISemver, 
Initializable { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.5 - string public constant version = "1.0.0-beta.5"; + /// @custom:semver 1.0.0-beta.6 + string public constant version = "1.0.0-beta.6"; /// @notice Address of the SuperchainConfig contract shared by all chains. SuperchainConfig public immutable superchainConfig; @@ -334,6 +334,7 @@ contract OPStackManager is ISemver, Initializable { // Transfer ownership of the ProxyAdmin from this contract to the specified owner. output.opChainProxyAdmin.transferOwnership(_input.roles.opChainProxyAdminOwner); + emit Deployed(l2ChainId, output.systemConfigProxy); return output; } diff --git a/packages/contracts-bedrock/test/L1/OPStackManager.t.sol b/packages/contracts-bedrock/test/L1/OPStackManager.t.sol index 6d9d7d134c33..ea26a6dae0b3 100644 --- a/packages/contracts-bedrock/test/L1/OPStackManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPStackManager.t.sol @@ -9,6 +9,7 @@ import { DeployOPChain_TestBase } from "test/DeployOPChain.t.sol"; import { OPStackManager } from "src/L1/OPStackManager.sol"; import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; +import { SystemConfig } from "src/L1/SystemConfig.sol"; // Exposes internal functions for testing. contract OPStackManager_Harness is OPStackManager { @@ -33,6 +34,8 @@ contract OPStackManager_Harness is OPStackManager { contract OPStackManager_Deploy_Test is DeployOPChain_TestBase { using stdStorage for StdStorage; + event Deployed(uint256 indexed l2ChainId, SystemConfig indexed systemConfig); + function setUp() public override { DeployOPChain_TestBase.setUp(); @@ -83,6 +86,8 @@ contract OPStackManager_Deploy_Test is DeployOPChain_TestBase { } function test_deploy_succeeds() public { + vm.expectEmit(true, false, true, true); // TODO precompute the system config address. 
+ emit Deployed(doi.l2ChainId(), SystemConfig(address(1))); opsm.deploy(toOPSMDeployInput(doi)); } } From 22094b4dd71921232cb009e7956ff5f0e55973ba Mon Sep 17 00:00:00 2001 From: Hamdi Allam Date: Tue, 24 Sep 2024 19:28:00 -0400 Subject: [PATCH 009/116] check deposit on validateMessage (#12088) --- packages/contracts-bedrock/semver-lock.json | 4 +- .../contracts-bedrock/src/L2/CrossL2Inbox.sol | 7 ++- .../test/L2/CrossL2Inbox.t.sol | 48 +++++++++++++++++++ 3 files changed, 55 insertions(+), 4 deletions(-) diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index 6d1b97c01f0d..d88cbdba6bfd 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -68,8 +68,8 @@ "sourceCodeHash": "0x2dc2284cf7c68e743da50e4113e96ffeab435de2390aeba2eab2f1e8ca411ce9" }, "src/L2/CrossL2Inbox.sol": { - "initCodeHash": "0x79c5deb404605b42ef917b5e7308a9015dacfb71225d957a634e6d0a3a5bc621", - "sourceCodeHash": "0xd219408d99f627770dfcdb3243a183dec7429372787f0aec3bdbff5b3c294f2a" + "initCodeHash": "0x0ee27866b4bf864a0b68ab25ea9559d7f2722b0396d02f2e8e089c6a1a5a6a93", + "sourceCodeHash": "0xe6f453049035e0d77e4d7a92904b448bc17e04dd3d99e738b9af20e20986ce64" }, "src/L2/ETHLiquidity.sol": { "initCodeHash": "0x713c18f95a6a746d0703f475f3ae10c106c9b9ecb64d881a2e61b8969b581371", diff --git a/packages/contracts-bedrock/src/L2/CrossL2Inbox.sol b/packages/contracts-bedrock/src/L2/CrossL2Inbox.sol index 6f86717c4e4d..437e0c62a2e3 100644 --- a/packages/contracts-bedrock/src/L2/CrossL2Inbox.sol +++ b/packages/contracts-bedrock/src/L2/CrossL2Inbox.sol @@ -65,8 +65,8 @@ contract CrossL2Inbox is ICrossL2Inbox, ISemver, TransientReentrancyAware { address internal constant DEPOSITOR_ACCOUNT = 0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001; /// @notice Semantic version. 
- /// @custom:semver 1.0.0-beta.6 - string public constant version = "1.0.0-beta.6"; + /// @custom:semver 1.0.0-beta.7 + string public constant version = "1.0.0-beta.7"; /// @notice Emitted when a cross chain message is being executed. /// @param msgHash Hash of message payload being executed. @@ -164,6 +164,9 @@ contract CrossL2Inbox is ICrossL2Inbox, ISemver, TransientReentrancyAware { /// @param _id Identifier of the message. /// @param _msgHash Hash of the message payload to call target with. function validateMessage(Identifier calldata _id, bytes32 _msgHash) external { + // We need to know if this is being called on a depositTx + if (IL1BlockIsthmus(Predeploys.L1_BLOCK_ATTRIBUTES).isDeposit()) revert NoExecutingDeposits(); + // Check the Identifier. _checkIdentifier(_id); diff --git a/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol b/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol index 99704860c00d..0d3175d41ed1 100644 --- a/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol +++ b/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol @@ -461,6 +461,13 @@ contract CrossL2InboxTest is Test { returnData: abi.encode(true) }); + // Ensure is not a deposit transaction + vm.mockCall({ + callee: Predeploys.L1_BLOCK_ATTRIBUTES, + data: abi.encodeWithSelector(IL1BlockIsthmus.isDeposit.selector), + returnData: abi.encode(false) + }); + // Look for the emit ExecutingMessage event vm.expectEmit(Predeploys.CROSS_L2_INBOX); emit CrossL2Inbox.ExecutingMessage(_messageHash, _id); @@ -469,6 +476,26 @@ contract CrossL2InboxTest is Test { crossL2Inbox.validateMessage(_id, _messageHash); } + function testFuzz_validateMessage_isDeposit_reverts( + ICrossL2Inbox.Identifier calldata _id, + bytes32 _messageHash + ) + external + { + // Ensure it is a deposit transaction + vm.mockCall({ + callee: Predeploys.L1_BLOCK_ATTRIBUTES, + data: abi.encodeWithSelector(IL1BlockIsthmus.isDeposit.selector), + returnData: abi.encode(true) + }); + + // Expect a revert with the 
NoExecutingDeposits selector + vm.expectRevert(NoExecutingDeposits.selector); + + // Call the executeMessage function + crossL2Inbox.validateMessage(_id, _messageHash); + } + /// @dev Tests that the `validateMessage` function reverts when called with an identifier with a timestamp later /// than current block.timestamp. function testFuzz_validateMessage_invalidTimestamp_reverts( @@ -478,6 +505,13 @@ contract CrossL2InboxTest is Test { external setInteropStart { + // Ensure is not a deposit transaction + vm.mockCall({ + callee: Predeploys.L1_BLOCK_ATTRIBUTES, + data: abi.encodeWithSelector(IL1BlockIsthmus.isDeposit.selector), + returnData: abi.encode(false) + }); + // Ensure that the id's timestamp is invalid (greater than the current block timestamp) vm.assume(_id.timestamp > block.timestamp); @@ -500,6 +534,13 @@ contract CrossL2InboxTest is Test { // Ensure that the id's timestamp is invalid (less than or equal to interopStartTime) _id.timestamp = bound(_id.timestamp, 0, crossL2Inbox.interopStart()); + // Ensure is not a deposit transaction + vm.mockCall({ + callee: Predeploys.L1_BLOCK_ATTRIBUTES, + data: abi.encodeWithSelector(IL1BlockIsthmus.isDeposit.selector), + returnData: abi.encode(false) + }); + // Expect a revert with the InvalidTimestamp selector vm.expectRevert(InvalidTimestamp.selector); @@ -527,6 +568,13 @@ contract CrossL2InboxTest is Test { returnData: abi.encode(false) }); + // Ensure is not a deposit transaction + vm.mockCall({ + callee: Predeploys.L1_BLOCK_ATTRIBUTES, + data: abi.encodeWithSelector(IL1BlockIsthmus.isDeposit.selector), + returnData: abi.encode(false) + }); + // Expect a revert with the InvalidChainId selector vm.expectRevert(InvalidChainId.selector); From cbbb45eca2e73eb301d0f37c24e7a2a98c1f1856 Mon Sep 17 00:00:00 2001 From: Paul Lange Date: Wed, 25 Sep 2024 02:13:43 +0200 Subject: [PATCH 010/116] op-node: Remove unused field in `ChannelBank` (#12001) --- op-node/rollup/derive/channel_bank.go | 6 ++---- 
op-node/rollup/derive/channel_bank_test.go | 8 ++++---- op-node/rollup/derive/pipeline.go | 2 +- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/op-node/rollup/derive/channel_bank.go b/op-node/rollup/derive/channel_bank.go index b2efb0d3ce16..8dd689dfadaa 100644 --- a/op-node/rollup/derive/channel_bank.go +++ b/op-node/rollup/derive/channel_bank.go @@ -37,14 +37,13 @@ type ChannelBank struct { channels map[ChannelID]*Channel // channels by ID channelQueue []ChannelID // channels in FIFO order - prev NextFrameProvider - fetcher L1Fetcher + prev NextFrameProvider } var _ ResettableStage = (*ChannelBank)(nil) // NewChannelBank creates a ChannelBank, which should be Reset(origin) before use. -func NewChannelBank(log log.Logger, cfg *rollup.Config, prev NextFrameProvider, fetcher L1Fetcher, m Metrics) *ChannelBank { +func NewChannelBank(log log.Logger, cfg *rollup.Config, prev NextFrameProvider, m Metrics) *ChannelBank { return &ChannelBank{ log: log, spec: rollup.NewChainSpec(cfg), @@ -52,7 +51,6 @@ func NewChannelBank(log log.Logger, cfg *rollup.Config, prev NextFrameProvider, channels: make(map[ChannelID]*Channel), channelQueue: make([]ChannelID, 0, 10), prev: prev, - fetcher: fetcher, } } diff --git a/op-node/rollup/derive/channel_bank_test.go b/op-node/rollup/derive/channel_bank_test.go index 59c82c308f01..33763c23c5e0 100644 --- a/op-node/rollup/derive/channel_bank_test.go +++ b/op-node/rollup/derive/channel_bank_test.go @@ -102,7 +102,7 @@ func TestChannelBankSimple(t *testing.T) { cfg := &rollup.Config{ChannelTimeoutBedrock: 10} - cb := NewChannelBank(testlog.Logger(t, log.LevelCrit), cfg, input, nil, metrics.NoopMetrics) + cb := NewChannelBank(testlog.Logger(t, log.LevelCrit), cfg, input, metrics.NoopMetrics) // Load the first frame out, err := cb.NextData(context.Background()) @@ -146,7 +146,7 @@ func TestChannelBankInterleavedPreCanyon(t *testing.T) { cfg := &rollup.Config{ChannelTimeoutBedrock: 10, CanyonTime: nil} - cb := 
NewChannelBank(testlog.Logger(t, log.LevelCrit), cfg, input, nil, metrics.NoopMetrics) + cb := NewChannelBank(testlog.Logger(t, log.LevelCrit), cfg, input, metrics.NoopMetrics) // Load a:0 out, err := cb.NextData(context.Background()) @@ -211,7 +211,7 @@ func TestChannelBankInterleaved(t *testing.T) { ct := uint64(0) cfg := &rollup.Config{ChannelTimeoutBedrock: 10, CanyonTime: &ct} - cb := NewChannelBank(testlog.Logger(t, log.LevelCrit), cfg, input, nil, metrics.NoopMetrics) + cb := NewChannelBank(testlog.Logger(t, log.LevelCrit), cfg, input, metrics.NoopMetrics) // Load a:0 out, err := cb.NextData(context.Background()) @@ -271,7 +271,7 @@ func TestChannelBankDuplicates(t *testing.T) { cfg := &rollup.Config{ChannelTimeoutBedrock: 10} - cb := NewChannelBank(testlog.Logger(t, log.LevelCrit), cfg, input, nil, metrics.NoopMetrics) + cb := NewChannelBank(testlog.Logger(t, log.LevelCrit), cfg, input, metrics.NoopMetrics) // Load the first frame out, err := cb.NextData(context.Background()) diff --git a/op-node/rollup/derive/pipeline.go b/op-node/rollup/derive/pipeline.go index e4eae7e20303..a06640086fde 100644 --- a/op-node/rollup/derive/pipeline.go +++ b/op-node/rollup/derive/pipeline.go @@ -84,7 +84,7 @@ func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, l1Fetcher L dataSrc := NewDataSourceFactory(log, rollupCfg, l1Fetcher, l1Blobs, altDA) // auxiliary stage for L1Retrieval l1Src := NewL1Retrieval(log, dataSrc, l1Traversal) frameQueue := NewFrameQueue(log, l1Src) - bank := NewChannelBank(log, rollupCfg, frameQueue, l1Fetcher, metrics) + bank := NewChannelBank(log, rollupCfg, frameQueue, metrics) chInReader := NewChannelInReader(rollupCfg, log, bank, metrics) batchQueue := NewBatchQueue(log, rollupCfg, chInReader, l2Source) attrBuilder := NewFetchingAttributesBuilder(rollupCfg, l1Fetcher, l2Source) From 30725498eba13839240e793e36eae47c270322a5 Mon Sep 17 00:00:00 2001 From: Chen Kai <281165273grape@gmail.com> Date: Wed, 25 Sep 2024 12:17:14 +0800 
Subject: [PATCH 011/116] MTCannon: Add add/addi/addu/addiu opcodes tests (#12085) * feat:add cannon add/addi/addu/addiu opcodes test Signed-off-by: Chen Kai <281165273grape@gmail.com> * feat:add cannon add/addu/addi/addiu opcodes test Signed-off-by: Chen Kai <281165273grape@gmail.com> --------- Signed-off-by: Chen Kai <281165273grape@gmail.com> --- cannon/mipsevm/tests/evm_common_test.go | 65 ++++++++++++++++++++++++- 1 file changed, 64 insertions(+), 1 deletion(-) diff --git a/cannon/mipsevm/tests/evm_common_test.go b/cannon/mipsevm/tests/evm_common_test.go index ea6c7b2de957..21aea97a7a14 100644 --- a/cannon/mipsevm/tests/evm_common_test.go +++ b/cannon/mipsevm/tests/evm_common_test.go @@ -115,7 +115,7 @@ func TestEVM(t *testing.T) { } } -func TestEVMSingleStep(t *testing.T) { +func TestEVMSingleStep_Jump(t *testing.T) { var tracer *tracing.Hooks versions := GetMipsVersionTestCases(t) @@ -162,6 +162,69 @@ func TestEVMSingleStep(t *testing.T) { } } +func TestEVMSingleStep_Add(t *testing.T) { + var tracer *tracing.Hooks + + versions := GetMipsVersionTestCases(t) + cases := []struct { + name string + insn uint32 + ifImm bool + rs uint32 + rt uint32 + imm uint16 + expectRD uint32 + expectImm uint32 + }{ + {name: "add", insn: 0x02_32_40_20, ifImm: false, rs: uint32(12), rt: uint32(20), expectRD: uint32(32)}, // add t0, s1, s2 + {name: "addu", insn: 0x02_32_40_21, ifImm: false, rs: uint32(12), rt: uint32(20), expectRD: uint32(32)}, // addu t0, s1, s2 + {name: "addi", insn: 0x22_28_00_28, ifImm: true, rs: uint32(4), rt: uint32(1), imm: uint16(40), expectImm: uint32(44)}, // addi t0, s1, 40 + {name: "addi sign", insn: 0x22_28_ff_fe, ifImm: true, rs: uint32(2), rt: uint32(1), imm: uint16(0xfffe), expectImm: uint32(0)}, // addi t0, s1, -2 + {name: "addiu", insn: 0x26_28_00_28, ifImm: true, rs: uint32(4), rt: uint32(1), imm: uint16(40), expectImm: uint32(44)}, // addiu t0, s1, 40 + } + + for _, v := range versions { + for i, tt := range cases { + testName := fmt.Sprintf("%v 
(%v)", tt.name, v.Name) + t.Run(testName, func(t *testing.T) { + goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(int64(i)), testutil.WithPC(0), testutil.WithNextPC(4)) + state := goVm.GetState() + if tt.ifImm { + state.GetRegistersRef()[8] = tt.rt + state.GetRegistersRef()[17] = tt.rs + } else { + state.GetRegistersRef()[17] = tt.rs + state.GetRegistersRef()[18] = tt.rt + } + state.GetMemory().SetMemory(0, tt.insn) + step := state.GetStep() + + // Setup expectations + expected := testutil.NewExpectedState(state) + expected.Step += 1 + expected.PC = 4 + expected.NextPC = 8 + + if tt.ifImm { + expected.Registers[8] = tt.expectImm + expected.Registers[17] = tt.rs + } else { + expected.Registers[8] = tt.expectRD + expected.Registers[17] = tt.rs + expected.Registers[18] = tt.rt + } + + stepWitness, err := goVm.Step(true) + require.NoError(t, err) + + // Check expectations + expected.Validate(t, state) + testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) + }) + } + } +} + func TestEVM_MMap(t *testing.T) { var tracer *tracing.Hooks From 56502ddc8773dc13b057445111e9aeaa5b8d14f0 Mon Sep 17 00:00:00 2001 From: Inphi Date: Wed, 25 Sep 2024 00:34:05 -0400 Subject: [PATCH 012/116] cannon: Multi VM executor (#12072) * cannon: Multi VM executor * fix run subcmd arg fwding * fix mt prestate * add list subcmd; multicannon in op-stack-go * remove cannon-latest * safer strconv * lint * include .gitkeep in embed * fix .git copy * add detect.go tests * add nosemgrep * review comments * list filtering * add note to MIPS.sol in version stf ref * use fork-exec * minimal flag parsing * load old cannon binaries from docker images * note * --help flag defaults * remove redundant copy from cannon-builder-0 --- Makefile | 4 +- cannon/.gitignore | 1 + cannon/Makefile | 11 +++- cannon/README.md | 2 +- cannon/cmd/load_elf.go | 64 ++++++++++---------- cannon/cmd/run.go | 48 ++++++++------- cannon/cmd/witness.go | 
22 ++++--- cannon/mipsevm/versions/detect.go | 35 +++++++++++ cannon/mipsevm/versions/detect_test.go | 65 ++++++++++++++++++++ cannon/mipsevm/versions/state.go | 25 ++++++++ cannon/multicannon/embeds/.gitkeep | 0 cannon/multicannon/exec.go | 83 ++++++++++++++++++++++++++ cannon/multicannon/list.go | 73 ++++++++++++++++++++++ cannon/multicannon/load_elf.go | 31 ++++++++++ cannon/multicannon/main.go | 35 +++++++++++ cannon/multicannon/run.go | 39 ++++++++++++ cannon/multicannon/util.go | 37 ++++++++++++ cannon/multicannon/util_test.go | 68 +++++++++++++++++++++ cannon/multicannon/witness.go | 32 ++++++++++ op-program/Dockerfile.repro | 4 +- ops/docker/op-stack-go/Dockerfile | 12 +++- 21 files changed, 618 insertions(+), 73 deletions(-) create mode 100644 cannon/mipsevm/versions/detect.go create mode 100644 cannon/mipsevm/versions/detect_test.go create mode 100644 cannon/multicannon/embeds/.gitkeep create mode 100644 cannon/multicannon/exec.go create mode 100644 cannon/multicannon/list.go create mode 100644 cannon/multicannon/load_elf.go create mode 100644 cannon/multicannon/main.go create mode 100644 cannon/multicannon/run.go create mode 100644 cannon/multicannon/util.go create mode 100644 cannon/multicannon/util_test.go create mode 100644 cannon/multicannon/witness.go diff --git a/Makefile b/Makefile index 6b1abdd37f00..4f329a4241e2 100644 --- a/Makefile +++ b/Makefile @@ -142,13 +142,13 @@ $(DEVNET_CANNON_PRESTATE_FILES): make cannon-prestate-mt cannon-prestate: op-program cannon ## Generates prestate using cannon and op-program - ./cannon/bin/cannon load-elf --path op-program/bin/op-program-client.elf --out op-program/bin/prestate.json --meta op-program/bin/meta.json + ./cannon/bin/cannon load-elf --type singlethreaded --path op-program/bin/op-program-client.elf --out op-program/bin/prestate.json --meta op-program/bin/meta.json ./cannon/bin/cannon run --proof-at '=0' --stop-at '=1' --input op-program/bin/prestate.json --meta op-program/bin/meta.json --proof-fmt 
'op-program/bin/%d.json' --output "" mv op-program/bin/0.json op-program/bin/prestate-proof.json .PHONY: cannon-prestate cannon-prestate-mt: op-program cannon ## Generates prestate using cannon and op-program in the multithreaded cannon format - ./cannon/bin/cannon load-elf --type cannon-mt --path op-program/bin/op-program-client.elf --out op-program/bin/prestate-mt.bin.gz --meta op-program/bin/meta-mt.json + ./cannon/bin/cannon load-elf --type multithreaded --path op-program/bin/op-program-client.elf --out op-program/bin/prestate-mt.bin.gz --meta op-program/bin/meta-mt.json ./cannon/bin/cannon run --proof-at '=0' --stop-at '=1' --input op-program/bin/prestate-mt.bin.gz --meta op-program/bin/meta-mt.json --proof-fmt 'op-program/bin/%d-mt.json' --output "" mv op-program/bin/0-mt.json op-program/bin/prestate-proof-mt.json .PHONY: cannon-prestate-mt diff --git a/cannon/.gitignore b/cannon/.gitignore index c3e45199f0ed..68424370890f 100644 --- a/cannon/.gitignore +++ b/cannon/.gitignore @@ -13,3 +13,4 @@ state.json *.pprof *.out bin +multicannon/embeds/cannon* diff --git a/cannon/Makefile b/cannon/Makefile index d6a1d85eff89..7540b88e58f2 100644 --- a/cannon/Makefile +++ b/cannon/Makefile @@ -13,8 +13,15 @@ ifeq ($(shell uname),Darwin) FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic endif -cannon: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/cannon . +cannon-impl: + env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/cannon-impl . 
+ +cannon-embeds: cannon-impl + @cp bin/cannon-impl ./multicannon/embeds/cannon-0 + @cp bin/cannon-impl ./multicannon/embeds/cannon-1 + +cannon: cannon-embeds + env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/cannon ./multicannon/ clean: rm -rf bin diff --git a/cannon/README.md b/cannon/README.md index e9e751ce2ffe..a3b917193901 100644 --- a/cannon/README.md +++ b/cannon/README.md @@ -30,7 +30,7 @@ make cannon # Transform MIPS op-program client binary into first VM state. # This outputs state.json (VM state) and meta.json (for debug symbols). -./bin/cannon load-elf --path=../op-program/bin/op-program-client.elf +./bin/cannon load-elf --type singlethreaded --path=../op-program/bin/op-program-client.elf # Run cannon emulator (with example inputs) # Note that the server-mode op-program command is passed into cannon (after the --), diff --git a/cannon/cmd/load_elf.go b/cannon/cmd/load_elf.go index a6b9e0e5897a..816eb7c02e46 100644 --- a/cannon/cmd/load_elf.go +++ b/cannon/cmd/load_elf.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum-optimism/optimism/cannon/mipsevm/singlethreaded" "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" "github.com/ethereum-optimism/optimism/cannon/serialize" + openum "github.com/ethereum-optimism/optimism/op-service/enum" "github.com/ethereum-optimism/optimism/op-service/ioutil" "github.com/ethereum-optimism/optimism/op-service/jsonutil" ) @@ -19,9 +20,8 @@ import ( var ( LoadELFVMTypeFlag = &cli.StringFlag{ Name: "type", - Usage: "VM type to create state for. Options are 'cannon' (default), 'cannon-mt'", - Value: "cannon", - Required: false, + Usage: "VM type to create state for. 
Valid options: " + openum.EnumString(stateVersions()), + Required: true, } LoadELFPathFlag = &cli.PathFlag{ Name: "path", @@ -43,21 +43,12 @@ var ( } ) -type VMType string - -var ( - cannonVMType VMType = "cannon" - mtVMType VMType = "cannon-mt" -) - -func vmTypeFromString(ctx *cli.Context) (VMType, error) { - if vmTypeStr := ctx.String(LoadELFVMTypeFlag.Name); vmTypeStr == string(cannonVMType) { - return cannonVMType, nil - } else if vmTypeStr == string(mtVMType) { - return mtVMType, nil - } else { - return "", fmt.Errorf("unknown VM type %q", vmTypeStr) +func stateVersions() []string { + vers := make([]string, len(versions.StateVersionTypes)) + for i, v := range versions.StateVersionTypes { + vers[i] = v.String() } + return vers } func LoadELF(ctx *cli.Context) error { @@ -73,9 +64,12 @@ func LoadELF(ctx *cli.Context) error { var createInitialState func(f *elf.File) (mipsevm.FPVMState, error) var patcher = program.PatchStack - if vmType, err := vmTypeFromString(ctx); err != nil { + ver, err := versions.ParseStateVersion(ctx.String(LoadELFVMTypeFlag.Name)) + if err != nil { return err - } else if vmType == cannonVMType { + } + switch ver { + case versions.VersionSingleThreaded: createInitialState = func(f *elf.File) (mipsevm.FPVMState, error) { return program.LoadELF(f, singlethreaded.CreateInitialState) } @@ -86,12 +80,12 @@ func LoadELF(ctx *cli.Context) error { } return program.PatchStack(state) } - } else if vmType == mtVMType { + case versions.VersionMultiThreaded: createInitialState = func(f *elf.File) (mipsevm.FPVMState, error) { return program.LoadELF(f, multithreaded.CreateInitialState) } - } else { - return fmt.Errorf("invalid VM type: %q", vmType) + default: + return fmt.Errorf("unsupported state version: %d (%s)", ver, ver.String()) } state, err := createInitialState(elfProgram) @@ -118,15 +112,19 @@ func LoadELF(ctx *cli.Context) error { return serialize.Write(ctx.Path(LoadELFOutFlag.Name), versionedState, OutFilePerm) } -var LoadELFCommand = 
&cli.Command{ - Name: "load-elf", - Usage: "Load ELF file into Cannon state", - Description: "Load ELF file into Cannon state", - Action: LoadELF, - Flags: []cli.Flag{ - LoadELFVMTypeFlag, - LoadELFPathFlag, - LoadELFOutFlag, - LoadELFMetaFlag, - }, +func CreateLoadELFCommand(action cli.ActionFunc) *cli.Command { + return &cli.Command{ + Name: "load-elf", + Usage: "Load ELF file into Cannon state", + Description: "Load ELF file into Cannon state", + Action: action, + Flags: []cli.Flag{ + LoadELFVMTypeFlag, + LoadELFPathFlag, + LoadELFOutFlag, + LoadELFMetaFlag, + }, + } } + +var LoadELFCommand = CreateLoadELFCommand(LoadELF) diff --git a/cannon/cmd/run.go b/cannon/cmd/run.go index 03836d087d98..21f4f7c29825 100644 --- a/cannon/cmd/run.go +++ b/cannon/cmd/run.go @@ -496,26 +496,30 @@ func Run(ctx *cli.Context) error { return nil } -var RunCommand = &cli.Command{ - Name: "run", - Usage: "Run VM step(s) and generate proof data to replicate onchain.", - Description: "Run VM step(s) and generate proof data to replicate onchain. See flags to match when to output a proof, a snapshot, or to stop early.", - Action: Run, - Flags: []cli.Flag{ - RunInputFlag, - RunOutputFlag, - RunProofAtFlag, - RunProofFmtFlag, - RunSnapshotAtFlag, - RunSnapshotFmtFlag, - RunStopAtFlag, - RunStopAtPreimageFlag, - RunStopAtPreimageTypeFlag, - RunStopAtPreimageLargerThanFlag, - RunMetaFlag, - RunInfoAtFlag, - RunPProfCPU, - RunDebugFlag, - RunDebugInfoFlag, - }, +func CreateRunCommand(action cli.ActionFunc) *cli.Command { + return &cli.Command{ + Name: "run", + Usage: "Run VM step(s) and generate proof data to replicate onchain.", + Description: "Run VM step(s) and generate proof data to replicate onchain. 
See flags to match when to output a proof, a snapshot, or to stop early.", + Action: action, + Flags: []cli.Flag{ + RunInputFlag, + RunOutputFlag, + RunProofAtFlag, + RunProofFmtFlag, + RunSnapshotAtFlag, + RunSnapshotFmtFlag, + RunStopAtFlag, + RunStopAtPreimageFlag, + RunStopAtPreimageTypeFlag, + RunStopAtPreimageLargerThanFlag, + RunMetaFlag, + RunInfoAtFlag, + RunPProfCPU, + RunDebugFlag, + RunDebugInfoFlag, + }, + } } + +var RunCommand = CreateRunCommand(Run) diff --git a/cannon/cmd/witness.go b/cannon/cmd/witness.go index a4f2e60ab6a6..753438493f95 100644 --- a/cannon/cmd/witness.go +++ b/cannon/cmd/witness.go @@ -39,13 +39,17 @@ func Witness(ctx *cli.Context) error { return nil } -var WitnessCommand = &cli.Command{ - Name: "witness", - Usage: "Convert a Cannon JSON state into a binary witness", - Description: "Convert a Cannon JSON state into a binary witness. The hash of the witness is written to stdout", - Action: Witness, - Flags: []cli.Flag{ - WitnessInputFlag, - WitnessOutputFlag, - }, +func CreateWitnessCommand(action cli.ActionFunc) *cli.Command { + return &cli.Command{ + Name: "witness", + Usage: "Convert a Cannon JSON state into a binary witness", + Description: "Convert a Cannon JSON state into a binary witness. 
The hash of the witness is written to stdout", + Action: action, + Flags: []cli.Flag{ + WitnessInputFlag, + WitnessOutputFlag, + }, + } } + +var WitnessCommand = CreateWitnessCommand(Witness) diff --git a/cannon/mipsevm/versions/detect.go b/cannon/mipsevm/versions/detect.go new file mode 100644 index 000000000000..ca4b9be9c51d --- /dev/null +++ b/cannon/mipsevm/versions/detect.go @@ -0,0 +1,35 @@ +package versions + +import ( + "fmt" + "io" + + "github.com/ethereum-optimism/optimism/cannon/serialize" + "github.com/ethereum-optimism/optimism/op-service/ioutil" +) + +func DetectVersion(path string) (StateVersion, error) { + if !serialize.IsBinaryFile(path) { + return VersionSingleThreaded, nil + } + + var f io.ReadCloser + f, err := ioutil.OpenDecompressed(path) + if err != nil { + return 0, fmt.Errorf("failed to open file %q: %w", path, err) + } + defer f.Close() + + var ver StateVersion + bin := serialize.NewBinaryReader(f) + if err := bin.ReadUInt(&ver); err != nil { + return 0, err + } + + switch ver { + case VersionSingleThreaded, VersionMultiThreaded: + return ver, nil + default: + return 0, fmt.Errorf("%w: %d", ErrUnknownVersion, ver) + } +} diff --git a/cannon/mipsevm/versions/detect_test.go b/cannon/mipsevm/versions/detect_test.go new file mode 100644 index 000000000000..38a90f178694 --- /dev/null +++ b/cannon/mipsevm/versions/detect_test.go @@ -0,0 +1,65 @@ +package versions + +import ( + "os" + "path/filepath" + "testing" + + "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/singlethreaded" + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/stretchr/testify/require" +) + +func TestDetectVersion(t *testing.T) { + t.Run("SingleThreadedJSON", func(t *testing.T) { + state, err := NewFromState(singlethreaded.CreateEmptyState()) + require.NoError(t, err) + path := writeToFile(t, "state.json", state) + version, err := DetectVersion(path) + require.NoError(t, err) + 
require.Equal(t, VersionSingleThreaded, version) + }) + + t.Run("SingleThreadedBinary", func(t *testing.T) { + state, err := NewFromState(singlethreaded.CreateEmptyState()) + require.NoError(t, err) + path := writeToFile(t, "state.bin.gz", state) + version, err := DetectVersion(path) + require.NoError(t, err) + require.Equal(t, VersionSingleThreaded, version) + }) + + t.Run("MultiThreadedBinary", func(t *testing.T) { + state, err := NewFromState(multithreaded.CreateEmptyState()) + require.NoError(t, err) + path := writeToFile(t, "state.bin.gz", state) + version, err := DetectVersion(path) + require.NoError(t, err) + require.Equal(t, VersionMultiThreaded, version) + }) +} + +func TestDetectVersionInvalid(t *testing.T) { + t.Run("bad gzip", func(t *testing.T) { + dir := t.TempDir() + filename := "state.bin.gz" + path := filepath.Join(dir, filename) + require.NoError(t, os.WriteFile(path, []byte("ekans"), 0o644)) + + _, err := DetectVersion(path) + require.ErrorContains(t, err, "failed to open file") + }) + + t.Run("unknown version", func(t *testing.T) { + dir := t.TempDir() + filename := "state.bin.gz" + path := filepath.Join(dir, filename) + const badVersion = 0xFF + err := ioutil.WriteCompressedBytes(path, []byte{badVersion}, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) + require.NoError(t, err) + + _, err = DetectVersion(path) + require.ErrorIs(t, err, ErrUnknownVersion) + }) +} diff --git a/cannon/mipsevm/versions/state.go b/cannon/mipsevm/versions/state.go index fcf7b1864f38..afd2a94204b3 100644 --- a/cannon/mipsevm/versions/state.go +++ b/cannon/mipsevm/versions/state.go @@ -16,6 +16,7 @@ import ( type StateVersion uint8 const ( + // VersionSingleThreaded is the version of the Cannon STF found in op-contracts/v1.6.0 - https://github.com/ethereum-optimism/optimism/blob/op-contracts/v1.6.0/packages/contracts-bedrock/src/cannon/MIPS.sol VersionSingleThreaded StateVersion = iota VersionMultiThreaded ) @@ -25,6 +26,8 @@ var ( ErrJsonNotSupported = errors.New("json not 
supported") ) +var StateVersionTypes = []StateVersion{VersionSingleThreaded, VersionMultiThreaded} + func LoadStateFromFile(path string) (*VersionedState, error) { if !serialize.IsBinaryFile(path) { // Always use singlethreaded for JSON states @@ -103,3 +106,25 @@ func (s *VersionedState) MarshalJSON() ([]byte, error) { } return json.Marshal(s.FPVMState) } + +func (s StateVersion) String() string { + switch s { + case VersionSingleThreaded: + return "singlethreaded" + case VersionMultiThreaded: + return "multithreaded" + default: + return "unknown" + } +} + +func ParseStateVersion(ver string) (StateVersion, error) { + switch ver { + case "singlethreaded": + return VersionSingleThreaded, nil + case "multithreaded": + return VersionMultiThreaded, nil + default: + return StateVersion(0), errors.New("unknown state version") + } +} diff --git a/cannon/multicannon/embeds/.gitkeep b/cannon/multicannon/embeds/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/cannon/multicannon/exec.go b/cannon/multicannon/exec.go new file mode 100644 index 000000000000..1372c035f560 --- /dev/null +++ b/cannon/multicannon/exec.go @@ -0,0 +1,83 @@ +package main + +import ( + "context" + "embed" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + + "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" +) + +// use the all directive to ensure the .gitkeep file is retained and avoid compiler errors + +//go:embed all:embeds +var vmFS embed.FS + +const baseDir = "embeds" + +func ExecuteCannon(ctx context.Context, args []string, ver versions.StateVersion) error { + switch ver { + case versions.VersionSingleThreaded, versions.VersionMultiThreaded: + default: + return errors.New("unsupported version") + } + + cannonProgramName := vmFilename(ver) + cannonProgramBin, err := vmFS.ReadFile(cannonProgramName) + if err != nil { + return err + } + cannonProgramPath, err := extractTempFile(filepath.Base(cannonProgramName), cannonProgramBin) + if err != nil { + 
fmt.Fprintf(os.Stderr, "Error extracting %s: %v\n", cannonProgramName, err) + os.Exit(1) + } + defer os.Remove(cannonProgramPath) + + if err := os.Chmod(cannonProgramPath, 0755); err != nil { + fmt.Fprintf(os.Stderr, "Error setting execute permission for %s: %v\n", cannonProgramName, err) + os.Exit(1) + } + + // nosemgrep: go.lang.security.audit.dangerous-exec-command.dangerous-exec-command + cmd := exec.CommandContext(ctx, cannonProgramPath, args...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err = cmd.Start() + if err != nil { + return fmt.Errorf("unable to launch cannon-impl program: %w", err) + } + if err := cmd.Wait(); err != nil { + var exitErr *exec.ExitError + if errors.As(err, &exitErr) { + // relay exit code to the parent process + os.Exit(exitErr.ExitCode()) + } else { + return fmt.Errorf("failed to wait for cannon-impl program: %w", err) + } + } + return nil +} + +func extractTempFile(name string, data []byte) (string, error) { + tempDir := os.TempDir() + tempFile, err := os.CreateTemp(tempDir, name+"-*") + if err != nil { + return "", err + } + defer tempFile.Close() + + if _, err := tempFile.Write(data); err != nil { + return "", err + } + + return tempFile.Name(), nil +} + +func vmFilename(ver versions.StateVersion) string { + return fmt.Sprintf("%s/cannon-%d", baseDir, ver) +} diff --git a/cannon/multicannon/list.go b/cannon/multicannon/list.go new file mode 100644 index 000000000000..6e9e8a68b65a --- /dev/null +++ b/cannon/multicannon/list.go @@ -0,0 +1,73 @@ +package main + +import ( + "fmt" + "math" + "strconv" + "strings" + + "github.com/urfave/cli/v2" + + "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" +) + +func List(ctx *cli.Context) error { + return list() +} + +func list() error { + fmt.Println("Available cannon versions:") + artifacts, err := getArtifacts() + if err != nil { + return err + } + for _, art := range artifacts { + if art.isValid() { + fmt.Printf("filename: %s\tversion: %s (%d)\n", art.filename, 
versions.StateVersion(art.ver), art.ver) + } else { + fmt.Printf("filename: %s\tversion: %s\n", art.filename, "unknown") + } + } + return nil +} + +func getArtifacts() ([]artifact, error) { + var ret []artifact + entries, err := vmFS.ReadDir(baseDir) + if err != nil { + return nil, err + } + for _, entry := range entries { + filename := entry.Name() + toks := strings.Split(filename, "-") + if len(toks) != 2 { + continue + } + if toks[0] != "cannon" { + continue + } + ver, err := strconv.ParseUint(toks[1], 10, 8) + if err != nil { + ret = append(ret, artifact{filename, math.MaxUint64}) + continue + } + ret = append(ret, artifact{filename, ver}) + } + return ret, nil +} + +type artifact struct { + filename string + ver uint64 +} + +func (a artifact) isValid() bool { + return a.ver != math.MaxUint64 +} + +var ListCommand = &cli.Command{ + Name: "list", + Usage: "List embedded Cannon VM implementations", + Description: "List embedded Cannon VM implementations", + Action: List, +} diff --git a/cannon/multicannon/load_elf.go b/cannon/multicannon/load_elf.go new file mode 100644 index 000000000000..cbe1fda46303 --- /dev/null +++ b/cannon/multicannon/load_elf.go @@ -0,0 +1,31 @@ +package main + +import ( + "fmt" + "os" + + "github.com/ethereum-optimism/optimism/cannon/cmd" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" + "github.com/urfave/cli/v2" +) + +func LoadELF(ctx *cli.Context) error { + if len(os.Args) == 2 && os.Args[2] == "--help" { + if err := list(); err != nil { + return err + } + fmt.Println("use `--type --help` to get more detailed help") + } + + typ, err := parseFlag(os.Args[1:], "--type") + if err != nil { + return err + } + ver, err := versions.ParseStateVersion(typ) + if err != nil { + return err + } + return ExecuteCannon(ctx.Context, os.Args[1:], ver) +} + +var LoadELFCommand = cmd.CreateLoadELFCommand(LoadELF) diff --git a/cannon/multicannon/main.go b/cannon/multicannon/main.go new file mode 100644 index 000000000000..31dde280d8d0 
--- /dev/null +++ b/cannon/multicannon/main.go @@ -0,0 +1,35 @@ +package main + +import ( + "context" + "errors" + "fmt" + "os" + + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" + "github.com/urfave/cli/v2" +) + +func main() { + app := cli.NewApp() + app.Name = "multicannon" + app.Usage = "MIPS Fault Proof tool" + app.Description = "MIPS Fault Proof tool" + app.Commands = []*cli.Command{ + LoadELFCommand, + WitnessCommand, + RunCommand, + ListCommand, + } + ctx := ctxinterrupt.WithCancelOnInterrupt(context.Background()) + err := app.RunContext(ctx, os.Args) + if err != nil { + if errors.Is(err, ctx.Err()) { + _, _ = fmt.Fprintf(os.Stderr, "command interrupted") + os.Exit(130) + } else { + _, _ = fmt.Fprintf(os.Stderr, "error: %v", err) + os.Exit(1) + } + } +} diff --git a/cannon/multicannon/run.go b/cannon/multicannon/run.go new file mode 100644 index 000000000000..532cf317fb21 --- /dev/null +++ b/cannon/multicannon/run.go @@ -0,0 +1,39 @@ +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli/v2" + + "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" +) + +func Run(ctx *cli.Context) error { + fmt.Printf("args %v\n", os.Args[:]) + if len(os.Args) == 3 && os.Args[2] == "--help" { + if err := list(); err != nil { + return err + } + fmt.Println("use `--input --help` to get more detailed help") + } + + inputPath, err := parsePathFlag(os.Args[1:], "--input") + if err != nil { + return err + } + version, err := versions.DetectVersion(inputPath) + if err != nil { + return err + } + return ExecuteCannon(ctx.Context, os.Args[1:], version) +} + +// var RunCommand = cmd.CreateRunCommand(Run) +var RunCommand = &cli.Command{ + Name: "run", + Usage: "Run VM step(s) and generate proof data to replicate onchain.", + Description: "Run VM step(s) and generate proof data to replicate onchain. 
See flags to match when to output a proof, a snapshot, or to stop early.", + Action: Run, + SkipFlagParsing: true, +} diff --git a/cannon/multicannon/util.go b/cannon/multicannon/util.go new file mode 100644 index 000000000000..ea484c6ce2d2 --- /dev/null +++ b/cannon/multicannon/util.go @@ -0,0 +1,37 @@ +package main + +import ( + "errors" + "fmt" + "os" + "strings" +) + +// parseFlag reads a flag argument. It assumes the flag has an argument +func parseFlag(args []string, flag string) (string, error) { + for i := 0; i < len(args); i++ { + arg := args[i] + if strings.HasPrefix(arg, flag) { + toks := strings.Split(arg, "=") + if len(toks) == 2 { + return toks[1], nil + } else if i+1 == len(args) { + return "", fmt.Errorf("flag needs an argument: %s", flag) + } else { + return args[i+1], nil + } + } + } + return "", fmt.Errorf("missing flag: %s", flag) +} + +func parsePathFlag(args []string, flag string) (string, error) { + path, err := parseFlag(args, flag) + if err != nil { + return "", err + } + if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) { + return "", fmt.Errorf("file `%s` does not exist", path) + } + return path, nil +} diff --git a/cannon/multicannon/util_test.go b/cannon/multicannon/util_test.go new file mode 100644 index 000000000000..9997b1315a8f --- /dev/null +++ b/cannon/multicannon/util_test.go @@ -0,0 +1,68 @@ +package main + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseFlag(t *testing.T) { + cases := []struct { + name string + args string + flag string + expect string + expectErr string + }{ + { + name: "bar=one", + args: "--foo --bar=one --baz", + flag: "--bar", + expect: "one", + }, + { + name: "bar one", + args: "--foo --bar one --baz", + flag: "--bar", + expect: "one", + }, + { + name: "bar one first flag", + args: "--bar one --foo two --baz three", + flag: "--bar", + expect: "one", + }, + { + name: "bar one last flag", + args: "--foo --baz --bar one", + flag: "--bar", + expect: 
"one", + }, + { + name: "non-existent flag", + args: "--foo one", + flag: "--bar", + expectErr: "missing flag", + }, + { + name: "empty args", + args: "", + flag: "--foo", + expectErr: "missing flag", + }, + } + for _, tt := range cases { + tt := tt + t.Run(tt.name, func(t *testing.T) { + args := strings.Split(tt.args, " ") + result, err := parseFlag(args, tt.flag) + if tt.expectErr != "" { + require.ErrorContains(t, err, tt.expectErr) + } else { + require.NoError(t, err) + require.Equal(t, tt.expect, result) + } + }) + } +} diff --git a/cannon/multicannon/witness.go b/cannon/multicannon/witness.go new file mode 100644 index 000000000000..077d0d3f1aed --- /dev/null +++ b/cannon/multicannon/witness.go @@ -0,0 +1,32 @@ +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli/v2" + + "github.com/ethereum-optimism/optimism/cannon/cmd" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" +) + +func Witness(ctx *cli.Context) error { + if len(os.Args) == 3 && os.Args[2] == "--help" { + if err := list(); err != nil { + return err + } + fmt.Println("use `--input --help` to get more detailed help") + } + + inputPath, err := parsePathFlag(os.Args[1:], "--input") + if err != nil { + return err + } + version, err := versions.DetectVersion(inputPath) + if err != nil { + return err + } + return ExecuteCannon(ctx.Context, os.Args[1:], version) +} + +var WitnessCommand = cmd.CreateWitnessCommand(Witness) diff --git a/op-program/Dockerfile.repro b/op-program/Dockerfile.repro index 12e52ec5b5a5..57f65bb72b81 100644 --- a/op-program/Dockerfile.repro +++ b/op-program/Dockerfile.repro @@ -35,8 +35,8 @@ RUN --mount=type=cache,target=/root/.cache/go-build cd op-program && make op-pro GOOS=linux GOARCH=mips GOMIPS=softfloat GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_PROGRAM_VERSION" # Run the op-program-client.elf binary directly through cannon's load-elf subcommand. 
-RUN /app/cannon/bin/cannon load-elf --path /app/op-program/bin/op-program-client.elf --out /app/op-program/bin/prestate.json --meta "" -RUN /app/cannon/bin/cannon load-elf --type cannon-mt --path /app/op-program/bin/op-program-client.elf --out /app/op-program/bin/prestate-mt.bin.gz --meta "" +RUN /app/cannon/bin/cannon load-elf --type singlethreaded --path /app/op-program/bin/op-program-client.elf --out /app/op-program/bin/prestate.json --meta "" +RUN /app/cannon/bin/cannon load-elf --type multithreaded --path /app/op-program/bin/op-program-client.elf --out /app/op-program/bin/prestate-mt.bin.gz --meta "" # Generate the prestate proof containing the absolute pre-state hash. RUN /app/cannon/bin/cannon run --proof-at '=0' --stop-at '=1' --input /app/op-program/bin/prestate.json --meta "" --proof-fmt '/app/op-program/bin/%d.json' --output "" diff --git a/ops/docker/op-stack-go/Dockerfile b/ops/docker/op-stack-go/Dockerfile index a395968fd07a..540abfeb0466 100644 --- a/ops/docker/op-stack-go/Dockerfile +++ b/ops/docker/op-stack-go/Dockerfile @@ -46,8 +46,16 @@ ARG TARGETARCH # Build the Go services, utilizing caches and share the many common packages. # The "id" defaults to the value of "target", the cache will thus be reused during this build. # "sharing" defaults to "shared", the cache will thus be available to other concurrent docker builds. 
+ +# For now fetch the v1 cannon binary from the op-challenger image +#FROM --platform=$BUILDPLATFORM us-docker.pkg.dev/oplabs-tools-artifacts/images/op-challenger:v1.1.0 AS cannon-builder-0 + FROM --platform=$BUILDPLATFORM builder AS cannon-builder -ARG CANNON_VERSION=v0.0.0 +# note: bump this CANNON_VERSION when the VM behavior changes +ARG CANNON_VERSION=v1.0.0 +# uncomment these lines once there's a new Cannon version available +#COPY --from=cannon-builder-0 /usr/local/bin/cannon ./cannon/multicannon/embeds/cannon-0 +#COPY --from=cannon-builder-0 /usr/local/bin/cannon ./cannon/multicannon/embeds/cannon-1 RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build cd cannon && make cannon \ GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$CANNON_VERSION" @@ -158,4 +166,4 @@ CMD ["op-supervisor"] FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE AS op-deployer-target COPY --from=op-deployer-builder /app/op-chain-ops/bin/op-deployer /usr/local/bin/ -CMD ["op-deployer"] \ No newline at end of file +CMD ["op-deployer"] From decf4513ae61b39a2fa6f40afcfe79e2fef619cc Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Wed, 25 Sep 2024 14:58:03 +1000 Subject: [PATCH 013/116] cannon: Build the cannon docker image and publish on cannon release tags (#12100) * cannon: Build the cannon docker image and publish on cannon release tags. * ci: Add docker builds as dependencies so they prevent merging if they fail. * cannon: Include specific implementation builds in final docker image. 
--- .circleci/config.yml | 14 +++++++++++++- ops/docker/op-stack-go/Dockerfile | 1 + ops/scripts/ci-docker-tag-op-stack-release.sh | 2 +- 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 6cab0308e927..0dffb662b116 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1673,6 +1673,12 @@ workflows: - op-e2e-fault-proof-tests - op-e2e-action-tests - op-e2e-action-tests-altda + # Not needed for the devnet but we want to make sure they build successfully + - cannon-docker-build + - op-dispute-mon-docker-build + - op-program-docker-build + - op-supervisor-docker-build + - proofs-tools-docker-build - docker-build: name: <>-docker-build docker_tags: <>,<> @@ -1691,6 +1697,7 @@ workflows: - da-server - op-supervisor - op-deployer + - cannon - cannon-prestate: requires: - go-mod-download @@ -1734,7 +1741,7 @@ workflows: type: approval filters: tags: - only: /^(da-server|ci-builder(-rust)?|proofs-tools|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)\/v.*/ + only: /^(da-server|ci-builder(-rust)?|proofs-tools|cannon|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)\/v.*/ branches: ignore: /.*/ # Standard (medium) cross-platform docker images go here @@ -1752,6 +1759,7 @@ workflows: - op-ufm - op-supervisor - op-deployer + - cannon name: <>-docker-release docker_tags: <> platforms: "linux/amd64,linux/arm64" @@ -1781,6 +1789,7 @@ workflows: - op-ufm - op-supervisor - op-deployer + - cannon name: <>-cross-platform requires: - op-node-docker-release @@ -1793,6 +1802,7 @@ workflows: - op-ufm-docker-release - op-supervisor-docker-release - op-deployer-docker-release + - cannon-docker-release # Standard (xlarge) AMD-only docker images go here - docker-build: matrix: @@ -1922,6 +1932,7 @@ workflows: - op-dispute-mon - op-conductor - op-supervisor + - cannon name: <>-docker-publish docker_tags: <>,<> platforms: "linux/amd64,linux/arm64" @@ -1941,6 +1952,7 @@ workflows: - op-dispute-mon - op-conductor - op-supervisor + - cannon name: 
<>-cross-platform requires: - <>-docker-publish diff --git a/ops/docker/op-stack-go/Dockerfile b/ops/docker/op-stack-go/Dockerfile index 540abfeb0466..0266e7010a76 100644 --- a/ops/docker/op-stack-go/Dockerfile +++ b/ops/docker/op-stack-go/Dockerfile @@ -116,6 +116,7 @@ RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE AS cannon-target COPY --from=cannon-builder /app/cannon/bin/cannon /usr/local/bin/ +COPY --from=cannon-builder /app/cannon/multicannon/embeds/* /usr/local/bin/ CMD ["cannon"] FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE AS op-program-target diff --git a/ops/scripts/ci-docker-tag-op-stack-release.sh b/ops/scripts/ci-docker-tag-op-stack-release.sh index 09ae8ad81df3..45dd92094994 100755 --- a/ops/scripts/ci-docker-tag-op-stack-release.sh +++ b/ops/scripts/ci-docker-tag-op-stack-release.sh @@ -6,7 +6,7 @@ DOCKER_REPO=$1 GIT_TAG=$2 GIT_SHA=$3 -IMAGE_NAME=$(echo "$GIT_TAG" | grep -Eow '^(ci-builder(-rust)?|da-server|proofs-tools|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)' || true) +IMAGE_NAME=$(echo "$GIT_TAG" | grep -Eow '^(ci-builder(-rust)?|da-server|proofs-tools|cannon|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)' || true) if [ -z "$IMAGE_NAME" ]; then echo "image name could not be parsed from git tag '$GIT_TAG'" exit 1 From 4f1e8a7036e1f85fb1754ca1bb08a461c7c50ac4 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Wed, 25 Sep 2024 20:54:30 +0800 Subject: [PATCH 014/116] remove unused function (#12087) --- packages/contracts-bedrock/test/Predeploys.t.sol | 7 ------- 1 file changed, 7 deletions(-) diff --git a/packages/contracts-bedrock/test/Predeploys.t.sol b/packages/contracts-bedrock/test/Predeploys.t.sol index 0d9e4879fd6b..6c9ac3750a03 100644 --- a/packages/contracts-bedrock/test/Predeploys.t.sol +++ b/packages/contracts-bedrock/test/Predeploys.t.sol @@ -12,13 +12,6 @@ contract PredeploysBaseTest is CommonTest { /// Internal helpers 
////////////////////////////////////////////////////// - /// @dev Returns true if the address is an interop predeploy. - function _isInterop(address _addr) internal pure returns (bool) { - return _addr == Predeploys.CROSS_L2_INBOX || _addr == Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER - || _addr == Predeploys.SUPERCHAIN_WETH || _addr == Predeploys.ETH_LIQUIDITY - || _addr == Predeploys.OPTIMISM_SUPERCHAIN_ERC20_FACTORY || _addr == Predeploys.OPTIMISM_SUPERCHAIN_ERC20_BEACON; - } - /// @dev Returns true if the address is a predeploy that has a different code in the interop mode. function _interopCodeDiffer(address _addr) internal pure returns (bool) { return _addr == Predeploys.L1_BLOCK_ATTRIBUTES || _addr == Predeploys.L2_STANDARD_BRIDGE; From 106993f81203ead1ff060f463f92bfcaa84821f1 Mon Sep 17 00:00:00 2001 From: George Knee Date: Wed, 25 Sep 2024 14:28:33 +0100 Subject: [PATCH 015/116] op-batcher: Move decision about data availability type to channel submission time (#12002) * tidy up godoc * move data availability config decision to channel submission time instead of channel creation time Also, cache the ChannelConfig whenever we switch DA type so it is used by default for new channels * fix test * formatting changes * respond to PR comments * add unit test for Requeue method * reduce number of txs in test block * improve test (more blocks in queue) * hoist pending tx management up * wip * tidy up test * wip * fix * refactor to do requeue before calling nextTxData * introduce ErrInsufficientData do not return nextTxData from channel which was discarded by requeue * run test until nonzero data is returned by TxData * break up and improve error logic * fix test to anticipate ErrInsufficientData * after requeuing, call nextTxData again * remove unecessary checks * move err declaration to top of file * add some comments and whitespace * hoist lock back up to TxData * rename variable to blocksToRequeue * remove panic * add comment * use deterministic rng and 
nonecompressor in test * test: increase block size to fill channel more quickly * remove ErrInsufficientData replace with io.EOF as before * tidy up * typo --- op-batcher/batcher/channel.go | 11 +- op-batcher/batcher/channel_builder.go | 4 +- op-batcher/batcher/channel_config.go | 4 +- op-batcher/batcher/channel_config_provider.go | 4 + op-batcher/batcher/channel_manager.go | 103 +++++++++-- op-batcher/batcher/channel_manager_test.go | 172 ++++++++++++++++++ op-batcher/batcher/channel_test.go | 4 +- op-batcher/batcher/driver.go | 9 +- op-batcher/batcher/driver_test.go | 1 + 9 files changed, 283 insertions(+), 29 deletions(-) diff --git a/op-batcher/batcher/channel.go b/op-batcher/batcher/channel.go index e35124d8525a..de68fa588a0a 100644 --- a/op-batcher/batcher/channel.go +++ b/op-batcher/batcher/channel.go @@ -155,9 +155,9 @@ func (s *channel) ID() derive.ChannelID { return s.channelBuilder.ID() } -// NextTxData returns the next tx data packet. -// If cfg.MultiFrameTxs is false, it returns txData with a single frame. -// If cfg.MultiFrameTxs is true, it will read frames from its channel builder +// NextTxData dequeues the next frames from the channel and returns them encoded in a tx data packet. +// If cfg.UseBlobs is false, it returns txData with a single frame. +// If cfg.UseBlobs is true, it will read frames from its channel builder // until it either doesn't have more frames or the target number of frames is reached. // // NextTxData should only be called after HasTxData returned true. 
@@ -177,10 +177,11 @@ func (s *channel) NextTxData() txData { } func (s *channel) HasTxData() bool { - if s.IsFull() || !s.cfg.UseBlobs { + if s.IsFull() || // If the channel is full, we should start to submit it + !s.cfg.UseBlobs { // If using calldata, we only send one frame per tx return s.channelBuilder.HasFrame() } - // collect enough frames if channel is not full yet + // Collect enough frames if channel is not full yet return s.channelBuilder.PendingFrames() >= int(s.cfg.MaxFramesPerTx()) } diff --git a/op-batcher/batcher/channel_builder.go b/op-batcher/batcher/channel_builder.go index cb4345e419d4..0c16f3156d9b 100644 --- a/op-batcher/batcher/channel_builder.go +++ b/op-batcher/batcher/channel_builder.go @@ -417,12 +417,12 @@ func (c *ChannelBuilder) HasFrame() bool { } // PendingFrames returns the number of pending frames in the frames queue. -// It is larger zero iff HasFrames() returns true. +// It is larger zero iff HasFrame() returns true. func (c *ChannelBuilder) PendingFrames() int { return len(c.frames) } -// NextFrame returns the next available frame. +// NextFrame dequeues the next available frame. // HasFrame must be called prior to check if there's a next frame available. // Panics if called when there's no next frame. func (c *ChannelBuilder) NextFrame() frameData { diff --git a/op-batcher/batcher/channel_config.go b/op-batcher/batcher/channel_config.go index 63e0d5d5deef..45dc1d4dcfa4 100644 --- a/op-batcher/batcher/channel_config.go +++ b/op-batcher/batcher/channel_config.go @@ -51,8 +51,8 @@ type ChannelConfig struct { UseBlobs bool } -// ChannelConfig returns a copy of itself. This makes a ChannelConfig a static -// ChannelConfigProvider of itself. +// ChannelConfig returns a copy of the receiver. +// This allows the receiver to be a static ChannelConfigProvider of itself. 
func (cc ChannelConfig) ChannelConfig() ChannelConfig { return cc } diff --git a/op-batcher/batcher/channel_config_provider.go b/op-batcher/batcher/channel_config_provider.go index c65e83b8289f..6cf5b0db6863 100644 --- a/op-batcher/batcher/channel_config_provider.go +++ b/op-batcher/batcher/channel_config_provider.go @@ -48,6 +48,10 @@ func NewDynamicEthChannelConfig(lgr log.Logger, return dec } +// ChannelConfig will perform an estimate of the cost per byte for +// calldata and for blobs, given current market conditions: it will return +// the appropriate ChannelConfig depending on which is cheaper. It makes +// assumptions about the typical makeup of channel data. func (dec *DynamicEthChannelConfig) ChannelConfig() ChannelConfig { ctx, cancel := context.WithTimeout(context.Background(), dec.timeout) defer cancel() diff --git a/op-batcher/batcher/channel_manager.go b/op-batcher/batcher/channel_manager.go index 1f22565c94c5..3bfff303db4b 100644 --- a/op-batcher/batcher/channel_manager.go +++ b/op-batcher/batcher/channel_manager.go @@ -35,6 +35,8 @@ type channelManager struct { blocks []*types.Block // The latest L1 block from all the L2 blocks in the most recently closed channel l1OriginLastClosedChannel eth.BlockID + // The default ChannelConfig to use for the next channel + defaultCfg ChannelConfig // last block hash - for reorg detection tip common.Hash @@ -54,6 +56,7 @@ func NewChannelManager(log log.Logger, metr metrics.Metricer, cfgProvider Channe log: log, metr: metr, cfgProvider: cfgProvider, + defaultCfg: cfgProvider.ChannelConfig(), rollupCfg: rollupCfg, txChannels: make(map[string]*channel), } @@ -133,7 +136,8 @@ func (s *channelManager) removePendingChannel(channel *channel) { s.channelQueue = append(s.channelQueue[:index], s.channelQueue[index+1:]...) } -// nextTxData pops off s.datas & handles updating the internal state +// nextTxData dequeues frames from the channel and returns them encoded in a transaction. 
+// It also updates the internal tx -> channels mapping func (s *channelManager) nextTxData(channel *channel) (txData, error) { if channel == nil || !channel.HasTxData() { s.log.Trace("no next tx data") @@ -146,12 +150,51 @@ func (s *channelManager) nextTxData(channel *channel) (txData, error) { // TxData returns the next tx data that should be submitted to L1. // -// If the pending channel is +// If the current channel is // full, it only returns the remaining frames of this channel until it got // successfully fully sent to L1. It returns io.EOF if there's no pending tx data. +// +// It will decide whether to switch DA type automatically. +// When switching DA type, the channelManager state will be rebuilt +// with a new ChannelConfig. func (s *channelManager) TxData(l1Head eth.BlockID) (txData, error) { s.mu.Lock() defer s.mu.Unlock() + channel, err := s.getReadyChannel(l1Head) + if err != nil { + return emptyTxData, err + } + // If the channel has already started being submitted, + // return now and ensure no requeueing happens + if !channel.NoneSubmitted() { + return s.nextTxData(channel) + } + + // Call provider method to reassess optimal DA type + newCfg := s.cfgProvider.ChannelConfig() + + // No change: + if newCfg.UseBlobs == s.defaultCfg.UseBlobs { + s.log.Debug("Recomputing optimal ChannelConfig: no need to switch DA type", + "useBlobs", s.defaultCfg.UseBlobs) + return s.nextTxData(channel) + } + + // Change: + s.log.Info("Recomputing optimal ChannelConfig: changing DA type and requeing blocks...", + "useBlobsBefore", s.defaultCfg.UseBlobs, + "useBlobsAfter", newCfg.UseBlobs) + s.Requeue(newCfg) + channel, err = s.getReadyChannel(l1Head) + if err != nil { + return emptyTxData, err + } + return s.nextTxData(channel) +} + +// getReadyChannel returns the next channel ready to submit data, or an error. +// It adds blocks from the block queue to the current channel and generates frames for it. 
+func (s *channelManager) getReadyChannel(l1Head eth.BlockID) (*channel, error) { var firstWithTxData *channel for _, ch := range s.channelQueue { if ch.HasTxData() { @@ -160,27 +203,31 @@ func (s *channelManager) TxData(l1Head eth.BlockID) (txData, error) { } } - dataPending := firstWithTxData != nil && firstWithTxData.HasTxData() + dataPending := firstWithTxData != nil s.log.Debug("Requested tx data", "l1Head", l1Head, "txdata_pending", dataPending, "blocks_pending", len(s.blocks)) - // Short circuit if there is pending tx data or the channel manager is closed. - if dataPending || s.closed { - return s.nextTxData(firstWithTxData) + // Short circuit if there is pending tx data or the channel manager is closed + if dataPending { + return firstWithTxData, nil + } + + if s.closed { + return nil, io.EOF } // No pending tx data, so we have to add new blocks to the channel // If we have no saved blocks, we will not be able to create valid frames if len(s.blocks) == 0 { - return txData{}, io.EOF + return nil, io.EOF } if err := s.ensureChannelWithSpace(l1Head); err != nil { - return txData{}, err + return nil, err } if err := s.processBlocks(); err != nil { - return txData{}, err + return nil, err } // Register current L1 head only after all pending blocks have been @@ -189,10 +236,10 @@ func (s *channelManager) TxData(l1Head eth.BlockID) (txData, error) { s.registerL1Block(l1Head) if err := s.outputFrames(); err != nil { - return txData{}, err + return nil, err } - return s.nextTxData(s.currentChannel) + return s.currentChannel, nil } // ensureChannelWithSpace ensures currentChannel is populated with a channel that has @@ -203,7 +250,10 @@ func (s *channelManager) ensureChannelWithSpace(l1Head eth.BlockID) error { return nil } - cfg := s.cfgProvider.ChannelConfig() + // We reuse the ChannelConfig from the last channel. + // This will be reassessed at channel submission-time, + // but this is our best guess at the appropriate values for now. 
+ cfg := s.defaultCfg pc, err := newChannel(s.log, s.metr, cfg, s.rollupCfg, s.l1OriginLastClosedChannel.Number) if err != nil { return fmt.Errorf("creating new channel: %w", err) @@ -228,7 +278,7 @@ func (s *channelManager) ensureChannelWithSpace(l1Head eth.BlockID) error { return nil } -// registerL1Block registers the given block at the pending channel. +// registerL1Block registers the given block at the current channel. func (s *channelManager) registerL1Block(l1Head eth.BlockID) { s.currentChannel.CheckTimeout(l1Head.Number) s.log.Debug("new L1-block registered at channel builder", @@ -238,7 +288,7 @@ func (s *channelManager) registerL1Block(l1Head eth.BlockID) { ) } -// processBlocks adds blocks from the blocks queue to the pending channel until +// processBlocks adds blocks from the blocks queue to the current channel until // either the queue got exhausted or the channel is full. func (s *channelManager) processBlocks() error { var ( @@ -288,6 +338,7 @@ func (s *channelManager) processBlocks() error { return nil } +// outputFrames generates frames for the current channel, and computes and logs the compression ratio func (s *channelManager) outputFrames() error { if err := s.currentChannel.OutputFrames(); err != nil { return fmt.Errorf("creating frames with channel builder: %w", err) @@ -339,6 +390,7 @@ func (s *channelManager) outputFrames() error { func (s *channelManager) AddL2Block(block *types.Block) error { s.mu.Lock() defer s.mu.Unlock() + if s.tip != (common.Hash{}) && s.tip != block.ParentHash() { return ErrReorg } @@ -414,3 +466,26 @@ func (s *channelManager) Close() error { } return nil } + +// Requeue rebuilds the channel manager state by +// rewinding blocks back from the channel queue, and setting the defaultCfg. 
+func (s *channelManager) Requeue(newCfg ChannelConfig) { + newChannelQueue := []*channel{} + blocksToRequeue := []*types.Block{} + for _, channel := range s.channelQueue { + if !channel.NoneSubmitted() { + newChannelQueue = append(newChannelQueue, channel) + continue + } + blocksToRequeue = append(blocksToRequeue, channel.channelBuilder.Blocks()...) + } + + // We put the blocks back at the front of the queue: + s.blocks = append(blocksToRequeue, s.blocks...) + // Channels which where already being submitted are put back + s.channelQueue = newChannelQueue + s.currentChannel = nil + // Setting the defaultCfg will cause new channels + // to pick up the new ChannelConfig + s.defaultCfg = newCfg +} diff --git a/op-batcher/batcher/channel_manager_test.go b/op-batcher/batcher/channel_manager_test.go index a6271df9a535..5df5feacf4bf 100644 --- a/op-batcher/batcher/channel_manager_test.go +++ b/op-batcher/batcher/channel_manager_test.go @@ -1,6 +1,7 @@ package batcher import ( + "errors" "io" "math/big" "math/rand" @@ -483,3 +484,174 @@ func TestChannelManager_ChannelCreation(t *testing.T) { }) } } + +// FakeDynamicEthChannelConfig is a ChannelConfigProvider which always returns +// either a blob- or calldata-based config depending on its internal chooseBlob +// switch. 
+type FakeDynamicEthChannelConfig struct { + DynamicEthChannelConfig + chooseBlobs bool +} + +func (f *FakeDynamicEthChannelConfig) ChannelConfig() ChannelConfig { + if f.chooseBlobs { + return f.blobConfig + } + return f.calldataConfig +} + +func newFakeDynamicEthChannelConfig(lgr log.Logger, + reqTimeout time.Duration) *FakeDynamicEthChannelConfig { + + calldataCfg := ChannelConfig{ + MaxFrameSize: 120_000 - 1, + TargetNumFrames: 1, + } + blobCfg := ChannelConfig{ + MaxFrameSize: eth.MaxBlobDataSize - 1, + TargetNumFrames: 3, // gets closest to amortized fixed tx costs + UseBlobs: true, + } + calldataCfg.InitNoneCompressor() + blobCfg.InitNoneCompressor() + + return &FakeDynamicEthChannelConfig{ + chooseBlobs: false, + DynamicEthChannelConfig: *NewDynamicEthChannelConfig( + lgr, + reqTimeout, + &mockGasPricer{}, + blobCfg, + calldataCfg), + } +} + +// TestChannelManager_TxData seeds the channel manager with blocks and triggers the +// blocks->channels pipeline multiple times. Values are chosen such that a channel +// is created under one set of market conditions, and then submitted under a different +// set of market conditions. The test asserts that the DA type is changed at channel +// submission time. 
+func TestChannelManager_TxData(t *testing.T) { + + type TestCase struct { + name string + chooseBlobsWhenChannelCreated bool + chooseBlobsWhenChannelSubmitted bool + } + + tt := []TestCase{ + {"blobs->blobs", true, true}, + {"calldata->calldata", false, false}, + {"blobs->calldata", true, false}, + {"calldata->blobs", false, true}, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + l := testlog.Logger(t, log.LevelCrit) + + cfg := newFakeDynamicEthChannelConfig(l, 1000) + + cfg.chooseBlobs = tc.chooseBlobsWhenChannelCreated + m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig) + require.Equal(t, tc.chooseBlobsWhenChannelCreated, m.defaultCfg.UseBlobs) + + // Seed channel manager with a block + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + blockA := derivetest.RandomL2BlockWithChainId(rng, 200, defaultTestRollupConfig.L2ChainID) + m.blocks = []*types.Block{blockA} + + // Call TxData a first time to trigger blocks->channels pipeline + _, err := m.TxData(eth.BlockID{}) + require.ErrorIs(t, err, io.EOF) + + // The test requires us to have something in the channel queue + // at this point, but not yet ready to send and not full + require.NotEmpty(t, m.channelQueue) + require.False(t, m.channelQueue[0].IsFull()) + + // Simulate updated market conditions + // by possibly flipping the state of the + // fake channel provider + l.Info("updating market conditions", "chooseBlobs", tc.chooseBlobsWhenChannelSubmitted) + cfg.chooseBlobs = tc.chooseBlobsWhenChannelSubmitted + + // Add a block and call TxData until + // we get some data to submit + var data txData + for { + m.blocks = []*types.Block{blockA} + data, err = m.TxData(eth.BlockID{}) + if err == nil && data.Len() > 0 { + break + } + if !errors.Is(err, io.EOF) { + require.NoError(t, err) + } + } + + require.Equal(t, tc.chooseBlobsWhenChannelSubmitted, data.asBlob) + require.Equal(t, tc.chooseBlobsWhenChannelSubmitted, m.defaultCfg.UseBlobs) + }) + } + +} + +// 
TestChannelManager_Requeue seeds the channel manager with blocks, +// takes a state snapshot, triggers the blocks->channels pipeline, +// and then calls Requeue. Finally, it asserts the channel manager's +// state is equal to the snapshot. It repeats this for a channel +// which has a pending transaction and verifies that Requeue is then +// a noop. +func TestChannelManager_Requeue(t *testing.T) { + l := testlog.Logger(t, log.LevelCrit) + cfg := channelManagerTestConfig(100, derive.SingularBatchType) + m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig) + + // Seed channel manager with blocks + rng := rand.New(rand.NewSource(99)) + blockA := derivetest.RandomL2BlockWithChainId(rng, 10, defaultTestRollupConfig.L2ChainID) + blockB := derivetest.RandomL2BlockWithChainId(rng, 10, defaultTestRollupConfig.L2ChainID) + + // This is the snapshot of channel manager state we want to reinstate + // when we requeue + stateSnapshot := []*types.Block{blockA, blockB} + m.blocks = stateSnapshot + require.Empty(t, m.channelQueue) + + // Trigger the blocks -> channelQueue data pipelining + require.NoError(t, m.ensureChannelWithSpace(eth.BlockID{})) + require.NotEmpty(t, m.channelQueue) + require.NoError(t, m.processBlocks()) + + // Assert that at least one block was processed into the channel + require.NotContains(t, m.blocks, blockA) + + // Call the function we are testing + m.Requeue(m.defaultCfg) + + // Ensure we got back to the state above + require.Equal(t, m.blocks, stateSnapshot) + require.Empty(t, m.channelQueue) + + // Trigger the blocks -> channelQueue data pipelining again + require.NoError(t, m.ensureChannelWithSpace(eth.BlockID{})) + require.NotEmpty(t, m.channelQueue) + require.NoError(t, m.processBlocks()) + + // Assert that at least one block was processed into the channel + require.NotContains(t, m.blocks, blockA) + + // Now mark the 0th channel in the queue as already + // starting to send on chain + channel0 := m.channelQueue[0] + 
channel0.pendingTransactions["foo"] = txData{} + require.False(t, channel0.NoneSubmitted()) + + // Call the function we are testing + m.Requeue(m.defaultCfg) + + // The requeue shouldn't affect the pending channel + require.Contains(t, m.channelQueue, channel0) + require.NotContains(t, m.blocks, blockA) +} diff --git a/op-batcher/batcher/channel_test.go b/op-batcher/batcher/channel_test.go index 7fa8030e771e..8dec9d9e108b 100644 --- a/op-batcher/batcher/channel_test.go +++ b/op-batcher/batcher/channel_test.go @@ -86,8 +86,8 @@ func TestChannelManager_NextTxData(t *testing.T) { require.Equal(t, txData{}, returnedTxData) // Set the pending channel - // The nextTxData function should still return EOF - // since the pending channel has no frames + // The nextTxData function should still return io.EOF + // since the current channel has no frames require.NoError(t, m.ensureChannelWithSpace(eth.BlockID{})) channel := m.currentChannel require.NotNil(t, channel) diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index 0b7d36d960dd..968e6de3e71a 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -190,10 +190,11 @@ func (l *BatchSubmitter) StopBatchSubmitting(ctx context.Context) error { // loadBlocksIntoState loads all blocks since the previous stored block // It does the following: -// 1. Fetch the sync status of the sequencer -// 2. Check if the sync status is valid or if we are all the way up to date -// 3. Check if it needs to initialize state OR it is lagging (todo: lagging just means race condition?) -// 4. Load all new blocks into the local state. +// 1. Fetch the sync status of the sequencer +// 2. Check if the sync status is valid or if we are all the way up to date +// 3. Check if it needs to initialize state OR it is lagging (todo: lagging just means race condition?) +// 4. Load all new blocks into the local state. 
+// // If there is a reorg, it will reset the last stored block but not clear the internal state so // the state can be flushed to L1. func (l *BatchSubmitter) loadBlocksIntoState(ctx context.Context) error { diff --git a/op-batcher/batcher/driver_test.go b/op-batcher/batcher/driver_test.go index 1c5ace753771..5ce0983bfe1a 100644 --- a/op-batcher/batcher/driver_test.go +++ b/op-batcher/batcher/driver_test.go @@ -50,6 +50,7 @@ func setup(t *testing.T) (*BatchSubmitter, *mockL2EndpointProvider) { Log: testlog.Logger(t, log.LevelDebug), Metr: metrics.NoopMetrics, RollupConfig: cfg, + ChannelConfig: defaultTestChannelConfig(), EndpointProvider: ep, }), ep } From b0e6d13737513ca838fccdaf2f1d26e03a68601a Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Wed, 25 Sep 2024 23:51:33 +1000 Subject: [PATCH 016/116] cannon: Support the --version argument for multicannon. (#12101) Pick up the version from the release tag automatically. --- cannon/Makefile | 4 ++-- cannon/multicannon/main.go | 11 +++++++++++ cannon/multicannon/version/version.go | 6 ++++++ ops/docker/op-stack-go/Dockerfile | 3 +-- 4 files changed, 20 insertions(+), 4 deletions(-) create mode 100644 cannon/multicannon/version/version.go diff --git a/cannon/Makefile b/cannon/Makefile index 7540b88e58f2..e914ad542eae 100644 --- a/cannon/Makefile +++ b/cannon/Makefile @@ -4,8 +4,8 @@ VERSION ?= v0.0.0 LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-program/version.Version=$(VERSION) -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-program/version.Meta=$(VERSION_META) +LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/cannon/multicannon/version.Version=$(VERSION) +LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/cannon/multicannon/version.Meta=$(VERSION_META) LDFLAGS := -ldflags "$(LDFLAGSSTRING)" # Use the old Apple linker to workaround broken xcode - 
https://github.com/golang/go/issues/65169 diff --git a/cannon/multicannon/main.go b/cannon/multicannon/main.go index 31dde280d8d0..e496eba880ba 100644 --- a/cannon/multicannon/main.go +++ b/cannon/multicannon/main.go @@ -6,15 +6,26 @@ import ( "fmt" "os" + "github.com/ethereum-optimism/optimism/cannon/multicannon/version" + opservice "github.com/ethereum-optimism/optimism/op-service" "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" "github.com/urfave/cli/v2" ) +var ( + GitCommit = "" + GitDate = "" +) + +// VersionWithMeta holds the textual version string including the metadata. +var VersionWithMeta = opservice.FormatVersion(version.Version, GitCommit, GitDate, version.Meta) + func main() { app := cli.NewApp() app.Name = "multicannon" app.Usage = "MIPS Fault Proof tool" app.Description = "MIPS Fault Proof tool" + app.Version = VersionWithMeta app.Commands = []*cli.Command{ LoadELFCommand, WitnessCommand, diff --git a/cannon/multicannon/version/version.go b/cannon/multicannon/version/version.go new file mode 100644 index 000000000000..2456f656d45c --- /dev/null +++ b/cannon/multicannon/version/version.go @@ -0,0 +1,6 @@ +package version + +var ( + Version = "v0.0.0" + Meta = "dev" +) diff --git a/ops/docker/op-stack-go/Dockerfile b/ops/docker/op-stack-go/Dockerfile index 0266e7010a76..35f14d19a439 100644 --- a/ops/docker/op-stack-go/Dockerfile +++ b/ops/docker/op-stack-go/Dockerfile @@ -51,8 +51,7 @@ ARG TARGETARCH #FROM --platform=$BUILDPLATFORM us-docker.pkg.dev/oplabs-tools-artifacts/images/op-challenger:v1.1.0 AS cannon-builder-0 FROM --platform=$BUILDPLATFORM builder AS cannon-builder -# note: bump this CANNON_VERSION when the VM behavior changes -ARG CANNON_VERSION=v1.0.0 +ARG CANNON_VERSION=v0.0.0 # uncomment these lines once there's a new Cannon version available #COPY --from=cannon-builder-0 /usr/local/bin/cannon ./cannon/multicannon/embeds/cannon-0 #COPY --from=cannon-builder-0 /usr/local/bin/cannon ./cannon/multicannon/embeds/cannon-1 From 
da9bc723d83570f3e8e1a4388228588bba8112d2 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Wed, 25 Sep 2024 22:36:41 +0800 Subject: [PATCH 017/116] show log for `devnet` (#12102) * show log for devnet * add comment --- bedrock-devnet/devnet/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bedrock-devnet/devnet/__init__.py b/bedrock-devnet/devnet/__init__.py index ccf080c18566..8a3fb0ee4c99 100644 --- a/bedrock-devnet/devnet/__init__.py +++ b/bedrock-devnet/devnet/__init__.py @@ -11,6 +11,8 @@ from multiprocessing import Process, Queue import concurrent.futures from collections import namedtuple +# This import is necessary for devnet logs to be shown. +from . import log_setup pjoin = os.path.join From b55f4d737594ca9d93a977b9221e8fc1c7a5b51f Mon Sep 17 00:00:00 2001 From: Blaine Malone Date: Wed, 25 Sep 2024 11:05:22 -0400 Subject: [PATCH 018/116] rename: OPStackManafer -> OPContractsManager (#12091) --- op-chain-ops/Makefile | 2 +- .../deployer/integration_test/apply_test.go | 2 +- .../{opsm => opcm}/implementations.go | 16 +- .../deployer/{opsm => opcm}/l2genesis.go | 2 +- .../deployer/{opsm => opcm}/opchain.go | 4 +- .../{opsm => opcm}/standard-versions.toml | 0 .../deployer/{opsm => opcm}/standard.go | 2 +- .../deployer/{opsm => opcm}/superchain.go | 2 +- .../deployer/pipeline/implementations.go | 12 +- op-chain-ops/deployer/pipeline/l2genesis.go | 6 +- op-chain-ops/deployer/pipeline/opchain.go | 10 +- op-chain-ops/deployer/pipeline/superchain.go | 8 +- op-chain-ops/deployer/state/state.go | 2 +- op-chain-ops/interopgen/configs.go | 4 +- op-chain-ops/interopgen/deploy.go | 17 +- op-chain-ops/interopgen/deployments.go | 4 +- op-chain-ops/interopgen/recipe.go | 6 +- .../scripts/DeployImplementations.s.sol | 204 +++++++++--------- .../scripts/DeployOPChain.s.sol | 38 ++-- .../scripts/DeploySuperchain.s.sol | 2 +- packages/contracts-bedrock/semver-lock.json | 4 +- ...ckManager.json => OPContractsManager.json} | 16 +- ...op.json => 
OPContractsManagerInterop.json} | 16 +- ...ckManager.json => OPContractsManager.json} | 4 +- ...op.json => OPContractsManagerInterop.json} | 4 +- ...tackManager.sol => OPContractsManager.sol} | 8 +- ...erop.sol => OPContractsManagerInterop.sol} | 6 +- .../test/DeployImplementations.t.sol | 18 +- .../test/DeployOPChain.t.sol | 22 +- ...Manager.t.sol => OPContractsManager.t.sol} | 48 ++--- packages/contracts-bedrock/test/Specs.t.sol | 48 ++--- .../test/vendor/Initializable.t.sol | 4 +- 32 files changed, 275 insertions(+), 266 deletions(-) rename op-chain-ops/deployer/{opsm => opcm}/implementations.go (88%) rename op-chain-ops/deployer/{opsm => opcm}/l2genesis.go (99%) rename op-chain-ops/deployer/{opsm => opcm}/opchain.go (98%) rename op-chain-ops/deployer/{opsm => opcm}/standard-versions.toml (100%) rename op-chain-ops/deployer/{opsm => opcm}/standard.go (88%) rename op-chain-ops/deployer/{opsm => opcm}/superchain.go (99%) rename packages/contracts-bedrock/snapshots/abi/{OPStackManager.json => OPContractsManager.json} (95%) rename packages/contracts-bedrock/snapshots/abi/{OPStackManagerInterop.json => OPContractsManagerInterop.json} (95%) rename packages/contracts-bedrock/snapshots/storageLayout/{OPStackManager.json => OPContractsManager.json} (84%) rename packages/contracts-bedrock/snapshots/storageLayout/{OPStackManagerInterop.json => OPContractsManagerInterop.json} (84%) rename packages/contracts-bedrock/src/L1/{OPStackManager.sol => OPContractsManager.sol} (98%) rename packages/contracts-bedrock/src/L1/{OPStackManagerInterop.sol => OPContractsManagerInterop.sol} (91%) rename packages/contracts-bedrock/test/L1/{OPStackManager.t.sol => OPContractsManager.t.sol} (71%) diff --git a/op-chain-ops/Makefile b/op-chain-ops/Makefile index 262a989bc465..fd3cc9ad67b3 100644 --- a/op-chain-ops/Makefile +++ b/op-chain-ops/Makefile @@ -46,6 +46,6 @@ fuzz: sync-standard-version: - curl -Lo ./deployer/opsm/standard-versions.toml 
https://raw.githubusercontent.com/ethereum-optimism/superchain-registry/refs/heads/main/validation/standard/standard-versions.toml + curl -Lo ./deployer/opcm/standard-versions.toml https://raw.githubusercontent.com/ethereum-optimism/superchain-registry/refs/heads/main/validation/standard/standard-versions.toml .PHONY: test fuzz op-deployer sync-standard-version \ No newline at end of file diff --git a/op-chain-ops/deployer/integration_test/apply_test.go b/op-chain-ops/deployer/integration_test/apply_test.go index f3bcd5b3e4bd..4399e0b887d3 100644 --- a/op-chain-ops/deployer/integration_test/apply_test.go +++ b/op-chain-ops/deployer/integration_test/apply_test.go @@ -151,7 +151,7 @@ func TestEndToEndApply(t *testing.T) { {"SuperchainConfigImpl", st.SuperchainDeployment.SuperchainConfigImplAddress}, {"ProtocolVersionsProxy", st.SuperchainDeployment.ProtocolVersionsProxyAddress}, {"ProtocolVersionsImpl", st.SuperchainDeployment.ProtocolVersionsImplAddress}, - {"OpsmProxy", st.ImplementationsDeployment.OpsmProxyAddress}, + {"OpcmProxy", st.ImplementationsDeployment.OpcmProxyAddress}, {"DelayedWETHImpl", st.ImplementationsDeployment.DelayedWETHImplAddress}, {"OptimismPortalImpl", st.ImplementationsDeployment.OptimismPortalImplAddress}, {"PreimageOracleSingleton", st.ImplementationsDeployment.PreimageOracleSingletonAddress}, diff --git a/op-chain-ops/deployer/opsm/implementations.go b/op-chain-ops/deployer/opcm/implementations.go similarity index 88% rename from op-chain-ops/deployer/opsm/implementations.go rename to op-chain-ops/deployer/opcm/implementations.go index d60330440abc..fec30d94cbd5 100644 --- a/op-chain-ops/deployer/opsm/implementations.go +++ b/op-chain-ops/deployer/opcm/implementations.go @@ -1,4 +1,4 @@ -package opsm +package opcm import ( "fmt" @@ -16,7 +16,7 @@ type DeployImplementationsInput struct { ChallengePeriodSeconds *big.Int ProofMaturityDelaySeconds *big.Int DisputeGameFinalityDelaySeconds *big.Int - // Release version to set OPSM 
implementations for, of the format `op-contracts/vX.Y.Z`. + // Release version to set OPCM implementations for, of the format `op-contracts/vX.Y.Z`. Release string SuperchainConfigProxy common.Address ProtocolVersionsProxy common.Address @@ -31,8 +31,8 @@ func (input *DeployImplementationsInput) InputSet() bool { } type DeployImplementationsOutput struct { - OpsmProxy common.Address - OpsmImpl common.Address + OpcmProxy common.Address + OpcmImpl common.Address DelayedWETHImpl common.Address OptimismPortalImpl common.Address PreimageOracleSingleton common.Address @@ -84,12 +84,12 @@ func DeployImplementations( } defer cleanupDeploy() - opsmContract := "OPStackManager" + opcmContract := "OPContractsManager" if input.UseInterop { - opsmContract = "OPStackManagerInterop" + opcmContract = "OPContractsManagerInterop" } - if err := host.RememberOnLabel("OPStackManager", opsmContract+".sol", opsmContract); err != nil { - return output, fmt.Errorf("failed to link OPStackManager label: %w", err) + if err := host.RememberOnLabel("OPContractsManager", opcmContract+".sol", opcmContract); err != nil { + return output, fmt.Errorf("failed to link OPContractsManager label: %w", err) } // So we can see in detail where the SystemConfig interop initializer fails diff --git a/op-chain-ops/deployer/opsm/l2genesis.go b/op-chain-ops/deployer/opcm/l2genesis.go similarity index 99% rename from op-chain-ops/deployer/opsm/l2genesis.go rename to op-chain-ops/deployer/opcm/l2genesis.go index 3567df71858f..8b6e123dad3f 100644 --- a/op-chain-ops/deployer/opsm/l2genesis.go +++ b/op-chain-ops/deployer/opcm/l2genesis.go @@ -1,4 +1,4 @@ -package opsm +package opcm import ( "fmt" diff --git a/op-chain-ops/deployer/opsm/opchain.go b/op-chain-ops/deployer/opcm/opchain.go similarity index 98% rename from op-chain-ops/deployer/opsm/opchain.go rename to op-chain-ops/deployer/opcm/opchain.go index d600f200dcf1..d9685182b6e1 100644 --- a/op-chain-ops/deployer/opsm/opchain.go +++ 
b/op-chain-ops/deployer/opcm/opchain.go @@ -1,4 +1,4 @@ -package opsm +package opcm import ( "fmt" @@ -26,7 +26,7 @@ type DeployOPChainInput struct { BasefeeScalar uint32 BlobBaseFeeScalar uint32 L2ChainId *big.Int - OpsmProxy common.Address + OpcmProxy common.Address } func (input *DeployOPChainInput) InputSet() bool { diff --git a/op-chain-ops/deployer/opsm/standard-versions.toml b/op-chain-ops/deployer/opcm/standard-versions.toml similarity index 100% rename from op-chain-ops/deployer/opsm/standard-versions.toml rename to op-chain-ops/deployer/opcm/standard-versions.toml diff --git a/op-chain-ops/deployer/opsm/standard.go b/op-chain-ops/deployer/opcm/standard.go similarity index 88% rename from op-chain-ops/deployer/opsm/standard.go rename to op-chain-ops/deployer/opcm/standard.go index 56f0d7ada37b..9f182ca4685c 100644 --- a/op-chain-ops/deployer/opsm/standard.go +++ b/op-chain-ops/deployer/opcm/standard.go @@ -1,4 +1,4 @@ -package opsm +package opcm import "embed" diff --git a/op-chain-ops/deployer/opsm/superchain.go b/op-chain-ops/deployer/opcm/superchain.go similarity index 99% rename from op-chain-ops/deployer/opsm/superchain.go rename to op-chain-ops/deployer/opcm/superchain.go index d27f85499021..34804cc2bbdf 100644 --- a/op-chain-ops/deployer/opsm/superchain.go +++ b/op-chain-ops/deployer/opcm/superchain.go @@ -1,4 +1,4 @@ -package opsm +package opcm import ( "fmt" diff --git a/op-chain-ops/deployer/pipeline/implementations.go b/op-chain-ops/deployer/pipeline/implementations.go index 0dcda8feea19..5c5a1e99287c 100644 --- a/op-chain-ops/deployer/pipeline/implementations.go +++ b/op-chain-ops/deployer/pipeline/implementations.go @@ -5,7 +5,7 @@ import ( "fmt" "math/big" - "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opsm" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opcm" "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/state" "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" 
"github.com/ethereum-optimism/optimism/op-chain-ops/script" @@ -22,7 +22,7 @@ func DeployImplementations(ctx context.Context, env *Env, artifactsFS foundry.St lgr.Info("deploying implementations") var dump *foundry.ForgeAllocs - var dio opsm.DeployImplementationsOutput + var dio opcm.DeployImplementationsOutput var err error err = CallScriptBroadcast( ctx, @@ -37,9 +37,9 @@ func DeployImplementations(ctx context.Context, env *Env, artifactsFS foundry.St Handler: func(host *script.Host) error { host.SetEnvVar("IMPL_SALT", st.Create2Salt.Hex()[2:]) host.ImportState(st.SuperchainDeployment.StateDump) - dio, err = opsm.DeployImplementations( + dio, err = opcm.DeployImplementations( host, - opsm.DeployImplementationsInput{ + opcm.DeployImplementationsInput{ Salt: st.Create2Salt, WithdrawalDelaySeconds: big.NewInt(604800), MinProposalSizeBytes: big.NewInt(126000), @@ -50,7 +50,7 @@ func DeployImplementations(ctx context.Context, env *Env, artifactsFS foundry.St SuperchainConfigProxy: st.SuperchainDeployment.SuperchainConfigProxyAddress, ProtocolVersionsProxy: st.SuperchainDeployment.ProtocolVersionsProxyAddress, SuperchainProxyAdmin: st.SuperchainDeployment.ProxyAdminAddress, - StandardVersionsToml: opsm.StandardVersionsData, + StandardVersionsToml: opcm.StandardVersionsData, UseInterop: false, }, ) @@ -70,7 +70,7 @@ func DeployImplementations(ctx context.Context, env *Env, artifactsFS foundry.St } st.ImplementationsDeployment = &state.ImplementationsDeployment{ - OpsmProxyAddress: dio.OpsmProxy, + OpcmProxyAddress: dio.OpcmProxy, DelayedWETHImplAddress: dio.DelayedWETHImpl, OptimismPortalImplAddress: dio.OptimismPortalImpl, PreimageOracleSingletonAddress: dio.PreimageOracleSingleton, diff --git a/op-chain-ops/deployer/pipeline/l2genesis.go b/op-chain-ops/deployer/pipeline/l2genesis.go index f74c6e833620..25aa316c78a5 100644 --- a/op-chain-ops/deployer/pipeline/l2genesis.go +++ b/op-chain-ops/deployer/pipeline/l2genesis.go @@ -8,7 +8,7 @@ import ( "fmt" "math/big" - 
"github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opsm" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opcm" "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/state" "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" "github.com/ethereum-optimism/optimism/op-chain-ops/script" @@ -47,8 +47,8 @@ func GenerateL2Genesis(ctx context.Context, env *Env, artifactsFS foundry.StatDi Client: env.L1Client, Broadcaster: DiscardBroadcaster, Handler: func(host *script.Host) error { - err := opsm.L2Genesis(host, &opsm.L2GenesisInput{ - L1Deployments: opsm.L1Deployments{ + err := opcm.L2Genesis(host, &opcm.L2GenesisInput{ + L1Deployments: opcm.L1Deployments{ L1CrossDomainMessengerProxy: thisChainState.L1CrossDomainMessengerProxyAddress, L1StandardBridgeProxy: thisChainState.L1StandardBridgeProxyAddress, L1ERC721BridgeProxy: thisChainState.L1ERC721BridgeProxyAddress, diff --git a/op-chain-ops/deployer/pipeline/opchain.go b/op-chain-ops/deployer/pipeline/opchain.go index 90d03b028142..1ae37970d7d1 100644 --- a/op-chain-ops/deployer/pipeline/opchain.go +++ b/op-chain-ops/deployer/pipeline/opchain.go @@ -5,7 +5,7 @@ import ( "fmt" "math/big" - "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opsm" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opcm" "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/state" "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" "github.com/ethereum-optimism/optimism/op-chain-ops/script" @@ -27,7 +27,7 @@ func DeployOPChain(ctx context.Context, env *Env, artifactsFS foundry.StatDirFs, return fmt.Errorf("failed to get chain intent: %w", err) } - var dco opsm.DeployOPChainOutput + var dco opcm.DeployOPChainOutput err = CallScriptBroadcast( ctx, CallScriptBroadcastOpts{ @@ -40,9 +40,9 @@ func DeployOPChain(ctx context.Context, env *Env, artifactsFS foundry.StatDirFs, Broadcaster: KeyedBroadcaster, Handler: func(host *script.Host) error { 
host.ImportState(st.ImplementationsDeployment.StateDump) - dco, err = opsm.DeployOPChain( + dco, err = opcm.DeployOPChain( host, - opsm.DeployOPChainInput{ + opcm.DeployOPChainInput{ OpChainProxyAdminOwner: thisIntent.Roles.ProxyAdminOwner, SystemConfigOwner: thisIntent.Roles.SystemConfigOwner, Batcher: thisIntent.Roles.Batcher, @@ -52,7 +52,7 @@ func DeployOPChain(ctx context.Context, env *Env, artifactsFS foundry.StatDirFs, BasefeeScalar: 1368, BlobBaseFeeScalar: 801949, L2ChainId: chainID.Big(), - OpsmProxy: st.ImplementationsDeployment.OpsmProxyAddress, + OpcmProxy: st.ImplementationsDeployment.OpcmProxyAddress, }, ) return err diff --git a/op-chain-ops/deployer/pipeline/superchain.go b/op-chain-ops/deployer/pipeline/superchain.go index 21aeda0e23dc..cc1b8d04160c 100644 --- a/op-chain-ops/deployer/pipeline/superchain.go +++ b/op-chain-ops/deployer/pipeline/superchain.go @@ -7,7 +7,7 @@ import ( "github.com/ethereum-optimism/optimism/op-chain-ops/script" - "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opsm" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opcm" "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/state" "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" "github.com/ethereum-optimism/optimism/op-node/rollup" @@ -24,7 +24,7 @@ func DeploySuperchain(ctx context.Context, env *Env, artifactsFS foundry.StatDir lgr.Info("deploying superchain") var dump *foundry.ForgeAllocs - var dso opsm.DeploySuperchainOutput + var dso opcm.DeploySuperchainOutput var err error err = CallScriptBroadcast( ctx, @@ -37,9 +37,9 @@ func DeploySuperchain(ctx context.Context, env *Env, artifactsFS foundry.StatDir Client: env.L1Client, Broadcaster: KeyedBroadcaster, Handler: func(host *script.Host) error { - dso, err = opsm.DeploySuperchain( + dso, err = opcm.DeploySuperchain( host, - opsm.DeploySuperchainInput{ + opcm.DeploySuperchainInput{ ProxyAdminOwner: intent.SuperchainRoles.ProxyAdminOwner, ProtocolVersionsOwner: 
intent.SuperchainRoles.ProtocolVersionsOwner, Guardian: intent.SuperchainRoles.Guardian, diff --git a/op-chain-ops/deployer/state/state.go b/op-chain-ops/deployer/state/state.go index 098fa7a731d7..674e06d743a0 100644 --- a/op-chain-ops/deployer/state/state.go +++ b/op-chain-ops/deployer/state/state.go @@ -65,7 +65,7 @@ type SuperchainDeployment struct { } type ImplementationsDeployment struct { - OpsmProxyAddress common.Address `json:"opsmProxyAddress"` + OpcmProxyAddress common.Address `json:"opcmProxyAddress"` DelayedWETHImplAddress common.Address `json:"delayedWETHImplAddress"` OptimismPortalImplAddress common.Address `json:"optimismPortalImplAddress"` PreimageOracleSingletonAddress common.Address `json:"preimageOracleSingletonAddress"` diff --git a/op-chain-ops/interopgen/configs.go b/op-chain-ops/interopgen/configs.go index 9abe9880fe8b..f40d29904c2f 100644 --- a/op-chain-ops/interopgen/configs.go +++ b/op-chain-ops/interopgen/configs.go @@ -33,7 +33,7 @@ type SuperFaultProofConfig struct { DisputeGameFinalityDelaySeconds *big.Int } -type OPSMImplementationsConfig struct { +type OPCMImplementationsConfig struct { Release string FaultProof SuperFaultProofConfig @@ -51,7 +51,7 @@ type SuperchainConfig struct { Paused bool - Implementations OPSMImplementationsConfig + Implementations OPCMImplementationsConfig genesis.SuperchainL1DeployConfig } diff --git a/op-chain-ops/interopgen/deploy.go b/op-chain-ops/interopgen/deploy.go index 9da41e9894d8..692a80d3225e 100644 --- a/op-chain-ops/interopgen/deploy.go +++ b/op-chain-ops/interopgen/deploy.go @@ -5,13 +5,12 @@ import ( "fmt" "math/big" - "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opsm" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opcm" "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" 
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis" "github.com/ethereum-optimism/optimism/op-chain-ops/genesis/beacondeposit" @@ -149,7 +148,7 @@ func prepareInitialL1(l1Host *script.Host, cfg *L1Config) (*L1Deployment, error) func deploySuperchainToL1(l1Host *script.Host, superCfg *SuperchainConfig) (*SuperchainDeployment, error) { l1Host.SetTxOrigin(superCfg.Deployer) - superDeployment, err := opsm.DeploySuperchain(l1Host, opsm.DeploySuperchainInput{ + superDeployment, err := opcm.DeploySuperchain(l1Host, opcm.DeploySuperchainInput{ ProxyAdminOwner: superCfg.ProxyAdminOwner, ProtocolVersionsOwner: superCfg.ProtocolVersionsOwner, Guardian: superCfg.SuperchainConfigGuardian, @@ -161,7 +160,7 @@ func deploySuperchainToL1(l1Host *script.Host, superCfg *SuperchainConfig) (*Sup return nil, fmt.Errorf("failed to deploy Superchain contracts: %w", err) } - implementationsDeployment, err := opsm.DeployImplementations(l1Host, opsm.DeployImplementationsInput{ + implementationsDeployment, err := opcm.DeployImplementations(l1Host, opcm.DeployImplementationsInput{ WithdrawalDelaySeconds: superCfg.Implementations.FaultProof.WithdrawalDelaySeconds, MinProposalSizeBytes: superCfg.Implementations.FaultProof.MinProposalSizeBytes, ChallengePeriodSeconds: superCfg.Implementations.FaultProof.ChallengePeriodSeconds, @@ -172,7 +171,7 @@ func deploySuperchainToL1(l1Host *script.Host, superCfg *SuperchainConfig) (*Sup ProtocolVersionsProxy: superDeployment.ProtocolVersionsProxy, SuperchainProxyAdmin: superDeployment.SuperchainProxyAdmin, UseInterop: superCfg.Implementations.UseInterop, - StandardVersionsToml: opsm.StandardVersionsData, + StandardVersionsToml: opcm.StandardVersionsData, }) if err != nil { return nil, fmt.Errorf("failed to deploy Implementations contracts: %w", err) @@ -197,7 +196,7 @@ func deployL2ToL1(l1Host *script.Host, superCfg *SuperchainConfig, superDeployme l1Host.SetTxOrigin(cfg.Deployer) - output, err := opsm.DeployOPChain(l1Host, opsm.DeployOPChainInput{ 
+ output, err := opcm.DeployOPChain(l1Host, opcm.DeployOPChainInput{ OpChainProxyAdminOwner: cfg.ProxyAdminOwner, SystemConfigOwner: cfg.SystemConfigOwner, Batcher: cfg.BatchSenderAddress, @@ -207,7 +206,7 @@ func deployL2ToL1(l1Host *script.Host, superCfg *SuperchainConfig, superDeployme BasefeeScalar: cfg.GasPriceOracleBaseFeeScalar, BlobBaseFeeScalar: cfg.GasPriceOracleBlobBaseFeeScalar, L2ChainId: new(big.Int).SetUint64(cfg.L2ChainID), - OpsmProxy: superDeployment.OpsmProxy, + OpcmProxy: superDeployment.OpcmProxy, }) if err != nil { return nil, fmt.Errorf("failed to deploy L2 OP chain: %w", err) @@ -220,8 +219,8 @@ func deployL2ToL1(l1Host *script.Host, superCfg *SuperchainConfig, superDeployme } func genesisL2(l2Host *script.Host, cfg *L2Config, deployment *L2Deployment) error { - if err := opsm.L2Genesis(l2Host, &opsm.L2GenesisInput{ - L1Deployments: opsm.L1Deployments{ + if err := opcm.L2Genesis(l2Host, &opcm.L2GenesisInput{ + L1Deployments: opcm.L1Deployments{ L1CrossDomainMessengerProxy: deployment.L1CrossDomainMessengerProxy, L1StandardBridgeProxy: deployment.L1StandardBridgeProxy, L1ERC721BridgeProxy: deployment.L1ERC721BridgeProxy, diff --git a/op-chain-ops/interopgen/deployments.go b/op-chain-ops/interopgen/deployments.go index b6bb124d8e85..ba18fbfdf9bd 100644 --- a/op-chain-ops/interopgen/deployments.go +++ b/op-chain-ops/interopgen/deployments.go @@ -9,8 +9,8 @@ type L1Deployment struct { } type Implementations struct { - OpsmProxy common.Address `json:"OPSMProxy"` - OpsmImpl common.Address `json:"OPSMImpl"` + OpcmProxy common.Address `json:"OPCMProxy"` + OpcmImpl common.Address `json:"OPCMImpl"` DelayedWETHImpl common.Address `json:"DelayedWETHImpl"` OptimismPortalImpl common.Address `json:"OptimismPortalImpl"` PreimageOracleSingleton common.Address `json:"PreimageOracleSingleton"` diff --git a/op-chain-ops/interopgen/recipe.go b/op-chain-ops/interopgen/recipe.go index 4dbe58e4ca1f..eea42b87e0a4 100644 --- a/op-chain-ops/interopgen/recipe.go +++ 
b/op-chain-ops/interopgen/recipe.go @@ -8,7 +8,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opsm" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opcm" "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" ) @@ -67,7 +67,7 @@ func (r *InteropDevRecipe) Build(addrs devkeys.Addresses) (*WorldConfig, error) ProxyAdminOwner: superchainProxyAdmin, ProtocolVersionsOwner: superchainProtocolVersionsOwner, Deployer: superchainDeployer, - Implementations: OPSMImplementationsConfig{ + Implementations: OPCMImplementationsConfig{ Release: "dev", FaultProof: SuperFaultProofConfig{ WithdrawalDelaySeconds: big.NewInt(604800), @@ -77,7 +77,7 @@ func (r *InteropDevRecipe) Build(addrs devkeys.Addresses) (*WorldConfig, error) DisputeGameFinalityDelaySeconds: big.NewInt(6), }, UseInterop: true, - StandardVersionsToml: opsm.StandardVersionsData, + StandardVersionsToml: opcm.StandardVersionsData, }, SuperchainL1DeployConfig: genesis.SuperchainL1DeployConfig{ RequiredProtocolVersion: params.OPStackSupport, diff --git a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol index df950fed71f2..bdebf297c3f6 100644 --- a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol +++ b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol @@ -28,7 +28,7 @@ import { PermissionedDisputeGame } from "src/dispute/PermissionedDisputeGame.sol import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; -import { OPStackManager } from "src/L1/OPStackManager.sol"; +import { OPContractsManager } from "src/L1/OPContractsManager.sol"; import { OptimismPortal2 } from "src/L1/OptimismPortal2.sol"; import { SystemConfig } from "src/L1/SystemConfig.sol"; 
import { L1CrossDomainMessenger } from "src/L1/L1CrossDomainMessenger.sol"; @@ -36,7 +36,7 @@ import { L1ERC721Bridge } from "src/L1/L1ERC721Bridge.sol"; import { L1StandardBridge } from "src/L1/L1StandardBridge.sol"; import { OptimismMintableERC20Factory } from "src/universal/OptimismMintableERC20Factory.sol"; -import { OPStackManagerInterop } from "src/L1/OPStackManagerInterop.sol"; +import { OPContractsManagerInterop } from "src/L1/OPContractsManagerInterop.sol"; import { OptimismPortalInterop } from "src/L1/OptimismPortalInterop.sol"; import { SystemConfigInterop } from "src/L1/SystemConfigInterop.sol"; @@ -55,7 +55,7 @@ contract DeployImplementationsInput is BaseDeployIO { uint256 internal _proofMaturityDelaySeconds; uint256 internal _disputeGameFinalityDelaySeconds; - // The release version to set OPSM implementations for, of the format `op-contracts/vX.Y.Z`. + // The release version to set OPCM implementations for, of the format `op-contracts/vX.Y.Z`. string internal _release; // Outputs from DeploySuperchain.s.sol. 
@@ -166,8 +166,8 @@ contract DeployImplementationsInput is BaseDeployIO { } contract DeployImplementationsOutput is BaseDeployIO { - OPStackManager internal _opsmProxy; - OPStackManager internal _opsmImpl; + OPContractsManager internal _opcmProxy; + OPContractsManager internal _opcmImpl; DelayedWETH internal _delayedWETHImpl; OptimismPortal2 internal _optimismPortalImpl; PreimageOracle internal _preimageOracleSingleton; @@ -183,8 +183,8 @@ contract DeployImplementationsOutput is BaseDeployIO { require(_addr != address(0), "DeployImplementationsOutput: cannot set zero address"); // forgefmt: disable-start - if (sel == this.opsmProxy.selector) _opsmProxy = OPStackManager(payable(_addr)); - else if (sel == this.opsmImpl.selector) _opsmImpl = OPStackManager(payable(_addr)); + if (sel == this.opcmProxy.selector) _opcmProxy = OPContractsManager(payable(_addr)); + else if (sel == this.opcmImpl.selector) _opcmImpl = OPContractsManager(payable(_addr)); else if (sel == this.optimismPortalImpl.selector) _optimismPortalImpl = OptimismPortal2(payable(_addr)); else if (sel == this.delayedWETHImpl.selector) _delayedWETHImpl = DelayedWETH(payable(_addr)); else if (sel == this.preimageOracleSingleton.selector) _preimageOracleSingleton = PreimageOracle(_addr); @@ -203,8 +203,8 @@ contract DeployImplementationsOutput is BaseDeployIO { // With 12 addresses, we'd get a stack too deep error if we tried to do this inline as a // single call to `Solarray.addresses`. So we split it into two calls. 
address[] memory addrs1 = Solarray.addresses( - address(this.opsmProxy()), - address(this.opsmImpl()), + address(this.opcmProxy()), + address(this.opcmImpl()), address(this.optimismPortalImpl()), address(this.delayedWETHImpl()), address(this.preimageOracleSingleton()), @@ -225,15 +225,15 @@ contract DeployImplementationsOutput is BaseDeployIO { assertValidDeploy(_dii); } - function opsmProxy() public returns (OPStackManager) { - DeployUtils.assertValidContractAddress(address(_opsmProxy)); - DeployUtils.assertImplementationSet(address(_opsmProxy)); - return _opsmProxy; + function opcmProxy() public returns (OPContractsManager) { + DeployUtils.assertValidContractAddress(address(_opcmProxy)); + DeployUtils.assertImplementationSet(address(_opcmProxy)); + return _opcmProxy; } - function opsmImpl() public view returns (OPStackManager) { - DeployUtils.assertValidContractAddress(address(_opsmImpl)); - return _opsmImpl; + function opcmImpl() public view returns (OPContractsManager) { + DeployUtils.assertValidContractAddress(address(_opcmImpl)); + return _opcmImpl; } function optimismPortalImpl() public view returns (OptimismPortal2) { @@ -294,35 +294,35 @@ contract DeployImplementationsOutput is BaseDeployIO { assertValidL1ERC721BridgeImpl(_dii); assertValidL1StandardBridgeImpl(_dii); assertValidMipsSingleton(_dii); - assertValidOpsmProxy(_dii); - assertValidOpsmImpl(_dii); + assertValidOpcmProxy(_dii); + assertValidOpcmImpl(_dii); assertValidOptimismMintableERC20FactoryImpl(_dii); assertValidOptimismPortalImpl(_dii); assertValidPreimageOracleSingleton(_dii); assertValidSystemConfigImpl(_dii); } - function assertValidOpsmProxy(DeployImplementationsInput _dii) internal { + function assertValidOpcmProxy(DeployImplementationsInput _dii) internal { // First we check the proxy as itself. 
- Proxy proxy = Proxy(payable(address(opsmProxy()))); + Proxy proxy = Proxy(payable(address(opcmProxy()))); vm.prank(address(0)); address admin = proxy.admin(); - require(admin == address(_dii.superchainProxyAdmin()), "OPSMP-10"); + require(admin == address(_dii.superchainProxyAdmin()), "OPCMP-10"); - // Then we check the proxy as OPSM. - DeployUtils.assertInitialized({ _contractAddress: address(opsmProxy()), _slot: 0, _offset: 0 }); - require(address(opsmProxy().superchainConfig()) == address(_dii.superchainConfigProxy()), "OPSMP-20"); - require(address(opsmProxy().protocolVersions()) == address(_dii.protocolVersionsProxy()), "OPSMP-30"); - require(LibString.eq(opsmProxy().latestRelease(), _dii.release()), "OPSMP-50"); // Initial release is latest. + // Then we check the proxy as OPCM. + DeployUtils.assertInitialized({ _contractAddress: address(opcmProxy()), _slot: 0, _offset: 0 }); + require(address(opcmProxy().superchainConfig()) == address(_dii.superchainConfigProxy()), "OPCMP-20"); + require(address(opcmProxy().protocolVersions()) == address(_dii.protocolVersionsProxy()), "OPCMP-30"); + require(LibString.eq(opcmProxy().latestRelease(), _dii.release()), "OPCMP-50"); // Initial release is latest. 
} - function assertValidOpsmImpl(DeployImplementationsInput _dii) internal { - Proxy proxy = Proxy(payable(address(opsmProxy()))); + function assertValidOpcmImpl(DeployImplementationsInput _dii) internal { + Proxy proxy = Proxy(payable(address(opcmProxy()))); vm.prank(address(0)); - OPStackManager impl = OPStackManager(proxy.implementation()); + OPContractsManager impl = OPContractsManager(proxy.implementation()); DeployUtils.assertInitialized({ _contractAddress: address(impl), _slot: 0, _offset: 0 }); - require(address(impl.superchainConfig()) == address(_dii.superchainConfigProxy()), "OPSMI-10"); - require(address(impl.protocolVersions()) == address(_dii.protocolVersionsProxy()), "OPSMI-20"); + require(address(impl.superchainConfig()) == address(_dii.superchainConfigProxy()), "OPCMI-10"); + require(address(impl.protocolVersions()) == address(_dii.protocolVersionsProxy()), "OPCMI-20"); } function assertValidOptimismPortalImpl(DeployImplementationsInput) internal view { @@ -468,42 +468,42 @@ contract DeployImplementations is Script { deployMipsSingleton(_dii, _dio); deployDisputeGameFactoryImpl(_dii, _dio); - // Deploy the OP Stack Manager with the new implementations set. - deployOPStackManager(_dii, _dio); + // Deploy the OP Contracts Manager with the new implementations set. 
+ deployOPContractsManager(_dii, _dio); _dio.checkOutput(_dii); } // -------- Deployment Steps -------- - // --- OP Stack Manager --- + // --- OP Contracts Manager --- - function opsmSystemConfigSetter( + function opcmSystemConfigSetter( DeployImplementationsInput, DeployImplementationsOutput _dio ) internal view virtual - returns (OPStackManager.ImplementationSetter memory) + returns (OPContractsManager.ImplementationSetter memory) { - return OPStackManager.ImplementationSetter({ + return OPContractsManager.ImplementationSetter({ name: "SystemConfig", - info: OPStackManager.Implementation(address(_dio.systemConfigImpl()), SystemConfig.initialize.selector) + info: OPContractsManager.Implementation(address(_dio.systemConfigImpl()), SystemConfig.initialize.selector) }); } - // Deploy and initialize a proxied OPStackManager. - function createOPSMContract( + // Deploy and initialize a proxied OPContractsManager. + function createOPCMContract( DeployImplementationsInput _dii, DeployImplementationsOutput _dio, - OPStackManager.Blueprints memory _blueprints, + OPContractsManager.Blueprints memory _blueprints, string memory _release, - OPStackManager.ImplementationSetter[] memory _setters + OPContractsManager.ImplementationSetter[] memory _setters ) internal virtual - returns (OPStackManager opsmProxy_) + returns (OPContractsManager opcmProxy_) { ProxyAdmin proxyAdmin = _dii.superchainProxyAdmin(); @@ -511,29 +511,35 @@ contract DeployImplementations is Script { Proxy proxy = new Proxy(address(msg.sender)); deployOPContractsManagerImpl(_dii, _dio); - OPStackManager opsmImpl = _dio.opsmImpl(); + OPContractsManager opcmImpl = _dio.opcmImpl(); - OPStackManager.InitializerInputs memory initializerInputs = - OPStackManager.InitializerInputs(_blueprints, _setters, _release, true); + OPContractsManager.InitializerInputs memory initializerInputs = + OPContractsManager.InitializerInputs(_blueprints, _setters, _release, true); vm.startBroadcast(msg.sender); proxy.upgradeToAndCall( - 
address(opsmImpl), abi.encodeWithSelector(opsmImpl.initialize.selector, initializerInputs) + address(opcmImpl), abi.encodeWithSelector(opcmImpl.initialize.selector, initializerInputs) ); proxy.changeAdmin(address(proxyAdmin)); // transfer ownership of Proxy contract to the ProxyAdmin contract vm.stopBroadcast(); - opsmProxy_ = OPStackManager(address(proxy)); + opcmProxy_ = OPContractsManager(address(proxy)); } - function deployOPStackManager(DeployImplementationsInput _dii, DeployImplementationsOutput _dio) public virtual { + function deployOPContractsManager( + DeployImplementationsInput _dii, + DeployImplementationsOutput _dio + ) + public + virtual + { string memory release = _dii.release(); - // First we deploy the blueprints for the singletons deployed by OPSM. + // First we deploy the blueprints for the singletons deployed by OPCM. // forgefmt: disable-start bytes32 salt = _dii.salt(); - OPStackManager.Blueprints memory blueprints; + OPContractsManager.Blueprints memory blueprints; vm.startBroadcast(msg.sender); blueprints.addressManager = deployBytecode(Blueprint.blueprintDeployerBytecode(type(AddressManager).creationCode), salt); @@ -546,54 +552,56 @@ contract DeployImplementations is Script { vm.stopBroadcast(); // forgefmt: disable-end - OPStackManager.ImplementationSetter[] memory setters = new OPStackManager.ImplementationSetter[](9); - setters[0] = OPStackManager.ImplementationSetter({ + OPContractsManager.ImplementationSetter[] memory setters = new OPContractsManager.ImplementationSetter[](9); + setters[0] = OPContractsManager.ImplementationSetter({ name: "L1ERC721Bridge", - info: OPStackManager.Implementation(address(_dio.l1ERC721BridgeImpl()), L1ERC721Bridge.initialize.selector) + info: OPContractsManager.Implementation(address(_dio.l1ERC721BridgeImpl()), L1ERC721Bridge.initialize.selector) }); - setters[1] = OPStackManager.ImplementationSetter({ + setters[1] = OPContractsManager.ImplementationSetter({ name: "OptimismPortal", - info: 
OPStackManager.Implementation(address(_dio.optimismPortalImpl()), OptimismPortal2.initialize.selector) + info: OPContractsManager.Implementation(address(_dio.optimismPortalImpl()), OptimismPortal2.initialize.selector) }); - setters[2] = opsmSystemConfigSetter(_dii, _dio); - setters[3] = OPStackManager.ImplementationSetter({ + setters[2] = opcmSystemConfigSetter(_dii, _dio); + setters[3] = OPContractsManager.ImplementationSetter({ name: "OptimismMintableERC20Factory", - info: OPStackManager.Implementation( + info: OPContractsManager.Implementation( address(_dio.optimismMintableERC20FactoryImpl()), OptimismMintableERC20Factory.initialize.selector ) }); - setters[4] = OPStackManager.ImplementationSetter({ + setters[4] = OPContractsManager.ImplementationSetter({ name: "L1CrossDomainMessenger", - info: OPStackManager.Implementation( + info: OPContractsManager.Implementation( address(_dio.l1CrossDomainMessengerImpl()), L1CrossDomainMessenger.initialize.selector ) }); - setters[5] = OPStackManager.ImplementationSetter({ + setters[5] = OPContractsManager.ImplementationSetter({ name: "L1StandardBridge", - info: OPStackManager.Implementation(address(_dio.l1StandardBridgeImpl()), L1StandardBridge.initialize.selector) + info: OPContractsManager.Implementation( + address(_dio.l1StandardBridgeImpl()), L1StandardBridge.initialize.selector + ) }); - setters[6] = OPStackManager.ImplementationSetter({ + setters[6] = OPContractsManager.ImplementationSetter({ name: "DisputeGameFactory", - info: OPStackManager.Implementation( + info: OPContractsManager.Implementation( address(_dio.disputeGameFactoryImpl()), DisputeGameFactory.initialize.selector ) }); - setters[7] = OPStackManager.ImplementationSetter({ + setters[7] = OPContractsManager.ImplementationSetter({ name: "DelayedWETH", - info: OPStackManager.Implementation(address(_dio.delayedWETHImpl()), DelayedWETH.initialize.selector) + info: OPContractsManager.Implementation(address(_dio.delayedWETHImpl()), 
DelayedWETH.initialize.selector) }); - setters[8] = OPStackManager.ImplementationSetter({ + setters[8] = OPContractsManager.ImplementationSetter({ name: "MIPS", // MIPS is a singleton for all chains, so it doesn't need to be initialized, so the // selector is just `bytes4(0)`. - info: OPStackManager.Implementation(address(_dio.mipsSingleton()), bytes4(0)) + info: OPContractsManager.Implementation(address(_dio.mipsSingleton()), bytes4(0)) }); - // This call contains a broadcast to deploy OPSM which is proxied. - OPStackManager opsmProxy = createOPSMContract(_dii, _dio, blueprints, release, setters); + // This call contains a broadcast to deploy OPCM which is proxied. + OPContractsManager opcmProxy = createOPCMContract(_dii, _dio, blueprints, release, setters); - vm.label(address(opsmProxy), "OPStackManager"); - _dio.set(_dio.opsmProxy.selector, address(opsmProxy)); + vm.label(address(opcmProxy), "OPContractsManager"); + _dio.set(_dio.opcmProxy.selector, address(opcmProxy)); } // --- Core Contracts --- @@ -736,10 +744,10 @@ contract DeployImplementations is Script { vm.broadcast(msg.sender); // TODO: Eventually we will want to select the correct implementation based on the release. - OPStackManager impl = new OPStackManager(superchainConfigProxy, protocolVersionsProxy); + OPContractsManager impl = new OPContractsManager(superchainConfigProxy, protocolVersionsProxy); - vm.label(address(impl), "OPStackManagerImpl"); - _dio.set(_dio.opsmImpl.selector, address(impl)); + vm.label(address(impl), "OPContractsManagerImpl"); + _dio.set(_dio.opcmImpl.selector, address(impl)); } // --- Fault Proofs Contracts --- @@ -968,7 +976,7 @@ contract DeployImplementations is Script { // architecture, this comment block documents how to update the deploy scripts to support new features. // // Using the base scripts and contracts (DeploySuperchain, DeployImplementations, DeployOPChain, and -// the corresponding OPStackManager) deploys a standard chain. 
For nonstandard and in-development +// the corresponding OPContractsManager) deploys a standard chain. For nonstandard and in-development // features we need to modify some or all of those contracts, and we do that via inheritance. Using // interop as an example, they've made the following changes to L1 contracts: // - `OptimismPortalInterop is OptimismPortal`: A different portal implementation is used, and @@ -981,32 +989,32 @@ contract DeployImplementations is Script { // Similar to how inheritance was used to develop the new portal and system config contracts, we use // inheritance to modify up to all of the deployer contracts. For this interop example, what this // means is we need: -// - An `OPStackManagerInterop is OPStackManager` that knows how to encode the calldata for the +// - An `OPContractsManagerInterop is OPContractsManager` that knows how to encode the calldata for the // new system config initializer. // - A `DeployImplementationsInterop is DeployImplementations` that: // - Deploys OptimismPortalInterop instead of OptimismPortal. // - Deploys SystemConfigInterop instead of SystemConfig. -// - Deploys OPStackManagerInterop instead of OPStackManager, which contains the updated logic +// - Deploys OPContractsManagerInterop instead of OPContractsManager, which contains the updated logic // for encoding the SystemConfig initializer. -// - Updates the OPSM release setter logic to use the updated initializer. +// - Updates the OPCM release setter logic to use the updated initializer. // - A `DeployOPChainInterop is DeployOPChain` that allows the updated input parameter to be passed. // // Most of the complexity in the above flow comes from the the new input for the updated SystemConfig // initializer. If all function signatures were the same, all we'd have to change is the contract -// implementations that are deployed then set in the OPSM. For now, to simplify things until we +// implementations that are deployed then set in the OPCM. 
For now, to simplify things until we // resolve https://github.com/ethereum-optimism/optimism/issues/11783, we just assume this new role // is the same as the proxy admin owner. contract DeployImplementationsInterop is DeployImplementations { - function createOPSMContract( + function createOPCMContract( DeployImplementationsInput _dii, DeployImplementationsOutput _dio, - OPStackManager.Blueprints memory _blueprints, + OPContractsManager.Blueprints memory _blueprints, string memory _release, - OPStackManager.ImplementationSetter[] memory _setters + OPContractsManager.ImplementationSetter[] memory _setters ) internal override - returns (OPStackManager opsmProxy_) + returns (OPContractsManager opcmProxy_) { ProxyAdmin proxyAdmin = _dii.superchainProxyAdmin(); @@ -1014,20 +1022,20 @@ contract DeployImplementationsInterop is DeployImplementations { Proxy proxy = new Proxy(address(msg.sender)); deployOPContractsManagerImpl(_dii, _dio); // overriding function - OPStackManager opsmImpl = _dio.opsmImpl(); + OPContractsManager opcmImpl = _dio.opcmImpl(); - OPStackManager.InitializerInputs memory initializerInputs = - OPStackManager.InitializerInputs(_blueprints, _setters, _release, true); + OPContractsManager.InitializerInputs memory initializerInputs = + OPContractsManager.InitializerInputs(_blueprints, _setters, _release, true); vm.startBroadcast(msg.sender); proxy.upgradeToAndCall( - address(opsmImpl), abi.encodeWithSelector(opsmImpl.initialize.selector, initializerInputs) + address(opcmImpl), abi.encodeWithSelector(opcmImpl.initialize.selector, initializerInputs) ); proxy.changeAdmin(address(proxyAdmin)); // transfer ownership of Proxy contract to the ProxyAdmin contract vm.stopBroadcast(); - opsmProxy_ = OPStackManagerInterop(address(proxy)); + opcmProxy_ = OPContractsManagerInterop(address(proxy)); } function deployOptimismPortalImpl( @@ -1097,24 +1105,26 @@ contract DeployImplementationsInterop is DeployImplementations { vm.broadcast(msg.sender); // TODO: Eventually 
we will want to select the correct implementation based on the release. - OPStackManager impl = new OPStackManagerInterop(superchainConfigProxy, protocolVersionsProxy); + OPContractsManager impl = new OPContractsManagerInterop(superchainConfigProxy, protocolVersionsProxy); - vm.label(address(impl), "OPStackManagerImpl"); - _dio.set(_dio.opsmImpl.selector, address(impl)); + vm.label(address(impl), "OPContractsManagerImpl"); + _dio.set(_dio.opcmImpl.selector, address(impl)); } - function opsmSystemConfigSetter( + function opcmSystemConfigSetter( DeployImplementationsInput, DeployImplementationsOutput _dio ) internal view override - returns (OPStackManager.ImplementationSetter memory) + returns (OPContractsManager.ImplementationSetter memory) { - return OPStackManager.ImplementationSetter({ + return OPContractsManager.ImplementationSetter({ name: "SystemConfig", - info: OPStackManager.Implementation(address(_dio.systemConfigImpl()), SystemConfigInterop.initialize.selector) + info: OPContractsManager.Implementation( + address(_dio.systemConfigImpl()), SystemConfigInterop.initialize.selector + ) }); } } diff --git a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol index e0df48cc6029..524f9896b2bd 100644 --- a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol @@ -26,7 +26,7 @@ import { FaultDisputeGame } from "src/dispute/FaultDisputeGame.sol"; import { PermissionedDisputeGame } from "src/dispute/PermissionedDisputeGame.sol"; import { Claim, GameType, GameTypes, Hash, OutputRoot } from "src/dispute/lib/Types.sol"; -import { OPStackManager } from "src/L1/OPStackManager.sol"; +import { OPContractsManager } from "src/L1/OPContractsManager.sol"; import { OptimismPortal2 } from "src/L1/OptimismPortal2.sol"; import { SystemConfig } from "src/L1/SystemConfig.sol"; import { L1CrossDomainMessenger } from "src/L1/L1CrossDomainMessenger.sol"; @@ -46,7 
+46,7 @@ contract DeployOPChainInput is BaseDeployIO { uint32 internal _basefeeScalar; uint32 internal _blobBaseFeeScalar; uint256 internal _l2ChainId; - OPStackManager internal _opsmProxy; + OPContractsManager internal _opcmProxy; function set(bytes4 _sel, address _addr) public { require(_addr != address(0), "DeployOPChainInput: cannot set zero address"); @@ -56,7 +56,7 @@ contract DeployOPChainInput is BaseDeployIO { else if (_sel == this.unsafeBlockSigner.selector) _unsafeBlockSigner = _addr; else if (_sel == this.proposer.selector) _proposer = _addr; else if (_sel == this.challenger.selector) _challenger = _addr; - else if (_sel == this.opsmProxy.selector) _opsmProxy = OPStackManager(_addr); + else if (_sel == this.opcmProxy.selector) _opcmProxy = OPContractsManager(_addr); else revert("DeployOPChainInput: unknown selector"); } @@ -129,7 +129,7 @@ contract DeployOPChainInput is BaseDeployIO { // anchor root and deploy a new permissioned dispute game contract anyway. // // You can `console.logBytes(abi.encode(defaultStartingAnchorRoots))` to get the bytes that - // are hardcoded into `op-chain-ops/deployer/opsm/opchain.go` + // are hardcoded into `op-chain-ops/deployer/opcm/opchain.go` AnchorStateRegistry.StartingAnchorRoot[] memory defaultStartingAnchorRoots = new AnchorStateRegistry.StartingAnchorRoot[](1); defaultStartingAnchorRoots[0] = AnchorStateRegistry.StartingAnchorRoot({ @@ -139,10 +139,10 @@ contract DeployOPChainInput is BaseDeployIO { return abi.encode(defaultStartingAnchorRoots); } - // TODO: Check that opsm is proxied and it has an implementation. - function opsmProxy() public view returns (OPStackManager) { - require(address(_opsmProxy) != address(0), "DeployOPChainInput: not set"); - return _opsmProxy; + // TODO: Check that opcm is proxied and it has an implementation. 
+ function opcmProxy() public view returns (OPContractsManager) { + require(address(_opcmProxy) != address(0), "DeployOPChainInput: not set"); + return _opcmProxy; } } @@ -309,8 +309,8 @@ contract DeployOPChainOutput is BaseDeployIO { require(GameType.unwrap(game.gameType()) == GameType.unwrap(GameTypes.PERMISSIONED_CANNON), "DPG-10"); require(Claim.unwrap(game.absolutePrestate()) == bytes32(hex"dead"), "DPG-20"); - OPStackManager opsm = _doi.opsmProxy(); - (address mips,) = opsm.implementations(opsm.latestRelease(), "MIPS"); + OPContractsManager opcm = _doi.opcmProxy(); + (address mips,) = opcm.implementations(opcm.latestRelease(), "MIPS"); require(game.vm() == IBigStepper(mips), "DPG-30"); require(address(game.weth()) == address(delayedWETHPermissionedGameProxy()), "DPG-40"); @@ -368,7 +368,7 @@ contract DeployOPChainOutput is BaseDeployIO { require(systemConfig.startBlock() == block.number, "SYSCON-140"); require( - systemConfig.batchInbox() == _doi.opsmProxy().chainIdToBatchInboxAddress(_doi.l2ChainId()), "SYSCON-150" + systemConfig.batchInbox() == _doi.opcmProxy().chainIdToBatchInboxAddress(_doi.l2ChainId()), "SYSCON-150" ); require(systemConfig.l1CrossDomainMessenger() == address(l1CrossDomainMessengerProxy()), "SYSCON-160"); @@ -393,7 +393,7 @@ contract DeployOPChainOutput is BaseDeployIO { require(address(messenger.PORTAL()) == address(optimismPortalProxy()), "L1xDM-30"); require(address(messenger.portal()) == address(optimismPortalProxy()), "L1xDM-40"); - require(address(messenger.superchainConfig()) == address(_doi.opsmProxy().superchainConfig()), "L1xDM-50"); + require(address(messenger.superchainConfig()) == address(_doi.opcmProxy().superchainConfig()), "L1xDM-50"); bytes32 xdmSenderSlot = vm.load(address(messenger), bytes32(uint256(204))); require(address(uint160(uint256(xdmSenderSlot))) == Constants.DEFAULT_L2_SENDER, "L1xDM-60"); @@ -409,7 +409,7 @@ contract DeployOPChainOutput is BaseDeployIO { require(address(bridge.messenger()) == 
address(messenger), "L1SB-20"); require(address(bridge.OTHER_BRIDGE()) == Predeploys.L2_STANDARD_BRIDGE, "L1SB-30"); require(address(bridge.otherBridge()) == Predeploys.L2_STANDARD_BRIDGE, "L1SB-40"); - require(address(bridge.superchainConfig()) == address(_doi.opsmProxy().superchainConfig()), "L1SB-50"); + require(address(bridge.superchainConfig()) == address(_doi.opcmProxy().superchainConfig()), "L1SB-50"); } function assertValidOptimismMintableERC20Factory(DeployOPChainInput) internal view { @@ -431,12 +431,12 @@ contract DeployOPChainOutput is BaseDeployIO { require(address(bridge.MESSENGER()) == address(l1CrossDomainMessengerProxy()), "L721B-30"); require(address(bridge.messenger()) == address(l1CrossDomainMessengerProxy()), "L721B-40"); - require(address(bridge.superchainConfig()) == address(_doi.opsmProxy().superchainConfig()), "L721B-50"); + require(address(bridge.superchainConfig()) == address(_doi.opcmProxy().superchainConfig()), "L721B-50"); } function assertValidOptimismPortal(DeployOPChainInput _doi) internal view { OptimismPortal2 portal = optimismPortalProxy(); - ISuperchainConfig superchainConfig = ISuperchainConfig(address(_doi.opsmProxy().superchainConfig())); + ISuperchainConfig superchainConfig = ISuperchainConfig(address(_doi.opcmProxy().superchainConfig())); require(address(portal.disputeGameFactory()) == address(disputeGameFactoryProxy()), "PORTAL-10"); require(address(portal.systemConfig()) == address(systemConfigProxy()), "PORTAL-20"); @@ -470,9 +470,9 @@ contract DeployOPChain is Script { // -------- Core Deployment Methods -------- function run(DeployOPChainInput _doi, DeployOPChainOutput _doo) public { - OPStackManager opsmProxy = _doi.opsmProxy(); + OPContractsManager opcmProxy = _doi.opcmProxy(); - OPStackManager.Roles memory roles = OPStackManager.Roles({ + OPContractsManager.Roles memory roles = OPContractsManager.Roles({ opChainProxyAdminOwner: _doi.opChainProxyAdminOwner(), systemConfigOwner: _doi.systemConfigOwner(), batcher: 
_doi.batcher(), @@ -480,7 +480,7 @@ contract DeployOPChain is Script { proposer: _doi.proposer(), challenger: _doi.challenger() }); - OPStackManager.DeployInput memory deployInput = OPStackManager.DeployInput({ + OPContractsManager.DeployInput memory deployInput = OPContractsManager.DeployInput({ roles: roles, basefeeScalar: _doi.basefeeScalar(), blobBasefeeScalar: _doi.blobBaseFeeScalar(), @@ -489,7 +489,7 @@ contract DeployOPChain is Script { }); vm.broadcast(msg.sender); - OPStackManager.DeployOutput memory deployOutput = opsmProxy.deploy(deployInput); + OPContractsManager.DeployOutput memory deployOutput = opcmProxy.deploy(deployInput); vm.label(address(deployOutput.opChainProxyAdmin), "opChainProxyAdmin"); vm.label(address(deployOutput.addressManager), "addressManager"); diff --git a/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol b/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol index 365b67df7b47..5ca889bf409e 100644 --- a/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol +++ b/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol @@ -15,7 +15,7 @@ import { BaseDeployIO } from "scripts/utils/BaseDeployIO.sol"; // This comment block defines the requirements and rationale for the architecture used in this forge // script, along with other scripts that are being written as new Superchain-first deploy scripts to -// complement the OP Stack Manager. The script architecture is a bit different than a standard forge +// complement the OP Contracts Manager. The script architecture is a bit different than a standard forge // deployment script. 
// // There are three categories of users that are expected to interact with the scripts: diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index d88cbdba6bfd..b68db55580f2 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -31,9 +31,9 @@ "initCodeHash": "0x433fac9de52d8ce8fc3471b78ef6cc9cff1019f480c9ad91b6e09ab8738a8edb", "sourceCodeHash": "0xde4df0f9633dc0cdb1c9f634003ea5b0f7c5c1aebc407bc1b2f44c0ecf938649" }, - "src/L1/OPStackManager.sol": { + "src/L1/OPContractsManager.sol": { "initCodeHash": "0x92c72b75206e756742df25d67d295e4479e65db1473948b8f53cb4ca642025d5", - "sourceCodeHash": "0x3cbd30c68cad0dd18d49165bd21d94422b7403174f91a733e2398539dadf8656" + "sourceCodeHash": "0x5e04124ee67298d2f1245139baf7de79dee421d2c031c6e5abe0cd3b1bdbdb32" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xbe2c0c81b3459014f287d8c89cdc0d27dde5d1f44e5d024fa1e4773ddc47c190", diff --git a/packages/contracts-bedrock/snapshots/abi/OPStackManager.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json similarity index 95% rename from packages/contracts-bedrock/snapshots/abi/OPStackManager.json rename to packages/contracts-bedrock/snapshots/abi/OPContractsManager.json index 9654f8f084ab..ca2f2ab8ac83 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPStackManager.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json @@ -62,7 +62,7 @@ "type": "address" } ], - "internalType": "struct OPStackManager.Blueprints", + "internalType": "struct OPContractsManager.Blueprints", "name": "", "type": "tuple" } @@ -126,7 +126,7 @@ "type": "address" } ], - "internalType": "struct OPStackManager.Roles", + "internalType": "struct OPContractsManager.Roles", "name": "roles", "type": "tuple" }, @@ -151,7 +151,7 @@ "type": "bytes" } ], - "internalType": "struct OPStackManager.DeployInput", + "internalType": "struct OPContractsManager.DeployInput", "name": 
"_input", "type": "tuple" } @@ -236,7 +236,7 @@ "type": "address" } ], - "internalType": "struct OPStackManager.DeployOutput", + "internalType": "struct OPContractsManager.DeployOutput", "name": "", "type": "tuple" } @@ -320,7 +320,7 @@ "type": "address" } ], - "internalType": "struct OPStackManager.Blueprints", + "internalType": "struct OPContractsManager.Blueprints", "name": "blueprints", "type": "tuple" }, @@ -344,12 +344,12 @@ "type": "bytes4" } ], - "internalType": "struct OPStackManager.Implementation", + "internalType": "struct OPContractsManager.Implementation", "name": "info", "type": "tuple" } ], - "internalType": "struct OPStackManager.ImplementationSetter[]", + "internalType": "struct OPContractsManager.ImplementationSetter[]", "name": "setters", "type": "tuple[]" }, @@ -364,7 +364,7 @@ "type": "bool" } ], - "internalType": "struct OPStackManager.InitializerInputs", + "internalType": "struct OPContractsManager.InitializerInputs", "name": "_initializerInputs", "type": "tuple" } diff --git a/packages/contracts-bedrock/snapshots/abi/OPStackManagerInterop.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json similarity index 95% rename from packages/contracts-bedrock/snapshots/abi/OPStackManagerInterop.json rename to packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json index 9654f8f084ab..ca2f2ab8ac83 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPStackManagerInterop.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json @@ -62,7 +62,7 @@ "type": "address" } ], - "internalType": "struct OPStackManager.Blueprints", + "internalType": "struct OPContractsManager.Blueprints", "name": "", "type": "tuple" } @@ -126,7 +126,7 @@ "type": "address" } ], - "internalType": "struct OPStackManager.Roles", + "internalType": "struct OPContractsManager.Roles", "name": "roles", "type": "tuple" }, @@ -151,7 +151,7 @@ "type": "bytes" } ], - "internalType": "struct OPStackManager.DeployInput", + 
"internalType": "struct OPContractsManager.DeployInput", "name": "_input", "type": "tuple" } @@ -236,7 +236,7 @@ "type": "address" } ], - "internalType": "struct OPStackManager.DeployOutput", + "internalType": "struct OPContractsManager.DeployOutput", "name": "", "type": "tuple" } @@ -320,7 +320,7 @@ "type": "address" } ], - "internalType": "struct OPStackManager.Blueprints", + "internalType": "struct OPContractsManager.Blueprints", "name": "blueprints", "type": "tuple" }, @@ -344,12 +344,12 @@ "type": "bytes4" } ], - "internalType": "struct OPStackManager.Implementation", + "internalType": "struct OPContractsManager.Implementation", "name": "info", "type": "tuple" } ], - "internalType": "struct OPStackManager.ImplementationSetter[]", + "internalType": "struct OPContractsManager.ImplementationSetter[]", "name": "setters", "type": "tuple[]" }, @@ -364,7 +364,7 @@ "type": "bool" } ], - "internalType": "struct OPStackManager.InitializerInputs", + "internalType": "struct OPContractsManager.InitializerInputs", "name": "_initializerInputs", "type": "tuple" } diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OPStackManager.json b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json similarity index 84% rename from packages/contracts-bedrock/snapshots/storageLayout/OPStackManager.json rename to packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json index c22ed7c2c8da..cbb977f214b4 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OPStackManager.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json @@ -25,7 +25,7 @@ "label": "implementations", "offset": 0, "slot": "2", - "type": "mapping(string => mapping(string => struct OPStackManager.Implementation))" + "type": "mapping(string => mapping(string => struct OPContractsManager.Implementation))" }, { "bytes": "32", @@ -39,7 +39,7 @@ "label": "blueprint", "offset": 0, "slot": "4", - "type": "struct OPStackManager.Blueprints" + 
"type": "struct OPContractsManager.Blueprints" }, { "bytes": "1600", diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OPStackManagerInterop.json b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json similarity index 84% rename from packages/contracts-bedrock/snapshots/storageLayout/OPStackManagerInterop.json rename to packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json index c22ed7c2c8da..cbb977f214b4 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OPStackManagerInterop.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json @@ -25,7 +25,7 @@ "label": "implementations", "offset": 0, "slot": "2", - "type": "mapping(string => mapping(string => struct OPStackManager.Implementation))" + "type": "mapping(string => mapping(string => struct OPContractsManager.Implementation))" }, { "bytes": "32", @@ -39,7 +39,7 @@ "label": "blueprint", "offset": 0, "slot": "4", - "type": "struct OPStackManager.Blueprints" + "type": "struct OPContractsManager.Blueprints" }, { "bytes": "1600", diff --git a/packages/contracts-bedrock/src/L1/OPStackManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol similarity index 98% rename from packages/contracts-bedrock/src/L1/OPStackManager.sol rename to packages/contracts-bedrock/src/L1/OPContractsManager.sol index 12e9a6f5cbdb..4f36897d637c 100644 --- a/packages/contracts-bedrock/src/L1/OPStackManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -40,7 +40,7 @@ import { L1StandardBridge } from "src/L1/L1StandardBridge.sol"; import { OptimismMintableERC20Factory } from "src/universal/OptimismMintableERC20Factory.sol"; /// @custom:proxied true -contract OPStackManager is ISemver, Initializable { +contract OPContractsManager is ISemver, Initializable { // -------- Structs -------- /// @notice Represents the roles that can be set when deploying a standard OP Stack chain. 
@@ -113,7 +113,7 @@ contract OPStackManager is ISemver, Initializable { address permissionedDisputeGame2; } - /// @notice Inputs required when initializing the OPStackManager. To avoid 'StackTooDeep' errors, + /// @notice Inputs required when initializing the OPContractsManager. To avoid 'StackTooDeep' errors, /// all necessary inputs (excluding immutables) for initialization are bundled together in this struct. struct InitializerInputs { Blueprints blueprints; @@ -133,7 +133,7 @@ contract OPStackManager is ISemver, Initializable { /// @notice Address of the ProtocolVersions contract shared by all chains. ProtocolVersions public immutable protocolVersions; - /// @notice The latest release of the OP Stack Manager, as a string of the format `op-contracts/vX.Y.Z`. + /// @notice The latest release of the OP Contracts Manager, as a string of the format `op-contracts/vX.Y.Z`. string public latestRelease; /// @notice Maps a release version to a contract name to it's implementation data. @@ -181,7 +181,7 @@ contract OPStackManager is ISemver, Initializable { // -------- Methods -------- - /// @notice OPSM is proxied. Therefore the `initialize` function replaces most constructor logic for this contract. + /// @notice OPCM is proxied. Therefore the `initialize` function replaces most constructor logic for this contract. 
constructor(SuperchainConfig _superchainConfig, ProtocolVersions _protocolVersions) { assertValidContractAddress(address(_superchainConfig)); diff --git a/packages/contracts-bedrock/src/L1/OPStackManagerInterop.sol b/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol similarity index 91% rename from packages/contracts-bedrock/src/L1/OPStackManagerInterop.sol rename to packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol index 0a4a88625c9d..90fa13e7455c 100644 --- a/packages/contracts-bedrock/src/L1/OPStackManagerInterop.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { OPStackManager } from "src/L1/OPStackManager.sol"; +import { OPContractsManager } from "src/L1/OPContractsManager.sol"; import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; import { ResourceMetering } from "src/L1/ResourceMetering.sol"; @@ -9,12 +9,12 @@ import { SystemConfig } from "src/L1/SystemConfig.sol"; import { SystemConfigInterop } from "src/L1/SystemConfigInterop.sol"; /// @custom:proxied true -contract OPStackManagerInterop is OPStackManager { +contract OPContractsManagerInterop is OPContractsManager { constructor( SuperchainConfig _superchainConfig, ProtocolVersions _protocolVersions ) - OPStackManager(_superchainConfig, _protocolVersions) + OPContractsManager(_superchainConfig, _protocolVersions) { } // The `SystemConfigInterop` contract has an extra `address _dependencyManager` argument diff --git a/packages/contracts-bedrock/test/DeployImplementations.t.sol b/packages/contracts-bedrock/test/DeployImplementations.t.sol index 1eac67c6fdf3..1dca71b4ec70 100644 --- a/packages/contracts-bedrock/test/DeployImplementations.t.sol +++ b/packages/contracts-bedrock/test/DeployImplementations.t.sol @@ -10,7 +10,7 @@ import { DisputeGameFactory } from "src/dispute/DisputeGameFactory.sol"; import { 
SuperchainConfig } from "src/L1/SuperchainConfig.sol"; import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; -import { OPStackManager } from "src/L1/OPStackManager.sol"; +import { OPContractsManager } from "src/L1/OPContractsManager.sol"; import { OptimismPortal2 } from "src/L1/OptimismPortal2.sol"; import { SystemConfig } from "src/L1/SystemConfig.sol"; import { L1CrossDomainMessenger } from "src/L1/L1CrossDomainMessenger.sol"; @@ -106,11 +106,11 @@ contract DeployImplementationsOutput_Test is Test { function test_set_succeeds() public { Proxy proxy = new Proxy(address(0)); - address opsmImpl = address(makeAddr("opsmImpl")); + address opcmImpl = address(makeAddr("opcmImpl")); vm.prank(address(0)); - proxy.upgradeTo(opsmImpl); + proxy.upgradeTo(opcmImpl); - OPStackManager opsmProxy = OPStackManager(address(proxy)); + OPContractsManager opcmProxy = OPContractsManager(address(proxy)); OptimismPortal2 optimismPortalImpl = OptimismPortal2(payable(makeAddr("optimismPortalImpl"))); DelayedWETH delayedWETHImpl = DelayedWETH(payable(makeAddr("delayedWETHImpl"))); PreimageOracle preimageOracleSingleton = PreimageOracle(makeAddr("preimageOracleSingleton")); @@ -124,8 +124,8 @@ contract DeployImplementationsOutput_Test is Test { OptimismMintableERC20Factory(makeAddr("optimismMintableERC20FactoryImpl")); DisputeGameFactory disputeGameFactoryImpl = DisputeGameFactory(makeAddr("disputeGameFactoryImpl")); - vm.etch(address(opsmProxy), address(opsmProxy).code); - vm.etch(address(opsmImpl), hex"01"); + vm.etch(address(opcmProxy), address(opcmProxy).code); + vm.etch(address(opcmImpl), hex"01"); vm.etch(address(optimismPortalImpl), hex"01"); vm.etch(address(delayedWETHImpl), hex"01"); vm.etch(address(preimageOracleSingleton), hex"01"); @@ -136,7 +136,7 @@ contract DeployImplementationsOutput_Test is Test { vm.etch(address(l1StandardBridgeImpl), hex"01"); vm.etch(address(optimismMintableERC20FactoryImpl), hex"01"); vm.etch(address(disputeGameFactoryImpl), hex"01"); - 
dio.set(dio.opsmProxy.selector, address(opsmProxy)); + dio.set(dio.opcmProxy.selector, address(opcmProxy)); dio.set(dio.optimismPortalImpl.selector, address(optimismPortalImpl)); dio.set(dio.delayedWETHImpl.selector, address(delayedWETHImpl)); dio.set(dio.preimageOracleSingleton.selector, address(preimageOracleSingleton)); @@ -148,7 +148,7 @@ contract DeployImplementationsOutput_Test is Test { dio.set(dio.optimismMintableERC20FactoryImpl.selector, address(optimismMintableERC20FactoryImpl)); dio.set(dio.disputeGameFactoryImpl.selector, address(disputeGameFactoryImpl)); - assertEq(address(opsmProxy), address(dio.opsmProxy()), "50"); + assertEq(address(opcmProxy), address(dio.opcmProxy()), "50"); assertEq(address(optimismPortalImpl), address(dio.optimismPortalImpl()), "100"); assertEq(address(delayedWETHImpl), address(dio.delayedWETHImpl()), "200"); assertEq(address(preimageOracleSingleton), address(dio.preimageOracleSingleton()), "300"); @@ -413,7 +413,7 @@ contract DeployImplementations_Test is Test { string memory release = string(bytes.concat(hash(_seed, 5))); protocolVersionsProxy = ProtocolVersions(address(uint160(uint256(hash(_seed, 7))))); - // Must configure the ProxyAdmin contract which is used to upgrade the OPSM's proxy contract. + // Must configure the ProxyAdmin contract which is used to upgrade the OPCM's proxy contract. 
ProxyAdmin superchainProxyAdmin = new ProxyAdmin(msg.sender); superchainConfigProxy = SuperchainConfig(address(new Proxy(payable(address(superchainProxyAdmin))))); diff --git a/packages/contracts-bedrock/test/DeployOPChain.t.sol b/packages/contracts-bedrock/test/DeployOPChain.t.sol index 732eea8b05bf..3cbc313cfe11 100644 --- a/packages/contracts-bedrock/test/DeployOPChain.t.sol +++ b/packages/contracts-bedrock/test/DeployOPChain.t.sol @@ -23,7 +23,7 @@ import { PermissionedDisputeGame } from "src/dispute/PermissionedDisputeGame.sol import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; import { ProtocolVersions, ProtocolVersion } from "src/L1/ProtocolVersions.sol"; -import { OPStackManager } from "src/L1/OPStackManager.sol"; +import { OPContractsManager } from "src/L1/OPContractsManager.sol"; import { OptimismPortal2 } from "src/L1/OptimismPortal2.sol"; import { SystemConfig } from "src/L1/SystemConfig.sol"; import { L1CrossDomainMessenger } from "src/L1/L1CrossDomainMessenger.sol"; @@ -46,7 +46,7 @@ contract DeployOPChainInput_Test is Test { uint32 basefeeScalar = 100; uint32 blobBaseFeeScalar = 200; uint256 l2ChainId = 300; - OPStackManager opsm = OPStackManager(makeAddr("opsm")); + OPContractsManager opcm = OPContractsManager(makeAddr("opcm")); function setUp() public { doi = new DeployOPChainInput(); @@ -62,7 +62,7 @@ contract DeployOPChainInput_Test is Test { doi.set(doi.basefeeScalar.selector, basefeeScalar); doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); doi.set(doi.l2ChainId.selector, l2ChainId); - doi.set(doi.opsmProxy.selector, address(opsm)); + doi.set(doi.opcmProxy.selector, address(opcm)); // Compare the default inputs to the getter methods. 
assertEq(opChainProxyAdminOwner, doi.opChainProxyAdminOwner(), "200"); assertEq(systemConfigOwner, doi.systemConfigOwner(), "300"); @@ -73,7 +73,7 @@ contract DeployOPChainInput_Test is Test { assertEq(basefeeScalar, doi.basefeeScalar(), "800"); assertEq(blobBaseFeeScalar, doi.blobBaseFeeScalar(), "900"); assertEq(l2ChainId, doi.l2ChainId(), "1000"); - assertEq(address(opsm), address(doi.opsmProxy()), "1100"); + assertEq(address(opcm), address(doi.opcmProxy()), "1100"); } function test_getters_whenNotSet_revert() public { @@ -328,7 +328,7 @@ contract DeployOPChain_TestBase is Test { ProtocolVersions protocolVersionsProxy; // Define default inputs for DeployOPChain. - // `opsm` is set during `setUp` since it is an output of the previous step. + // `opcm` is set during `setUp` since it is an output of the previous step. address opChainProxyAdminOwner = makeAddr("defaultOPChainProxyAdminOwner"); address systemConfigOwner = makeAddr("defaultSystemConfigOwner"); address batcher = makeAddr("defaultBatcher"); @@ -339,7 +339,7 @@ contract DeployOPChain_TestBase is Test { uint32 blobBaseFeeScalar = 200; uint256 l2ChainId = 300; AnchorStateRegistry.StartingAnchorRoot[] startingAnchorRoots; - OPStackManager opsm = OPStackManager(address(0)); + OPContractsManager opcm = OPContractsManager(address(0)); function setUp() public virtual { // Set defaults for reference types @@ -401,8 +401,8 @@ contract DeployOPChain_TestBase is Test { deployOPChain = new DeployOPChain(); (doi, doo) = deployOPChain.etchIOContracts(); - // Set the OPStackManager address as input to DeployOPChain. - opsm = dio.opsmProxy(); + // Set the OPContractsManager input for DeployOPChain. 
+ opcm = dio.opcmProxy(); } // See the function of the same name in the `DeployImplementations_Test` contract of @@ -456,11 +456,11 @@ contract DeployOPChain_Test is DeployOPChain_TestBase { doi.set(doi.basefeeScalar.selector, basefeeScalar); doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); doi.set(doi.l2ChainId.selector, l2ChainId); - doi.set(doi.opsmProxy.selector, address(opsm)); // Not fuzzed since it must be an actual instance. + doi.set(doi.opcmProxy.selector, address(opcm)); // Not fuzzed since it must be an actual instance. deployOPChain.run(doi, doo); - // TODO Add fault proof contract assertions below once OPSM fully supports them. + // TODO Add fault proof contract assertions below once OPCM fully supports them. // Assert that individual input fields were properly set based on the inputs. assertEq(opChainProxyAdminOwner, doi.opChainProxyAdminOwner(), "100"); @@ -486,7 +486,7 @@ contract DeployOPChain_Test is DeployOPChain_TestBase { // assertEq(address(doo.faultDisputeGame().proposer()), proposer, "2700"); // assertEq(address(doo.faultDisputeGame().challenger()), challenger, "2800"); - // Most architecture assertions are handled within the OP Stack Manager itself and therefore + // Most architecture assertions are handled within the OP Contracts Manager itself and therefore // we only assert on the things that are not visible onchain. // TODO add these assertions: AddressManager, Proxy, ProxyAdmin, etc. 
} diff --git a/packages/contracts-bedrock/test/L1/OPStackManager.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol similarity index 71% rename from packages/contracts-bedrock/test/L1/OPStackManager.t.sol rename to packages/contracts-bedrock/test/L1/OPContractsManager.t.sol index ea26a6dae0b3..7f52b702dd71 100644 --- a/packages/contracts-bedrock/test/L1/OPStackManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol @@ -6,18 +6,18 @@ import { Test, stdStorage, StdStorage } from "forge-std/Test.sol"; import { DeployOPChainInput } from "scripts/DeployOPChain.s.sol"; import { DeployOPChain_TestBase } from "test/DeployOPChain.t.sol"; -import { OPStackManager } from "src/L1/OPStackManager.sol"; +import { OPContractsManager } from "src/L1/OPContractsManager.sol"; import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; import { SystemConfig } from "src/L1/SystemConfig.sol"; // Exposes internal functions for testing. -contract OPStackManager_Harness is OPStackManager { +contract OPContractsManager_Harness is OPContractsManager { constructor( SuperchainConfig _superchainConfig, ProtocolVersions _protocolVersions ) - OPStackManager(_superchainConfig, _protocolVersions) + OPContractsManager(_superchainConfig, _protocolVersions) { } function chainIdToBatchInboxAddress_exposed(uint256 l2ChainId) public pure returns (address) { @@ -26,12 +26,12 @@ contract OPStackManager_Harness is OPStackManager { } // Unlike other test suites, we intentionally do not inherit from CommonTest or Setup. This is -// because OPStackManager acts as a deploy script, so we start from a clean slate here and -// work OPStackManager's deployment into the existing test setup, instead of using the existing -// test setup to deploy OPStackManager. 
We do however inherit from DeployOPChain_TestBase so +// because OPContractsManager acts as a deploy script, so we start from a clean slate here and +// work OPContractsManager's deployment into the existing test setup, instead of using the existing +// test setup to deploy OPContractsManager. We do however inherit from DeployOPChain_TestBase so // we can use its setup to deploy the implementations similarly to how a real deployment would // happen. -contract OPStackManager_Deploy_Test is DeployOPChain_TestBase { +contract OPContractsManager_Deploy_Test is DeployOPChain_TestBase { using stdStorage for StdStorage; event Deployed(uint256 indexed l2ChainId, SystemConfig indexed systemConfig); @@ -48,14 +48,14 @@ contract OPStackManager_Deploy_Test is DeployOPChain_TestBase { doi.set(doi.basefeeScalar.selector, basefeeScalar); doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); doi.set(doi.l2ChainId.selector, l2ChainId); - doi.set(doi.opsmProxy.selector, address(opsm)); + doi.set(doi.opcmProxy.selector, address(opcm)); } // This helper function is used to convert the input struct type defined in DeployOPChain.s.sol - // to the input struct type defined in OPStackManager.sol. - function toOPSMDeployInput(DeployOPChainInput _doi) internal view returns (OPStackManager.DeployInput memory) { - return OPStackManager.DeployInput({ - roles: OPStackManager.Roles({ + // to the input struct type defined in OPContractsManager.sol. 
+ function toOPCMDeployInput(DeployOPChainInput _doi) internal view returns (OPContractsManager.DeployInput memory) { + return OPContractsManager.DeployInput({ + roles: OPContractsManager.Roles({ opChainProxyAdminOwner: _doi.opChainProxyAdminOwner(), systemConfigOwner: _doi.systemConfigOwner(), batcher: _doi.batcher(), @@ -71,30 +71,30 @@ contract OPStackManager_Deploy_Test is DeployOPChain_TestBase { } function test_deploy_l2ChainIdEqualsZero_reverts() public { - OPStackManager.DeployInput memory deployInput = toOPSMDeployInput(doi); + OPContractsManager.DeployInput memory deployInput = toOPCMDeployInput(doi); deployInput.l2ChainId = 0; - vm.expectRevert(OPStackManager.InvalidChainId.selector); - opsm.deploy(deployInput); + vm.expectRevert(OPContractsManager.InvalidChainId.selector); + opcm.deploy(deployInput); } function test_deploy_l2ChainIdEqualsCurrentChainId_reverts() public { - OPStackManager.DeployInput memory deployInput = toOPSMDeployInput(doi); + OPContractsManager.DeployInput memory deployInput = toOPCMDeployInput(doi); deployInput.l2ChainId = block.chainid; - vm.expectRevert(OPStackManager.InvalidChainId.selector); - opsm.deploy(deployInput); + vm.expectRevert(OPContractsManager.InvalidChainId.selector); + opcm.deploy(deployInput); } function test_deploy_succeeds() public { vm.expectEmit(true, false, true, true); // TODO precompute the system config address. emit Deployed(doi.l2ChainId(), SystemConfig(address(1))); - opsm.deploy(toOPSMDeployInput(doi)); + opcm.deploy(toOPCMDeployInput(doi)); } } // These tests use the harness which exposes internal functions for testing. 
-contract OPStackManager_InternalMethods_Test is Test { - OPStackManager_Harness opsmHarness; +contract OPContractsManager_InternalMethods_Test is Test { + OPContractsManager_Harness opcmHarness; function setUp() public { SuperchainConfig superchainConfigProxy = SuperchainConfig(makeAddr("superchainConfig")); @@ -102,7 +102,7 @@ contract OPStackManager_InternalMethods_Test is Test { vm.etch(address(superchainConfigProxy), hex"01"); vm.etch(address(protocolVersionsProxy), hex"01"); - opsmHarness = new OPStackManager_Harness({ + opcmHarness = new OPContractsManager_Harness({ _superchainConfig: superchainConfigProxy, _protocolVersions: protocolVersionsProxy }); @@ -114,12 +114,12 @@ contract OPStackManager_InternalMethods_Test is Test { // 2. Hash it and manually take the first 19 bytes, and prefixed it with 0x00. uint256 chainId = 1234; address expected = 0x0017FA14b0d73Aa6A26D6b8720c1c84b50984f5C; - address actual = opsmHarness.chainIdToBatchInboxAddress_exposed(chainId); + address actual = opcmHarness.chainIdToBatchInboxAddress_exposed(chainId); vm.assertEq(expected, actual); chainId = type(uint256).max; expected = 0x00a9C584056064687E149968cBaB758a3376D22A; - actual = opsmHarness.chainIdToBatchInboxAddress_exposed(chainId); + actual = opcmHarness.chainIdToBatchInboxAddress_exposed(chainId); vm.assertEq(expected, actual); } } diff --git a/packages/contracts-bedrock/test/Specs.t.sol b/packages/contracts-bedrock/test/Specs.t.sol index cb67549d4c9c..7f13a8800252 100644 --- a/packages/contracts-bedrock/test/Specs.t.sol +++ b/packages/contracts-bedrock/test/Specs.t.sol @@ -10,7 +10,7 @@ import { Executables } from "scripts/libraries/Executables.sol"; import { ForgeArtifacts, Abi, AbiEntry } from "scripts/libraries/ForgeArtifacts.sol"; // Contracts -import { OPStackManager } from "src/L1/OPStackManager.sol"; +import { OPContractsManager } from "src/L1/OPContractsManager.sol"; // Interfaces import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; @@ 
-836,29 +836,29 @@ contract Specification_Test is CommonTest { _addSpec({ _name: "WETH98", _sel: _getSel("transferFrom(address,address,uint256)") }); _addSpec({ _name: "WETH98", _sel: _getSel("withdraw(uint256)") }); - // OPStackManager - _addSpec({ _name: "OPStackManager", _sel: _getSel("version()") }); - _addSpec({ _name: "OPStackManager", _sel: _getSel("superchainConfig()") }); - _addSpec({ _name: "OPStackManager", _sel: _getSel("protocolVersions()") }); - _addSpec({ _name: "OPStackManager", _sel: _getSel("latestRelease()") }); - _addSpec({ _name: "OPStackManager", _sel: _getSel("implementations(string,string)") }); - _addSpec({ _name: "OPStackManager", _sel: _getSel("systemConfigs(uint256)") }); - _addSpec({ _name: "OPStackManager", _sel: OPStackManager.initialize.selector }); - _addSpec({ _name: "OPStackManager", _sel: OPStackManager.deploy.selector }); - _addSpec({ _name: "OPStackManager", _sel: OPStackManager.blueprints.selector }); - _addSpec({ _name: "OPStackManager", _sel: OPStackManager.chainIdToBatchInboxAddress.selector }); - - // OPStackManagerInterop - _addSpec({ _name: "OPStackManagerInterop", _sel: _getSel("version()") }); - _addSpec({ _name: "OPStackManagerInterop", _sel: _getSel("superchainConfig()") }); - _addSpec({ _name: "OPStackManagerInterop", _sel: _getSel("protocolVersions()") }); - _addSpec({ _name: "OPStackManagerInterop", _sel: _getSel("latestRelease()") }); - _addSpec({ _name: "OPStackManagerInterop", _sel: _getSel("implementations(string,string)") }); - _addSpec({ _name: "OPStackManagerInterop", _sel: _getSel("systemConfigs(uint256)") }); - _addSpec({ _name: "OPStackManagerInterop", _sel: OPStackManager.initialize.selector }); - _addSpec({ _name: "OPStackManagerInterop", _sel: OPStackManager.deploy.selector }); - _addSpec({ _name: "OPStackManagerInterop", _sel: OPStackManager.blueprints.selector }); - _addSpec({ _name: "OPStackManagerInterop", _sel: OPStackManager.chainIdToBatchInboxAddress.selector }); + // OPContractsManager + 
_addSpec({ _name: "OPContractsManager", _sel: _getSel("version()") }); + _addSpec({ _name: "OPContractsManager", _sel: _getSel("superchainConfig()") }); + _addSpec({ _name: "OPContractsManager", _sel: _getSel("protocolVersions()") }); + _addSpec({ _name: "OPContractsManager", _sel: _getSel("latestRelease()") }); + _addSpec({ _name: "OPContractsManager", _sel: _getSel("implementations(string,string)") }); + _addSpec({ _name: "OPContractsManager", _sel: _getSel("systemConfigs(uint256)") }); + _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.initialize.selector }); + _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.deploy.selector }); + _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.blueprints.selector }); + _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.chainIdToBatchInboxAddress.selector }); + + // OPContractsManagerInterop + _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("version()") }); + _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("superchainConfig()") }); + _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("protocolVersions()") }); + _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("latestRelease()") }); + _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("implementations(string,string)") }); + _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("systemConfigs(uint256)") }); + _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.initialize.selector }); + _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.deploy.selector }); + _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.blueprints.selector }); + _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.chainIdToBatchInboxAddress.selector }); // DeputyGuardianModule _addSpec({ diff --git a/packages/contracts-bedrock/test/vendor/Initializable.t.sol 
b/packages/contracts-bedrock/test/vendor/Initializable.t.sol index d5c1a9e5e4c3..7b7596b9bafe 100644 --- a/packages/contracts-bedrock/test/vendor/Initializable.t.sol +++ b/packages/contracts-bedrock/test/vendor/Initializable.t.sol @@ -411,8 +411,8 @@ contract Initializer_Test is Bridge_Initializer { excludes[4] = "src/dispute/FaultDisputeGame.sol"; excludes[5] = "src/dispute/PermissionedDisputeGame.sol"; // TODO: Eventually remove this exclusion. Same reason as above dispute contracts. - excludes[6] = "src/L1/OPStackManager.sol"; - excludes[7] = "src/L1/OPStackManagerInterop.sol"; + excludes[6] = "src/L1/OPContractsManager.sol"; + excludes[7] = "src/L1/OPContractsManagerInterop.sol"; // Get all contract names in the src directory, minus the excluded contracts. string[] memory contractNames = ForgeArtifacts.getContractNames("src/*", excludes); From 10a16aaf4ba12d430ec666c72aad555eeae70ed2 Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 25 Sep 2024 09:59:29 -0600 Subject: [PATCH 019/116] op-supervisor: logs-db empty-db edge-case fix (#12097) --- .../supervisor/backend/db/logs/db.go | 12 +-- .../supervisor/backend/db/logs/db_test.go | 86 +++++++++++++++++++ 2 files changed, 93 insertions(+), 5 deletions(-) diff --git a/op-supervisor/supervisor/backend/db/logs/db.go b/op-supervisor/supervisor/backend/db/logs/db.go index 61184318ece9..996a5c68d712 100644 --- a/op-supervisor/supervisor/backend/db/logs/db.go +++ b/op-supervisor/supervisor/backend/db/logs/db.go @@ -210,12 +210,11 @@ func (db *DB) FindSealedBlock(block eth.BlockID) (nextEntry entrydb.EntryIdx, er func (db *DB) LatestSealedBlockNum() (n uint64, ok bool) { db.rwLock.RLock() defer db.rwLock.RUnlock() + if db.lastEntryContext.nextEntryIndex == 0 { + return 0, false // empty DB, time to add the first seal + } if !db.lastEntryContext.hasCompleteBlock() { - if db.lastEntryContext.blockNum == 0 { - db.log.Debug("No DB contents yet") - } else { - db.log.Debug("New block is already in progress", "num", 
db.lastEntryContext.blockNum) - } + db.log.Debug("New block is already in progress", "num", db.lastEntryContext.blockNum) } return db.lastEntryContext.blockNum, true } @@ -381,6 +380,9 @@ func (db *DB) newIterator(index entrydb.EntryIdx) *iterator { // to find the closest one with an equal or lower block number and equal or lower amount of seen logs. // Returns the index of the searchCheckpoint to begin reading from or an error. func (db *DB) searchCheckpoint(sealedBlockNum uint64, logsSince uint32) (entrydb.EntryIdx, error) { + if db.lastEntryContext.nextEntryIndex == 0 { + return 0, ErrFuture // empty DB, everything is in the future + } n := (db.lastEntryIdx() / searchCheckpointFrequency) + 1 // Define: x is the array of known checkpoints // Invariant: x[i] <= target, x[j] > target. diff --git a/op-supervisor/supervisor/backend/db/logs/db_test.go b/op-supervisor/supervisor/backend/db/logs/db_test.go index c89433c7b4fe..31067b05808d 100644 --- a/op-supervisor/supervisor/backend/db/logs/db_test.go +++ b/op-supervisor/supervisor/backend/db/logs/db_test.go @@ -81,6 +81,92 @@ func TestEmptyDbDoesNotFindEntry(t *testing.T) { }) } +func TestLatestSealedBlockNum(t *testing.T) { + t.Run("Empty case", func(t *testing.T) { + runDBTest(t, + func(t *testing.T, db *DB, m *stubMetrics) {}, + func(t *testing.T, db *DB, m *stubMetrics) { + n, ok := db.LatestSealedBlockNum() + require.False(t, ok, "empty db expected") + require.Zero(t, n) + idx, err := db.searchCheckpoint(0, 0) + require.ErrorIs(t, err, ErrFuture, "no checkpoint in empty db") + require.Zero(t, idx) + }) + }) + t.Run("Zero case", func(t *testing.T) { + genesis := eth.BlockID{Hash: createHash(0), Number: 0} + runDBTest(t, + func(t *testing.T, db *DB, m *stubMetrics) { + require.NoError(t, db.SealBlock(common.Hash{}, genesis, 5000), "seal genesis") + }, + func(t *testing.T, db *DB, m *stubMetrics) { + n, ok := db.LatestSealedBlockNum() + require.True(t, ok, "genesis block expected") + require.Equal(t, genesis.Number, 
n) + idx, err := db.searchCheckpoint(0, 0) + require.NoError(t, err) + require.Zero(t, idx, "genesis block as checkpoint 0") + }) + }) + t.Run("Later genesis case", func(t *testing.T) { + genesis := eth.BlockID{Hash: createHash(10), Number: 10} + runDBTest(t, + func(t *testing.T, db *DB, m *stubMetrics) { + require.NoError(t, db.SealBlock(common.Hash{}, genesis, 5000), "seal genesis") + }, + func(t *testing.T, db *DB, m *stubMetrics) { + n, ok := db.LatestSealedBlockNum() + require.True(t, ok, "genesis block expected") + require.Equal(t, genesis.Number, n) + idx, err := db.searchCheckpoint(genesis.Number, 0) + require.NoError(t, err) + require.Zero(t, idx, "anchor block as checkpoint 0") + _, err = db.searchCheckpoint(0, 0) + require.ErrorIs(t, err, ErrSkipped, "no checkpoint before genesis") + }) + }) + t.Run("Block 1 case", func(t *testing.T) { + genesis := eth.BlockID{Hash: createHash(0), Number: 0} + block1 := eth.BlockID{Hash: createHash(1), Number: 1} + runDBTest(t, + func(t *testing.T, db *DB, m *stubMetrics) { + require.NoError(t, db.SealBlock(common.Hash{}, genesis, 5000), "seal genesis") + require.NoError(t, db.SealBlock(genesis.Hash, block1, 5001), "seal block 1") + }, + func(t *testing.T, db *DB, m *stubMetrics) { + n, ok := db.LatestSealedBlockNum() + require.True(t, ok, "block 1 expected") + require.Equal(t, block1.Number, n) + idx, err := db.searchCheckpoint(block1.Number, 0) + require.NoError(t, err) + require.Equal(t, entrydb.EntryIdx(0), idx, "checkpoint 0 still for block 1") + }) + }) + t.Run("Using checkpoint case", func(t *testing.T) { + genesis := eth.BlockID{Hash: createHash(0), Number: 0} + runDBTest(t, + func(t *testing.T, db *DB, m *stubMetrics) { + require.NoError(t, db.SealBlock(common.Hash{}, genesis, 5000), "seal genesis") + for i := 1; i <= 260; i++ { + id := eth.BlockID{Hash: createHash(i), Number: uint64(i)} + require.NoError(t, db.SealBlock(createHash(i-1), id, 5001), "seal block %d", i) + } + }, + func(t *testing.T, db *DB, m 
*stubMetrics) { + n, ok := db.LatestSealedBlockNum() + require.True(t, ok, "latest block expected") + expected := uint64(260) + require.Equal(t, expected, n) + idx, err := db.searchCheckpoint(expected, 0) + require.NoError(t, err) + // It costs 2 entries per block, so if we add more than 1 checkpoint worth of blocks, + // then we get to checkpoint 2 + require.Equal(t, entrydb.EntryIdx(searchCheckpointFrequency*2), idx, "checkpoint 1 reached") + }) + }) +} + func TestAddLog(t *testing.T) { t.Run("BlockZero", func(t *testing.T) { // There are no logs in the genesis block so recording an entry for block 0 should be rejected. From 616a078ea1159c9fa0436a81219e946e28adb10a Mon Sep 17 00:00:00 2001 From: George Knee Date: Wed, 25 Sep 2024 19:22:30 +0100 Subject: [PATCH 020/116] op-batcher: prevent over-assessment of DA type (#12115) * test: assert that default config doesn't change prematurely * test: use a better system to ensure we are not over assessing * return io.EOF from getReadyChannel when the current channel has no tx data also improve godoc --- op-batcher/batcher/channel_manager.go | 12 ++++++++++-- op-batcher/batcher/channel_manager_test.go | 19 +++++++++++++++---- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/op-batcher/batcher/channel_manager.go b/op-batcher/batcher/channel_manager.go index 3bfff303db4b..23e8f7843696 100644 --- a/op-batcher/batcher/channel_manager.go +++ b/op-batcher/batcher/channel_manager.go @@ -193,7 +193,11 @@ func (s *channelManager) TxData(l1Head eth.BlockID) (txData, error) { } // getReadyChannel returns the next channel ready to submit data, or an error. -// It adds blocks from the block queue to the current channel and generates frames for it. +// It will create a new channel if necessary. +// If there is no data ready to send, it adds blocks from the block queue +// to the current channel and generates frames for it. 
+// Always returns nil and the io.EOF sentinel error when +// there is no channel with txData func (s *channelManager) getReadyChannel(l1Head eth.BlockID) (*channel, error) { var firstWithTxData *channel for _, ch := range s.channelQueue { @@ -239,7 +243,11 @@ func (s *channelManager) getReadyChannel(l1Head eth.BlockID) (*channel, error) { return nil, err } - return s.currentChannel, nil + if s.currentChannel.HasTxData() { + return s.currentChannel, nil + } + + return nil, io.EOF } // ensureChannelWithSpace ensures currentChannel is populated with a channel that has diff --git a/op-batcher/batcher/channel_manager_test.go b/op-batcher/batcher/channel_manager_test.go index 5df5feacf4bf..c129cd9cde99 100644 --- a/op-batcher/batcher/channel_manager_test.go +++ b/op-batcher/batcher/channel_manager_test.go @@ -491,9 +491,11 @@ func TestChannelManager_ChannelCreation(t *testing.T) { type FakeDynamicEthChannelConfig struct { DynamicEthChannelConfig chooseBlobs bool + assessments int } func (f *FakeDynamicEthChannelConfig) ChannelConfig() ChannelConfig { + f.assessments++ if f.chooseBlobs { return f.blobConfig } @@ -537,13 +539,21 @@ func TestChannelManager_TxData(t *testing.T) { name string chooseBlobsWhenChannelCreated bool chooseBlobsWhenChannelSubmitted bool + + // * One when the channelManager was created + // * One when the channel is about to be submitted + // * Potentially one more if the replacement channel is about to be submitted, + // this only happens when going from calldata->blobs because + // the channel is no longer ready to send until more data + // is added. 
+ numExpectedAssessments int } tt := []TestCase{ - {"blobs->blobs", true, true}, - {"calldata->calldata", false, false}, - {"blobs->calldata", true, false}, - {"calldata->blobs", false, true}, + {"blobs->blobs", true, true, 2}, + {"calldata->calldata", false, false, 2}, + {"blobs->calldata", true, false, 2}, + {"calldata->blobs", false, true, 3}, } for _, tc := range tt { @@ -590,6 +600,7 @@ func TestChannelManager_TxData(t *testing.T) { } } + require.Equal(t, tc.numExpectedAssessments, cfg.assessments) require.Equal(t, tc.chooseBlobsWhenChannelSubmitted, data.asBlob) require.Equal(t, tc.chooseBlobsWhenChannelSubmitted, m.defaultCfg.UseBlobs) }) From 36180d78bdf3fa96d3a89efb127f7d463245a2ba Mon Sep 17 00:00:00 2001 From: Blaine Malone Date: Wed, 25 Sep 2024 15:14:46 -0400 Subject: [PATCH 021/116] rename: proxyAdminOwner -> superchainProxyAdminOwner (#12106) * rename: proxyAdminOwner -> superchainProxyAdminOwner * fix: renames in tests. * fix: renaming in go code. * fix: Changing the intent to contain SuperchainProxyAdminOwner instead of just ProxyAdminOwner * fix: reverting last change --- op-chain-ops/deployer/opcm/superchain.go | 2 +- op-chain-ops/deployer/pipeline/superchain.go | 2 +- op-chain-ops/interopgen/deploy.go | 2 +- .../scripts/DeploySuperchain.s.sol | 16 ++++++++-------- .../contracts-bedrock/test/DeployOPChain.t.sol | 4 ++-- .../test/DeploySuperchain.t.sol | 18 +++++++++--------- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/op-chain-ops/deployer/opcm/superchain.go b/op-chain-ops/deployer/opcm/superchain.go index 34804cc2bbdf..4f648bbfa8a3 100644 --- a/op-chain-ops/deployer/opcm/superchain.go +++ b/op-chain-ops/deployer/opcm/superchain.go @@ -14,7 +14,7 @@ import ( ) type DeploySuperchainInput struct { - ProxyAdminOwner common.Address `toml:"proxyAdminOwner"` + SuperchainProxyAdminOwner common.Address `toml:"superchainProxyAdminOwner"` ProtocolVersionsOwner common.Address `toml:"protocolVersionsOwner"` Guardian common.Address 
`toml:"guardian"` Paused bool `toml:"paused"` diff --git a/op-chain-ops/deployer/pipeline/superchain.go b/op-chain-ops/deployer/pipeline/superchain.go index cc1b8d04160c..13737475c916 100644 --- a/op-chain-ops/deployer/pipeline/superchain.go +++ b/op-chain-ops/deployer/pipeline/superchain.go @@ -40,7 +40,7 @@ func DeploySuperchain(ctx context.Context, env *Env, artifactsFS foundry.StatDir dso, err = opcm.DeploySuperchain( host, opcm.DeploySuperchainInput{ - ProxyAdminOwner: intent.SuperchainRoles.ProxyAdminOwner, + SuperchainProxyAdminOwner: intent.SuperchainRoles.ProxyAdminOwner, ProtocolVersionsOwner: intent.SuperchainRoles.ProtocolVersionsOwner, Guardian: intent.SuperchainRoles.Guardian, Paused: false, diff --git a/op-chain-ops/interopgen/deploy.go b/op-chain-ops/interopgen/deploy.go index 692a80d3225e..be837484e512 100644 --- a/op-chain-ops/interopgen/deploy.go +++ b/op-chain-ops/interopgen/deploy.go @@ -149,7 +149,7 @@ func deploySuperchainToL1(l1Host *script.Host, superCfg *SuperchainConfig) (*Sup l1Host.SetTxOrigin(superCfg.Deployer) superDeployment, err := opcm.DeploySuperchain(l1Host, opcm.DeploySuperchainInput{ - ProxyAdminOwner: superCfg.ProxyAdminOwner, + SuperchainProxyAdminOwner: superCfg.ProxyAdminOwner, ProtocolVersionsOwner: superCfg.ProtocolVersionsOwner, Guardian: superCfg.SuperchainConfigGuardian, Paused: superCfg.Paused, diff --git a/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol b/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol index 5ca889bf409e..7726d5709076 100644 --- a/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol +++ b/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol @@ -81,7 +81,7 @@ contract DeploySuperchainInput is BaseDeployIO { // Role inputs. address internal _guardian; address internal _protocolVersionsOwner; - address internal _proxyAdminOwner; + address internal _superchainProxyAdminOwner; // Other inputs. 
bool internal _paused; @@ -94,7 +94,7 @@ contract DeploySuperchainInput is BaseDeployIO { require(_address != address(0), "DeploySuperchainInput: cannot set zero address"); if (_sel == this.guardian.selector) _guardian = _address; else if (_sel == this.protocolVersionsOwner.selector) _protocolVersionsOwner = _address; - else if (_sel == this.proxyAdminOwner.selector) _proxyAdminOwner = _address; + else if (_sel == this.superchainProxyAdminOwner.selector) _superchainProxyAdminOwner = _address; else revert("DeploySuperchainInput: unknown selector"); } @@ -115,9 +115,9 @@ contract DeploySuperchainInput is BaseDeployIO { // validate that each input is set before accessing it. With getter methods, we can automatically // validate that each input is set before allowing any field to be accessed. - function proxyAdminOwner() public view returns (address) { - require(_proxyAdminOwner != address(0), "DeploySuperchainInput: proxyAdminOwner not set"); - return _proxyAdminOwner; + function superchainProxyAdminOwner() public view returns (address) { + require(_superchainProxyAdminOwner != address(0), "DeploySuperchainInput: superchainProxyAdminOwner not set"); + return _superchainProxyAdminOwner; } function protocolVersionsOwner() public view returns (address) { @@ -232,7 +232,7 @@ contract DeploySuperchainOutput is BaseDeployIO { } function assertValidSuperchainProxyAdmin(DeploySuperchainInput _dsi) internal view { - require(superchainProxyAdmin().owner() == _dsi.proxyAdminOwner(), "SPA-10"); + require(superchainProxyAdmin().owner() == _dsi.superchainProxyAdminOwner(), "SPA-10"); } function assertValidSuperchainConfig(DeploySuperchainInput _dsi) internal { @@ -386,13 +386,13 @@ contract DeploySuperchain is Script { } function transferProxyAdminOwnership(DeploySuperchainInput _dsi, DeploySuperchainOutput _dso) public { - address proxyAdminOwner = _dsi.proxyAdminOwner(); + address superchainProxyAdminOwner = _dsi.superchainProxyAdminOwner(); ProxyAdmin superchainProxyAdmin = 
_dso.superchainProxyAdmin(); DeployUtils.assertValidContractAddress(address(superchainProxyAdmin)); vm.broadcast(msg.sender); - superchainProxyAdmin.transferOwnership(proxyAdminOwner); + superchainProxyAdmin.transferOwnership(superchainProxyAdminOwner); } // -------- Utilities -------- diff --git a/packages/contracts-bedrock/test/DeployOPChain.t.sol b/packages/contracts-bedrock/test/DeployOPChain.t.sol index 3cbc313cfe11..445602062d5a 100644 --- a/packages/contracts-bedrock/test/DeployOPChain.t.sol +++ b/packages/contracts-bedrock/test/DeployOPChain.t.sol @@ -308,7 +308,7 @@ contract DeployOPChain_TestBase is Test { DeployOPChainOutput doo; // Define default inputs for DeploySuperchain. - address proxyAdminOwner = makeAddr("defaultProxyAdminOwner"); + address superchainProxyAdminOwner = makeAddr("defaultSuperchainProxyAdminOwner"); address protocolVersionsOwner = makeAddr("defaultProtocolVersionsOwner"); address guardian = makeAddr("defaultGuardian"); bool paused = false; @@ -365,7 +365,7 @@ contract DeployOPChain_TestBase is Test { DeploySuperchain deploySuperchain = new DeploySuperchain(); (DeploySuperchainInput dsi, DeploySuperchainOutput dso) = deploySuperchain.etchIOContracts(); - dsi.set(dsi.proxyAdminOwner.selector, proxyAdminOwner); + dsi.set(dsi.superchainProxyAdminOwner.selector, superchainProxyAdminOwner); dsi.set(dsi.protocolVersionsOwner.selector, protocolVersionsOwner); dsi.set(dsi.guardian.selector, guardian); dsi.set(dsi.paused.selector, paused); diff --git a/packages/contracts-bedrock/test/DeploySuperchain.t.sol b/packages/contracts-bedrock/test/DeploySuperchain.t.sol index 4b51aae0f3d6..a6bcf2aa2f50 100644 --- a/packages/contracts-bedrock/test/DeploySuperchain.t.sol +++ b/packages/contracts-bedrock/test/DeploySuperchain.t.sol @@ -13,7 +13,7 @@ import { DeploySuperchainInput, DeploySuperchain, DeploySuperchainOutput } from contract DeploySuperchainInput_Test is Test { DeploySuperchainInput dsi; - address proxyAdminOwner = 
makeAddr("defaultProxyAdminOwner"); + address superchainProxyAdminOwner = makeAddr("superchainProxyAdminOwner"); address protocolVersionsOwner = makeAddr("defaultProtocolVersionsOwner"); address guardian = makeAddr("defaultGuardian"); bool paused = false; @@ -25,8 +25,8 @@ contract DeploySuperchainInput_Test is Test { } function test_getters_whenNotSet_revert() public { - vm.expectRevert("DeploySuperchainInput: proxyAdminOwner not set"); - dsi.proxyAdminOwner(); + vm.expectRevert("DeploySuperchainInput: superchainProxyAdminOwner not set"); + dsi.superchainProxyAdminOwner(); vm.expectRevert("DeploySuperchainInput: protocolVersionsOwner not set"); dsi.protocolVersionsOwner(); @@ -151,7 +151,7 @@ contract DeploySuperchain_Test is Test { // Generate random input values from the seed. This doesn't give us the benefit of the forge // fuzzer's dictionary, but that's ok because we are just testing that values are set and // passed correctly. - address proxyAdminOwner = address(uint160(uint256(hash(_seed, 0)))); + address superchainProxyAdminOwner = address(uint160(uint256(hash(_seed, 0)))); address protocolVersionsOwner = address(uint160(uint256(hash(_seed, 1)))); address guardian = address(uint160(uint256(hash(_seed, 2)))); bool paused = bool(uint8(uint256(hash(_seed, 3))) % 2 == 0); @@ -159,7 +159,7 @@ contract DeploySuperchain_Test is Test { ProtocolVersion recommendedProtocolVersion = ProtocolVersion.wrap(uint256(hash(_seed, 5))); // Set the input values on the input contract. - dsi.set(dsi.proxyAdminOwner.selector, proxyAdminOwner); + dsi.set(dsi.superchainProxyAdminOwner.selector, superchainProxyAdminOwner); dsi.set(dsi.protocolVersionsOwner.selector, protocolVersionsOwner); dsi.set(dsi.guardian.selector, guardian); dsi.set(dsi.paused.selector, paused); @@ -170,7 +170,7 @@ contract DeploySuperchain_Test is Test { deploySuperchain.run(dsi, dso); // Assert inputs were properly passed through to the contract initializers. 
- assertEq(address(dso.superchainProxyAdmin().owner()), proxyAdminOwner, "100"); + assertEq(address(dso.superchainProxyAdmin().owner()), superchainProxyAdminOwner, "100"); assertEq(address(dso.protocolVersionsProxy().owner()), protocolVersionsOwner, "200"); assertEq(address(dso.superchainConfigProxy().guardian()), guardian, "300"); assertEq(dso.superchainConfigProxy().paused(), paused, "400"); @@ -196,7 +196,7 @@ contract DeploySuperchain_Test is Test { function test_run_NullInput_reverts() public { // Set default values for all inputs. - dsi.set(dsi.proxyAdminOwner.selector, defaultProxyAdminOwner); + dsi.set(dsi.superchainProxyAdminOwner.selector, defaultProxyAdminOwner); dsi.set(dsi.protocolVersionsOwner.selector, defaultProtocolVersionsOwner); dsi.set(dsi.guardian.selector, defaultGuardian); dsi.set(dsi.paused.selector, defaultPaused); @@ -207,8 +207,8 @@ contract DeploySuperchain_Test is Test { // methods to set the zero address, so we use StdStorage. We can't use the `checked_write` // method, because it does a final call to test that the value was set correctly, but for us // that would revert. Therefore we use StdStorage to find the slot, then we write to it. - uint256 slot = zeroOutSlotForSelector(dsi.proxyAdminOwner.selector); - vm.expectRevert("DeploySuperchainInput: proxyAdminOwner not set"); + uint256 slot = zeroOutSlotForSelector(dsi.superchainProxyAdminOwner.selector); + vm.expectRevert("DeploySuperchainInput: superchainProxyAdminOwner not set"); deploySuperchain.run(dsi, dso); // Restore the value we just tested. vm.store(address(dsi), bytes32(slot), bytes32(uint256(uint160(defaultProxyAdminOwner)))); From 06f1406e82945ef178dac7ee9d8c247fa54c5c18 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Wed, 25 Sep 2024 13:29:45 -0600 Subject: [PATCH 022/116] Add support for pre-existing OPSM (#12099) * Add support for pre-existing OPSM This PR adds support for deploying OP Chains against an existing OPSM deployment. 
It adds a new `OPSMAddress` field to the intent to hold the address of the OPSM. When specified, the `ContractsRelease` field is ignored. In the future, this field will be pulled from the Superchain Registry. Since the Go Forge tooling doesn't support forking yet, the deployment is performed using a raw call to `eth_sendRawTransaction`. Data about the Superchain deployments is pulled from the OPSM itself via `eth_call`. To expose the deployment output following the Superchain deployment, I updated the `Deployed` event to emit the ABI-encoded bytes of the `DeployOutput` struct to avoid stack-too-deep errors. This isn't ideal, but at least it gets me all of the fields I need in a log event without more invasive changes. * chore: add version identifer to Deployed event * chore: emit msg.sender in Deployed event * Fix merge issues * test: fix specs test * semver-lock * code review updates --------- Co-authored-by: Matt Solomon --- op-chain-ops/deployer/broadcaster/keyed.go | 2 +- op-chain-ops/deployer/init.go | 7 +- .../deployer/integration_test/apply_test.go | 141 ++++++++---- op-chain-ops/deployer/opcm/contract.go | 83 +++++++ op-chain-ops/deployer/opcm/opchain.go | 212 +++++++++++++++++- op-chain-ops/deployer/pipeline/init.go | 35 +++ op-chain-ops/deployer/pipeline/opchain.go | 98 +++++--- op-chain-ops/deployer/state/intent.go | 7 + packages/contracts-bedrock/semver-lock.json | 4 +- .../snapshots/abi/OPContractsManager.json | 29 ++- .../abi/OPContractsManagerInterop.json | 29 ++- .../src/L1/OPContractsManager.sol | 20 +- .../test/L1/OPContractsManager.t.sol | 8 +- packages/contracts-bedrock/test/Specs.t.sol | 2 + 14 files changed, 577 insertions(+), 100 deletions(-) create mode 100644 op-chain-ops/deployer/opcm/contract.go diff --git a/op-chain-ops/deployer/broadcaster/keyed.go b/op-chain-ops/deployer/broadcaster/keyed.go index 2784c4d455be..63b72010042b 100644 --- a/op-chain-ops/deployer/broadcaster/keyed.go +++ b/op-chain-ops/deployer/broadcaster/keyed.go @@ 
-162,7 +162,7 @@ func (t *KeyedBroadcaster) Broadcast(ctx context.Context) ([]BroadcastResult, er ) } - results = append(results, outRes) + results[i] = outRes } return results, txErr.ErrorOrNil() } diff --git a/op-chain-ops/deployer/init.go b/op-chain-ops/deployer/init.go index 0cc288b40ffb..bd79f980cdff 100644 --- a/op-chain-ops/deployer/init.go +++ b/op-chain-ops/deployer/init.go @@ -65,9 +65,10 @@ func Init(cfg InitConfig) error { } intent := &state.Intent{ - L1ChainID: cfg.L1ChainID, - UseFaultProofs: true, - FundDevAccounts: true, + L1ChainID: cfg.L1ChainID, + UseFaultProofs: true, + FundDevAccounts: true, + ContractsRelease: "dev", } l1ChainIDBig := intent.L1ChainIDBig() diff --git a/op-chain-ops/deployer/integration_test/apply_test.go b/op-chain-ops/deployer/integration_test/apply_test.go index 4399e0b887d3..ad22651fa36e 100644 --- a/op-chain-ops/deployer/integration_test/apply_test.go +++ b/op-chain-ops/deployer/integration_test/apply_test.go @@ -93,18 +93,104 @@ func TestEndToEndApply(t *testing.T) { id := uint256.NewInt(1) - addrFor := func(key devkeys.Key) common.Address { - addr, err := dk.Address(key) - require.NoError(t, err) - return addr - } + deployerAddr, err := dk.Address(depKey) + require.NoError(t, err) + env := &pipeline.Env{ Workdir: t.TempDir(), L1Client: l1Client, Signer: signer, - Deployer: addrFor(depKey), + Deployer: deployerAddr, Logger: lgr, } + + t.Run("initial chain", func(t *testing.T) { + intent, st := makeIntent(t, l1ChainID, artifactsURL, dk, id) + + require.NoError(t, deployer.ApplyPipeline( + ctx, + env, + intent, + st, + )) + + addrs := []struct { + name string + addr common.Address + }{ + {"SuperchainProxyAdmin", st.SuperchainDeployment.ProxyAdminAddress}, + {"SuperchainConfigProxy", st.SuperchainDeployment.SuperchainConfigProxyAddress}, + {"SuperchainConfigImpl", st.SuperchainDeployment.SuperchainConfigImplAddress}, + {"ProtocolVersionsProxy", st.SuperchainDeployment.ProtocolVersionsProxyAddress}, + {"ProtocolVersionsImpl", 
st.SuperchainDeployment.ProtocolVersionsImplAddress}, + {"OpcmProxy", st.ImplementationsDeployment.OpcmProxyAddress}, + {"DelayedWETHImpl", st.ImplementationsDeployment.DelayedWETHImplAddress}, + {"OptimismPortalImpl", st.ImplementationsDeployment.OptimismPortalImplAddress}, + {"PreimageOracleSingleton", st.ImplementationsDeployment.PreimageOracleSingletonAddress}, + {"MipsSingleton", st.ImplementationsDeployment.MipsSingletonAddress}, + {"SystemConfigImpl", st.ImplementationsDeployment.SystemConfigImplAddress}, + {"L1CrossDomainMessengerImpl", st.ImplementationsDeployment.L1CrossDomainMessengerImplAddress}, + {"L1ERC721BridgeImpl", st.ImplementationsDeployment.L1ERC721BridgeImplAddress}, + {"L1StandardBridgeImpl", st.ImplementationsDeployment.L1StandardBridgeImplAddress}, + {"OptimismMintableERC20FactoryImpl", st.ImplementationsDeployment.OptimismMintableERC20FactoryImplAddress}, + {"DisputeGameFactoryImpl", st.ImplementationsDeployment.DisputeGameFactoryImplAddress}, + } + for _, addr := range addrs { + t.Run(addr.name, func(t *testing.T) { + code, err := l1Client.CodeAt(ctx, addr.addr, nil) + require.NoError(t, err) + require.NotEmpty(t, code, "contracts %s at %s has no code", addr.name, addr.addr) + }) + } + + validateOPChainDeployment(t, ctx, l1Client, st) + }) + + t.Run("subsequent chain", func(t *testing.T) { + newID := uint256.NewInt(2) + intent, st := makeIntent(t, l1ChainID, artifactsURL, dk, newID) + env.Workdir = t.TempDir() + + require.NoError(t, deployer.ApplyPipeline( + ctx, + env, + intent, + st, + )) + + addrs := []struct { + name string + addr common.Address + }{ + {"SuperchainConfigProxy", st.SuperchainDeployment.SuperchainConfigProxyAddress}, + {"ProtocolVersionsProxy", st.SuperchainDeployment.ProtocolVersionsProxyAddress}, + {"OpcmProxy", st.ImplementationsDeployment.OpcmProxyAddress}, + } + for _, addr := range addrs { + t.Run(addr.name, func(t *testing.T) { + code, err := l1Client.CodeAt(ctx, addr.addr, nil) + require.NoError(t, err) + 
require.NotEmpty(t, code, "contracts %s at %s has no code", addr.name, addr.addr) + }) + } + + validateOPChainDeployment(t, ctx, l1Client, st) + }) +} + +func makeIntent( + t *testing.T, + l1ChainID *big.Int, + artifactsURL *url.URL, + dk *devkeys.MnemonicDevKeys, + l2ChainID *uint256.Int, +) (*state.Intent, *state.State) { + addrFor := func(key devkeys.Key) common.Address { + addr, err := dk.Address(key) + require.NoError(t, err) + return addr + } + intent := &state.Intent{ L1ChainID: l1ChainID.Uint64(), SuperchainRoles: state.SuperchainRoles{ @@ -118,7 +204,7 @@ func TestEndToEndApply(t *testing.T) { ContractsRelease: "dev", Chains: []*state.ChainIntent{ { - ID: id.Bytes32(), + ID: l2ChainID.Bytes32(), Roles: state.ChainRoles{ ProxyAdminOwner: addrFor(devkeys.L2ProxyAdminOwnerRole.Key(l1ChainID)), SystemConfigOwner: addrFor(devkeys.SystemConfigOwner.Key(l1ChainID)), @@ -134,43 +220,10 @@ func TestEndToEndApply(t *testing.T) { st := &state.State{ Version: 1, } + return intent, st +} - require.NoError(t, deployer.ApplyPipeline( - ctx, - env, - intent, - st, - )) - - addrs := []struct { - name string - addr common.Address - }{ - {"SuperchainProxyAdmin", st.SuperchainDeployment.ProxyAdminAddress}, - {"SuperchainConfigProxy", st.SuperchainDeployment.SuperchainConfigProxyAddress}, - {"SuperchainConfigImpl", st.SuperchainDeployment.SuperchainConfigImplAddress}, - {"ProtocolVersionsProxy", st.SuperchainDeployment.ProtocolVersionsProxyAddress}, - {"ProtocolVersionsImpl", st.SuperchainDeployment.ProtocolVersionsImplAddress}, - {"OpcmProxy", st.ImplementationsDeployment.OpcmProxyAddress}, - {"DelayedWETHImpl", st.ImplementationsDeployment.DelayedWETHImplAddress}, - {"OptimismPortalImpl", st.ImplementationsDeployment.OptimismPortalImplAddress}, - {"PreimageOracleSingleton", st.ImplementationsDeployment.PreimageOracleSingletonAddress}, - {"MipsSingleton", st.ImplementationsDeployment.MipsSingletonAddress}, - {"SystemConfigImpl", 
st.ImplementationsDeployment.SystemConfigImplAddress}, - {"L1CrossDomainMessengerImpl", st.ImplementationsDeployment.L1CrossDomainMessengerImplAddress}, - {"L1ERC721BridgeImpl", st.ImplementationsDeployment.L1ERC721BridgeImplAddress}, - {"L1StandardBridgeImpl", st.ImplementationsDeployment.L1StandardBridgeImplAddress}, - {"OptimismMintableERC20FactoryImpl", st.ImplementationsDeployment.OptimismMintableERC20FactoryImplAddress}, - {"DisputeGameFactoryImpl", st.ImplementationsDeployment.DisputeGameFactoryImplAddress}, - } - for _, addr := range addrs { - t.Run(addr.name, func(t *testing.T) { - code, err := l1Client.CodeAt(ctx, addr.addr, nil) - require.NoError(t, err) - require.NotEmpty(t, code, "contracts %s at %s has no code", addr.name, addr.addr) - }) - } - +func validateOPChainDeployment(t *testing.T, ctx context.Context, l1Client *ethclient.Client, st *state.State) { for _, chainState := range st.Chains { chainAddrs := []struct { name string @@ -197,7 +250,7 @@ func TestEndToEndApply(t *testing.T) { if addr.name == "FaultDisputeGameAddress" { continue } - t.Run(fmt.Sprintf("chain %s - %s", chainState.ID, addr.name), func(t *testing.T) { + t.Run(addr.name, func(t *testing.T) { code, err := l1Client.CodeAt(ctx, addr.addr, nil) require.NoError(t, err) require.NotEmpty(t, code, "contracts %s at %s for chain %s has no code", addr.name, addr.addr, chainState.ID) diff --git a/op-chain-ops/deployer/opcm/contract.go b/op-chain-ops/deployer/opcm/contract.go new file mode 100644 index 000000000000..c81222aafe88 --- /dev/null +++ b/op-chain-ops/deployer/opcm/contract.go @@ -0,0 +1,83 @@ +package opcm + +import ( + "bytes" + "context" + "fmt" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" +) + +type Contract struct { + addr common.Address + client *ethclient.Client +} + +func NewContract(addr common.Address, client *ethclient.Client) *Contract { + 
return &Contract{addr: addr, client: client} +} + +func (c *Contract) SuperchainConfig(ctx context.Context) (common.Address, error) { + return c.getAddress(ctx, "superchainConfig") +} + +func (c *Contract) ProtocolVersions(ctx context.Context) (common.Address, error) { + return c.getAddress(ctx, "protocolVersions") +} + +func (c *Contract) getAddress(ctx context.Context, name string) (common.Address, error) { + method := abi.NewMethod( + name, + name, + abi.Function, + "view", + true, + false, + abi.Arguments{}, + abi.Arguments{ + abi.Argument{ + Name: "address", + Type: mustType("address"), + Indexed: false, + }, + }, + ) + + calldata, err := method.Inputs.Pack() + if err != nil { + return common.Address{}, fmt.Errorf("failed to pack inputs: %w", err) + } + + msg := ethereum.CallMsg{ + To: &c.addr, + Data: append(bytes.Clone(method.ID), calldata...), + } + result, err := c.client.CallContract(ctx, msg, nil) + if err != nil { + return common.Address{}, fmt.Errorf("failed to call contract: %w", err) + } + + out, err := method.Outputs.Unpack(result) + if err != nil { + return common.Address{}, fmt.Errorf("failed to unpack result: %w", err) + } + if len(out) != 1 { + return common.Address{}, fmt.Errorf("unexpected output length: %d", len(out)) + } + addr, ok := out[0].(common.Address) + if !ok { + return common.Address{}, fmt.Errorf("unexpected type: %T", out[0]) + } + return addr, nil +} + +func mustType(t string) abi.Type { + typ, err := abi.NewType(t, "", nil) + if err != nil { + panic(err) + } + return typ +} diff --git a/op-chain-ops/deployer/opcm/opchain.go b/op-chain-ops/deployer/opcm/opchain.go index d9685182b6e1..c204c1a57ec3 100644 --- a/op-chain-ops/deployer/opcm/opchain.go +++ b/op-chain-ops/deployer/opcm/opchain.go @@ -1,12 +1,19 @@ package opcm import ( + "context" "fmt" "math/big" + "strings" - "github.com/ethereum/go-ethereum/common" - + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/broadcaster" + 
"github.com/ethereum-optimism/optimism/op-chain-ops/foundry" "github.com/ethereum-optimism/optimism/op-chain-ops/script" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/holiman/uint256" ) // PermissionedGameStartingAnchorRoots is a root of bytes32(hex"dead") for the permissioned game at block 0, @@ -45,7 +52,6 @@ type DeployOPChainOutput struct { OptimismMintableERC20FactoryProxy common.Address L1StandardBridgeProxy common.Address L1CrossDomainMessengerProxy common.Address - // Fault proof contracts below. OptimismPortalProxy common.Address DisputeGameFactoryProxy common.Address @@ -97,3 +103,203 @@ func DeployOPChain(host *script.Host, input DeployOPChainInput) (DeployOPChainOu return dco, nil } + +// opcmRoles is an internal struct used to pass the roles to OPSM. See opcmDeployInput for more info. +type opcmRoles struct { + OpChainProxyAdminOwner common.Address + SystemConfigOwner common.Address + Batcher common.Address + UnsafeBlockSigner common.Address + Proposer common.Address + Challenger common.Address +} + +// opcmDeployInput is the input struct for the deploy method of the OPStackManager contract. We +// define a separate struct here to match what the OPSM contract expects. +type opcmDeployInput struct { + Roles opcmRoles + BasefeeScalar uint32 + BlobBasefeeScalar uint32 + L2ChainId *big.Int + StartingAnchorRoots []byte +} + +// decodeOutputABIJSON defines an ABI for a fake method called "decodeOutput" that returns the +// DeployOutput struct. This allows the code in the deployer to decode directly into a struct +// using Geth's ABI library. 
+const decodeOutputABIJSON = ` +[ + { + "type": "function", + "name": "decodeOutput", + "inputs": [], + "outputs": [ + { + "name": "output", + "indexed": false, + "type": "tuple", + "components": [ + { + "name": "opChainProxyAdmin", + "type": "address" + }, + { + "name": "addressManager", + "type": "address" + }, + { + "name": "l1ERC721BridgeProxy", + "type": "address" + }, + { + "name": "systemConfigProxy", + "type": "address" + }, + { + "name": "optimismMintableERC20FactoryProxy", + "type": "address" + }, + { + "name": "l1StandardBridgeProxy", + "type": "address" + }, + { + "name": "l1CrossDomainMessengerProxy", + "type": "address" + }, + { + "name": "optimismPortalProxy", + "type": "address" + }, + { + "name": "disputeGameFactoryProxy", + "type": "address" + }, + { + "name": "anchorStateRegistryProxy", + "type": "address" + }, + { + "name": "anchorStateRegistryImpl", + "type": "address" + }, + { + "name": "faultDisputeGame", + "type": "address", + "internalType": "contract FaultDisputeGame" + }, + { + "name": "permissionedDisputeGame", + "type": "address" + }, + { + "name": "delayedWETHPermissionedGameProxy", + "type": "address" + }, + { + "name": "delayedWETHPermissionlessGameProxy", + "type": "address" + } + ] + } + ] + } +] +` + +var decodeOutputABI abi.ABI + +// DeployOPChainRaw deploys an OP Chain using a raw call to a pre-deployed OPSM contract. 
+func DeployOPChainRaw( + ctx context.Context, + l1 *ethclient.Client, + bcast broadcaster.Broadcaster, + deployer common.Address, + artifacts foundry.StatDirFs, + input DeployOPChainInput, +) (DeployOPChainOutput, error) { + var out DeployOPChainOutput + + artifactsFS := &foundry.ArtifactsFS{FS: artifacts} + opcmArtifacts, err := artifactsFS.ReadArtifact("OPContractsManager.sol", "OPContractsManager") + if err != nil { + return out, fmt.Errorf("failed to read OPStackManager artifact: %w", err) + } + + opcmABI := opcmArtifacts.ABI + calldata, err := opcmABI.Pack("deploy", opcmDeployInput{ + Roles: opcmRoles{ + OpChainProxyAdminOwner: input.OpChainProxyAdminOwner, + SystemConfigOwner: input.SystemConfigOwner, + Batcher: input.Batcher, + UnsafeBlockSigner: input.UnsafeBlockSigner, + Proposer: input.Proposer, + Challenger: input.Challenger, + }, + BasefeeScalar: input.BasefeeScalar, + BlobBasefeeScalar: input.BlobBaseFeeScalar, + L2ChainId: input.L2ChainId, + StartingAnchorRoots: input.StartingAnchorRoots(), + }) + if err != nil { + return out, fmt.Errorf("failed to pack deploy input: %w", err) + } + + nonce, err := l1.NonceAt(ctx, deployer, nil) + if err != nil { + return out, fmt.Errorf("failed to read nonce: %w", err) + } + + bcast.Hook(script.Broadcast{ + From: deployer, + To: input.OpcmProxy, + Input: calldata, + Value: (*hexutil.U256)(uint256.NewInt(0)), + // use hardcoded 19MM gas for now since this is roughly what we've seen this deployment cost. 
+ GasUsed: 19_000_000, + Type: script.BroadcastCall, + Nonce: nonce, + }) + + results, err := bcast.Broadcast(ctx) + if err != nil { + return out, fmt.Errorf("failed to broadcast OP chain deployment: %w", err) + } + + deployedEvent := opcmABI.Events["Deployed"] + res := results[0] + + for _, log := range res.Receipt.Logs { + if log.Topics[0] != deployedEvent.ID { + continue + } + + type EventData struct { + DeployOutput []byte + } + var data EventData + if err := opcmABI.UnpackIntoInterface(&data, "Deployed", log.Data); err != nil { + return out, fmt.Errorf("failed to unpack Deployed event: %w", err) + } + + type OutputData struct { + Output DeployOPChainOutput + } + var outData OutputData + if err := decodeOutputABI.UnpackIntoInterface(&outData, "decodeOutput", data.DeployOutput); err != nil { + return out, fmt.Errorf("failed to unpack DeployOutput: %w", err) + } + + return outData.Output, nil + } + + return out, fmt.Errorf("failed to find Deployed event") +} + +func init() { + var err error + decodeOutputABI, err = abi.JSON(strings.NewReader(decodeOutputABIJSON)) + if err != nil { + panic(fmt.Sprintf("failed to parse decodeOutput ABI: %v", err)) + } +} diff --git a/op-chain-ops/deployer/pipeline/init.go b/op-chain-ops/deployer/pipeline/init.go index 094e103aa940..a680c7fdb48f 100644 --- a/op-chain-ops/deployer/pipeline/init.go +++ b/op-chain-ops/deployer/pipeline/init.go @@ -5,6 +5,7 @@ import ( "crypto/rand" "fmt" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opcm" "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" "github.com/ethereum-optimism/optimism/op-chain-ops/script" @@ -34,6 +35,40 @@ func Init(ctx context.Context, env *Env, artifactsFS foundry.StatDirFs, intent * } } + if intent.OPCMAddress != (common.Address{}) { + env.Logger.Info("using provided OPCM address, populating state", "address", intent.OPCMAddress.Hex()) + + if intent.ContractsRelease == "dev" { + env.Logger.Warn("using dev release with existing OPCM, this field 
will be ignored") + } + + opcmContract := opcm.NewContract(intent.OPCMAddress, env.L1Client) + protocolVersions, err := opcmContract.ProtocolVersions(ctx) + if err != nil { + return fmt.Errorf("error getting protocol versions address: %w", err) + } + superchainConfig, err := opcmContract.SuperchainConfig(ctx) + if err != nil { + return fmt.Errorf("error getting superchain config address: %w", err) + } + env.Logger.Debug( + "populating protocol versions and superchain config addresses", + "protocolVersions", protocolVersions.Hex(), + "superchainConfig", superchainConfig.Hex(), + ) + + // The below fields are the only ones required to perform an OP Chain + // deployment via an existing OPCM contract. All the others are used + // for deploying the OPCM itself, which isn't necessary in this case. + st.SuperchainDeployment = &state.SuperchainDeployment{ + ProtocolVersionsProxyAddress: protocolVersions, + SuperchainConfigProxyAddress: superchainConfig, + } + st.ImplementationsDeployment = &state.ImplementationsDeployment{ + OpcmProxyAddress: intent.OPCMAddress, + } + } + // If the state has never been applied, we don't need to perform // any additional checks. 
if st.AppliedIntent == nil { diff --git a/op-chain-ops/deployer/pipeline/opchain.go b/op-chain-ops/deployer/pipeline/opchain.go index 1ae37970d7d1..27919fb8b135 100644 --- a/op-chain-ops/deployer/pipeline/opchain.go +++ b/op-chain-ops/deployer/pipeline/opchain.go @@ -5,6 +5,8 @@ import ( "fmt" "math/big" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/broadcaster" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opcm" "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/state" "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" @@ -27,45 +29,73 @@ func DeployOPChain(ctx context.Context, env *Env, artifactsFS foundry.StatDirFs, return fmt.Errorf("failed to get chain intent: %w", err) } + input := opcm.DeployOPChainInput{ + OpChainProxyAdminOwner: thisIntent.Roles.ProxyAdminOwner, + SystemConfigOwner: thisIntent.Roles.SystemConfigOwner, + Batcher: thisIntent.Roles.Batcher, + UnsafeBlockSigner: thisIntent.Roles.UnsafeBlockSigner, + Proposer: thisIntent.Roles.Proposer, + Challenger: thisIntent.Roles.Challenger, + BasefeeScalar: 1368, + BlobBaseFeeScalar: 801949, + L2ChainId: chainID.Big(), + OpcmProxy: st.ImplementationsDeployment.OpcmProxyAddress, + } + var dco opcm.DeployOPChainOutput - err = CallScriptBroadcast( - ctx, - CallScriptBroadcastOpts{ - L1ChainID: big.NewInt(int64(intent.L1ChainID)), - Logger: lgr, - ArtifactsFS: artifactsFS, - Deployer: env.Deployer, - Signer: env.Signer, - Client: env.L1Client, - Broadcaster: KeyedBroadcaster, - Handler: func(host *script.Host) error { - host.ImportState(st.ImplementationsDeployment.StateDump) - dco, err = opcm.DeployOPChain( - host, - opcm.DeployOPChainInput{ - OpChainProxyAdminOwner: thisIntent.Roles.ProxyAdminOwner, - SystemConfigOwner: thisIntent.Roles.SystemConfigOwner, - Batcher: thisIntent.Roles.Batcher, - UnsafeBlockSigner: thisIntent.Roles.UnsafeBlockSigner, - Proposer: thisIntent.Roles.Proposer, - Challenger: thisIntent.Roles.Challenger, - BasefeeScalar: 1368, - 
BlobBaseFeeScalar: 801949, - L2ChainId: chainID.Big(), - OpcmProxy: st.ImplementationsDeployment.OpcmProxyAddress, - }, - ) - return err + if intent.OPCMAddress == (common.Address{}) { + err = CallScriptBroadcast( + ctx, + CallScriptBroadcastOpts{ + L1ChainID: big.NewInt(int64(intent.L1ChainID)), + Logger: lgr, + ArtifactsFS: artifactsFS, + Deployer: env.Deployer, + Signer: env.Signer, + Client: env.L1Client, + Broadcaster: KeyedBroadcaster, + Handler: func(host *script.Host) error { + host.ImportState(st.ImplementationsDeployment.StateDump) + + dco, err = opcm.DeployOPChain( + host, + input, + ) + return err + }, }, - }, - ) - if err != nil { - return fmt.Errorf("error deploying OP chain: %w", err) + ) + if err != nil { + return fmt.Errorf("error deploying OP chain: %w", err) + } + } else { + lgr.Info("deploying using existing OPCM", "address", intent.OPCMAddress.Hex()) + + bcaster, err := broadcaster.NewKeyedBroadcaster(broadcaster.KeyedBroadcasterOpts{ + Logger: lgr, + ChainID: big.NewInt(int64(intent.L1ChainID)), + Client: env.L1Client, + Signer: env.Signer, + From: env.Deployer, + }) + if err != nil { + return fmt.Errorf("failed to create broadcaster: %w", err) + } + dco, err = opcm.DeployOPChainRaw( + ctx, + env.L1Client, + bcaster, + env.Deployer, + artifactsFS, + input, + ) + if err != nil { + return fmt.Errorf("error deploying OP chain: %w", err) + } } st.Chains = append(st.Chains, &state.ChainState{ - ID: chainID, - + ID: chainID, ProxyAdminAddress: dco.OpChainProxyAdmin, AddressManagerAddress: dco.AddressManager, L1ERC721BridgeProxyAddress: dco.L1ERC721BridgeProxy, diff --git a/op-chain-ops/deployer/state/intent.go b/op-chain-ops/deployer/state/intent.go index 17bedacd77b5..755ad6bbba54 100644 --- a/op-chain-ops/deployer/state/intent.go +++ b/op-chain-ops/deployer/state/intent.go @@ -3,6 +3,7 @@ package state import ( "fmt" "math/big" + "strings" "github.com/ethereum-optimism/optimism/op-service/ioutil" 
"github.com/ethereum-optimism/optimism/op-service/jsonutil" @@ -26,6 +27,8 @@ type Intent struct { ContractsRelease string `json:"contractsVersion" toml:"contractsVersion"` + OPCMAddress common.Address `json:"opcmAddress" toml:"opcmAddress"` + Chains []*ChainIntent `json:"chains" toml:"chains"` GlobalDeployOverrides map[string]any `json:"globalDeployOverrides" toml:"globalDeployOverrides"` @@ -60,6 +63,10 @@ func (c *Intent) Check() error { return fmt.Errorf("contractArtifactsURL must be set") } + if c.ContractsRelease != "dev" && !strings.HasPrefix(c.ContractsRelease, "op-contracts/") { + return fmt.Errorf("contractsVersion must be either the literal \"dev\" or start with \"op-contracts/\"") + } + return nil } diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index b68db55580f2..b9962af979cc 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -32,8 +32,8 @@ "sourceCodeHash": "0xde4df0f9633dc0cdb1c9f634003ea5b0f7c5c1aebc407bc1b2f44c0ecf938649" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0x92c72b75206e756742df25d67d295e4479e65db1473948b8f53cb4ca642025d5", - "sourceCodeHash": "0x5e04124ee67298d2f1245139baf7de79dee421d2c031c6e5abe0cd3b1bdbdb32" + "initCodeHash": "0x7903f225091334a1910470bb1b5c111f13f6f2572faf03e0c74ad625e4c0d6f5", + "sourceCodeHash": "0x3a25b0ac70b1d434773c86f46b1f2a995722e33d3273762fd5abbb541bffa7db" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xbe2c0c81b3459014f287d8c89cdc0d27dde5d1f44e5d024fa1e4773ddc47c190", diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json index ca2f2ab8ac83..57900b34e8e3 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json @@ -15,6 +15,19 @@ "stateMutability": "nonpayable", "type": "constructor" }, + { + 
"inputs": [], + "name": "OUTPUT_VERSION", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "blueprints", @@ -448,6 +461,12 @@ { "anonymous": false, "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "outputVersion", + "type": "uint256" + }, { "indexed": true, "internalType": "uint256", @@ -456,9 +475,15 @@ }, { "indexed": true, - "internalType": "contract SystemConfig", - "name": "systemConfig", + "internalType": "address", + "name": "deployer", "type": "address" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "deployOutput", + "type": "bytes" } ], "name": "Deployed", diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json index ca2f2ab8ac83..57900b34e8e3 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json @@ -15,6 +15,19 @@ "stateMutability": "nonpayable", "type": "constructor" }, + { + "inputs": [], + "name": "OUTPUT_VERSION", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "blueprints", @@ -448,6 +461,12 @@ { "anonymous": false, "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "outputVersion", + "type": "uint256" + }, { "indexed": true, "internalType": "uint256", @@ -456,9 +475,15 @@ }, { "indexed": true, - "internalType": "contract SystemConfig", - "name": "systemConfig", + "internalType": "address", + "name": "deployer", "type": "address" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "deployOutput", + "type": "bytes" } ], "name": "Deployed", diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol 
b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index 4f36897d637c..d05ba7c8821a 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -124,8 +124,12 @@ contract OPContractsManager is ISemver, Initializable { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.6 - string public constant version = "1.0.0-beta.6"; + /// @custom:semver 1.0.0-beta.7 + string public constant version = "1.0.0-beta.7"; + + /// @notice Represents the interface version so consumers know how to decode the DeployOutput struct + /// that's emitted in the `Deployed` event. Whenever that struct changes, a new version should be used. + uint256 public constant OUTPUT_VERSION = 0; /// @notice Address of the SuperchainConfig contract shared by all chains. SuperchainConfig public immutable superchainConfig; @@ -155,9 +159,13 @@ contract OPContractsManager is ISemver, Initializable { // -------- Events -------- /// @notice Emitted when a new OP Stack chain is deployed. - /// @param l2ChainId The chain ID of the new chain. - /// @param systemConfig The address of the new chain's SystemConfig contract. - event Deployed(uint256 indexed l2ChainId, SystemConfig indexed systemConfig); + /// @param outputVersion Version that indicates how to decode the `deployOutput` argument. + /// @param l2ChainId Chain ID of the new chain. + /// @param deployer Address that deployed the chain. + /// @param deployOutput ABI-encoded output of the deployment. + event Deployed( + uint256 indexed outputVersion, uint256 indexed l2ChainId, address indexed deployer, bytes deployOutput + ); // -------- Errors -------- @@ -334,7 +342,7 @@ contract OPContractsManager is ISemver, Initializable { // Transfer ownership of the ProxyAdmin from this contract to the specified owner. 
output.opChainProxyAdmin.transferOwnership(_input.roles.opChainProxyAdminOwner); - emit Deployed(l2ChainId, output.systemConfigProxy); + emit Deployed(OUTPUT_VERSION, l2ChainId, msg.sender, abi.encode(output)); return output; } diff --git a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol index 7f52b702dd71..54c87616e176 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol @@ -34,7 +34,9 @@ contract OPContractsManager_Harness is OPContractsManager { contract OPContractsManager_Deploy_Test is DeployOPChain_TestBase { using stdStorage for StdStorage; - event Deployed(uint256 indexed l2ChainId, SystemConfig indexed systemConfig); + event Deployed( + uint256 indexed outputVersion, uint256 indexed l2ChainId, address indexed deployer, bytes deployOutput + ); function setUp() public override { DeployOPChain_TestBase.setUp(); @@ -86,8 +88,8 @@ contract OPContractsManager_Deploy_Test is DeployOPChain_TestBase { } function test_deploy_succeeds() public { - vm.expectEmit(true, false, true, true); // TODO precompute the system config address. - emit Deployed(doi.l2ChainId(), SystemConfig(address(1))); + vm.expectEmit(true, true, true, false); // TODO precompute the expected `deployOutput`. 
+ emit Deployed(0, doi.l2ChainId(), address(this), bytes("")); opcm.deploy(toOPCMDeployInput(doi)); } } diff --git a/packages/contracts-bedrock/test/Specs.t.sol b/packages/contracts-bedrock/test/Specs.t.sol index 7f13a8800252..b95604135eb0 100644 --- a/packages/contracts-bedrock/test/Specs.t.sol +++ b/packages/contracts-bedrock/test/Specs.t.sol @@ -843,6 +843,7 @@ contract Specification_Test is CommonTest { _addSpec({ _name: "OPContractsManager", _sel: _getSel("latestRelease()") }); _addSpec({ _name: "OPContractsManager", _sel: _getSel("implementations(string,string)") }); _addSpec({ _name: "OPContractsManager", _sel: _getSel("systemConfigs(uint256)") }); + _addSpec({ _name: "OPContractsManager", _sel: _getSel("OUTPUT_VERSION()") }); _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.initialize.selector }); _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.deploy.selector }); _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.blueprints.selector }); @@ -855,6 +856,7 @@ contract Specification_Test is CommonTest { _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("latestRelease()") }); _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("implementations(string,string)") }); _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("systemConfigs(uint256)") }); + _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("OUTPUT_VERSION()") }); _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.initialize.selector }); _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.deploy.selector }); _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.blueprints.selector }); From e81c50de0a51954c64444b849be4768c8116cffb Mon Sep 17 00:00:00 2001 From: Blaine Malone Date: Wed, 25 Sep 2024 15:53:40 -0400 Subject: [PATCH 023/116] fix: OPCM additional safety checks (#12107) --- .../scripts/DeployOPChain.s.sol | 17 +++++++++-------- 
.../contracts-bedrock/test/DeployOPChain.t.sol | 17 +++++++++++++++-- 2 files changed, 24 insertions(+), 10 deletions(-) diff --git a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol index 524f9896b2bd..6cecb41d36eb 100644 --- a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol @@ -139,9 +139,10 @@ contract DeployOPChainInput is BaseDeployIO { return abi.encode(defaultStartingAnchorRoots); } - // TODO: Check that opcm is proxied and it has an implementation. - function opcmProxy() public view returns (OPContractsManager) { + function opcmProxy() public returns (OPContractsManager) { require(address(_opcmProxy) != address(0), "DeployOPChainInput: not set"); + DeployUtils.assertValidContractAddress(address(_opcmProxy)); + DeployUtils.assertImplementationSet(address(_opcmProxy)); return _opcmProxy; } } @@ -303,7 +304,7 @@ contract DeployOPChainOutput is BaseDeployIO { assertValidSystemConfig(_doi); } - function assertValidPermissionedDisputeGame(DeployOPChainInput _doi) internal view { + function assertValidPermissionedDisputeGame(DeployOPChainInput _doi) internal { PermissionedDisputeGame game = permissionedDisputeGame(); require(GameType.unwrap(game.gameType()) == GameType.unwrap(GameTypes.PERMISSIONED_CANNON), "DPG-10"); @@ -344,7 +345,7 @@ contract DeployOPChainOutput is BaseDeployIO { require(address(registry.disputeGameFactory()) == address(disputeGameFactoryProxy()), "ANCHORI-10"); } - function assertValidSystemConfig(DeployOPChainInput _doi) internal view { + function assertValidSystemConfig(DeployOPChainInput _doi) internal { SystemConfig systemConfig = systemConfigProxy(); DeployUtils.assertInitialized({ _contractAddress: address(systemConfig), _slot: 0, _offset: 0 }); @@ -383,7 +384,7 @@ contract DeployOPChainOutput is BaseDeployIO { require(gasPayingToken == Constants.ETHER, "SYSCON-220"); } - function 
assertValidL1CrossDomainMessenger(DeployOPChainInput _doi) internal view { + function assertValidL1CrossDomainMessenger(DeployOPChainInput _doi) internal { L1CrossDomainMessenger messenger = l1CrossDomainMessengerProxy(); DeployUtils.assertInitialized({ _contractAddress: address(messenger), _slot: 0, _offset: 20 }); @@ -399,7 +400,7 @@ contract DeployOPChainOutput is BaseDeployIO { require(address(uint160(uint256(xdmSenderSlot))) == Constants.DEFAULT_L2_SENDER, "L1xDM-60"); } - function assertValidL1StandardBridge(DeployOPChainInput _doi) internal view { + function assertValidL1StandardBridge(DeployOPChainInput _doi) internal { L1StandardBridge bridge = l1StandardBridgeProxy(); L1CrossDomainMessenger messenger = l1CrossDomainMessengerProxy(); @@ -421,7 +422,7 @@ contract DeployOPChainOutput is BaseDeployIO { require(factory.bridge() == address(l1StandardBridgeProxy()), "MERC20F-20"); } - function assertValidL1ERC721Bridge(DeployOPChainInput _doi) internal view { + function assertValidL1ERC721Bridge(DeployOPChainInput _doi) internal { L1ERC721Bridge bridge = l1ERC721BridgeProxy(); DeployUtils.assertInitialized({ _contractAddress: address(bridge), _slot: 0, _offset: 0 }); @@ -434,7 +435,7 @@ contract DeployOPChainOutput is BaseDeployIO { require(address(bridge.superchainConfig()) == address(_doi.opcmProxy().superchainConfig()), "L721B-50"); } - function assertValidOptimismPortal(DeployOPChainInput _doi) internal view { + function assertValidOptimismPortal(DeployOPChainInput _doi) internal { OptimismPortal2 portal = optimismPortalProxy(); ISuperchainConfig superchainConfig = ISuperchainConfig(address(_doi.opcmProxy().superchainConfig())); diff --git a/packages/contracts-bedrock/test/DeployOPChain.t.sol b/packages/contracts-bedrock/test/DeployOPChain.t.sol index 445602062d5a..9537f6339575 100644 --- a/packages/contracts-bedrock/test/DeployOPChain.t.sol +++ b/packages/contracts-bedrock/test/DeployOPChain.t.sol @@ -30,6 +30,7 @@ import { L1CrossDomainMessenger } from 
"src/L1/L1CrossDomainMessenger.sol"; import { L1ERC721Bridge } from "src/L1/L1ERC721Bridge.sol"; import { L1StandardBridge } from "src/L1/L1StandardBridge.sol"; import { OptimismMintableERC20Factory } from "src/universal/OptimismMintableERC20Factory.sol"; +import { Proxy } from "src/universal/Proxy.sol"; import { GameType, GameTypes, Hash, OutputRoot } from "src/dispute/lib/Types.sol"; @@ -52,6 +53,15 @@ contract DeployOPChainInput_Test is Test { doi = new DeployOPChainInput(); } + function buildOpcmProxy() public returns (Proxy opcmProxy) { + opcmProxy = new Proxy(address(0)); + OPContractsManager opcmImpl = OPContractsManager(address(makeAddr("opcmImpl"))); + vm.prank(address(0)); + opcmProxy.upgradeTo(address(opcmImpl)); + vm.etch(address(opcmProxy), address(opcmProxy).code); + vm.etch(address(opcmImpl), hex"01"); + } + function test_set_succeeds() public { doi.set(doi.opChainProxyAdminOwner.selector, opChainProxyAdminOwner); doi.set(doi.systemConfigOwner.selector, systemConfigOwner); @@ -62,7 +72,10 @@ contract DeployOPChainInput_Test is Test { doi.set(doi.basefeeScalar.selector, basefeeScalar); doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); doi.set(doi.l2ChainId.selector, l2ChainId); - doi.set(doi.opcmProxy.selector, address(opcm)); + + (Proxy opcmProxy) = buildOpcmProxy(); + doi.set(doi.opcmProxy.selector, address(opcmProxy)); + // Compare the default inputs to the getter methods. 
assertEq(opChainProxyAdminOwner, doi.opChainProxyAdminOwner(), "200"); assertEq(systemConfigOwner, doi.systemConfigOwner(), "300"); @@ -73,7 +86,7 @@ contract DeployOPChainInput_Test is Test { assertEq(basefeeScalar, doi.basefeeScalar(), "800"); assertEq(blobBaseFeeScalar, doi.blobBaseFeeScalar(), "900"); assertEq(l2ChainId, doi.l2ChainId(), "1000"); - assertEq(address(opcm), address(doi.opcmProxy()), "1100"); + assertEq(address(opcmProxy), address(doi.opcmProxy()), "1100"); } function test_getters_whenNotSet_revert() public { From f37d53b3781dd4c911dc58812db59b67c9293005 Mon Sep 17 00:00:00 2001 From: Maurelian Date: Wed, 25 Sep 2024 16:04:29 -0400 Subject: [PATCH 024/116] feat(opcm): Update comment about what gets deployed where (#12128) * feat(opcm): Update comment about what gets deployed where * test(deploy): Add note clarifying MCP readiness of DelayedWeths and Portal2 --- .../scripts/DeployImplementations.s.sol | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol index bdebf297c3f6..cf992ab582b7 100644 --- a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol +++ b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol @@ -759,10 +759,13 @@ contract DeployImplementations is Script { // | AnchorStateRegistry | Yes | Bespoke | No | // | FaultDisputeGame | No | Bespoke | No | Not yet supported by OPCM // | PermissionedDisputeGame | No | Bespoke | No | - // | DelayedWETH | Yes | Two bespoke (one per DisputeGame) | No | + // | DelayedWETH | Yes | Two bespoke (one per DisputeGame) | Yes *️⃣ | // | PreimageOracle | No | Shared | N/A | // | MIPS | No | Shared | N/A | - // | OptimismPortal2 | Yes | Shared | No | + // | OptimismPortal2 | Yes | Shared | Yes *️⃣ | + // + // - *️⃣ These contracts have immutable values which are intended to be constant for all contracts within a + // Superchain, and 
are therefore MCP ready for any chain using the Standard Configuration. // // This script only deploys the shared contracts. The bespoke contracts are deployed by // `DeployOPChain.s.sol`. When the shared contracts are proxied, the contracts deployed here are @@ -777,6 +780,12 @@ contract DeployImplementations is Script { // // For contracts which are not MCP ready neither the Proxy nor the implementation can be shared, therefore they // are deployed by `DeployOpChain.s.sol`. + // These are: + // - AnchorStateRegistry (proxy and implementation) + // - FaultDisputeGame (not proxied) + // - PermissionedDisputeGame (not proxied) + // - DelayedWeth (proxies only) + // - OptimismPortal2 (proxies only) function deployOptimismPortalImpl( DeployImplementationsInput _dii, From d125012b81fa5f65315aa5ae4976dd43a53c44a8 Mon Sep 17 00:00:00 2001 From: Maurelian Date: Wed, 25 Sep 2024 16:54:16 -0400 Subject: [PATCH 025/116] test: move OPCM tests to own dir (#12110) --- packages/contracts-bedrock/test/L1/OPContractsManager.t.sol | 2 +- .../contracts-bedrock/test/{ => opcm}/DeployAuthSystem.t.sol | 0 .../test/{ => opcm}/DeployImplementations.t.sol | 0 packages/contracts-bedrock/test/{ => opcm}/DeployOPChain.t.sol | 0 .../contracts-bedrock/test/{ => opcm}/DeploySuperchain.t.sol | 0 5 files changed, 1 insertion(+), 1 deletion(-) rename packages/contracts-bedrock/test/{ => opcm}/DeployAuthSystem.t.sol (100%) rename packages/contracts-bedrock/test/{ => opcm}/DeployImplementations.t.sol (100%) rename packages/contracts-bedrock/test/{ => opcm}/DeployOPChain.t.sol (100%) rename packages/contracts-bedrock/test/{ => opcm}/DeploySuperchain.t.sol (100%) diff --git a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol index 54c87616e176..c99ea77357fa 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol @@ -4,7 +4,7 @@ pragma solidity 
0.8.15; import { Test, stdStorage, StdStorage } from "forge-std/Test.sol"; import { DeployOPChainInput } from "scripts/DeployOPChain.s.sol"; -import { DeployOPChain_TestBase } from "test/DeployOPChain.t.sol"; +import { DeployOPChain_TestBase } from "test/opcm/DeployOPChain.t.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; diff --git a/packages/contracts-bedrock/test/DeployAuthSystem.t.sol b/packages/contracts-bedrock/test/opcm/DeployAuthSystem.t.sol similarity index 100% rename from packages/contracts-bedrock/test/DeployAuthSystem.t.sol rename to packages/contracts-bedrock/test/opcm/DeployAuthSystem.t.sol diff --git a/packages/contracts-bedrock/test/DeployImplementations.t.sol b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol similarity index 100% rename from packages/contracts-bedrock/test/DeployImplementations.t.sol rename to packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol diff --git a/packages/contracts-bedrock/test/DeployOPChain.t.sol b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol similarity index 100% rename from packages/contracts-bedrock/test/DeployOPChain.t.sol rename to packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol diff --git a/packages/contracts-bedrock/test/DeploySuperchain.t.sol b/packages/contracts-bedrock/test/opcm/DeploySuperchain.t.sol similarity index 100% rename from packages/contracts-bedrock/test/DeploySuperchain.t.sol rename to packages/contracts-bedrock/test/opcm/DeploySuperchain.t.sol From d90e434083d5f7d7fdb4c07ec1a69344e66d2bcb Mon Sep 17 00:00:00 2001 From: George Knee Date: Wed, 25 Sep 2024 21:56:53 +0100 Subject: [PATCH 026/116] use deterministic seed for test data (#12129) This was causing the test to fail in approx 5% of cases --- op-batcher/batcher/channel_manager_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/op-batcher/batcher/channel_manager_test.go 
b/op-batcher/batcher/channel_manager_test.go index c129cd9cde99..dc913505c05f 100644 --- a/op-batcher/batcher/channel_manager_test.go +++ b/op-batcher/batcher/channel_manager_test.go @@ -567,7 +567,7 @@ func TestChannelManager_TxData(t *testing.T) { require.Equal(t, tc.chooseBlobsWhenChannelCreated, m.defaultCfg.UseBlobs) // Seed channel manager with a block - rng := rand.New(rand.NewSource(time.Now().UnixNano())) + rng := rand.New(rand.NewSource(99)) blockA := derivetest.RandomL2BlockWithChainId(rng, 200, defaultTestRollupConfig.L2ChainID) m.blocks = []*types.Block{blockA} From c19d51bff3285792286a35e7ed10a932e390bcc3 Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 25 Sep 2024 15:06:47 -0600 Subject: [PATCH 027/116] op-supervisor: head pointers, refactor block processor (#12031) * op-supervisor: supervisor-head-pointers squashed change entry indices to head pointers, refactor block processor, backend fixes Co-authored-by: Axel Kingsley * use ticker instead of time.After --------- Co-authored-by: Axel Kingsley --- op-supervisor/supervisor/backend/backend.go | 28 +- op-supervisor/supervisor/backend/db/db.go | 120 ++++++--- .../supervisor/backend/db/db_test.go | 109 ++++++-- .../supervisor/backend/db/heads/heads.go | 93 ++++++- .../supervisor/backend/db/heads/heads_test.go | 2 + .../supervisor/backend/db/heads/types.go | 71 ++++- .../supervisor/backend/db/heads/types_test.go | 60 +++-- .../supervisor/backend/db/logs/db.go | 31 +-- .../supervisor/backend/db/safety_checkers.go | 247 ++++++++---------- .../backend/db/safety_checkers_test.go | 2 + .../supervisor/backend/source/chain.go | 42 +-- .../backend/source/chain_processor.go | 164 +++++++++--- .../backend/source/chain_processor_test.go | 4 +- .../supervisor/backend/source/fetch_logs.go | 46 ---- .../backend/source/fetch_logs_test.go | 77 ------ .../backend/source/head_processor.go | 37 ++- .../backend/source/head_processor_test.go | 9 +- op-supervisor/supervisor/types/types.go | 2 +- 18 files changed, 687 
insertions(+), 457 deletions(-) delete mode 100644 op-supervisor/supervisor/backend/source/fetch_logs.go delete mode 100644 op-supervisor/supervisor/backend/source/fetch_logs_test.go diff --git a/op-supervisor/supervisor/backend/backend.go b/op-supervisor/supervisor/backend/backend.go index 54b2f2eae20a..f21217e82c42 100644 --- a/op-supervisor/supervisor/backend/backend.go +++ b/op-supervisor/supervisor/backend/backend.go @@ -48,7 +48,7 @@ func NewSupervisorBackend(ctx context.Context, logger log.Logger, m Metrics, cfg } // create the head tracker - headTracker, err := heads.NewHeadTracker(filepath.Join(cfg.Datadir, "heads.json")) + headTracker, err := heads.NewHeadTracker(logger, filepath.Join(cfg.Datadir, "heads.json")) if err != nil { return nil, fmt.Errorf("failed to load existing heads: %w", err) } @@ -190,7 +190,7 @@ func (su *SupervisorBackend) CheckMessage(identifier types.Identifier, payloadHa chainID := identifier.ChainID blockNum := identifier.BlockNumber logIdx := identifier.LogIndex - i, err := su.db.Check(chainID, blockNum, uint32(logIdx), payloadHash) + _, err := su.db.Check(chainID, blockNum, uint32(logIdx), payloadHash) if errors.Is(err, logs.ErrFuture) { return types.Unsafe, nil } @@ -207,8 +207,15 @@ func (su *SupervisorBackend) CheckMessage(identifier types.Identifier, payloadHa db.NewSafetyChecker(types.Safe, su.db), db.NewSafetyChecker(types.Finalized, su.db), } { - if i <= checker.CrossHeadForChain(chainID) { - safest = checker.SafetyLevel() + // check local safety limit first as it's more permissive + localPtr := checker.LocalHead(chainID) + if localPtr.WithinRange(blockNum, uint32(logIdx)) { + safest = checker.LocalSafetyLevel() + } + // check cross safety level + crossPtr := checker.CrossHead(chainID) + if crossPtr.WithinRange(blockNum, uint32(logIdx)) { + safest = checker.CrossSafetyLevel() } } return safest, nil @@ -239,7 +246,7 @@ func (su *SupervisorBackend) CheckBlock(chainID *hexutil.U256, blockHash common. 
safest := types.CrossUnsafe // find the last log index in the block id := eth.BlockID{Hash: blockHash, Number: uint64(blockNumber)} - i, err := su.db.FindSealedBlock(types.ChainID(*chainID), id) + _, err := su.db.FindSealedBlock(types.ChainID(*chainID), id) if errors.Is(err, logs.ErrFuture) { return types.Unsafe, nil } @@ -256,8 +263,15 @@ func (su *SupervisorBackend) CheckBlock(chainID *hexutil.U256, blockHash common. db.NewSafetyChecker(types.Safe, su.db), db.NewSafetyChecker(types.Finalized, su.db), } { - if i <= checker.CrossHeadForChain(types.ChainID(*chainID)) { - safest = checker.SafetyLevel() + // check local safety limit first as it's more permissive + localPtr := checker.LocalHead(types.ChainID(*chainID)) + if localPtr.IsSealed(uint64(blockNumber)) { + safest = checker.LocalSafetyLevel() + } + // check cross safety level + crossPtr := checker.CrossHead(types.ChainID(*chainID)) + if crossPtr.IsSealed(uint64(blockNumber)) { + safest = checker.CrossSafetyLevel() } } return safest, nil diff --git a/op-supervisor/supervisor/backend/db/db.go b/op-supervisor/supervisor/backend/db/db.go index 184be4df76c1..6c5e354dd0ab 100644 --- a/op-supervisor/supervisor/backend/db/db.go +++ b/op-supervisor/supervisor/backend/db/db.go @@ -39,7 +39,7 @@ type LogStorage interface { // returns ErrDifferent if the known block does not match FindSealedBlock(block eth.BlockID) (nextEntry entrydb.EntryIdx, err error) - IteratorStartingAt(i entrydb.EntryIdx) (logs.Iterator, error) + IteratorStartingAt(sealedNum uint64, logsSince uint32) (logs.Iterator, error) // returns ErrConflict if the log does not match the canonical chain. // returns ErrFuture if the log is out of reach. 
@@ -50,8 +50,20 @@ type LogStorage interface { var _ LogStorage = (*logs.DB)(nil) type HeadsStorage interface { - Current() *heads.Heads - Apply(op heads.Operation) error + CrossUnsafe(id types.ChainID) heads.HeadPointer + CrossSafe(id types.ChainID) heads.HeadPointer + CrossFinalized(id types.ChainID) heads.HeadPointer + LocalUnsafe(id types.ChainID) heads.HeadPointer + LocalSafe(id types.ChainID) heads.HeadPointer + LocalFinalized(id types.ChainID) heads.HeadPointer + + UpdateCrossUnsafe(id types.ChainID, pointer heads.HeadPointer) error + UpdateCrossSafe(id types.ChainID, pointer heads.HeadPointer) error + UpdateCrossFinalized(id types.ChainID, pointer heads.HeadPointer) error + + UpdateLocalUnsafe(id types.ChainID, pointer heads.HeadPointer) error + UpdateLocalSafe(id types.ChainID, pointer heads.HeadPointer) error + UpdateLocalFinalized(id types.ChainID, pointer heads.HeadPointer) error } // ChainsDB is a database that stores logs and heads for multiple chains. @@ -85,7 +97,7 @@ func (db *ChainsDB) AddLogDB(chain types.ChainID, logDB LogStorage) { func (db *ChainsDB) ResumeFromLastSealedBlock() error { for chain, logStore := range db.logDBs { headNum, ok := logStore.LatestSealedBlockNum() - if ok { + if !ok { // db must be empty, nothing to rewind to db.logger.Info("Resuming, but found no DB contents", "chain", chain) continue @@ -155,7 +167,7 @@ func (db *ChainsDB) updateAllHeads() error { safeChecker, finalizedChecker} { if err := db.UpdateCrossHeads(checker); err != nil { - return fmt.Errorf("failed to update cross-heads for safety level %v: %w", checker.Name(), err) + return fmt.Errorf("failed to update cross-heads for safety level %s: %w", checker, err) } } return nil @@ -165,13 +177,14 @@ func (db *ChainsDB) updateAllHeads() error { // the provided checker controls which heads are considered. 
func (db *ChainsDB) UpdateCrossHeadsForChain(chainID types.ChainID, checker SafetyChecker) error { // start with the xsafe head of the chain - xHead := checker.CrossHeadForChain(chainID) + xHead := checker.CrossHead(chainID) // advance as far as the local head - localHead := checker.LocalHeadForChain(chainID) - // get an iterator for the last checkpoint behind the x-head - iter, err := db.logDBs[chainID].IteratorStartingAt(xHead) + localHead := checker.LocalHead(chainID) + // get an iterator for the next item + iter, err := db.logDBs[chainID].IteratorStartingAt(xHead.LastSealedBlockNum, xHead.LogsSince) if err != nil { - return fmt.Errorf("failed to rewind cross-safe head for chain %v: %w", chainID, err) + return fmt.Errorf("failed to open iterator at sealed block %d logsSince %d for chain %v: %w", + xHead.LastSealedBlockNum, xHead.LogsSince, chainID, err) } // track if we updated the cross-head updated := false @@ -181,51 +194,92 @@ func (db *ChainsDB) UpdateCrossHeadsForChain(chainID types.ChainID, checker Safe // - when we reach a message that is not safe // - if an error occurs for { - if err := iter.NextExecMsg(); err == io.EOF { + if err := iter.NextInitMsg(); errors.Is(err, logs.ErrFuture) { + // We ran out of events, but there can still be empty blocks. + // Take the last block we've processed, and try to update the x-head with it. + sealedBlockHash, sealedBlockNum, ok := iter.SealedBlock() + if !ok { + break + } + // We can only drop the logsSince value to 0 if the block is not seen. 
+ if sealedBlockNum > xHead.LastSealedBlockNum { + // if we would exceed the local head, then abort + if !localHead.WithinRange(sealedBlockNum, 0) { + break + } + xHead = heads.HeadPointer{ + LastSealedBlockHash: sealedBlockHash, + LastSealedBlockNum: sealedBlockNum, + LogsSince: 0, + } + updated = true + } break } else if err != nil { return fmt.Errorf("failed to read next executing message for chain %v: %w", chainID, err) } - // if we would exceed the local head, then abort - if iter.NextIndex() > localHead { - xHead = localHead // clip to local head - updated = localHead != xHead + + sealedBlockHash, sealedBlockNum, ok := iter.SealedBlock() + if !ok { break } - exec := iter.ExecMessage() - if exec == nil { - panic("expected executing message after traversing to one without error") + _, logIdx, ok := iter.InitMessage() + if !ok { + break } - // use the checker to determine if this message is safe - safe := checker.Check( - types.ChainIDFromUInt64(uint64(exec.Chain)), - exec.BlockNum, - exec.LogIdx, - exec.Hash) - if !safe { + // if we would exceed the local head, then abort + if !localHead.WithinRange(sealedBlockNum, logIdx) { break } + + // Check the executing message, if any + exec := iter.ExecMessage() + if exec != nil { + // Use the checker to determine if this message exists in the canonical chain, + // within the view of the checker's safety level + if err := checker.CheckCross( + types.ChainIDFromUInt64(uint64(exec.Chain)), + exec.BlockNum, + exec.LogIdx, + exec.Hash); err != nil { + if errors.Is(err, logs.ErrConflict) { + db.logger.Error("Bad executing message!", "err", err) + } else if errors.Is(err, logs.ErrFuture) { + db.logger.Warn("Executing message references future message", "err", err) + } else { + db.logger.Error("Failed to check executing message") + } + break + } + } // if all is well, prepare the x-head update to this point - xHead = iter.NextIndex() + xHead = heads.HeadPointer{ + LastSealedBlockHash: sealedBlockHash, + LastSealedBlockNum: 
sealedBlockNum, + LogsSince: logIdx + 1, + } updated = true } - // have the checker create an update to the x-head in question, and apply that update - err = db.heads.Apply(checker.Update(chainID, xHead)) - if err != nil { - return fmt.Errorf("failed to update cross-head for chain %v: %w", chainID, err) - } // if any chain was updated, we can trigger a maintenance request // this allows for the maintenance loop to handle cascading updates // instead of waiting for the next scheduled update if updated { - db.logger.Info("Promoting cross-head", "head", xHead, "safety-level", checker.SafetyLevel()) + db.logger.Info("Promoting cross-head", "chain", chainID, "head", xHead, "safety-level", checker.CrossSafetyLevel()) + err = checker.UpdateCross(chainID, xHead) + if err != nil { + return fmt.Errorf("failed to update cross-head for chain %v: %w", chainID, err) + } db.RequestMaintenance() } else { - db.logger.Info("No cross-head update", "head", xHead, "safety-level", checker.SafetyLevel()) + db.logger.Debug("No cross-head update", "chain", chainID, "head", xHead, "safety-level", checker.CrossSafetyLevel()) } return nil } +func (db *ChainsDB) Heads() HeadsStorage { + return db.heads +} + // UpdateCrossHeads updates the cross-heads of all chains // based on the provided SafetyChecker. The SafetyChecker is used to determine // the safety of each log entry in the database, and the cross-head associated with it. diff --git a/op-supervisor/supervisor/backend/db/db_test.go b/op-supervisor/supervisor/backend/db/db_test.go index e1da3c177b10..cfa9477ae649 100644 --- a/op-supervisor/supervisor/backend/db/db_test.go +++ b/op-supervisor/supervisor/backend/db/db_test.go @@ -1,7 +1,9 @@ package db +/* import ( "errors" + "fmt" "io" "math/rand" // nosemgrep "testing" @@ -182,9 +184,9 @@ func TestChainsDB_UpdateCrossHeadsError(t *testing.T) { // but readability and maintainability would be improved by making this function more configurable. 
func setupStubbedForUpdateHeads(chainID types.ChainID) (*stubLogDB, *stubChecker, *heads.Heads) { // the last known cross-safe head is at 20 - cross := entrydb.EntryIdx(20) + cross := heads.HeadPointer{LastSealedBlockNum: 20} // the local head (the limit of the update) is at 40 - local := entrydb.EntryIdx(40) + local := heads.HeadPointer{LastSealedBlockNum: 40} // the number of executing messages to make available (this should be more than the number of safety checks performed) numExecutingMessages := 30 // number of safety checks that will pass before returning false @@ -245,39 +247,57 @@ func setupStubbedForUpdateHeads(chainID types.ChainID) (*stubLogDB, *stubChecker } type stubChecker struct { - localHeadForChain entrydb.EntryIdx - crossHeadForChain entrydb.EntryIdx + localHeadForChain heads.HeadPointer + crossHeadForChain heads.HeadPointer numSafe int checkCalls int - updated entrydb.EntryIdx + updated heads.HeadPointer } -func (s *stubChecker) LocalHeadForChain(chainID types.ChainID) entrydb.EntryIdx { - return s.localHeadForChain +func (s *stubChecker) String() string { + return "stubChecker" } -func (s *stubChecker) Name() string { - return "stubChecker" +func (s *stubChecker) LocalSafetyLevel() types.SafetyLevel { + return types.Safe } -func (s *stubChecker) CrossHeadForChain(chainID types.ChainID) entrydb.EntryIdx { +func (s *stubChecker) CrossSafetyLevel() types.SafetyLevel { + return types.Safe +} + +func (s *stubChecker) LocalHead(chainID types.ChainID) heads.HeadPointer { + return s.localHeadForChain +} + +func (s *stubChecker) CrossHead(chainID types.ChainID) heads.HeadPointer { return s.crossHeadForChain } // stubbed Check returns true for the first numSafe calls, and false thereafter func (s *stubChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool { if s.checkCalls >= s.numSafe { - return false + return fmt.Errorf("safety check failed") } s.checkCalls++ - return true + return nil +} +func (s *stubChecker) 
CheckCross(chain types.ChainID, blockNum uint64, logIdx uint32, logHash backendTypes.TruncatedHash) error { + return s.check(chain, blockNum, logIdx, logHash) +} +func (s *stubChecker) CheckLocal(chain types.ChainID, blockNum uint64, logIdx uint32, logHash backendTypes.TruncatedHash) error { + return s.check(chain, blockNum, logIdx, logHash) } -func (s *stubChecker) Update(chain types.ChainID, index entrydb.EntryIdx) heads.OperationFn { - s.updated = index - return func(heads *heads.Heads) error { - return nil - } +func (s *stubChecker) Update(chain types.ChainID, h heads.HeadPointer) error { + s.updated = h + return nil +} +func (s *stubChecker) UpdateCross(chain types.ChainID, h heads.HeadPointer) error { + return s.Update(chain, h) +} +func (s *stubChecker) UpdateLocal(chain types.ChainID, h heads.HeadPointer) error { + return s.Update(chain, h) } func (s *stubChecker) SafetyLevel() types.SafetyLevel { @@ -288,6 +308,54 @@ type stubHeadStorage struct { heads *heads.Heads } +func (s *stubHeadStorage) UpdateLocalUnsafe(chainID types.ChainID, h heads.HeadPointer) error { + panic("not implemented") +} + +func (s *stubHeadStorage) UpdateLocalSafe(chainID types.ChainID, h heads.HeadPointer) error { + panic("not implemented") +} + +func (s *stubHeadStorage) UpdateLocalFinalized(chainID types.ChainID, h heads.HeadPointer) error { + panic("not implemented") +} + +func (s *stubHeadStorage) UpdateCrossUnsafe(chainID types.ChainID, h heads.HeadPointer) error { + panic("not implemented") +} + +func (s *stubHeadStorage) UpdateCrossSafe(chainID types.ChainID, h heads.HeadPointer) error { + panic("not implemented") +} + +func (s *stubHeadStorage) UpdateCrossFinalized(chainID types.ChainID, h heads.HeadPointer) error { + panic("not implemented") +} + +func (s *stubHeadStorage) LocalUnsafe(chainID types.ChainID) heads.HeadPointer { + panic("not implemented") +} + +func (s *stubHeadStorage) LocalSafe(chainID types.ChainID) heads.HeadPointer { + panic("not implemented") +} + +func 
(s *stubHeadStorage) LocalFinalized(chainID types.ChainID) heads.HeadPointer { + panic("not implemented") +} + +func (s *stubHeadStorage) CrossUnsafe(chainID types.ChainID) heads.HeadPointer { + panic("not implemented") +} + +func (s *stubHeadStorage) CrossSafe(chainID types.ChainID) heads.HeadPointer { + panic("not implemented") +} + +func (s *stubHeadStorage) CrossFinalized(chainID types.ChainID) heads.HeadPointer { + panic("not implemented") +} + func (s *stubHeadStorage) Apply(heads.Operation) error { return nil } @@ -415,10 +483,10 @@ func (s *stubLogDB) FindSealedBlock(block eth.BlockID) (nextEntry entrydb.EntryI panic("not implemented") } -func (s *stubLogDB) IteratorStartingAt(i entrydb.EntryIdx) (logs.Iterator, error) { +func (s *stubLogDB) IteratorStartingAt(sealedNum uint64, logIndex uint32) (logs.Iterator, error) { return &stubIterator{ - index: i - 1, - db: s, + //index: i - 1, // TODO broken + db: s, }, nil } @@ -447,3 +515,4 @@ func (s *stubLogDB) LatestBlockNum() uint64 { func (s *stubLogDB) Close() error { return nil } +*/ diff --git a/op-supervisor/supervisor/backend/db/heads/heads.go b/op-supervisor/supervisor/backend/db/heads/heads.go index 7504806c0435..93d02a84fa64 100644 --- a/op-supervisor/supervisor/backend/db/heads/heads.go +++ b/op-supervisor/supervisor/backend/db/heads/heads.go @@ -7,8 +7,12 @@ import ( "os" "sync" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum-optimism/optimism/op-service/ioutil" "github.com/ethereum-optimism/optimism/op-service/jsonutil" + + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) // HeadTracker records the current chain head pointers for a single chain. 
@@ -18,9 +22,95 @@ type HeadTracker struct { path string current *Heads + + logger log.Logger +} + +func (t *HeadTracker) CrossUnsafe(id types.ChainID) HeadPointer { + return t.current.Get(id).CrossUnsafe +} + +func (t *HeadTracker) CrossSafe(id types.ChainID) HeadPointer { + return t.current.Get(id).CrossSafe +} + +func (t *HeadTracker) CrossFinalized(id types.ChainID) HeadPointer { + return t.current.Get(id).CrossFinalized +} + +func (t *HeadTracker) LocalUnsafe(id types.ChainID) HeadPointer { + return t.current.Get(id).Unsafe +} + +func (t *HeadTracker) LocalSafe(id types.ChainID) HeadPointer { + return t.current.Get(id).LocalSafe +} + +func (t *HeadTracker) LocalFinalized(id types.ChainID) HeadPointer { + return t.current.Get(id).LocalFinalized +} + +func (t *HeadTracker) UpdateCrossUnsafe(id types.ChainID, pointer HeadPointer) error { + return t.Apply(OperationFn(func(heads *Heads) error { + t.logger.Info("Cross-unsafe update", "pointer", pointer) + h := heads.Get(id) + h.CrossUnsafe = pointer + heads.Put(id, h) + return nil + })) +} + +func (t *HeadTracker) UpdateCrossSafe(id types.ChainID, pointer HeadPointer) error { + return t.Apply(OperationFn(func(heads *Heads) error { + t.logger.Info("Cross-safe update", "pointer", pointer) + h := heads.Get(id) + h.CrossSafe = pointer + heads.Put(id, h) + return nil + })) +} + +func (t *HeadTracker) UpdateCrossFinalized(id types.ChainID, pointer HeadPointer) error { + return t.Apply(OperationFn(func(heads *Heads) error { + t.logger.Info("Cross-finalized update", "pointer", pointer) + h := heads.Get(id) + h.CrossFinalized = pointer + heads.Put(id, h) + return nil + })) +} + +func (t *HeadTracker) UpdateLocalUnsafe(id types.ChainID, pointer HeadPointer) error { + return t.Apply(OperationFn(func(heads *Heads) error { + t.logger.Info("Local-unsafe update", "pointer", pointer) + h := heads.Get(id) + h.Unsafe = pointer + heads.Put(id, h) + return nil + })) +} + +func (t *HeadTracker) UpdateLocalSafe(id types.ChainID, pointer 
HeadPointer) error { + return t.Apply(OperationFn(func(heads *Heads) error { + t.logger.Info("Local-safe update", "pointer", pointer) + h := heads.Get(id) + h.LocalSafe = pointer + heads.Put(id, h) + return nil + })) +} + +func (t *HeadTracker) UpdateLocalFinalized(id types.ChainID, pointer HeadPointer) error { + return t.Apply(OperationFn(func(heads *Heads) error { + t.logger.Info("Local-finalized update", "pointer", pointer) + h := heads.Get(id) + h.LocalFinalized = pointer + heads.Put(id, h) + return nil + })) } -func NewHeadTracker(path string) (*HeadTracker, error) { +func NewHeadTracker(logger log.Logger, path string) (*HeadTracker, error) { current := NewHeads() if data, err := os.ReadFile(path); errors.Is(err, os.ErrNotExist) { // No existing file, just use empty heads @@ -34,6 +124,7 @@ func NewHeadTracker(path string) (*HeadTracker, error) { return &HeadTracker{ path: path, current: current, + logger: logger, }, nil } diff --git a/op-supervisor/supervisor/backend/db/heads/heads_test.go b/op-supervisor/supervisor/backend/db/heads/heads_test.go index 0bcefdfb716b..9b8fb4bd4572 100644 --- a/op-supervisor/supervisor/backend/db/heads/heads_test.go +++ b/op-supervisor/supervisor/backend/db/heads/heads_test.go @@ -1,5 +1,6 @@ package heads +/* import ( "errors" "os" @@ -99,3 +100,4 @@ func TestHeads_NoChangesMadeIfWriteFails(t *testing.T) { require.ErrorIs(t, err, os.ErrNotExist) require.Equal(t, ChainHeads{}, orig.Current().Get(chainA)) } +*/ diff --git a/op-supervisor/supervisor/backend/db/heads/types.go b/op-supervisor/supervisor/backend/db/heads/types.go index fb73dc464579..3e54593e33c7 100644 --- a/op-supervisor/supervisor/backend/db/heads/types.go +++ b/op-supervisor/supervisor/backend/db/heads/types.go @@ -3,23 +3,48 @@ package heads import ( "encoding/json" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" + 
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) +type HeadPointer struct { + // LastSealedBlockHash is the last fully-processed block + LastSealedBlockHash common.Hash + LastSealedBlockNum uint64 + + // Number of logs that have been verified since the LastSealedBlock. + // These logs are contained in the block that builds on top of the LastSealedBlock. + LogsSince uint32 +} + +// WithinRange checks if the given log, in the given block, +// is within range (i.e. before or equal to the head-pointer). +// This does not guarantee that the log exists. +func (ptr *HeadPointer) WithinRange(blockNum uint64, logIdx uint32) bool { + if ptr.LastSealedBlockHash == (common.Hash{}) { + return false // no block yet + } + return blockNum <= ptr.LastSealedBlockNum || + (blockNum+1 == ptr.LastSealedBlockNum && logIdx < ptr.LogsSince) +} + +func (ptr *HeadPointer) IsSealed(blockNum uint64) bool { + if ptr.LastSealedBlockHash == (common.Hash{}) { + return false // no block yet + } + return blockNum <= ptr.LastSealedBlockNum +} + // ChainHeads provides the serialization format for the current chain heads. -// The values here could be block numbers or just the index of entries in the log db. -// If they're log db entries, we can't detect if things changed because of a reorg though (if the logdb write succeeded and head update failed). -// So we probably need to store actual block IDs here... but then we don't have the block hash for every block in the log db. -// Only jumping the head forward on checkpoint blocks doesn't work though... 
type ChainHeads struct { - Unsafe entrydb.EntryIdx `json:"localUnsafe"` - CrossUnsafe entrydb.EntryIdx `json:"crossUnsafe"` - LocalSafe entrydb.EntryIdx `json:"localSafe"` - CrossSafe entrydb.EntryIdx `json:"crossSafe"` - LocalFinalized entrydb.EntryIdx `json:"localFinalized"` - CrossFinalized entrydb.EntryIdx `json:"crossFinalized"` + Unsafe HeadPointer `json:"localUnsafe"` + CrossUnsafe HeadPointer `json:"crossUnsafe"` + LocalSafe HeadPointer `json:"localSafe"` + CrossSafe HeadPointer `json:"crossSafe"` + LocalFinalized HeadPointer `json:"localFinalized"` + CrossFinalized HeadPointer `json:"crossFinalized"` } type Heads struct { @@ -35,6 +60,26 @@ func (h *Heads) Get(id types.ChainID) ChainHeads { if !ok { return ChainHeads{} } + // init to genesis + if chain.LocalFinalized == (HeadPointer{}) && chain.Unsafe.LastSealedBlockNum == 0 { + chain.LocalFinalized = chain.Unsafe + } + // Make sure the data is consistent + if chain.LocalSafe == (HeadPointer{}) { + chain.LocalSafe = chain.LocalFinalized + } + if chain.Unsafe == (HeadPointer{}) { + chain.Unsafe = chain.LocalSafe + } + if chain.CrossFinalized == (HeadPointer{}) && chain.LocalFinalized.LastSealedBlockNum == 0 { + chain.CrossFinalized = chain.LocalFinalized + } + if chain.CrossSafe == (HeadPointer{}) { + chain.CrossSafe = chain.CrossFinalized + } + if chain.CrossUnsafe == (HeadPointer{}) { + chain.CrossUnsafe = chain.CrossSafe + } return chain } @@ -50,7 +95,7 @@ func (h *Heads) Copy() *Heads { return c } -func (h Heads) MarshalJSON() ([]byte, error) { +func (h *Heads) MarshalJSON() ([]byte, error) { data := make(map[hexutil.U256]ChainHeads) for id, heads := range h.Chains { data[hexutil.U256(id)] = heads diff --git a/op-supervisor/supervisor/backend/db/heads/types_test.go b/op-supervisor/supervisor/backend/db/heads/types_test.go index bb79fc6fcd25..20bb05795416 100644 --- a/op-supervisor/supervisor/backend/db/heads/types_test.go +++ b/op-supervisor/supervisor/backend/db/heads/types_test.go @@ -3,38 +3,52 @@ 
package heads import ( "encoding/json" "fmt" + "math/rand" // nosemgrep "testing" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/common" + + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) func TestHeads(t *testing.T) { + rng := rand.New(rand.NewSource(1234)) + randHeadPtr := func() HeadPointer { + var h common.Hash + rng.Read(h[:]) + return HeadPointer{ + LastSealedBlockHash: h, + LastSealedBlockNum: rng.Uint64(), + LogsSince: rng.Uint32(), + } + } t.Run("RoundTripViaJson", func(t *testing.T) { heads := NewHeads() heads.Put(types.ChainIDFromUInt64(3), ChainHeads{ - Unsafe: 10, - CrossUnsafe: 9, - LocalSafe: 8, - CrossSafe: 7, - LocalFinalized: 6, - CrossFinalized: 5, + Unsafe: randHeadPtr(), + CrossUnsafe: randHeadPtr(), + LocalSafe: randHeadPtr(), + CrossSafe: randHeadPtr(), + LocalFinalized: randHeadPtr(), + CrossFinalized: randHeadPtr(), }) heads.Put(types.ChainIDFromUInt64(9), ChainHeads{ - Unsafe: 90, - CrossUnsafe: 80, - LocalSafe: 70, - CrossSafe: 60, - LocalFinalized: 50, - CrossFinalized: 40, + Unsafe: randHeadPtr(), + CrossUnsafe: randHeadPtr(), + LocalSafe: randHeadPtr(), + CrossSafe: randHeadPtr(), + LocalFinalized: randHeadPtr(), + CrossFinalized: randHeadPtr(), }) heads.Put(types.ChainIDFromUInt64(4892497242424), ChainHeads{ - Unsafe: 1000, - CrossUnsafe: 900, - LocalSafe: 800, - CrossSafe: 700, - LocalFinalized: 600, - CrossFinalized: 400, + Unsafe: randHeadPtr(), + CrossUnsafe: randHeadPtr(), + LocalSafe: randHeadPtr(), + CrossSafe: randHeadPtr(), + LocalFinalized: randHeadPtr(), + CrossFinalized: randHeadPtr(), }) j, err := json.Marshal(heads) @@ -51,16 +65,16 @@ func TestHeads(t *testing.T) { chainA := types.ChainIDFromUInt64(3) chainB := types.ChainIDFromUInt64(4) chainAOrigHeads := ChainHeads{ - Unsafe: 1, + Unsafe: randHeadPtr(), } chainAModifiedHeads1 := ChainHeads{ - Unsafe: 2, + Unsafe: randHeadPtr(), } 
chainAModifiedHeads2 := ChainHeads{ - Unsafe: 4, + Unsafe: randHeadPtr(), } chainBModifiedHeads := ChainHeads{ - Unsafe: 2, + Unsafe: randHeadPtr(), } heads := NewHeads() diff --git a/op-supervisor/supervisor/backend/db/logs/db.go b/op-supervisor/supervisor/backend/db/logs/db.go index 996a5c68d712..10863c052645 100644 --- a/op-supervisor/supervisor/backend/db/logs/db.go +++ b/op-supervisor/supervisor/backend/db/logs/db.go @@ -149,37 +149,10 @@ func (db *DB) updateEntryCountMetric() { db.m.RecordDBEntryCount(db.store.Size()) } -func (db *DB) IteratorStartingAt(i entrydb.EntryIdx) (Iterator, error) { +func (db *DB) IteratorStartingAt(sealedNum uint64, logsSince uint32) (Iterator, error) { db.rwLock.RLock() defer db.rwLock.RUnlock() - if i > db.lastEntryContext.nextEntryIndex { - return nil, ErrFuture - } - // TODO(#12031): Workaround while we not have IteratorStartingAt(heads.HeadPointer): - // scroll back from the index, to find block info. - idx := i - for ; idx >= 0; i-- { - entry, err := db.store.Read(idx) - if err != nil { - if errors.Is(err, io.EOF) { - continue // traverse to when we did have blocks - } - return nil, err - } - if entry.Type() == entrydb.TypeSearchCheckpoint { - break - } - if idx == 0 { - return nil, fmt.Errorf("empty DB, no block entry, cannot start at %d", i) - } - } - iter := db.newIterator(idx) - for iter.NextIndex() < i { - if _, err := iter.next(); err != nil { - return nil, errors.New("failed to process back up to the head pointer") - } - } - return iter, nil + return db.newIteratorAt(sealedNum, logsSince) } // FindSealedBlock finds the requested block, to check if it exists, diff --git a/op-supervisor/supervisor/backend/db/safety_checkers.go b/op-supervisor/supervisor/backend/db/safety_checkers.go index 916f26f6dead..745f74134662 100644 --- a/op-supervisor/supervisor/backend/db/safety_checkers.go +++ b/op-supervisor/supervisor/backend/db/safety_checkers.go @@ -1,11 +1,10 @@ package db import ( - "errors" + "fmt" 
"github.com/ethereum/go-ethereum/common" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" @@ -18,173 +17,137 @@ const ( ) // SafetyChecker is an interface for checking the safety of a log entry -// and updating the local head for a chain. +// it maintains a consistent view between local and cross chain for a given safety level type SafetyChecker interface { - LocalHeadForChain(chainID types.ChainID) entrydb.EntryIdx - CrossHeadForChain(chainID types.ChainID) entrydb.EntryIdx - Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool - Update(chain types.ChainID, index entrydb.EntryIdx) heads.OperationFn - Name() string - SafetyLevel() types.SafetyLevel -} - -// unsafeChecker is a SafetyChecker that uses the unsafe head as the view into the database -type unsafeChecker struct { - chainsDB *ChainsDB -} - -// safeChecker is a SafetyChecker that uses the safe head as the view into the database -type safeChecker struct { - chainsDB *ChainsDB -} - -// finalizedChecker is a SafetyChecker that uses the finalized head as the view into the database -type finalizedChecker struct { - chainsDB *ChainsDB + LocalHead(chainID types.ChainID) heads.HeadPointer + CrossHead(chainID types.ChainID) heads.HeadPointer + CheckLocal(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error + CheckCross(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error + UpdateLocal(chain types.ChainID, pointer heads.HeadPointer) error + UpdateCross(chain types.ChainID, pointer heads.HeadPointer) error + String() string + LocalSafetyLevel() types.SafetyLevel + CrossSafetyLevel() types.SafetyLevel } // NewSafetyChecker creates a new SafetyChecker of the given type 
func NewSafetyChecker(t types.SafetyLevel, chainsDB *ChainsDB) SafetyChecker { - switch t { - case Unsafe: - return &unsafeChecker{ - chainsDB: chainsDB, - } - case Safe: - return &safeChecker{ - chainsDB: chainsDB, - } - case Finalized: - return &finalizedChecker{ - chainsDB: chainsDB, - } - default: - panic("unknown safety checker type") - } -} - -// Name returns the safety checker type, using the same strings as the constants used in construction -func (c *unsafeChecker) Name() string { - return Unsafe -} - -func (c *safeChecker) Name() string { - return Safe -} - -func (c *finalizedChecker) Name() string { - return Finalized -} - -// LocalHeadForChain returns the local head for the given chain -// based on the type of SafetyChecker -func (c *unsafeChecker) LocalHeadForChain(chainID types.ChainID) entrydb.EntryIdx { - heads := c.chainsDB.heads.Current().Get(chainID) - return heads.Unsafe -} - -func (c *safeChecker) LocalHeadForChain(chainID types.ChainID) entrydb.EntryIdx { - heads := c.chainsDB.heads.Current().Get(chainID) - return heads.LocalSafe -} - -func (c *finalizedChecker) LocalHeadForChain(chainID types.ChainID) entrydb.EntryIdx { - heads := c.chainsDB.heads.Current().Get(chainID) - return heads.LocalFinalized -} - -// CrossHeadForChain returns the x-head for the given chain -// based on the type of SafetyChecker -func (c *unsafeChecker) CrossHeadForChain(chainID types.ChainID) entrydb.EntryIdx { - heads := c.chainsDB.heads.Current().Get(chainID) - return heads.CrossUnsafe -} - -func (c *safeChecker) CrossHeadForChain(chainID types.ChainID) entrydb.EntryIdx { - heads := c.chainsDB.heads.Current().Get(chainID) - return heads.CrossSafe -} - -func (c *finalizedChecker) CrossHeadForChain(chainID types.ChainID) entrydb.EntryIdx { - heads := c.chainsDB.heads.Current().Get(chainID) - return heads.CrossFinalized -} - -func (c *unsafeChecker) SafetyLevel() types.SafetyLevel { - return types.CrossUnsafe -} - -func (c *safeChecker) SafetyLevel() types.SafetyLevel 
{ - return types.CrossSafe -} - -func (c *finalizedChecker) SafetyLevel() types.SafetyLevel { - return types.CrossFinalized + return NewChecker(t, chainsDB) } // check checks if the log entry is safe, provided a local head for the chain // it is used by the individual SafetyCheckers to determine if a log entry is safe func check( chainsDB *ChainsDB, - localHead entrydb.EntryIdx, + head heads.HeadPointer, chain types.ChainID, blockNum uint64, logIdx uint32, - logHash common.Hash) bool { + logHash common.Hash) error { // for the Check to be valid, the log must: - // exist at the blockNum and logIdx - // have a hash that matches the provided hash (implicit in the Contains call), and - // be less than or equal to the local head for the chain - index, err := chainsDB.logDBs[chain].Contains(blockNum, logIdx, logHash) + // 1. have the expected logHash at the indicated blockNum and logIdx + _, err := chainsDB.logDBs[chain].Contains(blockNum, logIdx, logHash) if err != nil { - if errors.Is(err, logs.ErrFuture) { - return false // TODO(#12031) - } - if errors.Is(err, logs.ErrConflict) { - return false // TODO(#12031) - } - return false + return err } - return index <= localHead + // 2. be within the range of the given head + if !head.WithinRange(blockNum, logIdx) { + return logs.ErrFuture + } + return nil } -// Check checks if the log entry is safe, provided a local head for the chain -// it passes on the local head this checker is concerned with, along with its view of the database -func (c *unsafeChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool { - return check(c.chainsDB, c.LocalHeadForChain(chain), chain, blockNum, logIdx, logHash) +// checker is a composition of accessor and update functions for a given safety level. +// they implement the SafetyChecker interface. +// checkers can be made with NewChecker. 
+type checker struct { + chains *ChainsDB + localSafety types.SafetyLevel + crossSafety types.SafetyLevel + updateCross func(chain types.ChainID, pointer heads.HeadPointer) error + updateLocal func(chain types.ChainID, pointer heads.HeadPointer) error + localHead func(chain types.ChainID) heads.HeadPointer + crossHead func(chain types.ChainID) heads.HeadPointer + checkCross func(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error + checkLocal func(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error } -func (c *safeChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool { - return check(c.chainsDB, c.LocalHeadForChain(chain), chain, blockNum, logIdx, logHash) + +func (c *checker) String() string { + return fmt.Sprintf("%s+%s", c.localSafety.String(), c.crossSafety.String()) } -func (c *finalizedChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool { - return check(c.chainsDB, c.LocalHeadForChain(chain), chain, blockNum, logIdx, logHash) + +func (c *checker) LocalSafetyLevel() types.SafetyLevel { + return c.localSafety } -// Update creates an Operation that updates the x-head for the chain, given an index to set it to -func (c *unsafeChecker) Update(chain types.ChainID, index entrydb.EntryIdx) heads.OperationFn { - return func(heads *heads.Heads) error { - chainHeads := heads.Get(chain) - chainHeads.CrossUnsafe = index - heads.Put(chain, chainHeads) - return nil - } +func (c *checker) CrossSafetyLevel() types.SafetyLevel { + return c.crossSafety } -func (c *safeChecker) Update(chain types.ChainID, index entrydb.EntryIdx) heads.OperationFn { - return func(heads *heads.Heads) error { - chainHeads := heads.Get(chain) - chainHeads.CrossSafe = index - heads.Put(chain, chainHeads) - return nil - } +func (c *checker) UpdateCross(chain types.ChainID, pointer heads.HeadPointer) error { + return c.updateCross(chain, pointer) +} +func (c 
*checker) UpdateLocal(chain types.ChainID, pointer heads.HeadPointer) error { + return c.updateLocal(chain, pointer) +} +func (c *checker) LocalHead(chain types.ChainID) heads.HeadPointer { + return c.localHead(chain) +} +func (c *checker) CrossHead(chain types.ChainID) heads.HeadPointer { + return c.crossHead(chain) +} +func (c *checker) CheckCross(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error { + return c.checkCross(chain, blockNum, logIdx, logHash) +} +func (c *checker) CheckLocal(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error { + return c.checkLocal(chain, blockNum, logIdx, logHash) } -func (c *finalizedChecker) Update(chain types.ChainID, index entrydb.EntryIdx) heads.OperationFn { - return func(heads *heads.Heads) error { - chainHeads := heads.Get(chain) - chainHeads.CrossFinalized = index - heads.Put(chain, chainHeads) - return nil +func NewChecker(t types.SafetyLevel, c *ChainsDB) SafetyChecker { + // checkWith creates a function which takes a chain-getter and returns a function that returns the head for the chain + checkWith := func(getHead func(chain types.ChainID) heads.HeadPointer) func(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error { + return func(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error { + return check(c, getHead(chain), chain, blockNum, logIdx, logHash) + } + } + switch t { + case Unsafe: + return &checker{ + chains: c, + localSafety: types.Unsafe, + crossSafety: types.CrossUnsafe, + updateCross: c.heads.UpdateCrossUnsafe, + updateLocal: c.heads.UpdateLocalUnsafe, + crossHead: c.heads.CrossUnsafe, + localHead: c.heads.LocalUnsafe, + checkCross: checkWith(c.heads.CrossUnsafe), + checkLocal: checkWith(c.heads.LocalUnsafe), + } + case Safe: + return &checker{ + chains: c, + localSafety: types.Safe, + crossSafety: types.CrossSafe, + updateCross: c.heads.UpdateCrossSafe, + updateLocal: c.heads.UpdateLocalSafe, + 
crossHead: c.heads.CrossSafe, + localHead: c.heads.LocalSafe, + checkCross: checkWith(c.heads.CrossSafe), + checkLocal: checkWith(c.heads.LocalSafe), + } + case Finalized: + return &checker{ + chains: c, + localSafety: types.Finalized, + crossSafety: types.CrossFinalized, + updateCross: c.heads.UpdateCrossFinalized, + updateLocal: c.heads.UpdateLocalFinalized, + crossHead: c.heads.CrossFinalized, + localHead: c.heads.LocalFinalized, + checkCross: checkWith(c.heads.CrossFinalized), + checkLocal: checkWith(c.heads.LocalFinalized), + } } + return &checker{} } diff --git a/op-supervisor/supervisor/backend/db/safety_checkers_test.go b/op-supervisor/supervisor/backend/db/safety_checkers_test.go index c8fb4e34a757..fa0954bc6b65 100644 --- a/op-supervisor/supervisor/backend/db/safety_checkers_test.go +++ b/op-supervisor/supervisor/backend/db/safety_checkers_test.go @@ -1,5 +1,6 @@ package db +/* import ( "errors" "testing" @@ -211,3 +212,4 @@ func TestCheck(t *testing.T) { }) } } +*/ diff --git a/op-supervisor/supervisor/backend/source/chain.go b/op-supervisor/supervisor/backend/source/chain.go index c8fef89f8b83..03286b1a4160 100644 --- a/op-supervisor/supervisor/backend/source/chain.go +++ b/op-supervisor/supervisor/backend/source/chain.go @@ -5,16 +5,17 @@ import ( "fmt" "time" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum-optimism/optimism/op-service/sources/caching" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" - "github.com/ethereum/go-ethereum/log" ) // TODO(optimism#11032) Make these configurable and a sensible default -const epochPollInterval = 30 * time.Second +const epochPollInterval = 3 * time.Second const pollInterval = 2 * time.Second const trustRpc = false const rpcKind 
= sources.RPCKindStandard @@ -25,6 +26,7 @@ type Metrics interface { type Storage interface { LogStorage + Heads() db.HeadsStorage DatabaseRewinder LatestBlockNum(chainID types.ChainID) (num uint64, ok bool) } @@ -32,8 +34,9 @@ type Storage interface { // ChainMonitor monitors a source L2 chain, retrieving the data required to populate the database and perform // interop consolidation. It detects and notifies when reorgs occur. type ChainMonitor struct { - log log.Logger - headMonitor *HeadMonitor + log log.Logger + headMonitor *HeadMonitor + chainProcessor *ChainProcessor } func NewChainMonitor(ctx context.Context, logger log.Logger, m Metrics, chainID types.ChainID, rpc string, client client.RPC, store Storage) (*ChainMonitor, error) { @@ -43,26 +46,26 @@ func NewChainMonitor(ctx context.Context, logger log.Logger, m Metrics, chainID return nil, err } - latest, ok := store.LatestBlockNum(chainID) - if !ok { - logger.Warn("") - } + // Create the log processor and fetcher + processLogs := newLogProcessor(chainID, store) + unsafeBlockProcessor := NewChainProcessor(logger, cl, chainID, processLogs, store) - startingHead := eth.L1BlockRef{ - Number: latest, - } + // create head processors which only update the head + unsafeHeadProcessor := OnNewHead(chainID, store.Heads().UpdateLocalUnsafe) + safeHeadProcessor := OnNewHead(chainID, store.Heads().UpdateLocalSafe) + finalizedHeadProcessor := OnNewHead(chainID, store.Heads().UpdateLocalFinalized) - processLogs := newLogProcessor(chainID, store) - fetchReceipts := newLogFetcher(cl, processLogs) - unsafeBlockProcessor := NewChainProcessor(logger, cl, chainID, startingHead, fetchReceipts, store) + unsafeProcessors := []HeadProcessor{unsafeBlockProcessor, unsafeHeadProcessor} + safeProcessors := []HeadProcessor{safeHeadProcessor} + finalizedProcessors := []HeadProcessor{finalizedHeadProcessor} - unsafeProcessors := []HeadProcessor{unsafeBlockProcessor} - callback := newHeadUpdateProcessor(logger, unsafeProcessors, nil, nil) 
+ callback := newHeadUpdateProcessor(logger, unsafeProcessors, safeProcessors, finalizedProcessors) headMonitor := NewHeadMonitor(logger, epochPollInterval, cl, callback) return &ChainMonitor{ - log: logger, - headMonitor: headMonitor, + log: logger, + headMonitor: headMonitor, + chainProcessor: unsafeBlockProcessor, }, nil } @@ -72,6 +75,7 @@ func (c *ChainMonitor) Start() error { } func (c *ChainMonitor) Stop() error { + c.chainProcessor.Close() return c.headMonitor.Stop() } diff --git a/op-supervisor/supervisor/backend/source/chain_processor.go b/op-supervisor/supervisor/backend/source/chain_processor.go index 0a42da1556a0..4c7895b0cdf3 100644 --- a/op-supervisor/supervisor/backend/source/chain_processor.go +++ b/op-supervisor/supervisor/backend/source/chain_processor.go @@ -2,22 +2,31 @@ package source import ( "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + gethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" - "github.com/ethereum/go-ethereum/log" ) -type BlockByNumberSource interface { +type Source interface { L1BlockRefByNumber(ctx context.Context, number uint64) (eth.L1BlockRef, error) + FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, gethtypes.Receipts, error) } -type BlockProcessor interface { - ProcessBlock(ctx context.Context, block eth.L1BlockRef) error +type LogProcessor interface { + ProcessLogs(ctx context.Context, block eth.L1BlockRef, receipts gethtypes.Receipts) error } type DatabaseRewinder interface { Rewind(chain types.ChainID, headBlockNum uint64) error + LatestBlockNum(chain types.ChainID) (num uint64, ok bool) } type BlockProcessorFn func(ctx context.Context, block eth.L1BlockRef) error @@ -29,58 +38,145 @@ func (fn BlockProcessorFn) ProcessBlock(ctx context.Context, block eth.L1BlockRe // 
ChainProcessor is a HeadProcessor that fills in any skipped blocks between head update events. // It ensures that, absent reorgs, every block in the chain is processed even if some head advancements are skipped. type ChainProcessor struct { - log log.Logger - client BlockByNumberSource - chain types.ChainID - lastBlock eth.L1BlockRef - processor BlockProcessor + log log.Logger + client Source + + chain types.ChainID + + processor LogProcessor rewinder DatabaseRewinder + + // the last known head. May be 0 if not known. + lastHead atomic.Uint64 + // channel with capacity of 1, full if there is work to do + newHead chan struct{} + + // bool to indicate if calls are synchronous + synchronous bool + // channel with capacity of 1, to signal work complete if running in synchroneous mode + out chan struct{} + + // lifetime management of the chain processor + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup } -func NewChainProcessor(log log.Logger, client BlockByNumberSource, chain types.ChainID, startingHead eth.L1BlockRef, processor BlockProcessor, rewinder DatabaseRewinder) *ChainProcessor { - return &ChainProcessor{ +func NewChainProcessor(log log.Logger, client Source, chain types.ChainID, processor LogProcessor, rewinder DatabaseRewinder) *ChainProcessor { + ctx, cancel := context.WithCancel(context.Background()) + out := &ChainProcessor{ log: log, client: client, chain: chain, - lastBlock: startingHead, processor: processor, rewinder: rewinder, + newHead: make(chan struct{}, 1), + // default to synchronous because we want other processors to wait for this + // in the future we could make this async and have a separate mechanism which forwards the work signal to other processors + synchronous: true, + out: make(chan struct{}, 1), + ctx: ctx, + cancel: cancel, } + out.wg.Add(1) + go out.worker() + return out } -func (s *ChainProcessor) OnNewHead(ctx context.Context, head eth.L1BlockRef) { - s.log.Debug("Processing chain", "chain", s.chain, "head", 
head, "last", s.lastBlock) - if head.Number <= s.lastBlock.Number { - s.log.Info("head is not newer than last processed block", "head", head, "lastBlock", s.lastBlock) - return +func (s *ChainProcessor) nextNum() uint64 { + headNum, ok := s.rewinder.LatestBlockNum(s.chain) + if !ok { + return 0 // genesis. We could change this to start at a later block. } - for s.lastBlock.Number+1 < head.Number { - s.log.Debug("Filling in skipped block", "chain", s.chain, "lastBlock", s.lastBlock, "head", head) - blockNum := s.lastBlock.Number + 1 - nextBlock, err := s.client.L1BlockRefByNumber(ctx, blockNum) - if err != nil { - s.log.Error("Failed to fetch block info", "number", blockNum, "err", err) + return headNum + 1 +} + +func (s *ChainProcessor) worker() { + defer s.wg.Done() + + delay := time.NewTicker(time.Second * 5) + for { + if s.ctx.Err() != nil { // check if we are closing down return } - if ok := s.processBlock(ctx, nextBlock); !ok { + target := s.nextNum() + if err := s.update(target); err != nil { + s.log.Error("Failed to process new block", "err", err) + // idle until next update trigger + } else if x := s.lastHead.Load(); target+1 <= x { + s.log.Debug("Continuing with next block", + "newTarget", target+1, "lastHead", x) + continue // instantly continue processing, no need to idle + } else { + s.log.Debug("Idling block-processing, reached latest block", "head", target) + } + if s.synchronous { + s.out <- struct{}{} + } + // await next time we process, or detect shutdown + select { + case <-s.ctx.Done(): + delay.Stop() return + case <-s.newHead: + s.log.Debug("Responding to new head signal") + continue + case <-delay.C: + s.log.Debug("Checking for updates") + continue } } - - s.processBlock(ctx, head) } -func (s *ChainProcessor) processBlock(ctx context.Context, block eth.L1BlockRef) bool { - if err := s.processor.ProcessBlock(ctx, block); err != nil { - s.log.Error("Failed to process block", "block", block, "err", err) +func (s *ChainProcessor) update(nextNum 
uint64) error { + ctx, cancel := context.WithTimeout(s.ctx, time.Second*10) + next, err := s.client.L1BlockRefByNumber(ctx, nextNum) + cancel() + if err != nil { + return fmt.Errorf("failed to fetch next block: %w", err) + } + + // Try and fetch the receipts + ctx, cancel = context.WithTimeout(s.ctx, time.Second*10) + _, receipts, err := s.client.FetchReceipts(ctx, next.Hash) + cancel() + if err != nil { + return fmt.Errorf("failed to fetch receipts of block: %w", err) + } + if err := s.processor.ProcessLogs(ctx, next, receipts); err != nil { + s.log.Error("Failed to process block", "block", next, "err", err) + + if next.Number == 0 { // cannot rewind genesis + return nil + } + // Try to rewind the database to the previous block to remove any logs from this block that were written - if err := s.rewinder.Rewind(s.chain, s.lastBlock.Number); err != nil { + if err := s.rewinder.Rewind(s.chain, nextNum-1); err != nil { // If any logs were written, our next attempt to write will fail and we'll retry this rewind. // If no logs were written successfully then the rewind wouldn't have done anything anyway. 
- s.log.Error("Failed to rewind after error processing block", "block", block, "err", err) + s.log.Error("Failed to rewind after error processing block", "block", next, "err", err) } - return false // Don't update the last processed block so we will retry on next update } - s.lastBlock = block - return true + return nil +} + +func (s *ChainProcessor) OnNewHead(ctx context.Context, head eth.L1BlockRef) error { + // update the latest target + s.lastHead.Store(head.Number) + // signal that we have something to process + select { + case s.newHead <- struct{}{}: + default: + // already requested an update + } + // if we are running synchronously, wait for the work to complete + if s.synchronous { + <-s.out + } + return nil +} + +func (s *ChainProcessor) Close() { + s.cancel() + s.wg.Wait() } diff --git a/op-supervisor/supervisor/backend/source/chain_processor_test.go b/op-supervisor/supervisor/backend/source/chain_processor_test.go index 6b26f7477c53..af48d5ecdd30 100644 --- a/op-supervisor/supervisor/backend/source/chain_processor_test.go +++ b/op-supervisor/supervisor/backend/source/chain_processor_test.go @@ -1,5 +1,6 @@ package source +/* TODO import ( "context" "errors" @@ -22,7 +23,7 @@ func TestUnsafeBlocksStage(t *testing.T) { logger := testlog.Logger(t, log.LvlInfo) client := &stubBlockByNumberSource{} processor := &stubBlockProcessor{} - stage := NewChainProcessor(logger, client, processorChainID, eth.L1BlockRef{Number: 100}, processor, &stubRewinder{}) + stage := NewChainProcessor(logger, client, processorChainID, processor, &stubRewinder{}) stage.OnNewHead(ctx, eth.L1BlockRef{Number: 100}) stage.OnNewHead(ctx, eth.L1BlockRef{Number: 99}) @@ -185,3 +186,4 @@ func (s *stubRewinder) Rewind(chainID types.ChainID, headBlockNum uint64) error s.rewindCalled = true return nil } +*/ diff --git a/op-supervisor/supervisor/backend/source/fetch_logs.go b/op-supervisor/supervisor/backend/source/fetch_logs.go deleted file mode 100644 index 880a9ddcda4d..000000000000 --- 
a/op-supervisor/supervisor/backend/source/fetch_logs.go +++ /dev/null @@ -1,46 +0,0 @@ -package source - -import ( - "context" - "fmt" - - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" -) - -type LogSource interface { - FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Receipts, error) -} - -type ReceiptProcessor interface { - ProcessLogs(ctx context.Context, block eth.L1BlockRef, rcpts types.Receipts) error -} - -type ReceiptProcessorFn func(ctx context.Context, block eth.L1BlockRef, rcpts types.Receipts) error - -func (r ReceiptProcessorFn) ProcessLogs(ctx context.Context, block eth.L1BlockRef, rcpts types.Receipts) error { - return r(ctx, block, rcpts) -} - -type logFetcher struct { - client LogSource - processor ReceiptProcessor -} - -func newLogFetcher(client LogSource, processor ReceiptProcessor) *logFetcher { - return &logFetcher{ - client: client, - processor: processor, - } -} - -var _ BlockProcessor = (*logFetcher)(nil) - -func (l *logFetcher) ProcessBlock(ctx context.Context, block eth.L1BlockRef) error { - _, rcpts, err := l.client.FetchReceipts(ctx, block.Hash) - if err != nil { - return fmt.Errorf("failed to fetch receipts for block %v: %w", block, err) - } - return l.processor.ProcessLogs(ctx, block, rcpts) -} diff --git a/op-supervisor/supervisor/backend/source/fetch_logs_test.go b/op-supervisor/supervisor/backend/source/fetch_logs_test.go deleted file mode 100644 index 4e05f5530b72..000000000000 --- a/op-supervisor/supervisor/backend/source/fetch_logs_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package source - -import ( - "context" - "errors" - "testing" - - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/stretchr/testify/require" -) - -func TestFetchLogs(t *testing.T) { - ctx := context.Background() - rcpts := 
types.Receipts{&types.Receipt{Type: 3}, &types.Receipt{Type: 4}} - - t.Run("Success", func(t *testing.T) { - client := &stubLogSource{ - rcpts: rcpts, - } - var processed []types.Receipts - processor := ReceiptProcessorFn(func(ctx context.Context, block eth.L1BlockRef, rcpts types.Receipts) error { - processed = append(processed, rcpts) - return nil - }) - fetcher := newLogFetcher(client, processor) - block := eth.L1BlockRef{Number: 11, Hash: common.Hash{0xaa}} - - err := fetcher.ProcessBlock(ctx, block) - require.NoError(t, err) - - require.Equal(t, []types.Receipts{rcpts}, processed) - }) - - t.Run("ReceiptFetcherError", func(t *testing.T) { - client := &stubLogSource{ - err: errors.New("boom"), - } - processor := ReceiptProcessorFn(func(ctx context.Context, block eth.L1BlockRef, rcpts types.Receipts) error { - t.Fatal("should not be called") - return nil - }) - fetcher := newLogFetcher(client, processor) - block := eth.L1BlockRef{Number: 11, Hash: common.Hash{0xaa}} - - err := fetcher.ProcessBlock(ctx, block) - require.ErrorIs(t, err, client.err) - }) - - t.Run("ProcessorError", func(t *testing.T) { - expectedErr := errors.New("boom") - client := &stubLogSource{ - rcpts: rcpts, - } - processor := ReceiptProcessorFn(func(ctx context.Context, block eth.L1BlockRef, rcpts types.Receipts) error { - return expectedErr - }) - fetcher := newLogFetcher(client, processor) - block := eth.L1BlockRef{Number: 11, Hash: common.Hash{0xaa}} - - err := fetcher.ProcessBlock(ctx, block) - require.ErrorIs(t, err, expectedErr) - }) -} - -type stubLogSource struct { - err error - rcpts types.Receipts -} - -func (s *stubLogSource) FetchReceipts(_ context.Context, _ common.Hash) (eth.BlockInfo, types.Receipts, error) { - if s.err != nil { - return nil, nil, s.err - } - return nil, s.rcpts, nil -} diff --git a/op-supervisor/supervisor/backend/source/head_processor.go b/op-supervisor/supervisor/backend/source/head_processor.go index ff97deadc543..6a0f867ac61a 100644 --- 
a/op-supervisor/supervisor/backend/source/head_processor.go +++ b/op-supervisor/supervisor/backend/source/head_processor.go @@ -3,18 +3,21 @@ package source import ( "context" - "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) type HeadProcessor interface { - OnNewHead(ctx context.Context, head eth.L1BlockRef) + OnNewHead(ctx context.Context, head eth.L1BlockRef) error } -type HeadProcessorFn func(ctx context.Context, head eth.L1BlockRef) +type HeadProcessorFn func(ctx context.Context, head eth.L1BlockRef) error -func (f HeadProcessorFn) OnNewHead(ctx context.Context, head eth.L1BlockRef) { - f(ctx, head) +func (f HeadProcessorFn) OnNewHead(ctx context.Context, head eth.L1BlockRef) error { + return f(ctx, head) } // headUpdateProcessor handles head update events and routes them to the appropriate handlers @@ -37,19 +40,37 @@ func newHeadUpdateProcessor(log log.Logger, unsafeProcessors []HeadProcessor, sa func (n *headUpdateProcessor) OnNewUnsafeHead(ctx context.Context, block eth.L1BlockRef) { n.log.Debug("New unsafe head", "block", block) for _, processor := range n.unsafeProcessors { - processor.OnNewHead(ctx, block) + if err := processor.OnNewHead(ctx, block); err != nil { + n.log.Error("unsafe-head processing failed", "err", err) + } } } func (n *headUpdateProcessor) OnNewSafeHead(ctx context.Context, block eth.L1BlockRef) { n.log.Debug("New safe head", "block", block) for _, processor := range n.safeProcessors { - processor.OnNewHead(ctx, block) + if err := processor.OnNewHead(ctx, block); err != nil { + n.log.Error("safe-head processing failed", "err", err) + } } } + func (n *headUpdateProcessor) OnNewFinalizedHead(ctx context.Context, block eth.L1BlockRef) { n.log.Debug("New finalized head", "block", 
block) for _, processor := range n.finalizedProcessors { - processor.OnNewHead(ctx, block) + if err := processor.OnNewHead(ctx, block); err != nil { + n.log.Error("finalized-head processing failed", "err", err) + } + } +} + +// OnNewHead is a util function to turn a head-signal processor into head-pointer updater +func OnNewHead(id types.ChainID, apply func(id types.ChainID, v heads.HeadPointer) error) HeadProcessorFn { + return func(ctx context.Context, head eth.L1BlockRef) error { + return apply(id, heads.HeadPointer{ + LastSealedBlockHash: head.Hash, + LastSealedBlockNum: head.Number, + LogsSince: 0, + }) } } diff --git a/op-supervisor/supervisor/backend/source/head_processor_test.go b/op-supervisor/supervisor/backend/source/head_processor_test.go index 0ef375fe4524..f684667fa62b 100644 --- a/op-supervisor/supervisor/backend/source/head_processor_test.go +++ b/op-supervisor/supervisor/backend/source/head_processor_test.go @@ -16,8 +16,9 @@ func TestHeadUpdateProcessor(t *testing.T) { logger := testlog.Logger(t, log.LvlInfo) processed := make([]eth.L1BlockRef, 3) makeProcessor := func(idx int) HeadProcessor { - return HeadProcessorFn(func(_ context.Context, head eth.L1BlockRef) { + return HeadProcessorFn(func(_ context.Context, head eth.L1BlockRef) error { processed[idx] = head + return nil }) } headUpdates := newHeadUpdateProcessor(logger, []HeadProcessor{makeProcessor(0), makeProcessor(1), makeProcessor(2)}, nil, nil) @@ -30,8 +31,9 @@ func TestHeadUpdateProcessor(t *testing.T) { logger := testlog.Logger(t, log.LvlInfo) processed := make([]eth.L1BlockRef, 3) makeProcessor := func(idx int) HeadProcessor { - return HeadProcessorFn(func(_ context.Context, head eth.L1BlockRef) { + return HeadProcessorFn(func(_ context.Context, head eth.L1BlockRef) error { processed[idx] = head + return nil }) } headUpdates := newHeadUpdateProcessor(logger, nil, []HeadProcessor{makeProcessor(0), makeProcessor(1), makeProcessor(2)}, nil) @@ -44,8 +46,9 @@ func 
TestHeadUpdateProcessor(t *testing.T) { logger := testlog.Logger(t, log.LvlInfo) processed := make([]eth.L1BlockRef, 3) makeProcessor := func(idx int) HeadProcessor { - return HeadProcessorFn(func(_ context.Context, head eth.L1BlockRef) { + return HeadProcessorFn(func(_ context.Context, head eth.L1BlockRef) error { processed[idx] = head + return nil }) } headUpdates := newHeadUpdateProcessor(logger, nil, nil, []HeadProcessor{makeProcessor(0), makeProcessor(1), makeProcessor(2)}) diff --git a/op-supervisor/supervisor/types/types.go b/op-supervisor/supervisor/types/types.go index b035e26abcef..e89e8e9515bb 100644 --- a/op-supervisor/supervisor/types/types.go +++ b/op-supervisor/supervisor/types/types.go @@ -73,7 +73,7 @@ func (lvl SafetyLevel) String() string { func (lvl SafetyLevel) Valid() bool { switch lvl { - case Finalized, Safe, CrossUnsafe, Unsafe: + case CrossFinalized, Finalized, Safe, CrossUnsafe, Unsafe: return true default: return false From c6d3adf6e13569f243e964e47a6d5ccec31a367b Mon Sep 17 00:00:00 2001 From: DenseDenise Date: Wed, 25 Sep 2024 22:20:39 +0000 Subject: [PATCH 028/116] Update README.md (#12125) --- op-preimage/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/op-preimage/README.md b/op-preimage/README.md index 584209ae3a8a..82a3cf247196 100644 --- a/op-preimage/README.md +++ b/op-preimage/README.md @@ -4,5 +4,5 @@ Read more about the Preimage Oracle in the [specs](https://github.com/ethereum-optimism/specs/blob/main/specs/fault-proof/index.md#pre-image-oracle). -See [op-program](../op-program) and [Cannon client examples](../cannon/example) for client-side usage. +See [op-program](../op-program) and [Cannon client examples](../cannon/testdata/example) for client-side usage. See [Cannon `mipsevm`](../cannon/mipsevm) for server-side usage. 
From d8cb523d51e0f9bc1ed6e3829cc2bfd944a0c9d2 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Wed, 25 Sep 2024 18:20:40 -0400 Subject: [PATCH 029/116] fix(ct): correct input and return argument formatting (#12131) Standardizes the input and return argument formatting everywhere. --- .semgrepignore | 18 ++- .../scripts/DeployAuthSystem.s.sol | 4 +- .../scripts/DeployImplementations.s.sol | 54 +++---- .../scripts/DeployOPChain.s.sol | 32 ++-- .../scripts/DeploySuperchain.s.sol | 12 +- .../scripts/checks/check-semver-diff.sh | 7 +- .../scripts/deploy/Deploy.s.sol | 6 +- .../scripts/deploy/DeployConfig.s.sol | 56 ++++--- .../scripts/libraries/Config.sol | 40 ++--- .../scripts/libraries/ForgeArtifacts.sol | 6 +- .../deploy/PeripheryDeployConfig.s.sol | 4 +- .../periphery/drippie/DrippieConfig.s.sol | 4 +- .../periphery/drippie/ManageDrippie.s.sol | 12 +- packages/contracts-bedrock/semver-lock.json | 40 ++--- .../abi/DataAvailabilityChallenge.json | 24 +-- .../snapshots/abi/DelayedVetoable.json | 10 +- .../abi/L2ToL2CrossDomainMessenger.json | 4 +- .../snapshots/abi/LivenessGuard.json | 22 +-- .../abi/OptimismSuperchainERC20Factory.json | 2 +- .../snapshots/abi/StorageSetter.json | 2 +- .../src/L1/DataAvailabilityChallenge.sol | 140 +++++++++--------- .../src/L1/DelayedVetoable.sol | 32 ++-- .../src/L1/L1CrossDomainMessenger.sol | 8 +- .../src/L1/OPContractsManager.sol | 10 +- .../src/L1/OPContractsManagerInterop.sol | 6 +- .../interfaces/IDataAvailabilityChallenge.sol | 22 +-- .../src/L1/interfaces/IDelayedVetoable.sol | 4 +- .../src/L1/interfaces/IOptimismPortal.sol | 4 +- .../src/L1/interfaces/IOptimismPortal2.sol | 4 +- .../L1/interfaces/IOptimismPortalInterop.sol | 4 +- .../src/L1/interfaces/IProtocolVersions.sol | 2 +- .../src/L1/interfaces/IResourceMetering.sol | 2 +- .../src/L1/interfaces/ISystemConfig.sol | 2 +- .../L1/interfaces/ISystemConfigInterop.sol | 2 +- .../src/L2/L2ToL2CrossDomainMessenger.sol | 16 +- .../src/L2/OptimismSuperchainERC20.sol | 8 +- 
.../src/L2/OptimismSuperchainERC20Factory.sol | 14 +- .../src/L2/interfaces/ICrossL2Inbox.sol | 20 +-- .../IL2ToL2CrossDomainMessenger.sol | 8 +- .../L2/interfaces/IOptimismERC20Factory.sol | 4 +- .../contracts-bedrock/src/cannon/MIPS2.sol | 16 +- .../src/cannon/libraries/MIPSState.sol | 4 +- .../interfaces/IAnchorStateRegistry.sol | 2 +- .../src/dispute/interfaces/IDelayedWETH.sol | 2 +- .../interfaces/IDisputeGameFactory.sol | 2 +- .../dispute/interfaces/IFaultDisputeGame.sol | 6 +- .../interfaces/IPermissionedDisputeGame.sol | 6 +- .../governance/interfaces/IMintManager.sol | 2 +- .../src/libraries/Encoding.sol | 54 +++---- .../src/safe/LivenessGuard.sol | 48 +++--- .../src/safe/SafeSigners.sol | 44 +++--- .../src/universal/StorageSetter.sol | 10 +- .../src/universal/interfaces/IOwnable.sol | 2 +- .../test/L1/DelayedVetoable.t.sol | 8 +- .../test/safe/LivenessGuard.t.sol | 22 +-- .../test/safe/SafeSigners.t.sol | 2 +- 56 files changed, 471 insertions(+), 430 deletions(-) diff --git a/.semgrepignore b/.semgrepignore index 3ee97221f406..3208e2604b89 100644 --- a/.semgrepignore +++ b/.semgrepignore @@ -19,9 +19,25 @@ tests/ .semgrep_logs/ op-chain-ops/script/testdata +op-chain-ops/script/testdata/scripts/ScriptExample.s.sol packages/*/node_modules packages/*/test -# Autogenerated solidity library +# TODO: Define these exclusions inside of the semgrep rules once those rules +# are all defined locally in the repository instead of the semgrep app. 
+ +# Contracts: autogenerated solidity library packages/contracts-bedrock/scripts/libraries/Solarray.sol + +# Contracts: vendor interfaces +packages/contracts-bedrock/scripts/interfaces/IGnosisSafe.sol +packages/contracts-bedrock/src/EAS/ + +# Contracts: deliberate exclusions +packages/contracts-bedrock/src/universal/WETH98.sol +packages/contracts-bedrock/src/universal/interfaces/IWETH.sol +packages/contracts-bedrock/src/L2/SuperchainWETH.sol +packages/contracts-bedrock/src/L2/interfaces/ISuperchainWETH.sol +packages/contracts-bedrock/src/governance/GovernanceToken.sol +packages/contracts-bedrock/src/governance/interfaces/IGovernanceToken.sol diff --git a/packages/contracts-bedrock/scripts/DeployAuthSystem.s.sol b/packages/contracts-bedrock/scripts/DeployAuthSystem.s.sol index 0ddcbc1a7aa0..a0f8715f3b21 100644 --- a/packages/contracts-bedrock/scripts/DeployAuthSystem.s.sol +++ b/packages/contracts-bedrock/scripts/DeployAuthSystem.s.sol @@ -49,8 +49,8 @@ contract DeployAuthSystemInput is CommonBase { contract DeployAuthSystemOutput is CommonBase { Safe internal _safe; - function set(bytes4 sel, address _address) public { - if (sel == this.safe.selector) _safe = Safe(payable(_address)); + function set(bytes4 _sel, address _address) public { + if (_sel == this.safe.selector) _safe = Safe(payable(_address)); else revert("DeployAuthSystemOutput: unknown selector"); } diff --git a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol index cf992ab582b7..3dffee5d32b0 100644 --- a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol +++ b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol @@ -64,41 +64,41 @@ contract DeployImplementationsInput is BaseDeployIO { string internal _standardVersionsToml; - function set(bytes4 sel, uint256 _value) public { + function set(bytes4 _sel, uint256 _value) public { require(_value != 0, "DeployImplementationsInput: cannot set zero value"); - 
if (sel == this.withdrawalDelaySeconds.selector) { + if (_sel == this.withdrawalDelaySeconds.selector) { _withdrawalDelaySeconds = _value; - } else if (sel == this.minProposalSizeBytes.selector) { + } else if (_sel == this.minProposalSizeBytes.selector) { _minProposalSizeBytes = _value; - } else if (sel == this.challengePeriodSeconds.selector) { + } else if (_sel == this.challengePeriodSeconds.selector) { require(_value <= type(uint64).max, "DeployImplementationsInput: challengePeriodSeconds too large"); _challengePeriodSeconds = _value; - } else if (sel == this.proofMaturityDelaySeconds.selector) { + } else if (_sel == this.proofMaturityDelaySeconds.selector) { _proofMaturityDelaySeconds = _value; - } else if (sel == this.disputeGameFinalityDelaySeconds.selector) { + } else if (_sel == this.disputeGameFinalityDelaySeconds.selector) { _disputeGameFinalityDelaySeconds = _value; } else { revert("DeployImplementationsInput: unknown selector"); } } - function set(bytes4 sel, string memory _value) public { + function set(bytes4 _sel, string memory _value) public { require(!LibString.eq(_value, ""), "DeployImplementationsInput: cannot set empty string"); - if (sel == this.release.selector) _release = _value; - else if (sel == this.standardVersionsToml.selector) _standardVersionsToml = _value; + if (_sel == this.release.selector) _release = _value; + else if (_sel == this.standardVersionsToml.selector) _standardVersionsToml = _value; else revert("DeployImplementationsInput: unknown selector"); } - function set(bytes4 sel, address _addr) public { + function set(bytes4 _sel, address _addr) public { require(_addr != address(0), "DeployImplementationsInput: cannot set zero address"); - if (sel == this.superchainConfigProxy.selector) _superchainConfigProxy = SuperchainConfig(_addr); - else if (sel == this.protocolVersionsProxy.selector) _protocolVersionsProxy = ProtocolVersions(_addr); + if (_sel == this.superchainConfigProxy.selector) _superchainConfigProxy = 
SuperchainConfig(_addr); + else if (_sel == this.protocolVersionsProxy.selector) _protocolVersionsProxy = ProtocolVersions(_addr); else revert("DeployImplementationsInput: unknown selector"); } - function set(bytes4 sel, bytes32 _value) public { - if (sel == this.salt.selector) _salt = _value; + function set(bytes4 _sel, bytes32 _value) public { + if (_sel == this.salt.selector) _salt = _value; else revert("DeployImplementationsInput: unknown selector"); } @@ -179,22 +179,22 @@ contract DeployImplementationsOutput is BaseDeployIO { OptimismMintableERC20Factory internal _optimismMintableERC20FactoryImpl; DisputeGameFactory internal _disputeGameFactoryImpl; - function set(bytes4 sel, address _addr) public { + function set(bytes4 _sel, address _addr) public { require(_addr != address(0), "DeployImplementationsOutput: cannot set zero address"); // forgefmt: disable-start - if (sel == this.opcmProxy.selector) _opcmProxy = OPContractsManager(payable(_addr)); - else if (sel == this.opcmImpl.selector) _opcmImpl = OPContractsManager(payable(_addr)); - else if (sel == this.optimismPortalImpl.selector) _optimismPortalImpl = OptimismPortal2(payable(_addr)); - else if (sel == this.delayedWETHImpl.selector) _delayedWETHImpl = DelayedWETH(payable(_addr)); - else if (sel == this.preimageOracleSingleton.selector) _preimageOracleSingleton = PreimageOracle(_addr); - else if (sel == this.mipsSingleton.selector) _mipsSingleton = MIPS(_addr); - else if (sel == this.systemConfigImpl.selector) _systemConfigImpl = SystemConfig(_addr); - else if (sel == this.l1CrossDomainMessengerImpl.selector) _l1CrossDomainMessengerImpl = L1CrossDomainMessenger(_addr); - else if (sel == this.l1ERC721BridgeImpl.selector) _l1ERC721BridgeImpl = L1ERC721Bridge(_addr); - else if (sel == this.l1StandardBridgeImpl.selector) _l1StandardBridgeImpl = L1StandardBridge(payable(_addr)); - else if (sel == this.optimismMintableERC20FactoryImpl.selector) _optimismMintableERC20FactoryImpl = 
OptimismMintableERC20Factory(_addr); - else if (sel == this.disputeGameFactoryImpl.selector) _disputeGameFactoryImpl = DisputeGameFactory(_addr); + if (_sel == this.opcmProxy.selector) _opcmProxy = OPContractsManager(payable(_addr)); + else if (_sel == this.opcmImpl.selector) _opcmImpl = OPContractsManager(payable(_addr)); + else if (_sel == this.optimismPortalImpl.selector) _optimismPortalImpl = OptimismPortal2(payable(_addr)); + else if (_sel == this.delayedWETHImpl.selector) _delayedWETHImpl = DelayedWETH(payable(_addr)); + else if (_sel == this.preimageOracleSingleton.selector) _preimageOracleSingleton = PreimageOracle(_addr); + else if (_sel == this.mipsSingleton.selector) _mipsSingleton = MIPS(_addr); + else if (_sel == this.systemConfigImpl.selector) _systemConfigImpl = SystemConfig(_addr); + else if (_sel == this.l1CrossDomainMessengerImpl.selector) _l1CrossDomainMessengerImpl = L1CrossDomainMessenger(_addr); + else if (_sel == this.l1ERC721BridgeImpl.selector) _l1ERC721BridgeImpl = L1ERC721Bridge(_addr); + else if (_sel == this.l1StandardBridgeImpl.selector) _l1StandardBridgeImpl = L1StandardBridge(payable(_addr)); + else if (_sel == this.optimismMintableERC20FactoryImpl.selector) _optimismMintableERC20FactoryImpl = OptimismMintableERC20Factory(_addr); + else if (_sel == this.disputeGameFactoryImpl.selector) _disputeGameFactoryImpl = DisputeGameFactory(_addr); else revert("DeployImplementationsOutput: unknown selector"); // forgefmt: disable-end } diff --git a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol index 6cecb41d36eb..fb4eb986a631 100644 --- a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol @@ -164,24 +164,24 @@ contract DeployOPChainOutput is BaseDeployIO { DelayedWETH internal _delayedWETHPermissionedGameProxy; DelayedWETH internal _delayedWETHPermissionlessGameProxy; - function set(bytes4 sel, address _addr) public 
{ + function set(bytes4 _sel, address _addr) public { require(_addr != address(0), "DeployOPChainOutput: cannot set zero address"); // forgefmt: disable-start - if (sel == this.opChainProxyAdmin.selector) _opChainProxyAdmin = ProxyAdmin(_addr) ; - else if (sel == this.addressManager.selector) _addressManager = AddressManager(_addr) ; - else if (sel == this.l1ERC721BridgeProxy.selector) _l1ERC721BridgeProxy = L1ERC721Bridge(_addr) ; - else if (sel == this.systemConfigProxy.selector) _systemConfigProxy = SystemConfig(_addr) ; - else if (sel == this.optimismMintableERC20FactoryProxy.selector) _optimismMintableERC20FactoryProxy = OptimismMintableERC20Factory(_addr) ; - else if (sel == this.l1StandardBridgeProxy.selector) _l1StandardBridgeProxy = L1StandardBridge(payable(_addr)) ; - else if (sel == this.l1CrossDomainMessengerProxy.selector) _l1CrossDomainMessengerProxy = L1CrossDomainMessenger(_addr) ; - else if (sel == this.optimismPortalProxy.selector) _optimismPortalProxy = OptimismPortal2(payable(_addr)) ; - else if (sel == this.disputeGameFactoryProxy.selector) _disputeGameFactoryProxy = DisputeGameFactory(_addr) ; - else if (sel == this.anchorStateRegistryProxy.selector) _anchorStateRegistryProxy = AnchorStateRegistry(_addr) ; - else if (sel == this.anchorStateRegistryImpl.selector) _anchorStateRegistryImpl = AnchorStateRegistry(_addr) ; - else if (sel == this.faultDisputeGame.selector) _faultDisputeGame = FaultDisputeGame(_addr) ; - else if (sel == this.permissionedDisputeGame.selector) _permissionedDisputeGame = PermissionedDisputeGame(_addr) ; - else if (sel == this.delayedWETHPermissionedGameProxy.selector) _delayedWETHPermissionedGameProxy = DelayedWETH(payable(_addr)) ; - else if (sel == this.delayedWETHPermissionlessGameProxy.selector) _delayedWETHPermissionlessGameProxy = DelayedWETH(payable(_addr)) ; + if (_sel == this.opChainProxyAdmin.selector) _opChainProxyAdmin = ProxyAdmin(_addr) ; + else if (_sel == this.addressManager.selector) _addressManager = 
AddressManager(_addr) ; + else if (_sel == this.l1ERC721BridgeProxy.selector) _l1ERC721BridgeProxy = L1ERC721Bridge(_addr) ; + else if (_sel == this.systemConfigProxy.selector) _systemConfigProxy = SystemConfig(_addr) ; + else if (_sel == this.optimismMintableERC20FactoryProxy.selector) _optimismMintableERC20FactoryProxy = OptimismMintableERC20Factory(_addr) ; + else if (_sel == this.l1StandardBridgeProxy.selector) _l1StandardBridgeProxy = L1StandardBridge(payable(_addr)) ; + else if (_sel == this.l1CrossDomainMessengerProxy.selector) _l1CrossDomainMessengerProxy = L1CrossDomainMessenger(_addr) ; + else if (_sel == this.optimismPortalProxy.selector) _optimismPortalProxy = OptimismPortal2(payable(_addr)) ; + else if (_sel == this.disputeGameFactoryProxy.selector) _disputeGameFactoryProxy = DisputeGameFactory(_addr) ; + else if (_sel == this.anchorStateRegistryProxy.selector) _anchorStateRegistryProxy = AnchorStateRegistry(_addr) ; + else if (_sel == this.anchorStateRegistryImpl.selector) _anchorStateRegistryImpl = AnchorStateRegistry(_addr) ; + else if (_sel == this.faultDisputeGame.selector) _faultDisputeGame = FaultDisputeGame(_addr) ; + else if (_sel == this.permissionedDisputeGame.selector) _permissionedDisputeGame = PermissionedDisputeGame(_addr) ; + else if (_sel == this.delayedWETHPermissionedGameProxy.selector) _delayedWETHPermissionedGameProxy = DelayedWETH(payable(_addr)) ; + else if (_sel == this.delayedWETHPermissionlessGameProxy.selector) _delayedWETHPermissionlessGameProxy = DelayedWETH(payable(_addr)) ; else revert("DeployOPChainOutput: unknown selector"); // forgefmt: disable-end } diff --git a/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol b/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol index 7726d5709076..b26755c755ec 100644 --- a/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol +++ b/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol @@ -164,13 +164,13 @@ contract DeploySuperchainOutput is BaseDeployIO { // 
This method lets each field be set individually. The selector of an output's getter method // is used to determine which field to set. - function set(bytes4 sel, address _address) public { + function set(bytes4 _sel, address _address) public { require(_address != address(0), "DeploySuperchainOutput: cannot set zero address"); - if (sel == this.superchainProxyAdmin.selector) _superchainProxyAdmin = ProxyAdmin(_address); - else if (sel == this.superchainConfigImpl.selector) _superchainConfigImpl = SuperchainConfig(_address); - else if (sel == this.superchainConfigProxy.selector) _superchainConfigProxy = SuperchainConfig(_address); - else if (sel == this.protocolVersionsImpl.selector) _protocolVersionsImpl = ProtocolVersions(_address); - else if (sel == this.protocolVersionsProxy.selector) _protocolVersionsProxy = ProtocolVersions(_address); + if (_sel == this.superchainProxyAdmin.selector) _superchainProxyAdmin = ProxyAdmin(_address); + else if (_sel == this.superchainConfigImpl.selector) _superchainConfigImpl = SuperchainConfig(_address); + else if (_sel == this.superchainConfigProxy.selector) _superchainConfigProxy = SuperchainConfig(_address); + else if (_sel == this.protocolVersionsImpl.selector) _protocolVersionsImpl = ProtocolVersions(_address); + else if (_sel == this.protocolVersionsProxy.selector) _protocolVersionsProxy = ProtocolVersions(_address); else revert("DeploySuperchainOutput: unknown selector"); } diff --git a/packages/contracts-bedrock/scripts/checks/check-semver-diff.sh b/packages/contracts-bedrock/scripts/checks/check-semver-diff.sh index 15a3ebb50da2..81e7c6476d3a 100755 --- a/packages/contracts-bedrock/scripts/checks/check-semver-diff.sh +++ b/packages/contracts-bedrock/scripts/checks/check-semver-diff.sh @@ -16,7 +16,7 @@ temp_dir=$(mktemp -d) trap 'rm -rf "$temp_dir"' EXIT # Exit early if semver-lock.json has not changed. -if ! git diff origin/develop...HEAD --name-only | grep -q "$SEMVER_LOCK"; then +if ! 
{ git diff origin/develop...HEAD --name-only; git diff --name-only; git diff --cached --name-only; } | grep -q "$SEMVER_LOCK"; then echo "No changes detected in semver-lock.json" exit 0 fi @@ -71,9 +71,12 @@ for contract in $changed_contracts; do has_errors=true fi + # TODO: Use an existing semver comparison function since this will only + # check if the version has changed at all and not that the version has + # increased properly. # Check if the version changed. if [ "$old_version" = "$new_version" ]; then - echo "❌ Error: src/$contract has changes in semver-lock.json but no version change" + echo "❌ Error: $contract has changes in semver-lock.json but no version change" echo " Old version: $old_version" echo " New version: $new_version" has_errors=true diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index 722cd7c61ecf..dc58911dfa53 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -281,11 +281,11 @@ contract Deploy is Deployer { /// @notice Deploy a new OP Chain using an existing SuperchainConfig and ProtocolVersions /// @param _superchainConfigProxy Address of the existing SuperchainConfig proxy /// @param _protocolVersionsProxy Address of the existing ProtocolVersions proxy - /// @param includeDump Whether to include a state dump after deployment + /// @param _includeDump Whether to include a state dump after deployment function runWithSuperchain( address payable _superchainConfigProxy, address payable _protocolVersionsProxy, - bool includeDump + bool _includeDump ) public { @@ -306,7 +306,7 @@ contract Deploy is Deployer { _run(false); - if (includeDump) { + if (_includeDump) { vm.dumpState(Config.stateDumpPath("")); } } diff --git a/packages/contracts-bedrock/scripts/deploy/DeployConfig.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployConfig.s.sol index 6433509f6764..d9a8abcf8805 100644 --- 
a/packages/contracts-bedrock/scripts/deploy/DeployConfig.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployConfig.s.sol @@ -94,8 +94,8 @@ contract DeployConfig is Script { function read(string memory _path) public { console.log("DeployConfig: reading file %s", _path); - try vm.readFile(_path) returns (string memory data) { - _json = data; + try vm.readFile(_path) returns (string memory data_) { + _json = data_; } catch { require(false, string.concat("Cannot find deploy config file at ", _path)); } @@ -191,14 +191,14 @@ contract DeployConfig is Script { } function l1StartingBlockTag() public returns (bytes32) { - try vm.parseJsonBytes32(_json, "$.l1StartingBlockTag") returns (bytes32 tag) { - return tag; + try vm.parseJsonBytes32(_json, "$.l1StartingBlockTag") returns (bytes32 tag_) { + return tag_; } catch { - try vm.parseJsonString(_json, "$.l1StartingBlockTag") returns (string memory tag) { - return _getBlockByTag(tag); + try vm.parseJsonString(_json, "$.l1StartingBlockTag") returns (string memory tag_) { + return _getBlockByTag(tag_); } catch { - try vm.parseJsonUint(_json, "$.l1StartingBlockTag") returns (uint256 tag) { - return _getBlockByTag(vm.toString(tag)); + try vm.parseJsonUint(_json, "$.l1StartingBlockTag") returns (uint256 tag_) { + return _getBlockByTag(vm.toString(tag_)); } catch { } } } @@ -266,32 +266,48 @@ contract DeployConfig is Script { return abi.decode(res, (bytes32)); } - function _readOr(string memory json, string memory key, bool defaultValue) internal view returns (bool) { - return vm.keyExistsJson(json, key) ? json.readBool(key) : defaultValue; + function _readOr(string memory _jsonInp, string memory _key, bool _defaultValue) internal view returns (bool) { + return vm.keyExistsJson(_jsonInp, _key) ? _jsonInp.readBool(_key) : _defaultValue; } - function _readOr(string memory json, string memory key, uint256 defaultValue) internal view returns (uint256) { - return (vm.keyExistsJson(json, key) && !_isNull(json, key)) ? 
json.readUint(key) : defaultValue; + function _readOr( + string memory _jsonInp, + string memory _key, + uint256 _defaultValue + ) + internal + view + returns (uint256) + { + return (vm.keyExistsJson(_jsonInp, _key) && !_isNull(_json, _key)) ? _jsonInp.readUint(_key) : _defaultValue; } - function _readOr(string memory json, string memory key, address defaultValue) internal view returns (address) { - return vm.keyExistsJson(json, key) ? json.readAddress(key) : defaultValue; + function _readOr( + string memory _jsonInp, + string memory _key, + address _defaultValue + ) + internal + view + returns (address) + { + return vm.keyExistsJson(_jsonInp, _key) ? _jsonInp.readAddress(_key) : _defaultValue; } - function _isNull(string memory json, string memory key) internal pure returns (bool) { - string memory value = json.readString(key); + function _isNull(string memory _jsonInp, string memory _key) internal pure returns (bool) { + string memory value = _jsonInp.readString(_key); return (keccak256(bytes(value)) == keccak256(bytes("null"))); } function _readOr( - string memory json, - string memory key, - string memory defaultValue + string memory _jsonInp, + string memory _key, + string memory _defaultValue ) internal view returns (string memory) { - return vm.keyExists(json, key) ? json.readString(key) : defaultValue; + return vm.keyExists(_jsonInp, _key) ? _jsonInp.readString(_key) : _defaultValue; } } diff --git a/packages/contracts-bedrock/scripts/libraries/Config.sol b/packages/contracts-bedrock/scripts/libraries/Config.sol index 06e59e902246..41d2380c46d0 100644 --- a/packages/contracts-bedrock/scripts/libraries/Config.sol +++ b/packages/contracts-bedrock/scripts/libraries/Config.sol @@ -65,45 +65,45 @@ library Config { /// @notice Returns the path on the local filesystem where the deployment artifact is /// written to disk after doing a deployment. 
- function deploymentOutfile() internal view returns (string memory _env) { - _env = vm.envOr( + function deploymentOutfile() internal view returns (string memory env_) { + env_ = vm.envOr( "DEPLOYMENT_OUTFILE", string.concat(vm.projectRoot(), "/deployments/", vm.toString(block.chainid), "-deploy.json") ); } /// @notice Returns the path on the local filesystem where the deploy config is - function deployConfigPath() internal view returns (string memory _env) { + function deployConfigPath() internal view returns (string memory env_) { if (vm.isContext(VmSafe.ForgeContext.TestGroup)) { - _env = string.concat(vm.projectRoot(), "/deploy-config/hardhat.json"); + env_ = string.concat(vm.projectRoot(), "/deploy-config/hardhat.json"); } else { - _env = vm.envOr("DEPLOY_CONFIG_PATH", string("")); - require(bytes(_env).length > 0, "Config: must set DEPLOY_CONFIG_PATH to filesystem path of deploy config"); + env_ = vm.envOr("DEPLOY_CONFIG_PATH", string("")); + require(bytes(env_).length > 0, "Config: must set DEPLOY_CONFIG_PATH to filesystem path of deploy config"); } } /// @notice Returns the chainid from the EVM context or the value of the CHAIN_ID env var as /// an override. - function chainID() internal view returns (uint256 _env) { - _env = vm.envOr("CHAIN_ID", block.chainid); + function chainID() internal view returns (uint256 env_) { + env_ = vm.envOr("CHAIN_ID", block.chainid); } /// @notice Returns the value of the env var CONTRACT_ADDRESSES_PATH which is a JSON key/value /// pair of contract names and their addresses. Each key/value pair is passed to `save` /// which then backs the `getAddress` function. - function contractAddressesPath() internal view returns (string memory _env) { - _env = vm.envOr("CONTRACT_ADDRESSES_PATH", string("")); + function contractAddressesPath() internal view returns (string memory env_) { + env_ = vm.envOr("CONTRACT_ADDRESSES_PATH", string("")); } /// @notice The CREATE2 salt to be used when deploying the implementations. 
- function implSalt() internal view returns (string memory _env) { - _env = vm.envOr("IMPL_SALT", string("ethers phoenix")); + function implSalt() internal view returns (string memory env_) { + env_ = vm.envOr("IMPL_SALT", string("ethers phoenix")); } /// @notice Returns the path that the state dump file should be written to or read from /// on the local filesystem. - function stateDumpPath(string memory _suffix) internal view returns (string memory _env) { - _env = vm.envOr( + function stateDumpPath(string memory _suffix) internal view returns (string memory env_) { + env_ = vm.envOr( "STATE_DUMP_PATH", string.concat(vm.projectRoot(), "/state-dump-", vm.toString(block.chainid), _suffix, ".json") ); @@ -112,13 +112,13 @@ library Config { /// @notice Returns the name of the file that the forge deployment artifact is written to on the local /// filesystem. By default, it is the name of the deploy script with the suffix `-latest.json`. /// This was useful for creating hardhat deploy style artifacts and will be removed in a future release. - function deployFile(string memory _sig) internal view returns (string memory _env) { - _env = vm.envOr("DEPLOY_FILE", string.concat(_sig, "-latest.json")); + function deployFile(string memory _sig) internal view returns (string memory env_) { + env_ = vm.envOr("DEPLOY_FILE", string.concat(_sig, "-latest.json")); } /// @notice Returns the private key that is used to configure drippie. - function drippieOwnerPrivateKey() internal view returns (uint256 _env) { - _env = vm.envUint("DRIPPIE_OWNER_PRIVATE_KEY"); + function drippieOwnerPrivateKey() internal view returns (uint256 env_) { + env_ = vm.envUint("DRIPPIE_OWNER_PRIVATE_KEY"); } /// @notice Returns the OutputMode for genesis allocs generation. @@ -139,8 +139,8 @@ library Config { } /// @notice Returns true if multithreaded Cannon is used for the deployment. 
- function useMultithreadedCannon() internal view returns (bool _enabled) { - _enabled = vm.envOr("USE_MT_CANNON", false); + function useMultithreadedCannon() internal view returns (bool enabled_) { + enabled_ = vm.envOr("USE_MT_CANNON", false); } /// @notice Returns the latest fork to use for genesis allocs generation. diff --git a/packages/contracts-bedrock/scripts/libraries/ForgeArtifacts.sol b/packages/contracts-bedrock/scripts/libraries/ForgeArtifacts.sol index 4aa4309fad83..944206694d78 100644 --- a/packages/contracts-bedrock/scripts/libraries/ForgeArtifacts.sol +++ b/packages/contracts-bedrock/scripts/libraries/ForgeArtifacts.sol @@ -268,13 +268,13 @@ library ForgeArtifacts { /// @notice Returns the function ABIs of all L1 contracts. function getContractFunctionAbis( - string memory path, - string[] memory pathExcludes + string memory _path, + string[] memory _pathExcludes ) internal returns (Abi[] memory abis_) { - string[] memory contractNames = getContractNames(path, pathExcludes); + string[] memory contractNames = getContractNames(_path, _pathExcludes); abis_ = new Abi[](contractNames.length); for (uint256 i; i < contractNames.length; i++) { diff --git a/packages/contracts-bedrock/scripts/periphery/deploy/PeripheryDeployConfig.s.sol b/packages/contracts-bedrock/scripts/periphery/deploy/PeripheryDeployConfig.s.sol index a4a6e2bfa379..e86bc98f7154 100644 --- a/packages/contracts-bedrock/scripts/periphery/deploy/PeripheryDeployConfig.s.sol +++ b/packages/contracts-bedrock/scripts/periphery/deploy/PeripheryDeployConfig.s.sol @@ -40,8 +40,8 @@ contract PeripheryDeployConfig is Script { constructor(string memory _path) { console.log("PeripheryDeployConfig: reading file %s", _path); - try vm.readFile(_path) returns (string memory data) { - _json = data; + try vm.readFile(_path) returns (string memory data_) { + _json = data_; } catch { console.log("Warning: unable to read config. 
Do not deploy unless you are not using config."); return; diff --git a/packages/contracts-bedrock/scripts/periphery/drippie/DrippieConfig.s.sol b/packages/contracts-bedrock/scripts/periphery/drippie/DrippieConfig.s.sol index 3ab115ba326f..d72a909804cd 100644 --- a/packages/contracts-bedrock/scripts/periphery/drippie/DrippieConfig.s.sol +++ b/packages/contracts-bedrock/scripts/periphery/drippie/DrippieConfig.s.sol @@ -66,8 +66,8 @@ contract DrippieConfig is Script { constructor(string memory _path) { // Load the configuration file. console.log("DrippieConfig: reading file %s", _path); - try vm.readFile(_path) returns (string memory data) { - _json = data; + try vm.readFile(_path) returns (string memory data_) { + _json = data_; } catch { console.log("WARNING: unable to read config, do not deploy unless you are not using config"); return; diff --git a/packages/contracts-bedrock/scripts/periphery/drippie/ManageDrippie.s.sol b/packages/contracts-bedrock/scripts/periphery/drippie/ManageDrippie.s.sol index f37a16f547f9..4886844486e0 100644 --- a/packages/contracts-bedrock/scripts/periphery/drippie/ManageDrippie.s.sol +++ b/packages/contracts-bedrock/scripts/periphery/drippie/ManageDrippie.s.sol @@ -103,14 +103,14 @@ contract ManageDrippie is Script { /// @notice Generates the data for a Gelato task that would trigger a drip. /// @param _drippie The drippie contract. /// @param _name The name of the drip. - /// @return _taskData Gelato task data. + /// @return taskData_ Gelato task data. function _makeGelatoDripTaskData( Drippie _drippie, string memory _name ) internal view - returns (GelatoTaskData memory _taskData) + returns (GelatoTaskData memory taskData_) { // Get the drip interval. uint256 dripInterval = _drippie.getDripInterval(_name); @@ -131,7 +131,7 @@ contract ManageDrippie is Script { args[1] = abi.encode(uint128(GelatoDataTypes.TriggerType.TIME), abi.encode(uint128(0), interval)); // Create the task data. 
- _taskData = GelatoTaskData({ + taskData_ = GelatoTaskData({ taskCreator: msg.sender, execAddress: address(_drippie), execData: abi.encodeCall(Drippie.drip, (_name)), @@ -158,7 +158,7 @@ contract ManageDrippie is Script { /// @param _gelato The gelato contract. /// @param _drippie The drippie contract. /// @param _name The name of the drip being triggered. - /// @return _active True if the task is active, false otherwise. + /// @return active_ True if the task is active, false otherwise. function _isGelatoDripTaskActive( IGelato _gelato, Drippie _drippie, @@ -166,7 +166,7 @@ contract ManageDrippie is Script { ) internal view - returns (bool _active) + returns (bool active_) { GelatoTaskData memory taskData = _makeGelatoDripTaskData({ _drippie: _drippie, _name: _name }); bytes32 taskId = GelatoTaskId.getTaskId({ @@ -181,7 +181,7 @@ contract ManageDrippie is Script { bytes32[] memory taskIds = _gelato.getTaskIdsByUser(taskData.taskCreator); for (uint256 i = 0; i < taskIds.length; i++) { if (taskIds[i] == taskId) { - _active = true; + active_ = true; } } } diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index b9962af979cc..40026d47199a 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -8,16 +8,16 @@ "sourceCodeHash": "0x9ec99e63a991691e8756a663edf2ccfbe9b91161c134e24f38298da61ecd66dd" }, "src/L1/DataAvailabilityChallenge.sol": { - "initCodeHash": "0xcc96cf2e4d841adb7ecb9dd84abeb0893dd62d913c0d47ab5b66a893c6e47e88", - "sourceCodeHash": "0xce01773740f4d50ac77f868343d654f6ca24f85d2770eb7e4043e98f609b1c15" + "initCodeHash": "0xbd00d6568abab3e7fc211c40d682862242f25493010a4a097bd1f3b45c8c87c3", + "sourceCodeHash": "0x58b587034a67b4bb718abbaded8ac23b082c0971105874bcc42c23f051c67f6e" }, "src/L1/DelayedVetoable.sol": { - "initCodeHash": "0xd504ab0568719a0fb960ebe73d0437645f5c4bd8f6619219858209ef002516dd", - "sourceCodeHash": 
"0x60af558156543d639a0a92e983ad0f045aac1f9ac4c3adaa1d4d97b37175e03a" + "initCodeHash": "0x9fe8ade6f6332262ff1f3539ac0bf57660edbad3cf4c4cb230c2ddac18aa0a3f", + "sourceCodeHash": "0x30e83a535ef27b2e900c831c4e1a4ec2750195350011c4fdacda1da9db2d167b" }, "src/L1/L1CrossDomainMessenger.sol": { - "initCodeHash": "0x48db42620b9f16e0dec2355f4076314f82fd0f60ef04c10cdbc266eac9472515", - "sourceCodeHash": "0xb77342e6b55b835e9597f7a1c4a2d52ddd56f5cfb7cd38da0bcc488c79a9011e" + "initCodeHash": "0x2e9cb3ceb5e55341b311f0666ef7655df4fafae75afdfbcd701cd9c9b2b017d5", + "sourceCodeHash": "0x848ec3774be17bcc8ba65a23d08e35e979b3f39f9d2ac8a810188f945c69c9ea" }, "src/L1/L1ERC721Bridge.sol": { "initCodeHash": "0xfb8b3c51e1790a0b951eaba05ed7368309fbfc7ddc558b4ce1de29da087fb4bd", @@ -32,8 +32,8 @@ "sourceCodeHash": "0xde4df0f9633dc0cdb1c9f634003ea5b0f7c5c1aebc407bc1b2f44c0ecf938649" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0x7903f225091334a1910470bb1b5c111f13f6f2572faf03e0c74ad625e4c0d6f5", - "sourceCodeHash": "0x3a25b0ac70b1d434773c86f46b1f2a995722e33d3273762fd5abbb541bffa7db" + "initCodeHash": "0x08be0367ee031ee292b74aa9b6fc86c5d65cbbdadd455bb8120748eec79cf2d8", + "sourceCodeHash": "0x84fd2b583ddf44e900c58861ddda103f7bea793d71fb845f76ed28afd1e757bc" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xbe2c0c81b3459014f287d8c89cdc0d27dde5d1f44e5d024fa1e4773ddc47c190", @@ -112,20 +112,20 @@ "sourceCodeHash": "0xd08a2e6514dbd44e16aa312a1b27b2841a9eab5622cbd05a39c30f543fad673c" }, "src/L2/L2ToL2CrossDomainMessenger.sol": { - "initCodeHash": "0x3e4337542234c732a55e60fc20dcb1ad639ff2fb378e3f29e94b4059df9a637b", - "sourceCodeHash": "0x4b806cc85cead74c8df34ab08f4b6c6a95a1a387a335ec8a7cb2de4ea4e1cf41" + "initCodeHash": "0x2ec4cdf62baf9dfb2c4c211d8b914f3dd1e0c0133c15c739ff81c5a2504d4359", + "sourceCodeHash": "0x54dfffed789dafe11b7f7bb16dfb29988713a19da5209452119c5d6539e48c48" }, "src/L2/OptimismSuperchainERC20.sol": { - "initCodeHash": 
"0x4fd71b5352b78d51d39625b6defa77a75be53067b32f3cba86bd17a46917adf9", - "sourceCodeHash": "0xad3934ea533544b3c130c80be26201354af85f9166cb2ce54d96e5e383ebb5c1" + "initCodeHash": "0x192bb3abd2a103832172d913f548e36bcf6f2c0220cd224a83f8420e2e86b4ec", + "sourceCodeHash": "0x09d3367612dee674e3708da1c70eebbd0c6835fbcbba339780e678338bdfd3ca" }, "src/L2/OptimismSuperchainERC20Beacon.sol": { "initCodeHash": "0x99ce8095b23c124850d866cbc144fee6cee05dbc6bb5d83acadfe00b90cf42c7", "sourceCodeHash": "0x5e58b7c867fafa49fe39d68d83875425e9cf94f05f2835bdcdaa08fc8bc6b68e" }, "src/L2/OptimismSuperchainERC20Factory.sol": { - "initCodeHash": "0x98011045722178751e4a1112892f7d9a11bc1f5e42ac18205b6d30a1f1476d24", - "sourceCodeHash": "0x9e72b2a77d82fcf3963734232ba9faff9d63962594a032041c2561f0a9f1b0b5" + "initCodeHash": "0x524bc58927ca60ba2fbc4b036ad00c5055758d5c5b2ebb3d75cb9b996175f2cb", + "sourceCodeHash": "0x155a4b22ff8e266560d1fae72e1db7fc164afd84b8a81afb74c69414e0d5438e" }, "src/L2/SequencerFeeVault.sol": { "initCodeHash": "0x2e6551705e493bacba8cffe22e564d5c401ae5bb02577a5424e0d32784e13e74", @@ -144,8 +144,8 @@ "sourceCodeHash": "0xba4674e1846afbbc708877332a38dfabd4b8d1e48ce07d8ebf0a45c9f27f16b0" }, "src/cannon/MIPS2.sol": { - "initCodeHash": "0x67fb4107e25561ffcb3a9b6653f695e125773408d626a92036ea4b0814797021", - "sourceCodeHash": "0x5f4851e04dc9369552c94fb23aee8e8ca4ea9a9602917f0abb3b5f1347460bd5" + "initCodeHash": "0xbb8c2370460e66274210d16ae527a29cb432bb646ebdccc0db0b21e53a4e428c", + "sourceCodeHash": "0x50ed780b621521047ed36ffb260032f2e5ec287f3e1ab3d742c7de45febb280d" }, "src/cannon/PreimageOracle.sol": { "initCodeHash": "0x801e52f9c8439fcf7089575fa93272dfb874641dbfc7d82f36d979c987271c0b", @@ -200,8 +200,8 @@ "sourceCodeHash": "0xde1a289c1cb0bf92138daf8f3db7457be2f84bedaa111b536f646dd6e121718c" }, "src/safe/LivenessGuard.sol": { - "initCodeHash": "0xfd74ff89e7b689b38ab97515d64429ffaf6c0cd1ea6488c6a4743a0665419c85", - "sourceCodeHash": 
"0xa40ea6472d9c7e124791489c0899822d6f6b19b16e583d3b437674c615e4bac3" + "initCodeHash": "0x9ac0b039b1591f7c00cf11cb758d118c9b42e6e08250b619d6b6fd605a43d5ee", + "sourceCodeHash": "0xc1a968b0c6fbc4d82c2821c917b273feaaa224d258886b394416e84ee250d026" }, "src/safe/LivenessModule.sol": { "initCodeHash": "0xcfccdd9e423c95a0ddc6e09ccb6333d5fc8429ed2b8fc872f1290d392ae13aad", @@ -224,7 +224,7 @@ "sourceCodeHash": "0x1c4bc4727f08d80e8364561b49397ee57bb485072cb004b7a430559cbfa019a6" }, "src/universal/StorageSetter.sol": { - "initCodeHash": "0x00b8b883597e67e5c3548e7ba4139ed720893c0acb217dd170bec520cefdfab5", - "sourceCodeHash": "0xf63aff9c38f4c5e9cdbd1f910bc002e16008a592d26c0dcc67929e0024638edd" + "initCodeHash": "0x21b3059e9b13b330f76d02b61f61dcfa3abf3517a0b56afa0895c4b8291740bf", + "sourceCodeHash": "0xc1ea12a87e3a7ef9c950f0a41a4e35b60d4d9c4c816ff671dbfca663861c16f4" } } \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/DataAvailabilityChallenge.json b/packages/contracts-bedrock/snapshots/abi/DataAvailabilityChallenge.json index fdf59e1a0026..90857e7e6ffd 100644 --- a/packages/contracts-bedrock/snapshots/abi/DataAvailabilityChallenge.json +++ b/packages/contracts-bedrock/snapshots/abi/DataAvailabilityChallenge.json @@ -44,12 +44,12 @@ "inputs": [ { "internalType": "uint256", - "name": "challengedBlockNumber", + "name": "_challengedBlockNumber", "type": "uint256" }, { "internalType": "bytes", - "name": "challengedCommitment", + "name": "_challengedCommitment", "type": "bytes" } ], @@ -95,12 +95,12 @@ "inputs": [ { "internalType": "uint256", - "name": "challengedBlockNumber", + "name": "_challengedBlockNumber", "type": "uint256" }, { "internalType": "bytes", - "name": "challengedCommitment", + "name": "_challengedCommitment", "type": "bytes" } ], @@ -141,12 +141,12 @@ "inputs": [ { "internalType": "uint256", - "name": "challengedBlockNumber", + "name": "_challengedBlockNumber", "type": "uint256" }, { "internalType": "bytes", - "name": 
"challengedCommitment", + "name": "_challengedCommitment", "type": "bytes" } ], @@ -218,17 +218,17 @@ "inputs": [ { "internalType": "uint256", - "name": "challengedBlockNumber", + "name": "_challengedBlockNumber", "type": "uint256" }, { "internalType": "bytes", - "name": "challengedCommitment", + "name": "_challengedCommitment", "type": "bytes" }, { "internalType": "bytes", - "name": "resolveData", + "name": "_resolveData", "type": "bytes" } ], @@ -306,12 +306,12 @@ "inputs": [ { "internalType": "uint256", - "name": "challengedBlockNumber", + "name": "_challengedBlockNumber", "type": "uint256" }, { "internalType": "bytes", - "name": "challengedCommitment", + "name": "_challengedCommitment", "type": "bytes" } ], @@ -324,7 +324,7 @@ "inputs": [ { "internalType": "bytes", - "name": "commitment", + "name": "_commitment", "type": "bytes" } ], diff --git a/packages/contracts-bedrock/snapshots/abi/DelayedVetoable.json b/packages/contracts-bedrock/snapshots/abi/DelayedVetoable.json index fad9861a1374..d76d1c8b108b 100644 --- a/packages/contracts-bedrock/snapshots/abi/DelayedVetoable.json +++ b/packages/contracts-bedrock/snapshots/abi/DelayedVetoable.json @@ -3,22 +3,22 @@ "inputs": [ { "internalType": "address", - "name": "vetoer_", + "name": "_vetoer", "type": "address" }, { "internalType": "address", - "name": "initiator_", + "name": "_initiator", "type": "address" }, { "internalType": "address", - "name": "target_", + "name": "_target", "type": "address" }, { "internalType": "uint256", - "name": "operatingDelay_", + "name": "_operatingDelay", "type": "uint256" } ], @@ -59,7 +59,7 @@ "inputs": [ { "internalType": "bytes32", - "name": "callHash", + "name": "_callHash", "type": "bytes32" } ], diff --git a/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json b/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json index f8468eb81e8c..f5e21dedebdf 100644 --- a/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json +++ 
b/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json @@ -5,7 +5,7 @@ "outputs": [ { "internalType": "address", - "name": "_sender", + "name": "sender_", "type": "address" } ], @@ -18,7 +18,7 @@ "outputs": [ { "internalType": "uint256", - "name": "_source", + "name": "source_", "type": "uint256" } ], diff --git a/packages/contracts-bedrock/snapshots/abi/LivenessGuard.json b/packages/contracts-bedrock/snapshots/abi/LivenessGuard.json index 53c5bac847d4..6193a882c15b 100644 --- a/packages/contracts-bedrock/snapshots/abi/LivenessGuard.json +++ b/packages/contracts-bedrock/snapshots/abi/LivenessGuard.json @@ -32,57 +32,57 @@ "inputs": [ { "internalType": "address", - "name": "to", + "name": "_to", "type": "address" }, { "internalType": "uint256", - "name": "value", + "name": "_value", "type": "uint256" }, { "internalType": "bytes", - "name": "data", + "name": "_data", "type": "bytes" }, { "internalType": "enum Enum.Operation", - "name": "operation", + "name": "_operation", "type": "uint8" }, { "internalType": "uint256", - "name": "safeTxGas", + "name": "_safeTxGas", "type": "uint256" }, { "internalType": "uint256", - "name": "baseGas", + "name": "_baseGas", "type": "uint256" }, { "internalType": "uint256", - "name": "gasPrice", + "name": "_gasPrice", "type": "uint256" }, { "internalType": "address", - "name": "gasToken", + "name": "_gasToken", "type": "address" }, { "internalType": "address payable", - "name": "refundReceiver", + "name": "_refundReceiver", "type": "address" }, { "internalType": "bytes", - "name": "signatures", + "name": "_signatures", "type": "bytes" }, { "internalType": "address", - "name": "msgSender", + "name": "_msgSender", "type": "address" } ], diff --git a/packages/contracts-bedrock/snapshots/abi/OptimismSuperchainERC20Factory.json b/packages/contracts-bedrock/snapshots/abi/OptimismSuperchainERC20Factory.json index 7171cf1f3198..eb1315194e44 100644 --- 
a/packages/contracts-bedrock/snapshots/abi/OptimismSuperchainERC20Factory.json +++ b/packages/contracts-bedrock/snapshots/abi/OptimismSuperchainERC20Factory.json @@ -26,7 +26,7 @@ "outputs": [ { "internalType": "address", - "name": "_superchainERC20", + "name": "superchainERC20_", "type": "address" } ], diff --git a/packages/contracts-bedrock/snapshots/abi/StorageSetter.json b/packages/contracts-bedrock/snapshots/abi/StorageSetter.json index 22896246e141..b64f62b3504e 100644 --- a/packages/contracts-bedrock/snapshots/abi/StorageSetter.json +++ b/packages/contracts-bedrock/snapshots/abi/StorageSetter.json @@ -127,7 +127,7 @@ } ], "internalType": "struct StorageSetter.Slot[]", - "name": "slots", + "name": "_slots", "type": "tuple[]" } ], diff --git a/packages/contracts-bedrock/src/L1/DataAvailabilityChallenge.sol b/packages/contracts-bedrock/src/L1/DataAvailabilityChallenge.sol index 1bb0d1bf6680..2a725fc4f200 100644 --- a/packages/contracts-bedrock/src/L1/DataAvailabilityChallenge.sol +++ b/packages/contracts-bedrock/src/L1/DataAvailabilityChallenge.sol @@ -94,8 +94,8 @@ contract DataAvailabilityChallenge is OwnableUpgradeable, ISemver { event BalanceChanged(address account, uint256 balance); /// @notice Semantic version. - /// @custom:semver 1.0.1-beta.1 - string public constant version = "1.0.1-beta.1"; + /// @custom:semver 1.0.1-beta.2 + string public constant version = "1.0.1-beta.2"; /// @notice The fixed cost of resolving a challenge. /// @dev The value is estimated by measuring the cost of resolving with `bytes(0)` @@ -210,48 +210,48 @@ contract DataAvailabilityChallenge is OwnableUpgradeable, ISemver { } /// @notice Checks if the current block is within the challenge window for a given challenged block number. - /// @param challengedBlockNumber The block number at which the commitment was made. + /// @param _challengedBlockNumber The block number at which the commitment was made. 
/// @return True if the current block is within the challenge window, false otherwise. - function _isInChallengeWindow(uint256 challengedBlockNumber) internal view returns (bool) { - return (block.number >= challengedBlockNumber && block.number <= challengedBlockNumber + challengeWindow); + function _isInChallengeWindow(uint256 _challengedBlockNumber) internal view returns (bool) { + return (block.number >= _challengedBlockNumber && block.number <= _challengedBlockNumber + challengeWindow); } /// @notice Checks if the current block is within the resolve window for a given challenge start block number. - /// @param challengeStartBlockNumber The block number at which the challenge was initiated. + /// @param _challengeStartBlockNumber The block number at which the challenge was initiated. /// @return True if the current block is within the resolve window, false otherwise. - function _isInResolveWindow(uint256 challengeStartBlockNumber) internal view returns (bool) { - return block.number <= challengeStartBlockNumber + resolveWindow; + function _isInResolveWindow(uint256 _challengeStartBlockNumber) internal view returns (bool) { + return block.number <= _challengeStartBlockNumber + resolveWindow; } /// @notice Returns a challenge for the given block number and commitment. /// @dev Unlike with a public `challenges` mapping, we can return a Challenge struct instead of tuple. - /// @param challengedBlockNumber The block number at which the commitment was made. - /// @param challengedCommitment The commitment that is being challenged. + /// @param _challengedBlockNumber The block number at which the commitment was made. + /// @param _challengedCommitment The commitment that is being challenged. /// @return The challenge struct. 
function getChallenge( - uint256 challengedBlockNumber, - bytes calldata challengedCommitment + uint256 _challengedBlockNumber, + bytes calldata _challengedCommitment ) public view returns (Challenge memory) { - return challenges[challengedBlockNumber][challengedCommitment]; + return challenges[_challengedBlockNumber][_challengedCommitment]; } /// @notice Returns the status of a challenge for a given challenged block number and challenged commitment. - /// @param challengedBlockNumber The block number at which the commitment was made. - /// @param challengedCommitment The commitment that is being challenged. + /// @param _challengedBlockNumber The block number at which the commitment was made. + /// @param _challengedCommitment The commitment that is being challenged. /// @return The status of the challenge. function getChallengeStatus( - uint256 challengedBlockNumber, - bytes calldata challengedCommitment + uint256 _challengedBlockNumber, + bytes calldata _challengedCommitment ) public view returns (ChallengeStatus) { - Challenge memory _challenge = challenges[challengedBlockNumber][challengedCommitment]; + Challenge memory _challenge = challenges[_challengedBlockNumber][_challengedCommitment]; // if the address is 0, the challenge is uninitialized if (_challenge.challenger == address(0)) return ChallengeStatus.Uninitialized; @@ -267,22 +267,22 @@ contract DataAvailabilityChallenge is OwnableUpgradeable, ISemver { /// @notice Extract the commitment type from a given commitment. /// @dev The commitment type is located in the first byte of the commitment. - /// @param commitment The commitment from which to extract the commitment type. + /// @param _commitment The commitment from which to extract the commitment type. /// @return The commitment type of the given commitment. 
- function _getCommitmentType(bytes calldata commitment) internal pure returns (uint8) { - return uint8(bytes1(commitment)); + function _getCommitmentType(bytes calldata _commitment) internal pure returns (uint8) { + return uint8(bytes1(_commitment)); } /// @notice Validate that a given commitment has a known type and the expected length for this type. /// @dev The type of a commitment is stored in its first byte. /// The function reverts with `UnknownCommitmentType` if the type is not known and /// with `InvalidCommitmentLength` if the commitment has an unexpected length. - /// @param commitment The commitment for which to check the type. - function validateCommitment(bytes calldata commitment) public pure { - uint8 commitmentType = _getCommitmentType(commitment); + /// @param _commitment The commitment for which to check the type. + function validateCommitment(bytes calldata _commitment) public pure { + uint8 commitmentType = _getCommitmentType(_commitment); if (commitmentType == uint8(CommitmentType.Keccak256)) { - if (commitment.length != 33) { - revert InvalidCommitmentLength(uint8(CommitmentType.Keccak256), 33, commitment.length); + if (_commitment.length != 33) { + revert InvalidCommitmentLength(uint8(CommitmentType.Keccak256), 33, _commitment.length); } return; } @@ -295,11 +295,11 @@ contract DataAvailabilityChallenge is OwnableUpgradeable, ISemver { /// since the contract cannot access the block number of the commitment. /// The function reverts if the commitment type (first byte) is unknown, /// if the caller does not have a bond or if the challenge already exists. - /// @param challengedBlockNumber The block number at which the commitment was made. - /// @param challengedCommitment The commitment that is being challenged. - function challenge(uint256 challengedBlockNumber, bytes calldata challengedCommitment) external payable { + /// @param _challengedBlockNumber The block number at which the commitment was made. 
+ /// @param _challengedCommitment The commitment that is being challenged. + function challenge(uint256 _challengedBlockNumber, bytes calldata _challengedCommitment) external payable { // require the commitment type to be known - validateCommitment(challengedCommitment); + validateCommitment(_challengedCommitment); // deposit value sent with the transaction as bond deposit(); @@ -310,12 +310,12 @@ contract DataAvailabilityChallenge is OwnableUpgradeable, ISemver { } // require the challenge status to be uninitialized - if (getChallengeStatus(challengedBlockNumber, challengedCommitment) != ChallengeStatus.Uninitialized) { + if (getChallengeStatus(_challengedBlockNumber, _challengedCommitment) != ChallengeStatus.Uninitialized) { revert ChallengeExists(); } // require the current block to be in the challenge window - if (!_isInChallengeWindow(challengedBlockNumber)) { + if (!_isInChallengeWindow(_challengedBlockNumber)) { revert ChallengeWindowNotOpen(); } @@ -323,11 +323,11 @@ contract DataAvailabilityChallenge is OwnableUpgradeable, ISemver { balances[msg.sender] -= bondSize; // store the challenger's address, bond size, and start block of the challenge - challenges[challengedBlockNumber][challengedCommitment] = + challenges[_challengedBlockNumber][_challengedCommitment] = Challenge({ challenger: msg.sender, lockedBond: bondSize, startBlock: block.number, resolvedBlock: 0 }); // emit an event to notify that the challenge status is now active - emit ChallengeStatusChanged(challengedBlockNumber, challengedCommitment, ChallengeStatus.Active); + emit ChallengeStatusChanged(_challengedBlockNumber, _challengedCommitment, ChallengeStatus.Active); } /// @notice Resolve a challenge by providing the data corresponding to the challenged commitment. @@ -335,45 +335,45 @@ contract DataAvailabilityChallenge is OwnableUpgradeable, ISemver { /// challenged commitment. 
/// It reverts if the commitment type is unknown, if the data doesn't match the commitment, /// if the challenge is not active or if the resolve window is not open. - /// @param challengedBlockNumber The block number at which the commitment was made. - /// @param challengedCommitment The challenged commitment that is being resolved. - /// @param resolveData The pre-image data corresponding to the challenged commitment. + /// @param _challengedBlockNumber The block number at which the commitment was made. + /// @param _challengedCommitment The challenged commitment that is being resolved. + /// @param _resolveData The pre-image data corresponding to the challenged commitment. function resolve( - uint256 challengedBlockNumber, - bytes calldata challengedCommitment, - bytes calldata resolveData + uint256 _challengedBlockNumber, + bytes calldata _challengedCommitment, + bytes calldata _resolveData ) external { // require the commitment type to be known - validateCommitment(challengedCommitment); + validateCommitment(_challengedCommitment); // require the challenge to be active (started, not resolved, and resolve window still open) - if (getChallengeStatus(challengedBlockNumber, challengedCommitment) != ChallengeStatus.Active) { + if (getChallengeStatus(_challengedBlockNumber, _challengedCommitment) != ChallengeStatus.Active) { revert ChallengeNotActive(); } // compute the commitment corresponding to the given resolveData - uint8 commitmentType = _getCommitmentType(challengedCommitment); + uint8 commitmentType = _getCommitmentType(_challengedCommitment); bytes memory computedCommitment; if (commitmentType == uint8(CommitmentType.Keccak256)) { - computedCommitment = computeCommitmentKeccak256(resolveData); + computedCommitment = computeCommitmentKeccak256(_resolveData); } // require the provided input data to correspond to the challenged commitment - if (keccak256(computedCommitment) != keccak256(challengedCommitment)) { - revert InvalidInputData(computedCommitment, 
challengedCommitment); + if (keccak256(computedCommitment) != keccak256(_challengedCommitment)) { + revert InvalidInputData(computedCommitment, _challengedCommitment); } // store the block number at which the challenge was resolved - Challenge storage activeChallenge = challenges[challengedBlockNumber][challengedCommitment]; + Challenge storage activeChallenge = challenges[_challengedBlockNumber][_challengedCommitment]; activeChallenge.resolvedBlock = block.number; // emit an event to notify that the challenge status is now resolved - emit ChallengeStatusChanged(challengedBlockNumber, challengedCommitment, ChallengeStatus.Resolved); + emit ChallengeStatusChanged(_challengedBlockNumber, _challengedCommitment, ChallengeStatus.Resolved); // distribute the bond among challenger, resolver and address(0) - _distributeBond(activeChallenge, resolveData.length, msg.sender); + _distributeBond(activeChallenge, _resolveData.length, msg.sender); } /// @notice Distribute the bond of a resolved challenge among the resolver, challenger and address(0). @@ -385,16 +385,22 @@ contract DataAvailabilityChallenge is OwnableUpgradeable, ISemver { /// pre-image. /// The real resolution cost might vary, because calldata is priced differently for zero and non-zero bytes. /// Computing the exact cost adds too much gas overhead to be worth the tradeoff. - /// @param resolvedChallenge The resolved challenge in storage. - /// @param preImageLength The size of the pre-image used to resolve the challenge. - /// @param resolver The address of the resolver. - function _distributeBond(Challenge storage resolvedChallenge, uint256 preImageLength, address resolver) internal { - uint256 lockedBond = resolvedChallenge.lockedBond; - address challenger = resolvedChallenge.challenger; + /// @param _resolvedChallenge The resolved challenge in storage. + /// @param _preImageLength The size of the pre-image used to resolve the challenge. + /// @param _resolver The address of the resolver. 
+ function _distributeBond( + Challenge storage _resolvedChallenge, + uint256 _preImageLength, + address _resolver + ) + internal + { + uint256 lockedBond = _resolvedChallenge.lockedBond; + address challenger = _resolvedChallenge.challenger; // approximate the cost of resolving a challenge with the provided pre-image size uint256 resolutionCost = ( - fixedResolutionCost + preImageLength * variableResolutionCost / variableResolutionCostPrecision + fixedResolutionCost + _preImageLength * variableResolutionCost / variableResolutionCostPrecision ) * block.basefee; // refund bond exceeding the resolution cost to the challenger @@ -410,31 +416,31 @@ contract DataAvailabilityChallenge is OwnableUpgradeable, ISemver { resolverRefund = lockedBond; } if (resolverRefund > 0) { - balances[resolver] += resolverRefund; + balances[_resolver] += resolverRefund; lockedBond -= resolverRefund; - emit BalanceChanged(resolver, balances[resolver]); + emit BalanceChanged(_resolver, balances[_resolver]); } // burn the remaining bond if (lockedBond > 0) { payable(address(0)).transfer(lockedBond); } - resolvedChallenge.lockedBond = 0; + _resolvedChallenge.lockedBond = 0; } /// @notice Unlock the bond associated wth an expired challenge. /// @dev The function reverts if the challenge is not expired. /// If the expiration is successful, the challenger's bond is unlocked. - /// @param challengedBlockNumber The block number at which the commitment was made. - /// @param challengedCommitment The commitment that is being challenged. - function unlockBond(uint256 challengedBlockNumber, bytes calldata challengedCommitment) external { + /// @param _challengedBlockNumber The block number at which the commitment was made. + /// @param _challengedCommitment The commitment that is being challenged. 
+ function unlockBond(uint256 _challengedBlockNumber, bytes calldata _challengedCommitment) external { // require the challenge to be active (started, not resolved, and in the resolve window) - if (getChallengeStatus(challengedBlockNumber, challengedCommitment) != ChallengeStatus.Expired) { + if (getChallengeStatus(_challengedBlockNumber, _challengedCommitment) != ChallengeStatus.Expired) { revert ChallengeNotExpired(); } // Unlock the bond associated with the challenge - Challenge storage expiredChallenge = challenges[challengedBlockNumber][challengedCommitment]; + Challenge storage expiredChallenge = challenges[_challengedBlockNumber][_challengedCommitment]; balances[expiredChallenge.challenger] += expiredChallenge.lockedBond; expiredChallenge.lockedBond = 0; @@ -444,8 +450,8 @@ contract DataAvailabilityChallenge is OwnableUpgradeable, ISemver { } /// @notice Compute the expected commitment for a given blob of data. -/// @param data The blob of data to compute a commitment for. +/// @param _data The blob of data to compute a commitment for. /// @return The commitment for the given blob of data. -function computeCommitmentKeccak256(bytes memory data) pure returns (bytes memory) { - return bytes.concat(bytes1(uint8(CommitmentType.Keccak256)), keccak256(data)); +function computeCommitmentKeccak256(bytes memory _data) pure returns (bytes memory) { + return bytes.concat(bytes1(uint8(CommitmentType.Keccak256)), keccak256(_data)); } diff --git a/packages/contracts-bedrock/src/L1/DelayedVetoable.sol b/packages/contracts-bedrock/src/L1/DelayedVetoable.sol index ad45b4c9b20a..d968af214975 100644 --- a/packages/contracts-bedrock/src/L1/DelayedVetoable.sol +++ b/packages/contracts-bedrock/src/L1/DelayedVetoable.sol @@ -69,21 +69,21 @@ contract DelayedVetoable is ISemver { } /// @notice Semantic version. 
- /// @custom:semver 1.0.1-beta.1 - string public constant version = "1.0.1-beta.1"; + /// @custom:semver 1.0.1-beta.2 + string public constant version = "1.0.1-beta.2"; /// @notice Sets the target admin during contract deployment. - /// @param vetoer_ Address of the vetoer. - /// @param initiator_ Address of the initiator. - /// @param target_ Address of the target. - /// @param operatingDelay_ Time to delay when the system is operational. - constructor(address vetoer_, address initiator_, address target_, uint256 operatingDelay_) { + /// @param _vetoer Address of the vetoer. + /// @param _initiator Address of the initiator. + /// @param _target Address of the target. + /// @param _operatingDelay Time to delay when the system is operational. + constructor(address _vetoer, address _initiator, address _target, uint256 _operatingDelay) { // Note that the _delay value is not set here. Having an initial delay of 0 is helpful // during the deployment of a new system. - VETOER = vetoer_; - INITIATOR = initiator_; - TARGET = target_; - OPERATING_DELAY = operatingDelay_; + VETOER = _vetoer; + INITIATOR = _initiator; + TARGET = _target; + OPERATING_DELAY = _operatingDelay; } /// @notice Gets the initiator @@ -111,10 +111,10 @@ contract DelayedVetoable is ISemver { } /// @notice Gets entries in the _queuedAt mapping. - /// @param callHash The hash of the call data. + /// @param _callHash The hash of the call data. /// @return queuedAt_ The time the callHash was recorded. - function queuedAt(bytes32 callHash) external readOrHandle returns (uint256 queuedAt_) { - queuedAt_ = _queuedAt[callHash]; + function queuedAt(bytes32 _callHash) external readOrHandle returns (uint256 queuedAt_) { + queuedAt_ = _queuedAt[_callHash]; } /// @notice Used for all calls that pass data to the contract. @@ -176,9 +176,9 @@ contract DelayedVetoable is ISemver { } /// @notice Forwards the call to the target and halts the call frame. 
- function _forwardAndHalt(bytes32 callHash) internal { + function _forwardAndHalt(bytes32 _callHash) internal { // Forward the call - emit Forwarded(callHash, msg.data); + emit Forwarded(_callHash, msg.data); (bool success, bytes memory returndata) = TARGET.call(msg.data); if (success == true) { assembly { diff --git a/packages/contracts-bedrock/src/L1/L1CrossDomainMessenger.sol b/packages/contracts-bedrock/src/L1/L1CrossDomainMessenger.sol index 8df4d9bfe765..27be4a7332fa 100644 --- a/packages/contracts-bedrock/src/L1/L1CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/src/L1/L1CrossDomainMessenger.sol @@ -30,8 +30,8 @@ contract L1CrossDomainMessenger is CrossDomainMessenger, ISemver { ISystemConfig public systemConfig; /// @notice Semantic version. - /// @custom:semver 2.4.1-beta.1 - string public constant version = "2.4.1-beta.1"; + /// @custom:semver 2.4.1-beta.2 + string public constant version = "2.4.1-beta.2"; /// @notice Constructs the L1CrossDomainMessenger contract. constructor() CrossDomainMessenger() { @@ -61,8 +61,8 @@ contract L1CrossDomainMessenger is CrossDomainMessenger, ISemver { } /// @inheritdoc CrossDomainMessenger - function gasPayingToken() internal view override returns (address _addr, uint8 _decimals) { - (_addr, _decimals) = systemConfig.gasPayingToken(); + function gasPayingToken() internal view override returns (address addr_, uint8 decimals_) { + (addr_, decimals_) = systemConfig.gasPayingToken(); } /// @notice Getter function for the OptimismPortal contract on this chain. 
diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index d05ba7c8821a..93b9b71b00c3 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -124,8 +124,8 @@ contract OPContractsManager is ISemver, Initializable { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.7 - string public constant version = "1.0.0-beta.7"; + /// @custom:semver 1.0.0-beta.8 + string public constant version = "1.0.0-beta.8"; /// @notice Represents the interface version so consumers know how to decode the DeployOutput struct /// that's emitted in the `Deployed` event. Whenever that struct changes, a new version should be used. @@ -432,7 +432,7 @@ contract OPContractsManager is ISemver, Initializable { /// @notice Helper method for encoding the SystemConfig initializer data. function encodeSystemConfigInitializer( - bytes4 selector, + bytes4 _selector, DeployInput memory _input, DeployOutput memory _output ) @@ -442,10 +442,10 @@ contract OPContractsManager is ISemver, Initializable { returns (bytes memory) { (ResourceMetering.ResourceConfig memory referenceResourceConfig, SystemConfig.Addresses memory opChainAddrs) = - defaultSystemConfigParams(selector, _input, _output); + defaultSystemConfigParams(_selector, _input, _output); return abi.encodeWithSelector( - selector, + _selector, _input.roles.systemConfigOwner, _input.basefeeScalar, _input.blobBasefeeScalar, diff --git a/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol b/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol index 90fa13e7455c..ae7ac71c2ae9 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol @@ -20,7 +20,7 @@ contract OPContractsManagerInterop is OPContractsManager { // The `SystemConfigInterop` contract has an extra `address 
_dependencyManager` argument // that we must account for. function encodeSystemConfigInitializer( - bytes4 selector, + bytes4 _selector, DeployInput memory _input, DeployOutput memory _output ) @@ -31,7 +31,7 @@ contract OPContractsManagerInterop is OPContractsManager { returns (bytes memory) { (ResourceMetering.ResourceConfig memory referenceResourceConfig, SystemConfig.Addresses memory opChainAddrs) = - defaultSystemConfigParams(selector, _input, _output); + defaultSystemConfigParams(_selector, _input, _output); // TODO For now we assume that the dependency manager is the same as the proxy admin owner. // This is currently undefined since it's not part of the standard config, so we may need @@ -41,7 +41,7 @@ contract OPContractsManagerInterop is OPContractsManager { address dependencyManager = address(_input.roles.opChainProxyAdminOwner); return abi.encodeWithSelector( - selector, + _selector, _input.roles.systemConfigOwner, _input.basefeeScalar, _input.blobBasefeeScalar, diff --git a/packages/contracts-bedrock/src/L1/interfaces/IDataAvailabilityChallenge.sol b/packages/contracts-bedrock/src/L1/interfaces/IDataAvailabilityChallenge.sol index db3c1680e6c5..2f940e92f76f 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IDataAvailabilityChallenge.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/IDataAvailabilityChallenge.sol @@ -44,20 +44,20 @@ interface IDataAvailabilityChallenge { function balances(address) external view returns (uint256); function bondSize() external view returns (uint256); - function challenge(uint256 challengedBlockNumber, bytes memory challengedCommitment) external payable; + function challenge(uint256 _challengedBlockNumber, bytes memory _challengedCommitment) external payable; function challengeWindow() external view returns (uint256); function deposit() external payable; function fixedResolutionCost() external view returns (uint256); function getChallenge( - uint256 challengedBlockNumber, - bytes memory challengedCommitment + 
uint256 _challengedBlockNumber, + bytes memory _challengedCommitment ) external view returns (Challenge memory); function getChallengeStatus( - uint256 challengedBlockNumber, - bytes memory challengedCommitment + uint256 _challengedBlockNumber, + bytes memory _challengedCommitment ) external view @@ -73,18 +73,18 @@ interface IDataAvailabilityChallenge { function owner() external view returns (address); function renounceOwnership() external; function resolve( - uint256 challengedBlockNumber, - bytes memory challengedCommitment, - bytes memory resolveData + uint256 _challengedBlockNumber, + bytes memory _challengedCommitment, + bytes memory _resolveData ) external; function resolveWindow() external view returns (uint256); function resolverRefundPercentage() external view returns (uint256); function setBondSize(uint256 _bondSize) external; function setResolverRefundPercentage(uint256 _resolverRefundPercentage) external; - function transferOwnership(address newOwner) external; - function unlockBond(uint256 challengedBlockNumber, bytes memory challengedCommitment) external; - function validateCommitment(bytes memory commitment) external pure; + function transferOwnership(address newOwner) external; // nosemgrep + function unlockBond(uint256 _challengedBlockNumber, bytes memory _challengedCommitment) external; + function validateCommitment(bytes memory _commitment) external pure; function variableResolutionCost() external view returns (uint256); function variableResolutionCostPrecision() external view returns (uint256); function version() external view returns (string memory); diff --git a/packages/contracts-bedrock/src/L1/interfaces/IDelayedVetoable.sol b/packages/contracts-bedrock/src/L1/interfaces/IDelayedVetoable.sol index 0bea81fed34b..53fd16812763 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IDelayedVetoable.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/IDelayedVetoable.sol @@ -14,10 +14,10 @@ interface IDelayedVetoable { function delay() 
external returns (uint256 delay_); function initiator() external returns (address initiator_); - function queuedAt(bytes32 callHash) external returns (uint256 queuedAt_); + function queuedAt(bytes32 _callHash) external returns (uint256 queuedAt_); function target() external returns (address target_); function version() external view returns (string memory); function vetoer() external returns (address vetoer_); - function __constructor__(address vetoer_, address initiator_, address target_, uint256 operatingDelay_) external; + function __constructor__(address _vetoer, address _initiator, address _target, uint256 _operatingDelay) external; } diff --git a/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortal.sol b/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortal.sol index e80bad00b910..4155c62352e9 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortal.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortal.sol @@ -65,7 +65,7 @@ interface IOptimismPortal { function l2Oracle() external view returns (IL2OutputOracle); function l2Sender() external view returns (address); function minimumGasLimit(uint64 _byteCount) external pure returns (uint64); - function params() external view returns (uint128 prevBaseFee, uint64 prevBoughtGas, uint64 prevBlockNum); + function params() external view returns (uint128 prevBaseFee, uint64 prevBoughtGas, uint64 prevBlockNum); // nosemgrep function paused() external view returns (bool paused_); function proveWithdrawalTransaction( Types.WithdrawalTransaction memory _tx, @@ -77,7 +77,7 @@ interface IOptimismPortal { function provenWithdrawals(bytes32) external view - returns (bytes32 outputRoot, uint128 timestamp, uint128 l2OutputIndex); + returns (bytes32 outputRoot, uint128 timestamp, uint128 l2OutputIndex); // nosemgrep function setGasPayingToken(address _token, uint8 _decimals, bytes32 _name, bytes32 _symbol) external; function superchainConfig() external view returns 
(ISuperchainConfig); function systemConfig() external view returns (ISystemConfig); diff --git a/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortal2.sol b/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortal2.sol index 551bd2832b05..91f09d714314 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortal2.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortal2.sol @@ -88,7 +88,7 @@ interface IOptimismPortal2 { function l2Sender() external view returns (address); function minimumGasLimit(uint64 _byteCount) external pure returns (uint64); function numProofSubmitters(bytes32 _withdrawalHash) external view returns (uint256); - function params() external view returns (uint128 prevBaseFee, uint64 prevBoughtGas, uint64 prevBlockNum); + function params() external view returns (uint128 prevBaseFee, uint64 prevBoughtGas, uint64 prevBlockNum); // nosemgrep function paused() external view returns (bool); function proofMaturityDelaySeconds() external view returns (uint256); function proofSubmitters(bytes32, uint256) external view returns (address); @@ -105,7 +105,7 @@ interface IOptimismPortal2 { ) external view - returns (IDisputeGame disputeGameProxy, uint64 timestamp); + returns (IDisputeGame disputeGameProxy, uint64 timestamp); // nosemgrep function respectedGameType() external view returns (GameType); function respectedGameTypeUpdatedAt() external view returns (uint64); function setGasPayingToken(address _token, uint8 _decimals, bytes32 _name, bytes32 _symbol) external; diff --git a/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortalInterop.sol b/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortalInterop.sol index 682518897362..fc2d7528f802 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortalInterop.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortalInterop.sol @@ -89,7 +89,7 @@ interface IOptimismPortalInterop { function l2Sender() external view returns (address); 
function minimumGasLimit(uint64 _byteCount) external pure returns (uint64); function numProofSubmitters(bytes32 _withdrawalHash) external view returns (uint256); - function params() external view returns (uint128 prevBaseFee, uint64 prevBoughtGas, uint64 prevBlockNum); + function params() external view returns (uint128 prevBaseFee, uint64 prevBoughtGas, uint64 prevBlockNum); // nosemgrep function paused() external view returns (bool); function proofMaturityDelaySeconds() external view returns (uint256); function proofSubmitters(bytes32, uint256) external view returns (address); @@ -106,7 +106,7 @@ interface IOptimismPortalInterop { ) external view - returns (IDisputeGame disputeGameProxy, uint64 timestamp); + returns (IDisputeGame disputeGameProxy, uint64 timestamp); // nosemgrep function respectedGameType() external view returns (GameType); function respectedGameTypeUpdatedAt() external view returns (uint64); function setConfig(ConfigType _type, bytes memory _value) external; diff --git a/packages/contracts-bedrock/src/L1/interfaces/IProtocolVersions.sol b/packages/contracts-bedrock/src/L1/interfaces/IProtocolVersions.sol index aa2de51d4846..9b4aef16956f 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IProtocolVersions.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/IProtocolVersions.sol @@ -23,7 +23,7 @@ interface IProtocolVersions { function required() external view returns (ProtocolVersion out_); function setRecommended(ProtocolVersion _recommended) external; function setRequired(ProtocolVersion _required) external; - function transferOwnership(address newOwner) external; + function transferOwnership(address newOwner) external; // nosemgrep function version() external view returns (string memory); function __constructor__() external; diff --git a/packages/contracts-bedrock/src/L1/interfaces/IResourceMetering.sol b/packages/contracts-bedrock/src/L1/interfaces/IResourceMetering.sol index 4a4ccc133bb8..21672340fd4f 100644 --- 
a/packages/contracts-bedrock/src/L1/interfaces/IResourceMetering.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/IResourceMetering.sol @@ -21,5 +21,5 @@ interface IResourceMetering { event Initialized(uint8 version); - function params() external view returns (uint128 prevBaseFee, uint64 prevBoughtGas, uint64 prevBlockNum); + function params() external view returns (uint128 prevBaseFee, uint64 prevBoughtGas, uint64 prevBlockNum); // nosemgrep } diff --git a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfig.sol b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfig.sol index 59ae98668cf0..37ab1512a031 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfig.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfig.sol @@ -75,7 +75,7 @@ interface ISystemConfig { function setGasLimit(uint64 _gasLimit) external; function setUnsafeBlockSigner(address _unsafeBlockSigner) external; function startBlock() external view returns (uint256 startBlock_); - function transferOwnership(address newOwner) external; + function transferOwnership(address newOwner) external; // nosemgrep function unsafeBlockSigner() external view returns (address addr_); function version() external pure returns (string memory); diff --git a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigInterop.sol b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigInterop.sol index cffa30dd3efc..346220e60b95 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigInterop.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigInterop.sol @@ -59,7 +59,7 @@ interface ISystemConfigInterop { function setGasLimit(uint64 _gasLimit) external; function setUnsafeBlockSigner(address _unsafeBlockSigner) external; function startBlock() external view returns (uint256 startBlock_); - function transferOwnership(address newOwner) external; + function transferOwnership(address newOwner) external; // nosemgrep function unsafeBlockSigner() 
external view returns (address addr_); function addDependency(uint256 _chainId) external; diff --git a/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol b/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol index 4570e8191ca6..9f85a0fe1b89 100644 --- a/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol @@ -57,8 +57,8 @@ contract L2ToL2CrossDomainMessenger is IL2ToL2CrossDomainMessenger, ISemver, Tra uint16 public constant messageVersion = uint16(0); /// @notice Semantic version. - /// @custom:semver 1.0.0-beta.4 - string public constant version = "1.0.0-beta.4"; + /// @custom:semver 1.0.0-beta.5 + string public constant version = "1.0.0-beta.5"; /// @notice Mapping of message hashes to boolean receipt values. Note that a message will only be present in this /// mapping if it has successfully been relayed on this chain, and can therefore not be relayed again. @@ -78,18 +78,18 @@ contract L2ToL2CrossDomainMessenger is IL2ToL2CrossDomainMessenger, ISemver, Tra event FailedRelayedMessage(bytes32 indexed messageHash); /// @notice Retrieves the sender of the current cross domain message. If not entered, reverts. - /// @return _sender Address of the sender of the current cross domain message. - function crossDomainMessageSender() external view onlyEntered returns (address _sender) { + /// @return sender_ Address of the sender of the current cross domain message. + function crossDomainMessageSender() external view onlyEntered returns (address sender_) { assembly { - _sender := tload(CROSS_DOMAIN_MESSAGE_SENDER_SLOT) + sender_ := tload(CROSS_DOMAIN_MESSAGE_SENDER_SLOT) } } /// @notice Retrieves the source of the current cross domain message. If not entered, reverts. - /// @return _source Chain ID of the source of the current cross domain message. 
- function crossDomainMessageSource() external view onlyEntered returns (uint256 _source) { + /// @return source_ Chain ID of the source of the current cross domain message. + function crossDomainMessageSource() external view onlyEntered returns (uint256 source_) { assembly { - _source := tload(CROSS_DOMAIN_MESSAGE_SOURCE_SLOT) + source_ := tload(CROSS_DOMAIN_MESSAGE_SOURCE_SLOT) } } diff --git a/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20.sol b/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20.sol index 81cef632bfbe..c59eb7d04d78 100644 --- a/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20.sol +++ b/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20.sol @@ -50,9 +50,9 @@ contract OptimismSuperchainERC20 is } /// @notice Returns the storage for the OptimismSuperchainERC20Metadata. - function _getStorage() private pure returns (OptimismSuperchainERC20Metadata storage _storage) { + function _getStorage() private pure returns (OptimismSuperchainERC20Metadata storage storage_) { assembly { - _storage.slot := OPTIMISM_SUPERCHAIN_ERC20_METADATA_SLOT + storage_.slot := OPTIMISM_SUPERCHAIN_ERC20_METADATA_SLOT } } @@ -63,8 +63,8 @@ contract OptimismSuperchainERC20 is } /// @notice Semantic version. - /// @custom:semver 1.0.0-beta.2 - string public constant version = "1.0.0-beta.2"; + /// @custom:semver 1.0.0-beta.3 + string public constant version = "1.0.0-beta.3"; /// @notice Constructs the OptimismSuperchainERC20 contract. constructor() { diff --git a/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20Factory.sol b/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20Factory.sol index 426510159720..ffeb132e6cdf 100644 --- a/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20Factory.sol +++ b/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20Factory.sol @@ -27,15 +27,15 @@ contract OptimismSuperchainERC20Factory is IOptimismERC20Factory, ISemver { ); /// @notice Semantic version. 
- /// @custom:semver 1.0.0-beta.1 - string public constant version = "1.0.0-beta.1"; + /// @custom:semver 1.0.0-beta.2 + string public constant version = "1.0.0-beta.2"; /// @notice Deploys a OptimismSuperchainERC20 Beacon Proxy using CREATE3. /// @param _remoteToken Address of the remote token. /// @param _name Name of the OptimismSuperchainERC20. /// @param _symbol Symbol of the OptimismSuperchainERC20. /// @param _decimals Decimals of the OptimismSuperchainERC20. - /// @return _superchainERC20 Address of the OptimismSuperchainERC20 deployment. + /// @return superchainERC20_ Address of the OptimismSuperchainERC20 deployment. function deploy( address _remoteToken, string memory _name, @@ -43,7 +43,7 @@ contract OptimismSuperchainERC20Factory is IOptimismERC20Factory, ISemver { uint8 _decimals ) external - returns (address _superchainERC20) + returns (address superchainERC20_) { bytes memory initCallData = abi.encodeCall(OptimismSuperchainERC20.initialize, (_remoteToken, _name, _symbol, _decimals)); @@ -53,10 +53,10 @@ contract OptimismSuperchainERC20Factory is IOptimismERC20Factory, ISemver { ); bytes32 salt = keccak256(abi.encode(_remoteToken, _name, _symbol, _decimals)); - _superchainERC20 = CREATE3.deploy({ salt: salt, creationCode: creationCode, value: 0 }); + superchainERC20_ = CREATE3.deploy({ salt: salt, creationCode: creationCode, value: 0 }); - deployments[_superchainERC20] = _remoteToken; + deployments[superchainERC20_] = _remoteToken; - emit OptimismSuperchainERC20Created(_superchainERC20, _remoteToken, msg.sender); + emit OptimismSuperchainERC20Created(superchainERC20_, _remoteToken, msg.sender); } } diff --git a/packages/contracts-bedrock/src/L2/interfaces/ICrossL2Inbox.sol b/packages/contracts-bedrock/src/L2/interfaces/ICrossL2Inbox.sol index 3d8fa8a471a8..3267122fc0b1 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/ICrossL2Inbox.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/ICrossL2Inbox.sol @@ -18,24 +18,24 @@ interface 
ICrossL2Inbox { function interopStart() external view returns (uint256 interopStart_); /// @notice Returns the origin address of the Identifier. - /// @return _origin The origin address of the Identifier. - function origin() external view returns (address _origin); + /// @return origin_ The origin address of the Identifier. + function origin() external view returns (address origin_); /// @notice Returns the block number of the Identifier. - /// @return _blockNumber The block number of the Identifier. - function blockNumber() external view returns (uint256 _blockNumber); + /// @return blockNumber_ The block number of the Identifier. + function blockNumber() external view returns (uint256 blockNumber_); /// @notice Returns the log index of the Identifier. - /// @return _logIndex The log index of the Identifier. - function logIndex() external view returns (uint256 _logIndex); + /// @return logIndex_ The log index of the Identifier. + function logIndex() external view returns (uint256 logIndex_); /// @notice Returns the timestamp of the Identifier. - /// @return _timestamp The timestamp of the Identifier. - function timestamp() external view returns (uint256 _timestamp); + /// @return timestamp_ The timestamp of the Identifier. + function timestamp() external view returns (uint256 timestamp_); /// @notice Returns the chain ID of the Identifier. - /// @return _chainId The chain ID of the Identifier. - function chainId() external view returns (uint256 _chainId); + /// @return chainId_ The chain ID of the Identifier. + function chainId() external view returns (uint256 chainId_); /// @notice Executes a cross chain message on the destination chain. /// @param _id An Identifier pointing to the initiating message. 
diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol b/packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol index cb8d1952de65..0794fb11ec35 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol @@ -18,12 +18,12 @@ interface IL2ToL2CrossDomainMessenger { function messageNonce() external view returns (uint256); /// @notice Retrieves the sender of the current cross domain message. - /// @return _sender Address of the sender of the current cross domain message. - function crossDomainMessageSender() external view returns (address _sender); + /// @return sender_ Address of the sender of the current cross domain message. + function crossDomainMessageSender() external view returns (address sender_); /// @notice Retrieves the source of the current cross domain message. - /// @return _source Chain ID of the source of the current cross domain message. - function crossDomainMessageSource() external view returns (uint256 _source); + /// @return source_ Chain ID of the source of the current cross domain message. + function crossDomainMessageSource() external view returns (uint256 source_); /// @notice Sends a message to some target address on a destination chain. Note that if the call /// always reverts, then the message will be unrelayable, and any ETH sent will be diff --git a/packages/contracts-bedrock/src/L2/interfaces/IOptimismERC20Factory.sol b/packages/contracts-bedrock/src/L2/interfaces/IOptimismERC20Factory.sol index 5e0040aa83cf..3ca6357b402d 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IOptimismERC20Factory.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IOptimismERC20Factory.sol @@ -7,6 +7,6 @@ pragma solidity ^0.8.0; interface IOptimismERC20Factory { /// @notice Checks if a ERC20 token is deployed by the factory. 
/// @param _localToken The address of the ERC20 token to check the deployment. - /// @return _remoteToken The address of the remote token if it is deployed or `address(0)` if not. - function deployments(address _localToken) external view returns (address _remoteToken); + /// @return remoteToken_ The address of the remote token if it is deployed or `address(0)` if not. + function deployments(address _localToken) external view returns (address remoteToken_); } diff --git a/packages/contracts-bedrock/src/cannon/MIPS2.sol b/packages/contracts-bedrock/src/cannon/MIPS2.sol index 487ea0aac6df..fb8409f6b41c 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS2.sol +++ b/packages/contracts-bedrock/src/cannon/MIPS2.sol @@ -57,8 +57,8 @@ contract MIPS2 is ISemver { } /// @notice The semantic version of the MIPS2 contract. - /// @custom:semver 1.0.0-beta.11 - string public constant version = "1.0.0-beta.11"; + /// @custom:semver 1.0.0-beta.12 + string public constant version = "1.0.0-beta.12"; /// @notice The preimage oracle contract. IPreimageOracle internal immutable ORACLE; @@ -595,11 +595,11 @@ contract MIPS2 is ISemver { ) internal view - returns (uint32 v0, uint32 v1) + returns (uint32 v0_, uint32 v1_) { bool memUpdated; uint32 memAddr; - (v0, v1, _state.preimageOffset, _state.memRoot, memUpdated, memAddr) = sys.handleSysRead(_args); + (v0_, v1_, _state.preimageOffset, _state.memRoot, memUpdated, memAddr) = sys.handleSysRead(_args); if (memUpdated) { handleMemoryUpdate(_state, memAddr); } @@ -717,7 +717,7 @@ contract MIPS2 is ISemver { ) internal pure - returns (bool _changedDirections) + returns (bool changedDirections_) { // pop thread from the current stack and push to the other stack if (_state.traverseRight) { @@ -732,7 +732,7 @@ contract MIPS2 is ISemver { bytes32 current = _state.traverseRight ? 
_state.rightThreadStack : _state.leftThreadStack; if (current == EMPTY_THREAD_ROOT) { _state.traverseRight = !_state.traverseRight; - _changedDirections = true; + changedDirections_ = true; } _state.stepsSinceLastContextSwitch = 0; } @@ -768,10 +768,10 @@ contract MIPS2 is ISemver { return inactiveStack == EMPTY_THREAD_ROOT && currentStackIsAlmostEmpty; } - function computeThreadRoot(bytes32 _currentRoot, ThreadState memory _thread) internal pure returns (bytes32 _out) { + function computeThreadRoot(bytes32 _currentRoot, ThreadState memory _thread) internal pure returns (bytes32 out_) { // w_i = hash(w_0 ++ hash(thread)) bytes32 threadRoot = outputThreadState(_thread); - _out = keccak256(abi.encodePacked(_currentRoot, threadRoot)); + out_ = keccak256(abi.encodePacked(_currentRoot, threadRoot)); } function outputThreadState(ThreadState memory _thread) internal pure returns (bytes32 out_) { diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPSState.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPSState.sol index 2f76a2e0dda5..f9631e29e082 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPSState.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPSState.sol @@ -11,8 +11,8 @@ library MIPSState { uint32 hi; } - function assertExitedIsValid(uint32 exited) internal pure { - if (exited > 1) { + function assertExitedIsValid(uint32 _exited) internal pure { + if (_exited > 1) { revert InvalidExitedValue(); } } diff --git a/packages/contracts-bedrock/src/dispute/interfaces/IAnchorStateRegistry.sol b/packages/contracts-bedrock/src/dispute/interfaces/IAnchorStateRegistry.sol index 28c544c0d408..4de2bb1deab6 100644 --- a/packages/contracts-bedrock/src/dispute/interfaces/IAnchorStateRegistry.sol +++ b/packages/contracts-bedrock/src/dispute/interfaces/IAnchorStateRegistry.sol @@ -18,7 +18,7 @@ interface IAnchorStateRegistry { event Initialized(uint8 version); - function anchors(GameType) external view returns (Hash root, uint256 
l2BlockNumber); + function anchors(GameType) external view returns (Hash root, uint256 l2BlockNumber); // nosemgrep function disputeGameFactory() external view returns (IDisputeGameFactory); function initialize( StartingAnchorRoot[] memory _startingAnchorRoots, diff --git a/packages/contracts-bedrock/src/dispute/interfaces/IDelayedWETH.sol b/packages/contracts-bedrock/src/dispute/interfaces/IDelayedWETH.sol index 7a7b36052f3d..ef60aaa90298 100644 --- a/packages/contracts-bedrock/src/dispute/interfaces/IDelayedWETH.sol +++ b/packages/contracts-bedrock/src/dispute/interfaces/IDelayedWETH.sol @@ -21,7 +21,7 @@ interface IDelayedWETH is IWETH { function initialize(address _owner, ISuperchainConfig _config) external; function owner() external view returns (address); function recover(uint256 _wad) external; - function transferOwnership(address newOwner) external; + function transferOwnership(address newOwner) external; // nosemgrep function renounceOwnership() external; function unlock(address _guy, uint256 _wad) external; function withdraw(address _guy, uint256 _wad) external; diff --git a/packages/contracts-bedrock/src/dispute/interfaces/IDisputeGameFactory.sol b/packages/contracts-bedrock/src/dispute/interfaces/IDisputeGameFactory.sol index 1e70cbbb05bf..0f21d42aa27a 100644 --- a/packages/contracts-bedrock/src/dispute/interfaces/IDisputeGameFactory.sol +++ b/packages/contracts-bedrock/src/dispute/interfaces/IDisputeGameFactory.sol @@ -67,7 +67,7 @@ interface IDisputeGameFactory { function renounceOwnership() external; function setImplementation(GameType _gameType, IDisputeGame _impl) external; function setInitBond(GameType _gameType, uint256 _initBond) external; - function transferOwnership(address newOwner) external; + function transferOwnership(address newOwner) external; // nosemgrep function version() external view returns (string memory); function __constructor__() external; diff --git a/packages/contracts-bedrock/src/dispute/interfaces/IFaultDisputeGame.sol 
b/packages/contracts-bedrock/src/dispute/interfaces/IFaultDisputeGame.sol index 379c4fcb6a48..ec0f86ff709c 100644 --- a/packages/contracts-bedrock/src/dispute/interfaces/IFaultDisputeGame.sol +++ b/packages/contracts-bedrock/src/dispute/interfaces/IFaultDisputeGame.sol @@ -72,7 +72,7 @@ interface IFaultDisputeGame is IDisputeGame { function claimCredit(address _recipient) external; function claimData(uint256) external - view + view // nosemgrep returns ( uint32 parentIndex, address counteredBy, @@ -100,12 +100,12 @@ interface IFaultDisputeGame is IDisputeGame { function resolutionCheckpoints(uint256) external view - returns (bool initialCheckpointComplete, uint32 subgameIndex, Position leftmostPosition, address counteredBy); + returns (bool initialCheckpointComplete, uint32 subgameIndex, Position leftmostPosition, address counteredBy); // nosemgrep function resolveClaim(uint256 _claimIndex, uint256 _numToResolve) external; function resolvedSubgames(uint256) external view returns (bool); function splitDepth() external view returns (uint256 splitDepth_); function startingBlockNumber() external view returns (uint256 startingBlockNumber_); - function startingOutputRoot() external view returns (Hash root, uint256 l2BlockNumber); + function startingOutputRoot() external view returns (Hash root, uint256 l2BlockNumber); // nosemgrep function startingRootHash() external view returns (Hash startingRootHash_); function step(uint256 _claimIndex, bool _isAttack, bytes memory _stateData, bytes memory _proof) external; function subgames(uint256, uint256) external view returns (uint256); diff --git a/packages/contracts-bedrock/src/dispute/interfaces/IPermissionedDisputeGame.sol b/packages/contracts-bedrock/src/dispute/interfaces/IPermissionedDisputeGame.sol index 5fda4e9163b2..980d3460c048 100644 --- a/packages/contracts-bedrock/src/dispute/interfaces/IPermissionedDisputeGame.sol +++ b/packages/contracts-bedrock/src/dispute/interfaces/IPermissionedDisputeGame.sol @@ -73,7 +73,7 @@ 
interface IPermissionedDisputeGame is IDisputeGame { function claimCredit(address _recipient) external; function claimData(uint256) external - view + view // nosemgrep returns ( uint32 parentIndex, address counteredBy, @@ -101,12 +101,12 @@ interface IPermissionedDisputeGame is IDisputeGame { function resolutionCheckpoints(uint256) external view - returns (bool initialCheckpointComplete, uint32 subgameIndex, Position leftmostPosition, address counteredBy); + returns (bool initialCheckpointComplete, uint32 subgameIndex, Position leftmostPosition, address counteredBy); // nosemgrep function resolveClaim(uint256 _claimIndex, uint256 _numToResolve) external; function resolvedSubgames(uint256) external view returns (bool); function splitDepth() external view returns (uint256 splitDepth_); function startingBlockNumber() external view returns (uint256 startingBlockNumber_); - function startingOutputRoot() external view returns (Hash root, uint256 l2BlockNumber); + function startingOutputRoot() external view returns (Hash root, uint256 l2BlockNumber); // nosemgrep function startingRootHash() external view returns (Hash startingRootHash_); function step(uint256 _claimIndex, bool _isAttack, bytes memory _stateData, bytes memory _proof) external; function subgames(uint256, uint256) external view returns (uint256); diff --git a/packages/contracts-bedrock/src/governance/interfaces/IMintManager.sol b/packages/contracts-bedrock/src/governance/interfaces/IMintManager.sol index e769f3042e53..68399f3336c9 100644 --- a/packages/contracts-bedrock/src/governance/interfaces/IMintManager.sol +++ b/packages/contracts-bedrock/src/governance/interfaces/IMintManager.sol @@ -14,7 +14,7 @@ interface IMintManager { function mintPermittedAfter() external view returns (uint256); function owner() external view returns (address); function renounceOwnership() external; - function transferOwnership(address newOwner) external; + function transferOwnership(address newOwner) external; // nosemgrep 
function upgrade(address _newMintManager) external; function __constructor__(address _upgrader, address _governanceToken) external; diff --git a/packages/contracts-bedrock/src/libraries/Encoding.sol b/packages/contracts-bedrock/src/libraries/Encoding.sol index 7ab1a285841f..6c3b9a29aaa9 100644 --- a/packages/contracts-bedrock/src/libraries/Encoding.sol +++ b/packages/contracts-bedrock/src/libraries/Encoding.sol @@ -135,25 +135,25 @@ library Encoding { } /// @notice Returns an appropriately encoded call to L1Block.setL1BlockValuesEcotone - /// @param baseFeeScalar L1 base fee Scalar - /// @param blobBaseFeeScalar L1 blob base fee Scalar - /// @param sequenceNumber Number of L2 blocks since epoch start. - /// @param timestamp L1 timestamp. - /// @param number L1 blocknumber. - /// @param baseFee L1 base fee. - /// @param blobBaseFee L1 blob base fee. - /// @param hash L1 blockhash. - /// @param batcherHash Versioned hash to authenticate batcher by. + /// @param _baseFeeScalar L1 base fee Scalar + /// @param _blobBaseFeeScalar L1 blob base fee Scalar + /// @param _sequenceNumber Number of L2 blocks since epoch start. + /// @param _timestamp L1 timestamp. + /// @param _number L1 blocknumber. + /// @param _baseFee L1 base fee. + /// @param _blobBaseFee L1 blob base fee. + /// @param _hash L1 blockhash. + /// @param _batcherHash Versioned hash to authenticate batcher by. 
function encodeSetL1BlockValuesEcotone( - uint32 baseFeeScalar, - uint32 blobBaseFeeScalar, - uint64 sequenceNumber, - uint64 timestamp, - uint64 number, - uint256 baseFee, - uint256 blobBaseFee, - bytes32 hash, - bytes32 batcherHash + uint32 _baseFeeScalar, + uint32 _blobBaseFeeScalar, + uint64 _sequenceNumber, + uint64 _timestamp, + uint64 _number, + uint256 _baseFee, + uint256 _blobBaseFee, + bytes32 _hash, + bytes32 _batcherHash ) internal pure @@ -162,15 +162,15 @@ library Encoding { bytes4 functionSignature = bytes4(keccak256("setL1BlockValuesEcotone()")); return abi.encodePacked( functionSignature, - baseFeeScalar, - blobBaseFeeScalar, - sequenceNumber, - timestamp, - number, - baseFee, - blobBaseFee, - hash, - batcherHash + _baseFeeScalar, + _blobBaseFeeScalar, + _sequenceNumber, + _timestamp, + _number, + _baseFee, + _blobBaseFee, + _hash, + _batcherHash ); } diff --git a/packages/contracts-bedrock/src/safe/LivenessGuard.sol b/packages/contracts-bedrock/src/safe/LivenessGuard.sol index d4fe5c98c89b..aa9a231a4b25 100644 --- a/packages/contracts-bedrock/src/safe/LivenessGuard.sol +++ b/packages/contracts-bedrock/src/safe/LivenessGuard.sol @@ -25,8 +25,8 @@ contract LivenessGuard is ISemver, BaseGuard { event OwnerRecorded(address owner); /// @notice Semantic version. - /// @custom:semver 1.0.1-beta.1 - string public constant version = "1.0.1-beta.1"; + /// @custom:semver 1.0.1-beta.2 + string public constant version = "1.0.1-beta.2"; /// @notice The safe account for which this contract will be the guard. Safe internal immutable SAFE; @@ -66,21 +66,21 @@ contract LivenessGuard is ISemver, BaseGuard { /// @notice Records the most recent time which any owner has signed a transaction. /// @dev Called by the Safe contract before execution of a transaction. 
function checkTransaction( - address to, - uint256 value, - bytes memory data, - Enum.Operation operation, - uint256 safeTxGas, - uint256 baseGas, - uint256 gasPrice, - address gasToken, - address payable refundReceiver, - bytes memory signatures, - address msgSender + address _to, + uint256 _value, + bytes memory _data, + Enum.Operation _operation, + uint256 _safeTxGas, + uint256 _baseGas, + uint256 _gasPrice, + address _gasToken, + address payable _refundReceiver, + bytes memory _signatures, + address _msgSender ) external { - msgSender; // silence unused variable warning + _msgSender; // silence unused variable warning _requireOnlySafe(); // Cache the set of owners prior to execution. @@ -93,21 +93,21 @@ contract LivenessGuard is ISemver, BaseGuard { // This call will reenter to the Safe which is calling it. This is OK because it is only reading the // nonce, and using the getTransactionHash() method. bytes32 txHash = SAFE.getTransactionHash({ - to: to, - value: value, - data: data, - operation: operation, - safeTxGas: safeTxGas, - baseGas: baseGas, - gasPrice: gasPrice, - gasToken: gasToken, - refundReceiver: refundReceiver, + to: _to, + value: _value, + data: _data, + operation: _operation, + safeTxGas: _safeTxGas, + baseGas: _baseGas, + gasPrice: _gasPrice, + gasToken: _gasToken, + refundReceiver: _refundReceiver, _nonce: SAFE.nonce() - 1 }); uint256 threshold = SAFE.getThreshold(); address[] memory signers = - SafeSigners.getNSigners({ dataHash: txHash, signatures: signatures, requiredSignatures: threshold }); + SafeSigners.getNSigners({ _dataHash: txHash, _signatures: _signatures, _requiredSignatures: threshold }); for (uint256 i = 0; i < signers.length; i++) { lastLive[signers[i]] = block.timestamp; diff --git a/packages/contracts-bedrock/src/safe/SafeSigners.sol b/packages/contracts-bedrock/src/safe/SafeSigners.sol index 18c443582eba..47bfa09e0ed9 100644 --- a/packages/contracts-bedrock/src/safe/SafeSigners.sol +++ 
b/packages/contracts-bedrock/src/safe/SafeSigners.sol @@ -8,31 +8,31 @@ library SafeSigners { /// @dev Make sure to perform a bounds check for @param pos, to avoid out of bounds access on @param signatures /// The signature format is a compact form of {bytes32 r}{bytes32 s}{uint8 v} /// Compact means uint8 is not padded to 32 bytes. - /// @param pos Which signature to read. + /// @param _pos Which signature to read. /// A prior bounds check of this parameter should be performed, to avoid out of bounds access. - /// @param signatures Concatenated {r, s, v} signatures. - /// @return v Recovery ID or Safe signature type. - /// @return r Output value r of the signature. - /// @return s Output value s of the signature. + /// @param _signatures Concatenated {r, s, v} signatures. + /// @return v_ Recovery ID or Safe signature type. + /// @return r_ Output value r of the signature. + /// @return s_ Output value s of the signature. function signatureSplit( - bytes memory signatures, - uint256 pos + bytes memory _signatures, + uint256 _pos ) internal pure - returns (uint8 v, bytes32 r, bytes32 s) + returns (uint8 v_, bytes32 r_, bytes32 s_) { assembly { - let signaturePos := mul(0x41, pos) - r := mload(add(signatures, add(signaturePos, 0x20))) - s := mload(add(signatures, add(signaturePos, 0x40))) + let signaturePos := mul(0x41, _pos) + r_ := mload(add(_signatures, add(signaturePos, 0x20))) + s_ := mload(add(_signatures, add(signaturePos, 0x40))) /** * Here we are loading the last 32 bytes, including 31 bytes * of 's'. There is no 'mload8' to do this. * 'byte' is not working due to the Solidity parser, so lets * use the second best option, 'and' */ - v := and(mload(add(signatures, add(signaturePos, 0x41))), 0xff) + v_ := and(mload(add(_signatures, add(signaturePos, 0x41))), 0xff) } } @@ -43,23 +43,23 @@ library SafeSigners { /// the signatures. /// This method therefore simply extracts the addresses from the signatures. 
function getNSigners( - bytes32 dataHash, - bytes memory signatures, - uint256 requiredSignatures + bytes32 _dataHash, + bytes memory _signatures, + uint256 _requiredSignatures ) internal pure - returns (address[] memory _owners) + returns (address[] memory owners_) { - _owners = new address[](requiredSignatures); + owners_ = new address[](_requiredSignatures); address currentOwner; uint8 v; bytes32 r; bytes32 s; uint256 i; - for (i = 0; i < requiredSignatures; i++) { - (v, r, s) = signatureSplit(signatures, i); + for (i = 0; i < _requiredSignatures; i++) { + (v, r, s) = signatureSplit(_signatures, i); if (v == 0) { // If v is 0 then it is a contract signature // When handling contract signatures the address of the contract is encoded into r @@ -73,13 +73,13 @@ library SafeSigners { // To support eth_sign and similar we adjust v and hash the messageHash with the Ethereum message prefix // before applying ecrecover currentOwner = - ecrecover(keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", dataHash)), v - 4, r, s); + ecrecover(keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", _dataHash)), v - 4, r, s); } else { // Default is the ecrecover flow with the provided data hash // Use ecrecover with the messageHash for EOA signatures - currentOwner = ecrecover(dataHash, v, r, s); + currentOwner = ecrecover(_dataHash, v, r, s); } - _owners[i] = currentOwner; + owners_[i] = currentOwner; } } } diff --git a/packages/contracts-bedrock/src/universal/StorageSetter.sol b/packages/contracts-bedrock/src/universal/StorageSetter.sol index b7f7614b4ea0..5bd53a75b366 100644 --- a/packages/contracts-bedrock/src/universal/StorageSetter.sol +++ b/packages/contracts-bedrock/src/universal/StorageSetter.sol @@ -16,8 +16,8 @@ contract StorageSetter is ISemver { } /// @notice Semantic version. 
- /// @custom:semver 1.2.1-beta.1 - string public constant version = "1.2.1-beta.1"; + /// @custom:semver 1.2.1-beta.2 + string public constant version = "1.2.1-beta.2"; /// @notice Stores a bytes32 `_value` at `_slot`. Any storage slots that /// are packed should be set through this interface. @@ -26,10 +26,10 @@ contract StorageSetter is ISemver { } /// @notice Stores a bytes32 value at each key in `_slots`. - function setBytes32(Slot[] calldata slots) public { - uint256 length = slots.length; + function setBytes32(Slot[] calldata _slots) public { + uint256 length = _slots.length; for (uint256 i; i < length; i++) { - Storage.setBytes32(slots[i].key, slots[i].value); + Storage.setBytes32(_slots[i].key, _slots[i].value); } } diff --git a/packages/contracts-bedrock/src/universal/interfaces/IOwnable.sol b/packages/contracts-bedrock/src/universal/interfaces/IOwnable.sol index 968ad63a7652..3523d9bf9b49 100644 --- a/packages/contracts-bedrock/src/universal/interfaces/IOwnable.sol +++ b/packages/contracts-bedrock/src/universal/interfaces/IOwnable.sol @@ -8,5 +8,5 @@ interface IOwnable { function owner() external view returns (address); function renounceOwnership() external; - function transferOwnership(address newOwner) external; // nosemgrep: sol-style-input-arg-fmt. 
+ function transferOwnership(address newOwner) external; // nosemgrep } diff --git a/packages/contracts-bedrock/test/L1/DelayedVetoable.t.sol b/packages/contracts-bedrock/test/L1/DelayedVetoable.t.sol index 8c35ced064d5..4d33a0784972 100644 --- a/packages/contracts-bedrock/test/L1/DelayedVetoable.t.sol +++ b/packages/contracts-bedrock/test/L1/DelayedVetoable.t.sol @@ -29,10 +29,10 @@ contract DelayedVetoable_Init is Test { delayedVetoable = IDelayedVetoable( address( new DelayedVetoable({ - initiator_: initiator, - vetoer_: vetoer, - target_: address(target), - operatingDelay_: operatingDelay + _initiator: initiator, + _vetoer: vetoer, + _target: address(target), + _operatingDelay: operatingDelay }) ) ); diff --git a/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol b/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol index 41db45ac974a..f1386a6608a2 100644 --- a/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol +++ b/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol @@ -71,17 +71,17 @@ contract LivenessGuard_CheckTx_TestFails is LivenessGuard_TestInit { function test_checkTransaction_callerIsNotSafe_revert() external { vm.expectRevert("LivenessGuard: only Safe can call this function"); livenessGuard.checkTransaction({ - to: address(0), - value: 0, - data: hex"00", - operation: Enum.Operation.Call, - safeTxGas: 0, - baseGas: 0, - gasPrice: 0, - gasToken: address(0), - refundReceiver: payable(address(0)), - signatures: hex"00", - msgSender: address(0) + _to: address(0), + _value: 0, + _data: hex"00", + _operation: Enum.Operation.Call, + _safeTxGas: 0, + _baseGas: 0, + _gasPrice: 0, + _gasToken: address(0), + _refundReceiver: payable(address(0)), + _signatures: hex"00", + _msgSender: address(0) }); } } diff --git a/packages/contracts-bedrock/test/safe/SafeSigners.t.sol b/packages/contracts-bedrock/test/safe/SafeSigners.t.sol index a6caf2a487ff..9cfa91869899 100644 --- a/packages/contracts-bedrock/test/safe/SafeSigners.t.sol +++ 
b/packages/contracts-bedrock/test/safe/SafeSigners.t.sol @@ -98,7 +98,7 @@ contract SafeSigners_Test is Test, SafeTestTools { // Recover the signatures using the _getNSigners() method. address[] memory gotSigners = - SafeSigners.getNSigners({ dataHash: digest, signatures: signatures, requiredSignatures: numSigs }); + SafeSigners.getNSigners({ _dataHash: digest, _signatures: signatures, _requiredSignatures: numSigs }); // Compare the list of recovered signers to the expected signers. assertEq(gotSigners.length, numSigs); From e2599c60a7db951cc94d3fe9590b563de2281c7e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 Sep 2024 15:50:19 -0700 Subject: [PATCH 030/116] dependabot(gomod): bump github.com/minio/minio-go/v7 (#12117) Bumps [github.com/minio/minio-go/v7](https://github.com/minio/minio-go) from 7.0.76 to 7.0.77. - [Release notes](https://github.com/minio/minio-go/releases) - [Commits](https://github.com/minio/minio-go/compare/v7.0.76...v7.0.77) --- updated-dependencies: - dependency-name: github.com/minio/minio-go/v7 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4681c720ba8c..1e3d2fa2b43d 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/libp2p/go-libp2p-pubsub v0.12.0 github.com/libp2p/go-libp2p-testing v0.12.0 github.com/mattn/go-isatty v0.0.20 - github.com/minio/minio-go/v7 v7.0.76 + github.com/minio/minio-go/v7 v7.0.77 github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.13.0 github.com/multiformats/go-multiaddr-dns v0.3.1 diff --git a/go.sum b/go.sum index a92b17d143eb..922d0cf58a11 100644 --- a/go.sum +++ b/go.sum @@ -519,8 +519,8 @@ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4S github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.76 h1:9nxHH2XDai61cT/EFhyIw/wW4vJfpPNvl7lSFpRt+Ng= -github.com/minio/minio-go/v7 v7.0.76/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg= +github.com/minio/minio-go/v7 v7.0.77 h1:GaGghJRg9nwDVlNbwYjSDJT1rqltQkBFDsypWX1v3Bw= +github.com/minio/minio-go/v7 v7.0.77/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= From 0b2d0b2abfd7a9b8379e68ba5e691aa8ef0fe376 Mon Sep 17 00:00:00 2001 From: Michael Amadi Date: Thu, 26 Sep 2024 00:31:10 +0100 Subject: [PATCH 031/116] use DeployUtils lib for deployments in deploy 
script (#12070) * use DeployUtils lib for deployments in deploy script * type safety for constructor args and use encode constructor * add comments to helper functions * add commentsd, deploy scripts cleanup * fix assertion * modify interface checker script to always expect a pseudo-constructor, modify failing interfaces * assert contracts with no constructors to have pseudo-constructors with no input * use pseudo-constructor encoding for contracts with no constructor defined --- .../scripts/checks/check-interfaces.sh | 23 ++ .../scripts/deploy/Deploy.s.sol | 279 ++++++++++++------ .../scripts/fpac/FPACOPS2.s.sol | 37 ++- .../scripts/libraries/DeployUtils.sol | 10 + .../L1/interfaces/IL1CrossDomainMessenger.sol | 2 + .../src/L1/interfaces/IL1ERC721Bridge.sol | 2 + .../src/L1/interfaces/IL1StandardBridge.sol | 2 + .../src/L1/interfaces/IOptimismPortal.sol | 2 + .../src/L1/interfaces/IResourceMetering.sol | 2 + .../L1/interfaces/ISystemConfigInterop.sol | 2 + .../src/L2/interfaces/IBaseFeeVault.sol | 18 +- .../src/L2/interfaces/IETHLiquidity.sol | 2 + .../src/L2/interfaces/IGasPriceOracle.sol | 2 + .../src/L2/interfaces/IL1Block.sol | 2 + .../src/L2/interfaces/IL1BlockIsthmus.sol | 2 + .../src/L2/interfaces/IL1FeeVault.sol | 18 +- .../interfaces/IL2StandardBridgeInterop.sol | 2 + .../L2/interfaces/IL2ToL1MessagePasser.sol | 2 + .../src/L2/interfaces/ISequencerFeeVault.sol | 18 +- .../src/L2/interfaces/ISuperchainERC20.sol | 4 +- .../src/L2/interfaces/ISuperchainWETH.sol | 2 + .../src/dispute/interfaces/IDelayedWETH.sol | 2 + .../src/legacy/interfaces/IAddressManager.sol | 2 + .../legacy/interfaces/IDeployerWhitelist.sol | 2 + .../src/legacy/interfaces/IL1BlockNumber.sol | 2 + .../interfaces/ILegacyMessagePasser.sol | 2 + .../interfaces/ICrossDomainMessenger.sol | 2 + .../universal/interfaces/IERC721Bridge.sol | 2 + .../src/universal/interfaces/IFeeVault.sol | 2 + .../src/universal/interfaces/IOwnable.sol | 2 + .../universal/interfaces/IStandardBridge.sol | 2 + 31 
files changed, 341 insertions(+), 112 deletions(-) diff --git a/packages/contracts-bedrock/scripts/checks/check-interfaces.sh b/packages/contracts-bedrock/scripts/checks/check-interfaces.sh index ed2c8e798eb1..2df1045ef101 100755 --- a/packages/contracts-bedrock/scripts/checks/check-interfaces.sh +++ b/packages/contracts-bedrock/scripts/checks/check-interfaces.sh @@ -208,6 +208,29 @@ for interface_file in $JSON_FILES; do normalized_interface_abi=$(normalize_abi "$interface_abi") normalized_contract_abi=$(normalize_abi "$contract_abi") + # Check if the contract ABI has no constructor but the interface is missing __constructor__ + contract_has_constructor=$(echo "$normalized_contract_abi" | jq 'any(.[]; .type == "constructor")') + interface_has_default_pseudo_constructor=$(echo "$normalized_interface_abi" | jq 'any(.[]; .type == "constructor" and .inputs == [])') + + # If any contract has no constructor and its corresponding interface also does not have one, flag it as a detected issue + if [ "$contract_has_constructor" = false ] && [ "$interface_has_default_pseudo_constructor" = false ]; then + if ! grep -q "^$contract_name$" "$REPORTED_INTERFACES_FILE"; then + echo "$contract_name" >> "$REPORTED_INTERFACES_FILE" + if ! is_excluded "$contract_name"; then + echo "Issue found in ABI for interface $contract_name from file $interface_file." + echo "Interface $contract_name must have a function named '__constructor__' as the corresponding contract has no constructor in its ABI." + issues_detected=true + fi + fi + continue + fi + + # removes the pseudo constructor json entry from the interface files where the corresponding contract file has no constructor + # this is to ensure it is not flagged as a diff in the next step below + if [ "$contract_has_constructor" = false ] && [ "$interface_has_default_pseudo_constructor" ]; then + normalized_interface_abi=$(echo "$normalized_interface_abi" | jq 'map(select(.type != "constructor"))') + fi + # Use jq to compare the ABIs if ! 
diff_result=$(diff -u <(echo "$normalized_interface_abi" | jq 'sort') <(echo "$normalized_contract_abi" | jq 'sort')); then if ! grep -q "^$contract_name$" "$REPORTED_INTERFACES_FILE"; then diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index dc58911dfa53..5456b88492d5 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -23,6 +23,7 @@ import { LibStateDiff } from "scripts/libraries/LibStateDiff.sol"; import { Process } from "scripts/libraries/Process.sol"; import { ForgeArtifacts } from "scripts/libraries/ForgeArtifacts.sol"; import { ChainAssertions } from "scripts/deploy/ChainAssertions.sol"; +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; // Contracts import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; @@ -41,6 +42,7 @@ import { Constants } from "src/libraries/Constants.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import { Types } from "scripts/libraries/Types.sol"; import "src/dispute/lib/Types.sol"; +import { LibClaim, Duration } from "src/dispute/lib/LibUDT.sol"; // Interfaces import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; @@ -51,6 +53,7 @@ import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMesseng import { IL2OutputOracle } from "src/L1/interfaces/IL2OutputOracle.sol"; import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; +import { ISystemConfigInterop } from "src/L1/interfaces/ISystemConfigInterop.sol"; import { IDataAvailabilityChallenge } from "src/L1/interfaces/IDataAvailabilityChallenge.sol"; import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; @@ -58,6 +61,8 @@ import { IProtocolVersions, ProtocolVersion } from 
"src/L1/interfaces/IProtocolV import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; +import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; +import { IPermissionedDisputeGame } from "src/dispute/interfaces/IPermissionedDisputeGame.sol"; import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; @@ -665,7 +670,14 @@ contract Deploy is Deployer { /// @notice Deploy the SuperchainConfig contract function deploySuperchainConfig() public broadcast { - ISuperchainConfig superchainConfig = ISuperchainConfig(_deploy("SuperchainConfig", hex"")); + ISuperchainConfig superchainConfig = ISuperchainConfig( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "SuperchainConfig", + _args: abi.encodeCall(ISuperchainConfig.__constructor__, ()) + }) + ); require(superchainConfig.guardian() == address(0)); bytes32 initialized = vm.load(address(superchainConfig), bytes32(0)); @@ -674,7 +686,14 @@ contract Deploy is Deployer { /// @notice Deploy the L1CrossDomainMessenger function deployL1CrossDomainMessenger() public broadcast returns (address addr_) { - IL1CrossDomainMessenger messenger = IL1CrossDomainMessenger(_deploy("L1CrossDomainMessenger", hex"")); + IL1CrossDomainMessenger messenger = IL1CrossDomainMessenger( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "L1CrossDomainMessenger", + _args: abi.encodeCall(IL1CrossDomainMessenger.__constructor__, ()) + }) + ); // Override the `L1CrossDomainMessenger` contract to the deployed implementation. 
This is necessary // to check the `L1CrossDomainMessenger` implementation alongside dependent contracts, which @@ -692,7 +711,12 @@ contract Deploy is Deployer { console.log("Attempting to deploy OptimismPortal with interop, this config is a noop"); } - addr_ = _deploy("OptimismPortal", hex""); + addr_ = DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "OptimismPortal", + _args: abi.encodeCall(IOptimismPortal.__constructor__, ()) + }); // Override the `OptimismPortal` contract to the deployed implementation. This is necessary // to check the `OptimismPortal` implementation alongside dependent contracts, which @@ -710,15 +734,30 @@ contract Deploy is Deployer { ); if (cfg.useInterop()) { - addr_ = _deploy( - "OptimismPortalInterop", - abi.encode(cfg.proofMaturityDelaySeconds(), cfg.disputeGameFinalityDelaySeconds()) - ); + addr_ = DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "OptimismPortalInterop", + _args: DeployUtils.encodeConstructor( + abi.encodeCall( + IOptimismPortalInterop.__constructor__, + (cfg.proofMaturityDelaySeconds(), cfg.disputeGameFinalityDelaySeconds()) + ) + ) + }); save("OptimismPortal2", addr_); } else { - addr_ = _deploy( - "OptimismPortal2", abi.encode(cfg.proofMaturityDelaySeconds(), cfg.disputeGameFinalityDelaySeconds()) - ); + addr_ = DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "OptimismPortal2", + _args: DeployUtils.encodeConstructor( + abi.encodeCall( + IOptimismPortal2.__constructor__, + (cfg.proofMaturityDelaySeconds(), cfg.disputeGameFinalityDelaySeconds()) + ) + ) + }); } // Override the `OptimismPortal2` contract to the deployed implementation. 
This is necessary @@ -731,7 +770,14 @@ contract Deploy is Deployer { /// @notice Deploy the L2OutputOracle function deployL2OutputOracle() public broadcast returns (address addr_) { - IL2OutputOracle oracle = IL2OutputOracle(_deploy("L2OutputOracle", hex"")); + IL2OutputOracle oracle = IL2OutputOracle( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "L2OutputOracle", + _args: abi.encodeCall(IL2OutputOracle.__constructor__, ()) + }) + ); // Override the `L2OutputOracle` contract to the deployed implementation. This is necessary // to check the `L2OutputOracle` implementation alongside dependent contracts, which @@ -750,8 +796,14 @@ contract Deploy is Deployer { /// @notice Deploy the OptimismMintableERC20Factory function deployOptimismMintableERC20Factory() public broadcast returns (address addr_) { - IOptimismMintableERC20Factory factory = - IOptimismMintableERC20Factory(_deploy("OptimismMintableERC20Factory", hex"")); + IOptimismMintableERC20Factory factory = IOptimismMintableERC20Factory( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "OptimismMintableERC20Factory", + _args: abi.encodeCall(IOptimismMintableERC20Factory.__constructor__, ()) + }) + ); // Override the `OptimismMintableERC20Factory` contract to the deployed implementation. 
This is necessary // to check the `OptimismMintableERC20Factory` implementation alongside dependent contracts, which @@ -765,7 +817,14 @@ contract Deploy is Deployer { /// @notice Deploy the DisputeGameFactory function deployDisputeGameFactory() public broadcast returns (address addr_) { - IDisputeGameFactory factory = IDisputeGameFactory(_deploy("DisputeGameFactory", hex"")); + IDisputeGameFactory factory = IDisputeGameFactory( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "DisputeGameFactory", + _args: abi.encodeCall(IDisputeGameFactory.__constructor__, ()) + }) + ); // Override the `DisputeGameFactory` contract to the deployed implementation. This is necessary to check the // `DisputeGameFactory` implementation alongside dependent contracts, which are always proxies. @@ -777,7 +836,16 @@ contract Deploy is Deployer { } function deployDelayedWETH() public broadcast returns (address addr_) { - IDelayedWETH weth = IDelayedWETH(payable(_deploy("DelayedWETH", abi.encode(cfg.faultGameWithdrawalDelay())))); + IDelayedWETH weth = IDelayedWETH( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "DelayedWETH", + _args: DeployUtils.encodeConstructor( + abi.encodeCall(IDelayedWETH.__constructor__, (cfg.faultGameWithdrawalDelay())) + ) + }) + ); // Override the `DelayedWETH` contract to the deployed implementation. 
This is necessary // to check the `DelayedWETH` implementation alongside dependent contracts, which are @@ -796,7 +864,14 @@ contract Deploy is Deployer { /// @notice Deploy the ProtocolVersions function deployProtocolVersions() public broadcast returns (address addr_) { - IProtocolVersions versions = IProtocolVersions(_deploy("ProtocolVersions", hex"")); + IProtocolVersions versions = IProtocolVersions( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "ProtocolVersions", + _args: abi.encodeCall(IProtocolVersions.__constructor__, ()) + }) + ); // Override the `ProtocolVersions` contract to the deployed implementation. This is necessary // to check the `ProtocolVersions` implementation alongside dependent contracts, which @@ -849,8 +924,19 @@ contract Deploy is Deployer { /// @notice Deploy the AnchorStateRegistry function deployAnchorStateRegistry() public broadcast returns (address addr_) { - IAnchorStateRegistry anchorStateRegistry = - IAnchorStateRegistry(_deploy("AnchorStateRegistry", abi.encode(mustGetAddress("DisputeGameFactoryProxy")))); + IAnchorStateRegistry anchorStateRegistry = IAnchorStateRegistry( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "AnchorStateRegistry", + _args: DeployUtils.encodeConstructor( + abi.encodeCall( + IAnchorStateRegistry.__constructor__, + (IDisputeGameFactory(mustGetAddress("DisputeGameFactoryProxy"))) + ) + ) + }) + ); addr_ = address(anchorStateRegistry); } @@ -858,10 +944,20 @@ contract Deploy is Deployer { /// @notice Deploy the SystemConfig function deploySystemConfig() public broadcast returns (address addr_) { if (cfg.useInterop()) { - addr_ = _deploy("SystemConfigInterop", hex""); + addr_ = DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "SystemConfigInterop", + _args: abi.encodeCall(ISystemConfigInterop.__constructor__, ()) + }); save("SystemConfig", addr_); } else { - addr_ = _deploy("SystemConfig", hex""); + addr_ = 
DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "SystemConfig", + _args: abi.encodeCall(ISystemConfig.__constructor__, ()) + }); } // Override the `SystemConfig` contract to the deployed implementation. This is necessary @@ -874,7 +970,14 @@ contract Deploy is Deployer { /// @notice Deploy the L1StandardBridge function deployL1StandardBridge() public broadcast returns (address addr_) { - IL1StandardBridge bridge = IL1StandardBridge(payable(_deploy("L1StandardBridge", hex""))); + IL1StandardBridge bridge = IL1StandardBridge( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "L1StandardBridge", + _args: abi.encodeCall(IL1StandardBridge.__constructor__, ()) + }) + ); // Override the `L1StandardBridge` contract to the deployed implementation. This is necessary // to check the `L1StandardBridge` implementation alongside dependent contracts, which @@ -888,7 +991,14 @@ contract Deploy is Deployer { /// @notice Deploy the L1ERC721Bridge function deployL1ERC721Bridge() public broadcast returns (address addr_) { - IL1ERC721Bridge bridge = IL1ERC721Bridge(_deploy("L1ERC721Bridge", hex"")); + IL1ERC721Bridge bridge = IL1ERC721Bridge( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "L1ERC721Bridge", + _args: abi.encodeCall(IL1ERC721Bridge.__constructor__, ()) + }) + ); // Override the `L1ERC721Bridge` contract to the deployed implementation. 
This is necessary // to check the `L1ERC721Bridge` implementation alongside dependent contracts, which @@ -917,8 +1027,14 @@ contract Deploy is Deployer { /// @notice Deploy the DataAvailabilityChallenge function deployDataAvailabilityChallenge() public broadcast returns (address addr_) { - IDataAvailabilityChallenge dac = - IDataAvailabilityChallenge(payable(_deploy("DataAvailabilityChallenge", hex""))); + IDataAvailabilityChallenge dac = IDataAvailabilityChallenge( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "DataAvailabilityChallenge", + _args: abi.encodeCall(IDataAvailabilityChallenge.__constructor__, ()) + }) + ); addr_ = address(dac); } @@ -1578,49 +1694,66 @@ contract Deploy is Deployer { } uint32 rawGameType = GameType.unwrap(_params.gameType); + + // Redefine _param variable to avoid stack too deep error during compilation + FaultDisputeGameParams memory _params_ = _params; if (rawGameType != GameTypes.PERMISSIONED_CANNON.raw()) { _factory.setImplementation( - _params.gameType, + _params_.gameType, IDisputeGame( - _deploy( - "FaultDisputeGame", - string.concat("FaultDisputeGame_", vm.toString(rawGameType)), - abi.encode( - _params.gameType, - _params.absolutePrestate, - _params.maxGameDepth, - cfg.faultGameSplitDepth(), - cfg.faultGameClockExtension(), - _params.maxClockDuration, - _params.faultVm, - _params.weth, - IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")), - cfg.l2ChainID() + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "FaultDisputeGame", + _nick: string.concat("FaultDisputeGame_", vm.toString(rawGameType)), + _args: DeployUtils.encodeConstructor( + abi.encodeCall( + IFaultDisputeGame.__constructor__, + ( + _params_.gameType, + _params_.absolutePrestate, + _params_.maxGameDepth, + cfg.faultGameSplitDepth(), + Duration.wrap(uint64(cfg.faultGameClockExtension())), + _params_.maxClockDuration, + _params_.faultVm, + _params_.weth, + 
IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")), + cfg.l2ChainID() + ) + ) ) - ) + }) ) ); } else { _factory.setImplementation( - _params.gameType, + _params_.gameType, IDisputeGame( - _deploy( - "PermissionedDisputeGame", - abi.encode( - _params.gameType, - _params.absolutePrestate, - _params.maxGameDepth, - cfg.faultGameSplitDepth(), - cfg.faultGameClockExtension(), - _params.maxClockDuration, - _params.faultVm, - _params.weth, - _params.anchorStateRegistry, - cfg.l2ChainID(), - cfg.l2OutputOracleProposer(), - cfg.l2OutputOracleChallenger() + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "PermissionedDisputeGame", + _args: DeployUtils.encodeConstructor( + abi.encodeCall( + IPermissionedDisputeGame.__constructor__, + ( + _params_.gameType, + _params_.absolutePrestate, + _params_.maxGameDepth, + cfg.faultGameSplitDepth(), + Duration.wrap(uint64(cfg.faultGameClockExtension())), + _params_.maxClockDuration, + _params_.faultVm, + _params_.weth, + _params_.anchorStateRegistry, + cfg.l2ChainID(), + cfg.l2OutputOracleProposer(), + cfg.l2OutputOracleChallenger() + ) + ) ) - ) + }) ) ); } @@ -1674,36 +1807,4 @@ contract Deploy is Deployer { require(dac.bondSize() == daBondSize); require(dac.resolverRefundPercentage() == daResolverRefundPercentage); } - - /// @notice Deploys a contract via CREATE2. - /// @param _name The name of the contract. - /// @param _constructorParams The constructor parameters. - function _deploy(string memory _name, bytes memory _constructorParams) internal returns (address addr_) { - return _deploy(_name, _name, _constructorParams); - } - - /// @notice Deploys a contract via CREATE2. - /// @param _name The name of the contract. - /// @param _nickname The nickname of the contract. - /// @param _constructorParams The constructor parameters. 
- function _deploy( - string memory _name, - string memory _nickname, - bytes memory _constructorParams - ) - internal - returns (address addr_) - { - console.log("Deploying %s", _nickname); - bytes32 salt = _implSalt(); - bytes memory initCode = abi.encodePacked(vm.getCode(_name), _constructorParams); - address preComputedAddress = vm.computeCreate2Address(salt, keccak256(initCode)); - require(preComputedAddress.code.length == 0, "Deploy: contract already deployed"); - assembly { - addr_ := create2(0, add(initCode, 0x20), mload(initCode), salt) - } - require(addr_ != address(0), "deployment failed"); - save(_nickname, addr_); - console.log("%s deployed at %s", _nickname, addr_); - } } diff --git a/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol b/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol index 0f5962a50d02..5408d9acb151 100644 --- a/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol +++ b/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol @@ -91,12 +91,14 @@ contract FPACOPS2 is Deploy, StdAssertions { function deployCannonDisputeGame() internal broadcast { console.log("Deploying CannonFaultDisputeGame implementation"); - save( - "CannonFaultDisputeGame", - address( - _deploy( - "FaultDisputeGame", - abi.encode( + DeployUtils.create2AndSave({ + _save: this, + _name: "FaultDisputeGame", + _nick: "CannonFaultDisputeGame", + _args: DeployUtils.encodeConstructor( + abi.encodeCall( + IFaultDisputeGame.__constructor__, + ( GameTypes.CANNON, loadMipsAbsolutePrestate(), cfg.faultGameMaxDepth(), @@ -109,20 +111,22 @@ contract FPACOPS2 is Deploy, StdAssertions { cfg.l2ChainID() ) ) - ) - ); + ), + _salt: _implSalt() + }); } /// @notice Deploys the PermissionedDisputeGame. 
function deployPermissionedDisputeGame() internal broadcast { console.log("Deploying PermissionedDisputeGame implementation"); - save( - "PermissionedDisputeGame", - address( - _deploy( - "PermissionedDisputeGame", - abi.encode( + DeployUtils.create2AndSave({ + _save: this, + _name: "PermissionedDisputeGame", + _args: DeployUtils.encodeConstructor( + abi.encodeCall( + IPermissionedDisputeGame.__constructor__, + ( GameTypes.PERMISSIONED_CANNON, loadMipsAbsolutePrestate(), cfg.faultGameMaxDepth(), @@ -137,8 +141,9 @@ contract FPACOPS2 is Deploy, StdAssertions { cfg.l2OutputOracleChallenger() ) ) - ) - ); + ), + _salt: _implSalt() + }); } /// @notice Initializes the DelayedWETH proxy. diff --git a/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol b/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol index 92870309657e..7b078b45e2d4 100644 --- a/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol +++ b/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol @@ -8,6 +8,7 @@ import { Artifacts } from "scripts/Artifacts.s.sol"; // Libraries import { LibString } from "@solady/utils/LibString.sol"; +import { Bytes } from "src/libraries/Bytes.sol"; // Contracts import { Proxy } from "src/universal/Proxy.sol"; @@ -198,6 +199,15 @@ library DeployUtils { return address(uint160(uint256(keccak256(abi.encode(_sender, _identifier))))); } + /// @notice Strips the first 4 bytes of `_data` and returns the remaining bytes + /// If `_data` is not greater than 4 bytes, it returns empty bytes type. 
+ /// @param _data constructor arguments prefixed with a pseudo-constructor function signature + /// @return encodedData_ constructor arguments without the pseudo-constructor function signature prefix + function encodeConstructor(bytes memory _data) internal pure returns (bytes memory encodedData_) { + require(_data.length >= 4, "encodeConstructor takes in _data of length >= 4"); + encodedData_ = Bytes.slice(_data, 4); + } + /// @notice Asserts that the given address is a valid contract address. /// @param _who Address to check. function assertValidContractAddress(address _who) internal view { diff --git a/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessenger.sol b/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessenger.sol index bb92e723c7c3..b8b7e3403d29 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessenger.sol @@ -18,4 +18,6 @@ interface IL1CrossDomainMessenger is ICrossDomainMessenger { function superchainConfig() external view returns (address); function systemConfig() external view returns (address); function version() external view returns (string memory); + + function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/L1/interfaces/IL1ERC721Bridge.sol b/packages/contracts-bedrock/src/L1/interfaces/IL1ERC721Bridge.sol index fd64f40fe5ac..51356bc8d346 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IL1ERC721Bridge.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/IL1ERC721Bridge.sol @@ -37,4 +37,6 @@ interface IL1ERC721Bridge is IERC721Bridge { function paused() external view returns (bool); function superchainConfig() external view returns (ISuperchainConfig); function version() external view returns (string memory); + + function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/L1/interfaces/IL1StandardBridge.sol 
b/packages/contracts-bedrock/src/L1/interfaces/IL1StandardBridge.sol index 119c8c1f1d8e..816436cf1084 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IL1StandardBridge.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/IL1StandardBridge.sol @@ -72,4 +72,6 @@ interface IL1StandardBridge is IStandardBridge { function superchainConfig() external view returns (ISuperchainConfig); function systemConfig() external view returns (ISystemConfig); function version() external view returns (string memory); + + function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortal.sol b/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortal.sol index 4155c62352e9..b9035a6e5143 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortal.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortal.sol @@ -82,4 +82,6 @@ interface IOptimismPortal { function superchainConfig() external view returns (ISuperchainConfig); function systemConfig() external view returns (ISystemConfig); function version() external pure returns (string memory); + + function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/L1/interfaces/IResourceMetering.sol b/packages/contracts-bedrock/src/L1/interfaces/IResourceMetering.sol index 21672340fd4f..1c5a5174b333 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IResourceMetering.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/IResourceMetering.sol @@ -22,4 +22,6 @@ interface IResourceMetering { event Initialized(uint8 version); function params() external view returns (uint128 prevBaseFee, uint64 prevBoughtGas, uint64 prevBlockNum); // nosemgrep + + function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigInterop.sol b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigInterop.sol index 346220e60b95..b4617b8e6a42 100644 --- 
a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigInterop.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigInterop.sol @@ -79,4 +79,6 @@ interface ISystemConfigInterop { ) external; function version() external pure returns (string memory); + + function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/L2/interfaces/IBaseFeeVault.sol b/packages/contracts-bedrock/src/L2/interfaces/IBaseFeeVault.sol index 9843189bdb14..5906281cf747 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IBaseFeeVault.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IBaseFeeVault.sol @@ -3,13 +3,27 @@ pragma solidity ^0.8.0; import { IFeeVault } from "src/universal/interfaces/IFeeVault.sol"; -interface IBaseFeeVault is IFeeVault { +interface IBaseFeeVault { + event Withdrawal(uint256 value, address to, address from); + event Withdrawal(uint256 value, address to, address from, IFeeVault.WithdrawalNetwork withdrawalNetwork); + + receive() external payable; + + function MIN_WITHDRAWAL_AMOUNT() external view returns (uint256); + function RECIPIENT() external view returns (address); + function WITHDRAWAL_NETWORK() external view returns (IFeeVault.WithdrawalNetwork); + function minWithdrawalAmount() external view returns (uint256 amount_); + function recipient() external view returns (address recipient_); + function totalProcessed() external view returns (uint256); + function withdraw() external; + function withdrawalNetwork() external view returns (IFeeVault.WithdrawalNetwork network_); + function version() external view returns (string memory); function __constructor__( address _recipient, uint256 _minWithdrawalAmount, - WithdrawalNetwork _withdrawalNetwork + IFeeVault.WithdrawalNetwork _withdrawalNetwork ) external; } diff --git a/packages/contracts-bedrock/src/L2/interfaces/IETHLiquidity.sol b/packages/contracts-bedrock/src/L2/interfaces/IETHLiquidity.sol index de463f543b71..77c1c0b3caf2 100644 --- 
a/packages/contracts-bedrock/src/L2/interfaces/IETHLiquidity.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IETHLiquidity.sol @@ -11,4 +11,6 @@ interface IETHLiquidity { function burn() external payable; function mint(uint256 _amount) external; function version() external view returns (string memory); + + function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/L2/interfaces/IGasPriceOracle.sol b/packages/contracts-bedrock/src/L2/interfaces/IGasPriceOracle.sol index 4ab0ef854c8a..8063725cb86b 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IGasPriceOracle.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IGasPriceOracle.sol @@ -20,4 +20,6 @@ interface IGasPriceOracle { function setEcotone() external; function setFjord() external; function version() external view returns (string memory); + + function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL1Block.sol b/packages/contracts-bedrock/src/L2/interfaces/IL1Block.sol index 6ef4c2984ae4..a43b3c7c3963 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL1Block.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IL1Block.sol @@ -36,4 +36,6 @@ interface IL1Block { function setL1BlockValuesEcotone() external; function timestamp() external view returns (uint64); function version() external pure returns (string memory); + + function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL1BlockIsthmus.sol b/packages/contracts-bedrock/src/L2/interfaces/IL1BlockIsthmus.sol index 7ff15eda51b0..b464246e8f7a 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL1BlockIsthmus.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IL1BlockIsthmus.sol @@ -55,4 +55,6 @@ interface IL1BlockIsthmus { function setL1BlockValuesIsthmus() external; function timestamp() external view returns (uint64); function version() external pure returns (string memory); + + function __constructor__() external; } 
diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL1FeeVault.sol b/packages/contracts-bedrock/src/L2/interfaces/IL1FeeVault.sol index 89ac3b782fca..7853375bcd3a 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL1FeeVault.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IL1FeeVault.sol @@ -3,13 +3,27 @@ pragma solidity ^0.8.0; import { IFeeVault } from "src/universal/interfaces/IFeeVault.sol"; -interface IL1FeeVault is IFeeVault { +interface IL1FeeVault { + event Withdrawal(uint256 value, address to, address from); + event Withdrawal(uint256 value, address to, address from, IFeeVault.WithdrawalNetwork withdrawalNetwork); + + receive() external payable; + + function MIN_WITHDRAWAL_AMOUNT() external view returns (uint256); + function RECIPIENT() external view returns (address); + function WITHDRAWAL_NETWORK() external view returns (IFeeVault.WithdrawalNetwork); + function minWithdrawalAmount() external view returns (uint256 amount_); + function recipient() external view returns (address recipient_); + function totalProcessed() external view returns (uint256); + function withdraw() external; + function withdrawalNetwork() external view returns (IFeeVault.WithdrawalNetwork network_); + function version() external view returns (string memory); function __constructor__( address _recipient, uint256 _minWithdrawalAmount, - WithdrawalNetwork _withdrawalNetwork + IFeeVault.WithdrawalNetwork _withdrawalNetwork ) external; } diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL2StandardBridgeInterop.sol b/packages/contracts-bedrock/src/L2/interfaces/IL2StandardBridgeInterop.sol index 227b48881fa6..ed4ec1cef519 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL2StandardBridgeInterop.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IL2StandardBridgeInterop.sol @@ -95,4 +95,6 @@ interface IL2StandardBridgeInterop is IStandardBridge { function convert(address _from, address _to, uint256 _amount) external; function version() external 
pure returns (string memory); + + function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL2ToL1MessagePasser.sol b/packages/contracts-bedrock/src/L2/interfaces/IL2ToL1MessagePasser.sol index 751cf51a40db..4629dbaba8d0 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL2ToL1MessagePasser.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IL2ToL1MessagePasser.sol @@ -21,4 +21,6 @@ interface IL2ToL1MessagePasser { function messageNonce() external view returns (uint256); function sentMessages(bytes32) external view returns (bool); function version() external view returns (string memory); + + function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/L2/interfaces/ISequencerFeeVault.sol b/packages/contracts-bedrock/src/L2/interfaces/ISequencerFeeVault.sol index 1987d07bb7f7..51d31d99322b 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/ISequencerFeeVault.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/ISequencerFeeVault.sol @@ -3,14 +3,28 @@ pragma solidity ^0.8.0; import { IFeeVault } from "src/universal/interfaces/IFeeVault.sol"; -interface ISequencerFeeVault is IFeeVault { +interface ISequencerFeeVault { + event Withdrawal(uint256 value, address to, address from); + event Withdrawal(uint256 value, address to, address from, IFeeVault.WithdrawalNetwork withdrawalNetwork); + + receive() external payable; + + function MIN_WITHDRAWAL_AMOUNT() external view returns (uint256); + function RECIPIENT() external view returns (address); + function WITHDRAWAL_NETWORK() external view returns (IFeeVault.WithdrawalNetwork); + function minWithdrawalAmount() external view returns (uint256 amount_); + function recipient() external view returns (address recipient_); + function totalProcessed() external view returns (uint256); + function withdraw() external; + function withdrawalNetwork() external view returns (IFeeVault.WithdrawalNetwork network_); + function version() external view returns 
(string memory); function l1FeeWallet() external view returns (address); function __constructor__( address _recipient, uint256 _minWithdrawalAmount, - WithdrawalNetwork _withdrawalNetwork + IFeeVault.WithdrawalNetwork _withdrawalNetwork ) external; } diff --git a/packages/contracts-bedrock/src/L2/interfaces/ISuperchainERC20.sol b/packages/contracts-bedrock/src/L2/interfaces/ISuperchainERC20.sol index fee6a2c2f7bd..6ed17a9f46ec 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/ISuperchainERC20.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/ISuperchainERC20.sol @@ -53,4 +53,6 @@ interface ISuperchainERC20Errors { /// @title ISuperchainERC20 /// @notice Combines Solady's ERC20 interface with the SuperchainERC20Extensions interface. -interface ISuperchainERC20 is IERC20Solady, ISuperchainERC20Extensions, ISuperchainERC20Errors { } +interface ISuperchainERC20 is IERC20Solady, ISuperchainERC20Extensions, ISuperchainERC20Errors { + function __constructor__() external; +} diff --git a/packages/contracts-bedrock/src/L2/interfaces/ISuperchainWETH.sol b/packages/contracts-bedrock/src/L2/interfaces/ISuperchainWETH.sol index 7c755e11e869..1204c328fc89 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/ISuperchainWETH.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/ISuperchainWETH.sol @@ -30,4 +30,6 @@ interface ISuperchainWETH { function transferFrom(address src, address dst, uint256 wad) external returns (bool); function version() external view returns (string memory); function withdraw(uint256 wad) external; + + function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/dispute/interfaces/IDelayedWETH.sol b/packages/contracts-bedrock/src/dispute/interfaces/IDelayedWETH.sol index ef60aaa90298..55b940c2d9dd 100644 --- a/packages/contracts-bedrock/src/dispute/interfaces/IDelayedWETH.sol +++ b/packages/contracts-bedrock/src/dispute/interfaces/IDelayedWETH.sol @@ -27,4 +27,6 @@ interface IDelayedWETH is IWETH { function 
withdraw(address _guy, uint256 _wad) external; function withdrawals(address _owner, address _guy) external view returns (uint256, uint256); function version() external view returns (string memory); + + function __constructor__(uint256 _delay) external; } diff --git a/packages/contracts-bedrock/src/legacy/interfaces/IAddressManager.sol b/packages/contracts-bedrock/src/legacy/interfaces/IAddressManager.sol index 3fae2cbab430..0c0004a53675 100644 --- a/packages/contracts-bedrock/src/legacy/interfaces/IAddressManager.sol +++ b/packages/contracts-bedrock/src/legacy/interfaces/IAddressManager.sol @@ -10,4 +10,6 @@ interface IAddressManager is IOwnable { function getAddress(string memory _name) external view returns (address); function setAddress(string memory _name, address _address) external; + + function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/legacy/interfaces/IDeployerWhitelist.sol b/packages/contracts-bedrock/src/legacy/interfaces/IDeployerWhitelist.sol index 050748f1786e..d1e711ea42ff 100644 --- a/packages/contracts-bedrock/src/legacy/interfaces/IDeployerWhitelist.sol +++ b/packages/contracts-bedrock/src/legacy/interfaces/IDeployerWhitelist.sol @@ -17,4 +17,6 @@ interface IDeployerWhitelist { function setWhitelistedDeployer(address _deployer, bool _isWhitelisted) external; function version() external view returns (string memory); function whitelist(address) external view returns (bool); + + function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/legacy/interfaces/IL1BlockNumber.sol b/packages/contracts-bedrock/src/legacy/interfaces/IL1BlockNumber.sol index 7634cc67c690..551514632696 100644 --- a/packages/contracts-bedrock/src/legacy/interfaces/IL1BlockNumber.sol +++ b/packages/contracts-bedrock/src/legacy/interfaces/IL1BlockNumber.sol @@ -11,4 +11,6 @@ interface IL1BlockNumber is ISemver { receive() external payable; function getL1BlockNumber() external view returns (uint256); + + function 
__constructor__() external; } diff --git a/packages/contracts-bedrock/src/legacy/interfaces/ILegacyMessagePasser.sol b/packages/contracts-bedrock/src/legacy/interfaces/ILegacyMessagePasser.sol index a5fde0fdb65d..0eebc30d5878 100644 --- a/packages/contracts-bedrock/src/legacy/interfaces/ILegacyMessagePasser.sol +++ b/packages/contracts-bedrock/src/legacy/interfaces/ILegacyMessagePasser.sol @@ -8,4 +8,6 @@ import { ISemver } from "src/universal/interfaces/ISemver.sol"; interface ILegacyMessagePasser is ISemver { function passMessageToL1(bytes memory _message) external; function sentMessages(bytes32) external view returns (bool); + + function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/universal/interfaces/ICrossDomainMessenger.sol b/packages/contracts-bedrock/src/universal/interfaces/ICrossDomainMessenger.sol index ed2fb20ea453..256b09fa56ef 100644 --- a/packages/contracts-bedrock/src/universal/interfaces/ICrossDomainMessenger.sol +++ b/packages/contracts-bedrock/src/universal/interfaces/ICrossDomainMessenger.sol @@ -35,4 +35,6 @@ interface ICrossDomainMessenger { function sendMessage(address _target, bytes memory _message, uint32 _minGasLimit) external payable; function successfulMessages(bytes32) external view returns (bool); function xDomainMessageSender() external view returns (address); + + function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/universal/interfaces/IERC721Bridge.sol b/packages/contracts-bedrock/src/universal/interfaces/IERC721Bridge.sol index ccb2d5f0a483..3c97958c1033 100644 --- a/packages/contracts-bedrock/src/universal/interfaces/IERC721Bridge.sol +++ b/packages/contracts-bedrock/src/universal/interfaces/IERC721Bridge.sol @@ -44,4 +44,6 @@ interface IERC721Bridge { function messenger() external view returns (ICrossDomainMessenger); function otherBridge() external view returns (IERC721Bridge); function paused() external view returns (bool); + + function __constructor__() external; } 
diff --git a/packages/contracts-bedrock/src/universal/interfaces/IFeeVault.sol b/packages/contracts-bedrock/src/universal/interfaces/IFeeVault.sol index 1742a0029c7d..403f603fae0c 100644 --- a/packages/contracts-bedrock/src/universal/interfaces/IFeeVault.sol +++ b/packages/contracts-bedrock/src/universal/interfaces/IFeeVault.sol @@ -20,4 +20,6 @@ interface IFeeVault { function totalProcessed() external view returns (uint256); function withdraw() external; function withdrawalNetwork() external view returns (WithdrawalNetwork network_); + + function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/universal/interfaces/IOwnable.sol b/packages/contracts-bedrock/src/universal/interfaces/IOwnable.sol index 3523d9bf9b49..b6f48de59b28 100644 --- a/packages/contracts-bedrock/src/universal/interfaces/IOwnable.sol +++ b/packages/contracts-bedrock/src/universal/interfaces/IOwnable.sol @@ -9,4 +9,6 @@ interface IOwnable { function owner() external view returns (address); function renounceOwnership() external; function transferOwnership(address newOwner) external; // nosemgrep + + function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/universal/interfaces/IStandardBridge.sol b/packages/contracts-bedrock/src/universal/interfaces/IStandardBridge.sol index b92aae27503b..406a172c0737 100644 --- a/packages/contracts-bedrock/src/universal/interfaces/IStandardBridge.sol +++ b/packages/contracts-bedrock/src/universal/interfaces/IStandardBridge.sol @@ -61,4 +61,6 @@ interface IStandardBridge { function messenger() external view returns (ICrossDomainMessenger); function otherBridge() external view returns (IStandardBridge); function paused() external view returns (bool); + + function __constructor__() external; } From 2a25c62c946f0bf2ae66be81ecbca281ace3a5f2 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Thu, 26 Sep 2024 09:35:14 +1000 Subject: [PATCH 032/116] ci: Add finalized v1.3.1 to list of reproducible prestate checks (#12138) 
--- .circleci/config.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 0dffb662b116..efbc727ee917 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1123,6 +1123,8 @@ jobs: echo 'export EXPECTED_PRESTATE_HASH="0x03e806a2859a875267a563462a06d4d1d1b455a9efee959a46e21e54b6caf69a"' >> $BASH_ENV elif [[ "<>" == "1.3.1-rc.2" ]]; then echo 'export EXPECTED_PRESTATE_HASH="0x038512e02c4c3f7bdaec27d00edf55b7155e0905301e1a88083e4e0a6764d54c"' >> $BASH_ENV + elif [[ "<>" == "1.3.1" ]]; then + echo 'export EXPECTED_PRESTATE_HASH="0x038512e02c4c3f7bdaec27d00edf55b7155e0905301e1a88083e4e0a6764d54c"' >> $BASH_ENV else echo "Unknown prestate version <>" exit 1 From 2d1f41c4371a0e0129991f66fabbb3a897b1949a Mon Sep 17 00:00:00 2001 From: tre Date: Wed, 25 Sep 2024 17:19:20 -0700 Subject: [PATCH 033/116] feat(L2toL2CrossDomainMessenger): update `sendMessage` to return relay message hash (#12089) * feat(L2toL2CrossDomainMessenger): return message hash from sendMessage * fix * fix build * revert * fix build * bump version * nit * address comments * update tests * nit * revert * update snapshot * update semver devdoc * update semver-lock * bump token versions * bump semver lock * fix semver and update output * update semver-lock * delete clones-with-immutable-args * bump versions: * update semver-lock --- packages/contracts-bedrock/semver-lock.json | 12 +++--- .../abi/L2ToL2CrossDomainMessenger.json | 8 +++- .../src/L2/L2ToL2CrossDomainMessenger.sol | 37 ++++++++++++++++--- .../src/L2/OptimismSuperchainERC20.sol | 4 +- .../src/L2/SuperchainWETH.sol | 4 +- .../IL2ToL2CrossDomainMessenger.sol | 10 ++++- .../src/libraries/Hashing.sol | 24 ++++++++++++ .../test/L2/L2ToL2CrossDomainMessenger.t.sol | 10 ++++- 8 files changed, 90 insertions(+), 19 deletions(-) diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index 40026d47199a..91c7ad09789d 100644 --- 
a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -112,12 +112,12 @@ "sourceCodeHash": "0xd08a2e6514dbd44e16aa312a1b27b2841a9eab5622cbd05a39c30f543fad673c" }, "src/L2/L2ToL2CrossDomainMessenger.sol": { - "initCodeHash": "0x2ec4cdf62baf9dfb2c4c211d8b914f3dd1e0c0133c15c739ff81c5a2504d4359", - "sourceCodeHash": "0x54dfffed789dafe11b7f7bb16dfb29988713a19da5209452119c5d6539e48c48" + "initCodeHash": "0x652e07372d45f0f861aa65b4a73db55871291b875ced02df893a405419de723a", + "sourceCodeHash": "0xc3e73c2d9abf3c7853d2505a83e475d58e96ab5fc5ad7770d04dea5feb9e5717" }, "src/L2/OptimismSuperchainERC20.sol": { - "initCodeHash": "0x192bb3abd2a103832172d913f548e36bcf6f2c0220cd224a83f8420e2e86b4ec", - "sourceCodeHash": "0x09d3367612dee674e3708da1c70eebbd0c6835fbcbba339780e678338bdfd3ca" + "initCodeHash": "0xe3dbb0851669708901a4c6bb7ad7d55f9896deeec02cbe53ac58d689ff95b88b", + "sourceCodeHash": "0xe853817da47d32b4ec5bb5392405278c82a1e9620aef377491dcb371fbbe682f" }, "src/L2/OptimismSuperchainERC20Beacon.sol": { "initCodeHash": "0x99ce8095b23c124850d866cbc144fee6cee05dbc6bb5d83acadfe00b90cf42c7", @@ -132,8 +132,8 @@ "sourceCodeHash": "0xd56922cb04597dea469c65e5a49d4b3c50c171e603601e6f41da9517cae0b11a" }, "src/L2/SuperchainWETH.sol": { - "initCodeHash": "0xd8766c7ab41d34d935febf5b48289f947804634bde38f8e346075b9f2d867275", - "sourceCodeHash": "0x6c1691c0fb5c86f1fd67e23495725c2cd86567556602e8cc0f28104ad6114bf4" + "initCodeHash": "0x4ccd25f37a816205bc26f8532afa66e02f2b36ca7b7404d0fa48a4313ed16f0c", + "sourceCodeHash": "0xd186614f1515fa3ba2f43e401e639bfa3159603954e39a51769e9b57ad19a3fd" }, "src/L2/WETH.sol": { "initCodeHash": "0xfb253765520690623f177941c2cd9eba23e4c6d15063bccdd5e98081329d8956", diff --git a/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json b/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json index f5e21dedebdf..a5cda3493911 100644 --- 
a/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json +++ b/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json @@ -108,7 +108,13 @@ } ], "name": "sendMessage", - "outputs": [], + "outputs": [ + { + "internalType": "bytes32", + "name": "msgHash_", + "type": "bytes32" + } + ], "stateMutability": "nonpayable", "type": "function" }, diff --git a/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol b/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol index 9f85a0fe1b89..3eb72210a109 100644 --- a/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol @@ -2,6 +2,7 @@ pragma solidity 0.8.25; import { Encoding } from "src/libraries/Encoding.sol"; +import { Hashing } from "src/libraries/Hashing.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import { CrossL2Inbox } from "src/L2/CrossL2Inbox.sol"; import { IL2ToL2CrossDomainMessenger } from "src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol"; @@ -57,8 +58,8 @@ contract L2ToL2CrossDomainMessenger is IL2ToL2CrossDomainMessenger, ISemver, Tra uint16 public constant messageVersion = uint16(0); /// @notice Semantic version. - /// @custom:semver 1.0.0-beta.5 - string public constant version = "1.0.0-beta.5"; + /// @custom:semver 1.0.0-beta.6 + string public constant version = "1.0.0-beta.6"; /// @notice Mapping of message hashes to boolean receipt values. Note that a message will only be present in this /// mapping if it has successfully been relayed on this chain, and can therefore not be relayed again. @@ -99,15 +100,32 @@ contract L2ToL2CrossDomainMessenger is IL2ToL2CrossDomainMessenger, ISemver, Tra /// @param _destination Chain ID of the destination chain. /// @param _target Target contract or wallet address. /// @param _message Message payload to call target with. 
- function sendMessage(uint256 _destination, address _target, bytes calldata _message) external { + /// @return msgHash_ The hash of the message being sent, which can be used for tracking whether + /// the message has successfully been relayed. + function sendMessage( + uint256 _destination, + address _target, + bytes calldata _message + ) + external + returns (bytes32 msgHash_) + { if (_destination == block.chainid) revert MessageDestinationSameChain(); if (_target == Predeploys.CROSS_L2_INBOX) revert MessageTargetCrossL2Inbox(); if (_target == Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) revert MessageTargetL2ToL2CrossDomainMessenger(); + (uint256 source, uint256 nonce, address sender) = (block.chainid, messageNonce(), msg.sender); bytes memory data = abi.encodeCall( - L2ToL2CrossDomainMessenger.relayMessage, - (_destination, block.chainid, messageNonce(), msg.sender, _target, _message) + L2ToL2CrossDomainMessenger.relayMessage, (_destination, source, nonce, sender, _target, _message) ); + msgHash_ = Hashing.hashL2toL2CrossDomainMessengerRelayMessage({ + _destination: _destination, + _source: source, + _nonce: nonce, + _sender: sender, + _target: _target, + _message: _message + }); assembly { log0(add(data, 0x20), mload(data)) } @@ -145,7 +163,14 @@ contract L2ToL2CrossDomainMessenger is IL2ToL2CrossDomainMessenger, ISemver, Tra revert MessageTargetL2ToL2CrossDomainMessenger(); } - bytes32 messageHash = keccak256(abi.encode(_destination, _source, _nonce, _sender, _target, _message)); + bytes32 messageHash = Hashing.hashL2toL2CrossDomainMessengerRelayMessage({ + _destination: _destination, + _source: _source, + _nonce: _nonce, + _sender: _sender, + _target: _target, + _message: _message + }); if (successfulMessages[messageHash]) { revert MessageAlreadyRelayed(); } diff --git a/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20.sol b/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20.sol index c59eb7d04d78..ffbec844e2ff 100644 --- 
a/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20.sol +++ b/packages/contracts-bedrock/src/L2/OptimismSuperchainERC20.sol @@ -63,8 +63,8 @@ contract OptimismSuperchainERC20 is } /// @notice Semantic version. - /// @custom:semver 1.0.0-beta.3 - string public constant version = "1.0.0-beta.3"; + /// @custom:semver 1.0.0-beta.4 + string public constant version = "1.0.0-beta.4"; /// @notice Constructs the OptimismSuperchainERC20 contract. constructor() { diff --git a/packages/contracts-bedrock/src/L2/SuperchainWETH.sol b/packages/contracts-bedrock/src/L2/SuperchainWETH.sol index a672918ffea1..4788b70b1bc9 100644 --- a/packages/contracts-bedrock/src/L2/SuperchainWETH.sol +++ b/packages/contracts-bedrock/src/L2/SuperchainWETH.sol @@ -21,8 +21,8 @@ import { IETHLiquidity } from "src/L2/interfaces/IETHLiquidity.sol"; /// do not use a custom gas token. contract SuperchainWETH is WETH98, ISuperchainERC20Extensions, ISemver { /// @notice Semantic version. - /// @custom:semver 1.0.0-beta.4 - string public constant version = "1.0.0-beta.4"; + /// @custom:semver 1.0.0-beta.5 + string public constant version = "1.0.0-beta.5"; /// @inheritdoc WETH98 function deposit() public payable override { diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol b/packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol index 0794fb11ec35..e043bb43420a 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol @@ -32,7 +32,15 @@ interface IL2ToL2CrossDomainMessenger { /// @param _destination Chain ID of the destination chain. /// @param _target Target contract or wallet address. /// @param _message Message to trigger the target address with. 
- function sendMessage(uint256 _destination, address _target, bytes calldata _message) external; + /// @return msgHash_ The hash of the message being sent, which can be used for tracking whether + /// the message has successfully been relayed. + function sendMessage( + uint256 _destination, + address _target, + bytes calldata _message + ) + external + returns (bytes32 msgHash_); /// @notice Relays a message that was sent by the other CrossDomainMessenger contract. Can only /// be executed via cross-chain call from the other messenger OR if the message was diff --git a/packages/contracts-bedrock/src/libraries/Hashing.sol b/packages/contracts-bedrock/src/libraries/Hashing.sol index 7546daede7c5..07a31eb76006 100644 --- a/packages/contracts-bedrock/src/libraries/Hashing.sol +++ b/packages/contracts-bedrock/src/libraries/Hashing.sol @@ -121,4 +121,28 @@ library Hashing { ) ); } + + /// @notice Generates a unique hash for a message to be relayed across chains. This hash is + /// used to identify the message and ensure it is not relayed more than once. + /// @param _destination Chain ID of the destination chain. + /// @param _source Chain ID of the source chain. + /// @param _nonce Unique nonce associated with the message to prevent replay attacks. + /// @param _sender Address of the user who originally sent the message. + /// @param _target Address of the contract or wallet that the message is targeting on the destination chain. + /// @param _message The message payload to be relayed to the target on the destination chain. + /// @return Hash of the encoded message parameters, used to uniquely identify the message. 
+ function hashL2toL2CrossDomainMessengerRelayMessage( + uint256 _destination, + uint256 _source, + uint256 _nonce, + address _sender, + address _target, + bytes memory _message + ) + internal + pure + returns (bytes32) + { + return keccak256(abi.encode(_destination, _source, _nonce, _sender, _target, _message)); + } } diff --git a/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol index 66b0b7e83209..ffde996c21c7 100644 --- a/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol @@ -7,6 +7,7 @@ import { Vm } from "forge-std/Vm.sol"; // Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; +import { Hashing } from "src/libraries/Hashing.sol"; // Target contract import { @@ -90,7 +91,14 @@ contract L2ToL2CrossDomainMessengerTest is Test { vm.recordLogs(); // Call the sendMessage function - l2ToL2CrossDomainMessenger.sendMessage({ _destination: _destination, _target: _target, _message: _message }); + bytes32 msgHash = + l2ToL2CrossDomainMessenger.sendMessage({ _destination: _destination, _target: _target, _message: _message }); + assertEq( + msgHash, + Hashing.hashL2toL2CrossDomainMessengerRelayMessage( + _destination, block.chainid, messageNonce, address(this), _target, _message + ) + ); // Check that the event was emitted with the correct parameters Vm.Log[] memory logs = vm.getRecordedLogs(); From 874c0e9878da51405b6939d927edb40586ad8c5d Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Thu, 26 Sep 2024 09:32:33 +0900 Subject: [PATCH 034/116] Make honestActorConfig public so that it can be used in asterisc e2e tests (#12108) --- .../disputegame/output_cannon_helper.go | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/op-e2e/e2eutils/disputegame/output_cannon_helper.go b/op-e2e/e2eutils/disputegame/output_cannon_helper.go index 
a4e017f9c980..edd3125dd766 100644 --- a/op-e2e/e2eutils/disputegame/output_cannon_helper.go +++ b/op-e2e/e2eutils/disputegame/output_cannon_helper.go @@ -47,23 +47,23 @@ func (g *OutputCannonGameHelper) StartChallenger(ctx context.Context, name strin return c } -type honestActorConfig struct { - prestateBlock uint64 - poststateBlock uint64 - challengerOpts []challenger.Option +type HonestActorConfig struct { + PrestateBlock uint64 + PoststateBlock uint64 + ChallengerOpts []challenger.Option } -type HonestActorOpt func(cfg *honestActorConfig) +type HonestActorOpt func(cfg *HonestActorConfig) func WithClaimedL2BlockNumber(num uint64) HonestActorOpt { - return func(cfg *honestActorConfig) { - cfg.poststateBlock = num + return func(cfg *HonestActorConfig) { + cfg.PoststateBlock = num } } func WithPrivKey(privKey *ecdsa.PrivateKey) HonestActorOpt { - return func(cfg *honestActorConfig) { - cfg.challengerOpts = append(cfg.challengerOpts, challenger.WithPrivKey(privKey)) + return func(cfg *HonestActorConfig) { + cfg.ChallengerOpts = append(cfg.ChallengerOpts, challenger.WithPrivKey(privKey)) } } @@ -75,21 +75,21 @@ func (g *OutputCannonGameHelper) CreateHonestActor(ctx context.Context, l2Node s g.Require.NoError(err, "Failed to load block range") splitDepth := g.SplitDepth(ctx) rollupClient := g.System.RollupClient(l2Node) - actorCfg := &honestActorConfig{ - prestateBlock: realPrestateBlock, - poststateBlock: realPostStateBlock, - challengerOpts: g.defaultChallengerOptions(), + actorCfg := &HonestActorConfig{ + PrestateBlock: realPrestateBlock, + PoststateBlock: realPostStateBlock, + ChallengerOpts: g.defaultChallengerOptions(), } for _, option := range options { option(actorCfg) } - cfg := challenger.NewChallengerConfig(g.T, g.System, l2Node, actorCfg.challengerOpts...) + cfg := challenger.NewChallengerConfig(g.T, g.System, l2Node, actorCfg.ChallengerOpts...) 
dir := filepath.Join(cfg.Datadir, "honest") - prestateProvider := outputs.NewPrestateProvider(rollupClient, actorCfg.prestateBlock) + prestateProvider := outputs.NewPrestateProvider(rollupClient, actorCfg.PrestateBlock) l1Head := g.GetL1Head(ctx) accessor, err := outputs.NewOutputCannonTraceAccessor( - logger, metrics.NoopMetrics, cfg.Cannon, vm.NewOpProgramServerExecutor(), l2Client, prestateProvider, cfg.CannonAbsolutePreState, rollupClient, dir, l1Head, splitDepth, actorCfg.prestateBlock, actorCfg.poststateBlock) + logger, metrics.NoopMetrics, cfg.Cannon, vm.NewOpProgramServerExecutor(), l2Client, prestateProvider, cfg.CannonAbsolutePreState, rollupClient, dir, l1Head, splitDepth, actorCfg.PrestateBlock, actorCfg.PoststateBlock) g.Require.NoError(err, "Failed to create output cannon trace accessor") return NewOutputHonestHelper(g.T, g.Require, &g.OutputGameHelper, g.Game, accessor) } From 12a38d0d1745f0986d59d0dbbd438e1a4f36a271 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Thu, 26 Sep 2024 11:00:48 +1000 Subject: [PATCH 035/116] op-challenger: Skip prestate verifications for the permissioned game. (#12140) Simplifies deployment with minimal risk given that only permissioned actors are involved in the game and typically the challenger is only resolving games. 
--- op-challenger/game/fault/register_task.go | 16 ++++++--- op-e2e/e2eutils/challenger/helper.go | 21 +++++++++++- op-e2e/e2eutils/disputegame/helper.go | 40 ++++++++++++++++++----- op-e2e/faultproofs/permissioned_test.go | 35 ++++++++++++++++++++ 4 files changed, 99 insertions(+), 13 deletions(-) create mode 100644 op-e2e/faultproofs/permissioned_test.go diff --git a/op-challenger/game/fault/register_task.go b/op-challenger/game/fault/register_task.go index 3b438ad8eea2..dd346f42077f 100644 --- a/op-challenger/game/fault/register_task.go +++ b/op-challenger/game/fault/register_task.go @@ -30,7 +30,8 @@ import ( ) type RegisterTask struct { - gameType faultTypes.GameType + gameType faultTypes.GameType + skipPrestateValidation bool getPrestateProvider func(prestateHash common.Hash) (faultTypes.PrestateProvider, error) newTraceAccessor func( @@ -51,6 +52,10 @@ func NewCannonRegisterTask(gameType faultTypes.GameType, cfg *config.Config, m c stateConverter := cannon.NewStateConverter() return &RegisterTask{ gameType: gameType, + // Don't validate the absolute prestate or genesis output root for permissioned games + // Only trusted actors participate in these games so they aren't expected to reach the step() call and + // are often configured without valid prestates but the challenger should still resolve the games. 
+ skipPrestateValidation: gameType == faultTypes.PermissionedGameType, getPrestateProvider: cachePrestates( gameType, stateConverter, @@ -244,9 +249,12 @@ func (e *RegisterTask) Register( } return accessor, nil } - prestateValidator := NewPrestateValidator(e.gameType.String(), contract.GetAbsolutePrestateHash, vmPrestateProvider) - startingValidator := NewPrestateValidator("output root", contract.GetStartingRootHash, prestateProvider) - return NewGamePlayer(ctx, systemClock, l1Clock, logger, m, dir, game.Proxy, txSender, contract, syncValidator, []Validator{prestateValidator, startingValidator}, creator, l1HeaderSource, selective, claimants) + var validators []Validator + if !e.skipPrestateValidation { + validators = append(validators, NewPrestateValidator(e.gameType.String(), contract.GetAbsolutePrestateHash, vmPrestateProvider)) + validators = append(validators, NewPrestateValidator("output root", contract.GetStartingRootHash, prestateProvider)) + } + return NewGamePlayer(ctx, systemClock, l1Clock, logger, m, dir, game.Proxy, txSender, contract, syncValidator, validators, creator, l1HeaderSource, selective, claimants) } err := registerOracle(ctx, m, oracles, gameFactory, caller, e.gameType) if err != nil { diff --git a/op-e2e/e2eutils/challenger/helper.go b/op-e2e/e2eutils/challenger/helper.go index 87a51d96a5f0..177fd90a9bf0 100644 --- a/op-e2e/e2eutils/challenger/helper.go +++ b/op-e2e/e2eutils/challenger/helper.go @@ -58,7 +58,7 @@ func NewHelper(log log.Logger, t *testing.T, require *require.Assertions, dir st } } -type Option func(config2 *config.Config) +type Option func(c *config.Config) func WithFactoryAddress(addr common.Address) Option { return func(c *config.Config) { @@ -84,6 +84,18 @@ func WithPollInterval(pollInterval time.Duration) Option { } } +func WithValidPrestateRequired() Option { + return func(c *config.Config) { + c.AllowInvalidPrestate = false + } +} + +func WithInvalidCannonPrestate() Option { + return func(c *config.Config) { + 
c.CannonAbsolutePreState = "/tmp/not-a-real-prestate.foo" + } +} + // FindMonorepoRoot finds the relative path to the monorepo root // Different tests might be nested in subdirectories of the op-e2e dir. func FindMonorepoRoot(t *testing.T) string { @@ -136,6 +148,13 @@ func WithCannon(t *testing.T, rollupCfg *rollup.Config, l2Genesis *core.Genesis) } } +func WithPermissioned(t *testing.T, rollupCfg *rollup.Config, l2Genesis *core.Genesis) Option { + return func(c *config.Config) { + c.TraceTypes = append(c.TraceTypes, types.TraceTypePermissioned) + applyCannonConfig(c, t, rollupCfg, l2Genesis) + } +} + func WithAlphabet() Option { return func(c *config.Config) { c.TraceTypes = append(c.TraceTypes, types.TraceTypeAlphabet) diff --git a/op-e2e/e2eutils/disputegame/helper.go b/op-e2e/e2eutils/disputegame/helper.go index 972314c4c762..7651d0941d67 100644 --- a/op-e2e/e2eutils/disputegame/helper.go +++ b/op-e2e/e2eutils/disputegame/helper.go @@ -41,8 +41,9 @@ var ( ) const ( - cannonGameType uint32 = 0 - alphabetGameType uint32 = 255 + cannonGameType uint32 = 0 + permissionedGameType uint32 = 1 + alphabetGameType uint32 = 255 ) type GameCfg struct { @@ -95,13 +96,28 @@ type FactoryHelper struct { Factory *bindings.DisputeGameFactory } -func NewFactoryHelper(t *testing.T, ctx context.Context, system DisputeSystem) *FactoryHelper { +type FactoryCfg struct { + PrivKey *ecdsa.PrivateKey +} + +type FactoryOption func(c *FactoryCfg) + +func WithFactoryPrivKey(privKey *ecdsa.PrivateKey) FactoryOption { + return func(c *FactoryCfg) { + c.PrivKey = privKey + } +} + +func NewFactoryHelper(t *testing.T, ctx context.Context, system DisputeSystem, opts ...FactoryOption) *FactoryHelper { require := require.New(t) client := system.NodeClient("l1") chainID, err := client.ChainID(ctx) require.NoError(err) - privKey := TestKey - opts, err := bind.NewKeyedTransactorWithChainID(privKey, chainID) + factoryCfg := &FactoryCfg{PrivKey: TestKey} + for _, opt := range opts { + opt(factoryCfg) + 
} + txOpts, err := bind.NewKeyedTransactorWithChainID(factoryCfg.PrivKey, chainID) require.NoError(err) l1Deployments := system.L1Deployments() @@ -114,8 +130,8 @@ func NewFactoryHelper(t *testing.T, ctx context.Context, system DisputeSystem) * Require: require, System: system, Client: client, - Opts: opts, - PrivKey: privKey, + Opts: txOpts, + PrivKey: factoryCfg.PrivKey, Factory: factory, FactoryAddr: factoryAddr, } @@ -152,6 +168,14 @@ func (h *FactoryHelper) StartOutputCannonGameWithCorrectRoot(ctx context.Context } func (h *FactoryHelper) StartOutputCannonGame(ctx context.Context, l2Node string, l2BlockNumber uint64, rootClaim common.Hash, opts ...GameOpt) *OutputCannonGameHelper { + return h.startOutputCannonGameOfType(ctx, l2Node, l2BlockNumber, rootClaim, cannonGameType, opts...) +} + +func (h *FactoryHelper) StartPermissionedGame(ctx context.Context, l2Node string, l2BlockNumber uint64, rootClaim common.Hash, opts ...GameOpt) *OutputCannonGameHelper { + return h.startOutputCannonGameOfType(ctx, l2Node, l2BlockNumber, rootClaim, permissionedGameType, opts...) +} + +func (h *FactoryHelper) startOutputCannonGameOfType(ctx context.Context, l2Node string, l2BlockNumber uint64, rootClaim common.Hash, gameType uint32, opts ...GameOpt) *OutputCannonGameHelper { cfg := NewGameCfg(opts...) 
logger := testlog.Logger(h.T, log.LevelInfo).New("role", "OutputCannonGameHelper") rollupClient := h.System.RollupClient(l2Node) @@ -163,7 +187,7 @@ func (h *FactoryHelper) StartOutputCannonGame(ctx context.Context, l2Node string defer cancel() tx, err := transactions.PadGasEstimate(h.Opts, 2, func(opts *bind.TransactOpts) (*types.Transaction, error) { - return h.Factory.Create(opts, cannonGameType, rootClaim, extraData) + return h.Factory.Create(opts, gameType, rootClaim, extraData) }) h.Require.NoError(err, "create fault dispute game") rcpt, err := wait.ForReceiptOK(ctx, h.Client, tx.Hash()) diff --git a/op-e2e/faultproofs/permissioned_test.go b/op-e2e/faultproofs/permissioned_test.go new file mode 100644 index 000000000000..09c4646fe8ce --- /dev/null +++ b/op-e2e/faultproofs/permissioned_test.go @@ -0,0 +1,35 @@ +package faultproofs + +import ( + "context" + "testing" + + op_e2e "github.com/ethereum-optimism/optimism/op-e2e" + + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/disputegame" + "github.com/ethereum/go-ethereum/common" +) + +func TestPermissionedGameType(t *testing.T) { + op_e2e.InitParallel(t, op_e2e.UsesCannon) + + ctx := context.Background() + sys, _ := StartFaultDisputeSystem(t) + t.Cleanup(sys.Close) + + gameFactory := disputegame.NewFactoryHelper(t, ctx, sys, disputegame.WithFactoryPrivKey(sys.Cfg.Secrets.Proposer)) + + game := gameFactory.StartPermissionedGame(ctx, "sequencer", 1, common.Hash{0x01, 0xaa}) + + // Start a challenger with both cannon and alphabet support + gameFactory.StartChallenger(ctx, "TowerDefense", + challenger.WithValidPrestateRequired(), + challenger.WithInvalidCannonPrestate(), + challenger.WithPermissioned(t, sys.RollupConfig, sys.L2GenesisCfg), + challenger.WithPrivKey(sys.Cfg.Secrets.Alice), + ) + + // Wait for the challenger to respond + game.RootClaim(ctx).WaitForCounterClaim(ctx) +} From 42305d820f947e5f5d63e5c17f1b52650e6a948e Mon Sep 17 
00:00:00 2001 From: Inphi Date: Wed, 25 Sep 2024 21:08:13 -0400 Subject: [PATCH 036/116] cannon: Fix custom --help for multicannon subcommands (#12112) * multicannon: Display custom --help for missing flags * remove comment --- cannon/multicannon/load_elf.go | 12 +++++++++--- cannon/multicannon/run.go | 2 +- cannon/multicannon/witness.go | 10 ++++++++-- 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/cannon/multicannon/load_elf.go b/cannon/multicannon/load_elf.go index cbe1fda46303..b34b202b220a 100644 --- a/cannon/multicannon/load_elf.go +++ b/cannon/multicannon/load_elf.go @@ -4,17 +4,17 @@ import ( "fmt" "os" - "github.com/ethereum-optimism/optimism/cannon/cmd" "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" "github.com/urfave/cli/v2" ) func LoadELF(ctx *cli.Context) error { - if len(os.Args) == 2 && os.Args[2] == "--help" { + if len(os.Args) == 3 && os.Args[2] == "--help" { if err := list(); err != nil { return err } fmt.Println("use `--type --help` to get more detailed help") + return nil } typ, err := parseFlag(os.Args[1:], "--type") @@ -28,4 +28,10 @@ func LoadELF(ctx *cli.Context) error { return ExecuteCannon(ctx.Context, os.Args[1:], ver) } -var LoadELFCommand = cmd.CreateLoadELFCommand(LoadELF) +var LoadELFCommand = &cli.Command{ + Name: "load-elf", + Usage: "Load ELF file into Cannon state", + Description: "Load ELF file into Cannon state", + Action: LoadELF, + SkipFlagParsing: true, +} diff --git a/cannon/multicannon/run.go b/cannon/multicannon/run.go index 532cf317fb21..7139436899ab 100644 --- a/cannon/multicannon/run.go +++ b/cannon/multicannon/run.go @@ -16,6 +16,7 @@ func Run(ctx *cli.Context) error { return err } fmt.Println("use `--input --help` to get more detailed help") + return nil } inputPath, err := parsePathFlag(os.Args[1:], "--input") @@ -29,7 +30,6 @@ func Run(ctx *cli.Context) error { return ExecuteCannon(ctx.Context, os.Args[1:], version) } -// var RunCommand = cmd.CreateRunCommand(Run) var RunCommand = 
&cli.Command{ Name: "run", Usage: "Run VM step(s) and generate proof data to replicate onchain.", diff --git a/cannon/multicannon/witness.go b/cannon/multicannon/witness.go index 077d0d3f1aed..2c72ebecf56a 100644 --- a/cannon/multicannon/witness.go +++ b/cannon/multicannon/witness.go @@ -6,7 +6,6 @@ import ( "github.com/urfave/cli/v2" - "github.com/ethereum-optimism/optimism/cannon/cmd" "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" ) @@ -16,6 +15,7 @@ func Witness(ctx *cli.Context) error { return err } fmt.Println("use `--input --help` to get more detailed help") + return nil } inputPath, err := parsePathFlag(os.Args[1:], "--input") @@ -29,4 +29,10 @@ func Witness(ctx *cli.Context) error { return ExecuteCannon(ctx.Context, os.Args[1:], version) } -var WitnessCommand = cmd.CreateWitnessCommand(Witness) +var WitnessCommand = &cli.Command{ + Name: "witness", + Usage: "Convert a Cannon JSON state into a binary witness", + Description: "Convert a Cannon JSON state into a binary witness. 
The hash of the witness is written to stdout", + Action: Witness, + SkipFlagParsing: true, +} From 46807900c430f95de60fb499173f909c4775feb6 Mon Sep 17 00:00:00 2001 From: Inphi Date: Wed, 25 Sep 2024 21:08:46 -0400 Subject: [PATCH 037/116] cannon: update phony makefile targets (#12109) --- cannon/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cannon/Makefile b/cannon/Makefile index e914ad542eae..7dfc39c44023 100644 --- a/cannon/Makefile +++ b/cannon/Makefile @@ -60,6 +60,8 @@ fuzz: .PHONY: \ cannon \ + cannon-impl \ + cannon-embeds \ clean \ test \ lint \ From 4a608f64c6edec8e976adf847e0481d1544d0e97 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Thu, 26 Sep 2024 13:56:13 +0800 Subject: [PATCH 038/116] move AttachEmitter block inside nil check (#12144) --- op-node/rollup/event/system.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/op-node/rollup/event/system.go b/op-node/rollup/event/system.go index 566f28fdbe40..d0ae0e3d02d4 100644 --- a/op-node/rollup/event/system.go +++ b/op-node/rollup/event/system.go @@ -147,12 +147,13 @@ func (s *Sys) Register(name string, deriver Deriver, opts *RegisterOpts) Emitter } }) } - // If it can emit, attach an emitter to it - if attachTo, ok := deriver.(AttachEmitter); ok { - attachTo.AttachEmitter(em) - } + // If it can derive, add it to the executor (and only after attaching the emitter) if deriver != nil { + // If it can emit, attach an emitter to it + if attachTo, ok := deriver.(AttachEmitter); ok { + attachTo.AttachEmitter(em) + } r.leaveExecutor = s.executor.Add(r, &opts.Executor) } return em From d42fc0b9dc580616d89f0fc420fccde604a73233 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Thu, 26 Sep 2024 19:50:27 +0800 Subject: [PATCH 039/116] op-batcher,op-e2e: replace magic numbers like 6 with consts, eg MaxBlobsPerBlobTx (#11842) * remove some magic numbers * also check TargetNumFrames <= 6 for auto DA * define eth.MaxBlobsPerBlobTx and replace 
magic 6 * also change the original params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob --- op-batcher/batcher/channel_test.go | 2 +- op-batcher/batcher/config.go | 10 ++++++---- op-batcher/batcher/config_test.go | 8 +++++--- op-e2e/actions/batcher/eip4844_test.go | 5 +++-- op-e2e/actions/helpers/l2_batcher.go | 4 ++-- op-e2e/system/da/eip4844_test.go | 13 +++++++------ op-service/eth/blob.go | 12 +++++++----- 7 files changed, 31 insertions(+), 23 deletions(-) diff --git a/op-batcher/batcher/channel_test.go b/op-batcher/batcher/channel_test.go index 8dec9d9e108b..3585ea8b99f6 100644 --- a/op-batcher/batcher/channel_test.go +++ b/op-batcher/batcher/channel_test.go @@ -160,7 +160,7 @@ func TestChannel_NextTxData_singleFrameTx(t *testing.T) { func TestChannel_NextTxData_multiFrameTx(t *testing.T) { require := require.New(t) - const n = 6 + const n = eth.MaxBlobsPerBlobTx lgr := testlog.Logger(t, log.LevelWarn) ch, err := newChannel(lgr, metrics.NoopMetrics, ChannelConfig{ UseBlobs: true, diff --git a/op-batcher/batcher/config.go b/op-batcher/batcher/config.go index 250d893e2a71..ac8bad7791a7 100644 --- a/op-batcher/batcher/config.go +++ b/op-batcher/batcher/config.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum-optimism/optimism/op-batcher/compressor" "github.com/ethereum-optimism/optimism/op-batcher/flags" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" oplog "github.com/ethereum-optimism/optimism/op-service/log" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" "github.com/ethereum-optimism/optimism/op-service/oppprof" @@ -135,18 +136,19 @@ func (c *CLIConfig) Check() error { if !derive.ValidCompressionAlgo(c.CompressionAlgo) { return fmt.Errorf("invalid compression algo %v", c.CompressionAlgo) } - if c.BatchType > 1 { + if c.BatchType > derive.SpanBatchType { return fmt.Errorf("unknown batch type: %v", c.BatchType) } if c.CheckRecentTxsDepth > 128 { return 
fmt.Errorf("CheckRecentTxsDepth cannot be set higher than 128: %v", c.CheckRecentTxsDepth) } - if c.DataAvailabilityType == flags.BlobsType && c.TargetNumFrames > 6 { - return errors.New("too many frames for blob transactions, max 6") - } if !flags.ValidDataAvailabilityType(c.DataAvailabilityType) { return fmt.Errorf("unknown data availability type: %q", c.DataAvailabilityType) } + // we want to enforce it for both blobs and auto + if c.DataAvailabilityType != flags.CalldataType && c.TargetNumFrames > eth.MaxBlobsPerBlobTx { + return fmt.Errorf("too many frames for blob transactions, max %d", eth.MaxBlobsPerBlobTx) + } if err := c.MetricsConfig.Check(); err != nil { return err } diff --git a/op-batcher/batcher/config_test.go b/op-batcher/batcher/config_test.go index f8fb08a703da..4b90ebaccb68 100644 --- a/op-batcher/batcher/config_test.go +++ b/op-batcher/batcher/config_test.go @@ -1,6 +1,7 @@ package batcher_test import ( + "fmt" "testing" "time" @@ -8,6 +9,7 @@ import ( "github.com/ethereum-optimism/optimism/op-batcher/compressor" "github.com/ethereum-optimism/optimism/op-batcher/flags" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/log" "github.com/ethereum-optimism/optimism/op-service/metrics" "github.com/ethereum-optimism/optimism/op-service/oppprof" @@ -98,12 +100,12 @@ func TestBatcherConfig(t *testing.T) { errString: "TargetNumFrames must be at least 1", }, { - name: "larger 6 TargetNumFrames for blobs", + name: fmt.Sprintf("larger %d TargetNumFrames for blobs", eth.MaxBlobsPerBlobTx), override: func(c *batcher.CLIConfig) { - c.TargetNumFrames = 7 + c.TargetNumFrames = eth.MaxBlobsPerBlobTx + 1 c.DataAvailabilityType = flags.BlobsType }, - errString: "too many frames for blob transactions, max 6", + errString: fmt.Sprintf("too many frames for blob transactions, max %d", eth.MaxBlobsPerBlobTx), }, { name: "invalid compr ratio for ratio 
compressor", diff --git a/op-e2e/actions/batcher/eip4844_test.go b/op-e2e/actions/batcher/eip4844_test.go index 6d77a3961788..1447a07a2076 100644 --- a/op-e2e/actions/batcher/eip4844_test.go +++ b/op-e2e/actions/batcher/eip4844_test.go @@ -14,6 +14,7 @@ import ( batcherFlags "github.com/ethereum-optimism/optimism/op-batcher/flags" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-node/rollup/sync" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testlog" ) @@ -104,10 +105,10 @@ func TestEIP4844MultiBlobs(gt *testing.T) { sequencer.ActBuildToL1Head(t) // submit all new L2 blocks - batcher.ActSubmitAllMultiBlobs(t, 6) + batcher.ActSubmitAllMultiBlobs(t, eth.MaxBlobsPerBlobTx) batchTx := batcher.LastSubmitted require.Equal(t, uint8(types.BlobTxType), batchTx.Type(), "batch tx must be blob-tx") - require.Len(t, batchTx.BlobTxSidecar().Blobs, 6) + require.Len(t, batchTx.BlobTxSidecar().Blobs, eth.MaxBlobsPerBlobTx) // new L1 block with L2 batch miner.ActL1StartBlock(12)(t) diff --git a/op-e2e/actions/helpers/l2_batcher.go b/op-e2e/actions/helpers/l2_batcher.go index d9e6fe3dbec5..352774a9968f 100644 --- a/op-e2e/actions/helpers/l2_batcher.go +++ b/op-e2e/actions/helpers/l2_batcher.go @@ -346,8 +346,8 @@ func (s *L2Batcher) ActL2BatchSubmitMultiBlob(t Testing, numBlobs int) { if s.l2BatcherCfg.DataAvailabilityType != batcherFlags.BlobsType { t.InvalidAction("ActL2BatchSubmitMultiBlob only available for Blobs DA type") return - } else if numBlobs > 6 || numBlobs < 1 { - t.InvalidAction("invalid number of blobs %d, must be within [1,6]", numBlobs) + } else if numBlobs > eth.MaxBlobsPerBlobTx || numBlobs < 1 { + t.InvalidAction("invalid number of blobs %d, must be within [1,%d]", numBlobs, eth.MaxBlobsPerBlobTx) } // Don't run this action if there's no data to submit diff --git a/op-e2e/system/da/eip4844_test.go b/op-e2e/system/da/eip4844_test.go index 
332da11f9d6f..27cc1db0d21d 100644 --- a/op-e2e/system/da/eip4844_test.go +++ b/op-e2e/system/da/eip4844_test.go @@ -2,6 +2,7 @@ package da import ( "context" + "fmt" "math/big" "math/rand" "testing" @@ -50,12 +51,12 @@ func testSystem4844E2E(t *testing.T, multiBlob bool, daType batcherFlags.DataAva cfg.BatcherBatchType = derive.SpanBatchType cfg.DeployConfig.L1GenesisBlockBaseFeePerGas = (*hexutil.Big)(big.NewInt(7000)) - const maxBlobs = 6 + const maxBlobs = eth.MaxBlobsPerBlobTx var maxL1TxSize int if multiBlob { - cfg.BatcherTargetNumFrames = 6 + cfg.BatcherTargetNumFrames = eth.MaxBlobsPerBlobTx cfg.BatcherUseMaxTxSizeForBlobs = true - // leads to 6 blobs for an L2 block with a user tx with 400 random bytes + // leads to eth.MaxBlobsPerBlobTx blobs for an L2 block with a user tx with 400 random bytes // while all other L2 blocks take 1 blob (deposit tx) maxL1TxSize = derive.FrameV0OverHeadSize + 100 cfg.BatcherMaxL1TxSizeBytes = uint64(maxL1TxSize) @@ -129,7 +130,7 @@ func testSystem4844E2E(t *testing.T, multiBlob bool, daType batcherFlags.DataAva opts.Value = big.NewInt(1_000_000_000) opts.Nonce = 1 // Already have deposit opts.ToAddr = &common.Address{0xff, 0xff} - // put some random data in the tx to make it fill up 6 blobs (multi-blob case) + // put some random data in the tx to make it fill up eth.MaxBlobsPerBlobTx blobs (multi-blob case) opts.Data = testutils.RandomData(rand.New(rand.NewSource(420)), 400) opts.Gas, err = core.IntrinsicGas(opts.Data, nil, false, true, true, false) require.NoError(t, err) @@ -207,7 +208,7 @@ func testSystem4844E2E(t *testing.T, multiBlob bool, daType batcherFlags.DataAva if !multiBlob { require.NotZero(t, numBlobs, "single-blob: expected to find L1 blob tx") } else { - require.Equal(t, maxBlobs, numBlobs, "multi-blob: expected to find L1 blob tx with 6 blobs") + require.Equal(t, maxBlobs, numBlobs, fmt.Sprintf("multi-blob: expected to find L1 blob tx with %d blobs", eth.MaxBlobsPerBlobTx)) // blob tx should have filled up 
all but last blob bcl := sys.L1BeaconHTTPClient() hashes := toIndexedBlobHashes(blobTx.BlobHashes()...) @@ -255,7 +256,7 @@ func TestBatcherAutoDA(t *testing.T) { cfg.DeployConfig.L1GenesisBlockGasLimit = 2_500_000 // low block gas limit to drive up gas price more quickly t.Logf("L1BlockTime: %d, L2BlockTime: %d", cfg.DeployConfig.L1BlockTime, cfg.DeployConfig.L2BlockTime) - cfg.BatcherTargetNumFrames = 6 + cfg.BatcherTargetNumFrames = eth.MaxBlobsPerBlobTx sys, err := cfg.Start(t) require.NoError(t, err, "Error starting up system") diff --git a/op-service/eth/blob.go b/op-service/eth/blob.go index 9e51c568634f..b7cf4524a48a 100644 --- a/op-service/eth/blob.go +++ b/op-service/eth/blob.go @@ -9,14 +9,16 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/params" ) const ( - BlobSize = 4096 * 32 - MaxBlobDataSize = (4*31+3)*1024 - 4 - EncodingVersion = 0 - VersionOffset = 1 // offset of the version byte in the blob encoding - Rounds = 1024 // number of encode/decode rounds + BlobSize = 4096 * 32 + MaxBlobDataSize = (4*31+3)*1024 - 4 + EncodingVersion = 0 + VersionOffset = 1 // offset of the version byte in the blob encoding + Rounds = 1024 // number of encode/decode rounds + MaxBlobsPerBlobTx = params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob ) var ( From 300d9f335726b0608b50d478aa65cb30a4fd8bd2 Mon Sep 17 00:00:00 2001 From: Maurelian Date: Thu, 26 Sep 2024 10:35:33 -0400 Subject: [PATCH 040/116] feat(deploy): deploy implementations before proxies (#12114) * feat(deploy): deploy implementations before proxies This change prepares for the OPCM integration by deploying the implementations before the proxies. `intializeImplementations()` was renamed to `initializeProxies()` which is more accurate. Also the ASR's implementation was unnecessarily initialized with a proxy address for a constructor argument. 
* test(deploy): Move ASR impl deployment to deployProxies It must be deployed per chain because it is not MCP ready. * test(deploy): Rename deploy functions to correspond to OPCM --- .../scripts/deploy/Deploy.s.sol | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index 5456b88492d5..0787c965e199 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -398,9 +398,10 @@ contract Deploy is Deployer { mustGetAddress("AddressManager"); mustGetAddress("ProxyAdmin"); - deployProxies(); deployImplementations(); - initializeImplementations(); + + deployOpChain(); + initializeOpChain(); setAlphabetFaultGameImplementation({ _allowUpgrade: false }); setFastFaultGameImplementation({ _allowUpgrade: false }); @@ -411,9 +412,9 @@ contract Deploy is Deployer { transferDelayedWETHOwnership(); } - /// @notice Deploy all of the proxies - function deployProxies() public { - console.log("Deploying proxies"); + /// @notice Deploy all of the OP Chain specific contracts + function deployOpChain() public { + console.log("Deploying OP Chain contracts"); deployERC1967Proxy("OptimismPortalProxy"); deployERC1967Proxy("SystemConfigProxy"); @@ -431,6 +432,8 @@ contract Deploy is Deployer { deployERC1967Proxy("PermissionedDelayedWETHProxy"); deployERC1967Proxy("AnchorStateRegistryProxy"); + deployAnchorStateRegistry(); + transferAddressManagerOwnership(); // to the ProxyAdmin } @@ -450,12 +453,12 @@ contract Deploy is Deployer { deployDelayedWETH(); deployPreimageOracle(); deployMips(); - deployAnchorStateRegistry(); } - /// @notice Initialize all of the implementations - function initializeImplementations() public { - console.log("Initializing implementations"); + /// @notice Initialize all of the proxies in an OP Chain by upgrading to the correct proxy and calling the + /// 
initialize function + function initializeOpChain() public { + console.log("Initializing Op Chain proxies"); // Selectively initialize either the original OptimismPortal or the new OptimismPortal2. Since this will upgrade // the proxy, we cannot initialize both. if (cfg.useFaultProofs()) { From 087bb74606446a6ba1271b19daefe4d2878dc471 Mon Sep 17 00:00:00 2001 From: Blaine Malone Date: Thu, 26 Sep 2024 11:02:53 -0400 Subject: [PATCH 041/116] feat: Sepolia Standard Versions OPCM (#12121) * feat: Sepolia Standard Versions OPCM * fix: reran make sync-standard-version * fix: passing through Standard Versions mainnet in tests. --- op-chain-ops/Makefile | 3 +- op-chain-ops/deployer/opcm/implementations.go | 2 +- .../opcm/standard-versions-mainnet.toml | 45 +++++++++++++++++++ .../opcm/standard-versions-sepolia.toml | 23 ++++++++++ op-chain-ops/deployer/opcm/standard.go | 7 ++- .../deployer/pipeline/implementations.go | 2 +- op-chain-ops/interopgen/configs.go | 2 +- op-chain-ops/interopgen/deploy.go | 2 +- op-chain-ops/interopgen/recipe.go | 2 +- 9 files changed, 80 insertions(+), 8 deletions(-) create mode 100644 op-chain-ops/deployer/opcm/standard-versions-mainnet.toml create mode 100644 op-chain-ops/deployer/opcm/standard-versions-sepolia.toml diff --git a/op-chain-ops/Makefile b/op-chain-ops/Makefile index fd3cc9ad67b3..6c4b57652855 100644 --- a/op-chain-ops/Makefile +++ b/op-chain-ops/Makefile @@ -46,6 +46,7 @@ fuzz: sync-standard-version: - curl -Lo ./deployer/opcm/standard-versions.toml https://raw.githubusercontent.com/ethereum-optimism/superchain-registry/refs/heads/main/validation/standard/standard-versions.toml + curl -Lo ./deployer/opcm/standard-versions-mainnet.toml https://raw.githubusercontent.com/ethereum-optimism/superchain-registry/refs/heads/main/validation/standard/standard-versions-mainnet.toml + curl -Lo ./deployer/opcm/standard-versions-sepolia.toml 
https://raw.githubusercontent.com/ethereum-optimism/superchain-registry/refs/heads/main/validation/standard/standard-versions-sepolia.toml .PHONY: test fuzz op-deployer sync-standard-version \ No newline at end of file diff --git a/op-chain-ops/deployer/opcm/implementations.go b/op-chain-ops/deployer/opcm/implementations.go index fec30d94cbd5..1d88c9b74398 100644 --- a/op-chain-ops/deployer/opcm/implementations.go +++ b/op-chain-ops/deployer/opcm/implementations.go @@ -23,7 +23,7 @@ type DeployImplementationsInput struct { UseInterop bool // if true, deploy Interop implementations SuperchainProxyAdmin common.Address - StandardVersionsToml string // contents of 'standard-versions.toml' file + StandardVersionsToml string // contents of 'standard-versions-mainnet.toml' or 'standard-versions-sepolia.toml' file } func (input *DeployImplementationsInput) InputSet() bool { diff --git a/op-chain-ops/deployer/opcm/standard-versions-mainnet.toml b/op-chain-ops/deployer/opcm/standard-versions-mainnet.toml new file mode 100644 index 000000000000..754e249dc0b1 --- /dev/null +++ b/op-chain-ops/deployer/opcm/standard-versions-mainnet.toml @@ -0,0 +1,45 @@ +[releases] + +# Contracts which are +# * unproxied singletons: specify a standard "address" +# * proxied : specify a standard "implementation_address" +# * neither : specify neither a standard "address" nor "implementation_address" + +# Fault Proofs https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.6.0 +[releases."op-contracts/v1.6.0"] +optimism_portal = { version = "3.10.0", implementation_address = "0xe2F826324b2faf99E513D16D266c3F80aE87832B" } +system_config = { version = "2.2.0", implementation_address = "0xF56D96B2535B932656d3c04Ebf51baBff241D886" } +anchor_state_registry = { version = "2.0.0" } +delayed_weth = { version = "1.1.0", implementation_address = "0x71e966Ae981d1ce531a7b6d23DC0f27B38409087" } +dispute_game_factory = { version = "1.0.0", implementation_address = 
"0xc641A33cab81C559F2bd4b21EA34C290E2440C2B" } +fault_dispute_game = { version = "1.3.0" } +permissioned_dispute_game = { version = "1.3.0" } +mips = { version = "1.1.0", address = "0x16e83cE5Ce29BF90AD9Da06D2fE6a15d5f344ce4" } +preimage_oracle = { version = "1.1.2", address = "0x9c065e11870B891D214Bc2Da7EF1f9DDFA1BE277" } +l1_cross_domain_messenger = { version = "2.3.0", implementation_address = "0xD3494713A5cfaD3F5359379DfA074E2Ac8C6Fd65" } +l1_erc721_bridge = { version = "2.1.0", implementation_address = "0xAE2AF01232a6c4a4d3012C5eC5b1b35059caF10d" } +l1_standard_bridge = { version = "2.1.0", implementation_address = "0x64B5a5Ed26DCb17370Ff4d33a8D503f0fbD06CfF" } +# l2_output_oracle -- This contract not used in fault proofs +optimism_mintable_erc20_factory = { version = "1.9.0", implementation_address = "0xE01efbeb1089D1d1dB9c6c8b135C934C0734c846" } + +# Fault Proofs https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.4.0 +[releases."op-contracts/v1.4.0"] +optimism_portal = { version = "3.10.0", implementation_address = "0xe2F826324b2faf99E513D16D266c3F80aE87832B" } +system_config = { version = "2.2.0", implementation_address = "0xF56D96B2535B932656d3c04Ebf51baBff241D886" } +anchor_state_registry = { version = "1.0.0" } +delayed_weth = { version = "1.0.0", implementation_address = "0x97988d5624F1ba266E1da305117BCf20713bee08" } +dispute_game_factory = { version = "1.0.0", implementation_address = "0xc641A33cab81C559F2bd4b21EA34C290E2440C2B" } +fault_dispute_game = { version = "1.2.0" } +permissioned_dispute_game = { version = "1.2.0" } +mips = { version = "1.0.1", address = "0x0f8EdFbDdD3c0256A80AD8C0F2560B1807873C9c" } +preimage_oracle = { version = "1.0.0", address = "0xD326E10B8186e90F4E2adc5c13a2d0C137ee8b34" } + +# MCP https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.3.0 +[releases."op-contracts/v1.3.0"] +l1_cross_domain_messenger = { version = "2.3.0", implementation_address = 
"0xD3494713A5cfaD3F5359379DfA074E2Ac8C6Fd65" } +l1_erc721_bridge = { version = "2.1.0", implementation_address = "0xAE2AF01232a6c4a4d3012C5eC5b1b35059caF10d" } +l1_standard_bridge = { version = "2.1.0", implementation_address = "0x64B5a5Ed26DCb17370Ff4d33a8D503f0fbD06CfF" } +l2_output_oracle = { version = "1.8.0", implementation_address = "0xF243BEd163251380e78068d317ae10f26042B292" } +optimism_mintable_erc20_factory = { version = "1.9.0", implementation_address = "0xE01efbeb1089D1d1dB9c6c8b135C934C0734c846" } +optimism_portal = { version = "2.5.0", implementation_address = "0x2D778797049FE9259d947D1ED8e5442226dFB589" } +system_config = { version = "1.12.0", implementation_address = "0xba2492e52F45651B60B8B38d4Ea5E2390C64Ffb1" } diff --git a/op-chain-ops/deployer/opcm/standard-versions-sepolia.toml b/op-chain-ops/deployer/opcm/standard-versions-sepolia.toml new file mode 100644 index 000000000000..277f9d096306 --- /dev/null +++ b/op-chain-ops/deployer/opcm/standard-versions-sepolia.toml @@ -0,0 +1,23 @@ +[releases] + +# Contracts which are +# * unproxied singletons: specify a standard "address" +# * proxied : specify a standard "implementation_address" +# * neither : specify neither a standard "address" nor "implementation_address" + +# Fault Proofs https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.6.0 +[releases."op-contracts/v1.6.0"] +optimism_portal = { version = "3.10.0", implementation_address = "0x35028bae87d71cbc192d545d38f960ba30b4b233" } +system_config = { version = "2.2.0", implementation_address = "0xCcdd86d581e40fb5a1C77582247BC493b6c8B169" } +anchor_state_registry = { version = "2.0.0" } +delayed_weth = { version = "1.1.0", implementation_address = "0x07f69b19532476c6cd03056d6bc3f1b110ab7538" } +dispute_game_factory = { version = "1.0.0", implementation_address = "0xa51bea7e4d34206c0bcb04a776292f2f19f0beec" } +fault_dispute_game = { version = "1.3.0" } +permissioned_dispute_game = { version = "1.3.0" } +mips = { version = 
"1.1.0", address = "0x47B0E34C1054009e696BaBAAd56165e1e994144d" } +preimage_oracle = { version = "1.1.2", address = "0x92240135b46fc1142dA181f550aE8f595B858854" } +l1_cross_domain_messenger = { version = "2.3.0", implementation_address = "0xD3494713A5cfaD3F5359379DfA074E2Ac8C6Fd65" } +l1_erc721_bridge = { version = "2.1.0", implementation_address = "0xae2af01232a6c4a4d3012c5ec5b1b35059caf10d" } +l1_standard_bridge = { version = "2.1.0", implementation_address = "0x64b5a5ed26dcb17370ff4d33a8d503f0fbd06cff" } +# l2_output_oracle -- This contract not used in fault proofs +optimism_mintable_erc20_factory = { version = "1.9.0", implementation_address = "0xe01efbeb1089d1d1db9c6c8b135c934c0734c846" } diff --git a/op-chain-ops/deployer/opcm/standard.go b/op-chain-ops/deployer/opcm/standard.go index 9f182ca4685c..c82e5de12a32 100644 --- a/op-chain-ops/deployer/opcm/standard.go +++ b/op-chain-ops/deployer/opcm/standard.go @@ -2,7 +2,10 @@ package opcm import "embed" -//go:embed standard-versions.toml -var StandardVersionsData string +//go:embed standard-versions-mainnet.toml +var StandardVersionsMainnetData string + +//go:embed standard-versions-sepolia.toml +var StandardVersionsSepoliaData string var _ embed.FS diff --git a/op-chain-ops/deployer/pipeline/implementations.go b/op-chain-ops/deployer/pipeline/implementations.go index 5c5a1e99287c..d54d64abc564 100644 --- a/op-chain-ops/deployer/pipeline/implementations.go +++ b/op-chain-ops/deployer/pipeline/implementations.go @@ -50,7 +50,7 @@ func DeployImplementations(ctx context.Context, env *Env, artifactsFS foundry.St SuperchainConfigProxy: st.SuperchainDeployment.SuperchainConfigProxyAddress, ProtocolVersionsProxy: st.SuperchainDeployment.ProtocolVersionsProxyAddress, SuperchainProxyAdmin: st.SuperchainDeployment.ProxyAdminAddress, - StandardVersionsToml: opcm.StandardVersionsData, + StandardVersionsToml: opcm.StandardVersionsMainnetData, UseInterop: false, }, ) diff --git a/op-chain-ops/interopgen/configs.go 
b/op-chain-ops/interopgen/configs.go index f40d29904c2f..946a60468b8a 100644 --- a/op-chain-ops/interopgen/configs.go +++ b/op-chain-ops/interopgen/configs.go @@ -40,7 +40,7 @@ type OPCMImplementationsConfig struct { UseInterop bool // to deploy Interop implementation contracts, instead of the regular ones. - StandardVersionsToml string // serialized string of superchain-registry 'standard-versions.toml' file + StandardVersionsToml string // serialized string of superchain-registry 'standard-versions-mainnet.toml' file } type SuperchainConfig struct { diff --git a/op-chain-ops/interopgen/deploy.go b/op-chain-ops/interopgen/deploy.go index be837484e512..8958456f0b6e 100644 --- a/op-chain-ops/interopgen/deploy.go +++ b/op-chain-ops/interopgen/deploy.go @@ -171,7 +171,7 @@ func deploySuperchainToL1(l1Host *script.Host, superCfg *SuperchainConfig) (*Sup ProtocolVersionsProxy: superDeployment.ProtocolVersionsProxy, SuperchainProxyAdmin: superDeployment.SuperchainProxyAdmin, UseInterop: superCfg.Implementations.UseInterop, - StandardVersionsToml: opcm.StandardVersionsData, + StandardVersionsToml: opcm.StandardVersionsMainnetData, }) if err != nil { return nil, fmt.Errorf("failed to deploy Implementations contracts: %w", err) diff --git a/op-chain-ops/interopgen/recipe.go b/op-chain-ops/interopgen/recipe.go index eea42b87e0a4..53f27a3af36e 100644 --- a/op-chain-ops/interopgen/recipe.go +++ b/op-chain-ops/interopgen/recipe.go @@ -77,7 +77,7 @@ func (r *InteropDevRecipe) Build(addrs devkeys.Addresses) (*WorldConfig, error) DisputeGameFinalityDelaySeconds: big.NewInt(6), }, UseInterop: true, - StandardVersionsToml: opcm.StandardVersionsData, + StandardVersionsToml: opcm.StandardVersionsMainnetData, }, SuperchainL1DeployConfig: genesis.SuperchainL1DeployConfig{ RequiredProtocolVersion: params.OPStackSupport, From e1fe38e4f7a11671bf6dbe56c84b0927d3bc3aaf Mon Sep 17 00:00:00 2001 From: Chen Kai <281165273grape@gmail.com> Date: Thu, 26 Sep 2024 23:24:04 +0800 Subject: [PATCH 
042/116] MT Cannon: add more cannon operator instr tests (#12104) * feat:add more cannon operator instr tests Signed-off-by: Chen Kai <281165273grape@gmail.com> * feat:code review suggestion fix Signed-off-by: Chen Kai <281165273grape@gmail.com> --------- Signed-off-by: Chen Kai <281165273grape@gmail.com> --- cannon/mipsevm/tests/evm_common_test.go | 48 ++++++++++++++----------- 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/cannon/mipsevm/tests/evm_common_test.go b/cannon/mipsevm/tests/evm_common_test.go index 21aea97a7a14..ad93014450dd 100644 --- a/cannon/mipsevm/tests/evm_common_test.go +++ b/cannon/mipsevm/tests/evm_common_test.go @@ -162,25 +162,36 @@ func TestEVMSingleStep_Jump(t *testing.T) { } } -func TestEVMSingleStep_Add(t *testing.T) { +func TestEVMSingleStep_Operators(t *testing.T) { var tracer *tracing.Hooks versions := GetMipsVersionTestCases(t) cases := []struct { name string - insn uint32 - ifImm bool + isImm bool rs uint32 rt uint32 imm uint16 - expectRD uint32 - expectImm uint32 + funct uint32 + opcode uint32 + expectRes uint32 }{ - {name: "add", insn: 0x02_32_40_20, ifImm: false, rs: uint32(12), rt: uint32(20), expectRD: uint32(32)}, // add t0, s1, s2 - {name: "addu", insn: 0x02_32_40_21, ifImm: false, rs: uint32(12), rt: uint32(20), expectRD: uint32(32)}, // addu t0, s1, s2 - {name: "addi", insn: 0x22_28_00_28, ifImm: true, rs: uint32(4), rt: uint32(1), imm: uint16(40), expectImm: uint32(44)}, // addi t0, s1, 40 - {name: "addi sign", insn: 0x22_28_ff_fe, ifImm: true, rs: uint32(2), rt: uint32(1), imm: uint16(0xfffe), expectImm: uint32(0)}, // addi t0, s1, -2 - {name: "addiu", insn: 0x26_28_00_28, ifImm: true, rs: uint32(4), rt: uint32(1), imm: uint16(40), expectImm: uint32(44)}, // addiu t0, s1, 40 + {name: "add", funct: 0x20, isImm: false, rs: uint32(12), rt: uint32(20), expectRes: uint32(32)}, // add t0, s1, s2 + {name: "addu", funct: 0x21, isImm: false, rs: uint32(12), rt: uint32(20), expectRes: uint32(32)}, // addu t0, s1, s2 
+ {name: "addi", opcode: 0x8, isImm: true, rs: uint32(4), rt: uint32(1), imm: uint16(40), expectRes: uint32(44)}, // addi t0, s1, 40 + {name: "addi sign", opcode: 0x8, isImm: true, rs: uint32(2), rt: uint32(1), imm: uint16(0xfffe), expectRes: uint32(0)}, // addi t0, s1, -2 + {name: "addiu", opcode: 0x9, isImm: true, rs: uint32(4), rt: uint32(1), imm: uint16(40), expectRes: uint32(44)}, // addiu t0, s1, 40 + {name: "sub", funct: 0x22, isImm: false, rs: uint32(20), rt: uint32(12), expectRes: uint32(8)}, // sub t0, s1, s2 + {name: "subu", funct: 0x23, isImm: false, rs: uint32(20), rt: uint32(12), expectRes: uint32(8)}, // subu t0, s1, s2 + {name: "and", funct: 0x24, isImm: false, rs: uint32(1200), rt: uint32(490), expectRes: uint32(160)}, // and t0, s1, s2 + {name: "andi", opcode: 0xc, isImm: true, rs: uint32(4), rt: uint32(1), imm: uint16(40), expectRes: uint32(0)}, // andi t0, s1, 40 + {name: "or", funct: 0x25, isImm: false, rs: uint32(1200), rt: uint32(490), expectRes: uint32(1530)}, // or t0, s1, s2 + {name: "ori", opcode: 0xd, isImm: true, rs: uint32(4), rt: uint32(1), imm: uint16(40), expectRes: uint32(44)}, // ori t0, s1, 40 + {name: "xor", funct: 0x26, isImm: false, rs: uint32(1200), rt: uint32(490), expectRes: uint32(1370)}, // xor t0, s1, s2 + {name: "xori", opcode: 0xe, isImm: true, rs: uint32(4), rt: uint32(1), imm: uint16(40), expectRes: uint32(44)}, // xori t0, s1, 40 + {name: "nor", funct: 0x27, isImm: false, rs: uint32(1200), rt: uint32(490), expectRes: uint32(4294965765)}, // nor t0, s1, s2 + {name: "slt", funct: 0x2a, isImm: false, rs: 0xFF_FF_FF_FE, rt: uint32(5), expectRes: uint32(1)}, // slt t0, s1, s2 + {name: "sltu", funct: 0x2b, isImm: false, rs: uint32(1200), rt: uint32(490), expectRes: uint32(0)}, // sltu t0, s1, s2 } for _, v := range versions { @@ -189,14 +200,17 @@ func TestEVMSingleStep_Add(t *testing.T) { t.Run(testName, func(t *testing.T) { goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), 
testutil.WithRandomization(int64(i)), testutil.WithPC(0), testutil.WithNextPC(4)) state := goVm.GetState() - if tt.ifImm { + var insn uint32 + if tt.isImm { + insn = tt.opcode<<26 | uint32(17)<<21 | uint32(8)<<16 | uint32(tt.imm) state.GetRegistersRef()[8] = tt.rt state.GetRegistersRef()[17] = tt.rs } else { + insn = uint32(17)<<21 | uint32(18)<<16 | uint32(8)<<11 | tt.funct state.GetRegistersRef()[17] = tt.rs state.GetRegistersRef()[18] = tt.rt } - state.GetMemory().SetMemory(0, tt.insn) + state.GetMemory().SetMemory(0, insn) step := state.GetStep() // Setup expectations @@ -204,15 +218,7 @@ func TestEVMSingleStep_Add(t *testing.T) { expected.Step += 1 expected.PC = 4 expected.NextPC = 8 - - if tt.ifImm { - expected.Registers[8] = tt.expectImm - expected.Registers[17] = tt.rs - } else { - expected.Registers[8] = tt.expectRD - expected.Registers[17] = tt.rs - expected.Registers[18] = tt.rt - } + expected.Registers[8] = tt.expectRes stepWitness, err := goVm.Step(true) require.NoError(t, err) From de46c9af7c17700a973b73e674df5ae546381704 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Fri, 27 Sep 2024 02:57:23 +1000 Subject: [PATCH 043/116] proofs-tools: Update challenger to v1.1.2-rc.1 (#12143) --- docker-bake.hcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-bake.hcl b/docker-bake.hcl index b09495948745..5740590a95f2 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -206,7 +206,7 @@ target "proofs-tools" { dockerfile = "./ops/docker/proofs-tools/Dockerfile" context = "." 
args = { - CHALLENGER_VERSION="90700b9bb37080961747420882b14578577d47cc" + CHALLENGER_VERSION="v1.1.2-rc.1" KONA_VERSION="kona-client-v0.1.0-alpha.3" ASTERISC_VERSION="v1.0.2" } From c65c1f88570ee0a038e4f10261e0349a496996bd Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Fri, 27 Sep 2024 00:59:30 +0800 Subject: [PATCH 044/116] support EstimateGas for blob tx (#12086) --- op-service/txmgr/txmgr.go | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/op-service/txmgr/txmgr.go b/op-service/txmgr/txmgr.go index 6fa0ea5dc15a..e8e8ae1d1883 100644 --- a/op-service/txmgr/txmgr.go +++ b/op-service/txmgr/txmgr.go @@ -349,34 +349,39 @@ func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (* gasLimit := candidate.GasLimit + var sidecar *types.BlobTxSidecar + var blobHashes []common.Hash + if len(candidate.Blobs) > 0 { + if candidate.To == nil { + return nil, errors.New("blob txs cannot deploy contracts") + } + if sidecar, blobHashes, err = MakeSidecar(candidate.Blobs); err != nil { + return nil, fmt.Errorf("failed to make sidecar: %w", err) + } + } + // If the gas limit is set, we can use that as the gas if gasLimit == 0 { // Calculate the intrinsic gas for the transaction - gas, err := m.backend.EstimateGas(ctx, ethereum.CallMsg{ + callMsg := ethereum.CallMsg{ From: m.cfg.From, To: candidate.To, GasTipCap: gasTipCap, GasFeeCap: gasFeeCap, Data: candidate.TxData, Value: candidate.Value, - }) + } + if len(blobHashes) > 0 { + callMsg.BlobGasFeeCap = blobBaseFee + callMsg.BlobHashes = blobHashes + } + gas, err := m.backend.EstimateGas(ctx, callMsg) if err != nil { return nil, fmt.Errorf("failed to estimate gas: %w", errutil.TryAddRevertReason(err)) } gasLimit = gas } - var sidecar *types.BlobTxSidecar - var blobHashes []common.Hash - if len(candidate.Blobs) > 0 { - if candidate.To == nil { - return nil, errors.New("blob txs cannot deploy contracts") - } - if sidecar, blobHashes, err = 
MakeSidecar(candidate.Blobs); err != nil { - return nil, fmt.Errorf("failed to make sidecar: %w", err) - } - } - var txMessage types.TxData if sidecar != nil { if blobBaseFee == nil { From 3210a8c6bb7f98639213fb49718919d84aa8774a Mon Sep 17 00:00:00 2001 From: Disco <131301107+0xDiscotech@users.noreply.github.com> Date: Thu, 26 Sep 2024 14:07:21 -0300 Subject: [PATCH 045/116] chore: rename isthmus to interop on golang files (#12133) --- op-node/rollup/derive/attributes_test.go | 4 +-- op-node/rollup/derive/fuzz_parsers_test.go | 10 +++---- op-node/rollup/derive/l1_block_info.go | 30 ++++++++++----------- op-node/rollup/derive/l1_block_info_test.go | 18 ++++++------- 4 files changed, 31 insertions(+), 31 deletions(-) diff --git a/op-node/rollup/derive/attributes_test.go b/op-node/rollup/derive/attributes_test.go index 64fcec556343..26b4b1f28437 100644 --- a/op-node/rollup/derive/attributes_test.go +++ b/op-node/rollup/derive/attributes_test.go @@ -195,7 +195,7 @@ func TestPreparePayloadAttributes(t *testing.T) { require.Equal(t, l1InfoTx, []byte(attrs.Transactions[0])) require.True(t, attrs.NoTxPool) }) - t.Run("new origin with deposits on post-Isthmus", func(t *testing.T) { + t.Run("new origin with deposits on post-Interop", func(t *testing.T) { rng := rand.New(rand.NewSource(1234)) l1Fetcher := &testutils.MockL1Source{} defer l1Fetcher.AssertExpectations(t) @@ -247,7 +247,7 @@ func TestPreparePayloadAttributes(t *testing.T) { require.True(t, attrs.NoTxPool) }) - t.Run("same origin without deposits on post-Isthmus", func(t *testing.T) { + t.Run("same origin without deposits on post-Interop", func(t *testing.T) { rng := rand.New(rand.NewSource(1234)) l1Fetcher := &testutils.MockL1Source{} defer l1Fetcher.AssertExpectations(t) diff --git a/op-node/rollup/derive/fuzz_parsers_test.go b/op-node/rollup/derive/fuzz_parsers_test.go index 4f76c4ac7420..3c5275e501a6 100644 --- a/op-node/rollup/derive/fuzz_parsers_test.go +++ b/op-node/rollup/derive/fuzz_parsers_test.go @@ 
-93,16 +93,16 @@ func FuzzL1InfoEcotoneRoundTrip(f *testing.F) { if !cmp.Equal(in, out, cmp.Comparer(testutils.BigEqual)) { t.Fatalf("The Ecotone data did not round trip correctly. in: %v. out: %v", in, out) } - enc, err = in.marshalBinaryIsthmus() + enc, err = in.marshalBinaryInterop() if err != nil { - t.Fatalf("Failed to marshal Isthmus binary: %v", err) + t.Fatalf("Failed to marshal Interop binary: %v", err) } - err = out.unmarshalBinaryIsthmus(enc) + err = out.unmarshalBinaryInterop(enc) if err != nil { - t.Fatalf("Failed to unmarshal Isthmus binary: %v", err) + t.Fatalf("Failed to unmarshal Interop binary: %v", err) } if !cmp.Equal(in, out, cmp.Comparer(testutils.BigEqual)) { - t.Fatalf("The Isthmus data did not round trip correctly. in: %v. out: %v", in, out) + t.Fatalf("The Interop data did not round trip correctly. in: %v. out: %v", in, out) } }) diff --git a/op-node/rollup/derive/l1_block_info.go b/op-node/rollup/derive/l1_block_info.go index 43ea9b29bedc..a01fe5bca6b9 100644 --- a/op-node/rollup/derive/l1_block_info.go +++ b/op-node/rollup/derive/l1_block_info.go @@ -20,7 +20,7 @@ import ( const ( L1InfoFuncBedrockSignature = "setL1BlockValues(uint64,uint64,uint256,bytes32,uint64,bytes32,uint256,uint256)" L1InfoFuncEcotoneSignature = "setL1BlockValuesEcotone()" - L1InfoFuncIsthmusSignature = "setL1BlockValuesIsthmus()" + L1InfoFuncInteropSignature = "setL1BlockValuesInterop()" DepositsCompleteSignature = "depositsComplete()" L1InfoArguments = 8 L1InfoBedrockLen = 4 + 32*L1InfoArguments @@ -28,8 +28,8 @@ const ( DepositsCompleteLen = 4 // only the selector // DepositsCompleteGas allocates 21k gas for intrinsic tx costs, and // an additional 15k to ensure that the DepositsComplete call does not run out of gas. 
- // GasBenchMark_L1BlockIsthmus_DepositsComplete:test_depositsComplete_benchmark() (gas: 7768) - // GasBenchMark_L1BlockIsthmus_DepositsComplete_Warm:test_depositsComplete_benchmark() (gas: 5768) + // GasBenchMark_L1BlockInterop_DepositsComplete:test_depositsComplete_benchmark() (gas: 7768) + // GasBenchMark_L1BlockInterop_DepositsComplete_Warm:test_depositsComplete_benchmark() (gas: 5768) // see `test_depositsComplete_benchmark` at: `/packages/contracts-bedrock/test/BenchmarkTest.t.sol` DepositsCompleteGas = uint64(21_000 + 15_000) ) @@ -37,7 +37,7 @@ const ( var ( L1InfoFuncBedrockBytes4 = crypto.Keccak256([]byte(L1InfoFuncBedrockSignature))[:4] L1InfoFuncEcotoneBytes4 = crypto.Keccak256([]byte(L1InfoFuncEcotoneSignature))[:4] - L1InfoFuncIsthmusBytes4 = crypto.Keccak256([]byte(L1InfoFuncIsthmusSignature))[:4] + L1InfoFuncInteropBytes4 = crypto.Keccak256([]byte(L1InfoFuncInteropSignature))[:4] DepositsCompleteBytes4 = crypto.Keccak256([]byte(DepositsCompleteSignature))[:4] L1InfoDepositerAddress = common.HexToAddress("0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001") L1BlockAddress = predeploys.L1BlockAddr @@ -155,7 +155,7 @@ func (info *L1BlockInfo) unmarshalBinaryBedrock(data []byte) error { return nil } -// Isthmus & Ecotone Binary Format +// Interop & Ecotone Binary Format // +---------+--------------------------+ // | Bytes | Field | // +---------+--------------------------+ @@ -179,16 +179,16 @@ func (info *L1BlockInfo) marshalBinaryEcotone() ([]byte, error) { return out, nil } -func (info *L1BlockInfo) marshalBinaryIsthmus() ([]byte, error) { - out, err := marshalBinaryWithSignature(info, L1InfoFuncIsthmusBytes4) +func (info *L1BlockInfo) marshalBinaryInterop() ([]byte, error) { + out, err := marshalBinaryWithSignature(info, L1InfoFuncInteropBytes4) if err != nil { - return nil, fmt.Errorf("failed to marshal Isthmus l1 block info: %w", err) + return nil, fmt.Errorf("failed to marshal Interop l1 block info: %w", err) } return out, nil } func 
marshalBinaryWithSignature(info *L1BlockInfo, signature []byte) ([]byte, error) { - w := bytes.NewBuffer(make([]byte, 0, L1InfoEcotoneLen)) // Ecotone and Isthmus have the same length + w := bytes.NewBuffer(make([]byte, 0, L1InfoEcotoneLen)) // Ecotone and Interop have the same length if err := solabi.WriteSignature(w, signature); err != nil { return nil, err } @@ -231,8 +231,8 @@ func (info *L1BlockInfo) unmarshalBinaryEcotone(data []byte) error { return unmarshalBinaryWithSignatureAndData(info, L1InfoFuncEcotoneBytes4, data) } -func (info *L1BlockInfo) unmarshalBinaryIsthmus(data []byte) error { - return unmarshalBinaryWithSignatureAndData(info, L1InfoFuncIsthmusBytes4, data) +func (info *L1BlockInfo) unmarshalBinaryInterop(data []byte) error { + return unmarshalBinaryWithSignatureAndData(info, L1InfoFuncInteropBytes4, data) } func unmarshalBinaryWithSignatureAndData(info *L1BlockInfo, signature []byte, data []byte) error { @@ -285,7 +285,7 @@ func isEcotoneButNotFirstBlock(rollupCfg *rollup.Config, l2Timestamp uint64) boo return rollupCfg.IsEcotone(l2Timestamp) && !rollupCfg.IsEcotoneActivationBlock(l2Timestamp) } -// isInteropButNotFirstBlock returns whether the specified block is subject to the Isthmus upgrade, +// isInteropButNotFirstBlock returns whether the specified block is subject to the Interop upgrade, // but is not the activation block itself. 
func isInteropButNotFirstBlock(rollupCfg *rollup.Config, l2Timestamp uint64) bool { // Since we use the pre-interop L1 tx one last time during the upgrade block, @@ -300,7 +300,7 @@ func L1BlockInfoFromBytes(rollupCfg *rollup.Config, l2BlockTime uint64, data []b var info L1BlockInfo // Important, this should be ordered from most recent to oldest if isInteropButNotFirstBlock(rollupCfg, l2BlockTime) { - return &info, info.unmarshalBinaryIsthmus(data) + return &info, info.unmarshalBinaryInterop(data) } if isEcotoneButNotFirstBlock(rollupCfg, l2BlockTime) { return &info, info.unmarshalBinaryEcotone(data) @@ -333,9 +333,9 @@ func L1InfoDeposit(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber l1BlockInfo.BlobBaseFeeScalar = scalars.BlobBaseFeeScalar l1BlockInfo.BaseFeeScalar = scalars.BaseFeeScalar if isInteropButNotFirstBlock(rollupCfg, l2Timestamp) { - out, err := l1BlockInfo.marshalBinaryIsthmus() + out, err := l1BlockInfo.marshalBinaryInterop() if err != nil { - return nil, fmt.Errorf("failed to marshal Isthmus l1 block info: %w", err) + return nil, fmt.Errorf("failed to marshal Interop l1 block info: %w", err) } data = out } else { diff --git a/op-node/rollup/derive/l1_block_info_test.go b/op-node/rollup/derive/l1_block_info_test.go index b98e8a7d4c63..3f7dd0647e6d 100644 --- a/op-node/rollup/derive/l1_block_info_test.go +++ b/op-node/rollup/derive/l1_block_info_test.go @@ -154,7 +154,7 @@ func TestParseL1InfoDepositTxData(t *testing.T) { require.Equal(t, depTx.Gas, uint64(RegolithSystemTxGas)) require.Equal(t, L1InfoEcotoneLen, len(depTx.Data)) }) - t.Run("isthmus", func(t *testing.T) { + t.Run("interop", func(t *testing.T) { rng := rand.New(rand.NewSource(1234)) info := testutils.MakeBlockInfo(nil)(rng) rollupCfg := rollup.Config{BlockTime: 2, Genesis: rollup.Genesis{L2Time: 1000}} @@ -165,25 +165,25 @@ func TestParseL1InfoDepositTxData(t *testing.T) { require.NoError(t, err) require.False(t, depTx.IsSystemTransaction) require.Equal(t, depTx.Gas, 
uint64(RegolithSystemTxGas)) - require.Equal(t, L1InfoEcotoneLen, len(depTx.Data), "the length is same in isthmus") - require.Equal(t, L1InfoFuncIsthmusBytes4, depTx.Data[:4], "upgrade is active, need isthmus signature") + require.Equal(t, L1InfoEcotoneLen, len(depTx.Data), "the length is same in interop") + require.Equal(t, L1InfoFuncInteropBytes4, depTx.Data[:4], "upgrade is active, need interop signature") }) - t.Run("activation-block isthmus", func(t *testing.T) { + t.Run("activation-block interop", func(t *testing.T) { rng := rand.New(rand.NewSource(1234)) info := testutils.MakeBlockInfo(nil)(rng) rollupCfg := rollup.Config{BlockTime: 2, Genesis: rollup.Genesis{L2Time: 1000}} rollupCfg.ActivateAtGenesis(rollup.Fjord) - isthmusTime := rollupCfg.Genesis.L2Time + rollupCfg.BlockTime // activate isthmus just after genesis - rollupCfg.InteropTime = &isthmusTime - depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, isthmusTime) + interopTime := rollupCfg.Genesis.L2Time + rollupCfg.BlockTime // activate interop just after genesis + rollupCfg.InteropTime = &interopTime + depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, interopTime) require.NoError(t, err) require.False(t, depTx.IsSystemTransaction) require.Equal(t, depTx.Gas, uint64(RegolithSystemTxGas)) - // Isthmus activates, but ecotone L1 info is still used at this upgrade block + // Interop activates, but ecotone L1 info is still used at this upgrade block require.Equal(t, L1InfoEcotoneLen, len(depTx.Data)) require.Equal(t, L1InfoFuncEcotoneBytes4, depTx.Data[:4]) }) - t.Run("genesis-block isthmus", func(t *testing.T) { + t.Run("genesis-block interop", func(t *testing.T) { rng := rand.New(rand.NewSource(1234)) info := testutils.MakeBlockInfo(nil)(rng) rollupCfg := rollup.Config{BlockTime: 2, Genesis: rollup.Genesis{L2Time: 1000}} From a0eee5cc1c33fc8193628a741abc6d7348e7602e Mon Sep 17 00:00:00 2001 From: Disco 
<131301107+0xDiscotech@users.noreply.github.com> Date: Thu, 26 Sep 2024 14:55:30 -0300 Subject: [PATCH 046/116] chore: solidity isthmus to interop (#12132) * chore: rename isthmus to interop on solidity files * chore: rename test file * chore: update contracts versions (#66) --- packages/contracts-bedrock/.gas-snapshot | 8 +- .../contracts-bedrock/scripts/L2Genesis.s.sol | 2 +- packages/contracts-bedrock/semver-lock.json | 18 +-- ...1BlockIsthmus.json => L1BlockInterop.json} | 2 +- ...1BlockIsthmus.json => L1BlockInterop.json} | 0 .../src/L1/OptimismPortalInterop.sol | 8 +- .../src/L1/SystemConfigInterop.sol | 6 +- .../L1/interfaces/IOptimismPortalInterop.sol | 2 +- .../contracts-bedrock/src/L2/CrossL2Inbox.sol | 10 +- ...{L1BlockIsthmus.sol => L1BlockInterop.sol} | 16 +-- ...L1BlockIsthmus.sol => IL1BlockInterop.sol} | 4 +- .../src/libraries/Encoding.sol | 4 +- .../test/BenchmarkTest.t.sol | 38 ++--- .../test/L1/OptimismPortalInterop.t.sol | 8 +- .../test/L1/SystemConfigInterop.t.sol | 2 +- .../test/L2/CrossL2Inbox.t.sol | 26 ++-- ...lockIsthmus.t.sol => L1BlockInterop.t.sol} | 134 +++++++++--------- 17 files changed, 144 insertions(+), 144 deletions(-) rename packages/contracts-bedrock/snapshots/abi/{L1BlockIsthmus.json => L1BlockInterop.json} (99%) rename packages/contracts-bedrock/snapshots/storageLayout/{L1BlockIsthmus.json => L1BlockInterop.json} (100%) rename packages/contracts-bedrock/src/L2/{L1BlockIsthmus.sol => L1BlockInterop.sol} (94%) rename packages/contracts-bedrock/src/L2/interfaces/{IL1BlockIsthmus.sol => IL1BlockInterop.sol} (96%) rename packages/contracts-bedrock/test/L2/{L1BlockIsthmus.t.sol => L1BlockInterop.t.sol} (71%) diff --git a/packages/contracts-bedrock/.gas-snapshot b/packages/contracts-bedrock/.gas-snapshot index ecfb713d05a0..3564748212d9 100644 --- a/packages/contracts-bedrock/.gas-snapshot +++ b/packages/contracts-bedrock/.gas-snapshot @@ -1,7 +1,7 @@ -GasBenchMark_L1BlockIsthmus_DepositsComplete:test_depositsComplete_benchmark() 
(gas: 7567) -GasBenchMark_L1BlockIsthmus_DepositsComplete_Warm:test_depositsComplete_benchmark() (gas: 5567) -GasBenchMark_L1BlockIsthmus_SetValuesIsthmus:test_setL1BlockValuesIsthmus_benchmark() (gas: 175657) -GasBenchMark_L1BlockIsthmus_SetValuesIsthmus_Warm:test_setL1BlockValuesIsthmus_benchmark() (gas: 5121) +GasBenchMark_L1BlockInterop_DepositsComplete:test_depositsComplete_benchmark() (gas: 7567) +GasBenchMark_L1BlockInterop_DepositsComplete_Warm:test_depositsComplete_benchmark() (gas: 5567) +GasBenchMark_L1BlockInterop_SetValuesInterop:test_setL1BlockValuesInterop_benchmark() (gas: 175677) +GasBenchMark_L1BlockInterop_SetValuesInterop_Warm:test_setL1BlockValuesInterop_benchmark() (gas: 5099) GasBenchMark_L1Block_SetValuesEcotone:test_setL1BlockValuesEcotone_benchmark() (gas: 158531) GasBenchMark_L1Block_SetValuesEcotone_Warm:test_setL1BlockValuesEcotone_benchmark() (gas: 7597) GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_0() (gas: 369242) diff --git a/packages/contracts-bedrock/scripts/L2Genesis.s.sol b/packages/contracts-bedrock/scripts/L2Genesis.s.sol index ceae376ee5c2..ae4773a2eebd 100644 --- a/packages/contracts-bedrock/scripts/L2Genesis.s.sol +++ b/packages/contracts-bedrock/scripts/L2Genesis.s.sol @@ -384,7 +384,7 @@ contract L2Genesis is Deployer { /// @notice This predeploy is following the safety invariant #1. 
function setL1Block() public { if (cfg.useInterop()) { - string memory cname = "L1BlockIsthmus"; + string memory cname = "L1BlockInterop"; address impl = Predeploys.predeployToCodeNamespace(Predeploys.L1_BLOCK_ATTRIBUTES); console.log("Setting %s implementation at: %s", cname, impl); vm.etch(impl, vm.getDeployedCode(string.concat(cname, ".sol:", cname))); diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index 91c7ad09789d..808b66317094 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -44,8 +44,8 @@ "sourceCodeHash": "0x3fb97859f66c078573753b6ba5ec370449ab03b8eca9e7779fce8db5bb23b7c0" }, "src/L1/OptimismPortalInterop.sol": { - "initCodeHash": "0x1c8372865dbf38225de4d843ca696a17f0d9e3cacf13c10a3d065ba19bdca05e", - "sourceCodeHash": "0xe6a7794799915f408cb57c73af266670de8a3f02408d3dbc2c97db25d3e42635" + "initCodeHash": "0xfeaa67ccd652bda9103fea507e4357b2bd4e93210b03ff85eb357d7145f1606c", + "sourceCodeHash": "0x6401b81f04093863557ef46192f56793daa0d412618065383ab353b2ed2929d8" }, "src/L1/ProtocolVersions.sol": { "initCodeHash": "0x8f033874dd8b36615b2209d553660dcff1ff91ca2bad3ca1de7b441dbfba4842", @@ -60,16 +60,16 @@ "sourceCodeHash": "0x06a50ac992175fdb434b13e8461893e83862c23ce399e697e6e8109728ad1a3d" }, "src/L1/SystemConfigInterop.sol": { - "initCodeHash": "0x1f500e310170769ffc747e08ad1d5b0de4b0f58534001bc4d4d563ec058bb331", - "sourceCodeHash": "0xcb6008cb49a06f87eb5b6cb4651e5e4aafe0b1f33000eccd165226c04f6b63c6" + "initCodeHash": "0x7515e5ed1266412a8c2d27d99aba6266fda2fc9068c20f0b7e6b555ee5073c91", + "sourceCodeHash": "0x441d1e3e8e987f829f55996b5b6c850da8c59ad48f09cf7e0a69a1fa559d42a2" }, "src/L2/BaseFeeVault.sol": { "initCodeHash": "0x3bfcd57e25ad54b66c374f63e24e33a6cf107044aa8f5f69ef21202c380b5c5b", "sourceCodeHash": "0x2dc2284cf7c68e743da50e4113e96ffeab435de2390aeba2eab2f1e8ca411ce9" }, "src/L2/CrossL2Inbox.sol": { - "initCodeHash": 
"0x0ee27866b4bf864a0b68ab25ea9559d7f2722b0396d02f2e8e089c6a1a5a6a93", - "sourceCodeHash": "0xe6f453049035e0d77e4d7a92904b448bc17e04dd3d99e738b9af20e20986ce64" + "initCodeHash": "0x66b052adce7e9194d054952d67d08b53964120067600358243ec86c85b90877b", + "sourceCodeHash": "0x38e6127ec6be99eb8c38c2c9d6e82761b33dde446bba250dc2c1b84983449e4e" }, "src/L2/ETHLiquidity.sol": { "initCodeHash": "0x713c18f95a6a746d0703f475f3ae10c106c9b9ecb64d881a2e61b8969b581371", @@ -83,9 +83,9 @@ "initCodeHash": "0xd12353c5bf71c6765cc9292eecf262f216e67f117f4ba6287796a5207dbca00f", "sourceCodeHash": "0xfe3a9585d9bfca8428e12759cab68a3114374e5c37371cfe08bb1976a9a5a041" }, - "src/L2/L1BlockIsthmus.sol": { - "initCodeHash": "0xb7a7a113056e4ac44824350b79fed5ea423e880223edcf1220e8f8b3172f50c5", - "sourceCodeHash": "0x6be7e7402c4dfc10e1407e070712a3f9f352db45f8a8ab296e8f6bc56a341f47" + "src/L2/L1BlockInterop.sol": { + "initCodeHash": "0x77b3b2151fe14ea36a640469115a5e4de27f7654a9606a9d0701522c6a4ad887", + "sourceCodeHash": "0x7417677643e1df1ae1782513b94c7821097b9529d3f8626c3bcb8b3a9ae0d180" }, "src/L2/L1FeeVault.sol": { "initCodeHash": "0x3bfcd57e25ad54b66c374f63e24e33a6cf107044aa8f5f69ef21202c380b5c5b", diff --git a/packages/contracts-bedrock/snapshots/abi/L1BlockIsthmus.json b/packages/contracts-bedrock/snapshots/abi/L1BlockInterop.json similarity index 99% rename from packages/contracts-bedrock/snapshots/abi/L1BlockIsthmus.json rename to packages/contracts-bedrock/snapshots/abi/L1BlockInterop.json index d827b32a9cab..ab089f0cec55 100644 --- a/packages/contracts-bedrock/snapshots/abi/L1BlockIsthmus.json +++ b/packages/contracts-bedrock/snapshots/abi/L1BlockInterop.json @@ -354,7 +354,7 @@ }, { "inputs": [], - "name": "setL1BlockValuesIsthmus", + "name": "setL1BlockValuesInterop", "outputs": [], "stateMutability": "nonpayable", "type": "function" diff --git a/packages/contracts-bedrock/snapshots/storageLayout/L1BlockIsthmus.json b/packages/contracts-bedrock/snapshots/storageLayout/L1BlockInterop.json 
similarity index 100% rename from packages/contracts-bedrock/snapshots/storageLayout/L1BlockIsthmus.json rename to packages/contracts-bedrock/snapshots/storageLayout/L1BlockInterop.json diff --git a/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol b/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol index fd33c5286ebd..b02248eaff43 100644 --- a/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol +++ b/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol @@ -3,7 +3,7 @@ pragma solidity 0.8.15; // Contracts import { OptimismPortal2 } from "src/L1/OptimismPortal2.sol"; -import { L1BlockIsthmus, ConfigType } from "src/L2/L1BlockIsthmus.sol"; +import { L1BlockInterop, ConfigType } from "src/L2/L1BlockInterop.sol"; // Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; @@ -23,9 +23,9 @@ contract OptimismPortalInterop is OptimismPortal2 { OptimismPortal2(_proofMaturityDelaySeconds, _disputeGameFinalityDelaySeconds) { } - /// @custom:semver +interop + /// @custom:semver +interop-beta.1 function version() public pure override returns (string memory) { - return string.concat(super.version(), "+interop"); + return string.concat(super.version(), "+interop-beta.1"); } /// @notice Sets static configuration options for the L2 system. 
@@ -48,7 +48,7 @@ contract OptimismPortalInterop is OptimismPortal2 { uint256(0), // value uint64(SYSTEM_DEPOSIT_GAS_LIMIT), // gasLimit false, // isCreation, - abi.encodeCall(L1BlockIsthmus.setConfig, (_type, _value)) + abi.encodeCall(L1BlockInterop.setConfig, (_type, _value)) ) ); } diff --git a/packages/contracts-bedrock/src/L1/SystemConfigInterop.sol b/packages/contracts-bedrock/src/L1/SystemConfigInterop.sol index ee5b052e04ec..f7b8921d10d2 100644 --- a/packages/contracts-bedrock/src/L1/SystemConfigInterop.sol +++ b/packages/contracts-bedrock/src/L1/SystemConfigInterop.sol @@ -5,7 +5,7 @@ pragma solidity 0.8.15; import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; import { IOptimismPortalInterop as IOptimismPortal } from "src/L1/interfaces/IOptimismPortalInterop.sol"; import { SystemConfig } from "src/L1/SystemConfig.sol"; -import { ConfigType } from "src/L2/L1BlockIsthmus.sol"; +import { ConfigType } from "src/L2/L1BlockInterop.sol"; // Libraries import { Constants } from "src/libraries/Constants.sol"; @@ -68,9 +68,9 @@ contract SystemConfigInterop is SystemConfig { Storage.setAddress(DEPENDENCY_MANAGER_SLOT, _dependencyManager); } - /// @custom:semver +interop + /// @custom:semver +interop-beta.1 function version() public pure override returns (string memory) { - return string.concat(super.version(), "+interop"); + return string.concat(super.version(), "+interop-beta.1"); } /// @notice Internal setter for the gas paying token address, includes validation. 
diff --git a/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortalInterop.sol b/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortalInterop.sol index fc2d7528f802..521c7232e125 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortalInterop.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/IOptimismPortalInterop.sol @@ -7,7 +7,7 @@ import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; -import { ConfigType } from "src/L2/L1BlockIsthmus.sol"; +import { ConfigType } from "src/L2/L1BlockInterop.sol"; interface IOptimismPortalInterop { error AlreadyFinalized(); diff --git a/packages/contracts-bedrock/src/L2/CrossL2Inbox.sol b/packages/contracts-bedrock/src/L2/CrossL2Inbox.sol index 437e0c62a2e3..7939dccddbb4 100644 --- a/packages/contracts-bedrock/src/L2/CrossL2Inbox.sol +++ b/packages/contracts-bedrock/src/L2/CrossL2Inbox.sol @@ -7,7 +7,7 @@ import { ISemver } from "src/universal/interfaces/ISemver.sol"; import { ICrossL2Inbox } from "src/L2/interfaces/ICrossL2Inbox.sol"; import { SafeCall } from "src/libraries/SafeCall.sol"; import { IDependencySet } from "src/L2/interfaces/IDependencySet.sol"; -import { IL1BlockIsthmus } from "src/L2/interfaces/IL1BlockIsthmus.sol"; +import { IL1BlockInterop } from "src/L2/interfaces/IL1BlockInterop.sol"; /// @notice Thrown when the caller is not DEPOSITOR_ACCOUNT when calling `setInteropStart()` error NotDepositor(); @@ -65,8 +65,8 @@ contract CrossL2Inbox is ICrossL2Inbox, ISemver, TransientReentrancyAware { address internal constant DEPOSITOR_ACCOUNT = 0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001; /// @notice Semantic version. 
- /// @custom:semver 1.0.0-beta.7 - string public constant version = "1.0.0-beta.7"; + /// @custom:semver 1.0.0-beta.8 + string public constant version = "1.0.0-beta.8"; /// @notice Emitted when a cross chain message is being executed. /// @param msgHash Hash of message payload being executed. @@ -140,7 +140,7 @@ contract CrossL2Inbox is ICrossL2Inbox, ISemver, TransientReentrancyAware { reentrantAware { // We need to know if this is being called on a depositTx - if (IL1BlockIsthmus(Predeploys.L1_BLOCK_ATTRIBUTES).isDeposit()) revert NoExecutingDeposits(); + if (IL1BlockInterop(Predeploys.L1_BLOCK_ATTRIBUTES).isDeposit()) revert NoExecutingDeposits(); // Check the Identifier. _checkIdentifier(_id); @@ -165,7 +165,7 @@ contract CrossL2Inbox is ICrossL2Inbox, ISemver, TransientReentrancyAware { /// @param _msgHash Hash of the message payload to call target with. function validateMessage(Identifier calldata _id, bytes32 _msgHash) external { // We need to know if this is being called on a depositTx - if (IL1BlockIsthmus(Predeploys.L1_BLOCK_ATTRIBUTES).isDeposit()) revert NoExecutingDeposits(); + if (IL1BlockInterop(Predeploys.L1_BLOCK_ATTRIBUTES).isDeposit()) revert NoExecutingDeposits(); // Check the Identifier. _checkIdentifier(_id); diff --git a/packages/contracts-bedrock/src/L2/L1BlockIsthmus.sol b/packages/contracts-bedrock/src/L2/L1BlockInterop.sol similarity index 94% rename from packages/contracts-bedrock/src/L2/L1BlockIsthmus.sol rename to packages/contracts-bedrock/src/L2/L1BlockInterop.sol index c9643659030e..15ea67f5e6b3 100644 --- a/packages/contracts-bedrock/src/L2/L1BlockIsthmus.sol +++ b/packages/contracts-bedrock/src/L2/L1BlockInterop.sol @@ -11,7 +11,7 @@ import { StaticConfig } from "src/libraries/StaticConfig.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import "src/libraries/L1BlockErrors.sol"; -/// @notice Enum representing different types of configurations that can be set on L1BlockIsthmus. 
+/// @notice Enum representing different types of configurations that can be set on L1BlockInterop. /// @custom:value SET_GAS_PAYING_TOKEN Represents the config type for setting the gas paying token. /// @custom:value ADD_DEPENDENCY Represents the config type for adding a chain to the interop dependency set. /// @custom:value REMOVE_DEPENDENCY Represents the config type for removing a chain from the interop dependency set. @@ -23,9 +23,9 @@ enum ConfigType { /// @custom:proxied true /// @custom:predeploy 0x4200000000000000000000000000000000000015 -/// @title L1BlockIsthmus -/// @notice Isthmus extenstions of L1Block. -contract L1BlockIsthmus is L1Block { +/// @title L1BlockInterop +/// @notice Interop extenstions of L1Block. +contract L1BlockInterop is L1Block { using EnumerableSet for EnumerableSet.UintSet; /// @notice Event emitted when a new dependency is added to the interop dependency set. @@ -42,9 +42,9 @@ contract L1BlockIsthmus is L1Block { /// keccak256(abi.encode(uint256(keccak256("l1Block.identifier.isDeposit")) - 1)) & ~bytes32(uint256(0xff)) uint256 internal constant IS_DEPOSIT_SLOT = 0x921bd3a089295c6e5540e8fba8195448d253efd6f2e3e495b499b627dc36a300; - /// @custom:semver +isthmus + /// @custom:semver +interop function version() public pure override returns (string memory) { - return string.concat(super.version(), "+isthmus"); + return string.concat(super.version(), "+interop"); } /// @notice Returns whether the call was triggered from a a deposit or not. @@ -70,10 +70,10 @@ contract L1BlockIsthmus is L1Block { return uint8(dependencySet.length()); } - /// @notice Updates the `isDeposit` flag and sets the L1 block values for an Isthmus upgraded chain. + /// @notice Updates the `isDeposit` flag and sets the L1 block values for an Interop upgraded chain. /// It updates the L1 block values through the `setL1BlockValuesEcotone` function. /// It forwards the calldata to the internally-used `setL1BlockValuesEcotone` function. 
- function setL1BlockValuesIsthmus() external { + function setL1BlockValuesInterop() external { // Set the isDeposit flag to true. assembly { sstore(IS_DEPOSIT_SLOT, 1) diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL1BlockIsthmus.sol b/packages/contracts-bedrock/src/L2/interfaces/IL1BlockInterop.sol similarity index 96% rename from packages/contracts-bedrock/src/L2/interfaces/IL1BlockIsthmus.sol rename to packages/contracts-bedrock/src/L2/interfaces/IL1BlockInterop.sol index b464246e8f7a..dd72e3fa6f89 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL1BlockIsthmus.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IL1BlockInterop.sol @@ -7,7 +7,7 @@ enum ConfigType { REMOVE_DEPENDENCY } -interface IL1BlockIsthmus { +interface IL1BlockInterop { error AlreadyDependency(); error CantRemovedDependency(); error DependencySetSizeTooLarge(); @@ -52,7 +52,7 @@ interface IL1BlockIsthmus { ) external; function setL1BlockValuesEcotone() external; - function setL1BlockValuesIsthmus() external; + function setL1BlockValuesInterop() external; function timestamp() external view returns (uint64); function version() external pure returns (string memory); diff --git a/packages/contracts-bedrock/src/libraries/Encoding.sol b/packages/contracts-bedrock/src/libraries/Encoding.sol index 6c3b9a29aaa9..edcdd4ed75e2 100644 --- a/packages/contracts-bedrock/src/libraries/Encoding.sol +++ b/packages/contracts-bedrock/src/libraries/Encoding.sol @@ -184,7 +184,7 @@ library Encoding { /// @param _blobBaseFee L1 blob base fee. /// @param _hash L1 blockhash. /// @param _batcherHash Versioned hash to authenticate batcher by. 
- function encodeSetL1BlockValuesIsthmus( + function encodeSetL1BlockValuesInterop( uint32 _baseFeeScalar, uint32 _blobBaseFeeScalar, uint64 _sequenceNumber, @@ -199,7 +199,7 @@ library Encoding { pure returns (bytes memory) { - bytes4 functionSignature = bytes4(keccak256("setL1BlockValuesIsthmus()")); + bytes4 functionSignature = bytes4(keccak256("setL1BlockValuesInterop()")); return abi.encodePacked( functionSignature, _baseFeeScalar, diff --git a/packages/contracts-bedrock/test/BenchmarkTest.t.sol b/packages/contracts-bedrock/test/BenchmarkTest.t.sol index 063ed6944946..a129736c771b 100644 --- a/packages/contracts-bedrock/test/BenchmarkTest.t.sol +++ b/packages/contracts-bedrock/test/BenchmarkTest.t.sol @@ -10,7 +10,7 @@ import { Bridge_Initializer } from "test/setup/Bridge_Initializer.sol"; // Libraries import { Types } from "src/libraries/Types.sol"; import { SafeCall } from "src/libraries/SafeCall.sol"; -import { L1BlockIsthmus } from "src/L2/L1BlockIsthmus.sol"; +import { L1BlockInterop } from "src/L2/L1BlockInterop.sol"; import { Encoding } from "src/libraries/Encoding.sol"; // Interfaces @@ -254,13 +254,13 @@ contract GasBenchMark_L1Block_SetValuesEcotone_Warm is GasBenchMark_L1Block { } } -contract GasBenchMark_L1BlockIsthmus is GasBenchMark_L1Block { - L1BlockIsthmus l1BlockIsthmus; +contract GasBenchMark_L1BlockInterop is GasBenchMark_L1Block { + L1BlockInterop l1BlockInterop; function setUp() public virtual override { super.setUp(); - l1BlockIsthmus = new L1BlockIsthmus(); - setValuesCalldata = Encoding.encodeSetL1BlockValuesIsthmus( + l1BlockInterop = new L1BlockInterop(); + setValuesCalldata = Encoding.encodeSetL1BlockValuesInterop( type(uint32).max, type(uint32).max, type(uint64).max, @@ -274,42 +274,42 @@ contract GasBenchMark_L1BlockIsthmus is GasBenchMark_L1Block { } } -contract GasBenchMark_L1BlockIsthmus_SetValuesIsthmus is GasBenchMark_L1BlockIsthmus { - function test_setL1BlockValuesIsthmus_benchmark() external { - SafeCall.call({ _target: 
address(l1BlockIsthmus), _calldata: setValuesCalldata }); +contract GasBenchMark_L1BlockInterop_SetValuesInterop is GasBenchMark_L1BlockInterop { + function test_setL1BlockValuesInterop_benchmark() external { + SafeCall.call({ _target: address(l1BlockInterop), _calldata: setValuesCalldata }); } } -contract GasBenchMark_L1BlockIsthmus_SetValuesIsthmus_Warm is GasBenchMark_L1BlockIsthmus { +contract GasBenchMark_L1BlockInterop_SetValuesInterop_Warm is GasBenchMark_L1BlockInterop { function setUp() public virtual override { - SafeCall.call({ _target: address(l1BlockIsthmus), _calldata: setValuesCalldata }); + SafeCall.call({ _target: address(l1BlockInterop), _calldata: setValuesCalldata }); } - function test_setL1BlockValuesIsthmus_benchmark() external { - SafeCall.call({ _target: address(l1BlockIsthmus), _calldata: setValuesCalldata }); + function test_setL1BlockValuesInterop_benchmark() external { + SafeCall.call({ _target: address(l1BlockInterop), _calldata: setValuesCalldata }); } } -contract GasBenchMark_L1BlockIsthmus_DepositsComplete is GasBenchMark_L1BlockIsthmus { +contract GasBenchMark_L1BlockInterop_DepositsComplete is GasBenchMark_L1BlockInterop { function test_depositsComplete_benchmark() external { SafeCall.call({ - _target: address(l1BlockIsthmus), - _calldata: abi.encodeWithSelector(l1BlockIsthmus.depositsComplete.selector) + _target: address(l1BlockInterop), + _calldata: abi.encodeWithSelector(l1BlockInterop.depositsComplete.selector) }); } } -contract GasBenchMark_L1BlockIsthmus_DepositsComplete_Warm is GasBenchMark_L1BlockIsthmus { +contract GasBenchMark_L1BlockInterop_DepositsComplete_Warm is GasBenchMark_L1BlockInterop { function setUp() public virtual override { super.setUp(); // Set the isDeposit flag to true so then we can benchmark when it is reset. 
- SafeCall.call({ _target: address(l1BlockIsthmus), _calldata: setValuesCalldata }); + SafeCall.call({ _target: address(l1BlockInterop), _calldata: setValuesCalldata }); } function test_depositsComplete_benchmark() external { SafeCall.call({ - _target: address(l1BlockIsthmus), - _calldata: abi.encodeWithSelector(l1BlockIsthmus.depositsComplete.selector) + _target: address(l1BlockInterop), + _calldata: abi.encodeWithSelector(l1BlockInterop.depositsComplete.selector) }); } } diff --git a/packages/contracts-bedrock/test/L1/OptimismPortalInterop.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortalInterop.t.sol index 6e0235774df6..bc9a980276aa 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortalInterop.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortalInterop.t.sol @@ -12,7 +12,7 @@ import "src/libraries/PortalErrors.sol"; // Target contract dependencies import "src/libraries/PortalErrors.sol"; import { OptimismPortalInterop } from "src/L1/OptimismPortalInterop.sol"; -import { L1BlockIsthmus, ConfigType } from "src/L2/L1BlockIsthmus.sol"; +import { L1BlockInterop, ConfigType } from "src/L2/L1BlockInterop.sol"; // Interfaces import { IOptimismPortalInterop } from "src/L1/interfaces/IOptimismPortalInterop.sol"; @@ -35,7 +35,7 @@ contract OptimismPortalInterop_Test is CommonTest { _mint: 0, _gasLimit: 200_000, _isCreation: false, - _data: abi.encodeCall(L1BlockIsthmus.setConfig, (ConfigType.SET_GAS_PAYING_TOKEN, _value)) + _data: abi.encodeCall(L1BlockInterop.setConfig, (ConfigType.SET_GAS_PAYING_TOKEN, _value)) }); vm.prank(address(_optimismPortalInterop().systemConfig())); @@ -58,7 +58,7 @@ contract OptimismPortalInterop_Test is CommonTest { _mint: 0, _gasLimit: 200_000, _isCreation: false, - _data: abi.encodeCall(L1BlockIsthmus.setConfig, (ConfigType.ADD_DEPENDENCY, _value)) + _data: abi.encodeCall(L1BlockInterop.setConfig, (ConfigType.ADD_DEPENDENCY, _value)) }); vm.prank(address(_optimismPortalInterop().systemConfig())); @@ -81,7 +81,7 @@ 
contract OptimismPortalInterop_Test is CommonTest { _mint: 0, _gasLimit: 200_000, _isCreation: false, - _data: abi.encodeCall(L1BlockIsthmus.setConfig, (ConfigType.REMOVE_DEPENDENCY, _value)) + _data: abi.encodeCall(L1BlockInterop.setConfig, (ConfigType.REMOVE_DEPENDENCY, _value)) }); vm.prank(address(_optimismPortalInterop().systemConfig())); diff --git a/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol b/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol index 6cd3c8b3145c..0e47529c760c 100644 --- a/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol +++ b/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol @@ -6,7 +6,7 @@ import { CommonTest } from "test/setup/CommonTest.sol"; // Contracts import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; -import { ConfigType } from "src/L2/L1BlockIsthmus.sol"; +import { ConfigType } from "src/L2/L1BlockInterop.sol"; // Libraries import { Constants } from "src/libraries/Constants.sol"; diff --git a/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol b/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol index 0d3175d41ed1..8078e2c01c74 100644 --- a/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol +++ b/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol @@ -19,7 +19,7 @@ import { NotDepositor, InteropStartAlreadySet } from "src/L2/CrossL2Inbox.sol"; -import { IL1BlockIsthmus } from "src/L2/interfaces/IL1BlockIsthmus.sol"; +import { IL1BlockInterop } from "src/L2/interfaces/IL1BlockInterop.sol"; import { ICrossL2Inbox } from "src/L2/interfaces/ICrossL2Inbox.sol"; /// @title CrossL2InboxWithModifiableTransientStorage @@ -160,7 +160,7 @@ contract CrossL2InboxTest is Test { // Ensure is not a deposit transaction vm.mockCall({ callee: Predeploys.L1_BLOCK_ATTRIBUTES, - data: abi.encodeWithSelector(IL1BlockIsthmus.isDeposit.selector), + data: abi.encodeWithSelector(IL1BlockInterop.isDeposit.selector), returnData: abi.encode(false) }); @@ -222,7 +222,7 @@ contract 
CrossL2InboxTest is Test { // Ensure is not a deposit transaction vm.mockCall({ callee: Predeploys.L1_BLOCK_ATTRIBUTES, - data: abi.encodeWithSelector(IL1BlockIsthmus.isDeposit.selector), + data: abi.encodeWithSelector(IL1BlockInterop.isDeposit.selector), returnData: abi.encode(false) }); @@ -282,7 +282,7 @@ contract CrossL2InboxTest is Test { // Ensure it is a deposit transaction vm.mockCall({ callee: Predeploys.L1_BLOCK_ATTRIBUTES, - data: abi.encodeWithSelector(IL1BlockIsthmus.isDeposit.selector), + data: abi.encodeWithSelector(IL1BlockInterop.isDeposit.selector), returnData: abi.encode(true) }); @@ -312,7 +312,7 @@ contract CrossL2InboxTest is Test { // Ensure is not a deposit transaction vm.mockCall({ callee: Predeploys.L1_BLOCK_ATTRIBUTES, - data: abi.encodeWithSelector(IL1BlockIsthmus.isDeposit.selector), + data: abi.encodeWithSelector(IL1BlockInterop.isDeposit.selector), returnData: abi.encode(false) }); @@ -346,7 +346,7 @@ contract CrossL2InboxTest is Test { // Ensure is not a deposit transaction vm.mockCall({ callee: Predeploys.L1_BLOCK_ATTRIBUTES, - data: abi.encodeWithSelector(IL1BlockIsthmus.isDeposit.selector), + data: abi.encodeWithSelector(IL1BlockInterop.isDeposit.selector), returnData: abi.encode(false) }); @@ -375,7 +375,7 @@ contract CrossL2InboxTest is Test { // Ensure is not a deposit transaction vm.mockCall({ callee: Predeploys.L1_BLOCK_ATTRIBUTES, - data: abi.encodeWithSelector(IL1BlockIsthmus.isDeposit.selector), + data: abi.encodeWithSelector(IL1BlockInterop.isDeposit.selector), returnData: abi.encode(false) }); @@ -419,7 +419,7 @@ contract CrossL2InboxTest is Test { // Ensure is not a deposit transaction vm.mockCall({ callee: Predeploys.L1_BLOCK_ATTRIBUTES, - data: abi.encodeWithSelector(IL1BlockIsthmus.isDeposit.selector), + data: abi.encodeWithSelector(IL1BlockInterop.isDeposit.selector), returnData: abi.encode(false) }); @@ -464,7 +464,7 @@ contract CrossL2InboxTest is Test { // Ensure is not a deposit transaction vm.mockCall({ callee: 
Predeploys.L1_BLOCK_ATTRIBUTES, - data: abi.encodeWithSelector(IL1BlockIsthmus.isDeposit.selector), + data: abi.encodeWithSelector(IL1BlockInterop.isDeposit.selector), returnData: abi.encode(false) }); @@ -485,7 +485,7 @@ contract CrossL2InboxTest is Test { // Ensure it is a deposit transaction vm.mockCall({ callee: Predeploys.L1_BLOCK_ATTRIBUTES, - data: abi.encodeWithSelector(IL1BlockIsthmus.isDeposit.selector), + data: abi.encodeWithSelector(IL1BlockInterop.isDeposit.selector), returnData: abi.encode(true) }); @@ -508,7 +508,7 @@ contract CrossL2InboxTest is Test { // Ensure is not a deposit transaction vm.mockCall({ callee: Predeploys.L1_BLOCK_ATTRIBUTES, - data: abi.encodeWithSelector(IL1BlockIsthmus.isDeposit.selector), + data: abi.encodeWithSelector(IL1BlockInterop.isDeposit.selector), returnData: abi.encode(false) }); @@ -537,7 +537,7 @@ contract CrossL2InboxTest is Test { // Ensure is not a deposit transaction vm.mockCall({ callee: Predeploys.L1_BLOCK_ATTRIBUTES, - data: abi.encodeWithSelector(IL1BlockIsthmus.isDeposit.selector), + data: abi.encodeWithSelector(IL1BlockInterop.isDeposit.selector), returnData: abi.encode(false) }); @@ -571,7 +571,7 @@ contract CrossL2InboxTest is Test { // Ensure is not a deposit transaction vm.mockCall({ callee: Predeploys.L1_BLOCK_ATTRIBUTES, - data: abi.encodeWithSelector(IL1BlockIsthmus.isDeposit.selector), + data: abi.encodeWithSelector(IL1BlockInterop.isDeposit.selector), returnData: abi.encode(false) }); diff --git a/packages/contracts-bedrock/test/L2/L1BlockIsthmus.t.sol b/packages/contracts-bedrock/test/L2/L1BlockInterop.t.sol similarity index 71% rename from packages/contracts-bedrock/test/L2/L1BlockIsthmus.t.sol rename to packages/contracts-bedrock/test/L2/L1BlockInterop.t.sol index 1c2407dd73ab..6f0ef2188b8c 100644 --- a/packages/contracts-bedrock/test/L2/L1BlockIsthmus.t.sol +++ b/packages/contracts-bedrock/test/L2/L1BlockInterop.t.sol @@ -8,17 +8,17 @@ import { CommonTest } from "test/setup/CommonTest.sol"; 
import { StaticConfig } from "src/libraries/StaticConfig.sol"; // Target contract dependencies -import { L1BlockIsthmus, ConfigType } from "src/L2/L1BlockIsthmus.sol"; +import { L1BlockInterop, ConfigType } from "src/L2/L1BlockInterop.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import "src/libraries/L1BlockErrors.sol"; -contract L1BlockIsthmusTest is CommonTest { +contract L1BlockInteropTest is CommonTest { event GasPayingTokenSet(address indexed token, uint8 indexed decimals, bytes32 name, bytes32 symbol); event DependencyAdded(uint256 indexed chainId); event DependencyRemoved(uint256 indexed chainId); modifier prankDepositor() { - vm.startPrank(_l1BlockIsthmus().DEPOSITOR_ACCOUNT()); + vm.startPrank(_l1BlockInterop().DEPOSITOR_ACCOUNT()); _; vm.stopPrank(); } @@ -34,14 +34,14 @@ contract L1BlockIsthmusTest is CommonTest { function testFuzz_isInDependencySet_succeeds(uint256 _chainId) public prankDepositor { vm.assume(_chainId != block.chainid); - _l1BlockIsthmus().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(_chainId)); + _l1BlockInterop().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(_chainId)); - assertTrue(_l1BlockIsthmus().isInDependencySet(_chainId)); + assertTrue(_l1BlockInterop().isInDependencySet(_chainId)); } /// @dev Tests that `isInDependencySet` returns true when the chain's chain ID is passed as the input. 
function test_isInDependencySet_chainChainId_succeeds() public view { - assertTrue(_l1BlockIsthmus().isInDependencySet(block.chainid)); + assertTrue(_l1BlockInterop().isInDependencySet(block.chainid)); } /// @dev Tests that `isInDependencySet` reverts when the input chain ID is not in the dependency set @@ -50,16 +50,16 @@ contract L1BlockIsthmusTest is CommonTest { vm.assume(_chainId != block.chainid); // Check that the chain ID is not in the dependency set - assertFalse(_l1BlockIsthmus().isInDependencySet(_chainId)); + assertFalse(_l1BlockInterop().isInDependencySet(_chainId)); } /// @dev Tests that `isInDependencySet` returns false when the dependency set is empty. function testFuzz_isInDependencySet_dependencySetEmpty_succeeds(uint256 _chainId) public view { vm.assume(_chainId != block.chainid); - assertEq(_l1BlockIsthmus().dependencySetSize(), 0); + assertEq(_l1BlockInterop().dependencySetSize(), 0); - assertFalse(_l1BlockIsthmus().isInDependencySet(_chainId)); + assertFalse(_l1BlockInterop().isInDependencySet(_chainId)); } /// @dev Tests that the dependency set size is correct when adding an arbitrary number of chain IDs. @@ -70,16 +70,16 @@ contract L1BlockIsthmusTest is CommonTest { for (uint256 i = 0; i < _dependencySetSize; i++) { if (i == block.chainid) continue; - _l1BlockIsthmus().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(i)); + _l1BlockInterop().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(i)); uniqueCount++; } - assertEq(_l1BlockIsthmus().dependencySetSize(), uniqueCount); + assertEq(_l1BlockInterop().dependencySetSize(), uniqueCount); } /// @dev Tests that the dependency set size is correct when the dependency set is empty. function test_dependencySetSize_dependencySetEmpty_succeeds() public view { - assertEq(_l1BlockIsthmus().dependencySetSize(), 0); + assertEq(_l1BlockInterop().dependencySetSize(), 0); } /// @dev Tests that the config for setting the gas paying token succeeds. 
@@ -97,7 +97,7 @@ contract L1BlockIsthmusTest is CommonTest { vm.expectEmit(address(l1Block)); emit GasPayingTokenSet({ token: _token, decimals: _decimals, name: _name, symbol: _symbol }); - _l1BlockIsthmus().setConfig( + _l1BlockInterop().setConfig( ConfigType.SET_GAS_PAYING_TOKEN, StaticConfig.encodeSetGasPayingToken({ _token: _token, _decimals: _decimals, _name: _name, _symbol: _symbol }) ); @@ -115,7 +115,7 @@ contract L1BlockIsthmusTest is CommonTest { vm.assume(_token != address(vm)); vm.expectRevert(NotDepositor.selector); - _l1BlockIsthmus().setConfig( + _l1BlockInterop().setConfig( ConfigType.SET_GAS_PAYING_TOKEN, StaticConfig.encodeSetGasPayingToken({ _token: _token, _decimals: _decimals, _name: _name, _symbol: _symbol }) ); @@ -128,41 +128,41 @@ contract L1BlockIsthmusTest is CommonTest { vm.expectEmit(address(l1Block)); emit DependencyAdded(_chainId); - _l1BlockIsthmus().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(_chainId)); + _l1BlockInterop().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(_chainId)); } /// @dev Tests that adding a dependency reverts if it's the chain's chain id function test_setConfig_addDependency_chainChainId_reverts() public prankDepositor { vm.expectRevert(AlreadyDependency.selector); - _l1BlockIsthmus().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(block.chainid)); + _l1BlockInterop().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(block.chainid)); } /// @dev Tests that adding a dependency already in the set reverts function test_setConfig_addDependency_alreadyDependency_reverts(uint256 _chainId) public prankDepositor { vm.assume(_chainId != block.chainid); - _l1BlockIsthmus().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(_chainId)); + _l1BlockInterop().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(_chainId)); vm.expectRevert(AlreadyDependency.selector); - 
_l1BlockIsthmus().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(_chainId)); + _l1BlockInterop().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(_chainId)); } /// @dev Tests that setting the add dependency config as not the depositor reverts. function testFuzz_setConfig_addDependency_notDepositor_reverts(uint256 _chainId) public { vm.expectRevert(NotDepositor.selector); - _l1BlockIsthmus().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(_chainId)); + _l1BlockInterop().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(_chainId)); } /// @dev Tests that setting the add dependency config when the dependency set size is too large reverts. function test_setConfig_addDependency_dependencySetSizeTooLarge_reverts() public prankDepositor { for (uint256 i = 0; i < type(uint8).max; i++) { - _l1BlockIsthmus().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(i)); + _l1BlockInterop().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(i)); } - assertEq(_l1BlockIsthmus().dependencySetSize(), type(uint8).max); + assertEq(_l1BlockInterop().dependencySetSize(), type(uint8).max); vm.expectRevert(DependencySetSizeTooLarge.selector); - _l1BlockIsthmus().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(1)); + _l1BlockInterop().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(1)); } /// @dev Tests that the config for removing a dependency can be set. 
@@ -170,24 +170,24 @@ contract L1BlockIsthmusTest is CommonTest { vm.assume(_chainId != block.chainid); // Add the chain ID to the dependency set before removing it - _l1BlockIsthmus().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(_chainId)); + _l1BlockInterop().setConfig(ConfigType.ADD_DEPENDENCY, StaticConfig.encodeAddDependency(_chainId)); vm.expectEmit(address(l1Block)); emit DependencyRemoved(_chainId); - _l1BlockIsthmus().setConfig(ConfigType.REMOVE_DEPENDENCY, StaticConfig.encodeRemoveDependency(_chainId)); + _l1BlockInterop().setConfig(ConfigType.REMOVE_DEPENDENCY, StaticConfig.encodeRemoveDependency(_chainId)); } /// @dev Tests that setting the remove dependency config as not the depositor reverts. function testFuzz_setConfig_removeDependency_notDepositor_reverts(uint256 _chainId) public { vm.expectRevert(NotDepositor.selector); - _l1BlockIsthmus().setConfig(ConfigType.REMOVE_DEPENDENCY, StaticConfig.encodeRemoveDependency(_chainId)); + _l1BlockInterop().setConfig(ConfigType.REMOVE_DEPENDENCY, StaticConfig.encodeRemoveDependency(_chainId)); } /// @dev Tests that setting the remove dependency config for the chain's chain ID reverts. function test_setConfig_removeDependency_chainChainId_reverts() public prankDepositor { vm.expectRevert(CantRemovedDependency.selector); - _l1BlockIsthmus().setConfig(ConfigType.REMOVE_DEPENDENCY, StaticConfig.encodeRemoveDependency(block.chainid)); + _l1BlockInterop().setConfig(ConfigType.REMOVE_DEPENDENCY, StaticConfig.encodeRemoveDependency(block.chainid)); } /// @dev Tests that setting the remove dependency config for a chain ID that is not in the dependency set reverts. 
@@ -195,50 +195,50 @@ contract L1BlockIsthmusTest is CommonTest { vm.assume(_chainId != block.chainid); vm.expectRevert(NotDependency.selector); - _l1BlockIsthmus().setConfig(ConfigType.REMOVE_DEPENDENCY, StaticConfig.encodeRemoveDependency(_chainId)); + _l1BlockInterop().setConfig(ConfigType.REMOVE_DEPENDENCY, StaticConfig.encodeRemoveDependency(_chainId)); } - /// @dev Returns the L1BlockIsthmus instance. - function _l1BlockIsthmus() internal view returns (L1BlockIsthmus) { - return L1BlockIsthmus(address(l1Block)); + /// @dev Returns the L1BlockInterop instance. + function _l1BlockInterop() internal view returns (L1BlockInterop) { + return L1BlockInterop(address(l1Block)); } } -contract L1BlockIsthmusIsDeposit_Test is L1BlockIsthmusTest { +contract L1BlockInteropIsDeposit_Test is L1BlockInteropTest { /// @dev Tests that `isDeposit` reverts if the caller is not the cross L2 inbox. function test_isDeposit_notCrossL2Inbox_reverts(address _caller) external { vm.assume(_caller != Predeploys.CROSS_L2_INBOX); vm.expectRevert(NotCrossL2Inbox.selector); - _l1BlockIsthmus().isDeposit(); + _l1BlockInterop().isDeposit(); } /// @dev Tests that `isDeposit` always returns the correct value. function test_isDeposit_succeeds() external { // Assert is false if the value is not updated vm.prank(Predeploys.CROSS_L2_INBOX); - assertEq(_l1BlockIsthmus().isDeposit(), false); + assertEq(_l1BlockInterop().isDeposit(), false); - /// @dev Assuming that `setL1BlockValuesIsthmus` will set the proper value. That function is tested as well - vm.prank(_l1BlockIsthmus().DEPOSITOR_ACCOUNT()); - _l1BlockIsthmus().setL1BlockValuesIsthmus(); + /// @dev Assuming that `setL1BlockValuesInterop` will set the proper value. 
That function is tested as well + vm.prank(_l1BlockInterop().DEPOSITOR_ACCOUNT()); + _l1BlockInterop().setL1BlockValuesInterop(); // Assert is true if the value is updated vm.prank(Predeploys.CROSS_L2_INBOX); - assertEq(_l1BlockIsthmus().isDeposit(), true); + assertEq(_l1BlockInterop().isDeposit(), true); } } -contract L1BlockIsthmusSetL1BlockValuesIsthmus_Test is L1BlockIsthmusTest { - /// @dev Tests that `setL1BlockValuesIsthmus` reverts if sender address is not the depositor - function test_setL1BlockValuesIsthmus_notDepositor_reverts(address _caller) external { - vm.assume(_caller != _l1BlockIsthmus().DEPOSITOR_ACCOUNT()); +contract L1BlockInteropSetL1BlockValuesInterop_Test is L1BlockInteropTest { + /// @dev Tests that `setL1BlockValuesInterop` reverts if sender address is not the depositor + function test_setL1BlockValuesInterop_notDepositor_reverts(address _caller) external { + vm.assume(_caller != _l1BlockInterop().DEPOSITOR_ACCOUNT()); vm.prank(_caller); vm.expectRevert(NotDepositor.selector); - _l1BlockIsthmus().setL1BlockValuesIsthmus(); + _l1BlockInterop().setL1BlockValuesInterop(); } - /// @dev Tests that `setL1BlockValuesIsthmus` succeeds if sender address is the depositor - function test_setL1BlockValuesIsthmus_succeeds( + /// @dev Tests that `setL1BlockValuesInterop` succeeds if sender address is the depositor + function test_setL1BlockValuesInterop_succeeds( uint32 baseFeeScalar, uint32 blobBaseFeeScalar, uint64 sequenceNumber, @@ -251,62 +251,62 @@ contract L1BlockIsthmusSetL1BlockValuesIsthmus_Test is L1BlockIsthmusTest { ) external { - // Ensure the `isDepositTransaction` flag is false before calling `setL1BlockValuesIsthmus` + // Ensure the `isDepositTransaction` flag is false before calling `setL1BlockValuesInterop` vm.prank(Predeploys.CROSS_L2_INBOX); - assertEq(_l1BlockIsthmus().isDeposit(), false); + assertEq(_l1BlockInterop().isDeposit(), false); bytes memory setValuesEcotoneCalldata = abi.encodePacked( baseFeeScalar, blobBaseFeeScalar, 
sequenceNumber, timestamp, number, baseFee, blobBaseFee, hash, batcherHash ); - vm.prank(_l1BlockIsthmus().DEPOSITOR_ACCOUNT()); + vm.prank(_l1BlockInterop().DEPOSITOR_ACCOUNT()); (bool success,) = address(l1Block).call( - abi.encodePacked(L1BlockIsthmus.setL1BlockValuesIsthmus.selector, setValuesEcotoneCalldata) + abi.encodePacked(L1BlockInterop.setL1BlockValuesInterop.selector, setValuesEcotoneCalldata) ); assertTrue(success, "function call failed"); // Assert that the `isDepositTransaction` flag was properly set to true vm.prank(Predeploys.CROSS_L2_INBOX); - assertEq(_l1BlockIsthmus().isDeposit(), true); + assertEq(_l1BlockInterop().isDeposit(), true); // Assert `setL1BlockValuesEcotone` was properly called, forwarding the calldata to it - assertEq(_l1BlockIsthmus().baseFeeScalar(), baseFeeScalar, "base fee scalar not properly set"); - assertEq(_l1BlockIsthmus().blobBaseFeeScalar(), blobBaseFeeScalar, "blob base fee scalar not properly set"); - assertEq(_l1BlockIsthmus().sequenceNumber(), sequenceNumber, "sequence number not properly set"); - assertEq(_l1BlockIsthmus().timestamp(), timestamp, "timestamp not properly set"); - assertEq(_l1BlockIsthmus().number(), number, "number not properly set"); - assertEq(_l1BlockIsthmus().basefee(), baseFee, "base fee not properly set"); - assertEq(_l1BlockIsthmus().blobBaseFee(), blobBaseFee, "blob base fee not properly set"); - assertEq(_l1BlockIsthmus().hash(), hash, "hash not properly set"); - assertEq(_l1BlockIsthmus().batcherHash(), batcherHash, "batcher hash not properly set"); + assertEq(_l1BlockInterop().baseFeeScalar(), baseFeeScalar, "base fee scalar not properly set"); + assertEq(_l1BlockInterop().blobBaseFeeScalar(), blobBaseFeeScalar, "blob base fee scalar not properly set"); + assertEq(_l1BlockInterop().sequenceNumber(), sequenceNumber, "sequence number not properly set"); + assertEq(_l1BlockInterop().timestamp(), timestamp, "timestamp not properly set"); + assertEq(_l1BlockInterop().number(), number, "number 
not properly set"); + assertEq(_l1BlockInterop().basefee(), baseFee, "base fee not properly set"); + assertEq(_l1BlockInterop().blobBaseFee(), blobBaseFee, "blob base fee not properly set"); + assertEq(_l1BlockInterop().hash(), hash, "hash not properly set"); + assertEq(_l1BlockInterop().batcherHash(), batcherHash, "batcher hash not properly set"); } } -contract L1BlockDepositsComplete_Test is L1BlockIsthmusTest { +contract L1BlockDepositsComplete_Test is L1BlockInteropTest { // @dev Tests that `depositsComplete` reverts if the caller is not the depositor. function test_deposits_is_depositor_reverts(address _caller) external { - vm.assume(_caller != _l1BlockIsthmus().DEPOSITOR_ACCOUNT()); + vm.assume(_caller != _l1BlockInterop().DEPOSITOR_ACCOUNT()); vm.expectRevert(NotDepositor.selector); - _l1BlockIsthmus().depositsComplete(); + _l1BlockInterop().depositsComplete(); } // @dev Tests that `depositsComplete` succeeds if the caller is the depositor. function test_depositsComplete_succeeds() external { // Set the `isDeposit` flag to true - vm.prank(_l1BlockIsthmus().DEPOSITOR_ACCOUNT()); - _l1BlockIsthmus().setL1BlockValuesIsthmus(); + vm.prank(_l1BlockInterop().DEPOSITOR_ACCOUNT()); + _l1BlockInterop().setL1BlockValuesInterop(); // Assert that the `isDeposit` flag was properly set to true vm.prank(Predeploys.CROSS_L2_INBOX); - assertTrue(_l1BlockIsthmus().isDeposit()); + assertTrue(_l1BlockInterop().isDeposit()); // Call `depositsComplete` - vm.prank(_l1BlockIsthmus().DEPOSITOR_ACCOUNT()); - _l1BlockIsthmus().depositsComplete(); + vm.prank(_l1BlockInterop().DEPOSITOR_ACCOUNT()); + _l1BlockInterop().depositsComplete(); // Assert that the `isDeposit` flag was properly set to false /// @dev Assuming that `isDeposit()` wil return the proper value. 
That function is tested as well vm.prank(Predeploys.CROSS_L2_INBOX); - assertEq(_l1BlockIsthmus().isDeposit(), false); + assertEq(_l1BlockInterop().isDeposit(), false); } } From bb87eef8be59586c4608da8495ea6ae2a2848730 Mon Sep 17 00:00:00 2001 From: Inphi Date: Thu, 26 Sep 2024 14:17:55 -0400 Subject: [PATCH 047/116] cannon: Consistent state serialization (#12151) * cannon: Consistent state serialization * nosilent cmp * fix run input * add elf target dep --- cannon/Makefile | 23 ++++++++++++++++++++++- cannon/mipsevm/memory/memory.go | 8 +++++++- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/cannon/Makefile b/cannon/Makefile index 7dfc39c44023..ea9a29ebb5c2 100644 --- a/cannon/Makefile +++ b/cannon/Makefile @@ -43,6 +43,26 @@ contract: test: elf contract go test -v ./... +diff-%-cannon: cannon elf + $$OTHER_CANNON load-elf --type $* --path ./testdata/example/bin/hello.elf --out ./bin/prestate-other.bin.gz --meta "" + ./bin/cannon load-elf --type $* --path ./testdata/example/bin/hello.elf --out ./bin/prestate.bin.gz --meta "" + @cmp ./bin/prestate-other.bin.gz ./bin/prestate.bin.gz + @if [ $$? -eq 0 ]; then \ + echo "Generated identical prestates"; \ + else \ + echo "Generated different prestates"; \ + exit 1; \ + fi + $$OTHER_CANNON run --proof-at '=0' --stop-at '=100000000' --input=./bin/prestate.bin.gz --output ./bin/out-other.bin.gz --meta "" + ./bin/cannon run --proof-at '=0' --stop-at '=100000000' --input=./bin/prestate.bin.gz --output ./bin/out.bin.gz --meta "" + @cmp ./bin/out-other.bin.gz ./bin/out.bin.gz + @if [ $$? 
-eq 0 ]; then \ + echo "Generated identical states"; \ + else \ + echo "Generated different prestates"; \ + exit 1; \ + fi + fuzz: # Common vm tests go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateSyscallBrk ./mipsevm/tests @@ -65,4 +85,5 @@ fuzz: clean \ test \ lint \ - fuzz + fuzz \ + diff-%-cannon diff --git a/cannon/mipsevm/memory/memory.go b/cannon/mipsevm/memory/memory.go index 392a0482c48e..ea5c279763b3 100644 --- a/cannon/mipsevm/memory/memory.go +++ b/cannon/mipsevm/memory/memory.go @@ -6,9 +6,11 @@ import ( "fmt" "io" "math/bits" + "slices" "sort" "github.com/ethereum/go-ethereum/crypto" + "golang.org/x/exp/maps" ) // Note: 2**12 = 4 KiB, the min phys page size in the Go runtime. @@ -299,7 +301,11 @@ func (m *Memory) Serialize(out io.Writer) error { if err := binary.Write(out, binary.BigEndian, uint32(m.PageCount())); err != nil { return err } - for pageIndex, page := range m.pages { + indexes := maps.Keys(m.pages) + // iterate sorted map keys for consistent serialization + slices.Sort(indexes) + for _, pageIndex := range indexes { + page := m.pages[pageIndex] if err := binary.Write(out, binary.BigEndian, pageIndex); err != nil { return err } From 99251ea900539d833ef30bc218b8b0889a74e124 Mon Sep 17 00:00:00 2001 From: Matt Solomon Date: Thu, 26 Sep 2024 11:58:10 -0700 Subject: [PATCH 048/116] OPSM: update bond defaults (#12155) * chore: set init bond default to 0 * test: add initial bond test * chore: semver lock --- packages/contracts-bedrock/semver-lock.json | 4 ++-- packages/contracts-bedrock/src/L1/OPContractsManager.sol | 5 ++--- packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol | 8 ++++++-- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index 808b66317094..e3e7c8989d52 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -32,8 +32,8 @@ "sourceCodeHash": 
"0xde4df0f9633dc0cdb1c9f634003ea5b0f7c5c1aebc407bc1b2f44c0ecf938649" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0x08be0367ee031ee292b74aa9b6fc86c5d65cbbdadd455bb8120748eec79cf2d8", - "sourceCodeHash": "0x84fd2b583ddf44e900c58861ddda103f7bea793d71fb845f76ed28afd1e757bc" + "initCodeHash": "0x292d367322dc74744e8c98c463021e1abae77e57954eef8bac6e2081fcba5644", + "sourceCodeHash": "0xbfcc2032df842e50067d4b4a75ce66cc14cc34e67d35e37e2160215be57d8e2e" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xbe2c0c81b3459014f287d8c89cdc0d27dde5d1f44e5d024fa1e4773ddc47c190", diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index 93b9b71b00c3..ed4d0675faf0 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -124,8 +124,8 @@ contract OPContractsManager is ISemver, Initializable { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.8 - string public constant version = "1.0.0-beta.8"; + /// @custom:semver 1.0.0-beta.9 + string public constant version = "1.0.0-beta.9"; /// @notice Represents the interface version so consumers know how to decode the DeployOutput struct /// that's emitted in the `Deployed` event. Whenever that struct changes, a new version should be used. 
@@ -330,7 +330,6 @@ contract OPContractsManager is ISemver, Initializable { output.disputeGameFactoryProxy.setImplementation( GameTypes.PERMISSIONED_CANNON, IDisputeGame(address(output.permissionedDisputeGame)) ); - output.disputeGameFactoryProxy.setInitBond(GameTypes.PERMISSIONED_CANNON, 0.08 ether); output.disputeGameFactoryProxy.transferOwnership(address(output.opChainProxyAdmin)); impl.logic = address(output.anchorStateRegistryImpl); diff --git a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol index 9537f6339575..cfb81717ece8 100644 --- a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol @@ -496,8 +496,12 @@ contract DeployOPChain_Test is DeployOPChain_TestBase { assertEq(address(doo.permissionedDisputeGame().challenger()), challenger, "2600"); // TODO once we deploy the Permissionless Dispute Game - // assertEq(address(doo.faultDisputeGame().proposer()), proposer, "2700"); - // assertEq(address(doo.faultDisputeGame().challenger()), challenger, "2800"); + // assertEq(address(doo.faultDisputeGame().proposer()), proposer, "2610"); + // assertEq(address(doo.faultDisputeGame().challenger()), challenger, "2620"); + + // Verify that the initial bonds are zero. + assertEq(doo.disputeGameFactoryProxy().initBonds(GameTypes.CANNON), 0, "2700"); + assertEq(doo.disputeGameFactoryProxy().initBonds(GameTypes.PERMISSIONED_CANNON), 0, "2800"); // Most architecture assertions are handled within the OP Contracts Manager itself and therefore // we only assert on the things that are not visible onchain. 
From ec80f9c7e559992e6eb2a23067bfe88b9afe30bb Mon Sep 17 00:00:00 2001 From: James Kim Date: Thu, 26 Sep 2024 15:56:38 -0400 Subject: [PATCH 049/116] chore: make interopgen deploy step fns public (#12150) --- op-chain-ops/interopgen/deploy.go | 32 +++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/op-chain-ops/interopgen/deploy.go b/op-chain-ops/interopgen/deploy.go index 8958456f0b6e..95964a699407 100644 --- a/op-chain-ops/interopgen/deploy.go +++ b/op-chain-ops/interopgen/deploy.go @@ -39,18 +39,18 @@ func Deploy(logger log.Logger, fa *foundry.ArtifactsFS, srcFS *foundry.SourceMap L2s: make(map[string]*L2Deployment), } - l1Host := createL1(logger, fa, srcFS, cfg.L1) + l1Host := CreateL1(logger, fa, srcFS, cfg.L1) if err := l1Host.EnableCheats(); err != nil { return nil, nil, fmt.Errorf("failed to enable cheats in L1 state: %w", err) } - l1Deployment, err := prepareInitialL1(l1Host, cfg.L1) + l1Deployment, err := PrepareInitialL1(l1Host, cfg.L1) if err != nil { return nil, nil, fmt.Errorf("failed to deploy initial L1 content: %w", err) } deployments.L1 = l1Deployment - superDeployment, err := deploySuperchainToL1(l1Host, cfg.Superchain) + superDeployment, err := DeploySuperchainToL1(l1Host, cfg.Superchain) if err != nil { return nil, nil, fmt.Errorf("failed to deploy superchain to L1: %w", err) } @@ -62,7 +62,7 @@ func Deploy(logger log.Logger, fa *foundry.ArtifactsFS, srcFS *foundry.SourceMap // after creating the final config for any particular L2. Will add comments. 
for l2ChainID, l2Cfg := range cfg.L2s { - l2Deployment, err := deployL2ToL1(l1Host, cfg.Superchain, superDeployment, l2Cfg) + l2Deployment, err := DeployL2ToL1(l1Host, cfg.Superchain, superDeployment, l2Cfg) if err != nil { return nil, nil, fmt.Errorf("failed to deploy L2 %d to L1: %w", &l2ChainID, err) } @@ -72,7 +72,7 @@ func Deploy(logger log.Logger, fa *foundry.ArtifactsFS, srcFS *foundry.SourceMap out := &WorldOutput{ L2s: make(map[string]*L2Output), } - l1Out, err := completeL1(l1Host, cfg.L1) + l1Out, err := CompleteL1(l1Host, cfg.L1) if err != nil { return nil, nil, fmt.Errorf("failed to complete L1: %w", err) } @@ -83,14 +83,14 @@ func Deploy(logger log.Logger, fa *foundry.ArtifactsFS, srcFS *foundry.SourceMap genesisTimestamp := l1Out.Genesis.Timestamp for l2ChainID, l2Cfg := range cfg.L2s { - l2Host := createL2(logger, fa, srcFS, l2Cfg, genesisTimestamp) + l2Host := CreateL2(logger, fa, srcFS, l2Cfg, genesisTimestamp) if err := l2Host.EnableCheats(); err != nil { return nil, nil, fmt.Errorf("failed to enable cheats in L2 state %s: %w", l2ChainID, err) } - if err := genesisL2(l2Host, l2Cfg, deployments.L2s[l2ChainID]); err != nil { + if err := GenesisL2(l2Host, l2Cfg, deployments.L2s[l2ChainID]); err != nil { return nil, nil, fmt.Errorf("failed to apply genesis data to L2 %s: %w", l2ChainID, err) } - l2Out, err := completeL2(l2Host, l2Cfg, l1GenesisBlock, deployments.L2s[l2ChainID]) + l2Out, err := CompleteL2(l2Host, l2Cfg, l1GenesisBlock, deployments.L2s[l2ChainID]) if err != nil { return nil, nil, fmt.Errorf("failed to complete L2 %s: %w", l2ChainID, err) } @@ -99,7 +99,7 @@ func Deploy(logger log.Logger, fa *foundry.ArtifactsFS, srcFS *foundry.SourceMap return deployments, out, nil } -func createL1(logger log.Logger, fa *foundry.ArtifactsFS, srcFS *foundry.SourceMapFS, cfg *L1Config) *script.Host { +func CreateL1(logger log.Logger, fa *foundry.ArtifactsFS, srcFS *foundry.SourceMapFS, cfg *L1Config) *script.Host { l1Context := script.Context{ ChainID: 
cfg.ChainID, Sender: sysGenesisDeployer, @@ -115,7 +115,7 @@ func createL1(logger log.Logger, fa *foundry.ArtifactsFS, srcFS *foundry.SourceM return l1Host } -func createL2(logger log.Logger, fa *foundry.ArtifactsFS, srcFS *foundry.SourceMapFS, l2Cfg *L2Config, genesisTimestamp uint64) *script.Host { +func CreateL2(logger log.Logger, fa *foundry.ArtifactsFS, srcFS *foundry.SourceMapFS, l2Cfg *L2Config, genesisTimestamp uint64) *script.Host { l2Context := script.Context{ ChainID: new(big.Int).SetUint64(l2Cfg.L2ChainID), Sender: sysGenesisDeployer, @@ -134,7 +134,7 @@ func createL2(logger log.Logger, fa *foundry.ArtifactsFS, srcFS *foundry.SourceM } // prepareInitialL1 deploys basics such as preinstalls to L1 (incl. EIP-4788) -func prepareInitialL1(l1Host *script.Host, cfg *L1Config) (*L1Deployment, error) { +func PrepareInitialL1(l1Host *script.Host, cfg *L1Config) (*L1Deployment, error) { l1Host.SetTxOrigin(sysGenesisDeployer) if err := deployers.InsertPreinstalls(l1Host); err != nil { @@ -145,7 +145,7 @@ func prepareInitialL1(l1Host *script.Host, cfg *L1Config) (*L1Deployment, error) return &L1Deployment{}, nil } -func deploySuperchainToL1(l1Host *script.Host, superCfg *SuperchainConfig) (*SuperchainDeployment, error) { +func DeploySuperchainToL1(l1Host *script.Host, superCfg *SuperchainConfig) (*SuperchainDeployment, error) { l1Host.SetTxOrigin(superCfg.Deployer) superDeployment, err := opcm.DeploySuperchain(l1Host, opcm.DeploySuperchainInput{ @@ -189,7 +189,7 @@ func deploySuperchainToL1(l1Host *script.Host, superCfg *SuperchainConfig) (*Sup }, nil } -func deployL2ToL1(l1Host *script.Host, superCfg *SuperchainConfig, superDeployment *SuperchainDeployment, cfg *L2Config) (*L2Deployment, error) { +func DeployL2ToL1(l1Host *script.Host, superCfg *SuperchainConfig, superDeployment *SuperchainDeployment, cfg *L2Config) (*L2Deployment, error) { if cfg.UseAltDA { return nil, errors.New("alt-da mode not supported yet") } @@ -218,7 +218,7 @@ func deployL2ToL1(l1Host 
*script.Host, superCfg *SuperchainConfig, superDeployme }, nil } -func genesisL2(l2Host *script.Host, cfg *L2Config, deployment *L2Deployment) error { +func GenesisL2(l2Host *script.Host, cfg *L2Config, deployment *L2Deployment) error { if err := opcm.L2Genesis(l2Host, &opcm.L2GenesisInput{ L1Deployments: opcm.L1Deployments{ L1CrossDomainMessengerProxy: deployment.L1CrossDomainMessengerProxy, @@ -233,7 +233,7 @@ func genesisL2(l2Host *script.Host, cfg *L2Config, deployment *L2Deployment) err return nil } -func completeL1(l1Host *script.Host, cfg *L1Config) (*L1Output, error) { +func CompleteL1(l1Host *script.Host, cfg *L1Config) (*L1Output, error) { l1Genesis, err := genesis.NewL1Genesis(&genesis.DeployConfig{ L2InitializationConfig: genesis.L2InitializationConfig{ L2CoreDeployConfig: genesis.L2CoreDeployConfig{ @@ -276,7 +276,7 @@ func completeL1(l1Host *script.Host, cfg *L1Config) (*L1Output, error) { }, nil } -func completeL2(l2Host *script.Host, cfg *L2Config, l1Block *types.Block, deployment *L2Deployment) (*L2Output, error) { +func CompleteL2(l2Host *script.Host, cfg *L2Config, l1Block *types.Block, deployment *L2Deployment) (*L2Output, error) { deployCfg := &genesis.DeployConfig{ L2InitializationConfig: cfg.L2InitializationConfig, L1DependenciesConfig: genesis.L1DependenciesConfig{ From 7ff6940301af6ee30f73b73a648f6f26e9b6e7a5 Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Thu, 26 Sep 2024 22:18:39 +0200 Subject: [PATCH 050/116] ci-builder: bump go tool versions (#12159) * ci-builder: bump go tool versions * update mocks after update of mockery * bump cci ci-builder version to v0.53.0 * fix golangci-lint run --help | grep concurrency --- .circleci/config.yml | 4 +- .../gating/mocks/BlockingConnectionGater.go | 67 ++++++++++++++-- op-node/p2p/gating/mocks/ExpiryStore.go | 27 +++++-- op-node/p2p/gating/mocks/Scores.go | 15 ++-- op-node/p2p/mocks/API.go | 79 +++++++++++++++++-- op-node/p2p/mocks/GossipMetricer.go | 11 ++- op-node/p2p/mocks/Peerstore.go 
| 23 ++++-- op-node/p2p/mocks/ScoreMetrics.go | 11 ++- op-node/p2p/monitor/mocks/PeerManager.go | 27 +++++-- op-service/sources/mocks/BeaconClient.go | 27 +++++-- .../sources/mocks/BlobSideCarsFetcher.go | 15 ++-- op-service/txmgr/mocks/TxManager.go | 35 ++++++-- ops/docker/ci-builder/Dockerfile | 8 +- 13 files changed, 277 insertions(+), 72 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index efbc727ee917..03b998a12203 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -3,7 +3,7 @@ version: 2.1 parameters: ci_builder_image: type: string - default: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:v0.51.0 + default: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:v0.53.0 ci_builder_rust_image: type: string default: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder-rust:latest @@ -791,7 +791,7 @@ jobs: name: run Go linter command: | # Identify how many cores it defaults to - golangci-lint --help | grep concurrency + golangci-lint run --help | grep concurrency make lint-go working_directory: . - save_cache: diff --git a/op-node/p2p/gating/mocks/BlockingConnectionGater.go b/op-node/p2p/gating/mocks/BlockingConnectionGater.go index 7d289aebd057..ade24d40c6b7 100644 --- a/op-node/p2p/gating/mocks/BlockingConnectionGater.go +++ b/op-node/p2p/gating/mocks/BlockingConnectionGater.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.28.1. DO NOT EDIT. +// Code generated by mockery v2.46.0. DO NOT EDIT. 
package mocks @@ -33,6 +33,10 @@ func (_m *BlockingConnectionGater) EXPECT() *BlockingConnectionGater_Expecter { func (_m *BlockingConnectionGater) BlockAddr(ip net.IP) error { ret := _m.Called(ip) + if len(ret) == 0 { + panic("no return value specified for BlockAddr") + } + var r0 error if rf, ok := ret.Get(0).(func(net.IP) error); ok { r0 = rf(ip) @@ -75,6 +79,10 @@ func (_c *BlockingConnectionGater_BlockAddr_Call) RunAndReturn(run func(net.IP) func (_m *BlockingConnectionGater) BlockPeer(p peer.ID) error { ret := _m.Called(p) + if len(ret) == 0 { + panic("no return value specified for BlockPeer") + } + var r0 error if rf, ok := ret.Get(0).(func(peer.ID) error); ok { r0 = rf(p) @@ -117,6 +125,10 @@ func (_c *BlockingConnectionGater_BlockPeer_Call) RunAndReturn(run func(peer.ID) func (_m *BlockingConnectionGater) BlockSubnet(ipnet *net.IPNet) error { ret := _m.Called(ipnet) + if len(ret) == 0 { + panic("no return value specified for BlockSubnet") + } + var r0 error if rf, ok := ret.Get(0).(func(*net.IPNet) error); ok { r0 = rf(ipnet) @@ -159,6 +171,10 @@ func (_c *BlockingConnectionGater_BlockSubnet_Call) RunAndReturn(run func(*net.I func (_m *BlockingConnectionGater) InterceptAccept(_a0 network.ConnMultiaddrs) bool { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for InterceptAccept") + } + var r0 bool if rf, ok := ret.Get(0).(func(network.ConnMultiaddrs) bool); ok { r0 = rf(_a0) @@ -201,6 +217,10 @@ func (_c *BlockingConnectionGater_InterceptAccept_Call) RunAndReturn(run func(ne func (_m *BlockingConnectionGater) InterceptAddrDial(_a0 peer.ID, _a1 multiaddr.Multiaddr) bool { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for InterceptAddrDial") + } + var r0 bool if rf, ok := ret.Get(0).(func(peer.ID, multiaddr.Multiaddr) bool); ok { r0 = rf(_a0, _a1) @@ -244,6 +264,10 @@ func (_c *BlockingConnectionGater_InterceptAddrDial_Call) RunAndReturn(run func( func (_m *BlockingConnectionGater) 
InterceptPeerDial(p peer.ID) bool { ret := _m.Called(p) + if len(ret) == 0 { + panic("no return value specified for InterceptPeerDial") + } + var r0 bool if rf, ok := ret.Get(0).(func(peer.ID) bool); ok { r0 = rf(p) @@ -286,6 +310,10 @@ func (_c *BlockingConnectionGater_InterceptPeerDial_Call) RunAndReturn(run func( func (_m *BlockingConnectionGater) InterceptSecured(_a0 network.Direction, _a1 peer.ID, _a2 network.ConnMultiaddrs) bool { ret := _m.Called(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for InterceptSecured") + } + var r0 bool if rf, ok := ret.Get(0).(func(network.Direction, peer.ID, network.ConnMultiaddrs) bool); ok { r0 = rf(_a0, _a1, _a2) @@ -330,6 +358,10 @@ func (_c *BlockingConnectionGater_InterceptSecured_Call) RunAndReturn(run func(n func (_m *BlockingConnectionGater) InterceptUpgraded(_a0 network.Conn) (bool, control.DisconnectReason) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for InterceptUpgraded") + } + var r0 bool var r1 control.DisconnectReason if rf, ok := ret.Get(0).(func(network.Conn) (bool, control.DisconnectReason)); ok { @@ -382,6 +414,10 @@ func (_c *BlockingConnectionGater_InterceptUpgraded_Call) RunAndReturn(run func( func (_m *BlockingConnectionGater) ListBlockedAddrs() []net.IP { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ListBlockedAddrs") + } + var r0 []net.IP if rf, ok := ret.Get(0).(func() []net.IP); ok { r0 = rf() @@ -425,6 +461,10 @@ func (_c *BlockingConnectionGater_ListBlockedAddrs_Call) RunAndReturn(run func() func (_m *BlockingConnectionGater) ListBlockedPeers() []peer.ID { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ListBlockedPeers") + } + var r0 []peer.ID if rf, ok := ret.Get(0).(func() []peer.ID); ok { r0 = rf() @@ -468,6 +508,10 @@ func (_c *BlockingConnectionGater_ListBlockedPeers_Call) RunAndReturn(run func() func (_m *BlockingConnectionGater) ListBlockedSubnets() []*net.IPNet { 
ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ListBlockedSubnets") + } + var r0 []*net.IPNet if rf, ok := ret.Get(0).(func() []*net.IPNet); ok { r0 = rf() @@ -511,6 +555,10 @@ func (_c *BlockingConnectionGater_ListBlockedSubnets_Call) RunAndReturn(run func func (_m *BlockingConnectionGater) UnblockAddr(ip net.IP) error { ret := _m.Called(ip) + if len(ret) == 0 { + panic("no return value specified for UnblockAddr") + } + var r0 error if rf, ok := ret.Get(0).(func(net.IP) error); ok { r0 = rf(ip) @@ -553,6 +601,10 @@ func (_c *BlockingConnectionGater_UnblockAddr_Call) RunAndReturn(run func(net.IP func (_m *BlockingConnectionGater) UnblockPeer(p peer.ID) error { ret := _m.Called(p) + if len(ret) == 0 { + panic("no return value specified for UnblockPeer") + } + var r0 error if rf, ok := ret.Get(0).(func(peer.ID) error); ok { r0 = rf(p) @@ -595,6 +647,10 @@ func (_c *BlockingConnectionGater_UnblockPeer_Call) RunAndReturn(run func(peer.I func (_m *BlockingConnectionGater) UnblockSubnet(ipnet *net.IPNet) error { ret := _m.Called(ipnet) + if len(ret) == 0 { + panic("no return value specified for UnblockSubnet") + } + var r0 error if rf, ok := ret.Get(0).(func(*net.IPNet) error); ok { r0 = rf(ipnet) @@ -633,13 +689,12 @@ func (_c *BlockingConnectionGater_UnblockSubnet_Call) RunAndReturn(run func(*net return _c } -type mockConstructorTestingTNewBlockingConnectionGater interface { +// NewBlockingConnectionGater creates a new instance of BlockingConnectionGater. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBlockingConnectionGater(t interface { mock.TestingT Cleanup(func()) -} - -// NewBlockingConnectionGater creates a new instance of BlockingConnectionGater. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewBlockingConnectionGater(t mockConstructorTestingTNewBlockingConnectionGater) *BlockingConnectionGater { +}) *BlockingConnectionGater { mock := &BlockingConnectionGater{} mock.Mock.Test(t) diff --git a/op-node/p2p/gating/mocks/ExpiryStore.go b/op-node/p2p/gating/mocks/ExpiryStore.go index f4c3faf81f40..6de9bba10d30 100644 --- a/op-node/p2p/gating/mocks/ExpiryStore.go +++ b/op-node/p2p/gating/mocks/ExpiryStore.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.28.1. DO NOT EDIT. +// Code generated by mockery v2.46.0. DO NOT EDIT. package mocks @@ -29,6 +29,10 @@ func (_m *ExpiryStore) EXPECT() *ExpiryStore_Expecter { func (_m *ExpiryStore) GetIPBanExpiration(ip net.IP) (time.Time, error) { ret := _m.Called(ip) + if len(ret) == 0 { + panic("no return value specified for GetIPBanExpiration") + } + var r0 time.Time var r1 error if rf, ok := ret.Get(0).(func(net.IP) (time.Time, error)); ok { @@ -81,6 +85,10 @@ func (_c *ExpiryStore_GetIPBanExpiration_Call) RunAndReturn(run func(net.IP) (ti func (_m *ExpiryStore) GetPeerBanExpiration(id peer.ID) (time.Time, error) { ret := _m.Called(id) + if len(ret) == 0 { + panic("no return value specified for GetPeerBanExpiration") + } + var r0 time.Time var r1 error if rf, ok := ret.Get(0).(func(peer.ID) (time.Time, error)); ok { @@ -133,6 +141,10 @@ func (_c *ExpiryStore_GetPeerBanExpiration_Call) RunAndReturn(run func(peer.ID) func (_m *ExpiryStore) SetIPBanExpiration(ip net.IP, expiry time.Time) error { ret := _m.Called(ip, expiry) + if len(ret) == 0 { + panic("no return value specified for SetIPBanExpiration") + } + var r0 error if rf, ok := ret.Get(0).(func(net.IP, time.Time) error); ok { r0 = rf(ip, expiry) @@ -176,6 +188,10 @@ func (_c *ExpiryStore_SetIPBanExpiration_Call) RunAndReturn(run func(net.IP, tim func (_m *ExpiryStore) SetPeerBanExpiration(id peer.ID, expiry time.Time) error { ret := _m.Called(id, expiry) + if len(ret) == 0 { + panic("no return value specified for SetPeerBanExpiration") + } + var r0 error if 
rf, ok := ret.Get(0).(func(peer.ID, time.Time) error); ok { r0 = rf(id, expiry) @@ -215,13 +231,12 @@ func (_c *ExpiryStore_SetPeerBanExpiration_Call) RunAndReturn(run func(peer.ID, return _c } -type mockConstructorTestingTNewExpiryStore interface { +// NewExpiryStore creates a new instance of ExpiryStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExpiryStore(t interface { mock.TestingT Cleanup(func()) -} - -// NewExpiryStore creates a new instance of ExpiryStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewExpiryStore(t mockConstructorTestingTNewExpiryStore) *ExpiryStore { +}) *ExpiryStore { mock := &ExpiryStore{} mock.Mock.Test(t) diff --git a/op-node/p2p/gating/mocks/Scores.go b/op-node/p2p/gating/mocks/Scores.go index eec399f831cc..39d645ef1996 100644 --- a/op-node/p2p/gating/mocks/Scores.go +++ b/op-node/p2p/gating/mocks/Scores.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.28.1. DO NOT EDIT. +// Code generated by mockery v2.46.0. DO NOT EDIT. package mocks @@ -24,6 +24,10 @@ func (_m *Scores) EXPECT() *Scores_Expecter { func (_m *Scores) GetPeerScore(id peer.ID) (float64, error) { ret := _m.Called(id) + if len(ret) == 0 { + panic("no return value specified for GetPeerScore") + } + var r0 float64 var r1 error if rf, ok := ret.Get(0).(func(peer.ID) (float64, error)); ok { @@ -72,13 +76,12 @@ func (_c *Scores_GetPeerScore_Call) RunAndReturn(run func(peer.ID) (float64, err return _c } -type mockConstructorTestingTNewScores interface { +// NewScores creates a new instance of Scores. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewScores(t interface { mock.TestingT Cleanup(func()) -} - -// NewScores creates a new instance of Scores. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewScores(t mockConstructorTestingTNewScores) *Scores { +}) *Scores { mock := &Scores{} mock.Mock.Test(t) diff --git a/op-node/p2p/mocks/API.go b/op-node/p2p/mocks/API.go index c9204cf287b4..dc128911e0fa 100644 --- a/op-node/p2p/mocks/API.go +++ b/op-node/p2p/mocks/API.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.28.1. DO NOT EDIT. +// Code generated by mockery v2.46.0. DO NOT EDIT. package mocks @@ -32,6 +32,10 @@ func (_m *API) EXPECT() *API_Expecter { func (_m *API) BlockAddr(ctx context.Context, ip net.IP) error { ret := _m.Called(ctx, ip) + if len(ret) == 0 { + panic("no return value specified for BlockAddr") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, net.IP) error); ok { r0 = rf(ctx, ip) @@ -75,6 +79,10 @@ func (_c *API_BlockAddr_Call) RunAndReturn(run func(context.Context, net.IP) err func (_m *API) BlockPeer(ctx context.Context, p peer.ID) error { ret := _m.Called(ctx, p) + if len(ret) == 0 { + panic("no return value specified for BlockPeer") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, peer.ID) error); ok { r0 = rf(ctx, p) @@ -118,6 +126,10 @@ func (_c *API_BlockPeer_Call) RunAndReturn(run func(context.Context, peer.ID) er func (_m *API) BlockSubnet(ctx context.Context, ipnet *net.IPNet) error { ret := _m.Called(ctx, ipnet) + if len(ret) == 0 { + panic("no return value specified for BlockSubnet") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *net.IPNet) error); ok { r0 = rf(ctx, ipnet) @@ -161,6 +173,10 @@ func (_c *API_BlockSubnet_Call) RunAndReturn(run func(context.Context, *net.IPNe func (_m *API) ConnectPeer(ctx context.Context, addr string) error { ret := _m.Called(ctx, addr) + if len(ret) == 0 { + panic("no return value specified for ConnectPeer") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { r0 = rf(ctx, addr) @@ -204,6 +220,10 
@@ func (_c *API_ConnectPeer_Call) RunAndReturn(run func(context.Context, string) e func (_m *API) DisconnectPeer(ctx context.Context, id peer.ID) error { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for DisconnectPeer") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, peer.ID) error); ok { r0 = rf(ctx, id) @@ -247,6 +267,10 @@ func (_c *API_DisconnectPeer_Call) RunAndReturn(run func(context.Context, peer.I func (_m *API) DiscoveryTable(ctx context.Context) ([]*enode.Node, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for DiscoveryTable") + } + var r0 []*enode.Node var r1 error if rf, ok := ret.Get(0).(func(context.Context) ([]*enode.Node, error)); ok { @@ -301,6 +325,10 @@ func (_c *API_DiscoveryTable_Call) RunAndReturn(run func(context.Context) ([]*en func (_m *API) ListBlockedAddrs(ctx context.Context) ([]net.IP, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for ListBlockedAddrs") + } + var r0 []net.IP var r1 error if rf, ok := ret.Get(0).(func(context.Context) ([]net.IP, error)); ok { @@ -355,6 +383,10 @@ func (_c *API_ListBlockedAddrs_Call) RunAndReturn(run func(context.Context) ([]n func (_m *API) ListBlockedPeers(ctx context.Context) ([]peer.ID, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for ListBlockedPeers") + } + var r0 []peer.ID var r1 error if rf, ok := ret.Get(0).(func(context.Context) ([]peer.ID, error)); ok { @@ -409,6 +441,10 @@ func (_c *API_ListBlockedPeers_Call) RunAndReturn(run func(context.Context) ([]p func (_m *API) ListBlockedSubnets(ctx context.Context) ([]*net.IPNet, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for ListBlockedSubnets") + } + var r0 []*net.IPNet var r1 error if rf, ok := ret.Get(0).(func(context.Context) ([]*net.IPNet, error)); ok { @@ -463,6 +499,10 @@ func (_c *API_ListBlockedSubnets_Call) 
RunAndReturn(run func(context.Context) ([ func (_m *API) PeerStats(ctx context.Context) (*p2p.PeerStats, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for PeerStats") + } + var r0 *p2p.PeerStats var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*p2p.PeerStats, error)); ok { @@ -517,6 +557,10 @@ func (_c *API_PeerStats_Call) RunAndReturn(run func(context.Context) (*p2p.PeerS func (_m *API) Peers(ctx context.Context, connected bool) (*p2p.PeerDump, error) { ret := _m.Called(ctx, connected) + if len(ret) == 0 { + panic("no return value specified for Peers") + } + var r0 *p2p.PeerDump var r1 error if rf, ok := ret.Get(0).(func(context.Context, bool) (*p2p.PeerDump, error)); ok { @@ -572,6 +616,10 @@ func (_c *API_Peers_Call) RunAndReturn(run func(context.Context, bool) (*p2p.Pee func (_m *API) ProtectPeer(ctx context.Context, p peer.ID) error { ret := _m.Called(ctx, p) + if len(ret) == 0 { + panic("no return value specified for ProtectPeer") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, peer.ID) error); ok { r0 = rf(ctx, p) @@ -615,6 +663,10 @@ func (_c *API_ProtectPeer_Call) RunAndReturn(run func(context.Context, peer.ID) func (_m *API) Self(ctx context.Context) (*p2p.PeerInfo, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for Self") + } + var r0 *p2p.PeerInfo var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*p2p.PeerInfo, error)); ok { @@ -669,6 +721,10 @@ func (_c *API_Self_Call) RunAndReturn(run func(context.Context) (*p2p.PeerInfo, func (_m *API) UnblockAddr(ctx context.Context, ip net.IP) error { ret := _m.Called(ctx, ip) + if len(ret) == 0 { + panic("no return value specified for UnblockAddr") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, net.IP) error); ok { r0 = rf(ctx, ip) @@ -712,6 +768,10 @@ func (_c *API_UnblockAddr_Call) RunAndReturn(run func(context.Context, net.IP) e func (_m *API) UnblockPeer(ctx 
context.Context, p peer.ID) error { ret := _m.Called(ctx, p) + if len(ret) == 0 { + panic("no return value specified for UnblockPeer") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, peer.ID) error); ok { r0 = rf(ctx, p) @@ -755,6 +815,10 @@ func (_c *API_UnblockPeer_Call) RunAndReturn(run func(context.Context, peer.ID) func (_m *API) UnblockSubnet(ctx context.Context, ipnet *net.IPNet) error { ret := _m.Called(ctx, ipnet) + if len(ret) == 0 { + panic("no return value specified for UnblockSubnet") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *net.IPNet) error); ok { r0 = rf(ctx, ipnet) @@ -798,6 +862,10 @@ func (_c *API_UnblockSubnet_Call) RunAndReturn(run func(context.Context, *net.IP func (_m *API) UnprotectPeer(ctx context.Context, p peer.ID) error { ret := _m.Called(ctx, p) + if len(ret) == 0 { + panic("no return value specified for UnprotectPeer") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, peer.ID) error); ok { r0 = rf(ctx, p) @@ -837,13 +905,12 @@ func (_c *API_UnprotectPeer_Call) RunAndReturn(run func(context.Context, peer.ID return _c } -type mockConstructorTestingTNewAPI interface { +// NewAPI creates a new instance of API. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAPI(t interface { mock.TestingT Cleanup(func()) -} - -// NewAPI creates a new instance of API. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAPI(t mockConstructorTestingTNewAPI) *API { +}) *API { mock := &API{} mock.Mock.Test(t) diff --git a/op-node/p2p/mocks/GossipMetricer.go b/op-node/p2p/mocks/GossipMetricer.go index d5da6438212f..fc4509b4feae 100644 --- a/op-node/p2p/mocks/GossipMetricer.go +++ b/op-node/p2p/mocks/GossipMetricer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.28.1. DO NOT EDIT. 
+// Code generated by mockery v2.46.0. DO NOT EDIT. package mocks @@ -14,13 +14,12 @@ func (_m *GossipMetricer) RecordGossipEvent(evType int32) { _m.Called(evType) } -type mockConstructorTestingTNewGossipMetricer interface { +// NewGossipMetricer creates a new instance of GossipMetricer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewGossipMetricer(t interface { mock.TestingT Cleanup(func()) -} - -// NewGossipMetricer creates a new instance of GossipMetricer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGossipMetricer(t mockConstructorTestingTNewGossipMetricer) *GossipMetricer { +}) *GossipMetricer { mock := &GossipMetricer{} mock.Mock.Test(t) diff --git a/op-node/p2p/mocks/Peerstore.go b/op-node/p2p/mocks/Peerstore.go index 9e49aaf3e1fc..bbf0656166be 100644 --- a/op-node/p2p/mocks/Peerstore.go +++ b/op-node/p2p/mocks/Peerstore.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.28.1. DO NOT EDIT. +// Code generated by mockery v2.46.0. DO NOT EDIT. 
package mocks @@ -19,6 +19,10 @@ type Peerstore struct { func (_m *Peerstore) PeerInfo(_a0 peer.ID) peer.AddrInfo { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for PeerInfo") + } + var r0 peer.AddrInfo if rf, ok := ret.Get(0).(func(peer.ID) peer.AddrInfo); ok { r0 = rf(_a0) @@ -33,6 +37,10 @@ func (_m *Peerstore) PeerInfo(_a0 peer.ID) peer.AddrInfo { func (_m *Peerstore) Peers() peer.IDSlice { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Peers") + } + var r0 peer.IDSlice if rf, ok := ret.Get(0).(func() peer.IDSlice); ok { r0 = rf() @@ -49,6 +57,10 @@ func (_m *Peerstore) Peers() peer.IDSlice { func (_m *Peerstore) SetScore(id peer.ID, diff store.ScoreDiff) (store.PeerScores, error) { ret := _m.Called(id, diff) + if len(ret) == 0 { + panic("no return value specified for SetScore") + } + var r0 store.PeerScores var r1 error if rf, ok := ret.Get(0).(func(peer.ID, store.ScoreDiff) (store.PeerScores, error)); ok { @@ -69,13 +81,12 @@ func (_m *Peerstore) SetScore(id peer.ID, diff store.ScoreDiff) (store.PeerScore return r0, r1 } -type mockConstructorTestingTNewPeerstore interface { +// NewPeerstore creates a new instance of Peerstore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPeerstore(t interface { mock.TestingT Cleanup(func()) -} - -// NewPeerstore creates a new instance of Peerstore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPeerstore(t mockConstructorTestingTNewPeerstore) *Peerstore { +}) *Peerstore { mock := &Peerstore{} mock.Mock.Test(t) diff --git a/op-node/p2p/mocks/ScoreMetrics.go b/op-node/p2p/mocks/ScoreMetrics.go index 7e04e44116ae..1c6547a54b0a 100644 --- a/op-node/p2p/mocks/ScoreMetrics.go +++ b/op-node/p2p/mocks/ScoreMetrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.28.1. 
DO NOT EDIT. +// Code generated by mockery v2.46.0. DO NOT EDIT. package mocks @@ -18,13 +18,12 @@ func (_m *ScoreMetrics) SetPeerScores(_a0 []store.PeerScores) { _m.Called(_a0) } -type mockConstructorTestingTNewScoreMetrics interface { +// NewScoreMetrics creates a new instance of ScoreMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewScoreMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewScoreMetrics creates a new instance of ScoreMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewScoreMetrics(t mockConstructorTestingTNewScoreMetrics) *ScoreMetrics { +}) *ScoreMetrics { mock := &ScoreMetrics{} mock.Mock.Test(t) diff --git a/op-node/p2p/monitor/mocks/PeerManager.go b/op-node/p2p/monitor/mocks/PeerManager.go index d91af047a182..78602e768ecf 100644 --- a/op-node/p2p/monitor/mocks/PeerManager.go +++ b/op-node/p2p/monitor/mocks/PeerManager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.28.1. DO NOT EDIT. +// Code generated by mockery v2.46.0. DO NOT EDIT. 
package mocks @@ -27,6 +27,10 @@ func (_m *PeerManager) EXPECT() *PeerManager_Expecter { func (_m *PeerManager) BanPeer(_a0 peer.ID, _a1 time.Time) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for BanPeer") + } + var r0 error if rf, ok := ret.Get(0).(func(peer.ID, time.Time) error); ok { r0 = rf(_a0, _a1) @@ -70,6 +74,10 @@ func (_c *PeerManager_BanPeer_Call) RunAndReturn(run func(peer.ID, time.Time) er func (_m *PeerManager) GetPeerScore(id peer.ID) (float64, error) { ret := _m.Called(id) + if len(ret) == 0 { + panic("no return value specified for GetPeerScore") + } + var r0 float64 var r1 error if rf, ok := ret.Get(0).(func(peer.ID) (float64, error)); ok { @@ -122,6 +130,10 @@ func (_c *PeerManager_GetPeerScore_Call) RunAndReturn(run func(peer.ID) (float64 func (_m *PeerManager) IsStatic(_a0 peer.ID) bool { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for IsStatic") + } + var r0 bool if rf, ok := ret.Get(0).(func(peer.ID) bool); ok { r0 = rf(_a0) @@ -164,6 +176,10 @@ func (_c *PeerManager_IsStatic_Call) RunAndReturn(run func(peer.ID) bool) *PeerM func (_m *PeerManager) Peers() []peer.ID { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Peers") + } + var r0 []peer.ID if rf, ok := ret.Get(0).(func() []peer.ID); ok { r0 = rf() @@ -203,13 +219,12 @@ func (_c *PeerManager_Peers_Call) RunAndReturn(run func() []peer.ID) *PeerManage return _c } -type mockConstructorTestingTNewPeerManager interface { +// NewPeerManager creates a new instance of PeerManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPeerManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewPeerManager creates a new instance of PeerManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewPeerManager(t mockConstructorTestingTNewPeerManager) *PeerManager { +}) *PeerManager { mock := &PeerManager{} mock.Mock.Test(t) diff --git a/op-service/sources/mocks/BeaconClient.go b/op-service/sources/mocks/BeaconClient.go index b862b39db64c..89b07d0670c5 100644 --- a/op-service/sources/mocks/BeaconClient.go +++ b/op-service/sources/mocks/BeaconClient.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.28.1. DO NOT EDIT. +// Code generated by mockery v2.46.0. DO NOT EDIT. package mocks @@ -26,6 +26,10 @@ func (_m *BeaconClient) EXPECT() *BeaconClient_Expecter { func (_m *BeaconClient) BeaconBlobSideCars(ctx context.Context, fetchAllSidecars bool, slot uint64, hashes []eth.IndexedBlobHash) (eth.APIGetBlobSidecarsResponse, error) { ret := _m.Called(ctx, fetchAllSidecars, slot, hashes) + if len(ret) == 0 { + panic("no return value specified for BeaconBlobSideCars") + } + var r0 eth.APIGetBlobSidecarsResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, bool, uint64, []eth.IndexedBlobHash) (eth.APIGetBlobSidecarsResponse, error)); ok { @@ -81,6 +85,10 @@ func (_c *BeaconClient_BeaconBlobSideCars_Call) RunAndReturn(run func(context.Co func (_m *BeaconClient) BeaconGenesis(ctx context.Context) (eth.APIGenesisResponse, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for BeaconGenesis") + } + var r0 eth.APIGenesisResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context) (eth.APIGenesisResponse, error)); ok { @@ -133,6 +141,10 @@ func (_c *BeaconClient_BeaconGenesis_Call) RunAndReturn(run func(context.Context func (_m *BeaconClient) ConfigSpec(ctx context.Context) (eth.APIConfigResponse, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for ConfigSpec") + } + var r0 eth.APIConfigResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context) (eth.APIConfigResponse, error)); ok { @@ -185,6 +197,10 @@ func (_c *BeaconClient_ConfigSpec_Call) RunAndReturn(run 
func(context.Context) ( func (_m *BeaconClient) NodeVersion(ctx context.Context) (string, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for NodeVersion") + } + var r0 string var r1 error if rf, ok := ret.Get(0).(func(context.Context) (string, error)); ok { @@ -233,13 +249,12 @@ func (_c *BeaconClient_NodeVersion_Call) RunAndReturn(run func(context.Context) return _c } -type mockConstructorTestingTNewBeaconClient interface { +// NewBeaconClient creates a new instance of BeaconClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBeaconClient(t interface { mock.TestingT Cleanup(func()) -} - -// NewBeaconClient creates a new instance of BeaconClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBeaconClient(t mockConstructorTestingTNewBeaconClient) *BeaconClient { +}) *BeaconClient { mock := &BeaconClient{} mock.Mock.Test(t) diff --git a/op-service/sources/mocks/BlobSideCarsFetcher.go b/op-service/sources/mocks/BlobSideCarsFetcher.go index 5dc530d9317a..94c76f5671cb 100644 --- a/op-service/sources/mocks/BlobSideCarsFetcher.go +++ b/op-service/sources/mocks/BlobSideCarsFetcher.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.28.1. DO NOT EDIT. +// Code generated by mockery v2.46.0. DO NOT EDIT. 
package mocks @@ -26,6 +26,10 @@ func (_m *BlobSideCarsFetcher) EXPECT() *BlobSideCarsFetcher_Expecter { func (_m *BlobSideCarsFetcher) BeaconBlobSideCars(ctx context.Context, fetchAllSidecars bool, slot uint64, hashes []eth.IndexedBlobHash) (eth.APIGetBlobSidecarsResponse, error) { ret := _m.Called(ctx, fetchAllSidecars, slot, hashes) + if len(ret) == 0 { + panic("no return value specified for BeaconBlobSideCars") + } + var r0 eth.APIGetBlobSidecarsResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, bool, uint64, []eth.IndexedBlobHash) (eth.APIGetBlobSidecarsResponse, error)); ok { @@ -77,13 +81,12 @@ func (_c *BlobSideCarsFetcher_BeaconBlobSideCars_Call) RunAndReturn(run func(con return _c } -type mockConstructorTestingTNewBlobSideCarsFetcher interface { +// NewBlobSideCarsFetcher creates a new instance of BlobSideCarsFetcher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBlobSideCarsFetcher(t interface { mock.TestingT Cleanup(func()) -} - -// NewBlobSideCarsFetcher creates a new instance of BlobSideCarsFetcher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlobSideCarsFetcher(t mockConstructorTestingTNewBlobSideCarsFetcher) *BlobSideCarsFetcher { +}) *BlobSideCarsFetcher { mock := &BlobSideCarsFetcher{} mock.Mock.Test(t) diff --git a/op-service/txmgr/mocks/TxManager.go b/op-service/txmgr/mocks/TxManager.go index ec805b74d004..0a803f790b2e 100644 --- a/op-service/txmgr/mocks/TxManager.go +++ b/op-service/txmgr/mocks/TxManager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.28.1. DO NOT EDIT. +// Code generated by mockery v2.46.0. DO NOT EDIT. 
package mocks @@ -26,6 +26,10 @@ type TxManager struct { func (_m *TxManager) API() rpc.API { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for API") + } + var r0 rpc.API if rf, ok := ret.Get(0).(func() rpc.API); ok { r0 = rf() @@ -40,6 +44,10 @@ func (_m *TxManager) API() rpc.API { func (_m *TxManager) BlockNumber(ctx context.Context) (uint64, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for BlockNumber") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { @@ -69,6 +77,10 @@ func (_m *TxManager) Close() { func (_m *TxManager) From() common.Address { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for From") + } + var r0 common.Address if rf, ok := ret.Get(0).(func() common.Address); ok { r0 = rf() @@ -85,6 +97,10 @@ func (_m *TxManager) From() common.Address { func (_m *TxManager) IsClosed() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for IsClosed") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -99,6 +115,10 @@ func (_m *TxManager) IsClosed() bool { func (_m *TxManager) Send(ctx context.Context, candidate txmgr.TxCandidate) (*types.Receipt, error) { ret := _m.Called(ctx, candidate) + if len(ret) == 0 { + panic("no return value specified for Send") + } + var r0 *types.Receipt var r1 error if rf, ok := ret.Get(0).(func(context.Context, txmgr.TxCandidate) (*types.Receipt, error)); ok { @@ -130,6 +150,10 @@ func (_m *TxManager) SendAsync(ctx context.Context, candidate txmgr.TxCandidate, func (_m *TxManager) SuggestGasPriceCaps(ctx context.Context) (*big.Int, *big.Int, *big.Int, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for SuggestGasPriceCaps") + } + var r0 *big.Int var r1 *big.Int var r2 *big.Int @@ -170,13 +194,12 @@ func (_m *TxManager) SuggestGasPriceCaps(ctx context.Context) (*big.Int, *big.In return r0, 
r1, r2, r3 } -type mockConstructorTestingTNewTxManager interface { +// NewTxManager creates a new instance of TxManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTxManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewTxManager creates a new instance of TxManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTxManager(t mockConstructorTestingTNewTxManager) *TxManager { +}) *TxManager { mock := &TxManager{} mock.Mock.Test(t) diff --git a/ops/docker/ci-builder/Dockerfile b/ops/docker/ci-builder/Dockerfile index 3c1956987b55..52e1f4e649fb 100644 --- a/ops/docker/ci-builder/Dockerfile +++ b/ops/docker/ci-builder/Dockerfile @@ -54,10 +54,10 @@ COPY ./versions.json ./versions.json RUN go install github.com/ethereum/go-ethereum/cmd/abigen@$(jq -r .abigen < versions.json) RUN go install github.com/ethereum/go-ethereum/cmd/geth@$(jq -r .geth < versions.json) -RUN go install gotest.tools/gotestsum@v1.11.0 -RUN go install github.com/vektra/mockery/v2@v2.28.1 -RUN go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.54.2 -RUN go install github.com/mikefarah/yq/v4@v4.43.1 +RUN go install gotest.tools/gotestsum@v1.12.0 +RUN go install github.com/vektra/mockery/v2@v2.46.0 +RUN go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.61.0 +RUN go install github.com/mikefarah/yq/v4@v4.44.3 # Strip binaries to reduce size RUN strip /go/bin/gotestsum && \ From c8afa158061f113e45b9acd9f47fc2aa3ef94106 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Fri, 27 Sep 2024 06:30:52 +1000 Subject: [PATCH 051/116] cannon: Output information about the state in JSON format from the witness command (#12137) * cannon: Output information about the state in JSON format from the witness command. 
Will provide all the information about a state that the challenger needs so it doesn't have to depend on the parsing code directly. * cannon: Update multicannon witness description. --- cannon/Makefile | 2 ++ cannon/cmd/witness.go | 30 ++++++++++++++++++++++++------ cannon/multicannon/witness.go | 2 +- 3 files changed, 27 insertions(+), 7 deletions(-) diff --git a/cannon/Makefile b/cannon/Makefile index ea9a29ebb5c2..e80de55b5e44 100644 --- a/cannon/Makefile +++ b/cannon/Makefile @@ -13,6 +13,8 @@ ifeq ($(shell uname),Darwin) FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic endif +.DEFAULT_GOAL := cannon + cannon-impl: env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/cannon-impl . diff --git a/cannon/cmd/witness.go b/cannon/cmd/witness.go index 753438493f95..d74e145df7bf 100644 --- a/cannon/cmd/witness.go +++ b/cannon/cmd/witness.go @@ -5,6 +5,9 @@ import ( "os" factory "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/op-service/jsonutil" + "github.com/ethereum/go-ethereum/common" "github.com/urfave/cli/v2" ) @@ -22,20 +25,35 @@ var ( } ) +type response struct { + WitnessHash common.Hash `json:"witnessHash"` + Step uint64 `json:"step"` + Exited bool `json:"exited"` + ExitCode uint8 `json:"exitCode"` +} + func Witness(ctx *cli.Context) error { input := ctx.Path(WitnessInputFlag.Name) - output := ctx.Path(WitnessOutputFlag.Name) + witnessOutput := ctx.Path(WitnessOutputFlag.Name) state, err := factory.LoadStateFromFile(input) if err != nil { return fmt.Errorf("invalid input state (%v): %w", input, err) } witness, h := state.EncodeWitness() - if output != "" { - if err := os.WriteFile(output, witness, 0755); err != nil { - return fmt.Errorf("writing output to %v: %w", output, err) + if witnessOutput != "" { + if err := os.WriteFile(witnessOutput, witness, 0755); err != nil { + return fmt.Errorf("writing output 
to %v: %w", witnessOutput, err) } } - fmt.Println(h.Hex()) + output := response{ + WitnessHash: h, + Step: state.GetStep(), + Exited: state.GetExited(), + ExitCode: state.GetExitCode(), + } + if err := jsonutil.WriteJSON(output, ioutil.ToStdOut()); err != nil { + return fmt.Errorf("failed to write response: %w", err) + } return nil } @@ -43,7 +61,7 @@ func CreateWitnessCommand(action cli.ActionFunc) *cli.Command { return &cli.Command{ Name: "witness", Usage: "Convert a Cannon JSON state into a binary witness", - Description: "Convert a Cannon JSON state into a binary witness. The hash of the witness is written to stdout", + Description: "Convert a Cannon JSON state into a binary witness. Basic data about the state is printed to stdout in JSON format.", Action: action, Flags: []cli.Flag{ WitnessInputFlag, diff --git a/cannon/multicannon/witness.go b/cannon/multicannon/witness.go index 2c72ebecf56a..c54fd9487e57 100644 --- a/cannon/multicannon/witness.go +++ b/cannon/multicannon/witness.go @@ -32,7 +32,7 @@ func Witness(ctx *cli.Context) error { var WitnessCommand = &cli.Command{ Name: "witness", Usage: "Convert a Cannon JSON state into a binary witness", - Description: "Convert a Cannon JSON state into a binary witness. The hash of the witness is written to stdout", + Description: "Convert a Cannon JSON state into a binary witness. Basic data about the state is printed to stdout in JSON format.", Action: Witness, SkipFlagParsing: true, } From 368c13326f154d051d9dee9c336c2b7c470e3483 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Fri, 27 Sep 2024 06:59:44 +1000 Subject: [PATCH 052/116] op-challenger: Use witness subcommand instead of parsing cannon states (#12141) * op-challenger: Use the cannon witness subcommand to read states instead of directly calling the parsing code. * op-challenger: Use a context when downloading the prestate. 
--- cannon/cmd/witness.go | 11 +- op-challenger/game/fault/register.go | 2 +- op-challenger/game/fault/register_task.go | 24 +-- .../game/fault/trace/asterisc/provider.go | 4 +- .../fault/trace/asterisc/state_converter.go | 3 +- .../game/fault/trace/cannon/provider.go | 8 +- .../game/fault/trace/cannon/provider_test.go | 18 ++- .../fault/trace/cannon/state_converter.go | 54 +++++-- .../trace/cannon/state_converter_test.go | 144 ++++++++---------- .../game/fault/trace/prestates/cache.go | 12 +- .../game/fault/trace/prestates/cache_test.go | 12 +- .../game/fault/trace/prestates/multi.go | 18 ++- .../game/fault/trace/prestates/multi_test.go | 17 ++- .../game/fault/trace/prestates/single.go | 8 +- op-challenger/game/fault/trace/vm/iface.go | 8 +- op-challenger/game/fault/trace/vm/prestate.go | 4 +- .../game/fault/trace/vm/prestate_test.go | 2 +- op-challenger/runner/factory.go | 34 +++-- op-challenger/runner/runner.go | 4 +- 19 files changed, 219 insertions(+), 168 deletions(-) diff --git a/cannon/cmd/witness.go b/cannon/cmd/witness.go index d74e145df7bf..9fbc9727d8d7 100644 --- a/cannon/cmd/witness.go +++ b/cannon/cmd/witness.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/ioutil" "github.com/ethereum-optimism/optimism/op-service/jsonutil" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/urfave/cli/v2" ) @@ -26,10 +27,11 @@ var ( ) type response struct { - WitnessHash common.Hash `json:"witnessHash"` - Step uint64 `json:"step"` - Exited bool `json:"exited"` - ExitCode uint8 `json:"exitCode"` + WitnessHash common.Hash `json:"witnessHash"` + Witness hexutil.Bytes `json:"witness"` + Step uint64 `json:"step"` + Exited bool `json:"exited"` + ExitCode uint8 `json:"exitCode"` } func Witness(ctx *cli.Context) error { @@ -47,6 +49,7 @@ func Witness(ctx *cli.Context) error { } output := response{ WitnessHash: h, + Witness: witness, Step: state.GetStep(), Exited: state.GetExited(), ExitCode: 
state.GetExitCode(), diff --git a/op-challenger/game/fault/register.go b/op-challenger/game/fault/register.go index 08140164a17c..38957be0ce95 100644 --- a/op-challenger/game/fault/register.go +++ b/op-challenger/game/fault/register.go @@ -35,7 +35,7 @@ type PrestateSource interface { // PrestatePath returns the path to the prestate file to use for the game. // The provided prestateHash may be used to differentiate between different states but no guarantee is made that // the returned prestate matches the supplied hash. - PrestatePath(prestateHash common.Hash) (string, error) + PrestatePath(ctx context.Context, prestateHash common.Hash) (string, error) } type RollupClient interface { diff --git a/op-challenger/game/fault/register_task.go b/op-challenger/game/fault/register_task.go index dd346f42077f..b2dfac5f4a6a 100644 --- a/op-challenger/game/fault/register_task.go +++ b/op-challenger/game/fault/register_task.go @@ -33,7 +33,7 @@ type RegisterTask struct { gameType faultTypes.GameType skipPrestateValidation bool - getPrestateProvider func(prestateHash common.Hash) (faultTypes.PrestateProvider, error) + getPrestateProvider func(ctx context.Context, prestateHash common.Hash) (faultTypes.PrestateProvider, error) newTraceAccessor func( logger log.Logger, m metrics.Metricer, @@ -49,7 +49,7 @@ type RegisterTask struct { } func NewCannonRegisterTask(gameType faultTypes.GameType, cfg *config.Config, m caching.Metrics, serverExecutor vm.OracleServerExecutor) *RegisterTask { - stateConverter := cannon.NewStateConverter() + stateConverter := cannon.NewStateConverter(cfg.Cannon) return &RegisterTask{ gameType: gameType, // Don't validate the absolute prestate or genesis output root for permissioned games @@ -63,7 +63,7 @@ func NewCannonRegisterTask(gameType faultTypes.GameType, cfg *config.Config, m c cfg.CannonAbsolutePreStateBaseURL, cfg.CannonAbsolutePreState, filepath.Join(cfg.Datadir, "cannon-prestates"), - func(path string) faultTypes.PrestateProvider { + func(ctx 
context.Context, path string) faultTypes.PrestateProvider { return vm.NewPrestateProvider(path, stateConverter) }), newTraceAccessor: func( @@ -95,7 +95,7 @@ func NewAsteriscRegisterTask(gameType faultTypes.GameType, cfg *config.Config, m cfg.AsteriscAbsolutePreStateBaseURL, cfg.AsteriscAbsolutePreState, filepath.Join(cfg.Datadir, "asterisc-prestates"), - func(path string) faultTypes.PrestateProvider { + func(ctx context.Context, path string) faultTypes.PrestateProvider { return vm.NewPrestateProvider(path, stateConverter) }), newTraceAccessor: func( @@ -127,7 +127,7 @@ func NewAsteriscKonaRegisterTask(gameType faultTypes.GameType, cfg *config.Confi cfg.AsteriscKonaAbsolutePreStateBaseURL, cfg.AsteriscKonaAbsolutePreState, filepath.Join(cfg.Datadir, "asterisc-kona-prestates"), - func(path string) faultTypes.PrestateProvider { + func(ctx context.Context, path string) faultTypes.PrestateProvider { return vm.NewPrestateProvider(path, stateConverter) }), newTraceAccessor: func( @@ -151,7 +151,7 @@ func NewAsteriscKonaRegisterTask(gameType faultTypes.GameType, cfg *config.Confi func NewAlphabetRegisterTask(gameType faultTypes.GameType) *RegisterTask { return &RegisterTask{ gameType: gameType, - getPrestateProvider: func(_ common.Hash) (faultTypes.PrestateProvider, error) { + getPrestateProvider: func(_ context.Context, _ common.Hash) (faultTypes.PrestateProvider, error) { return alphabet.PrestateProvider, nil }, newTraceAccessor: func( @@ -178,15 +178,15 @@ func cachePrestates( prestateBaseURL *url.URL, preStatePath string, prestateDir string, - newPrestateProvider func(path string) faultTypes.PrestateProvider, -) func(prestateHash common.Hash) (faultTypes.PrestateProvider, error) { + newPrestateProvider func(ctx context.Context, path string) faultTypes.PrestateProvider, +) func(ctx context.Context, prestateHash common.Hash) (faultTypes.PrestateProvider, error) { prestateSource := prestates.NewPrestateSource(prestateBaseURL, preStatePath, prestateDir, stateConverter) - 
prestateProviderCache := prestates.NewPrestateProviderCache(m, fmt.Sprintf("prestates-%v", gameType), func(prestateHash common.Hash) (faultTypes.PrestateProvider, error) { - prestatePath, err := prestateSource.PrestatePath(prestateHash) + prestateProviderCache := prestates.NewPrestateProviderCache(m, fmt.Sprintf("prestates-%v", gameType), func(ctx context.Context, prestateHash common.Hash) (faultTypes.PrestateProvider, error) { + prestatePath, err := prestateSource.PrestatePath(ctx, prestateHash) if err != nil { return nil, fmt.Errorf("required prestate %v not available: %w", prestateHash, err) } - return newPrestateProvider(prestatePath), nil + return newPrestateProvider(ctx, prestatePath), nil }) return prestateProviderCache.GetOrCreate } @@ -219,7 +219,7 @@ func (e *RegisterTask) Register( return nil, fmt.Errorf("failed to load prestate hash for game %v: %w", game.Proxy, err) } - vmPrestateProvider, err := e.getPrestateProvider(requiredPrestatehash) + vmPrestateProvider, err := e.getPrestateProvider(ctx, requiredPrestatehash) if err != nil { return nil, fmt.Errorf("required prestate %v not available for game %v: %w", requiredPrestatehash, game.Proxy, err) } diff --git a/op-challenger/game/fault/trace/asterisc/provider.go b/op-challenger/game/fault/trace/asterisc/provider.go index 2ea1b729709e..1f6b77e12432 100644 --- a/op-challenger/game/fault/trace/asterisc/provider.go +++ b/op-challenger/game/fault/trace/asterisc/provider.go @@ -125,7 +125,7 @@ func (p *AsteriscTraceProvider) loadProof(ctx context.Context, i uint64) (*utils file, err = ioutil.OpenDecompressed(path) if errors.Is(err, os.ErrNotExist) { // Expected proof wasn't generated, check if we reached the end of execution - proof, step, exited, err := p.stateConverter.ConvertStateToProof(vm.FinalStatePath(p.dir, p.cfg.BinarySnapshots)) + proof, step, exited, err := p.stateConverter.ConvertStateToProof(ctx, vm.FinalStatePath(p.dir, p.cfg.BinarySnapshots)) if err != nil { return nil, err } @@ -185,7 +185,7 
@@ func (p *AsteriscTraceProviderForTest) FindStep(ctx context.Context, start uint6 return 0, fmt.Errorf("generate asterisc trace (until preimage read): %w", err) } // Load the step from the state asterisc finished with - _, step, exited, err := p.stateConverter.ConvertStateToProof(vm.FinalStatePath(p.dir, p.cfg.BinarySnapshots)) + _, step, exited, err := p.stateConverter.ConvertStateToProof(ctx, vm.FinalStatePath(p.dir, p.cfg.BinarySnapshots)) if err != nil { return 0, fmt.Errorf("failed to load final state: %w", err) } diff --git a/op-challenger/game/fault/trace/asterisc/state_converter.go b/op-challenger/game/fault/trace/asterisc/state_converter.go index 29c9f8b2ea50..050cd7f5d8df 100644 --- a/op-challenger/game/fault/trace/asterisc/state_converter.go +++ b/op-challenger/game/fault/trace/asterisc/state_converter.go @@ -1,6 +1,7 @@ package asterisc import ( + "context" "encoding/json" "fmt" "io" @@ -83,7 +84,7 @@ func NewStateConverter() *StateConverter { return &StateConverter{} } -func (c *StateConverter) ConvertStateToProof(statePath string) (*utils.ProofData, uint64, bool, error) { +func (c *StateConverter) ConvertStateToProof(_ context.Context, statePath string) (*utils.ProofData, uint64, bool, error) { state, err := parseState(statePath) if err != nil { return nil, 0, false, fmt.Errorf("cannot read final state: %w", err) diff --git a/op-challenger/game/fault/trace/cannon/provider.go b/op-challenger/game/fault/trace/cannon/provider.go index 823f8c6d814b..cca2cf0e484e 100644 --- a/op-challenger/game/fault/trace/cannon/provider.go +++ b/op-challenger/game/fault/trace/cannon/provider.go @@ -50,7 +50,7 @@ func NewTraceProvider(logger log.Logger, m vm.Metricer, cfg vm.Config, vmCfg vm. 
return kvstore.NewDiskKV(logger, vm.PreimageDir(dir), kvtypes.DataFormatFile) }), PrestateProvider: prestateProvider, - stateConverter: &StateConverter{}, + stateConverter: NewStateConverter(cfg), cfg: cfg, } } @@ -125,7 +125,7 @@ func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*utils.P // Try opening the file again now and it should exist. file, err = ioutil.OpenDecompressed(path) if errors.Is(err, os.ErrNotExist) { - proof, stateStep, exited, err := p.stateConverter.ConvertStateToProof(vm.FinalStatePath(p.dir, p.cfg.BinarySnapshots)) + proof, stateStep, exited, err := p.stateConverter.ConvertStateToProof(ctx, vm.FinalStatePath(p.dir, p.cfg.BinarySnapshots)) if err != nil { return nil, fmt.Errorf("cannot create proof from final state: %w", err) } @@ -172,7 +172,7 @@ func NewTraceProviderForTest(logger log.Logger, m vm.Metricer, cfg *config.Confi preimageLoader: utils.NewPreimageLoader(func() (utils.PreimageSource, error) { return kvstore.NewDiskKV(logger, vm.PreimageDir(dir), kvtypes.DataFormatFile) }), - stateConverter: NewStateConverter(), + stateConverter: NewStateConverter(cfg.Cannon), cfg: cfg.Cannon, } return &CannonTraceProviderForTest{p} @@ -185,7 +185,7 @@ func (p *CannonTraceProviderForTest) FindStep(ctx context.Context, start uint64, } // Load the step from the state cannon finished with - _, step, exited, err := p.stateConverter.ConvertStateToProof(vm.FinalStatePath(p.dir, p.cfg.BinarySnapshots)) + _, step, exited, err := p.stateConverter.ConvertStateToProof(ctx, vm.FinalStatePath(p.dir, p.cfg.BinarySnapshots)) if err != nil { return 0, fmt.Errorf("failed to load final state: %w", err) } diff --git a/op-challenger/game/fault/trace/cannon/provider_test.go b/op-challenger/game/fault/trace/cannon/provider_test.go index 82edc5562623..01cd513cb85f 100644 --- a/op-challenger/game/fault/trace/cannon/provider_test.go +++ b/op-challenger/game/fault/trace/cannon/provider_test.go @@ -244,7 +244,7 @@ func setupWithTestData(t *testing.T, 
dataDir string, prestate string) (*CannonTr generator: generator, prestate: filepath.Join(dataDir, prestate), gameDepth: 63, - stateConverter: &StateConverter{}, + stateConverter: generator, }, generator } @@ -252,6 +252,21 @@ type stubGenerator struct { generated []int // Using int makes assertions easier finalState *singlethreaded.State proof *utils.ProofData + + finalStatePath string +} + +func (e *stubGenerator) ConvertStateToProof(ctx context.Context, statePath string) (*utils.ProofData, uint64, bool, error) { + if statePath == e.finalStatePath { + witness, hash := e.finalState.EncodeWitness() + return &utils.ProofData{ + ClaimValue: hash, + StateData: witness, + ProofData: []byte{}, + }, e.finalState.Step, e.finalState.Exited, nil + } else { + return nil, 0, false, fmt.Errorf("loading unexpected state: %s, only support: %s", statePath, e.finalStatePath) + } } func (e *stubGenerator) GenerateProof(ctx context.Context, dir string, i uint64) error { @@ -262,6 +277,7 @@ func (e *stubGenerator) GenerateProof(ctx context.Context, dir string, i uint64) if e.finalState != nil && e.finalState.Step <= i { // Requesting a trace index past the end of the trace proofFile = vm.FinalStatePath(dir, false) + e.finalStatePath = proofFile data, err = json.Marshal(e.finalState) if err != nil { return err diff --git a/op-challenger/game/fault/trace/cannon/state_converter.go b/op-challenger/game/fault/trace/cannon/state_converter.go index 248676cc326e..5a6349618f41 100644 --- a/op-challenger/game/fault/trace/cannon/state_converter.go +++ b/op-challenger/game/fault/trace/cannon/state_converter.go @@ -1,38 +1,66 @@ package cannon import ( + "bytes" + "context" + "encoding/json" "fmt" + "os/exec" - "github.com/ethereum-optimism/optimism/cannon/mipsevm" - "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" + 
"github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" ) +type stateData struct { + WitnessHash common.Hash `json:"witnessHash"` + Witness hexutil.Bytes `json:"witness"` + Step uint64 `json:"step"` + Exited bool `json:"exited"` +} + type StateConverter struct { + vmConfig vm.Config + cmdExecutor func(ctx context.Context, binary string, args ...string) (stdOut string, stdErr string, err error) } -func NewStateConverter() *StateConverter { - return &StateConverter{} +func NewStateConverter(vmConfig vm.Config) *StateConverter { + return &StateConverter{ + vmConfig: vmConfig, + cmdExecutor: runCmd, + } } -func (c *StateConverter) ConvertStateToProof(statePath string) (*utils.ProofData, uint64, bool, error) { - state, err := parseState(statePath) +func (c *StateConverter) ConvertStateToProof(ctx context.Context, statePath string) (*utils.ProofData, uint64, bool, error) { + stdOut, stdErr, err := c.cmdExecutor(ctx, c.vmConfig.VmBin, "witness", "--input", statePath) if err != nil { - return nil, 0, false, fmt.Errorf("cannot read final state: %w", err) + return nil, 0, false, fmt.Errorf("state conversion failed: %w (%s)", err, stdErr) + } + var data stateData + if err := json.Unmarshal([]byte(stdOut), &data); err != nil { + return nil, 0, false, fmt.Errorf("failed to parse state data: %w", err) } // Extend the trace out to the full length using a no-op instruction that doesn't change any state // No execution is done, so no proof-data or oracle values are required. 
- witness, witnessHash := state.EncodeWitness() return &utils.ProofData{ - ClaimValue: witnessHash, - StateData: witness, + ClaimValue: data.WitnessHash, + StateData: data.Witness, ProofData: []byte{}, OracleKey: nil, OracleValue: nil, OracleOffset: 0, - }, state.GetStep(), state.GetExited(), nil + }, data.Step, data.Exited, nil } -func parseState(path string) (mipsevm.FPVMState, error) { - return versions.LoadStateFromFile(path) +func runCmd(ctx context.Context, binary string, args ...string) (stdOut string, stdErr string, err error) { + var outBuf bytes.Buffer + var errBuf bytes.Buffer + cmd := exec.CommandContext(ctx, binary, args...) + cmd.Stdout = &outBuf + cmd.Stderr = &errBuf + err = cmd.Run() + stdOut = outBuf.String() + stdErr = errBuf.String() + return } diff --git a/op-challenger/game/fault/trace/cannon/state_converter_test.go b/op-challenger/game/fault/trace/cannon/state_converter_test.go index c0c0182529ff..56f093696814 100644 --- a/op-challenger/game/fault/trace/cannon/state_converter_test.go +++ b/op-challenger/game/fault/trace/cannon/state_converter_test.go @@ -1,96 +1,78 @@ package cannon import ( - _ "embed" - "path/filepath" + "context" + "encoding/json" + "errors" "testing" - "github.com/ethereum-optimism/optimism/cannon/mipsevm" - "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" - "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" - "github.com/ethereum-optimism/optimism/cannon/serialize" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" + "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/cannon/mipsevm/singlethreaded" ) -func TestLoadState(t *testing.T) { - tests := []struct { - name string - creator func() mipsevm.FPVMState - supportsJSON bool - }{ - { - name: "singlethreaded", - creator: func() mipsevm.FPVMState { return singlethreaded.CreateInitialState(234, 82) }, - supportsJSON: true, - }, - { - name: 
"multithreaded", - creator: func() mipsevm.FPVMState { return multithreaded.CreateInitialState(982, 492) }, - supportsJSON: false, - }, - } - for _, test := range tests { - test := test - loadExpectedState := func(t *testing.T) *versions.VersionedState { - state, err := versions.NewFromState(test.creator()) - require.NoError(t, err) - return state - } - t.Run(test.name, func(t *testing.T) { - t.Run("Uncompressed", func(t *testing.T) { - if !test.supportsJSON { - t.Skip("JSON not supported by state version") - } - expected := loadExpectedState(t) - path := writeState(t, "state.json", expected) - - state, err := parseState(path) - require.NoError(t, err) - - require.Equal(t, expected, state) - }) - - t.Run("Gzipped", func(t *testing.T) { - if !test.supportsJSON { - t.Skip("JSON not supported by state version") - } - expected := loadExpectedState(t) - path := writeState(t, "state.json.gz", expected) - - state, err := parseState(path) - require.NoError(t, err) - - require.Equal(t, expected, state) - }) +const testBinary = "./somewhere/cannon" - t.Run("Binary", func(t *testing.T) { - expected := loadExpectedState(t) - - path := writeState(t, "state.bin", expected) - - state, err := parseState(path) - require.NoError(t, err) - require.Equal(t, expected, state) - }) +func TestStateConverter(t *testing.T) { + setup := func(t *testing.T) (*StateConverter, *capturingExecutor) { + vmCfg := vm.Config{ + VmBin: testBinary, + } + executor := &capturingExecutor{} + converter := NewStateConverter(vmCfg) + converter.cmdExecutor = executor.exec + return converter, executor + } - t.Run("BinaryGzip", func(t *testing.T) { - expected := loadExpectedState(t) + t.Run("Valid", func(t *testing.T) { + converter, executor := setup(t) + data := stateData{ + WitnessHash: common.Hash{0xab}, + Witness: []byte{1, 2, 3, 4}, + Step: 42, + Exited: true, + } + ser, err := json.Marshal(data) + require.NoError(t, err) + executor.stdOut = string(ser) + proof, step, exited, err := 
converter.ConvertStateToProof(context.Background(), "foo.json") + require.NoError(t, err) + require.Equal(t, data.Exited, exited) + require.Equal(t, data.Step, step) + require.Equal(t, data.WitnessHash, proof.ClaimValue) + require.Equal(t, data.Witness, proof.StateData) + require.NotNil(t, proof.ProofData, "later validations require this to be non-nil") + + require.Equal(t, testBinary, executor.binary) + require.Equal(t, []string{"witness", "--input", "foo.json"}, executor.args) + }) + + t.Run("CommandError", func(t *testing.T) { + converter, executor := setup(t) + executor.err = errors.New("boom") + _, _, _, err := converter.ConvertStateToProof(context.Background(), "foo.json") + require.ErrorIs(t, err, executor.err) + }) + + t.Run("InvalidOutput", func(t *testing.T) { + converter, executor := setup(t) + executor.stdOut = "blah blah" + _, _, _, err := converter.ConvertStateToProof(context.Background(), "foo.json") + require.ErrorContains(t, err, "failed to parse state data") + }) +} - path := writeState(t, "state.bin.gz", expected) +type capturingExecutor struct { + binary string + args []string - state, err := parseState(path) - require.NoError(t, err) - require.Equal(t, expected, state) - }) - }) - } + stdOut string + stdErr string + err error } -func writeState(t *testing.T, filename string, state *versions.VersionedState) string { - dir := t.TempDir() - path := filepath.Join(dir, filename) - require.NoError(t, serialize.Write(path, state, 0644)) - return path +func (c *capturingExecutor) exec(_ context.Context, binary string, args ...string) (string, string, error) { + c.binary = binary + c.args = args + return c.stdOut, c.stdErr, c.err } diff --git a/op-challenger/game/fault/trace/prestates/cache.go b/op-challenger/game/fault/trace/prestates/cache.go index 03915477f19c..233cdfadbe8f 100644 --- a/op-challenger/game/fault/trace/prestates/cache.go +++ b/op-challenger/game/fault/trace/prestates/cache.go @@ -1,6 +1,8 @@ package prestates import ( + "context" + 
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" "github.com/ethereum-optimism/optimism/op-service/sources/caching" "github.com/ethereum/go-ethereum/common" @@ -10,27 +12,27 @@ type PrestateSource interface { // PrestatePath returns the path to the prestate file to use for the game. // The provided prestateHash may be used to differentiate between different states but no guarantee is made that // the returned prestate matches the supplied hash. - PrestatePath(prestateHash common.Hash) (string, error) + PrestatePath(ctx context.Context, prestateHash common.Hash) (string, error) } type PrestateProviderCache struct { - createProvider func(prestateHash common.Hash) (types.PrestateProvider, error) + createProvider func(ctx context.Context, prestateHash common.Hash) (types.PrestateProvider, error) cache *caching.LRUCache[common.Hash, types.PrestateProvider] } -func NewPrestateProviderCache(m caching.Metrics, label string, createProvider func(prestateHash common.Hash) (types.PrestateProvider, error)) *PrestateProviderCache { +func NewPrestateProviderCache(m caching.Metrics, label string, createProvider func(ctx context.Context, prestateHash common.Hash) (types.PrestateProvider, error)) *PrestateProviderCache { return &PrestateProviderCache{ createProvider: createProvider, cache: caching.NewLRUCache[common.Hash, types.PrestateProvider](m, label, 5), } } -func (p *PrestateProviderCache) GetOrCreate(prestateHash common.Hash) (types.PrestateProvider, error) { +func (p *PrestateProviderCache) GetOrCreate(ctx context.Context, prestateHash common.Hash) (types.PrestateProvider, error) { provider, ok := p.cache.Get(prestateHash) if ok { return provider, nil } - provider, err := p.createProvider(prestateHash) + provider, err := p.createProvider(ctx, prestateHash) if err != nil { return nil, err } diff --git a/op-challenger/game/fault/trace/prestates/cache_test.go b/op-challenger/game/fault/trace/prestates/cache_test.go index 820418eb4bda..4157234a2cbd 100644 
--- a/op-challenger/game/fault/trace/prestates/cache_test.go +++ b/op-challenger/game/fault/trace/prestates/cache_test.go @@ -11,26 +11,26 @@ import ( ) func TestPrestateProviderCache_CreateAndCache(t *testing.T) { - cache := NewPrestateProviderCache(nil, "", func(prestateHash common.Hash) (types.PrestateProvider, error) { + cache := NewPrestateProviderCache(nil, "", func(_ context.Context, prestateHash common.Hash) (types.PrestateProvider, error) { return &stubPrestateProvider{commitment: prestateHash}, nil }) hash1 := common.Hash{0xaa} hash2 := common.Hash{0xbb} - provider1a, err := cache.GetOrCreate(hash1) + provider1a, err := cache.GetOrCreate(context.Background(), hash1) require.NoError(t, err) commitment, err := provider1a.AbsolutePreStateCommitment(context.Background()) require.NoError(t, err) require.Equal(t, hash1, commitment) - provider1b, err := cache.GetOrCreate(hash1) + provider1b, err := cache.GetOrCreate(context.Background(), hash1) require.NoError(t, err) require.Same(t, provider1a, provider1b) commitment, err = provider1b.AbsolutePreStateCommitment(context.Background()) require.NoError(t, err) require.Equal(t, hash1, commitment) - provider2, err := cache.GetOrCreate(hash2) + provider2, err := cache.GetOrCreate(context.Background(), hash2) require.NoError(t, err) require.NotSame(t, provider1a, provider2) commitment, err = provider2.AbsolutePreStateCommitment(context.Background()) @@ -41,10 +41,10 @@ func TestPrestateProviderCache_CreateAndCache(t *testing.T) { func TestPrestateProviderCache_CreateFails(t *testing.T) { hash1 := common.Hash{0xaa} expectedErr := errors.New("boom") - cache := NewPrestateProviderCache(nil, "", func(prestateHash common.Hash) (types.PrestateProvider, error) { + cache := NewPrestateProviderCache(nil, "", func(_ context.Context, prestateHash common.Hash) (types.PrestateProvider, error) { return nil, expectedErr }) - provider, err := cache.GetOrCreate(hash1) + provider, err := cache.GetOrCreate(context.Background(), hash1) 
require.ErrorIs(t, err, expectedErr) require.Nil(t, provider) } diff --git a/op-challenger/game/fault/trace/prestates/multi.go b/op-challenger/game/fault/trace/prestates/multi.go index 03abbbc56486..020dfc40deaf 100644 --- a/op-challenger/game/fault/trace/prestates/multi.go +++ b/op-challenger/game/fault/trace/prestates/multi.go @@ -1,6 +1,7 @@ package prestates import ( + "context" "errors" "fmt" "io" @@ -8,6 +9,7 @@ import ( "net/url" "os" "path/filepath" + "time" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" "github.com/ethereum-optimism/optimism/op-service/ioutil" @@ -35,7 +37,7 @@ func NewMultiPrestateProvider(baseUrl *url.URL, dataDir string, stateConverter v } } -func (m *MultiPrestateProvider) PrestatePath(hash common.Hash) (string, error) { +func (m *MultiPrestateProvider) PrestatePath(ctx context.Context, hash common.Hash) (string, error) { // First try to find a previously downloaded prestate for _, fileType := range supportedFileTypes { path := filepath.Join(m.dataDir, hash.Hex()+fileType) @@ -51,7 +53,7 @@ func (m *MultiPrestateProvider) PrestatePath(hash common.Hash) (string, error) { var combinedErr error // Keep a track of each download attempt so we can report them if none work for _, fileType := range supportedFileTypes { path := filepath.Join(m.dataDir, hash.Hex()+fileType) - if err := m.fetchPrestate(hash, fileType, path); errors.Is(err, ErrPrestateUnavailable) { + if err := m.fetchPrestate(ctx, hash, fileType, path); errors.Is(err, ErrPrestateUnavailable) { combinedErr = errors.Join(combinedErr, err) continue // Didn't find prestate in this format, try the next } else if err != nil { @@ -62,12 +64,18 @@ func (m *MultiPrestateProvider) PrestatePath(hash common.Hash) (string, error) { return "", errors.Join(ErrPrestateUnavailable, combinedErr) } -func (m *MultiPrestateProvider) fetchPrestate(hash common.Hash, fileType string, dest string) error { +func (m *MultiPrestateProvider) fetchPrestate(ctx context.Context, hash 
common.Hash, fileType string, dest string) error { if err := os.MkdirAll(m.dataDir, 0755); err != nil { return fmt.Errorf("error creating prestate dir: %w", err) } prestateUrl := m.baseUrl.JoinPath(hash.Hex() + fileType) - resp, err := http.Get(prestateUrl.String()) + tCtx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + req, err := http.NewRequestWithContext(tCtx, "GET", prestateUrl.String(), nil) + if err != nil { + return fmt.Errorf("failed to create prestate request: %w", err) + } + resp, err := http.DefaultClient.Do(req) if err != nil { return fmt.Errorf("failed to fetch prestate from %v: %w", prestateUrl, err) } @@ -91,7 +99,7 @@ func (m *MultiPrestateProvider) fetchPrestate(hash common.Hash, fileType string, return fmt.Errorf("failed to close file %v: %w", dest, err) } // Verify the prestate actually matches the expected hash before moving it into the final destination - proof, _, _, err := m.stateConverter.ConvertStateToProof(tmpFile) + proof, _, _, err := m.stateConverter.ConvertStateToProof(ctx, tmpFile) if err != nil || proof.ClaimValue != hash { // Treat invalid prestates as unavailable. 
Often servers return a 404 page with 200 status code _ = os.Remove(tmpFile) // Best effort attempt to clean up the temporary file diff --git a/op-challenger/game/fault/trace/prestates/multi_test.go b/op-challenger/game/fault/trace/prestates/multi_test.go index 7b09b81bdc67..e8f79dd2fcc8 100644 --- a/op-challenger/game/fault/trace/prestates/multi_test.go +++ b/op-challenger/game/fault/trace/prestates/multi_test.go @@ -1,6 +1,7 @@ package prestates import ( + "context" "errors" "io" "net/http" @@ -25,7 +26,7 @@ func TestDownloadPrestate(t *testing.T) { defer server.Close() hash := common.Hash{0xaa} provider := NewMultiPrestateProvider(parseURL(t, server.URL), dir, &stubStateConverter{hash: hash}) - path, err := provider.PrestatePath(hash) + path, err := provider.PrestatePath(context.Background(), hash) require.NoError(t, err) in, err := os.Open(path) require.NoError(t, err) @@ -46,7 +47,7 @@ func TestCreateDirectory(t *testing.T) { defer server.Close() hash := common.Hash{0xaa} provider := NewMultiPrestateProvider(parseURL(t, server.URL), dir, &stubStateConverter{hash: hash}) - path, err := provider.PrestatePath(hash) + path, err := provider.PrestatePath(context.Background(), hash) require.NoError(t, err) in, err := os.Open(path) require.NoError(t, err) @@ -66,7 +67,7 @@ func TestExistingPrestate(t *testing.T) { err := ioutil.WriteCompressedBytes(expectedFile, []byte("expected content"), os.O_WRONLY|os.O_CREATE, 0o644) require.NoError(t, err) - path, err := provider.PrestatePath(hash) + path, err := provider.PrestatePath(context.Background(), hash) require.NoError(t, err) require.Equal(t, expectedFile, path) in, err := ioutil.OpenDecompressed(path) @@ -87,7 +88,7 @@ func TestMissingPrestate(t *testing.T) { defer server.Close() hash := common.Hash{0xaa} provider := NewMultiPrestateProvider(parseURL(t, server.URL), dir, &stubStateConverter{hash: hash}) - path, err := provider.PrestatePath(hash) + path, err := provider.PrestatePath(context.Background(), hash) 
require.ErrorIs(t, err, ErrPrestateUnavailable) _, err = os.Stat(path) require.ErrorIs(t, err, os.ErrNotExist) @@ -115,7 +116,7 @@ func TestStorePrestateWithCorrectExtension(t *testing.T) { defer server.Close() hash := common.Hash{0xaa} provider := NewMultiPrestateProvider(parseURL(t, server.URL), dir, &stubStateConverter{hash: hash}) - path, err := provider.PrestatePath(hash) + path, err := provider.PrestatePath(context.Background(), hash) require.NoError(t, err) require.Truef(t, strings.HasSuffix(path, ext), "Expected path %v to have extension %v", path, ext) in, err := os.Open(path) @@ -136,7 +137,7 @@ func TestDetectInvalidPrestate(t *testing.T) { defer server.Close() hash := common.Hash{0xaa} provider := NewMultiPrestateProvider(parseURL(t, server.URL), dir, &stubStateConverter{hash: hash, err: errors.New("boom")}) - _, err := provider.PrestatePath(hash) + _, err := provider.PrestatePath(context.Background(), hash) require.ErrorIs(t, err, ErrPrestateUnavailable) entries, err := os.ReadDir(dir) require.NoError(t, err) @@ -152,7 +153,7 @@ func TestDetectPrestateWithWrongHash(t *testing.T) { hash := common.Hash{0xaa} actualHash := common.Hash{0xbb} provider := NewMultiPrestateProvider(parseURL(t, server.URL), dir, &stubStateConverter{hash: actualHash}) - _, err := provider.PrestatePath(hash) + _, err := provider.PrestatePath(context.Background(), hash) require.ErrorIs(t, err, ErrPrestateUnavailable) entries, err := os.ReadDir(dir) require.NoError(t, err) @@ -180,7 +181,7 @@ type stubStateConverter struct { hash common.Hash } -func (s *stubStateConverter) ConvertStateToProof(path string) (*utils.ProofData, uint64, bool, error) { +func (s *stubStateConverter) ConvertStateToProof(_ context.Context, path string) (*utils.ProofData, uint64, bool, error) { // Return an error if we're given the wrong path if _, err := os.Stat(path); err != nil { return nil, 0, false, err diff --git a/op-challenger/game/fault/trace/prestates/single.go 
b/op-challenger/game/fault/trace/prestates/single.go index 978f17f55d4d..08f43913fb73 100644 --- a/op-challenger/game/fault/trace/prestates/single.go +++ b/op-challenger/game/fault/trace/prestates/single.go @@ -1,6 +1,10 @@ package prestates -import "github.com/ethereum/go-ethereum/common" +import ( + "context" + + "github.com/ethereum/go-ethereum/common" +) type SinglePrestateSource struct { path string @@ -10,6 +14,6 @@ func NewSinglePrestateSource(path string) *SinglePrestateSource { return &SinglePrestateSource{path: path} } -func (s *SinglePrestateSource) PrestatePath(_ common.Hash) (string, error) { +func (s *SinglePrestateSource) PrestatePath(_ context.Context, _ common.Hash) (string, error) { return s.path, nil } diff --git a/op-challenger/game/fault/trace/vm/iface.go b/op-challenger/game/fault/trace/vm/iface.go index 188f19e0c8e2..1fa988aa3197 100644 --- a/op-challenger/game/fault/trace/vm/iface.go +++ b/op-challenger/game/fault/trace/vm/iface.go @@ -1,9 +1,13 @@ package vm -import "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" +import ( + "context" + + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" +) type StateConverter interface { // ConvertStateToProof reads the state snapshot at the specified path and converts it to ProofData. // Returns the proof data, the VM step the state is from and whether or not the VM had exited. 
- ConvertStateToProof(statePath string) (*utils.ProofData, uint64, bool, error) + ConvertStateToProof(ctx context.Context, statePath string) (*utils.ProofData, uint64, bool, error) } diff --git a/op-challenger/game/fault/trace/vm/prestate.go b/op-challenger/game/fault/trace/vm/prestate.go index bbb4a9437d9a..cec662d41369 100644 --- a/op-challenger/game/fault/trace/vm/prestate.go +++ b/op-challenger/game/fault/trace/vm/prestate.go @@ -25,11 +25,11 @@ func NewPrestateProvider(prestate string, converter StateConverter) *PrestatePro } } -func (p *PrestateProvider) AbsolutePreStateCommitment(_ context.Context) (common.Hash, error) { +func (p *PrestateProvider) AbsolutePreStateCommitment(ctx context.Context) (common.Hash, error) { if p.prestateCommitment != (common.Hash{}) { return p.prestateCommitment, nil } - proof, _, _, err := p.stateConverter.ConvertStateToProof(p.prestate) + proof, _, _, err := p.stateConverter.ConvertStateToProof(ctx, p.prestate) if err != nil { return common.Hash{}, fmt.Errorf("cannot load absolute pre-state: %w", err) } diff --git a/op-challenger/game/fault/trace/vm/prestate_test.go b/op-challenger/game/fault/trace/vm/prestate_test.go index 69498e323c59..cdca129fe386 100644 --- a/op-challenger/game/fault/trace/vm/prestate_test.go +++ b/op-challenger/game/fault/trace/vm/prestate_test.go @@ -16,7 +16,7 @@ type stubConverter struct { hash common.Hash } -func (s *stubConverter) ConvertStateToProof(statePath string) (*utils.ProofData, uint64, bool, error) { +func (s *stubConverter) ConvertStateToProof(_ context.Context, _ string) (*utils.ProofData, uint64, bool, error) { if s.err != nil { return nil, 0, false, s.err } diff --git a/op-challenger/runner/factory.go b/op-challenger/runner/factory.go index 898afdbbf1b7..ee9abb9db291 100644 --- a/op-challenger/runner/factory.go +++ b/op-challenger/runner/factory.go @@ -1,6 +1,7 @@ package runner import ( + "context" "errors" "fmt" "net/url" @@ -18,6 +19,7 @@ import ( ) func createTraceProvider( + ctx 
context.Context, logger log.Logger, m vm.Metricer, cfg *config.Config, @@ -28,51 +30,51 @@ func createTraceProvider( ) (types.TraceProvider, error) { switch traceType { case types.TraceTypeCannon: - vmConfig := vm.NewOpProgramServerExecutor() - stateConverter := cannon.NewStateConverter() - prestate, err := getPrestate(prestateHash, cfg.CannonAbsolutePreStateBaseURL, cfg.CannonAbsolutePreState, dir, stateConverter) + serverExecutor := vm.NewOpProgramServerExecutor() + stateConverter := cannon.NewStateConverter(cfg.Cannon) + prestate, err := getPrestate(ctx, prestateHash, cfg.CannonAbsolutePreStateBaseURL, cfg.CannonAbsolutePreState, dir, stateConverter) if err != nil { return nil, err } prestateProvider := vm.NewPrestateProvider(prestate, stateConverter) - return cannon.NewTraceProvider(logger, m, cfg.Cannon, vmConfig, prestateProvider, prestate, localInputs, dir, 42), nil + return cannon.NewTraceProvider(logger, m, cfg.Cannon, serverExecutor, prestateProvider, prestate, localInputs, dir, 42), nil case types.TraceTypeAsterisc: - vmConfig := vm.NewOpProgramServerExecutor() + serverExecutor := vm.NewOpProgramServerExecutor() stateConverter := asterisc.NewStateConverter() - prestate, err := getPrestate(prestateHash, cfg.AsteriscAbsolutePreStateBaseURL, cfg.AsteriscAbsolutePreState, dir, stateConverter) + prestate, err := getPrestate(ctx, prestateHash, cfg.AsteriscAbsolutePreStateBaseURL, cfg.AsteriscAbsolutePreState, dir, stateConverter) if err != nil { return nil, err } prestateProvider := vm.NewPrestateProvider(prestate, stateConverter) - return asterisc.NewTraceProvider(logger, m, cfg.Asterisc, vmConfig, prestateProvider, prestate, localInputs, dir, 42), nil + return asterisc.NewTraceProvider(logger, m, cfg.Asterisc, serverExecutor, prestateProvider, prestate, localInputs, dir, 42), nil case types.TraceTypeAsteriscKona: - vmConfig := vm.NewKonaExecutor() + serverExecutor := vm.NewKonaExecutor() stateConverter := asterisc.NewStateConverter() - prestate, err := 
getPrestate(prestateHash, cfg.AsteriscKonaAbsolutePreStateBaseURL, cfg.AsteriscKonaAbsolutePreState, dir, stateConverter) + prestate, err := getPrestate(ctx, prestateHash, cfg.AsteriscKonaAbsolutePreStateBaseURL, cfg.AsteriscKonaAbsolutePreState, dir, stateConverter) if err != nil { return nil, err } prestateProvider := vm.NewPrestateProvider(prestate, stateConverter) - return asterisc.NewTraceProvider(logger, m, cfg.AsteriscKona, vmConfig, prestateProvider, prestate, localInputs, dir, 42), nil + return asterisc.NewTraceProvider(logger, m, cfg.AsteriscKona, serverExecutor, prestateProvider, prestate, localInputs, dir, 42), nil } return nil, errors.New("invalid trace type") } func createMTTraceProvider( + ctx context.Context, logger log.Logger, m vm.Metricer, vmConfig vm.Config, prestateHash common.Hash, absolutePrestateBaseURL *url.URL, - traceType types.TraceType, localInputs utils.LocalGameInputs, dir string, ) (types.TraceProvider, error) { executor := vm.NewOpProgramServerExecutor() - stateConverter := cannon.NewStateConverter() + stateConverter := cannon.NewStateConverter(vmConfig) - prestateSource := prestates.NewMultiPrestateProvider(absolutePrestateBaseURL, filepath.Join(dir, "prestates"), cannon.NewStateConverter()) - prestatePath, err := prestateSource.PrestatePath(prestateHash) + prestateSource := prestates.NewMultiPrestateProvider(absolutePrestateBaseURL, filepath.Join(dir, "prestates"), stateConverter) + prestatePath, err := prestateSource.PrestatePath(ctx, prestateHash) if err != nil { return nil, fmt.Errorf("failed to get prestate %v: %w", prestateHash, err) } @@ -80,14 +82,14 @@ func createMTTraceProvider( return cannon.NewTraceProvider(logger, m, vmConfig, executor, prestateProvider, prestatePath, localInputs, dir, 42), nil } -func getPrestate(prestateHash common.Hash, prestateBaseUrl *url.URL, prestatePath string, dataDir string, stateConverter vm.StateConverter) (string, error) { +func getPrestate(ctx context.Context, prestateHash common.Hash, 
prestateBaseUrl *url.URL, prestatePath string, dataDir string, stateConverter vm.StateConverter) (string, error) { prestateSource := prestates.NewPrestateSource( prestateBaseUrl, prestatePath, filepath.Join(dataDir, "prestates"), stateConverter) - prestate, err := prestateSource.PrestatePath(prestateHash) + prestate, err := prestateSource.PrestatePath(ctx, prestateHash) if err != nil { return "", fmt.Errorf("failed to get prestate %v: %w", prestateHash, err) } diff --git a/op-challenger/runner/runner.go b/op-challenger/runner/runner.go index 61fc8180905f..8e12d6ae0553 100644 --- a/op-challenger/runner/runner.go +++ b/op-challenger/runner/runner.go @@ -172,7 +172,7 @@ func (r *Runner) runAndRecordOnce(ctx context.Context, traceType types.TraceType } func (r *Runner) runOnce(ctx context.Context, logger log.Logger, traceType types.TraceType, prestateHash common.Hash, localInputs utils.LocalGameInputs, dir string) error { - provider, err := createTraceProvider(logger, metrics.NewVmMetrics(r.m, traceType.String()), r.cfg, prestateHash, traceType, localInputs, dir) + provider, err := createTraceProvider(ctx, logger, metrics.NewVmMetrics(r.m, traceType.String()), r.cfg, prestateHash, traceType, localInputs, dir) if err != nil { return fmt.Errorf("failed to create trace provider: %w", err) } @@ -187,7 +187,7 @@ func (r *Runner) runOnce(ctx context.Context, logger log.Logger, traceType types } func (r *Runner) runMTOnce(ctx context.Context, logger log.Logger, localInputs utils.LocalGameInputs, dir string) error { - provider, err := createMTTraceProvider(logger, metrics.NewVmMetrics(r.m, mtCannonType), r.cfg.Cannon, r.addMTCannonPrestate, r.addMTCannonPrestateURL, types.TraceTypeCannon, localInputs, dir) + provider, err := createMTTraceProvider(ctx, logger, metrics.NewVmMetrics(r.m, mtCannonType), r.cfg.Cannon, r.addMTCannonPrestate, r.addMTCannonPrestateURL, localInputs, dir) if err != nil { return fmt.Errorf("failed to create trace provider: %w", err) } From 
521fab3b6604fab671bb32a64d9dc91e3a5d9bf6 Mon Sep 17 00:00:00 2001 From: Blaine Malone Date: Thu, 26 Sep 2024 17:18:23 -0400 Subject: [PATCH 053/116] feat: Change OPCM salt to include user supplied arg (#12152) * feat: Change OPCM salt to include user supplied arg * fix: removing temporary assumption. * fix: bumped OPContractsManager version. * fix: snapshots update. * fix: bump OPContractsManager version. * fix: snapshots update. * fix: pre-pr run * fix: wiring up Create2Salt as SaltMixer for DeployOPChainInput --- op-chain-ops/deployer/opcm/opchain.go | 1 + op-chain-ops/deployer/pipeline/opchain.go | 1 + op-chain-ops/interopgen/configs.go | 3 +- op-chain-ops/interopgen/deploy.go | 1 + .../scripts/DeployOPChain.s.sol | 14 +++++- packages/contracts-bedrock/semver-lock.json | 4 +- .../snapshots/abi/OPContractsManager.json | 10 ++++ .../abi/OPContractsManagerInterop.json | 10 ++++ .../src/L1/OPContractsManager.sol | 49 ++++++++++++------- .../test/L1/OPContractsManager.t.sol | 3 +- .../test/opcm/DeployOPChain.t.sol | 4 ++ 11 files changed, 77 insertions(+), 23 deletions(-) diff --git a/op-chain-ops/deployer/opcm/opchain.go b/op-chain-ops/deployer/opcm/opchain.go index c204c1a57ec3..512b133c5876 100644 --- a/op-chain-ops/deployer/opcm/opchain.go +++ b/op-chain-ops/deployer/opcm/opchain.go @@ -34,6 +34,7 @@ type DeployOPChainInput struct { BlobBaseFeeScalar uint32 L2ChainId *big.Int OpcmProxy common.Address + SaltMixer string } func (input *DeployOPChainInput) InputSet() bool { diff --git a/op-chain-ops/deployer/pipeline/opchain.go b/op-chain-ops/deployer/pipeline/opchain.go index 27919fb8b135..a7bb0d6a96b8 100644 --- a/op-chain-ops/deployer/pipeline/opchain.go +++ b/op-chain-ops/deployer/pipeline/opchain.go @@ -40,6 +40,7 @@ func DeployOPChain(ctx context.Context, env *Env, artifactsFS foundry.StatDirFs, BlobBaseFeeScalar: 801949, L2ChainId: chainID.Big(), OpcmProxy: st.ImplementationsDeployment.OpcmProxyAddress, + SaltMixer: st.Create2Salt.String(), // passing through 
salt generated at state initialization } var dco opcm.DeployOPChainOutput diff --git a/op-chain-ops/interopgen/configs.go b/op-chain-ops/interopgen/configs.go index 946a60468b8a..d98b6429a872 100644 --- a/op-chain-ops/interopgen/configs.go +++ b/op-chain-ops/interopgen/configs.go @@ -75,7 +75,8 @@ type L2Config struct { Challenger common.Address SystemConfigOwner common.Address genesis.L2InitializationConfig - Prefund map[common.Address]*big.Int + Prefund map[common.Address]*big.Int + SaltMixer string } func (c *L2Config) Check(log log.Logger) error { diff --git a/op-chain-ops/interopgen/deploy.go b/op-chain-ops/interopgen/deploy.go index 95964a699407..6701e9c940c2 100644 --- a/op-chain-ops/interopgen/deploy.go +++ b/op-chain-ops/interopgen/deploy.go @@ -207,6 +207,7 @@ func DeployL2ToL1(l1Host *script.Host, superCfg *SuperchainConfig, superDeployme BlobBaseFeeScalar: cfg.GasPriceOracleBlobBaseFeeScalar, L2ChainId: new(big.Int).SetUint64(cfg.L2ChainID), OpcmProxy: superDeployment.OpcmProxy, + SaltMixer: cfg.SaltMixer, }) if err != nil { return nil, fmt.Errorf("failed to deploy L2 OP chain: %w", err) diff --git a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol index fb4eb986a631..25b3447ad723 100644 --- a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol @@ -47,6 +47,7 @@ contract DeployOPChainInput is BaseDeployIO { uint32 internal _blobBaseFeeScalar; uint256 internal _l2ChainId; OPContractsManager internal _opcmProxy; + string internal _saltMixer; function set(bytes4 _sel, address _addr) public { require(_addr != address(0), "DeployOPChainInput: cannot set zero address"); @@ -73,6 +74,12 @@ contract DeployOPChainInput is BaseDeployIO { } } + function set(bytes4 _sel, string memory _value) public { + require((bytes(_value).length != 0), "DeployImplementationsInput: cannot set empty string"); + if (_sel == this.saltMixer.selector) 
_saltMixer = _value; + else revert("DeployOPChainInput: unknown selector"); + } + function opChainProxyAdminOwner() public view returns (address) { require(_opChainProxyAdminOwner != address(0), "DeployOPChainInput: not set"); return _opChainProxyAdminOwner; @@ -145,6 +152,10 @@ contract DeployOPChainInput is BaseDeployIO { DeployUtils.assertImplementationSet(address(_opcmProxy)); return _opcmProxy; } + + function saltMixer() public view returns (string memory) { + return _saltMixer; + } } contract DeployOPChainOutput is BaseDeployIO { @@ -486,7 +497,8 @@ contract DeployOPChain is Script { basefeeScalar: _doi.basefeeScalar(), blobBasefeeScalar: _doi.blobBaseFeeScalar(), l2ChainId: _doi.l2ChainId(), - startingAnchorRoots: _doi.startingAnchorRoots() + startingAnchorRoots: _doi.startingAnchorRoots(), + saltMixer: _doi.saltMixer() }); vm.broadcast(msg.sender); diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index e3e7c8989d52..8670b4ff9d3a 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -32,8 +32,8 @@ "sourceCodeHash": "0xde4df0f9633dc0cdb1c9f634003ea5b0f7c5c1aebc407bc1b2f44c0ecf938649" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0x292d367322dc74744e8c98c463021e1abae77e57954eef8bac6e2081fcba5644", - "sourceCodeHash": "0xbfcc2032df842e50067d4b4a75ce66cc14cc34e67d35e37e2160215be57d8e2e" + "initCodeHash": "0x8f00d4415fe9bef59c1aec5b6729105c686e0238ce947432b2b5a035589cff19", + "sourceCodeHash": "0x4b1cb591b22821ae7246fe47260e1ece74f2cb0463fb949de66fe2b6a986a32c" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xbe2c0c81b3459014f287d8c89cdc0d27dde5d1f44e5d024fa1e4773ddc47c190", diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json index 57900b34e8e3..2ff2826881f5 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json +++ 
b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json @@ -162,6 +162,11 @@ "internalType": "bytes", "name": "startingAnchorRoots", "type": "bytes" + }, + { + "internalType": "string", + "name": "saltMixer", + "type": "string" } ], "internalType": "struct OPContractsManager.DeployInput", @@ -565,6 +570,11 @@ "name": "InvalidRoleAddress", "type": "error" }, + { + "inputs": [], + "name": "InvalidStartingAnchorRoots", + "type": "error" + }, { "inputs": [], "name": "LatestReleaseNotSet", diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json index 57900b34e8e3..2ff2826881f5 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json @@ -162,6 +162,11 @@ "internalType": "bytes", "name": "startingAnchorRoots", "type": "bytes" + }, + { + "internalType": "string", + "name": "saltMixer", + "type": "string" } ], "internalType": "struct OPContractsManager.DeployInput", @@ -565,6 +570,11 @@ "name": "InvalidRoleAddress", "type": "error" }, + { + "inputs": [], + "name": "InvalidStartingAnchorRoots", + "type": "error" + }, { "inputs": [], "name": "LatestReleaseNotSet", diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index ed4d0675faf0..67e48e95702a 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -62,6 +62,8 @@ contract OPContractsManager is ISemver, Initializable { // The correct type is AnchorStateRegistry.StartingAnchorRoot[] memory, // but OP Deployer does not yet support structs. bytes startingAnchorRoots; + // The salt mixer is used as part of making the resulting salt unique. + string saltMixer; } /// @notice The full set of outputs from deploying a new OP Stack chain. 
@@ -124,8 +126,8 @@ contract OPContractsManager is ISemver, Initializable { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.9 - string public constant version = "1.0.0-beta.9"; + /// @custom:semver 1.0.0-beta.10 + string public constant version = "1.0.0-beta.10"; /// @notice Represents the interface version so consumers know how to decode the DeployOutput struct /// that's emitted in the `Deployed` event. Whenever that struct changes, a new version should be used. @@ -187,6 +189,9 @@ contract OPContractsManager is ISemver, Initializable { /// @notice Thrown when the latest release is not set upon initialization. error LatestReleaseNotSet(); + /// @notice Thrown when the starting anchor roots are not provided. + error InvalidStartingAnchorRoots(); + // -------- Methods -------- /// @notice OPCM is proxied. Therefore the `initialize` function replaces most constructor logic for this contract. @@ -218,10 +223,11 @@ contract OPContractsManager is ISemver, Initializable { function deploy(DeployInput calldata _input) external returns (DeployOutput memory) { assertValidInputs(_input); - // TODO Determine how we want to choose salt, e.g. are we concerned about chain ID squatting - // since this approach means a chain ID can only be used once. uint256 l2ChainId = _input.l2ChainId; - bytes32 salt = bytes32(_input.l2ChainId); + + // The salt for a non-proxy contract is a function of the chain ID and the salt mixer. + string memory saltMixer = _input.saltMixer; + bytes32 salt = keccak256(abi.encode(l2ChainId, saltMixer)); DeployOutput memory output; // -------- Deploy Chain Singletons -------- @@ -238,17 +244,19 @@ contract OPContractsManager is ISemver, Initializable { // -------- Deploy Proxy Contracts -------- // Deploy ERC-1967 proxied contracts. 
- output.l1ERC721BridgeProxy = L1ERC721Bridge(deployProxy(l2ChainId, output.opChainProxyAdmin, "L1ERC721Bridge")); + output.l1ERC721BridgeProxy = + L1ERC721Bridge(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "L1ERC721Bridge")); output.optimismPortalProxy = - OptimismPortal2(payable(deployProxy(l2ChainId, output.opChainProxyAdmin, "OptimismPortal"))); - output.systemConfigProxy = SystemConfig(deployProxy(l2ChainId, output.opChainProxyAdmin, "SystemConfig")); + OptimismPortal2(payable(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "OptimismPortal"))); + output.systemConfigProxy = + SystemConfig(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "SystemConfig")); output.optimismMintableERC20FactoryProxy = OptimismMintableERC20Factory( - deployProxy(l2ChainId, output.opChainProxyAdmin, "OptimismMintableERC20Factory") + deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "OptimismMintableERC20Factory") ); output.disputeGameFactoryProxy = - DisputeGameFactory(deployProxy(l2ChainId, output.opChainProxyAdmin, "DisputeGameFactory")); + DisputeGameFactory(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "DisputeGameFactory")); output.anchorStateRegistryProxy = - AnchorStateRegistry(deployProxy(l2ChainId, output.opChainProxyAdmin, "AnchorStateRegistry")); + AnchorStateRegistry(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "AnchorStateRegistry")); // Deploy legacy proxied contracts. output.l1StandardBridgeProxy = L1StandardBridge( @@ -275,10 +283,12 @@ contract OPContractsManager is ISemver, Initializable { ); // We have two delayed WETH contracts per chain, one for each of the permissioned and permissionless games. 
- output.delayedWETHPermissionlessGameProxy = - DelayedWETH(payable(deployProxy(l2ChainId, output.opChainProxyAdmin, "DelayedWETHPermissionlessGame"))); - output.delayedWETHPermissionedGameProxy = - DelayedWETH(payable(deployProxy(l2ChainId, output.opChainProxyAdmin, "DelayedWETHPermissionedGame"))); + output.delayedWETHPermissionlessGameProxy = DelayedWETH( + payable(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "DelayedWETHPermissionlessGame")) + ); + output.delayedWETHPermissionedGameProxy = DelayedWETH( + payable(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "DelayedWETHPermissionedGame")) + ); // While not a proxy, we deploy the PermissionedDisputeGame here as well because it's bespoke per chain. output.permissionedDisputeGame = PermissionedDisputeGame( @@ -358,6 +368,8 @@ contract OPContractsManager is ISemver, Initializable { if (_input.roles.unsafeBlockSigner == address(0)) revert InvalidRoleAddress("unsafeBlockSigner"); if (_input.roles.proposer == address(0)) revert InvalidRoleAddress("proposer"); if (_input.roles.challenger == address(0)) revert InvalidRoleAddress("challenger"); + + if (_input.startingAnchorRoots.length == 0) revert InvalidStartingAnchorRoots(); } /// @notice Maps an L2 chain ID to an L1 batch inbox address as defined by the standard @@ -372,17 +384,18 @@ contract OPContractsManager is ISemver, Initializable { } /// @notice Deterministically deploys a new proxy contract owned by the provided ProxyAdmin. - /// The salt is computed as a function of the L2 chain ID and the contract name. This is required - /// because we deploy many identical proxies, so they each require a unique salt for determinism. + /// The salt is computed as a function of the L2 chain ID, the salt mixer and the contract name. + /// This is required because we deploy many identical proxies, so they each require a unique salt for determinism. 
function deployProxy( uint256 _l2ChainId, ProxyAdmin _proxyAdmin, + string memory _saltMixer, string memory _contractName ) internal returns (address) { - bytes32 salt = keccak256(abi.encode(_l2ChainId, _contractName)); + bytes32 salt = keccak256(abi.encode(_l2ChainId, _saltMixer, _contractName)); return Blueprint.deployFrom(blueprint.proxy, salt, abi.encode(_proxyAdmin)); } diff --git a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol index c99ea77357fa..8edc33f47772 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol @@ -68,7 +68,8 @@ contract OPContractsManager_Deploy_Test is DeployOPChain_TestBase { basefeeScalar: _doi.basefeeScalar(), blobBasefeeScalar: _doi.blobBaseFeeScalar(), l2ChainId: _doi.l2ChainId(), - startingAnchorRoots: _doi.startingAnchorRoots() + startingAnchorRoots: _doi.startingAnchorRoots(), + saltMixer: _doi.saltMixer() }); } diff --git a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol index cfb81717ece8..7390b1bd110c 100644 --- a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol @@ -48,6 +48,7 @@ contract DeployOPChainInput_Test is Test { uint32 blobBaseFeeScalar = 200; uint256 l2ChainId = 300; OPContractsManager opcm = OPContractsManager(makeAddr("opcm")); + string saltMixer = "saltMixer"; function setUp() public { doi = new DeployOPChainInput(); @@ -353,6 +354,7 @@ contract DeployOPChain_TestBase is Test { uint256 l2ChainId = 300; AnchorStateRegistry.StartingAnchorRoot[] startingAnchorRoots; OPContractsManager opcm = OPContractsManager(address(0)); + string saltMixer = "defaultSaltMixer"; function setUp() public virtual { // Set defaults for reference types @@ -470,6 +472,7 @@ contract DeployOPChain_Test is DeployOPChain_TestBase { 
doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); doi.set(doi.l2ChainId.selector, l2ChainId); doi.set(doi.opcmProxy.selector, address(opcm)); // Not fuzzed since it must be an actual instance. + doi.set(doi.saltMixer.selector, saltMixer); deployOPChain.run(doi, doo); @@ -485,6 +488,7 @@ contract DeployOPChain_Test is DeployOPChain_TestBase { assertEq(basefeeScalar, doi.basefeeScalar(), "700"); assertEq(blobBaseFeeScalar, doi.blobBaseFeeScalar(), "800"); assertEq(l2ChainId, doi.l2ChainId(), "900"); + assertEq(saltMixer, doi.saltMixer(), "1000"); // Assert inputs were properly passed through to the contract initializers. assertEq(address(doo.opChainProxyAdmin().owner()), opChainProxyAdminOwner, "2100"); From 73534e25aa6f2205a6ebfc00f88770bf43f330ba Mon Sep 17 00:00:00 2001 From: Blaine Malone Date: Thu, 26 Sep 2024 17:41:03 -0400 Subject: [PATCH 054/116] fix: remove old standard-versions.toml file (#12153) * feat: Change OPCM salt to include user supplied arg * fix: removing temporary assumption. * fix: bumped OPContractsManager version. * fix: snapshots update. * fix: bump OPContractsManager version. * fix: snapshots update. 
* fix: pre-pr run * fix: wiring up Create2Salt as SaltMixer for DeployOPChainInput * fix: remove old standard-versions.toml file --- .../deployer/opcm/standard-versions.toml | 47 ------------------- 1 file changed, 47 deletions(-) delete mode 100644 op-chain-ops/deployer/opcm/standard-versions.toml diff --git a/op-chain-ops/deployer/opcm/standard-versions.toml b/op-chain-ops/deployer/opcm/standard-versions.toml deleted file mode 100644 index cb4d336a7336..000000000000 --- a/op-chain-ops/deployer/opcm/standard-versions.toml +++ /dev/null @@ -1,47 +0,0 @@ -standard_release = "op-contracts/v1.6.0" - -[releases] - -# Contracts which are -# * unproxied singletons: specify a standard "address" -# * proxied : specify a standard "implementation_address" -# * neither : specify neither a standard "address" nor "implementation_address" - -# Fault Proofs https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.6.0 -[releases."op-contracts/v1.6.0"] -optimism_portal = { version = "3.10.0", implementation_address = "0xe2F826324b2faf99E513D16D266c3F80aE87832B" } -system_config = { version = "2.2.0", implementation_address = "0xF56D96B2535B932656d3c04Ebf51baBff241D886" } -anchor_state_registry = { version = "2.0.0" } -delayed_weth = { version = "1.1.0", implementation_address = "0x71e966Ae981d1ce531a7b6d23DC0f27B38409087" } -dispute_game_factory = { version = "1.0.0", implementation_address = "0xc641A33cab81C559F2bd4b21EA34C290E2440C2B" } -fault_dispute_game = { version = "1.3.0" } -permissioned_dispute_game = { version = "1.3.0" } -mips = { version = "1.1.0", address = "0x16e83cE5Ce29BF90AD9Da06D2fE6a15d5f344ce4" } -preimage_oracle = { version = "1.1.2", address = "0x9c065e11870B891D214Bc2Da7EF1f9DDFA1BE277" } -l1_cross_domain_messenger = { version = "2.3.0", implementation_address = "0xD3494713A5cfaD3F5359379DfA074E2Ac8C6Fd65" } -l1_erc721_bridge = { version = "2.1.0", implementation_address = "0xAE2AF01232a6c4a4d3012C5eC5b1b35059caF10d" } -l1_standard_bridge 
= { version = "2.1.0", implementation_address = "0x64B5a5Ed26DCb17370Ff4d33a8D503f0fbD06CfF" } -# l2_output_oracle -- This contract not used in fault proofs -optimism_mintable_erc20_factory = { version = "1.9.0", implementation_address = "0xE01efbeb1089D1d1dB9c6c8b135C934C0734c846" } - -# Fault Proofs https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.4.0 -[releases."op-contracts/v1.4.0"] -optimism_portal = { version = "3.10.0", implementation_address = "0xe2F826324b2faf99E513D16D266c3F80aE87832B" } -system_config = { version = "2.2.0", implementation_address = "0xF56D96B2535B932656d3c04Ebf51baBff241D886" } -anchor_state_registry = { version = "1.0.0" } -delayed_weth = { version = "1.0.0", implementation_address = "0x97988d5624F1ba266E1da305117BCf20713bee08" } -dispute_game_factory = { version = "1.0.0", implementation_address = "0xc641A33cab81C559F2bd4b21EA34C290E2440C2B" } -fault_dispute_game = { version = "1.2.0" } -permissioned_dispute_game = { version = "1.2.0" } -mips = { version = "1.0.1", address = "0x0f8EdFbDdD3c0256A80AD8C0F2560B1807873C9c" } -preimage_oracle = { version = "1.0.0", address = "0xD326E10B8186e90F4E2adc5c13a2d0C137ee8b34" } - -# MCP https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.3.0 -[releases."op-contracts/v1.3.0"] -l1_cross_domain_messenger = { version = "2.3.0", implementation_address = "0xD3494713A5cfaD3F5359379DfA074E2Ac8C6Fd65" } -l1_erc721_bridge = { version = "2.1.0", implementation_address = "0xAE2AF01232a6c4a4d3012C5eC5b1b35059caF10d" } -l1_standard_bridge = { version = "2.1.0", implementation_address = "0x64B5a5Ed26DCb17370Ff4d33a8D503f0fbD06CfF" } -l2_output_oracle = { version = "1.8.0", implementation_address = "0xF243BEd163251380e78068d317ae10f26042B292" } -optimism_mintable_erc20_factory = { version = "1.9.0", implementation_address = "0xE01efbeb1089D1d1dB9c6c8b135C934C0734c846" } -optimism_portal = { version = "2.5.0", implementation_address = 
"0x2D778797049FE9259d947D1ED8e5442226dFB589" } -system_config = { version = "1.12.0", implementation_address = "0xba2492e52F45651B60B8B38d4Ea5E2390C64Ffb1" } From 5798c5f4905a33841086c43e9445689e8306324b Mon Sep 17 00:00:00 2001 From: Blaine Malone Date: Thu, 26 Sep 2024 18:01:36 -0400 Subject: [PATCH 055/116] fix: Temporarily stop deploying DelayedWETHPermissionlessGameProxy (#12156) * fix: Temporarily stop deploying DelayedWETHPermissionlessGameProxy * fix: removing TODOs from production contract. * fix: semver lock updated. * fix: bump OPContractsManager version. * fix: DeployOutput on OPContractsManager API will remain the same. --- .../deployer/integration_test/apply_test.go | 2 +- .../scripts/DeployOPChain.s.sol | 20 +++++++++----- packages/contracts-bedrock/semver-lock.json | 4 +-- .../src/L1/OPContractsManager.sol | 11 +++----- .../test/opcm/DeployOPChain.t.sol | 27 ++++++++++++------- 5 files changed, 37 insertions(+), 27 deletions(-) diff --git a/op-chain-ops/deployer/integration_test/apply_test.go b/op-chain-ops/deployer/integration_test/apply_test.go index ad22651fa36e..a9425f36d670 100644 --- a/op-chain-ops/deployer/integration_test/apply_test.go +++ b/op-chain-ops/deployer/integration_test/apply_test.go @@ -243,7 +243,7 @@ func validateOPChainDeployment(t *testing.T, ctx context.Context, l1Client *ethc {"FaultDisputeGameAddress", chainState.FaultDisputeGameAddress}, {"PermissionedDisputeGameAddress", chainState.PermissionedDisputeGameAddress}, {"DelayedWETHPermissionedGameProxyAddress", chainState.DelayedWETHPermissionedGameProxyAddress}, - {"DelayedWETHPermissionlessGameProxyAddress", chainState.DelayedWETHPermissionlessGameProxyAddress}, + // {"DelayedWETHPermissionlessGameProxyAddress", chainState.DelayedWETHPermissionlessGameProxyAddress}, } for _, addr := range chainAddrs { // TODO Delete this `if`` block once FaultDisputeGameAddress is deployed. 
diff --git a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol index 25b3447ad723..d4d35bbf347a 100644 --- a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol @@ -216,9 +216,11 @@ contract DeployOPChainOutput is BaseDeployIO { address(_anchorStateRegistryImpl), // address(_faultDisputeGame), address(_permissionedDisputeGame), - address(_delayedWETHPermissionedGameProxy), - address(_delayedWETHPermissionlessGameProxy) + address(_delayedWETHPermissionedGameProxy) ); + // TODO: Eventually switch from Permissioned to Permissionless. Add this address back in. + // address(_delayedWETHPermissionlessGameProxy) + DeployUtils.assertValidContractAddresses(Solarray.extend(addrs1, addrs2)); assertValidDeploy(_doi); @@ -295,7 +297,8 @@ contract DeployOPChainOutput is BaseDeployIO { } function delayedWETHPermissionlessGameProxy() public view returns (DelayedWETH) { - DeployUtils.assertValidContractAddress(address(_delayedWETHPermissionlessGameProxy)); + // TODO: Eventually switch from Permissioned to Permissionless. Add this check back in. + // DeployUtils.assertValidContractAddress(address(_delayedWETHPermissionlessGameProxy)); return _delayedWETHPermissionlessGameProxy; } @@ -518,7 +521,8 @@ contract DeployOPChain is Script { // vm.label(address(deployOutput.faultDisputeGame), "faultDisputeGame"); vm.label(address(deployOutput.permissionedDisputeGame), "permissionedDisputeGame"); vm.label(address(deployOutput.delayedWETHPermissionedGameProxy), "delayedWETHPermissionedGameProxy"); - vm.label(address(deployOutput.delayedWETHPermissionlessGameProxy), "delayedWETHPermissionlessGameProxy"); + // TODO: Eventually switch from Permissioned to Permissionless. 
+ // vm.label(address(deployOutput.delayedWETHPermissionlessGameProxy), "delayedWETHPermissionlessGameProxy"); _doo.set(_doo.opChainProxyAdmin.selector, address(deployOutput.opChainProxyAdmin)); _doo.set(_doo.addressManager.selector, address(deployOutput.addressManager)); @@ -536,9 +540,11 @@ contract DeployOPChain is Script { // _doo.set(_doo.faultDisputeGame.selector, address(deployOutput.faultDisputeGame)); _doo.set(_doo.permissionedDisputeGame.selector, address(deployOutput.permissionedDisputeGame)); _doo.set(_doo.delayedWETHPermissionedGameProxy.selector, address(deployOutput.delayedWETHPermissionedGameProxy)); - _doo.set( - _doo.delayedWETHPermissionlessGameProxy.selector, address(deployOutput.delayedWETHPermissionlessGameProxy) - ); + // TODO: Eventually switch from Permissioned to Permissionless. + // _doo.set( + // _doo.delayedWETHPermissionlessGameProxy.selector, + // address(deployOutput.delayedWETHPermissionlessGameProxy) + // ); _doo.checkOutput(_doi); } diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index 8670b4ff9d3a..86c72b1a45b5 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -32,8 +32,8 @@ "sourceCodeHash": "0xde4df0f9633dc0cdb1c9f634003ea5b0f7c5c1aebc407bc1b2f44c0ecf938649" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0x8f00d4415fe9bef59c1aec5b6729105c686e0238ce947432b2b5a035589cff19", - "sourceCodeHash": "0x4b1cb591b22821ae7246fe47260e1ece74f2cb0463fb949de66fe2b6a986a32c" + "initCodeHash": "0xfc35bbfe19cb5345288d314ade85538a65ad213a7163133c0044b5556b180836", + "sourceCodeHash": "0x597bb234e83560d0e120b83334e152269d5fcdba2f8743bdd7594cc79098c15f" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xbe2c0c81b3459014f287d8c89cdc0d27dde5d1f44e5d024fa1e4773ddc47c190", diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index 
67e48e95702a..47aed81b8fd8 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -126,8 +126,8 @@ contract OPContractsManager is ISemver, Initializable { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.10 - string public constant version = "1.0.0-beta.10"; + /// @custom:semver 1.0.0-beta.11 + string public constant version = "1.0.0-beta.11"; /// @notice Represents the interface version so consumers know how to decode the DeployOutput struct /// that's emitted in the `Deployed` event. Whenever that struct changes, a new version should be used. @@ -282,10 +282,7 @@ contract OPContractsManager is ISemver, Initializable { Blueprint.deployFrom(blueprint.anchorStateRegistry, salt, abi.encode(output.disputeGameFactoryProxy)) ); - // We have two delayed WETH contracts per chain, one for each of the permissioned and permissionless games. - output.delayedWETHPermissionlessGameProxy = DelayedWETH( - payable(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "DelayedWETHPermissionlessGame")) - ); + // Eventually we will switch from DelayedWETHPermissionedGameProxy to DelayedWETHPermissionlessGameProxy. output.delayedWETHPermissionedGameProxy = DelayedWETH( payable(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "DelayedWETHPermissionedGame")) ); @@ -330,8 +327,8 @@ contract OPContractsManager is ISemver, Initializable { impl = getLatestImplementation("DelayedWETH"); data = encodeDelayedWETHInitializer(impl.initializer, _input); + // Eventually we will switch from DelayedWETHPermissionedGameProxy to DelayedWETHPermissionlessGameProxy. 
upgradeAndCall(output.opChainProxyAdmin, address(output.delayedWETHPermissionedGameProxy), impl.logic, data); - upgradeAndCall(output.opChainProxyAdmin, address(output.delayedWETHPermissionlessGameProxy), impl.logic, data); // We set the initial owner to this contract, set game implementations, then transfer ownership. impl = getLatestImplementation("DisputeGameFactory"); diff --git a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol index 7390b1bd110c..26c3977cb5dd 100644 --- a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol @@ -142,8 +142,9 @@ contract DeployOPChainOutput_Test is Test { FaultDisputeGame faultDisputeGame = FaultDisputeGame(makeAddr("faultDisputeGame")); PermissionedDisputeGame permissionedDisputeGame = PermissionedDisputeGame(makeAddr("permissionedDisputeGame")); DelayedWETH delayedWETHPermissionedGameProxy = DelayedWETH(payable(makeAddr("delayedWETHPermissionedGameProxy"))); - DelayedWETH delayedWETHPermissionlessGameProxy = - DelayedWETH(payable(makeAddr("delayedWETHPermissionlessGameProxy"))); + // TODO: Eventually switch from Permissioned to Permissionless. + // DelayedWETH delayedWETHPermissionlessGameProxy = + // DelayedWETH(payable(makeAddr("delayedWETHPermissionlessGameProxy"))); function setUp() public { doo = new DeployOPChainOutput(); @@ -164,7 +165,8 @@ contract DeployOPChainOutput_Test is Test { vm.etch(address(faultDisputeGame), hex"01"); vm.etch(address(permissionedDisputeGame), hex"01"); vm.etch(address(delayedWETHPermissionedGameProxy), hex"01"); - vm.etch(address(delayedWETHPermissionlessGameProxy), hex"01"); + // TODO: Eventually switch from Permissioned to Permissionless. 
+ // vm.etch(address(delayedWETHPermissionlessGameProxy), hex"01"); doo.set(doo.opChainProxyAdmin.selector, address(opChainProxyAdmin)); doo.set(doo.addressManager.selector, address(addressManager)); @@ -180,7 +182,8 @@ contract DeployOPChainOutput_Test is Test { doo.set(doo.faultDisputeGame.selector, address(faultDisputeGame)); doo.set(doo.permissionedDisputeGame.selector, address(permissionedDisputeGame)); doo.set(doo.delayedWETHPermissionedGameProxy.selector, address(delayedWETHPermissionedGameProxy)); - doo.set(doo.delayedWETHPermissionlessGameProxy.selector, address(delayedWETHPermissionlessGameProxy)); + // TODO: Eventually switch from Permissioned to Permissionless. + // doo.set(doo.delayedWETHPermissionlessGameProxy.selector, address(delayedWETHPermissionlessGameProxy)); assertEq(address(opChainProxyAdmin), address(doo.opChainProxyAdmin()), "100"); assertEq(address(addressManager), address(doo.addressManager()), "200"); @@ -196,7 +199,9 @@ contract DeployOPChainOutput_Test is Test { assertEq(address(faultDisputeGame), address(doo.faultDisputeGame()), "1300"); assertEq(address(permissionedDisputeGame), address(doo.permissionedDisputeGame()), "1400"); assertEq(address(delayedWETHPermissionedGameProxy), address(doo.delayedWETHPermissionedGameProxy()), "1500"); - assertEq(address(delayedWETHPermissionlessGameProxy), address(doo.delayedWETHPermissionlessGameProxy()), "1600"); + // TODO: Eventually switch from Permissioned to Permissionless. + // assertEq(address(delayedWETHPermissionlessGameProxy), address(doo.delayedWETHPermissionlessGameProxy()), + // "1600"); } function test_getters_whenNotSet_revert() public { @@ -244,8 +249,9 @@ contract DeployOPChainOutput_Test is Test { vm.expectRevert(expectedErr); doo.delayedWETHPermissionedGameProxy(); - vm.expectRevert(expectedErr); - doo.delayedWETHPermissionlessGameProxy(); + // TODO: Eventually switch from Permissioned to Permissionless. 
+ // vm.expectRevert(expectedErr); + // doo.delayedWETHPermissionlessGameProxy(); } function test_getters_whenAddrHasNoCode_reverts() public { @@ -308,9 +314,10 @@ contract DeployOPChainOutput_Test is Test { vm.expectRevert(expectedErr); doo.delayedWETHPermissionedGameProxy(); - doo.set(doo.delayedWETHPermissionlessGameProxy.selector, emptyAddr); - vm.expectRevert(expectedErr); - doo.delayedWETHPermissionlessGameProxy(); + // TODO: Eventually switch from Permissioned to Permissionless. + // doo.set(doo.delayedWETHPermissionlessGameProxy.selector, emptyAddr); + // vm.expectRevert(expectedErr); + // doo.delayedWETHPermissionlessGameProxy(); } } From d83f12d3361ee401d42a7e4f9993d39984da4406 Mon Sep 17 00:00:00 2001 From: Francis Li Date: Thu, 26 Sep 2024 15:22:31 -0700 Subject: [PATCH 056/116] feat(op-conductor): implement startup handshake (#12047) * op-node waits establishes connection to conductor before starting in sequencer enabled mode * Added conductor enabled api to op-node * check node enabled conductor during conductor startup * update logs * Change back to lazy initialization * Add method not found check --- op-conductor/client/mocks/SequencerControl.go | 56 +++++++++++++++++++ op-conductor/client/sequencer.go | 6 ++ op-conductor/conductor/service.go | 20 +++++++ op-e2e/actions/helpers/l2_verifier.go | 4 ++ op-node/node/api.go | 8 +++ op-node/node/conductor.go | 7 ++- op-node/node/server_test.go | 4 ++ op-node/rollup/conductor/conductor.go | 7 +++ op-node/rollup/driver/state.go | 4 ++ op-node/rollup/sequencing/disabled.go | 4 ++ op-node/rollup/sequencing/iface.go | 1 + op-node/rollup/sequencing/sequencer.go | 6 +- op-node/rollup/sequencing/sequencer_test.go | 4 ++ op-service/sources/rollupclient.go | 6 ++ 14 files changed, 134 insertions(+), 3 deletions(-) diff --git a/op-conductor/client/mocks/SequencerControl.go b/op-conductor/client/mocks/SequencerControl.go index 7e48f6dbf0df..cd6e5ecbca0c 100644 --- a/op-conductor/client/mocks/SequencerControl.go +++ 
b/op-conductor/client/mocks/SequencerControl.go @@ -25,6 +25,62 @@ func (_m *SequencerControl) EXPECT() *SequencerControl_Expecter { return &SequencerControl_Expecter{mock: &_m.Mock} } +// ConductorEnabled provides a mock function with given fields: ctx +func (_m *SequencerControl) ConductorEnabled(ctx context.Context) (bool, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ConductorEnabled") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (bool, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) bool); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SequencerControl_ConductorEnabled_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ConductorEnabled' +type SequencerControl_ConductorEnabled_Call struct { + *mock.Call +} + +// ConductorEnabled is a helper method to define mock.On call +// - ctx context.Context +func (_e *SequencerControl_Expecter) ConductorEnabled(ctx interface{}) *SequencerControl_ConductorEnabled_Call { + return &SequencerControl_ConductorEnabled_Call{Call: _e.mock.On("ConductorEnabled", ctx)} +} + +func (_c *SequencerControl_ConductorEnabled_Call) Run(run func(ctx context.Context)) *SequencerControl_ConductorEnabled_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *SequencerControl_ConductorEnabled_Call) Return(_a0 bool, _a1 error) *SequencerControl_ConductorEnabled_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *SequencerControl_ConductorEnabled_Call) RunAndReturn(run func(context.Context) (bool, error)) *SequencerControl_ConductorEnabled_Call { + _c.Call.Return(run) + return _c +} + // LatestUnsafeBlock provides a mock function with given fields: ctx func (_m 
*SequencerControl) LatestUnsafeBlock(ctx context.Context) (eth.BlockInfo, error) { ret := _m.Called(ctx) diff --git a/op-conductor/client/sequencer.go b/op-conductor/client/sequencer.go index 1099c84dbea0..0c2ae4c93ab0 100644 --- a/op-conductor/client/sequencer.go +++ b/op-conductor/client/sequencer.go @@ -18,6 +18,7 @@ type SequencerControl interface { SequencerActive(ctx context.Context) (bool, error) LatestUnsafeBlock(ctx context.Context) (eth.BlockInfo, error) PostUnsafePayload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) error + ConductorEnabled(ctx context.Context) (bool, error) } // NewSequencerControl creates a new SequencerControl instance. @@ -59,3 +60,8 @@ func (s *sequencerController) SequencerActive(ctx context.Context) (bool, error) func (s *sequencerController) PostUnsafePayload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) error { return s.node.PostUnsafePayload(ctx, payload) } + +// ConductorEnabled implements SequencerControl. +func (s *sequencerController) ConductorEnabled(ctx context.Context) (bool, error) { + return s.node.ConductorEnabled(ctx) +} diff --git a/op-conductor/conductor/service.go b/op-conductor/conductor/service.go index d2eb4fe89d9d..f93314f5f70b 100644 --- a/op-conductor/conductor/service.go +++ b/op-conductor/conductor/service.go @@ -28,6 +28,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/httputil" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" + "github.com/ethereum-optimism/optimism/op-service/retry" oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" "github.com/ethereum-optimism/optimism/op-service/sources" ) @@ -140,6 +141,25 @@ func (c *OpConductor) initSequencerControl(ctx context.Context) error { node := sources.NewRollupClient(nc) c.ctrl = client.NewSequencerControl(exec, node) + enabled, err := retry.Do(ctx, 60, retry.Fixed(5*time.Second), func() (bool, error) { + enabled, err := 
c.ctrl.ConductorEnabled(ctx) + if rpcErr, ok := err.(rpc.Error); ok { + errCode := rpcErr.ErrorCode() + errText := strings.ToLower(err.Error()) + if errCode == -32601 || strings.Contains(errText, "method not found") { // method not found error + c.log.Warn("Warning: conductorEnabled method not found, please upgrade your op-node to the latest version, continuing...") + return true, nil + } + } + return enabled, err + }) + if err != nil { + return errors.Wrap(err, "failed to connect to sequencer") + } + if !enabled { + return errors.New("conductor is not enabled on sequencer, exiting...") + } + return c.updateSequencerActiveStatus() } diff --git a/op-e2e/actions/helpers/l2_verifier.go b/op-e2e/actions/helpers/l2_verifier.go index 1594e1eb368c..6f9d80169875 100644 --- a/op-e2e/actions/helpers/l2_verifier.go +++ b/op-e2e/actions/helpers/l2_verifier.go @@ -241,6 +241,10 @@ func (s *l2VerifierBackend) OnUnsafeL2Payload(ctx context.Context, envelope *eth return nil } +func (s *l2VerifierBackend) ConductorEnabled(ctx context.Context) (bool, error) { + return false, nil +} + func (s *L2Verifier) DerivationMetricsTracer() *testutils.TestDerivationMetrics { return s.derivationMetrics } diff --git a/op-node/node/api.go b/op-node/node/api.go index a94e2477fe16..ccd4a3b81bb3 100644 --- a/op-node/node/api.go +++ b/op-node/node/api.go @@ -34,6 +34,7 @@ type driverClient interface { SequencerActive(context.Context) (bool, error) OnUnsafeL2Payload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) error OverrideLeader(ctx context.Context) error + ConductorEnabled(ctx context.Context) (bool, error) } type SafeDBReader interface { @@ -98,6 +99,13 @@ func (n *adminAPI) OverrideLeader(ctx context.Context) error { return n.dr.OverrideLeader(ctx) } +// ConductorEnabled returns true if the sequencer conductor is enabled. 
+func (n *adminAPI) ConductorEnabled(ctx context.Context) (bool, error) { + recordDur := n.M.RecordRPCServerRequest("admin_conductorEnabled") + defer recordDur() + return n.dr.ConductorEnabled(ctx) +} + type nodeAPI struct { config *rollup.Config client l2EthClient diff --git a/op-node/node/conductor.go b/op-node/node/conductor.go index 20e0638dc686..93bde641a453 100644 --- a/op-node/node/conductor.go +++ b/op-node/node/conductor.go @@ -32,7 +32,7 @@ type ConductorClient struct { var _ conductor.SequencerConductor = &ConductorClient{} // NewConductorClient returns a new conductor client for the op-conductor RPC service. -func NewConductorClient(cfg *Config, log log.Logger, metrics *metrics.Metrics) *ConductorClient { +func NewConductorClient(cfg *Config, log log.Logger, metrics *metrics.Metrics) conductor.SequencerConductor { return &ConductorClient{ cfg: cfg, metrics: metrics, @@ -53,6 +53,11 @@ func (c *ConductorClient) initialize() error { return nil } +// Enabled returns true if the conductor is enabled, and since the conductor client is initialized, the conductor is always enabled. +func (c *ConductorClient) Enabled(ctx context.Context) bool { + return true +} + // Leader returns true if this node is the leader sequencer. 
func (c *ConductorClient) Leader(ctx context.Context) (bool, error) { if c.overrideLeader.Load() { diff --git a/op-node/node/server_test.go b/op-node/node/server_test.go index 7063b3ed2807..f8722e272318 100644 --- a/op-node/node/server_test.go +++ b/op-node/node/server_test.go @@ -287,6 +287,10 @@ func (c *mockDriverClient) OverrideLeader(ctx context.Context) error { return c.Mock.MethodCalled("OverrideLeader").Get(0).(error) } +func (c *mockDriverClient) ConductorEnabled(ctx context.Context) (bool, error) { + return c.Mock.MethodCalled("ConductorEnabled").Get(0).(bool), nil +} + type mockSafeDBReader struct { mock.Mock } diff --git a/op-node/rollup/conductor/conductor.go b/op-node/rollup/conductor/conductor.go index 927d88035ccb..b668d5fb055f 100644 --- a/op-node/rollup/conductor/conductor.go +++ b/op-node/rollup/conductor/conductor.go @@ -9,6 +9,8 @@ import ( // SequencerConductor is an interface for the driver to communicate with the sequencer conductor. // It is used to determine if the current node is the active sequencer, and to commit unsafe payloads to the conductor log. type SequencerConductor interface { + // Enabled returns true if the conductor is enabled. + Enabled(ctx context.Context) bool // Leader returns true if this node is the leader sequencer. Leader(ctx context.Context) (bool, error) // CommitUnsafePayload commits an unsafe payload to the conductor FSM. @@ -24,6 +26,11 @@ type NoOpConductor struct{} var _ SequencerConductor = &NoOpConductor{} +// Enabled implements SequencerConductor. +func (c *NoOpConductor) Enabled(ctx context.Context) bool { + return false +} + // Leader returns true if this node is the leader sequencer. NoOpConductor always returns true. 
func (c *NoOpConductor) Leader(ctx context.Context) (bool, error) { return true, nil diff --git a/op-node/rollup/driver/state.go b/op-node/rollup/driver/state.go index 2840cedcf423..09f05f67e3b3 100644 --- a/op-node/rollup/driver/state.go +++ b/op-node/rollup/driver/state.go @@ -483,6 +483,10 @@ func (s *Driver) OverrideLeader(ctx context.Context) error { return s.sequencer.OverrideLeader(ctx) } +func (s *Driver) ConductorEnabled(ctx context.Context) (bool, error) { + return s.sequencer.ConductorEnabled(ctx), nil +} + // SyncStatus blocks the driver event loop and captures the syncing status. func (s *Driver) SyncStatus(ctx context.Context) (*eth.SyncStatus, error) { return s.statusTracker.SyncStatus(), nil diff --git a/op-node/rollup/sequencing/disabled.go b/op-node/rollup/sequencing/disabled.go index 3634284ccd2f..64d452828104 100644 --- a/op-node/rollup/sequencing/disabled.go +++ b/op-node/rollup/sequencing/disabled.go @@ -48,4 +48,8 @@ func (ds DisabledSequencer) OverrideLeader(ctx context.Context) error { return ErrSequencerNotEnabled } +func (ds DisabledSequencer) ConductorEnabled(ctx context.Context) bool { + return false +} + func (ds DisabledSequencer) Close() {} diff --git a/op-node/rollup/sequencing/iface.go b/op-node/rollup/sequencing/iface.go index 54e0c70719e0..c2e6fa7ab200 100644 --- a/op-node/rollup/sequencing/iface.go +++ b/op-node/rollup/sequencing/iface.go @@ -19,5 +19,6 @@ type SequencerIface interface { Stop(ctx context.Context) (hash common.Hash, err error) SetMaxSafeLag(ctx context.Context, v uint64) error OverrideLeader(ctx context.Context) error + ConductorEnabled(ctx context.Context) bool Close() } diff --git a/op-node/rollup/sequencing/sequencer.go b/op-node/rollup/sequencing/sequencer.go index e488300b49f8..538caafe4144 100644 --- a/op-node/rollup/sequencing/sequencer.go +++ b/op-node/rollup/sequencing/sequencer.go @@ -617,8 +617,6 @@ func (d *Sequencer) Init(ctx context.Context, active bool) error { 
d.emitter.Emit(engine.ForkchoiceRequestEvent{}) if active { - // TODO(#11121): should the conductor be checked on startup? - // The conductor was previously not being checked in this case, but that may be a bug. return d.forceStart() } else { if err := d.listener.SequencerStopped(); err != nil { @@ -712,6 +710,10 @@ func (d *Sequencer) OverrideLeader(ctx context.Context) error { return d.conductor.OverrideLeader(ctx) } +func (d *Sequencer) ConductorEnabled(ctx context.Context) bool { + return d.conductor.Enabled(ctx) +} + func (d *Sequencer) Close() { d.conductor.Close() d.asyncGossip.Stop() diff --git a/op-node/rollup/sequencing/sequencer_test.go b/op-node/rollup/sequencing/sequencer_test.go index 7b410e644ad2..3265711a0c46 100644 --- a/op-node/rollup/sequencing/sequencer_test.go +++ b/op-node/rollup/sequencing/sequencer_test.go @@ -105,6 +105,10 @@ type FakeConductor struct { var _ conductor.SequencerConductor = &FakeConductor{} +func (c *FakeConductor) Enabled(ctx context.Context) bool { + return true +} + func (c *FakeConductor) Leader(ctx context.Context) (bool, error) { return c.leader, nil } diff --git a/op-service/sources/rollupclient.go b/op-service/sources/rollupclient.go index acd0f84b3917..8ff6c54e23ef 100644 --- a/op-service/sources/rollupclient.go +++ b/op-service/sources/rollupclient.go @@ -74,6 +74,12 @@ func (r *RollupClient) OverrideLeader(ctx context.Context) error { return r.rpc.CallContext(ctx, nil, "admin_overrideLeader") } +func (r *RollupClient) ConductorEnabled(ctx context.Context) (bool, error) { + var result bool + err := r.rpc.CallContext(ctx, &result, "admin_conductorEnabled") + return result, err +} + func (r *RollupClient) SetLogLevel(ctx context.Context, lvl slog.Level) error { return r.rpc.CallContext(ctx, nil, "admin_setLogLevel", lvl.String()) } From 6202822a698e24ea214119b95cb4597114f7b770 Mon Sep 17 00:00:00 2001 From: Maurelian Date: Thu, 26 Sep 2024 18:43:31 -0400 Subject: [PATCH 057/116] feat(opcm): Create and use new 
ISystemConfigV160 (#12163) * feat(opcm): Create and use new ISystemConfigV160 * feat(opcm): defaultSystemConfigParams in OPCM Interop * chore: semgrep * fix: handle different system config initializers * chore: revert interop changes since it uses the latest systemConfig * chore: semver lock * chore: semver locK --------- Co-authored-by: Matt Solomon --- .../scripts/DeployImplementations.s.sol | 15 ++- packages/contracts-bedrock/semver-lock.json | 4 +- .../src/L1/OPContractsManager.sol | 104 +++++++++++++++--- .../src/L1/interfaces/ISystemConfig.sol | 1 + .../src/L1/interfaces/ISystemConfigV160.sol | 85 ++++++++++++++ 5 files changed, 188 insertions(+), 21 deletions(-) create mode 100644 packages/contracts-bedrock/src/L1/interfaces/ISystemConfigV160.sol diff --git a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol index 3dffee5d32b0..bdcba80d712c 100644 --- a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol +++ b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol @@ -7,6 +7,7 @@ import { LibString } from "@solady/utils/LibString.sol"; import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { ISystemConfigV160 } from "src/L1/interfaces/ISystemConfigV160.sol"; import { Constants } from "src/libraries/Constants.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; @@ -479,7 +480,7 @@ contract DeployImplementations is Script { // --- OP Contracts Manager --- function opcmSystemConfigSetter( - DeployImplementationsInput, + DeployImplementationsInput _dii, DeployImplementationsOutput _dio ) internal @@ -487,9 +488,19 @@ contract DeployImplementations is Script { virtual returns (OPContractsManager.ImplementationSetter memory) { + // When configuring OPCM during Solidity tests, we are using the latest SystemConfig.sol + // version in this repo, which 
contains Custom Gas Token (CGT) features. This CGT version + // has a different `initialize` signature than the SystemConfig version that was released + // as part of `op-contracts/v1.6.0`, which is no longer in the repo. When running this + // script's bytecode for a production deploy of OPCM at `op-contracts/v1.6.0`, we need to + // use the ISystemConfigV160 interface instead of ISystemConfig. Therefore the selector used + // is a function of the `release` passed in by the caller. + bytes4 selector = LibString.eq(_dii.release(), "op-contracts/v1.6.0") + ? ISystemConfigV160.initialize.selector + : SystemConfig.initialize.selector; return OPContractsManager.ImplementationSetter({ name: "SystemConfig", - info: OPContractsManager.Implementation(address(_dio.systemConfigImpl()), SystemConfig.initialize.selector) + info: OPContractsManager.Implementation(address(_dio.systemConfigImpl()), selector) }); } diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index 86c72b1a45b5..cf14e6995ca6 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -32,8 +32,8 @@ "sourceCodeHash": "0xde4df0f9633dc0cdb1c9f634003ea5b0f7c5c1aebc407bc1b2f44c0ecf938649" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0xfc35bbfe19cb5345288d314ade85538a65ad213a7163133c0044b5556b180836", - "sourceCodeHash": "0x597bb234e83560d0e120b83334e152269d5fcdba2f8743bdd7594cc79098c15f" + "initCodeHash": "0x15ace86b1d389d02654392e10c3d444000f22e549ad2736cd5a869e4d862ddc8", + "sourceCodeHash": "0xffb7698ab51b7ea58460146de3d094d8e2f869c1425d289832855062c77692a8" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xbe2c0c81b3459014f287d8c89cdc0d27dde5d1f44e5d024fa1e4773ddc47c190", diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index 47aed81b8fd8..1e81946578ae 100644 --- 
a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -12,6 +12,7 @@ import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; +import { ISystemConfigV160 } from "src/L1/interfaces/ISystemConfigV160.sol"; import { Proxy } from "src/universal/Proxy.sol"; import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; @@ -126,8 +127,8 @@ contract OPContractsManager is ISemver, Initializable { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.11 - string public constant version = "1.0.0-beta.11"; + /// @custom:semver 1.0.0-beta.12 + string public constant version = "1.0.0-beta.12"; /// @notice Represents the interface version so consumers know how to decode the DeployOutput struct /// that's emitted in the `Deployed` event. Whenever that struct changes, a new version should be used. @@ -309,7 +310,10 @@ contract OPContractsManager is ISemver, Initializable { data = encodeOptimismPortalInitializer(impl.initializer, output); upgradeAndCall(output.opChainProxyAdmin, address(output.optimismPortalProxy), impl.logic, data); + // First we upgrade the implementation so it's version can be retrieved, then we initialize + // it afterwards. See the comments in encodeSystemConfigInitializer to learn more. 
impl = getLatestImplementation("SystemConfig"); + output.opChainProxyAdmin.upgrade(payable(address(output.systemConfigProxy)), impl.logic); data = encodeSystemConfigInitializer(impl.initializer, _input, output); upgradeAndCall(output.opChainProxyAdmin, address(output.systemConfigProxy), impl.logic, data); @@ -450,21 +454,48 @@ contract OPContractsManager is ISemver, Initializable { virtual returns (bytes memory) { - (ResourceMetering.ResourceConfig memory referenceResourceConfig, SystemConfig.Addresses memory opChainAddrs) = - defaultSystemConfigParams(_selector, _input, _output); - - return abi.encodeWithSelector( - _selector, - _input.roles.systemConfigOwner, - _input.basefeeScalar, - _input.blobBasefeeScalar, - bytes32(uint256(uint160(_input.roles.batcher))), // batcherHash - 30_000_000, // gasLimit, TODO should this be an input? - _input.roles.unsafeBlockSigner, - referenceResourceConfig, - chainIdToBatchInboxAddress(_input.l2ChainId), - opChainAddrs - ); + // We inspect the SystemConfig contract and determine it's signature here. This is required + // because this OPCM contract is being developed in a repository that no longer contains the + // SystemConfig contract that was released as part of `op-contracts/v1.6.0`, but in production + // it needs to support that version, in addition to the version currently on develop. + string memory semver = _output.systemConfigProxy.version(); + if (keccak256(abi.encode(semver)) == keccak256(abi.encode(string("2.2.0")))) { + // We are using the op-contracts/v1.6.0 SystemConfig contract. + ( + ResourceMetering.ResourceConfig memory referenceResourceConfig, + ISystemConfigV160.Addresses memory opChainAddrs + ) = defaultSystemConfigV160Params(_selector, _input, _output); + + return abi.encodeWithSelector( + _selector, + _input.roles.systemConfigOwner, + _input.basefeeScalar, + _input.blobBasefeeScalar, + bytes32(uint256(uint160(_input.roles.batcher))), // batcherHash + 30_000_000, // gasLimit, TODO should this be an input? 
+ _input.roles.unsafeBlockSigner, + referenceResourceConfig, + chainIdToBatchInboxAddress(_input.l2ChainId), + opChainAddrs + ); + } else { + // We are using the latest SystemConfig contract from the repo. + (ResourceMetering.ResourceConfig memory referenceResourceConfig, SystemConfig.Addresses memory opChainAddrs) + = defaultSystemConfigParams(_selector, _input, _output); + + return abi.encodeWithSelector( + _selector, + _input.roles.systemConfigOwner, + _input.basefeeScalar, + _input.blobBasefeeScalar, + bytes32(uint256(uint160(_input.roles.batcher))), // batcherHash + 30_000_000, // gasLimit, TODO should this be an input? + _input.roles.unsafeBlockSigner, + referenceResourceConfig, + chainIdToBatchInboxAddress(_input.l2ChainId), + opChainAddrs + ); + } } /// @notice Helper method for encoding the OptimismMintableERC20Factory initializer data. @@ -612,6 +643,45 @@ contract OPContractsManager is ISemver, Initializable { assertValidContractAddress(opChainAddrs_.optimismMintableERC20Factory); } + /// @notice Returns default, standard config arguments for the SystemConfig initializer. + /// This is used by subclasses to reduce code duplication. + function defaultSystemConfigV160Params( + bytes4, /* selector */ + DeployInput memory, /* _input */ + DeployOutput memory _output + ) + internal + view + virtual + returns ( + ResourceMetering.ResourceConfig memory resourceConfig_, + ISystemConfigV160.Addresses memory opChainAddrs_ + ) + { + // We use assembly to easily convert from IResourceMetering.ResourceConfig to ResourceMetering.ResourceConfig. + // This is required because we have not yet fully migrated the codebase to be interface-based. 
+ IResourceMetering.ResourceConfig memory resourceConfig = Constants.DEFAULT_RESOURCE_CONFIG(); + assembly ("memory-safe") { + resourceConfig_ := resourceConfig + } + + opChainAddrs_ = ISystemConfigV160.Addresses({ + l1CrossDomainMessenger: address(_output.l1CrossDomainMessengerProxy), + l1ERC721Bridge: address(_output.l1ERC721BridgeProxy), + l1StandardBridge: address(_output.l1StandardBridgeProxy), + disputeGameFactory: address(_output.disputeGameFactoryProxy), + optimismPortal: address(_output.optimismPortalProxy), + optimismMintableERC20Factory: address(_output.optimismMintableERC20FactoryProxy) + }); + + assertValidContractAddress(opChainAddrs_.l1CrossDomainMessenger); + assertValidContractAddress(opChainAddrs_.l1ERC721Bridge); + assertValidContractAddress(opChainAddrs_.l1StandardBridge); + assertValidContractAddress(opChainAddrs_.disputeGameFactory); + assertValidContractAddress(opChainAddrs_.optimismPortal); + assertValidContractAddress(opChainAddrs_.optimismMintableERC20Factory); + } + /// @notice Makes an external call to the target to initialize the proxy with the specified data. /// First performs safety checks to ensure the target, implementation, and proxy admin are valid. function upgradeAndCall( diff --git a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfig.sol b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfig.sol index 37ab1512a031..a7c5434d048b 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfig.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfig.sol @@ -3,6 +3,7 @@ pragma solidity ^0.8.0; import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; +/// @notice This interface corresponds to the Custom Gas Token version of the SystemConfig contract. 
interface ISystemConfig { enum UpdateType { BATCHER, diff --git a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigV160.sol b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigV160.sol new file mode 100644 index 000000000000..6bf3e9a9cb6a --- /dev/null +++ b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigV160.sol @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; + +/// @notice This interface corresponds to the op-contracts/v1.6.0 release of the SystemConfig +/// contract, which has a semver of 2.2.0 as specified in +/// https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.6.0 +interface ISystemConfigV160 { + enum UpdateType { + BATCHER, + GAS_CONFIG, + GAS_LIMIT, + UNSAFE_BLOCK_SIGNER + } + + struct Addresses { + address l1CrossDomainMessenger; + address l1ERC721Bridge; + address l1StandardBridge; + address disputeGameFactory; + address optimismPortal; + address optimismMintableERC20Factory; + } + + event ConfigUpdate(uint256 indexed version, UpdateType indexed updateType, bytes data); + event Initialized(uint8 version); + event OwnershipTransferred(address indexed previousOwner, address indexed newOwner); + + function BATCH_INBOX_SLOT() external view returns (bytes32); + function DISPUTE_GAME_FACTORY_SLOT() external view returns (bytes32); + function L1_CROSS_DOMAIN_MESSENGER_SLOT() external view returns (bytes32); + function L1_ERC_721_BRIDGE_SLOT() external view returns (bytes32); + function L1_STANDARD_BRIDGE_SLOT() external view returns (bytes32); + function OPTIMISM_MINTABLE_ERC20_FACTORY_SLOT() external view returns (bytes32); + function OPTIMISM_PORTAL_SLOT() external view returns (bytes32); + function START_BLOCK_SLOT() external view returns (bytes32); + function UNSAFE_BLOCK_SIGNER_SLOT() external view returns (bytes32); + function VERSION() external view returns (uint256); + function basefeeScalar() 
external view returns (uint32); + function batchInbox() external view returns (address addr_); + function batcherHash() external view returns (bytes32); + function blobbasefeeScalar() external view returns (uint32); + function disputeGameFactory() external view returns (address addr_); + function gasLimit() external view returns (uint64); + function gasPayingToken() external view returns (address addr_, uint8 decimals_); + function gasPayingTokenName() external view returns (string memory name_); + function gasPayingTokenSymbol() external view returns (string memory symbol_); + function initialize( + address _owner, + uint32 _basefeeScalar, + uint32 _blobbasefeeScalar, + bytes32 _batcherHash, + uint64 _gasLimit, + address _unsafeBlockSigner, + IResourceMetering.ResourceConfig memory _config, + address _batchInbox, + Addresses memory _addresses + ) + external; + function isCustomGasToken() external view returns (bool); + function l1CrossDomainMessenger() external view returns (address addr_); + function l1ERC721Bridge() external view returns (address addr_); + function l1StandardBridge() external view returns (address addr_); + function maximumGasLimit() external pure returns (uint64); + function minimumGasLimit() external view returns (uint64); + function optimismMintableERC20Factory() external view returns (address addr_); + function optimismPortal() external view returns (address addr_); + function overhead() external view returns (uint256); + function owner() external view returns (address); + function renounceOwnership() external; + function resourceConfig() external view returns (IResourceMetering.ResourceConfig memory); + function scalar() external view returns (uint256); + function setBatcherHash(bytes32 _batcherHash) external; + function setGasConfig(uint256 _overhead, uint256 _scalar) external; + function setGasConfigEcotone(uint32 _basefeeScalar, uint32 _blobbasefeeScalar) external; + function setGasLimit(uint64 _gasLimit) external; + function 
setUnsafeBlockSigner(address _unsafeBlockSigner) external; + function startBlock() external view returns (uint256 startBlock_); + function transferOwnership(address newOwner) external; // nosemgrep + function unsafeBlockSigner() external view returns (address addr_); + function version() external pure returns (string memory); + + function __constructor__() external; +} From 0561fd477d741db7ee0a61cb9b7a0e9543d7a3a1 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 26 Sep 2024 16:46:35 -0600 Subject: [PATCH 058/116] op-supervisor: cleanup cross-L2 safety types (#12098) * op-supervisor: cleanup cross-L2 safety types * op-node: fix interop test --- op-e2e/actions/interop/interop_test.go | 6 ++-- op-node/rollup/interop/interop.go | 2 +- op-node/rollup/interop/interop_test.go | 4 +-- op-service/sources/supervisor_client.go | 2 +- op-supervisor/supervisor/backend/backend.go | 16 ++++----- .../supervisor/backend/db/safety_checkers.go | 6 ++-- op-supervisor/supervisor/types/types.go | 35 +++++++++++++------ 7 files changed, 42 insertions(+), 29 deletions(-) diff --git a/op-e2e/actions/interop/interop_test.go b/op-e2e/actions/interop/interop_test.go index 4015ffa29ce9..8badc474e944 100644 --- a/op-e2e/actions/interop/interop_test.go +++ b/op-e2e/actions/interop/interop_test.go @@ -42,7 +42,7 @@ func TestInteropVerifier(gt *testing.T) { ver.ActL2PipelineFull(t) l2ChainID := types.ChainIDFromBig(sd.RollupCfg.L2ChainID) - seqMockBackend.ExpectCheckBlock(l2ChainID, 1, types.Unsafe, nil) + seqMockBackend.ExpectCheckBlock(l2ChainID, 1, types.LocalUnsafe, nil) // create an unsafe L2 block seq.ActL2StartBlock(t) seq.ActL2EndBlock(t) @@ -99,8 +99,8 @@ func TestInteropVerifier(gt *testing.T) { require.Equal(t, uint64(0), status.FinalizedL2.Number) // The verifier might not see the L2 block that was just derived from L1 as cross-verified yet. 
- verMockBackend.ExpectCheckBlock(l2ChainID, 1, types.Unsafe, nil) // for the local unsafe check - verMockBackend.ExpectCheckBlock(l2ChainID, 1, types.Unsafe, nil) // for the local safe check + verMockBackend.ExpectCheckBlock(l2ChainID, 1, types.LocalUnsafe, nil) // for the local unsafe check + verMockBackend.ExpectCheckBlock(l2ChainID, 1, types.LocalUnsafe, nil) // for the local safe check ver.ActL1HeadSignal(t) ver.ActL2PipelineFull(t) verMockBackend.AssertExpectations(t) diff --git a/op-node/rollup/interop/interop.go b/op-node/rollup/interop/interop.go index c6c170478f21..35b1a86e9635 100644 --- a/op-node/rollup/interop/interop.go +++ b/op-node/rollup/interop/interop.go @@ -101,7 +101,7 @@ func (d *InteropDeriver) OnEvent(ev event.Event) bool { break } switch blockSafety { - case types.CrossUnsafe, types.CrossSafe, types.CrossFinalized: + case types.CrossUnsafe, types.CrossSafe, types.Finalized: // Hold off on promoting higher than cross-unsafe, // this will happen once we verify it to be local-safe first. 
d.emitter.Emit(engine.PromoteCrossUnsafeEvent{Ref: candidate}) diff --git a/op-node/rollup/interop/interop_test.go b/op-node/rollup/interop/interop_test.go index 62b71140770e..a7aaedcae7a1 100644 --- a/op-node/rollup/interop/interop_test.go +++ b/op-node/rollup/interop/interop_test.go @@ -61,7 +61,7 @@ func TestInteropDeriver(t *testing.T) { firstLocalUnsafe := testutils.NextRandomL2Ref(rng, 2, crossUnsafe, crossUnsafe.L1Origin) lastLocalUnsafe := testutils.NextRandomL2Ref(rng, 2, firstLocalUnsafe, firstLocalUnsafe.L1Origin) interopBackend.ExpectCheckBlock( - chainID, firstLocalUnsafe.Number, supervisortypes.Unsafe, nil) + chainID, firstLocalUnsafe.Number, supervisortypes.LocalUnsafe, nil) l2Source.ExpectL2BlockRefByNumber(firstLocalUnsafe.Number, firstLocalUnsafe, nil) interopDeriver.OnEvent(engine.CrossUnsafeUpdateEvent{ CrossUnsafe: crossUnsafe, @@ -122,7 +122,7 @@ func TestInteropDeriver(t *testing.T) { DerivedFrom: derivedFrom, }) interopBackend.ExpectCheckBlock( - chainID, firstLocalSafe.Number, supervisortypes.Safe, nil) + chainID, firstLocalSafe.Number, supervisortypes.LocalSafe, nil) l2Source.ExpectL2BlockRefByNumber(firstLocalSafe.Number, firstLocalSafe, nil) interopDeriver.OnEvent(engine.CrossSafeUpdateEvent{ CrossSafe: crossSafe, diff --git a/op-service/sources/supervisor_client.go b/op-service/sources/supervisor_client.go index db40e55ef472..ff702010daff 100644 --- a/op-service/sources/supervisor_client.go +++ b/op-service/sources/supervisor_client.go @@ -74,7 +74,7 @@ func (cl *SupervisorClient) CheckBlock(ctx context.Context, "supervisor_checkBlock", (*hexutil.U256)(&chainID), blockHash, hexutil.Uint64(blockNumber)) if err != nil { - return types.Unsafe, fmt.Errorf("failed to check Block %s:%d (chain %s): %w", blockHash, blockNumber, chainID, err) + return types.LocalUnsafe, fmt.Errorf("failed to check Block %s:%d (chain %s): %w", blockHash, blockNumber, chainID, err) } return result, nil } diff --git a/op-supervisor/supervisor/backend/backend.go 
b/op-supervisor/supervisor/backend/backend.go index f21217e82c42..1f020889f2f1 100644 --- a/op-supervisor/supervisor/backend/backend.go +++ b/op-supervisor/supervisor/backend/backend.go @@ -192,7 +192,7 @@ func (su *SupervisorBackend) CheckMessage(identifier types.Identifier, payloadHa logIdx := identifier.LogIndex _, err := su.db.Check(chainID, blockNum, uint32(logIdx), payloadHash) if errors.Is(err, logs.ErrFuture) { - return types.Unsafe, nil + return types.LocalUnsafe, nil } if errors.Is(err, logs.ErrConflict) { return types.Invalid, nil @@ -203,9 +203,9 @@ func (su *SupervisorBackend) CheckMessage(identifier types.Identifier, payloadHa safest := types.CrossUnsafe // at this point we have the log entry, and we can check if it is safe by various criteria for _, checker := range []db.SafetyChecker{ - db.NewSafetyChecker(types.Unsafe, su.db), - db.NewSafetyChecker(types.Safe, su.db), - db.NewSafetyChecker(types.Finalized, su.db), + db.NewSafetyChecker(db.Unsafe, su.db), + db.NewSafetyChecker(db.Safe, su.db), + db.NewSafetyChecker(db.Finalized, su.db), } { // check local safety limit first as it's more permissive localPtr := checker.LocalHead(chainID) @@ -248,7 +248,7 @@ func (su *SupervisorBackend) CheckBlock(chainID *hexutil.U256, blockHash common. id := eth.BlockID{Hash: blockHash, Number: uint64(blockNumber)} _, err := su.db.FindSealedBlock(types.ChainID(*chainID), id) if errors.Is(err, logs.ErrFuture) { - return types.Unsafe, nil + return types.LocalUnsafe, nil } if errors.Is(err, logs.ErrConflict) { return types.Invalid, nil @@ -259,9 +259,9 @@ func (su *SupervisorBackend) CheckBlock(chainID *hexutil.U256, blockHash common. 
} // at this point we have the extent of the block, and we can check if it is safe by various criteria for _, checker := range []db.SafetyChecker{ - db.NewSafetyChecker(types.Unsafe, su.db), - db.NewSafetyChecker(types.Safe, su.db), - db.NewSafetyChecker(types.Finalized, su.db), + db.NewSafetyChecker(db.Unsafe, su.db), + db.NewSafetyChecker(db.Safe, su.db), + db.NewSafetyChecker(db.Finalized, su.db), } { // check local safety limit first as it's more permissive localPtr := checker.LocalHead(types.ChainID(*chainID)) diff --git a/op-supervisor/supervisor/backend/db/safety_checkers.go b/op-supervisor/supervisor/backend/db/safety_checkers.go index 745f74134662..cbf4e3ddd6d7 100644 --- a/op-supervisor/supervisor/backend/db/safety_checkers.go +++ b/op-supervisor/supervisor/backend/db/safety_checkers.go @@ -115,7 +115,7 @@ func NewChecker(t types.SafetyLevel, c *ChainsDB) SafetyChecker { case Unsafe: return &checker{ chains: c, - localSafety: types.Unsafe, + localSafety: types.LocalUnsafe, crossSafety: types.CrossUnsafe, updateCross: c.heads.UpdateCrossUnsafe, updateLocal: c.heads.UpdateLocalUnsafe, @@ -127,7 +127,7 @@ func NewChecker(t types.SafetyLevel, c *ChainsDB) SafetyChecker { case Safe: return &checker{ chains: c, - localSafety: types.Safe, + localSafety: types.LocalSafe, crossSafety: types.CrossSafe, updateCross: c.heads.UpdateCrossSafe, updateLocal: c.heads.UpdateLocalSafe, @@ -140,7 +140,7 @@ func NewChecker(t types.SafetyLevel, c *ChainsDB) SafetyChecker { return &checker{ chains: c, localSafety: types.Finalized, - crossSafety: types.CrossFinalized, + crossSafety: types.Finalized, updateCross: c.heads.UpdateCrossFinalized, updateLocal: c.heads.UpdateLocalFinalized, crossHead: c.heads.CrossFinalized, diff --git a/op-supervisor/supervisor/types/types.go b/op-supervisor/supervisor/types/types.go index e89e8e9515bb..ea480afa8b3c 100644 --- a/op-supervisor/supervisor/types/types.go +++ b/op-supervisor/supervisor/types/types.go @@ -73,7 +73,7 @@ func (lvl 
SafetyLevel) String() string { func (lvl SafetyLevel) Valid() bool { switch lvl { - case CrossFinalized, Finalized, Safe, CrossUnsafe, Unsafe: + case Finalized, CrossSafe, LocalSafe, CrossUnsafe, LocalUnsafe: return true default: return false @@ -101,10 +101,10 @@ func (lvl *SafetyLevel) AtLeastAsSafe(min SafetyLevel) bool { switch min { case Invalid: return true - case Unsafe: + case CrossUnsafe: return *lvl != Invalid - case Safe: - return *lvl == Safe || *lvl == Finalized + case CrossSafe: + return *lvl == CrossSafe || *lvl == Finalized case Finalized: return *lvl == Finalized default: @@ -113,13 +113,26 @@ func (lvl *SafetyLevel) AtLeastAsSafe(min SafetyLevel) bool { } const ( - CrossFinalized SafetyLevel = "cross-finalized" - Finalized SafetyLevel = "finalized" - CrossSafe SafetyLevel = "cross-safe" - Safe SafetyLevel = "safe" - CrossUnsafe SafetyLevel = "cross-unsafe" - Unsafe SafetyLevel = "unsafe" - Invalid SafetyLevel = "invalid" + // Finalized is CrossSafe, with the additional constraint that every + // dependency is derived only from finalized L1 input data. + // This matches RPC label "finalized". + Finalized SafetyLevel = "finalized" + // CrossSafe is as safe as LocalSafe, with all its dependencies + // also fully verified to be reproducible from L1. + // This matches RPC label "safe". + CrossSafe SafetyLevel = "safe" + // LocalSafe is verified to be reproducible from L1, + // without any verified cross-L2 dependencies. + // This does not have an RPC label. + LocalSafe SafetyLevel = "local-safe" + // CrossUnsafe is as safe as LocalUnsafe, + // but with verified cross-L2 dependencies that are at least CrossUnsafe. + // This does not have an RPC label. + CrossUnsafe SafetyLevel = "cross-unsafe" + // LocalUnsafe is the safety of the tip of the chain. This matches RPC label "unsafe". + LocalUnsafe SafetyLevel = "unsafe" + // Invalid is the safety of when the message or block is not matching the expected data. 
+ Invalid SafetyLevel = "invalid" ) type ChainID uint256.Int From c7ab63b7e763bff829e1fb095d2a0e74d907b94a Mon Sep 17 00:00:00 2001 From: Blaine Malone Date: Thu, 26 Sep 2024 19:06:35 -0400 Subject: [PATCH 059/116] fix: Update absolute prestate (#12161) * fix: Update absolute prestate * fix: just pre-pr * fix: absolute prestate updated as part of assertions. * chore: semver lock --------- Co-authored-by: Matt Solomon --- packages/contracts-bedrock/scripts/DeployOPChain.s.sol | 8 +++++++- packages/contracts-bedrock/semver-lock.json | 4 ++-- packages/contracts-bedrock/src/L1/OPContractsManager.sol | 6 +++--- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol index d4d35bbf347a..ddf9f39f3023 100644 --- a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol @@ -322,7 +322,13 @@ contract DeployOPChainOutput is BaseDeployIO { PermissionedDisputeGame game = permissionedDisputeGame(); require(GameType.unwrap(game.gameType()) == GameType.unwrap(GameTypes.PERMISSIONED_CANNON), "DPG-10"); - require(Claim.unwrap(game.absolutePrestate()) == bytes32(hex"dead"), "DPG-20"); + // This hex string is the absolutePrestate of the latest op-program release, see where the + // `EXPECTED_PRESTATE_HASH` is defined in `config.yml`. 
+ require( + Claim.unwrap(game.absolutePrestate()) + == bytes32(hex"038512e02c4c3f7bdaec27d00edf55b7155e0905301e1a88083e4e0a6764d54c"), + "DPG-20" + ); OPContractsManager opcm = _doi.opcmProxy(); (address mips,) = opcm.implementations(opcm.latestRelease(), "MIPS"); diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index cf14e6995ca6..86c46694dc61 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -32,8 +32,8 @@ "sourceCodeHash": "0xde4df0f9633dc0cdb1c9f634003ea5b0f7c5c1aebc407bc1b2f44c0ecf938649" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0x15ace86b1d389d02654392e10c3d444000f22e549ad2736cd5a869e4d862ddc8", - "sourceCodeHash": "0xffb7698ab51b7ea58460146de3d094d8e2f869c1425d289832855062c77692a8" + "initCodeHash": "0x620481066bd0979c409ed9c089d32a1b7a05c610509222901ee3e73b0dc5565d", + "sourceCodeHash": "0x1bd201aef876cd32a34f8b100362df87ffaa0c1ddfbf5a19a5c43ced4c26d791" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xbe2c0c81b3459014f287d8c89cdc0d27dde5d1f44e5d024fa1e4773ddc47c190", diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index 1e81946578ae..5a00bb6f6613 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -127,8 +127,8 @@ contract OPContractsManager is ISemver, Initializable { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.12 - string public constant version = "1.0.0-beta.12"; + /// @custom:semver 1.0.0-beta.13 + string public constant version = "1.0.0-beta.13"; /// @notice Represents the interface version so consumers know how to decode the DeployOutput struct /// that's emitted in the `Deployed` event. Whenever that struct changes, a new version should be used. 
@@ -592,7 +592,7 @@ contract OPContractsManager is ISemver, Initializable { { return abi.encode( GameType.wrap(1), // Permissioned Cannon - Claim.wrap(bytes32(hex"dead")), // absolutePrestate + Claim.wrap(bytes32(hex"038512e02c4c3f7bdaec27d00edf55b7155e0905301e1a88083e4e0a6764d54c")), // absolutePrestate 73, // maxGameDepth 30, // splitDepth Duration.wrap(3 hours), // clockExtension From 6163b3216075411348d6445b24573a5151032fcc Mon Sep 17 00:00:00 2001 From: Inphi Date: Thu, 26 Sep 2024 21:29:14 -0400 Subject: [PATCH 060/116] ci: Add Cannon STF verify recurring job (#12165) * ci: Add Cannon STF verify recurring job * setup remote docker * change make dir * fix cwd --- .circleci/config.yml | 30 ++++++++++++++++++++++++++++++ cannon/Dockerfile.diff | 34 ++++++++++++++++++++++++++++++++++ cannon/Makefile | 6 +++++- 3 files changed, 69 insertions(+), 1 deletion(-) create mode 100644 cannon/Dockerfile.diff diff --git a/.circleci/config.yml b/.circleci/config.yml index 03b998a12203..7994feb7526a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1146,6 +1146,32 @@ jobs: - notify-failures-on-develop: mentions: "@proofs-squad" + cannon-stf-verify: + docker: + - image: <> + steps: + - checkout + - setup_remote_docker + - restore_cache: + name: Restore Go modules cache + key: gomod-{{ checksum "go.sum" }} + - restore_cache: + name: Restore Go build cache + keys: + - golang-build-cache-cannon-stf-verify-{{ checksum "go.sum" }} + - golang-build-cache-cannon-stf-verify- + - run: + name: Build cannon + command: make cannon + - run: + name: Verify the Cannon STF + command: make -C ./cannon cannon-stf-verify + - save_cache: + name: Save Go build cache + key: golang-build-cache-cannon-stf-verify-{{ checksum "go.sum" }} + paths: + - "/root/.cache/go-build" + devnet: machine: image: <> @@ -1867,6 +1893,9 @@ workflows: - cannon-prestate: requires: - go-mod-download + - cannon-stf-verify: + requires: + - go-mod-download - contracts-bedrock-build: skip_pattern: test 
context: @@ -1884,6 +1913,7 @@ workflows: requires: - contracts-bedrock-build - cannon-prestate + - cannon-stf-verify context: - slack diff --git a/cannon/Dockerfile.diff b/cannon/Dockerfile.diff new file mode 100644 index 000000000000..78384fa30010 --- /dev/null +++ b/cannon/Dockerfile.diff @@ -0,0 +1,34 @@ +FROM golang:1.21.3-alpine3.18 as builder + +RUN apk add --no-cache make bash + +COPY ./go.mod /app/go.mod +COPY ./go.sum /app/go.sum + +WORKDIR /app + +RUN echo "go mod cache: $(go env GOMODCACHE)" +RUN echo "go build cache: $(go env GOCACHE)" + +RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build go mod download + +COPY . /app + +# We avoid copying the full .git dir into the build for just some metadata. +# Instead, specify: +# --build-arg GIT_COMMIT=$(git rev-parse HEAD) +# --build-arg GIT_DATE=$(git show -s --format='%ct') +ARG GIT_COMMIT +ARG GIT_DATE + +ARG TARGETOS TARGETARCH + +FROM --platform=$BUILDPLATFORM us-docker.pkg.dev/oplabs-tools-artifacts/images/cannon:v1.0.0-alpha.2 AS cannon-v1 + +FROM --platform=$BUILDPLATFORM builder as cannon-verify +COPY --from=cannon-v1 /usr/local/bin/cannon /usr/local/bin/cannon-v1 +# verify the latest singlethreaded VM behavior against cannon-v1 +RUN cd cannon && make diff-singlethreaded-cannon -e OTHER_CANNON=/usr/local/bin/cannon-v1 +RUN --mount=type=cache,target=/root/.cache/go-build cd cannon && \ + make diff-singlethreaded-cannon -e OTHER_CANNON=/usr/local/bin/cannon-v1 \ + GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE diff --git a/cannon/Makefile b/cannon/Makefile index e80de55b5e44..6a0275e16aca 100644 --- a/cannon/Makefile +++ b/cannon/Makefile @@ -65,6 +65,9 @@ diff-%-cannon: cannon elf exit 1; \ fi +cannon-stf-verify: + @docker build --progress plain -f Dockerfile.diff ../ + fuzz: # Common vm tests go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateSyscallBrk ./mipsevm/tests @@ -88,4 +91,5 @@ fuzz: test \ lint \ fuzz \ - 
diff-%-cannon + diff-%-cannon \ + cannon-stf-verify From 8be15505a3848b7ab6fafd1c44ea7bee47df3f2f Mon Sep 17 00:00:00 2001 From: tsnobster Date: Fri, 27 Sep 2024 03:56:34 +0200 Subject: [PATCH 061/116] Update CONTRIBUTING.md (#12164) --- packages/contracts-bedrock/CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/contracts-bedrock/CONTRIBUTING.md b/packages/contracts-bedrock/CONTRIBUTING.md index a249ae5bedca..43f6a710747b 100644 --- a/packages/contracts-bedrock/CONTRIBUTING.md +++ b/packages/contracts-bedrock/CONTRIBUTING.md @@ -39,7 +39,7 @@ If you have any questions about the smart contracts, please feel free to ask the #### How Do I Submit a Good Enhancement Suggestion? -Enhancement suggestions are tracked as [GitHub issues](/issues). +Enhancement suggestions are tracked as [GitHub issues](https://github.com/ethereum-optimism/optimism/issues). - Use a **clear and descriptive title** for the issue to identify the suggestion. - Provide a **step-by-step** description of the suggested enhancement in as many details as possible. From a9c7f349d10b136529ab588b4679ec22d6074225 Mon Sep 17 00:00:00 2001 From: Karl Bartel Date: Fri, 27 Sep 2024 04:01:38 +0200 Subject: [PATCH 062/116] Use DA /put path from spec (#12081) * Accept /put path as described in spec In addition to the currently used `/put/` path to ease the migration. See https://github.com/ethereum-optimism/optimism/issues/11499. * alt-DA: write to /put path as described in spec The spec mandates using `/put` and not `/put/`, so that is what we should do. Warning: This will break DA solutions that only accept `/put/` at the moment. It is recommended that DA solutions support both paths for a while, so that updating OP-stack can happen independently of the exact DA implementation version. Closes https://github.com/ethereum-optimism/optimism/issues/11499. 
--- op-alt-da/daclient.go | 2 +- op-alt-da/damock.go | 2 +- op-alt-da/daserver.go | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/op-alt-da/daclient.go b/op-alt-da/daclient.go index 269b71f3c104..9f0bdab11fbd 100644 --- a/op-alt-da/daclient.go +++ b/op-alt-da/daclient.go @@ -119,7 +119,7 @@ func (c *DAClient) setInput(ctx context.Context, img []byte) (CommitmentData, er } body := bytes.NewReader(img) - url := fmt.Sprintf("%s/put/", c.url) + url := fmt.Sprintf("%s/put", c.url) req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, body) if err != nil { return nil, fmt.Errorf("failed to create HTTP request: %w", err) diff --git a/op-alt-da/damock.go b/op-alt-da/damock.go index 0db129171a82..ad388d0b2653 100644 --- a/op-alt-da/damock.go +++ b/op-alt-da/damock.go @@ -141,7 +141,7 @@ func (s *FakeDAServer) Start() error { // Override the HandleGet/Put method registrations mux := http.NewServeMux() mux.HandleFunc("/get/", s.HandleGet) - mux.HandleFunc("/put/", s.HandlePut) + mux.HandleFunc("/put", s.HandlePut) s.httpServer.Handler = mux return nil } diff --git a/op-alt-da/daserver.go b/op-alt-da/daserver.go index 94446944b543..ccdc2a0cb4d3 100644 --- a/op-alt-da/daserver.go +++ b/op-alt-da/daserver.go @@ -54,6 +54,7 @@ func (d *DAServer) Start() error { mux.HandleFunc("/get/", d.HandleGet) mux.HandleFunc("/put/", d.HandlePut) + mux.HandleFunc("/put", d.HandlePut) d.httpServer.Handler = mux @@ -128,7 +129,7 @@ func (d *DAServer) HandlePut(w http.ResponseWriter, r *http.Request) { d.log.Info("PUT", "url", r.URL) route := path.Dir(r.URL.Path) - if route != "/put" { + if route != "/put" && r.URL.Path != "/put" { w.WriteHeader(http.StatusBadRequest) return } From 32a3637ef893e656fa4526880a4c3fa428115ccc Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Thu, 26 Sep 2024 23:58:32 -0400 Subject: [PATCH 063/116] fix(ct): correct constructor parameters (#12158) Certain constructor parameters were not being inserted correctly. 
Although this didn't cause a failure during deployment, it did cause Kontrol tests to fail. --- .../scripts/deploy/Deploy.s.sol | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index 0787c965e199..4fec5729fca7 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -678,7 +678,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "SuperchainConfig", - _args: abi.encodeCall(ISuperchainConfig.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(ISuperchainConfig.__constructor__, ())) }) ); @@ -694,7 +694,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "L1CrossDomainMessenger", - _args: abi.encodeCall(IL1CrossDomainMessenger.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1CrossDomainMessenger.__constructor__, ())) }) ); @@ -718,7 +718,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "OptimismPortal", - _args: abi.encodeCall(IOptimismPortal.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IOptimismPortal.__constructor__, ())) }); // Override the `OptimismPortal` contract to the deployed implementation. 
This is necessary @@ -778,7 +778,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "L2OutputOracle", - _args: abi.encodeCall(IL2OutputOracle.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IL2OutputOracle.__constructor__, ())) }) ); @@ -804,7 +804,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "OptimismMintableERC20Factory", - _args: abi.encodeCall(IOptimismMintableERC20Factory.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IOptimismMintableERC20Factory.__constructor__, ())) }) ); @@ -825,7 +825,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "DisputeGameFactory", - _args: abi.encodeCall(IDisputeGameFactory.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IDisputeGameFactory.__constructor__, ())) }) ); @@ -872,7 +872,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "ProtocolVersions", - _args: abi.encodeCall(IProtocolVersions.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IProtocolVersions.__constructor__, ())) }) ); @@ -951,7 +951,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "SystemConfigInterop", - _args: abi.encodeCall(ISystemConfigInterop.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(ISystemConfigInterop.__constructor__, ())) }); save("SystemConfig", addr_); } else { @@ -959,7 +959,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "SystemConfig", - _args: abi.encodeCall(ISystemConfig.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(ISystemConfig.__constructor__, ())) }); } @@ -978,7 +978,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "L1StandardBridge", - _args: abi.encodeCall(IL1StandardBridge.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1StandardBridge.__constructor__, ())) }) ); 
@@ -999,7 +999,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "L1ERC721Bridge", - _args: abi.encodeCall(IL1ERC721Bridge.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1ERC721Bridge.__constructor__, ())) }) ); @@ -1035,7 +1035,7 @@ contract Deploy is Deployer { _save: this, _salt: _implSalt(), _name: "DataAvailabilityChallenge", - _args: abi.encodeCall(IDataAvailabilityChallenge.__constructor__, ()) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IDataAvailabilityChallenge.__constructor__, ())) }) ); addr_ = address(dac); From 169f808f4095620d2065f73d0de5d20a67e711f9 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Fri, 27 Sep 2024 08:18:27 -0600 Subject: [PATCH 064/116] Remove devnet tests (#12170) The devnet tests aren't providing a lot of value. They use our outdated Python devnet tooling, and only run a tiny subset of our overall test suite. They are also are flaky and slow to run. To prioritize improving the DevX of the monorepo, this PR removes the devnet tests from CCI. --- .circleci/config.yml | 191 ------------------------------------------- 1 file changed, 191 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 7994feb7526a..6930b4ddd1e5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1172,185 +1172,6 @@ jobs: paths: - "/root/.cache/go-build" - devnet: - machine: - image: <> - parameters: - variant: - type: string - environment: - DOCKER_BUILDKIT: 1 - DEVNET_NO_BUILD: 'true' - # Default value; Can be overridden. - DEVNET_L2OO: 'false' - DEVNET_ALTDA: 'false' - steps: - - checkout - - attach_workspace: { at: "." 
} - - check-changed: - patterns: op-(.+),packages,ops-bedrock,bedrock-devnet - - when: - condition: - equal: ['altda', <>] - steps: - - run: - name: Set DEVNET_ALTDA = true - command: echo 'export DEVNET_ALTDA=true' >> $BASH_ENV - - when: - condition: - equal: ['altda-generic', <>] - steps: - - run: - name: Set DEVNET_ALTDA = true - command: echo 'export DEVNET_ALTDA=true' >> $BASH_ENV - - run: - name: Set GENERIC_ALTDA = true - command: echo 'export GENERIC_ALTDA=true' >> $BASH_ENV - - restore_cache: - name: Restore Go modules cache - key: gomod-{{ checksum "go.sum" }} - - restore_cache: - name: Restore Go build cache - keys: - - golang-build-cache-devnet-{{ checksum "go.sum" }} - - golang-build-cache-devnet- - - run: - name: Install latest golang - command: | - VER=$(jq -r .go < versions.json) - sudo rm -rf /usr/local/go - wget "https://go.dev/dl/go${VER}.linux-amd64.tar.gz" -O - | sudo tar -C /usr/local -xz - export PATH=$PATH:/usr/local/go/bin - go version - - run: - name: Install Geth - command: | - VER=$(jq -r .geth_release < versions.json) - wget "https://gethstore.blob.core.windows.net/builds/geth-alltools-linux-amd64-${VER}.tar.gz" -O - | tar xz - sudo cp "geth-alltools-linux-amd64-${VER}"/* /usr/local/bin - - run: - name: Install eth2-testnet-genesis - command: | - go install -v github.com/protolambda/eth2-testnet-genesis@$(jq -r .eth2_testnet_genesis < versions.json) - - run: - name: foundryup - command: | - curl -L https://foundry.paradigm.xyz | bash - source $HOME/.bashrc - foundryup - echo 'export PATH=$HOME/.foundry/bin:$PATH' >> $BASH_ENV - source $HOME/.bashrc - forge --version - - run: - name: Install Just - command: | - VER=$(jq -r .just < versions.json) - curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- --to $HOME/bin --tag "${VER}" - echo 'export PATH="${PATH}:$HOME/bin"' >> $BASH_ENV - - install-contracts-dependencies - - when: - condition: - not: - equal: ['default', <>] - steps: - - run: - name: Use 
non-default devnet allocs - command: rm -r .devnet && mv .devnet-<> .devnet - - run: - name: Load and tag docker images - command: | - IMAGE_BASE_PREFIX="us-docker.pkg.dev/oplabs-tools-artifacts/images" - # Load from previous docker-build job - docker load < "./op-node.tar" - docker load < "./op-proposer.tar" - docker load < "./op-batcher.tar" - docker load < "./op-challenger.tar" - docker load < "./da-server.tar" - # rename to the tags that the docker-compose of the devnet expects - docker tag "$IMAGE_BASE_PREFIX/op-node:<>" "$IMAGE_BASE_PREFIX/op-node:devnet" - docker tag "$IMAGE_BASE_PREFIX/op-proposer:<>" "$IMAGE_BASE_PREFIX/op-proposer:devnet" - docker tag "$IMAGE_BASE_PREFIX/op-batcher:<>" "$IMAGE_BASE_PREFIX/op-batcher:devnet" - docker tag "$IMAGE_BASE_PREFIX/op-challenger:<>" "$IMAGE_BASE_PREFIX/op-challenger:devnet" - docker tag "$IMAGE_BASE_PREFIX/da-server:<>" "$IMAGE_BASE_PREFIX/da-server:devnet" - - run: - name: Bring up the stack - command: | - # Specify like this to avoid a forced rebuild of the contracts + devnet L1 - PYTHONPATH=./bedrock-devnet python3 ./bedrock-devnet/main.py --monorepo-dir=. - echo "Waiting for 10 seconds to give the devnet time to settle in..." - sleep 10 - - run: - name: Test the stack - command: make devnet-test - - run: - name: Dump op-node logs - command: | - docker logs ops-bedrock-op-node-1 || echo "No logs." - when: on_fail - - run: - name: Dump op-geth logs - command: | - docker logs ops-bedrock-l2-1 || echo "No logs." - when: on_fail - - run: - name: Dump l1 logs - command: | - docker logs ops-bedrock-l1-1 || echo "No logs." - when: on_fail - - run: - name: Dump l1-bn logs - command: | - docker logs ops-bedrock-l1-bn-1 || echo "No logs." - when: on_fail - - run: - name: Dump l1-vc logs - command: | - docker logs ops-bedrock-l1-vc-1 || echo "No logs." - when: on_fail - - run: - name: Dump op-batcher logs - command: | - docker logs ops-bedrock-op-batcher-1 || echo "No logs." 
- when: on_fail - - run: - name: Dump op-proposer logs - command: | - docker logs ops-bedrock-op-proposer-1 || echo "No logs." - when: on_fail - - run: - name: Dump op-challenger logs - command: | - docker logs ops-bedrock-op-challenger-1 || echo "No logs." - when: on_fail - - run: - name: Dump da-server logs - command: | - docker logs ops-bedrock-da-server-1 || echo "No logs." - when: on_fail - - run: - name: Log deployment artifact - command: | - cat broadcast/Deploy.s.sol/900/run-latest.json || echo "No deployment file found" - when: on_fail - working_directory: packages/contracts-bedrock - - run: - name: Log devnet config - command: | - cat deploy-config/devnetL1.json || echo "No devnet config found" - when: on_fail - working_directory: packages/contracts-bedrock - - run: - name: Log artifacts directory - command: | - ls -R forge-artifacts || echo "No forge artifacts found" - when: on_fail - working_directory: packages/contracts-bedrock - - save_cache: - name: Save Go build cache - key: golang-build-cache-devnet-{{ checksum "go.sum" }} - paths: - - /home/circleci/.cache/go-build - semgrep-scan: parameters: diff_branch: @@ -1729,18 +1550,6 @@ workflows: - cannon-prestate: requires: - go-mod-download - - devnet: - matrix: - parameters: - variant: ["default", "altda", "altda-generic"] - requires: - - contracts-bedrock-build - - op-batcher-docker-build - - op-proposer-docker-build - - op-node-docker-build - - op-challenger-docker-build - - da-server-docker-build - - cannon-prestate - check-generated-mocks-op-node - check-generated-mocks-op-service - cannon-go-lint-and-test: From b0a4c11489abc7adfa4e801d6b06fb9e496c200f Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Fri, 27 Sep 2024 10:43:13 -0400 Subject: [PATCH 065/116] maint(ct): add leftover interfaces (#12167) Adds a few remaining interfaces required before we can migrate OPCM to use interfaces over source contracts. 
--- .../scripts/DeployImplementations.s.sol | 2 +- .../scripts/deploy/ChainAssertions.sol | 3 - .../scripts/deploy/Deploy.s.sol | 195 ++++++++++-------- .../scripts/fpac/FPACOPS.s.sol | 20 +- .../scripts/fpac/FPACOPS2.s.sol | 16 +- .../scripts/libraries/DeployUtils.sol | 6 +- .../periphery/deploy/DeployPeriphery.s.sol | 1 + packages/contracts-bedrock/semver-lock.json | 8 +- .../snapshots/abi/ProxyAdmin.json | 4 +- .../snapshots/storageLayout/ProxyAdmin.json | 2 +- .../src/L1/OPContractsManager.sol | 7 +- .../src/cannon/PreimageOracle.sol | 68 ++++-- .../src/cannon/interfaces/IPreimageOracle.sol | 5 + .../interfaces/IResolvedDelegateProxy.sol | 6 + .../src/universal/ProxyAdmin.sol | 27 ++- .../IOptimismMintableERC721Factory.sol | 20 ++ .../src/universal/interfaces/IProxy.sol | 19 ++ .../src/universal/interfaces/IProxyAdmin.sol | 34 +++ .../test/L1/DataAvailabilityChallenge.t.sol | 1 - .../test/L1/OptimismPortal.t.sol | 6 +- .../test/L1/OptimismPortal2.t.sol | 6 +- .../test/L1/ProtocolVersions.t.sol | 10 +- .../test/L1/ResourceMetering.t.sol | 11 +- .../test/L1/SystemConfig.t.sol | 1 - .../contracts-bedrock/test/cannon/MIPS.t.sol | 10 +- .../contracts-bedrock/test/cannon/MIPS2.t.sol | 12 +- .../test/dispute/FaultDisputeGame.t.sol | 12 +- .../dispute/PermissionedDisputeGame.t.sol | 2 +- .../test/mocks/AlphabetVM.sol | 9 +- .../OptimismMintableERC20Factory.t.sol | 4 +- .../test/universal/ProxyAdmin.t.sol | 14 +- 31 files changed, 355 insertions(+), 186 deletions(-) create mode 100644 packages/contracts-bedrock/src/universal/interfaces/IOptimismMintableERC721Factory.sol create mode 100644 packages/contracts-bedrock/src/universal/interfaces/IProxy.sol create mode 100644 packages/contracts-bedrock/src/universal/interfaces/IProxyAdmin.sol diff --git a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol index bdcba80d712c..5a0e6af0005e 100644 --- 
a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol +++ b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol @@ -885,7 +885,7 @@ contract DeployImplementations is Script { if (existingImplementation != address(0)) { singleton = MIPS(payable(existingImplementation)); } else if (isDevelopRelease(release)) { - IPreimageOracle preimageOracle = IPreimageOracle(_dio.preimageOracleSingleton()); + IPreimageOracle preimageOracle = IPreimageOracle(address(_dio.preimageOracleSingleton())); vm.broadcast(msg.sender); singleton = new MIPS(preimageOracle); } else { diff --git a/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol b/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol index 7e5a9164f466..af1b939014b0 100644 --- a/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol +++ b/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol @@ -10,9 +10,6 @@ import { DeployConfig } from "scripts/deploy/DeployConfig.s.sol"; import { Deployer } from "scripts/deploy/Deployer.sol"; import { ISystemConfigV0 } from "scripts/interfaces/ISystemConfigV0.sol"; -// Contracts -import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; - // Libraries import { Constants } from "src/libraries/Constants.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index 4fec5729fca7..eda0a5695eb2 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -26,25 +26,19 @@ import { ChainAssertions } from "scripts/deploy/ChainAssertions.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; // Contracts -import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; import { AddressManager } from "src/legacy/AddressManager.sol"; -import { Proxy } from "src/universal/Proxy.sol"; -import { StandardBridge } from 
"src/universal/StandardBridge.sol"; -import { L1ChugSplashProxy } from "src/legacy/L1ChugSplashProxy.sol"; -import { ResolvedDelegateProxy } from "src/legacy/ResolvedDelegateProxy.sol"; -import { PreimageOracle } from "src/cannon/PreimageOracle.sol"; -import { MIPS } from "src/cannon/MIPS.sol"; -import { MIPS2 } from "src/cannon/MIPS2.sol"; import { StorageSetter } from "src/universal/StorageSetter.sol"; // Libraries import { Constants } from "src/libraries/Constants.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import { Types } from "scripts/libraries/Types.sol"; -import "src/dispute/lib/Types.sol"; import { LibClaim, Duration } from "src/dispute/lib/LibUDT.sol"; +import "src/dispute/lib/Types.sol"; // Interfaces +import { IProxy } from "src/universal/interfaces/IProxy.sol"; +import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; import { IOptimismPortalInterop } from "src/L1/interfaces/IOptimismPortalInterop.sol"; @@ -65,8 +59,13 @@ import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol" import { IPermissionedDisputeGame } from "src/dispute/interfaces/IPermissionedDisputeGame.sol"; import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; +import { IMIPS } from "src/cannon/interfaces/IMIPS.sol"; +import { IMIPS2 } from "src/cannon/interfaces/IMIPS2.sol"; import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; +import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; +import { IL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; +import { IResolvedDelegateProxy } from 
"src/legacy/interfaces/IResolvedDelegateProxy.sol"; /// @title Deploy /// @notice Script used to deploy a bedrock system. The entire system is deployed within the `run` function. @@ -245,7 +244,7 @@ contract Deploy is Deployer { address proxyAdmin = mustGetAddress("ProxyAdmin"); bytes memory data = - abi.encodeCall(ProxyAdmin.upgradeAndCall, (payable(_proxy), _implementation, _innerCallData)); + abi.encodeCall(IProxyAdmin.upgradeAndCall, (payable(_proxy), _implementation, _innerCallData)); Safe safe = Safe(mustGetAddress("SystemOwnerSafe")); _callViaSafe({ _safe: safe, _target: proxyAdmin, _data: data }); @@ -253,7 +252,7 @@ contract Deploy is Deployer { /// @notice Transfer ownership of the ProxyAdmin contract to the final system owner function transferProxyAdminOwnership() public broadcast { - ProxyAdmin proxyAdmin = ProxyAdmin(mustGetAddress("ProxyAdmin")); + IProxyAdmin proxyAdmin = IProxyAdmin(mustGetAddress("ProxyAdmin")); address owner = proxyAdmin.owner(); address safe = mustGetAddress("SystemOwnerSafe"); if (owner != safe) { @@ -267,7 +266,7 @@ contract Deploy is Deployer { /// have been performed on the proxy. /// @param _name The name of the proxy to transfer ownership of. 
function transferProxyToProxyAdmin(string memory _name) public broadcast { - Proxy proxy = Proxy(mustGetAddress(_name)); + IProxy proxy = IProxy(mustGetAddress(_name)); address proxyAdmin = mustGetAddress("ProxyAdmin"); proxy.changeAdmin(proxyAdmin); console.log("Proxy %s ownership transferred to ProxyAdmin at: %s", _name, proxyAdmin); @@ -301,11 +300,11 @@ contract Deploy is Deployer { console.log("Deploying a fresh OP Stack with existing SuperchainConfig and ProtocolVersions"); - Proxy scProxy = Proxy(_superchainConfigProxy); + IProxy scProxy = IProxy(_superchainConfigProxy); save("SuperchainConfig", scProxy.implementation()); save("SuperchainConfigProxy", _superchainConfigProxy); - Proxy pvProxy = Proxy(_protocolVersionsProxy); + IProxy pvProxy = IProxy(_protocolVersionsProxy); save("ProtocolVersions", pvProxy.implementation()); save("ProtocolVersionsProxy", _protocolVersionsProxy); @@ -569,19 +568,22 @@ contract Deploy is Deployer { /// @notice Deploy the ProxyAdmin function deployProxyAdmin() public broadcast returns (address addr_) { - console.log("Deploying ProxyAdmin"); - ProxyAdmin admin = new ProxyAdmin({ _owner: msg.sender }); + IProxyAdmin admin = IProxyAdmin( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "ProxyAdmin", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxyAdmin.__constructor__, (msg.sender))) + }) + ); require(admin.owner() == msg.sender); - AddressManager addressManager = AddressManager(mustGetAddress("AddressManager")); + IAddressManager addressManager = IAddressManager(mustGetAddress("AddressManager")); if (admin.addressManager() != addressManager) { admin.setAddressManager(addressManager); } require(admin.addressManager() == addressManager); - - save("ProxyAdmin", address(admin)); - console.log("ProxyAdmin deployed at %s", address(admin)); addr_ = address(admin); } @@ -601,26 +603,36 @@ contract Deploy is Deployer { /// @notice Deploy the L1StandardBridgeProxy using a ChugSplashProxy function 
deployL1StandardBridgeProxy() public broadcast returns (address addr_) { - console.log("Deploying proxy for L1StandardBridge"); address proxyAdmin = mustGetAddress("ProxyAdmin"); - L1ChugSplashProxy proxy = new L1ChugSplashProxy(proxyAdmin); - + IL1ChugSplashProxy proxy = IL1ChugSplashProxy( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "L1ChugSplashProxy", + _nick: "L1StandardBridgeProxy", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1ChugSplashProxy.__constructor__, (proxyAdmin))) + }) + ); require(EIP1967Helper.getAdmin(address(proxy)) == proxyAdmin); - - save("L1StandardBridgeProxy", address(proxy)); - console.log("L1StandardBridgeProxy deployed at %s", address(proxy)); addr_ = address(proxy); } /// @notice Deploy the L1CrossDomainMessengerProxy using a ResolvedDelegateProxy function deployL1CrossDomainMessengerProxy() public broadcast returns (address addr_) { - console.log("Deploying proxy for L1CrossDomainMessenger"); - AddressManager addressManager = AddressManager(mustGetAddress("AddressManager")); - ResolvedDelegateProxy proxy = new ResolvedDelegateProxy(addressManager, "OVM_L1CrossDomainMessenger"); - - save("L1CrossDomainMessengerProxy", address(proxy)); - console.log("L1CrossDomainMessengerProxy deployed at %s", address(proxy)); - + IResolvedDelegateProxy proxy = IResolvedDelegateProxy( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "ResolvedDelegateProxy", + _nick: "L1CrossDomainMessengerProxy", + _args: DeployUtils.encodeConstructor( + abi.encodeCall( + IResolvedDelegateProxy.__constructor__, + (IAddressManager(mustGetAddress("AddressManager")), "OVM_L1CrossDomainMessenger") + ) + ) + }) + ); addr_ = address(proxy); } @@ -643,27 +655,32 @@ contract Deploy is Deployer { broadcast returns (address addr_) { - console.log(string.concat("Deploying ERC1967 proxy for ", _name)); - Proxy proxy = new Proxy({ _admin: _proxyOwner }); - + IProxy proxy = IProxy( + DeployUtils.create2AndSave({ 
+ _save: this, + _salt: keccak256(abi.encode(_implSalt(), _name)), + _name: "Proxy", + _nick: _name, + _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxy.__constructor__, (_proxyOwner))) + }) + ); require(EIP1967Helper.getAdmin(address(proxy)) == _proxyOwner); - - save(_name, address(proxy)); - console.log(" at %s", address(proxy)); addr_ = address(proxy); } /// @notice Deploy the DataAvailabilityChallengeProxy function deployDataAvailabilityChallengeProxy() public broadcast returns (address addr_) { - console.log("Deploying proxy for DataAvailabilityChallenge"); address proxyAdmin = mustGetAddress("ProxyAdmin"); - Proxy proxy = new Proxy({ _admin: proxyAdmin }); - + IProxy proxy = IProxy( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "Proxy", + _nick: "DataAvailabilityChallengeProxy", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxy.__constructor__, (proxyAdmin))) + }) + ); require(EIP1967Helper.getAdmin(address(proxy)) == proxyAdmin); - - save("DataAvailabilityChallengeProxy", address(proxy)); - console.log("DataAvailabilityChallengeProxy deployed at %s", address(proxy)); - addr_ = address(proxy); } @@ -888,43 +905,35 @@ contract Deploy is Deployer { /// @notice Deploy the PreimageOracle function deployPreimageOracle() public broadcast returns (address addr_) { - console.log("Deploying PreimageOracle implementation"); - PreimageOracle preimageOracle = new PreimageOracle{ salt: _implSalt() }({ - _minProposalSize: cfg.preimageOracleMinProposalSize(), - _challengePeriod: cfg.preimageOracleChallengePeriod() - }); - save("PreimageOracle", address(preimageOracle)); - console.log("PreimageOracle deployed at %s", address(preimageOracle)); - + IPreimageOracle preimageOracle = IPreimageOracle( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "PreimageOracle", + _args: DeployUtils.encodeConstructor( + abi.encodeCall( + IPreimageOracle.__constructor__, + (cfg.preimageOracleMinProposalSize(), 
cfg.preimageOracleChallengePeriod()) + ) + ) + }) + ); addr_ = address(preimageOracle); } /// @notice Deploy Mips VM. Deploys either MIPS or MIPS2 depending on the environment function deployMips() public broadcast returns (address addr_) { - if (Config.useMultithreadedCannon()) { - addr_ = _deployMips2(); - } else { - addr_ = _deployMips(); - } + addr_ = DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: Config.useMultithreadedCannon() ? "MIPS2" : "MIPS", + _args: DeployUtils.encodeConstructor( + abi.encodeCall(IMIPS2.__constructor__, (IPreimageOracle(mustGetAddress("PreimageOracle")))) + ) + }); save("Mips", address(addr_)); } - /// @notice Deploy MIPS - function _deployMips() internal returns (address addr_) { - console.log("Deploying Mips implementation"); - MIPS mips = new MIPS{ salt: _implSalt() }(IPreimageOracle(mustGetAddress("PreimageOracle"))); - console.log("MIPS deployed at %s", address(mips)); - addr_ = address(mips); - } - - /// @notice Deploy MIPS2 - function _deployMips2() internal returns (address addr_) { - console.log("Deploying Mips2 implementation"); - MIPS2 mips2 = new MIPS2{ salt: _implSalt() }(IPreimageOracle(mustGetAddress("PreimageOracle"))); - console.log("MIPS2 deployed at %s", address(mips2)); - addr_ = address(mips2); - } - /// @notice Deploy the AnchorStateRegistry function deployAnchorStateRegistry() public broadcast returns (address addr_) { IAnchorStateRegistry anchorStateRegistry = IAnchorStateRegistry( @@ -1017,7 +1026,7 @@ contract Deploy is Deployer { /// @notice Transfer ownership of the address manager to the ProxyAdmin function transferAddressManagerOwnership() public broadcast { console.log("Transferring AddressManager ownership to ProxyAdmin"); - AddressManager addressManager = AddressManager(mustGetAddress("AddressManager")); + IAddressManager addressManager = IAddressManager(mustGetAddress("AddressManager")); address owner = addressManager.owner(); address proxyAdmin = 
mustGetAddress("ProxyAdmin"); if (owner != proxyAdmin) { @@ -1225,7 +1234,7 @@ contract Deploy is Deployer { /// @notice Initialize the L1StandardBridge function initializeL1StandardBridge() public broadcast { console.log("Upgrading and initializing L1StandardBridge proxy"); - ProxyAdmin proxyAdmin = ProxyAdmin(mustGetAddress("ProxyAdmin")); + IProxyAdmin proxyAdmin = IProxyAdmin(mustGetAddress("ProxyAdmin")); address l1StandardBridgeProxy = mustGetAddress("L1StandardBridgeProxy"); address l1StandardBridge = mustGetAddress("L1StandardBridge"); address l1CrossDomainMessengerProxy = mustGetAddress("L1CrossDomainMessengerProxy"); @@ -1234,14 +1243,14 @@ contract Deploy is Deployer { uint256 proxyType = uint256(proxyAdmin.proxyType(l1StandardBridgeProxy)); Safe safe = Safe(mustGetAddress("SystemOwnerSafe")); - if (proxyType != uint256(ProxyAdmin.ProxyType.CHUGSPLASH)) { + if (proxyType != uint256(IProxyAdmin.ProxyType.CHUGSPLASH)) { _callViaSafe({ _safe: safe, _target: address(proxyAdmin), - _data: abi.encodeCall(ProxyAdmin.setProxyType, (l1StandardBridgeProxy, ProxyAdmin.ProxyType.CHUGSPLASH)) + _data: abi.encodeCall(IProxyAdmin.setProxyType, (l1StandardBridgeProxy, IProxyAdmin.ProxyType.CHUGSPLASH)) }); } - require(uint256(proxyAdmin.proxyType(l1StandardBridgeProxy)) == uint256(ProxyAdmin.ProxyType.CHUGSPLASH)); + require(uint256(proxyAdmin.proxyType(l1StandardBridgeProxy)) == uint256(IProxyAdmin.ProxyType.CHUGSPLASH)); _upgradeAndCallViaSafe({ _proxy: payable(l1StandardBridgeProxy), @@ -1309,7 +1318,7 @@ contract Deploy is Deployer { /// @notice initializeL1CrossDomainMessenger function initializeL1CrossDomainMessenger() public broadcast { console.log("Upgrading and initializing L1CrossDomainMessenger proxy"); - ProxyAdmin proxyAdmin = ProxyAdmin(mustGetAddress("ProxyAdmin")); + IProxyAdmin proxyAdmin = IProxyAdmin(mustGetAddress("ProxyAdmin")); address l1CrossDomainMessengerProxy = mustGetAddress("L1CrossDomainMessengerProxy"); address l1CrossDomainMessenger = 
mustGetAddress("L1CrossDomainMessenger"); address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); @@ -1318,14 +1327,16 @@ contract Deploy is Deployer { uint256 proxyType = uint256(proxyAdmin.proxyType(l1CrossDomainMessengerProxy)); Safe safe = Safe(mustGetAddress("SystemOwnerSafe")); - if (proxyType != uint256(ProxyAdmin.ProxyType.RESOLVED)) { + if (proxyType != uint256(IProxyAdmin.ProxyType.RESOLVED)) { _callViaSafe({ _safe: safe, _target: address(proxyAdmin), - _data: abi.encodeCall(ProxyAdmin.setProxyType, (l1CrossDomainMessengerProxy, ProxyAdmin.ProxyType.RESOLVED)) + _data: abi.encodeCall( + IProxyAdmin.setProxyType, (l1CrossDomainMessengerProxy, IProxyAdmin.ProxyType.RESOLVED) + ) }); } - require(uint256(proxyAdmin.proxyType(l1CrossDomainMessengerProxy)) == uint256(ProxyAdmin.ProxyType.RESOLVED)); + require(uint256(proxyAdmin.proxyType(l1CrossDomainMessengerProxy)) == uint256(IProxyAdmin.ProxyType.RESOLVED)); string memory contractName = "OVM_L1CrossDomainMessenger"; string memory implName = proxyAdmin.implementationName(l1CrossDomainMessenger); @@ -1333,7 +1344,7 @@ contract Deploy is Deployer { _callViaSafe({ _safe: safe, _target: address(proxyAdmin), - _data: abi.encodeCall(ProxyAdmin.setImplementationName, (l1CrossDomainMessengerProxy, contractName)) + _data: abi.encodeCall(IProxyAdmin.setImplementationName, (l1CrossDomainMessengerProxy, contractName)) }); } require( @@ -1648,7 +1659,7 @@ contract Deploy is Deployer { weth: weth, gameType: GameTypes.ALPHABET, absolutePrestate: outputAbsolutePrestate, - faultVm: IBigStepper(new AlphabetVM(outputAbsolutePrestate, PreimageOracle(mustGetAddress("PreimageOracle")))), + faultVm: IBigStepper(new AlphabetVM(outputAbsolutePrestate, IPreimageOracle(mustGetAddress("PreimageOracle")))), // The max depth for the alphabet trace is always 3. Add 1 because split depth is fully inclusive. 
maxGameDepth: cfg.faultGameSplitDepth() + 3 + 1, maxClockDuration: Duration.wrap(uint64(cfg.faultGameMaxClockDuration())) @@ -1663,7 +1674,17 @@ contract Deploy is Deployer { IDelayedWETH weth = IDelayedWETH(mustGetAddress("DelayedWETHProxy")); Claim outputAbsolutePrestate = Claim.wrap(bytes32(cfg.faultGameAbsolutePrestate())); - PreimageOracle fastOracle = new PreimageOracle(cfg.preimageOracleMinProposalSize(), 0); + IPreimageOracle fastOracle = IPreimageOracle( + DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "PreimageOracle", + _nick: "FastPreimageOracle", + _args: DeployUtils.encodeConstructor( + abi.encodeCall(IPreimageOracle.__constructor__, (cfg.preimageOracleMinProposalSize(), 0)) + ) + }) + ); _setFaultGameImplementation({ _factory: factory, _allowUpgrade: _allowUpgrade, diff --git a/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol b/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol index 2dc07b525bd0..a5064cdd0a17 100644 --- a/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol +++ b/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol @@ -5,13 +5,11 @@ pragma solidity 0.8.15; import { StdAssertions } from "forge-std/StdAssertions.sol"; import "scripts/deploy/Deploy.s.sol"; -// Contracts -import { Proxy } from "src/universal/Proxy.sol"; - // Libraries import "src/dispute/lib/Types.sol"; // Interfaces +import { IProxy } from "src/universal/interfaces/IProxy.sol"; import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; @@ -78,7 +76,7 @@ contract FPACOPS is Deploy, StdAssertions { console.log("Initializing DisputeGameFactoryProxy with DisputeGameFactory."); address dgfProxy = mustGetAddress("DisputeGameFactoryProxy"); - Proxy(payable(dgfProxy)).upgradeToAndCall( + IProxy(payable(dgfProxy)).upgradeToAndCall( mustGetAddress("DisputeGameFactory"), 
abi.encodeCall(IDisputeGameFactory.initialize, msg.sender) ); @@ -93,7 +91,7 @@ contract FPACOPS is Deploy, StdAssertions { address wethProxy = mustGetAddress("DelayedWETHProxy"); address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); - Proxy(payable(wethProxy)).upgradeToAndCall( + IProxy(payable(wethProxy)).upgradeToAndCall( mustGetAddress("DelayedWETH"), abi.encodeCall(IDelayedWETH.initialize, (msg.sender, ISuperchainConfig(superchainConfigProxy))) ); @@ -121,7 +119,7 @@ contract FPACOPS is Deploy, StdAssertions { }); address asrProxy = mustGetAddress("AnchorStateRegistryProxy"); - Proxy(payable(asrProxy)).upgradeToAndCall( + IProxy(payable(asrProxy)).upgradeToAndCall( mustGetAddress("AnchorStateRegistry"), abi.encodeCall(IAnchorStateRegistry.initialize, (roots, superchainConfig)) ); @@ -136,7 +134,7 @@ contract FPACOPS is Deploy, StdAssertions { dgf.transferOwnership(_systemOwnerSafe); // Transfer the admin rights of the DisputeGameFactoryProxy to the ProxyAdmin. - Proxy prox = Proxy(payable(address(dgf))); + IProxy prox = IProxy(payable(address(dgf))); prox.changeAdmin(_proxyAdmin); } @@ -149,7 +147,7 @@ contract FPACOPS is Deploy, StdAssertions { weth.transferOwnership(_systemOwnerSafe); // Transfer the admin rights of the DelayedWETHProxy to the ProxyAdmin. - Proxy prox = Proxy(payable(address(weth))); + IProxy prox = IProxy(payable(address(weth))); prox.changeAdmin(_proxyAdmin); } @@ -158,7 +156,7 @@ contract FPACOPS is Deploy, StdAssertions { IAnchorStateRegistry asr = IAnchorStateRegistry(mustGetAddress("AnchorStateRegistryProxy")); // Transfer the admin rights of the AnchorStateRegistryProxy to the ProxyAdmin. - Proxy prox = Proxy(payable(address(asr))); + IProxy prox = IProxy(payable(address(asr))); prox.changeAdmin(_proxyAdmin); } @@ -182,11 +180,11 @@ contract FPACOPS is Deploy, StdAssertions { // Check the config elements in the deployed contracts. 
ChainAssertions.checkOptimismPortal2(contracts, cfg, false); - PreimageOracle oracle = PreimageOracle(mustGetAddress("PreimageOracle")); + IPreimageOracle oracle = IPreimageOracle(mustGetAddress("PreimageOracle")); assertEq(oracle.minProposalSize(), cfg.preimageOracleMinProposalSize()); assertEq(oracle.challengePeriod(), cfg.preimageOracleChallengePeriod()); - MIPS mips = MIPS(mustGetAddress("Mips")); + IMIPS mips = IMIPS(mustGetAddress("Mips")); assertEq(address(mips.oracle()), address(oracle)); // Check the AnchorStateRegistry configuration. diff --git a/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol b/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol index 5408d9acb151..f28bc214cd2a 100644 --- a/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol +++ b/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol @@ -5,13 +5,11 @@ pragma solidity 0.8.15; import { StdAssertions } from "forge-std/StdAssertions.sol"; import "scripts/deploy/Deploy.s.sol"; -// Contracts -import { Proxy } from "src/universal/Proxy.sol"; - // Libraries import "src/dispute/lib/Types.sol"; // Interfaces +import { IProxy } from "src/universal/interfaces/IProxy.sol"; import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; @@ -152,7 +150,7 @@ contract FPACOPS2 is Deploy, StdAssertions { address wethProxy = mustGetAddress("DelayedWETHProxy"); address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); - Proxy(payable(wethProxy)).upgradeToAndCall( + IProxy(payable(wethProxy)).upgradeToAndCall( mustGetAddress("DelayedWETH"), abi.encodeCall(IDelayedWETH.initialize, (msg.sender, ISuperchainConfig(superchainConfigProxy))) ); @@ -164,7 +162,7 @@ contract FPACOPS2 is Deploy, StdAssertions { address wethProxy = mustGetAddress("PermissionedDelayedWETHProxy"); address superchainConfigProxy = 
mustGetAddress("SuperchainConfigProxy"); - Proxy(payable(wethProxy)).upgradeToAndCall( + IProxy(payable(wethProxy)).upgradeToAndCall( mustGetAddress("DelayedWETH"), abi.encodeCall(IDelayedWETH.initialize, (msg.sender, ISuperchainConfig(superchainConfigProxy))) ); @@ -181,7 +179,7 @@ contract FPACOPS2 is Deploy, StdAssertions { weth.transferOwnership(_systemOwnerSafe); // Transfer the admin rights of the DelayedWETHProxy to the ProxyAdmin. - Proxy prox = Proxy(payable(address(weth))); + IProxy prox = IProxy(payable(address(weth))); prox.changeAdmin(_proxyAdmin); } @@ -196,7 +194,7 @@ contract FPACOPS2 is Deploy, StdAssertions { weth.transferOwnership(_systemOwnerSafe); // Transfer the admin rights of the DelayedWETHProxy to the ProxyAdmin. - Proxy prox = Proxy(payable(address(weth))); + IProxy prox = IProxy(payable(address(weth))); prox.changeAdmin(_proxyAdmin); } @@ -225,12 +223,12 @@ contract FPACOPS2 is Deploy, StdAssertions { ChainAssertions.checkPermissionedDelayedWETH(contracts, cfg, true, _systemOwnerSafe); // Verify PreimageOracle configuration. - PreimageOracle oracle = PreimageOracle(mustGetAddress("PreimageOracle")); + IPreimageOracle oracle = IPreimageOracle(mustGetAddress("PreimageOracle")); assertEq(oracle.minProposalSize(), cfg.preimageOracleMinProposalSize()); assertEq(oracle.challengePeriod(), cfg.preimageOracleChallengePeriod()); // Verify MIPS configuration. 
- MIPS mips = MIPS(mustGetAddress("Mips")); + IMIPS mips = IMIPS(mustGetAddress("Mips")); assertEq(address(mips.oracle()), address(oracle)); // Grab ASR diff --git a/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol b/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol index 7b078b45e2d4..3654424696b7 100644 --- a/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol +++ b/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol @@ -10,8 +10,8 @@ import { Artifacts } from "scripts/Artifacts.s.sol"; import { LibString } from "@solady/utils/LibString.sol"; import { Bytes } from "src/libraries/Bytes.sol"; -// Contracts -import { Proxy } from "src/universal/Proxy.sol"; +// Interfaces +import { IProxy } from "src/universal/interfaces/IProxy.sol"; library DeployUtils { Vm internal constant vm = Vm(address(uint160(uint256(keccak256("hevm cheat code"))))); @@ -221,7 +221,7 @@ library DeployUtils { // We prank as the zero address due to the Proxy's `proxyCallIfNotAdmin` modifier. // Pranking inside this function also means it can no longer be considered `view`. 
vm.prank(address(0)); - address implementation = Proxy(payable(_proxy)).implementation(); + address implementation = IProxy(payable(_proxy)).implementation(); assertValidContractAddress(implementation); } diff --git a/packages/contracts-bedrock/scripts/periphery/deploy/DeployPeriphery.s.sol b/packages/contracts-bedrock/scripts/periphery/deploy/DeployPeriphery.s.sol index 8f4b86b95d62..ea7cf2f4f5c3 100644 --- a/packages/contracts-bedrock/scripts/periphery/deploy/DeployPeriphery.s.sol +++ b/packages/contracts-bedrock/scripts/periphery/deploy/DeployPeriphery.s.sol @@ -1,5 +1,6 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; +// TODO: Migrate this script to use DeployUtils import { console2 as console } from "forge-std/console2.sol"; import { Script } from "forge-std/Script.sol"; diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index 86c46694dc61..728e761ed8b8 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -32,8 +32,8 @@ "sourceCodeHash": "0xde4df0f9633dc0cdb1c9f634003ea5b0f7c5c1aebc407bc1b2f44c0ecf938649" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0x620481066bd0979c409ed9c089d32a1b7a05c610509222901ee3e73b0dc5565d", - "sourceCodeHash": "0x1bd201aef876cd32a34f8b100362df87ffaa0c1ddfbf5a19a5c43ced4c26d791" + "initCodeHash": "0x944deadee322fdbae8a8fffd16deceb3766509cfb54da06adb8aa84473f79f53", + "sourceCodeHash": "0x1a48119cbc0b778a4dd3454179060b71361ba44b61af1ac6398cc9274bb5e89f" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xbe2c0c81b3459014f287d8c89cdc0d27dde5d1f44e5d024fa1e4773ddc47c190", @@ -148,8 +148,8 @@ "sourceCodeHash": "0x50ed780b621521047ed36ffb260032f2e5ec287f3e1ab3d742c7de45febb280d" }, "src/cannon/PreimageOracle.sol": { - "initCodeHash": "0x801e52f9c8439fcf7089575fa93272dfb874641dbfc7d82f36d979c987271c0b", - "sourceCodeHash": "0xdb9421a552e6d7581b3db9e4c2a02d8210ad6ca66ba0f8703d77f7cd4b8e132b" + "initCodeHash": 
"0x3690e6dafe588c29de74790123bf6de5b0f741869bf5dbd8a122fdef96cab733", + "sourceCodeHash": "0x19b48b7d5fcb296cacf0cb15326b2e12a9556d6d811a16cbe2344792afa30427" }, "src/dispute/AnchorStateRegistry.sol": { "initCodeHash": "0x13d00eef8c3f769863fc766180acc8586f5da309ca0a098e67d4d90bd3243341", diff --git a/packages/contracts-bedrock/snapshots/abi/ProxyAdmin.json b/packages/contracts-bedrock/snapshots/abi/ProxyAdmin.json index fbf37d51b40d..0cd7aff17952 100644 --- a/packages/contracts-bedrock/snapshots/abi/ProxyAdmin.json +++ b/packages/contracts-bedrock/snapshots/abi/ProxyAdmin.json @@ -15,7 +15,7 @@ "name": "addressManager", "outputs": [ { - "internalType": "contract AddressManager", + "internalType": "contract IAddressManager", "name": "", "type": "address" } @@ -171,7 +171,7 @@ { "inputs": [ { - "internalType": "contract AddressManager", + "internalType": "contract IAddressManager", "name": "_address", "type": "address" } diff --git a/packages/contracts-bedrock/snapshots/storageLayout/ProxyAdmin.json b/packages/contracts-bedrock/snapshots/storageLayout/ProxyAdmin.json index 70f8300e6bed..a0b6f46bf85e 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/ProxyAdmin.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/ProxyAdmin.json @@ -25,7 +25,7 @@ "label": "addressManager", "offset": 0, "slot": "3", - "type": "contract AddressManager" + "type": "contract IAddressManager" }, { "bytes": "1", diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index 5a00bb6f6613..7107fd4fe091 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -13,6 +13,7 @@ import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; import { 
ISystemConfigV160 } from "src/L1/interfaces/ISystemConfigV160.sol"; +import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; import { Proxy } from "src/universal/Proxy.sol"; import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; @@ -127,8 +128,8 @@ contract OPContractsManager is ISemver, Initializable { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.13 - string public constant version = "1.0.0-beta.13"; + /// @custom:semver 1.0.0-beta.14 + string public constant version = "1.0.0-beta.14"; /// @notice Represents the interface version so consumers know how to decode the DeployOutput struct /// that's emitted in the `Deployed` event. Whenever that struct changes, a new version should be used. @@ -240,7 +241,7 @@ contract OPContractsManager is ISemver, Initializable { output.addressManager = AddressManager(Blueprint.deployFrom(blueprint.addressManager, salt)); output.opChainProxyAdmin = ProxyAdmin(Blueprint.deployFrom(blueprint.proxyAdmin, salt, abi.encode(address(this)))); - output.opChainProxyAdmin.setAddressManager(output.addressManager); + output.opChainProxyAdmin.setAddressManager(IAddressManager(address(output.addressManager))); // -------- Deploy Proxy Contracts -------- diff --git a/packages/contracts-bedrock/src/cannon/PreimageOracle.sol b/packages/contracts-bedrock/src/cannon/PreimageOracle.sol index c3fbc2b498ad..ff1affefc6f5 100644 --- a/packages/contracts-bedrock/src/cannon/PreimageOracle.sol +++ b/packages/contracts-bedrock/src/cannon/PreimageOracle.sol @@ -1,18 +1,20 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { IPreimageOracle } from "./interfaces/IPreimageOracle.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { PreimageKeyLib } from "./PreimageKeyLib.sol"; +// Libraries import { LibKeccak } from "@lib-keccak/LibKeccak.sol"; +import { PreimageKeyLib } from "src/cannon/PreimageKeyLib.sol"; import "src/cannon/libraries/CannonErrors.sol"; import 
"src/cannon/libraries/CannonTypes.sol"; +// Interfaces +import { ISemver } from "src/universal/interfaces/ISemver.sol"; + /// @title PreimageOracle /// @notice A contract for storing permissioned pre-images. /// @custom:attribution Solady /// @custom:attribution Beacon Deposit Contract <0x00000000219ab540356cbb839cbe05303d7705fa> -contract PreimageOracle is IPreimageOracle, ISemver { +contract PreimageOracle is ISemver { //////////////////////////////////////////////////////////////// // Constants & Immutables // //////////////////////////////////////////////////////////////// @@ -31,8 +33,8 @@ contract PreimageOracle is IPreimageOracle, ISemver { uint256 public constant PRECOMPILE_CALL_RESERVED_GAS = 100_000; /// @notice The semantic version of the Preimage Oracle contract. - /// @custom:semver 1.1.3-beta.2 - string public constant version = "1.1.3-beta.2"; + /// @custom:semver 1.1.3-beta.3 + string public constant version = "1.1.3-beta.3"; //////////////////////////////////////////////////////////////// // Authorized Preimage Parts // @@ -107,7 +109,11 @@ contract PreimageOracle is IPreimageOracle, ISemver { // Standard Preimage Route (External) // //////////////////////////////////////////////////////////////// - /// @inheritdoc IPreimageOracle + /// @notice Reads a preimage from the oracle. + /// @param _key The key of the preimage to read. + /// @param _offset The offset of the preimage to read. + /// @return dat_ The preimage data. + /// @return datLen_ The length of the preimage data. function readPreimage(bytes32 _key, uint256 _offset) external view returns (bytes32 dat_, uint256 datLen_) { require(preimagePartOk[_key][_offset], "pre-image must exist"); @@ -123,7 +129,27 @@ contract PreimageOracle is IPreimageOracle, ISemver { dat_ = preimageParts[_key][_offset]; } - /// @inheritdoc IPreimageOracle + /// @notice Loads of local data part into the preimage oracle. + /// @param _ident The identifier of the local data. 
+ /// @param _localContext The local key context for the preimage oracle. Optionally, can be set as a constant + /// if the caller only requires one set of local keys. + /// @param _word The local data word. + /// @param _size The number of bytes in `_word` to load. + /// @param _partOffset The offset of the local data part to write to the oracle. + /// @dev The local data parts are loaded into the preimage oracle under the context + /// of the caller - no other account can write to the caller's context + /// specific data. + /// + /// There are 5 local data identifiers: + /// ┌────────────┬────────────────────────┐ + /// │ Identifier │ Data │ + /// ├────────────┼────────────────────────┤ + /// │ 1 │ L1 Head Hash (bytes32) │ + /// │ 2 │ Output Root (bytes32) │ + /// │ 3 │ Root Claim (bytes32) │ + /// │ 4 │ L2 Block Number (u64) │ + /// │ 5 │ Chain ID (u64) │ + /// └────────────┴────────────────────────┘ function loadLocalData( uint256 _ident, bytes32 _localContext, @@ -163,7 +189,10 @@ contract PreimageOracle is IPreimageOracle, ISemver { preimageLengths[key_] = _size; } - /// @inheritdoc IPreimageOracle + /// @notice Prepares a preimage to be read by keccak256 key, starting at the given offset and up to 32 bytes + /// (clipped at preimage length, if out of data). + /// @param _partOffset The offset of the preimage to read. + /// @param _preimage The preimage data. function loadKeccak256PreimagePart(uint256 _partOffset, bytes calldata _preimage) external { uint256 size; bytes32 key; @@ -198,7 +227,10 @@ contract PreimageOracle is IPreimageOracle, ISemver { preimageLengths[key] = size; } - /// @inheritdoc IPreimageOracle + /// @notice Prepares a preimage to be read by sha256 key, starting at the given offset and up to 32 bytes + /// (clipped at preimage length, if out of data). + /// @param _partOffset The offset of the preimage to read. + /// @param _preimage The preimage data. 
function loadSha256PreimagePart(uint256 _partOffset, bytes calldata _preimage) external { uint256 size; bytes32 key; @@ -247,7 +279,13 @@ contract PreimageOracle is IPreimageOracle, ISemver { preimageLengths[key] = size; } - /// @inheritdoc IPreimageOracle + /// @notice Verifies that `p(_z) = _y` given `_commitment` that corresponds to the polynomial `p(x)` and a KZG + // proof. The value `y` is the pre-image, and the preimage key is `5 ++ keccak256(_commitment ++ z)[1:]`. + /// @param _z Big endian point value. Part of the preimage key. + /// @param _y Big endian point value. The preimage for the key. + /// @param _commitment The commitment to the polynomial. 48 bytes, part of the preimage key. + /// @param _proof The KZG proof, part of the preimage key. + /// @param _partOffset The offset of the preimage to store. function loadBlobPreimagePart( uint256 _z, uint256 _y, @@ -338,7 +376,13 @@ contract PreimageOracle is IPreimageOracle, ISemver { preimageLengths[key] = 32; } - /// @inheritdoc IPreimageOracle + /// @notice Prepares a precompile result to be read by a precompile key for the specified offset. + /// The precompile result data is a concatenation of the precompile call status byte and its return data. + /// The preimage key is `6 ++ keccak256(precompile ++ input)[1:]`. + /// @param _partOffset The offset of the precompile result being loaded. + /// @param _precompile The precompile address + /// @param _requiredGas The gas required to fully execute an L1 precompile. + /// @param _input The input to the precompile call. 
function loadPrecompilePreimagePart( uint256 _partOffset, address _precompile, diff --git a/packages/contracts-bedrock/src/cannon/interfaces/IPreimageOracle.sol b/packages/contracts-bedrock/src/cannon/interfaces/IPreimageOracle.sol index 79ee56f821c9..4a885d3dd03b 100644 --- a/packages/contracts-bedrock/src/cannon/interfaces/IPreimageOracle.sol +++ b/packages/contracts-bedrock/src/cannon/interfaces/IPreimageOracle.sol @@ -88,4 +88,9 @@ interface IPreimageOracle { bytes calldata _input ) external; + + /// @notice Returns the minimum size (in bytes) of a large preimage proposal. + function minProposalSize() external view returns (uint256); + + function __constructor__(uint256 _minProposalSize, uint256 _challengePeriod) external; } diff --git a/packages/contracts-bedrock/src/legacy/interfaces/IResolvedDelegateProxy.sol b/packages/contracts-bedrock/src/legacy/interfaces/IResolvedDelegateProxy.sol index abeb3817d9be..b3201ff0b1c7 100644 --- a/packages/contracts-bedrock/src/legacy/interfaces/IResolvedDelegateProxy.sol +++ b/packages/contracts-bedrock/src/legacy/interfaces/IResolvedDelegateProxy.sol @@ -1,8 +1,14 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; +import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; + /// @title IResolvedDelegateProxy /// @notice Interface for the ResolvedDelegateProxy contract. 
interface IResolvedDelegateProxy { fallback() external payable; + + receive() external payable; + + function __constructor__(IAddressManager _addressManager, string memory _implementationName) external; } diff --git a/packages/contracts-bedrock/src/universal/ProxyAdmin.sol b/packages/contracts-bedrock/src/universal/ProxyAdmin.sol index e554345d4264..dec119398c0f 100644 --- a/packages/contracts-bedrock/src/universal/ProxyAdmin.sol +++ b/packages/contracts-bedrock/src/universal/ProxyAdmin.sol @@ -1,13 +1,18 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Contracts import { Ownable } from "@openzeppelin/contracts/access/Ownable.sol"; -import { Proxy } from "src/universal/Proxy.sol"; -import { AddressManager } from "src/legacy/AddressManager.sol"; -import { L1ChugSplashProxy } from "src/legacy/L1ChugSplashProxy.sol"; + +// Libraries import { Constants } from "src/libraries/Constants.sol"; -import { IStaticERC1967Proxy } from "src/universal/interfaces/IStaticERC1967Proxy.sol"; + +// Interfaces +import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; +import { IL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; import { IStaticL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; +import { IStaticERC1967Proxy } from "src/universal/interfaces/IStaticERC1967Proxy.sol"; +import { IProxy } from "src/universal/interfaces/IProxy.sol"; /// @title ProxyAdmin /// @notice This is an auxiliary contract meant to be assigned as the admin of an ERC1967 Proxy, @@ -34,7 +39,7 @@ contract ProxyAdmin is Ownable { /// @notice The address of the address manager, this is required to manage the /// ResolvedDelegateProxy type. - AddressManager public addressManager; + IAddressManager public addressManager; /// @notice A legacy upgrading indicator used by the old Chugsplash Proxy. bool internal upgrading; @@ -63,7 +68,7 @@ contract ProxyAdmin is Ownable { /// @notice Set the address of the AddressManager. 
This is required to manage legacy /// ResolvedDelegateProxy type proxy contracts. /// @param _address Address of the AddressManager. - function setAddressManager(AddressManager _address) external onlyOwner { + function setAddressManager(IAddressManager _address) external onlyOwner { addressManager = _address; } @@ -131,9 +136,9 @@ contract ProxyAdmin is Ownable { function changeProxyAdmin(address payable _proxy, address _newAdmin) external onlyOwner { ProxyType ptype = proxyType[_proxy]; if (ptype == ProxyType.ERC1967) { - Proxy(_proxy).changeAdmin(_newAdmin); + IProxy(_proxy).changeAdmin(_newAdmin); } else if (ptype == ProxyType.CHUGSPLASH) { - L1ChugSplashProxy(_proxy).setOwner(_newAdmin); + IL1ChugSplashProxy(_proxy).setOwner(_newAdmin); } else if (ptype == ProxyType.RESOLVED) { addressManager.transferOwnership(_newAdmin); } else { @@ -147,9 +152,9 @@ contract ProxyAdmin is Ownable { function upgrade(address payable _proxy, address _implementation) public onlyOwner { ProxyType ptype = proxyType[_proxy]; if (ptype == ProxyType.ERC1967) { - Proxy(_proxy).upgradeTo(_implementation); + IProxy(_proxy).upgradeTo(_implementation); } else if (ptype == ProxyType.CHUGSPLASH) { - L1ChugSplashProxy(_proxy).setStorage( + IL1ChugSplashProxy(_proxy).setStorage( Constants.PROXY_IMPLEMENTATION_ADDRESS, bytes32(uint256(uint160(_implementation))) ); } else if (ptype == ProxyType.RESOLVED) { @@ -178,7 +183,7 @@ contract ProxyAdmin is Ownable { { ProxyType ptype = proxyType[_proxy]; if (ptype == ProxyType.ERC1967) { - Proxy(_proxy).upgradeToAndCall{ value: msg.value }(_implementation, _data); + IProxy(_proxy).upgradeToAndCall{ value: msg.value }(_implementation, _data); } else { // reverts if proxy type is unknown upgrade(_proxy, _implementation); diff --git a/packages/contracts-bedrock/src/universal/interfaces/IOptimismMintableERC721Factory.sol b/packages/contracts-bedrock/src/universal/interfaces/IOptimismMintableERC721Factory.sol new file mode 100644 index 
000000000000..2b09da39e515 --- /dev/null +++ b/packages/contracts-bedrock/src/universal/interfaces/IOptimismMintableERC721Factory.sol @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface IOptimismMintableERC721Factory { + event OptimismMintableERC721Created(address indexed localToken, address indexed remoteToken, address deployer); + + function BRIDGE() external view returns (address); + function REMOTE_CHAIN_ID() external view returns (uint256); + function createOptimismMintableERC721( + address _remoteToken, + string memory _name, + string memory _symbol + ) + external + returns (address); + function isOptimismMintableERC721(address) external view returns (bool); + function version() external view returns (string memory); + + function __constructor__(address _bridge, uint256 _remoteChainId) external; +} diff --git a/packages/contracts-bedrock/src/universal/interfaces/IProxy.sol b/packages/contracts-bedrock/src/universal/interfaces/IProxy.sol new file mode 100644 index 000000000000..a2c90f80828c --- /dev/null +++ b/packages/contracts-bedrock/src/universal/interfaces/IProxy.sol @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface IProxy { + event AdminChanged(address previousAdmin, address newAdmin); + event Upgraded(address indexed implementation); + + fallback() external payable; + + receive() external payable; + + function admin() external returns (address); + function changeAdmin(address _admin) external; + function implementation() external returns (address); + function upgradeTo(address _implementation) external; + function upgradeToAndCall(address _implementation, bytes memory _data) external payable returns (bytes memory); + + function __constructor__(address _admin) external; +} diff --git a/packages/contracts-bedrock/src/universal/interfaces/IProxyAdmin.sol b/packages/contracts-bedrock/src/universal/interfaces/IProxyAdmin.sol new file mode 100644 index 000000000000..b35947e6cd78 --- 
/dev/null +++ b/packages/contracts-bedrock/src/universal/interfaces/IProxyAdmin.sol @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; + +interface IProxyAdmin { + enum ProxyType { + ERC1967, + CHUGSPLASH, + RESOLVED + } + + event OwnershipTransferred(address indexed previousOwner, address indexed newOwner); + + function addressManager() external view returns (IAddressManager); + function changeProxyAdmin(address payable _proxy, address _newAdmin) external; + function getProxyAdmin(address payable _proxy) external view returns (address); + function getProxyImplementation(address _proxy) external view returns (address); + function implementationName(address) external view returns (string memory); + function isUpgrading() external view returns (bool); + function owner() external view returns (address); + function proxyType(address) external view returns (ProxyType); + function renounceOwnership() external; + function setAddress(string memory _name, address _address) external; + function setAddressManager(IAddressManager _address) external; + function setImplementationName(address _address, string memory _name) external; + function setProxyType(address _address, ProxyType _type) external; + function setUpgrading(bool _upgrading) external; + function transferOwnership(address newOwner) external; // nosemgrep + function upgrade(address payable _proxy, address _implementation) external; + function upgradeAndCall(address payable _proxy, address _implementation, bytes memory _data) external payable; + + function __constructor__(address _owner) external; +} diff --git a/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol b/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol index 5304cf797449..dd14b349c68a 100644 --- a/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol +++ 
b/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol @@ -8,7 +8,6 @@ import { CommitmentType } from "src/L1/interfaces/IDataAvailabilityChallenge.sol"; import { computeCommitmentKeccak256 } from "src/L1/DataAvailabilityChallenge.sol"; -import { Proxy } from "src/universal/Proxy.sol"; import { CommonTest } from "test/setup/CommonTest.sol"; import { Preinstalls } from "src/libraries/Preinstalls.sol"; diff --git a/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol index 0472c0781ce8..6861a569c20b 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol @@ -10,7 +10,6 @@ import { NextImpl } from "test/mocks/NextImpl.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Contracts -import { Proxy } from "src/universal/Proxy.sol"; import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; // Libraries @@ -27,6 +26,7 @@ import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; import { IL2OutputOracle } from "src/L1/interfaces/IL2OutputOracle.sol"; import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; +import { IProxy } from "src/universal/interfaces/IProxy.sol"; contract OptimismPortal_Test is CommonTest { address depositor; @@ -1173,10 +1173,10 @@ contract OptimismPortalUpgradeable_Test is CommonTest { vm.startPrank(EIP1967Helper.getAdmin(address(optimismPortal))); // The value passed to the initialize must be larger than the last value // that initialize was called with. 
- Proxy(payable(address(optimismPortal))).upgradeToAndCall( + IProxy(payable(address(optimismPortal))).upgradeToAndCall( address(nextImpl), abi.encodeWithSelector(NextImpl.initialize.selector, 2) ); - assertEq(Proxy(payable(address(optimismPortal))).implementation(), address(nextImpl)); + assertEq(IProxy(payable(address(optimismPortal))).implementation(), address(nextImpl)); // Verify that the NextImpl contract initialized its values according as expected bytes32 slot21After = vm.load(address(optimismPortal), bytes32(uint256(21))); diff --git a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol index c131995c1a6b..3faa7e3d2261 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol @@ -10,7 +10,6 @@ import { NextImpl } from "test/mocks/NextImpl.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Contracts -import { Proxy } from "src/universal/Proxy.sol"; import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; // Libraries @@ -29,6 +28,7 @@ import { IL1Block } from "src/L2/interfaces/IL1Block.sol"; import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; +import { IProxy } from "src/universal/interfaces/IProxy.sol"; contract OptimismPortal2_Test is CommonTest { address depositor; @@ -1422,10 +1422,10 @@ contract OptimismPortal2_Upgradeable_Test is CommonTest { vm.startPrank(EIP1967Helper.getAdmin(address(optimismPortal2))); // The value passed to the initialize must be larger than the last value // that initialize was called with. 
- Proxy(payable(address(optimismPortal2))).upgradeToAndCall( + IProxy(payable(address(optimismPortal2))).upgradeToAndCall( address(nextImpl), abi.encodeWithSelector(NextImpl.initialize.selector, 2) ); - assertEq(Proxy(payable(address(optimismPortal2))).implementation(), address(nextImpl)); + assertEq(IProxy(payable(address(optimismPortal2))).implementation(), address(nextImpl)); // Verify that the NextImpl contract initialized its values according as expected bytes32 slot21After = vm.load(address(optimismPortal2), bytes32(uint256(21))); diff --git a/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol b/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol index 957d2b914f38..41eed4a930e6 100644 --- a/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol +++ b/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol @@ -1,17 +1,15 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities +// Testing import { CommonTest } from "test/setup/CommonTest.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Libraries import { Constants } from "src/libraries/Constants.sol"; -// Target contract dependencies -import { Proxy } from "src/universal/Proxy.sol"; - -// Target contract +// Interfaces +import { IProxy } from "src/universal/interfaces/IProxy.sol"; import { IProtocolVersions, ProtocolVersion } from "src/L1/interfaces/IProtocolVersions.sol"; contract ProtocolVersions_Init is CommonTest { @@ -57,7 +55,7 @@ contract ProtocolVersions_Initialize_Test is ProtocolVersions_Init { emit ConfigUpdate(0, IProtocolVersions.UpdateType.RECOMMENDED_PROTOCOL_VERSION, abi.encode(recommended)); vm.prank(EIP1967Helper.getAdmin(address(protocolVersions))); - Proxy(payable(address(protocolVersions))).upgradeToAndCall( + IProxy(payable(address(protocolVersions))).upgradeToAndCall( address(protocolVersionsImpl), abi.encodeCall( IProtocolVersions.initialize, diff --git a/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol 
b/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol index f315b5212fd6..6d01cdb30867 100644 --- a/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol +++ b/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol @@ -1,17 +1,16 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities +// Testing import { Test } from "forge-std/Test.sol"; +// Contracts +import { ResourceMetering } from "src/L1/ResourceMetering.sol"; + // Libraries import { Constants } from "src/libraries/Constants.sol"; -// Target contract dependencies -import { Proxy } from "src/universal/Proxy.sol"; - -// Target contract -import { ResourceMetering } from "src/L1/ResourceMetering.sol"; +// Interfaces import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; contract MeterUser is ResourceMetering { diff --git a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol index ddfafc0edb2f..91819d8ef70a 100644 --- a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol +++ b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol @@ -7,7 +7,6 @@ import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Contracts import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; -import { Proxy } from "src/universal/Proxy.sol"; // Libraries import { Constants } from "src/libraries/Constants.sol"; diff --git a/packages/contracts-bedrock/test/cannon/MIPS.t.sol b/packages/contracts-bedrock/test/cannon/MIPS.t.sol index 9aafbdb5421d..2843b876e4ff 100644 --- a/packages/contracts-bedrock/test/cannon/MIPS.t.sol +++ b/packages/contracts-bedrock/test/cannon/MIPS.t.sol @@ -1,14 +1,22 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Testing import { CommonTest } from "test/setup/CommonTest.sol"; + +// Contracts import { MIPS } from "src/cannon/MIPS.sol"; import { PreimageOracle } from "src/cannon/PreimageOracle.sol"; + +// Libraries import { MIPSInstructions } from 
"src/cannon/libraries/MIPSInstructions.sol"; import { MIPSSyscalls as sys } from "src/cannon/libraries/MIPSSyscalls.sol"; import { InvalidExitedValue, InvalidMemoryProof } from "src/cannon/libraries/CannonErrors.sol"; import "src/dispute/lib/Types.sol"; +// Interfaces +import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; + contract MIPS_Test is CommonTest { MIPS internal mips; PreimageOracle internal oracle; @@ -16,7 +24,7 @@ contract MIPS_Test is CommonTest { function setUp() public virtual override { super.setUp(); oracle = new PreimageOracle(0, 0); - mips = new MIPS(oracle); + mips = new MIPS(IPreimageOracle(address(oracle))); vm.store(address(mips), 0x0, bytes32(abi.encode(address(oracle)))); vm.label(address(oracle), "PreimageOracle"); vm.label(address(mips), "MIPS"); diff --git a/packages/contracts-bedrock/test/cannon/MIPS2.t.sol b/packages/contracts-bedrock/test/cannon/MIPS2.t.sol index 4c02d7a0bdd1..59b3e9e17eb4 100644 --- a/packages/contracts-bedrock/test/cannon/MIPS2.t.sol +++ b/packages/contracts-bedrock/test/cannon/MIPS2.t.sol @@ -1,13 +1,21 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Testing import { CommonTest } from "test/setup/CommonTest.sol"; + +// Contracts import { MIPS2 } from "src/cannon/MIPS2.sol"; import { PreimageOracle } from "src/cannon/PreimageOracle.sol"; + +// Libraries import { MIPSSyscalls as sys } from "src/cannon/libraries/MIPSSyscalls.sol"; import { MIPSInstructions as ins } from "src/cannon/libraries/MIPSInstructions.sol"; -import "src/dispute/lib/Types.sol"; import { InvalidExitedValue, InvalidMemoryProof, InvalidSecondMemoryProof } from "src/cannon/libraries/CannonErrors.sol"; +import "src/dispute/lib/Types.sol"; + +// Interfaces +import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; contract ThreadStack { bytes32 internal constant EMPTY_THREAD_ROOT = hex"ad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"; @@ -127,7 +135,7 @@ contract MIPS2_Test is 
CommonTest { function setUp() public virtual override { super.setUp(); oracle = new PreimageOracle(0, 0); - mips = new MIPS2(oracle); + mips = new MIPS2(IPreimageOracle(address(oracle))); threading = new Threading(); vm.store(address(mips), 0x0, bytes32(abi.encode(address(oracle)))); vm.label(address(oracle), "PreimageOracle"); diff --git a/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol index bc6537e460e4..8cfb602e3d31 100644 --- a/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol @@ -54,7 +54,7 @@ contract FaultDisputeGame_Init is DisputeGameFactory_Init { // Set preimage oracle challenge period to something arbitrary (4 seconds) just so we can // actually test the clock extensions later on. This is not a realistic value. PreimageOracle oracle = new PreimageOracle(0, 4); - AlphabetVM _vm = new AlphabetVM(absolutePrestate, oracle); + AlphabetVM _vm = new AlphabetVM(absolutePrestate, IPreimageOracle(address(oracle))); // Deploy an implementation of the fault game gameImpl = IFaultDisputeGame( @@ -123,7 +123,7 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { /// @dev Tests that the constructor of the `FaultDisputeGame` reverts when the `MAX_GAME_DEPTH` parameter is /// greater than `LibPosition.MAX_POSITION_BITLEN - 1`. 
function testFuzz_constructor_maxDepthTooLarge_reverts(uint256 _maxGameDepth) public { - AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, new PreimageOracle(0, 0)); + AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, IPreimageOracle(address(new PreimageOracle(0, 0)))); _maxGameDepth = bound(_maxGameDepth, LibPosition.MAX_POSITION_BITLEN, type(uint256).max - 1); vm.expectRevert(MaxDepthTooLarge.selector); @@ -148,7 +148,7 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { _challengePeriod = bound(_challengePeriod, uint256(type(uint64).max) + 1, type(uint256).max); PreimageOracle oracle = new PreimageOracle(0, 0); - AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, oracle); + AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, IPreimageOracle(address(oracle))); // PreimageOracle constructor will revert if the challenge period is too large, so we need // to mock the call to pretend this is a bugged implementation where the challenge period @@ -175,7 +175,7 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { /// @dev Tests that the constructor of the `FaultDisputeGame` reverts when the `_splitDepth` /// parameter is greater than or equal to the `MAX_GAME_DEPTH` function testFuzz_constructor_invalidSplitDepth_reverts(uint256 _splitDepth) public { - AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, new PreimageOracle(0, 0)); + AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, IPreimageOracle(address(new PreimageOracle(0, 0)))); uint256 maxGameDepth = 2 ** 3; _splitDepth = bound(_splitDepth, maxGameDepth - 1, type(uint256).max); @@ -197,7 +197,7 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { /// @dev Tests that the constructor of the `FaultDisputeGame` reverts when the `_splitDepth` /// parameter is less than the minimum split depth (currently 2). 
function testFuzz_constructor_lowSplitDepth_reverts(uint256 _splitDepth) public { - AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, new PreimageOracle(0, 0)); + AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, IPreimageOracle(address(new PreimageOracle(0, 0)))); uint256 minSplitDepth = 2; _splitDepth = bound(_splitDepth, 0, minSplitDepth - 1); @@ -224,7 +224,7 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { ) public { - AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, new PreimageOracle(0, 0)); + AlphabetVM alphabetVM = new AlphabetVM(absolutePrestate, IPreimageOracle(address(new PreimageOracle(0, 0)))); // Force the clock extension * 2 to be greater than the max clock duration, but keep things within // bounds of the uint64 type. diff --git a/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol index 8a3639b71cdf..36577a836df1 100644 --- a/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol @@ -47,7 +47,7 @@ contract PermissionedDisputeGame_Init is DisputeGameFactory_Init { // Set the extra data for the game creation extraData = abi.encode(l2BlockNumber); - AlphabetVM _vm = new AlphabetVM(absolutePrestate, new PreimageOracle(0, 0)); + AlphabetVM _vm = new AlphabetVM(absolutePrestate, IPreimageOracle(address(new PreimageOracle(0, 0)))); // Use a 7 day delayed WETH to simulate withdrawals. 
IDelayedWETH _weth = IDelayedWETH(payable(new DelayedWETH(7 days))); diff --git a/packages/contracts-bedrock/test/mocks/AlphabetVM.sol b/packages/contracts-bedrock/test/mocks/AlphabetVM.sol index b5d940c1cf6e..6ecf74e22868 100644 --- a/packages/contracts-bedrock/test/mocks/AlphabetVM.sol +++ b/packages/contracts-bedrock/test/mocks/AlphabetVM.sol @@ -1,10 +1,13 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.15; -import { IBigStepper, IPreimageOracle } from "src/dispute/interfaces/IBigStepper.sol"; -import { PreimageOracle, PreimageKeyLib } from "src/cannon/PreimageOracle.sol"; +// Libraries +import { PreimageKeyLib } from "src/cannon/PreimageKeyLib.sol"; import "src/dispute/lib/Types.sol"; +// Interfaces +import { IBigStepper, IPreimageOracle } from "src/dispute/interfaces/IBigStepper.sol"; + /// @title AlphabetVM /// @dev A mock VM for the purpose of testing the dispute game infrastructure. Note that this only works /// for games with an execution trace subgame max depth of 3 (8 instructions per subgame). 
@@ -12,7 +15,7 @@ contract AlphabetVM is IBigStepper { Claim internal immutable ABSOLUTE_PRESTATE; IPreimageOracle public oracle; - constructor(Claim _absolutePrestate, PreimageOracle _oracle) { + constructor(Claim _absolutePrestate, IPreimageOracle _oracle) { ABSOLUTE_PRESTATE = _absolutePrestate; oracle = _oracle; } diff --git a/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol b/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol index 07aa2c61958d..cba5fc829086 100644 --- a/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol +++ b/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol @@ -9,9 +9,9 @@ import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Contracts import { OptimismMintableERC20 } from "src/universal/OptimismMintableERC20.sol"; import { OptimismMintableERC20Factory } from "src/universal/OptimismMintableERC20Factory.sol"; -import { Proxy } from "src/universal/Proxy.sol"; // Interfaces +import { IProxy } from "src/universal/interfaces/IProxy.sol"; import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; contract OptimismMintableTokenFactory_Test is Bridge_Initializer { @@ -33,7 +33,7 @@ contract OptimismMintableTokenFactory_Test is Bridge_Initializer { /// @notice Tests that the upgrade is successful. function test_upgrading_succeeds() external { - Proxy proxy = Proxy(deploy.mustGetAddress("OptimismMintableERC20FactoryProxy")); + IProxy proxy = IProxy(deploy.mustGetAddress("OptimismMintableERC20FactoryProxy")); // Check an unused slot before upgrading. 
bytes32 slot21Before = vm.load(address(l1OptimismMintableERC20Factory), bytes32(uint256(21))); assertEq(bytes32(0), slot21Before); diff --git a/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol b/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol index 4de72c872572..e212644c9d50 100644 --- a/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol +++ b/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol @@ -1,13 +1,19 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Testing import { Test } from "forge-std/Test.sol"; +import { SimpleStorage } from "test/universal/Proxy.t.sol"; + +// Contracts import { Proxy } from "src/universal/Proxy.sol"; import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; -import { SimpleStorage } from "test/universal/Proxy.t.sol"; +import { AddressManager } from "src/legacy/AddressManager.sol"; import { L1ChugSplashProxy } from "src/legacy/L1ChugSplashProxy.sol"; import { ResolvedDelegateProxy } from "src/legacy/ResolvedDelegateProxy.sol"; -import { AddressManager } from "src/legacy/AddressManager.sol"; + +// Interfaces +import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; contract ProxyAdmin_Test is Test { address alice = address(64); @@ -45,7 +51,7 @@ contract ProxyAdmin_Test is Test { // Set the address of the address manager in the admin so that it // can resolve the implementation address of legacy // ResolvedDelegateProxy based proxies. 
- admin.setAddressManager(addressManager); + admin.setAddressManager(IAddressManager(address(addressManager))); // Set the reverse lookup of the ResolvedDelegateProxy // proxy admin.setImplementationName(address(resolved), "a"); @@ -67,7 +73,7 @@ contract ProxyAdmin_Test is Test { function test_setAddressManager_notOwner_reverts() external { vm.expectRevert("Ownable: caller is not the owner"); - admin.setAddressManager(AddressManager((address(0)))); + admin.setAddressManager(IAddressManager((address(0)))); } function test_setImplementationName_notOwner_reverts() external { From 6b2a3fed18b8659f32dcc9f53c456692fbd73e08 Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Fri, 27 Sep 2024 18:00:33 +0200 Subject: [PATCH 066/116] op-node/rollup/derive: Implement Holocene Frame Queue (#12069) * op-node/rollup/derive: Implement Holocene Frame Queue * add FrameQueue test * use non-nil context * address reviews, refactor frame loading and pruning --- op-node/rollup/derive/frame_queue.go | 91 +++++++++- op-node/rollup/derive/frame_queue_test.go | 159 ++++++++++++++++++ op-node/rollup/derive/frame_test.go | 6 + .../rollup/derive/mocks/next_data_provider.go | 78 +++++++++ op-node/rollup/derive/pipeline.go | 6 +- 5 files changed, 328 insertions(+), 12 deletions(-) create mode 100644 op-node/rollup/derive/frame_queue_test.go create mode 100644 op-node/rollup/derive/mocks/next_data_provider.go diff --git a/op-node/rollup/derive/frame_queue.go b/op-node/rollup/derive/frame_queue.go index d57495a80558..77a2703290ce 100644 --- a/op-node/rollup/derive/frame_queue.go +++ b/op-node/rollup/derive/frame_queue.go @@ -6,11 +6,13 @@ import ( "github.com/ethereum/go-ethereum/log" + "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" ) var _ NextFrameProvider = &FrameQueue{} +//go:generate mockery --name NextDataProvider --case snake type NextDataProvider interface { NextData(context.Context) ([]byte, error) Origin() eth.L1BlockRef 
@@ -20,12 +22,14 @@ type FrameQueue struct { log log.Logger frames []Frame prev NextDataProvider + cfg *rollup.Config } -func NewFrameQueue(log log.Logger, prev NextDataProvider) *FrameQueue { +func NewFrameQueue(log log.Logger, cfg *rollup.Config, prev NextDataProvider) *FrameQueue { return &FrameQueue{ log: log, prev: prev, + cfg: cfg, } } @@ -34,18 +38,15 @@ func (fq *FrameQueue) Origin() eth.L1BlockRef { } func (fq *FrameQueue) NextFrame(ctx context.Context) (Frame, error) { - // Find more frames if we need to + // TODO(12157): reset frame queue once at Holocene L1 origin block + + // Only load more frames if necessary if len(fq.frames) == 0 { - if data, err := fq.prev.NextData(ctx); err != nil { + if err := fq.loadNextFrames(ctx); err != nil { return Frame{}, err - } else { - if new, err := ParseFrames(data); err == nil { - fq.frames = append(fq.frames, new...) - } else { - fq.log.Warn("Failed to parse frames", "origin", fq.prev.Origin(), "err", err) - } } } + // If we did not add more frames but still have more data, retry this function. if len(fq.frames) == 0 { return Frame{}, NotEnoughData @@ -56,6 +57,78 @@ func (fq *FrameQueue) NextFrame(ctx context.Context) (Frame, error) { return ret, nil } +func (fq *FrameQueue) loadNextFrames(ctx context.Context) error { + data, err := fq.prev.NextData(ctx) + if err != nil { + return err + } + + if frames, err := ParseFrames(data); err == nil { + fq.frames = append(fq.frames, frames...) + } else { + fq.log.Warn("Failed to parse frames", "origin", fq.prev.Origin(), "err", err) + return nil + } + + // Note: this implementation first parses all frames from the next L1 transaction and only then + // prunes all frames that were parsed. An even more memory-efficient implementation could prune + // the frame queue each time after pulling out only a single frame. + + if fq.cfg.IsHolocene(fq.Origin().Time) { + // We only need to prune the queue after adding more frames to it. 
+ // Moving frames out of the queue to the next stage cannot invalidate any frames in + // the queue. + fq.prune() + } + + return nil +} + +func (fq *FrameQueue) prune() { + fq.frames = pruneFrameQueue(fq.frames) +} + +// pruneFrameQueue prunes the frame queue to only hold contiguous and ordered +// frames, conforming to Holocene frame queue rules. +func pruneFrameQueue(frames []Frame) []Frame { + for i := 0; i < len(frames)-1; { + current, next := frames[i], frames[i+1] + discard := func(d int) { + frames = append(frames[0:i+d], frames[i+1+d:]...) + } + // frames for the same channel ID must arrive in order + if current.ID == next.ID { + if current.IsLast { + discard(1) // discard next + continue + } + if next.FrameNumber != current.FrameNumber+1 { + discard(1) // discard next + continue + } + } else { + // first frames discard previously unclosed channels + if next.FrameNumber == 0 && !current.IsLast { + discard(0) // discard current + // make sure we backwards invalidate more frames of unclosed channel + if i > 0 { + i-- + } + continue + } + // non-first frames of new channels are dropped + if next.FrameNumber != 0 { + discard(1) // discard next + continue + } + } + // We only update the cursor if we didn't remove any frame, so if any frame got removed, the + // checks are applied to the new pair in the queue at the same position. 
+ i++ + } + return frames +} + func (fq *FrameQueue) Reset(_ context.Context, _ eth.L1BlockRef, _ eth.SystemConfig) error { fq.frames = fq.frames[:0] return io.EOF diff --git a/op-node/rollup/derive/frame_queue_test.go b/op-node/rollup/derive/frame_queue_test.go new file mode 100644 index 000000000000..a0a57f4f387d --- /dev/null +++ b/op-node/rollup/derive/frame_queue_test.go @@ -0,0 +1,159 @@ +package derive + +import ( + "bytes" + "context" + "io" + "log/slog" + "testing" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive/mocks" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestPruneFrameQueue(t *testing.T) { + for _, tt := range []struct { + desc string + frames []testFrame + expected []testFrame + }{ + { + desc: "empty", + frames: []testFrame{}, + expected: []testFrame{}, + }, + { + desc: "one", + frames: []testFrame{"a:2:"}, + expected: []testFrame{"a:2:"}, + }, + { + desc: "one-last", + frames: []testFrame{"a:2:!"}, + expected: []testFrame{"a:2:!"}, + }, + { + desc: "last-new", + frames: []testFrame{"a:2:!", "b:0:"}, + expected: []testFrame{"a:2:!", "b:0:"}, + }, + { + desc: "last-ooo", + frames: []testFrame{"a:2:!", "b:1:"}, + expected: []testFrame{"a:2:!"}, + }, + { + desc: "middle-lastooo", + frames: []testFrame{"b:1:", "a:2:!"}, + expected: []testFrame{"b:1:"}, + }, + { + desc: "middle-first", + frames: []testFrame{"b:1:", "a:0:"}, + expected: []testFrame{"a:0:"}, + }, + { + desc: "last-first", + frames: []testFrame{"b:1:!", "a:0:"}, + expected: []testFrame{"b:1:!", "a:0:"}, + }, + { + desc: "last-ooo", + frames: []testFrame{"b:1:!", "b:2:"}, + expected: []testFrame{"b:1:!"}, + }, + { + desc: "ooo", + frames: []testFrame{"b:1:", "b:3:"}, + expected: []testFrame{"b:1:"}, + }, + { + desc: "other-ooo", + frames: []testFrame{"b:1:", 
"c:3:"}, + expected: []testFrame{"b:1:"}, + }, + { + desc: "other-ooo-last", + frames: []testFrame{"b:1:", "c:3:", "b:2:!"}, + expected: []testFrame{"b:1:", "b:2:!"}, + }, + { + desc: "ooo-resubmit", + frames: []testFrame{"b:1:", "b:3:!", "b:2:", "b:3:!"}, + expected: []testFrame{"b:1:", "b:2:", "b:3:!"}, + }, + { + desc: "first-discards-multiple", + frames: []testFrame{"c:0:", "c:1:", "c:2:", "d:0:", "c:3:!"}, + expected: []testFrame{"d:0:"}, + }, + { + desc: "complex", + frames: []testFrame{"b:1:", "b:2:!", "a:0:", "c:1:!", "a:1:", "a:2:!", "c:0:", "c:1:", "d:0:", "c:2:!", "e:0:"}, + expected: []testFrame{"b:1:", "b:2:!", "a:0:", "a:1:", "a:2:!", "e:0:"}, + }, + } { + t.Run(tt.desc, func(t *testing.T) { + pfs := pruneFrameQueue(testFramesToFrames(tt.frames...)) + require.Equal(t, testFramesToFrames(tt.expected...), pfs) + }) + } +} + +func TestFrameQueue_NextFrame(t *testing.T) { + t.Run("pre-holocene", func(t *testing.T) { testFrameQueue_NextFrame(t, false) }) + t.Run("holocene", func(t *testing.T) { testFrameQueue_NextFrame(t, true) }) +} + +func testFrameQueue_NextFrame(t *testing.T, holocene bool) { + lgr := testlog.Logger(t, slog.LevelWarn) + cfg := &rollup.Config{} + dp := mocks.NewNextDataProvider(t) + fq := NewFrameQueue(lgr, cfg, dp) + + inFrames := testFramesToFrames("b:1:", "b:2:!", "a:0:", "c:1:!", "a:1:", "a:2:!", "c:0:", "c:1:", "d:0:", "c:2:!", "e:0:") + var expFrames []Frame + if holocene { + cfg.HoloceneTime = ptr(uint64(0)) + // expect pruned frames with Holocene + expFrames = testFramesToFrames("b:1:", "b:2:!", "a:0:", "a:1:", "a:2:!", "e:0:") + } else { + expFrames = inFrames + } + + var inBuf bytes.Buffer + inBuf.WriteByte(DerivationVersion0) + for _, f := range inFrames { + require.NoError(t, f.MarshalBinary(&inBuf)) + } + + dp.On("Origin").Return(eth.L1BlockRef{}) + dp.On("NextData", mock.Anything).Return(inBuf.Bytes(), nil).Once() + dp.On("NextData", mock.Anything).Return(nil, io.EOF) + + gotFrames := make([]Frame, 0, len(expFrames)) + for 
i := 0; i <= len(inFrames); i++ { // make sure we hit EOF case + frame, err := fq.NextFrame(context.Background()) + if err != nil { + require.ErrorIs(t, err, io.EOF) + break + } + require.NoError(t, err) + gotFrames = append(gotFrames, frame) + } + require.Equal(t, expFrames, gotFrames) +} + +func ptr[T any](t T) *T { return &t } + +func testFramesToFrames(tfs ...testFrame) []Frame { + fs := make([]Frame, 0, len(tfs)) + for _, f := range tfs { + fs = append(fs, f.ToFrame()) + } + return fs +} diff --git a/op-node/rollup/derive/frame_test.go b/op-node/rollup/derive/frame_test.go index 46006398c707..240cc0a58d8d 100644 --- a/op-node/rollup/derive/frame_test.go +++ b/op-node/rollup/derive/frame_test.go @@ -163,6 +163,12 @@ func TestParseFramesInvalidVer(t *testing.T) { require.Error(t, err) } +func TestParseFramesOnlyVersion(t *testing.T) { + frames, err := ParseFrames([]byte{DerivationVersion0}) + require.Empty(t, frames) + require.Error(t, err) +} + func TestParseFrames(t *testing.T) { rng := rand.New(rand.NewSource(time.Now().UnixNano())) numFrames := rng.Intn(16) + 1 diff --git a/op-node/rollup/derive/mocks/next_data_provider.go b/op-node/rollup/derive/mocks/next_data_provider.go new file mode 100644 index 000000000000..e7a14d92eff7 --- /dev/null +++ b/op-node/rollup/derive/mocks/next_data_provider.go @@ -0,0 +1,78 @@ +// Code generated by mockery v2.46.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + eth "github.com/ethereum-optimism/optimism/op-service/eth" + + mock "github.com/stretchr/testify/mock" +) + +// NextDataProvider is an autogenerated mock type for the NextDataProvider type +type NextDataProvider struct { + mock.Mock +} + +// NextData provides a mock function with given fields: _a0 +func (_m *NextDataProvider) NextData(_a0 context.Context) ([]byte, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for NextData") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]byte, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) []byte); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Origin provides a mock function with given fields: +func (_m *NextDataProvider) Origin() eth.L1BlockRef { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Origin") + } + + var r0 eth.L1BlockRef + if rf, ok := ret.Get(0).(func() eth.L1BlockRef); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(eth.L1BlockRef) + } + + return r0 +} + +// NewNextDataProvider creates a new instance of NextDataProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewNextDataProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *NextDataProvider { + mock := &NextDataProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/op-node/rollup/derive/pipeline.go b/op-node/rollup/derive/pipeline.go index a06640086fde..f114e2a4b0d3 100644 --- a/op-node/rollup/derive/pipeline.go +++ b/op-node/rollup/derive/pipeline.go @@ -77,13 +77,13 @@ type DerivationPipeline struct { // NewDerivationPipeline creates a DerivationPipeline, to turn L1 data into L2 block-inputs. func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, l1Fetcher L1Fetcher, l1Blobs L1BlobsFetcher, - altDA AltDAInputFetcher, l2Source L2Source, metrics Metrics) *DerivationPipeline { - + altDA AltDAInputFetcher, l2Source L2Source, metrics Metrics, +) *DerivationPipeline { // Pull stages l1Traversal := NewL1Traversal(log, rollupCfg, l1Fetcher) dataSrc := NewDataSourceFactory(log, rollupCfg, l1Fetcher, l1Blobs, altDA) // auxiliary stage for L1Retrieval l1Src := NewL1Retrieval(log, dataSrc, l1Traversal) - frameQueue := NewFrameQueue(log, l1Src) + frameQueue := NewFrameQueue(log, rollupCfg, l1Src) bank := NewChannelBank(log, rollupCfg, frameQueue, metrics) chInReader := NewChannelInReader(rollupCfg, log, bank, metrics) batchQueue := NewBatchQueue(log, rollupCfg, chInReader, l2Source) From 724b4e51c271144e7c59ad43830e3f573958ee91 Mon Sep 17 00:00:00 2001 From: Mark Tyneway Date: Fri, 27 Sep 2024 09:57:51 -0700 Subject: [PATCH 067/116] contracts-bedrock: better error message (#12148) The build fails when jq is not installed with a mysterious error message, so improve the error message. This will help debugging for new contributors. 
--- .../scripts/checks/check-foundry-install.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/packages/contracts-bedrock/scripts/checks/check-foundry-install.sh b/packages/contracts-bedrock/scripts/checks/check-foundry-install.sh index ccd337e958e7..a0fa104d5bc8 100755 --- a/packages/contracts-bedrock/scripts/checks/check-foundry-install.sh +++ b/packages/contracts-bedrock/scripts/checks/check-foundry-install.sh @@ -5,6 +5,13 @@ CONTRACTS_BASE=$(dirname "$(dirname "$SCRIPT_DIR")") MONOREPO_BASE=$(dirname "$(dirname "$CONTRACTS_BASE")") VERSIONS_FILE="${MONOREPO_BASE}/versions.json" +if ! command -v jq &> /dev/null +then + # shellcheck disable=SC2006 + echo "Please install jq" >&2 + exit 1 +fi + if ! command -v forge &> /dev/null then # shellcheck disable=SC2006 From d141b53e4f52a8eb96a552d46c2e1c6c068b032e Mon Sep 17 00:00:00 2001 From: Michael Amadi Date: Fri, 27 Sep 2024 18:19:38 +0100 Subject: [PATCH 068/116] typo fix (#12179) --- packages/contracts-bedrock/semver-lock.json | 4 ++-- packages/contracts-bedrock/src/cannon/PreimageOracle.sol | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index 728e761ed8b8..bc47a83e80c5 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -148,8 +148,8 @@ "sourceCodeHash": "0x50ed780b621521047ed36ffb260032f2e5ec287f3e1ab3d742c7de45febb280d" }, "src/cannon/PreimageOracle.sol": { - "initCodeHash": "0x3690e6dafe588c29de74790123bf6de5b0f741869bf5dbd8a122fdef96cab733", - "sourceCodeHash": "0x19b48b7d5fcb296cacf0cb15326b2e12a9556d6d811a16cbe2344792afa30427" + "initCodeHash": "0xa0b19e18561da9990c95ebea9750dd901f73147b32b8b234eca0f35073c5a970", + "sourceCodeHash": "0x6235d602f84c4173e7a58666791e3db4c9e9651eaccb20db5aed2f898b76e896" }, "src/dispute/AnchorStateRegistry.sol": { "initCodeHash": 
"0x13d00eef8c3f769863fc766180acc8586f5da309ca0a098e67d4d90bd3243341", diff --git a/packages/contracts-bedrock/src/cannon/PreimageOracle.sol b/packages/contracts-bedrock/src/cannon/PreimageOracle.sol index ff1affefc6f5..ac8e70b8da76 100644 --- a/packages/contracts-bedrock/src/cannon/PreimageOracle.sol +++ b/packages/contracts-bedrock/src/cannon/PreimageOracle.sol @@ -33,8 +33,8 @@ contract PreimageOracle is ISemver { uint256 public constant PRECOMPILE_CALL_RESERVED_GAS = 100_000; /// @notice The semantic version of the Preimage Oracle contract. - /// @custom:semver 1.1.3-beta.3 - string public constant version = "1.1.3-beta.3"; + /// @custom:semver 1.1.3-beta.4 + string public constant version = "1.1.3-beta.4"; //////////////////////////////////////////////////////////////// // Authorized Preimage Parts // @@ -129,7 +129,7 @@ contract PreimageOracle is ISemver { dat_ = preimageParts[_key][_offset]; } - /// @notice Loads of local data part into the preimage oracle. + /// @notice Loads local data parts into the preimage oracle. /// @param _ident The identifier of the local data. /// @param _localContext The local key context for the preimage oracle. Optionally, can be set as a constant /// if the caller only requires one set of local keys. 
From a96b2282eeaf3a967866e9f2f8255f40d61ce5b4 Mon Sep 17 00:00:00 2001 From: Hamdi Allam Date: Fri, 27 Sep 2024 13:00:39 -0700 Subject: [PATCH 069/116] sent message event and l2tol2cdm relayMessage entrypoint (#11592) --- packages/contracts-bedrock/semver-lock.json | 4 +- .../abi/L2ToL2CrossDomainMessenger.json | 130 +++-- .../src/L2/L2ToL2CrossDomainMessenger.sol | 161 +++--- .../IL2ToL2CrossDomainMessenger.sol | 21 +- .../src/libraries/Hashing.sol | 6 +- .../test/L2/L2ToL2CrossDomainMessenger.t.sol | 505 +++++++++--------- 6 files changed, 448 insertions(+), 379 deletions(-) diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index bc47a83e80c5..87d291b2031a 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -112,8 +112,8 @@ "sourceCodeHash": "0xd08a2e6514dbd44e16aa312a1b27b2841a9eab5622cbd05a39c30f543fad673c" }, "src/L2/L2ToL2CrossDomainMessenger.sol": { - "initCodeHash": "0x652e07372d45f0f861aa65b4a73db55871291b875ced02df893a405419de723a", - "sourceCodeHash": "0xc3e73c2d9abf3c7853d2505a83e475d58e96ab5fc5ad7770d04dea5feb9e5717" + "initCodeHash": "0x6f19eb8ff0950156b65cd92872240c0153ac5f3b6f0861d57bf561fdbcacbeac", + "sourceCodeHash": "0xfea53344596d735eff3be945ed1300dc75a6f8b7b2c02c0043af5b0036f5f239" }, "src/L2/OptimismSuperchainERC20.sol": { "initCodeHash": "0xe3dbb0851669708901a4c6bb7ad7d55f9896deeec02cbe53ac58d689ff95b88b", diff --git a/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json b/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json index a5cda3493911..2676f90b0491 100644 --- a/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json +++ b/packages/contracts-bedrock/snapshots/abi/L2ToL2CrossDomainMessenger.json @@ -54,33 +54,40 @@ { "inputs": [ { - "internalType": "uint256", - "name": "_destination", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "_source", - 
"type": "uint256" - }, - { - "internalType": "uint256", - "name": "_nonce", - "type": "uint256" - }, - { - "internalType": "address", - "name": "_sender", - "type": "address" - }, - { - "internalType": "address", - "name": "_target", - "type": "address" + "components": [ + { + "internalType": "address", + "name": "origin", + "type": "address" + }, + { + "internalType": "uint256", + "name": "blockNumber", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "logIndex", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "timestamp", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "chainId", + "type": "uint256" + } + ], + "internalType": "struct ICrossL2Inbox.Identifier", + "name": "_id", + "type": "tuple" }, { "internalType": "bytes", - "name": "_message", + "name": "_sentMessage", "type": "bytes" } ], @@ -111,7 +118,7 @@ "outputs": [ { "internalType": "bytes32", - "name": "msgHash_", + "name": "", "type": "bytes32" } ], @@ -153,6 +160,18 @@ { "anonymous": false, "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "source", + "type": "uint256" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "messageNonce", + "type": "uint256" + }, { "indexed": true, "internalType": "bytes32", @@ -166,6 +185,18 @@ { "anonymous": false, "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "source", + "type": "uint256" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "messageNonce", + "type": "uint256" + }, { "indexed": true, "internalType": "bytes32", @@ -176,9 +207,51 @@ "name": "RelayedMessage", "type": "event" }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "destination", + "type": "uint256" + }, + { + "indexed": true, + "internalType": "address", + "name": "target", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "messageNonce", + "type": "uint256" + }, + { 
+ "indexed": false, + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "message", + "type": "bytes" + } + ], + "name": "SentMessage", + "type": "event" + }, + { + "inputs": [], + "name": "EventPayloadNotSentMessage", + "type": "error" + }, { "inputs": [], - "name": "CrossL2InboxOriginNotL2ToL2CrossDomainMessenger", + "name": "IdOriginNotL2ToL2CrossDomainMessenger", "type": "error" }, { @@ -215,10 +288,5 @@ "inputs": [], "name": "ReentrantCall", "type": "error" - }, - { - "inputs": [], - "name": "RelayMessageCallerNotCrossL2Inbox", - "type": "error" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol b/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol index 3eb72210a109..4c1ffc38760f 100644 --- a/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol @@ -5,6 +5,7 @@ import { Encoding } from "src/libraries/Encoding.sol"; import { Hashing } from "src/libraries/Hashing.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import { CrossL2Inbox } from "src/L2/CrossL2Inbox.sol"; +import { ICrossL2Inbox } from "src/L2/interfaces/ICrossL2Inbox.sol"; import { IL2ToL2CrossDomainMessenger } from "src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol"; import { ISemver } from "src/universal/interfaces/ISemver.sol"; import { SafeCall } from "src/libraries/SafeCall.sol"; @@ -13,14 +14,14 @@ import { TransientReentrancyAware } from "src/libraries/TransientContext.sol"; /// @notice Thrown when a non-written slot in transient storage is attempted to be read from. error NotEntered(); -/// @notice Thrown when attempting to send a message to the chain that the message is being sent from. -error MessageDestinationSameChain(); +/// @notice Thrown when attempting to relay a message where payload origin is not L2ToL2CrossDomainMessenger. 
+error IdOriginNotL2ToL2CrossDomainMessenger(); -/// @notice Thrown when attempting to relay a message and the function caller (msg.sender) is not CrossL2Inbox. -error RelayMessageCallerNotCrossL2Inbox(); +/// @notice Thrown when the payload provided to the relay is not a SentMessage event. +error EventPayloadNotSentMessage(); -/// @notice Thrown when attempting to relay a message where CrossL2Inbox's origin is not L2ToL2CrossDomainMessenger. -error CrossL2InboxOriginNotL2ToL2CrossDomainMessenger(); +/// @notice Thrown when attempting to send a message to the chain that the message is being sent from. +error MessageDestinationSameChain(); /// @notice Thrown when attempting to relay a message whose destination chain is not the chain relaying it. error MessageDestinationNotRelayChain(); @@ -54,12 +55,17 @@ contract L2ToL2CrossDomainMessenger is IL2ToL2CrossDomainMessenger, ISemver, Tra bytes32 internal constant CROSS_DOMAIN_MESSAGE_SOURCE_SLOT = 0x711dfa3259c842fffc17d6e1f1e0fc5927756133a2345ca56b4cb8178589fee7; + /// @notice Event selector for the SentMessage event. Will be removed in favor of reading + // the `selector` property directly once crytic/slithe/#2566 is fixed. + bytes32 internal constant SENT_MESSAGE_EVENT_SELECTOR = + 0x382409ac69001e11931a28435afef442cbfd20d9891907e8fa373ba7d351f320; + /// @notice Current message version identifier. uint16 public constant messageVersion = uint16(0); /// @notice Semantic version. - /// @custom:semver 1.0.0-beta.6 - string public constant version = "1.0.0-beta.6"; + /// @custom:semver 1.0.0-beta.7 + string public constant version = "1.0.0-beta.7"; /// @notice Mapping of message hashes to boolean receipt values. Note that a message will only be present in this /// mapping if it has successfully been relayed on this chain, and can therefore not be relayed again. @@ -70,13 +76,27 @@ contract L2ToL2CrossDomainMessenger is IL2ToL2CrossDomainMessenger, ISemver, Tra /// message. 
uint240 internal msgNonce; + /// @notice Emitted whenever a message is sent to a destination + /// @param destination Chain ID of the destination chain. + /// @param target Target contract or wallet address. + /// @param messageNonce Nonce associated with the messsage sent + /// @param sender Address initiating this message call + /// @param message Message payload to call target with. + event SentMessage( + uint256 indexed destination, address indexed target, uint256 indexed messageNonce, address sender, bytes message + ); + /// @notice Emitted whenever a message is successfully relayed on this chain. - /// @param messageHash Hash of the message that was relayed. - event RelayedMessage(bytes32 indexed messageHash); + /// @param source Chain ID of the source chain. + /// @param messageNonce Nonce associated with the messsage sent + /// @param messageHash Hash of the message that was relayed. + event RelayedMessage(uint256 indexed source, uint256 indexed messageNonce, bytes32 indexed messageHash); /// @notice Emitted whenever a message fails to be relayed on this chain. - /// @param messageHash Hash of the message that failed to be relayed. - event FailedRelayedMessage(bytes32 indexed messageHash); + /// @param source Chain ID of the source chain. + /// @param messageNonce Nonce associated with the messsage sent + /// @param messageHash Hash of the message that failed to be relayed. + event FailedRelayedMessage(uint256 indexed source, uint256 indexed messageNonce, bytes32 indexed messageHash); /// @notice Retrieves the sender of the current cross domain message. If not entered, reverts. /// @return sender_ Address of the sender of the current cross domain message. @@ -100,90 +120,81 @@ contract L2ToL2CrossDomainMessenger is IL2ToL2CrossDomainMessenger, ISemver, Tra /// @param _destination Chain ID of the destination chain. /// @param _target Target contract or wallet address. /// @param _message Message payload to call target with. 
- /// @return msgHash_ The hash of the message being sent, which can be used for tracking whether - /// the message has successfully been relayed. - function sendMessage( - uint256 _destination, - address _target, - bytes calldata _message - ) - external - returns (bytes32 msgHash_) - { + /// @return The hash of the message being sent, used to track whether the message has successfully been relayed. + function sendMessage(uint256 _destination, address _target, bytes calldata _message) external returns (bytes32) { if (_destination == block.chainid) revert MessageDestinationSameChain(); if (_target == Predeploys.CROSS_L2_INBOX) revert MessageTargetCrossL2Inbox(); if (_target == Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) revert MessageTargetL2ToL2CrossDomainMessenger(); - (uint256 source, uint256 nonce, address sender) = (block.chainid, messageNonce(), msg.sender); - bytes memory data = abi.encodeCall( - L2ToL2CrossDomainMessenger.relayMessage, (_destination, source, nonce, sender, _target, _message) - ); - msgHash_ = Hashing.hashL2toL2CrossDomainMessengerRelayMessage({ + uint256 nonce = messageNonce(); + emit SentMessage(_destination, _target, nonce, msg.sender, _message); + + msgNonce++; + + return Hashing.hashL2toL2CrossDomainMessage({ _destination: _destination, - _source: source, + _source: block.chainid, _nonce: nonce, - _sender: sender, + _sender: msg.sender, _target: _target, _message: _message }); - assembly { - log0(add(data, 0x20), mload(data)) - } - msgNonce++; } - /// @notice Relays a message that was sent by the other CrossDomainMessenger contract. Can only be executed via - /// cross-chain call from the other messenger OR if the message was already received once and is currently - /// being replayed. - /// @param _destination Chain ID of the destination chain. - /// @param _source Chain ID of the source chain. - /// @param _nonce Nonce of the message being relayed. - /// @param _sender Address of the user who sent the message. 
- /// @param _target Address that the message is targeted at. - /// @param _message Message payload to call target with. + /// @notice Relays a message that was sent by the other L2ToL2CrossDomainMessenger contract. Can only be executed + /// via cross chain call from the other messenger OR if the message was already received once and is + /// currently being replayed. + /// @param _id Identifier of the SentMessage event to be relayed + /// @param _sentMessage Message payload of the `SentMessage` event function relayMessage( - uint256 _destination, - uint256 _source, - uint256 _nonce, - address _sender, - address _target, - bytes memory _message + ICrossL2Inbox.Identifier calldata _id, + bytes calldata _sentMessage ) external payable nonReentrant { - if (msg.sender != Predeploys.CROSS_L2_INBOX) revert RelayMessageCallerNotCrossL2Inbox(); - if (CrossL2Inbox(Predeploys.CROSS_L2_INBOX).origin() != Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) { - revert CrossL2InboxOriginNotL2ToL2CrossDomainMessenger(); - } - if (_destination != block.chainid) revert MessageDestinationNotRelayChain(); - if (_target == Predeploys.CROSS_L2_INBOX) revert MessageTargetCrossL2Inbox(); - if (_target == Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) { - revert MessageTargetL2ToL2CrossDomainMessenger(); + // Ensure the log came from the messenger. 
Since the log origin is the CDM, there isn't a scenario where + // this can be invoked from the CrossL2Inbox as the SentMessage log is not calldata for this function + if (_id.origin != Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) { + revert IdOriginNotL2ToL2CrossDomainMessenger(); } - bytes32 messageHash = Hashing.hashL2toL2CrossDomainMessengerRelayMessage({ - _destination: _destination, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: _target, - _message: _message + // Signal that this is a cross chain call that needs to have the identifier validated + CrossL2Inbox(Predeploys.CROSS_L2_INBOX).validateMessage(_id, keccak256(_sentMessage)); + + // Decode the payload + (uint256 destination, address target, uint256 nonce, address sender, bytes memory message) = + _decodeSentMessagePayload(_sentMessage); + + // Assert invariants on the message + if (destination != block.chainid) revert MessageDestinationNotRelayChain(); + if (target == Predeploys.CROSS_L2_INBOX) revert MessageTargetCrossL2Inbox(); + if (target == Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) revert MessageTargetL2ToL2CrossDomainMessenger(); + + uint256 source = _id.chainId; + bytes32 messageHash = Hashing.hashL2toL2CrossDomainMessage({ + _destination: destination, + _source: source, + _nonce: nonce, + _sender: sender, + _target: target, + _message: message }); + if (successfulMessages[messageHash]) { revert MessageAlreadyRelayed(); } - _storeMessageMetadata(_source, _sender); + _storeMessageMetadata(source, sender); - bool success = SafeCall.call(_target, msg.value, _message); + bool success = SafeCall.call(target, msg.value, message); if (success) { successfulMessages[messageHash] = true; - emit RelayedMessage(messageHash); + emit RelayedMessage(source, nonce, messageHash); } else { - emit FailedRelayedMessage(messageHash); + emit FailedRelayedMessage(source, nonce, messageHash); } _storeMessageMetadata(0, address(0)); @@ -205,4 +216,20 @@ contract L2ToL2CrossDomainMessenger is 
IL2ToL2CrossDomainMessenger, ISemver, Tra tstore(CROSS_DOMAIN_MESSAGE_SOURCE_SLOT, _source) } } + + function _decodeSentMessagePayload(bytes calldata _payload) + internal + pure + returns (uint256 destination_, address target_, uint256 nonce_, address sender_, bytes memory message_) + { + // Validate Selector (also reverts if LOG0 with no topics) + bytes32 selector = abi.decode(_payload[:32], (bytes32)); + if (selector != SENT_MESSAGE_EVENT_SELECTOR) revert EventPayloadNotSentMessage(); + + // Topics + (destination_, target_, nonce_) = abi.decode(_payload[32:128], (uint256, address, uint256)); + + // Data + (sender_, message_) = abi.decode(_payload[128:], (address, bytes)); + } } diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol b/packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol index e043bb43420a..2b5b945dec73 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IL2ToL2CrossDomainMessenger.sol @@ -1,6 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; +import { ICrossL2Inbox } from "src/L2/interfaces/ICrossL2Inbox.sol"; + /// @title IL2ToL2CrossDomainMessenger /// @notice Interface for the L2ToL2CrossDomainMessenger contract. interface IL2ToL2CrossDomainMessenger { @@ -45,20 +47,7 @@ interface IL2ToL2CrossDomainMessenger { /// @notice Relays a message that was sent by the other CrossDomainMessenger contract. Can only /// be executed via cross-chain call from the other messenger OR if the message was /// already received once and is currently being replayed. - /// @param _destination Chain ID of the destination chain. - /// @param _nonce Nonce of the message being relayed. - /// @param _sender Address of the user who sent the message. - /// @param _source Chain ID of the source chain. - /// @param _target Address that the message is targeted at. - /// @param _message Message to send to the target. 
- function relayMessage( - uint256 _destination, - uint256 _source, - uint256 _nonce, - address _sender, - address _target, - bytes calldata _message - ) - external - payable; + /// @param _id Identifier of the SentMessage event to be relayed + /// @param _sentMessage Message payload of the `SentMessage` event + function relayMessage(ICrossL2Inbox.Identifier calldata _id, bytes calldata _sentMessage) external payable; } diff --git a/packages/contracts-bedrock/src/libraries/Hashing.sol b/packages/contracts-bedrock/src/libraries/Hashing.sol index 07a31eb76006..0f0f15678f97 100644 --- a/packages/contracts-bedrock/src/libraries/Hashing.sol +++ b/packages/contracts-bedrock/src/libraries/Hashing.sol @@ -122,8 +122,8 @@ library Hashing { ); } - /// @notice Generates a unique hash for a message to be relayed across chains. This hash is - /// used to identify the message and ensure it is not relayed more than once. + /// @notice Generates a unique hash for cross l2 messages. This hash is used to identify + /// the message and ensure it is not relayed more than once. /// @param _destination Chain ID of the destination chain. /// @param _source Chain ID of the source chain. /// @param _nonce Unique nonce associated with the message to prevent replay attacks. @@ -131,7 +131,7 @@ library Hashing { /// @param _target Address of the contract or wallet that the message is targeting on the destination chain. /// @param _message The message payload to be relayed to the target on the destination chain. /// @return Hash of the encoded message parameters, used to uniquely identify the message. 
- function hashL2toL2CrossDomainMessengerRelayMessage( + function hashL2toL2CrossDomainMessage( uint256 _destination, uint256 _source, uint256 _nonce, diff --git a/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol index ffde996c21c7..f5ff43c832ca 100644 --- a/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol @@ -10,19 +10,20 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; import { Hashing } from "src/libraries/Hashing.sol"; // Target contract +import { CrossL2Inbox } from "src/L2/CrossL2Inbox.sol"; +import { ICrossL2Inbox } from "src/L2/interfaces/ICrossL2Inbox.sol"; import { L2ToL2CrossDomainMessenger, NotEntered, MessageDestinationSameChain, - RelayMessageCallerNotCrossL2Inbox, - CrossL2InboxOriginNotL2ToL2CrossDomainMessenger, + IdOriginNotL2ToL2CrossDomainMessenger, + EventPayloadNotSentMessage, MessageDestinationNotRelayChain, MessageTargetCrossL2Inbox, MessageTargetL2ToL2CrossDomainMessenger, MessageAlreadyRelayed, ReentrantCall } from "src/L2/L2ToL2CrossDomainMessenger.sol"; -import { CrossL2Inbox } from "src/L2/CrossL2Inbox.sol"; /// @title L2ToL2CrossDomainMessengerWithModifiableTransientStorage /// @dev L2ToL2CrossDomainMessenger contract with methods to modify the transient storage. 
@@ -91,11 +92,10 @@ contract L2ToL2CrossDomainMessengerTest is Test { vm.recordLogs(); // Call the sendMessage function - bytes32 msgHash = - l2ToL2CrossDomainMessenger.sendMessage({ _destination: _destination, _target: _target, _message: _message }); + bytes32 msgHash = l2ToL2CrossDomainMessenger.sendMessage(_destination, _target, _message); assertEq( msgHash, - Hashing.hashL2toL2CrossDomainMessengerRelayMessage( + Hashing.hashL2toL2CrossDomainMessage( _destination, block.chainid, messageNonce, address(this), _target, _message ) ); @@ -103,13 +103,15 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Check that the event was emitted with the correct parameters Vm.Log[] memory logs = vm.getRecordedLogs(); assertEq(logs.length, 1); - assertEq( - logs[0].data, - abi.encodeCall( - L2ToL2CrossDomainMessenger.relayMessage, - (_destination, block.chainid, messageNonce, address(this), _target, _message) - ) - ); + + // topics + assertEq(logs[0].topics[0], L2ToL2CrossDomainMessenger.SentMessage.selector); + assertEq(logs[0].topics[1], bytes32(_destination)); + assertEq(logs[0].topics[2], bytes32(uint256(uint160(_target)))); + assertEq(logs[0].topics[3], bytes32(messageNonce)); + + // data + assertEq(logs[0].data, abi.encode(address(this), _message)); // Check that the message nonce has been incremented assertEq(l2ToL2CrossDomainMessenger.messageNonce(), messageNonce + 1); @@ -198,16 +200,15 @@ contract L2ToL2CrossDomainMessengerTest is Test { address _sender, address _target, bytes calldata _message, - uint256 _value + uint256 _value, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time ) external { - // Ensure that the target contract is not a Forge contract. 
- assumeNotForgeAddress(_target); - // Ensure that the target contract is not CrossL2Inbox or L2ToL2CrossDomainMessenger - vm.assume(_target != Predeploys.CROSS_L2_INBOX); - vm.assume(_target != Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); + vm.assume(_target != Predeploys.CROSS_L2_INBOX && _target != Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); // Ensure that the target call is payable if value is sent if (_value > 0) assumePayable(_target); @@ -215,51 +216,68 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Ensure that the target contract does not revert vm.mockCall({ callee: _target, msgValue: _value, data: _message, returnData: abi.encode(true) }); - // Mock the CrossL2Inbox origin to return the L2ToL2CrossDomainMessenger contract + // Construct the SentMessage payload & identifier + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNum, _logIndex, _time, _source); + bytes memory sentMessage = abi.encodePacked( + abi.encode(L2ToL2CrossDomainMessenger.SentMessage.selector, block.chainid, _target, _nonce), // topics + abi.encode(_sender, _message) // data + ); + + // Ensure the CrossL2Inbox validates this message vm.mockCall({ callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeWithSelector(CrossL2Inbox.origin.selector), - returnData: abi.encode(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) + data: abi.encodeWithSelector(CrossL2Inbox.validateMessage.selector, id, sentMessage), + returnData: "" }); // Look for correct emitted event vm.expectEmit(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); emit L2ToL2CrossDomainMessenger.RelayedMessage( - keccak256(abi.encode(block.chainid, _source, _nonce, _sender, _target, _message)) + _source, _nonce, keccak256(abi.encode(block.chainid, _source, _nonce, _sender, _target, _message)) ); - // Ensure the target contract is called with the correct parameters - vm.expectCall({ callee: _target, msgValue: _value, data: _message }); - - // Ensure caller is 
CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); - - // Call the relayMessage function - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: block.chainid, // ensure the destination is the chain of L2ToL2CrossDomainMessenger - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: _target, - _message: _message - }); - - // Check that successfulMessages mapping updates the message hash correctly + // relay the message + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); assertEq( l2ToL2CrossDomainMessenger.successfulMessages( keccak256(abi.encode(block.chainid, _source, _nonce, _sender, _target, _message)) ), true ); + } - // Check that entered slot is cleared after the function call - assertEq(l2ToL2CrossDomainMessenger.entered(), false); + function testFuzz_relayMessage_eventPayloadNotSentMessage_reverts( + uint256 _source, + uint256 _nonce, + bytes32 _msgHash, + uint256 _value, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time + ) + external + { + // Expect a revert with the EventPayloadNotSentMessage selector + vm.expectRevert(EventPayloadNotSentMessage.selector); - // Check that metadata is cleared after the function call. 
We need to set the `entered` slot to non-zero value - // to prevent NotEntered revert when calling the crossDomainMessageSender and crossDomainMessageSource functions - l2ToL2CrossDomainMessenger.setEntered(1); - assertEq(l2ToL2CrossDomainMessenger.crossDomainMessageSource(), 0); - assertEq(l2ToL2CrossDomainMessenger.crossDomainMessageSender(), address(0)); + // Point to a different remote log that the inbox validates + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNum, _logIndex, _time, _source); + bytes memory sentMessage = + abi.encode(L2ToL2CrossDomainMessenger.RelayedMessage.selector, _source, _nonce, _msgHash); + + // Ensure the CrossL2Inbox validates this message + vm.mockCall({ + callee: Predeploys.CROSS_L2_INBOX, + data: abi.encodeWithSelector(CrossL2Inbox.validateMessage.selector, id, sentMessage), + returnData: "" + }); + + // Call + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); } /// @dev Mock target function that checks the source and sender of the message in transient storage. @@ -281,7 +299,10 @@ contract L2ToL2CrossDomainMessengerTest is Test { uint256 _source, uint256 _nonce, address _sender, - uint256 _value + uint256 _value, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time ) external { @@ -289,46 +310,39 @@ contract L2ToL2CrossDomainMessengerTest is Test { // contract has a non-zero balance. Thus, we set this contract's balance to zero and we hoax afterwards. 
vm.deal(address(this), 0); - // Mock the CrossL2Inbox origin to return the L2ToL2CrossDomainMessenger contract - vm.mockCall({ - callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeWithSelector(CrossL2Inbox.origin.selector), - returnData: abi.encode(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) - }); - // Set the target and message for the reentrant call address target = address(this); bytes memory message = abi.encodeWithSelector(this.mockTarget.selector, _source, _sender); + bytes32 msgHash = keccak256(abi.encode(block.chainid, _source, _nonce, _sender, target, message)); + // Look for correct emitted event vm.expectEmit(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); - emit L2ToL2CrossDomainMessenger.RelayedMessage( - keccak256(abi.encode(block.chainid, _source, _nonce, _sender, target, message)) - ); + emit L2ToL2CrossDomainMessenger.RelayedMessage(_source, _nonce, msgHash); // Ensure the target contract is called with the correct parameters vm.expectCall({ callee: target, msgValue: _value, data: message }); - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); - - // Call the relayMessage function - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: block.chainid, // ensure the destination is the chain of L2ToL2CrossDomainMessenger - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: target, - _message: message + // Construct and relay the message + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNum, _logIndex, _time, _source); + bytes memory sentMessage = abi.encodePacked( + abi.encode(L2ToL2CrossDomainMessenger.SentMessage.selector, block.chainid, target, _nonce), // topics + abi.encode(_sender, message) // data + ); + + // Ensure the CrossL2Inbox validates this message + vm.mockCall({ + callee: Predeploys.CROSS_L2_INBOX, + data: 
abi.encodeWithSelector(CrossL2Inbox.validateMessage.selector, id, sentMessage), + returnData: "" }); + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); + // Check that successfulMessages mapping updates the message hash correctly - assertEq( - l2ToL2CrossDomainMessenger.successfulMessages( - keccak256(abi.encode(block.chainid, _source, _nonce, _sender, target, message)) - ), - true - ); + assertEq(l2ToL2CrossDomainMessenger.successfulMessages(msgHash), true); // Check that entered slot is cleared after the function call assertEq(l2ToL2CrossDomainMessenger.entered(), false); @@ -353,14 +367,14 @@ contract L2ToL2CrossDomainMessengerTest is Test { vm.expectRevert(ReentrantCall.selector); - l2ToL2CrossDomainMessenger.relayMessage({ - _destination: block.chainid, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: address(0), - _message: "" - }); + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, 1, 1, 1, _source); + bytes memory sentMessage = abi.encodePacked( + abi.encode(L2ToL2CrossDomainMessenger.SentMessage.selector, block.chainid, address(0), _nonce), // topics + abi.encode(_sender, "") // data + ); + + l2ToL2CrossDomainMessenger.relayMessage(id, sentMessage); // Ensure the function still reverts if `expectRevert` succeeds revert(); @@ -373,7 +387,10 @@ contract L2ToL2CrossDomainMessengerTest is Test { uint256 _source2, // sender passed to `relayMessage` by the reentrant call. address _sender2, // sender passed to `relayMessage` by the reentrant call. uint256 _nonce, - uint256 _value + uint256 _value, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time ) external { @@ -381,13 +398,6 @@ contract L2ToL2CrossDomainMessengerTest is Test { // contract has a non-zero balance. Thus, we set this contract's balance to zero and we hoax afterwards. 
vm.deal(address(this), 0); - // Mock the CrossL2Inbox origin to return the L2ToL2CrossDomainMessenger contract - vm.mockCall({ - callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeWithSelector(CrossL2Inbox.origin.selector), - returnData: abi.encode(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) - }); - // Set the target and message for the reentrant call address target = address(this); bytes memory message = abi.encodeWithSelector(this.mockTargetReentrant.selector, _source2, _nonce, _sender2); @@ -395,25 +405,30 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Look for correct emitted event vm.expectEmit(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); emit L2ToL2CrossDomainMessenger.FailedRelayedMessage( - keccak256(abi.encode(block.chainid, _source1, _nonce, _sender1, target, message)) + _source1, _nonce, keccak256(abi.encode(block.chainid, _source1, _nonce, _sender1, target, message)) ); // Ensure the target contract is called with the correct parameters vm.expectCall({ callee: target, msgValue: _value, data: message }); - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); - - // Call the relayMessage function - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: block.chainid, // ensure the destination is the chain of L2ToL2CrossDomainMessenger - _source: _source1, - _nonce: _nonce, - _sender: _sender1, - _target: target, - _message: message + // Construct and relay the message + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNum, _logIndex, _time, _source1); + bytes memory sentMessage = abi.encodePacked( + abi.encode(L2ToL2CrossDomainMessenger.SentMessage.selector, block.chainid, target, _nonce), // topics + abi.encode(_sender1, message) // data + ); + + // Ensure the CrossL2Inbox validates this message + vm.mockCall({ + callee: Predeploys.CROSS_L2_INBOX, + data: 
abi.encodeWithSelector(CrossL2Inbox.validateMessage.selector, id, sentMessage), + returnData: "" }); + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); + // Check that entered slot is cleared after the function call assertEq(l2ToL2CrossDomainMessenger.entered(), false); @@ -424,70 +439,36 @@ contract L2ToL2CrossDomainMessengerTest is Test { assertEq(l2ToL2CrossDomainMessenger.crossDomainMessageSender(), address(0)); } - /// @dev Tests that the `relayMessage` function reverts when the caller is not the CrossL2Inbox contract. - function testFuzz_relayMessage_callerNotCrossL2Inbox_reverts( - uint256 _destination, + /// @dev Tests that the `relayMessage` function reverts when log identifier is not the cdm + function testFuzz_relayMessage_idOriginNotL2ToL2CrossDomainMessenger_reverts( uint256 _source, uint256 _nonce, address _sender, address _target, bytes calldata _message, - uint256 _value + uint256 _value, + address _origin, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time ) external { - // Add sufficient value to the contract to relay the message with - vm.deal(address(this), _value); + // Incorrect identifier origin + vm.assume(_origin != Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); - // Expect a revert with the RelayMessageCallerNotCrossL2Inbox selector - vm.expectRevert(RelayMessageCallerNotCrossL2Inbox.selector); - - // Call `relayMessage` with the current contract as the caller to provoke revert - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: _destination, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: _target, - _message: _message - }); - } + // Expect a revert with the IdOriginNotL2ToL2CrossDomainMessenger + vm.expectRevert(IdOriginNotL2ToL2CrossDomainMessenger.selector); - /// @dev Tests that the `relayMessage` function reverts when CrossL2Inbox's origin is not - /// L2ToL2CrossDomainMessenger. 
- function testFuzz_relayMessage_crossL2InboxOriginNotL2ToL2CrossDomainMessenger_reverts( - uint256 _destination, - uint256 _source, - uint256 _nonce, - address _sender, - address _target, - bytes calldata _message, - uint256 _value - ) - external - { - // Set address(0) as the origin of the CrossL2Inbox contract, which is not the L2ToL2CrossDomainMessenger - vm.mockCall({ - callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeWithSelector(CrossL2Inbox.origin.selector), - returnData: abi.encode(address(0)) - }); - - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); - - // Expect a revert with the CrossL2InboxOriginNotL2ToL2CrossDomainMessenger selector - vm.expectRevert(CrossL2InboxOriginNotL2ToL2CrossDomainMessenger.selector); + ICrossL2Inbox.Identifier memory id = ICrossL2Inbox.Identifier(_origin, _blockNum, _logIndex, _time, _source); + bytes memory sentMessage = abi.encodePacked( + abi.encode(L2ToL2CrossDomainMessenger.SentMessage.selector, block.chainid, _target, _nonce), // topics + abi.encode(_sender, _message) // data + ); - // Call `relayMessage` with invalid CrossL2Inbox origin to provoke revert - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: _destination, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: _target, - _message: _message - }); + // Call + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); } /// @dev Tests that the `relayMessage` function reverts when the destination is not the relay chain. 
@@ -498,35 +479,36 @@ contract L2ToL2CrossDomainMessengerTest is Test { address _sender, address _target, bytes calldata _message, - uint256 _value + uint256 _value, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time ) external { // Ensure the destination is not this chain vm.assume(_destination != block.chainid); - // Mock the CrossL2Inbox origin to return the L2ToL2CrossDomainMessenger contract + // Expect a revert with the MessageDestinationNotRelayChain selector + vm.expectRevert(MessageDestinationNotRelayChain.selector); + + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNum, _logIndex, _time, _source); + bytes memory sentMessage = abi.encodePacked( + abi.encode(L2ToL2CrossDomainMessenger.SentMessage.selector, _destination, _target, _nonce), // topics + abi.encode(_sender, _message) // data + ); + + // Ensure the CrossL2Inbox validates this message vm.mockCall({ callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeWithSelector(CrossL2Inbox.origin.selector), - returnData: abi.encode(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) + data: abi.encodeWithSelector(CrossL2Inbox.validateMessage.selector, id, sentMessage), + returnData: "" }); - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); - - // Expect a revert with the MessageDestinationNotRelayChain selector - vm.expectRevert(MessageDestinationNotRelayChain.selector); - // Call `relayMessage` - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: _destination, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: _target, - _message: _message - }); + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); } /// @dev Tests that the `relayMessage` function reverts when the message target is CrossL2Inbox. 
@@ -535,33 +517,37 @@ contract L2ToL2CrossDomainMessengerTest is Test { uint256 _nonce, address _sender, bytes calldata _message, - uint256 _value + uint256 _value, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time ) external { - // Mock the CrossL2Inbox origin to return the L2ToL2CrossDomainMessenger contract - vm.mockCall({ - callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeWithSelector(CrossL2Inbox.origin.selector), - returnData: abi.encode(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) - }); - - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); - // Expect a revert with the MessageTargetCrossL2Inbox selector vm.expectRevert(MessageTargetCrossL2Inbox.selector); // Call `relayMessage` with CrossL2Inbox as the target to provoke revert. The current chain is the destination // to prevent revert due to invalid destination - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: block.chainid, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: Predeploys.CROSS_L2_INBOX, - _message: _message + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNum, _logIndex, _time, _source); + bytes memory sentMessage = abi.encodePacked( + abi.encode( + L2ToL2CrossDomainMessenger.SentMessage.selector, block.chainid, Predeploys.CROSS_L2_INBOX, _nonce + ), // topics + abi.encode(_sender, _message) // data + ); + + // Ensure the CrossL2Inbox validates this message + vm.mockCall({ + callee: Predeploys.CROSS_L2_INBOX, + data: abi.encodeWithSelector(CrossL2Inbox.validateMessage.selector, id, sentMessage), + returnData: "" }); + + // Call + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); } /// @dev Tests that the `relayMessage` function reverts when the message target is L2ToL2CrossDomainMessenger. 
@@ -570,33 +556,39 @@ contract L2ToL2CrossDomainMessengerTest is Test { uint256 _nonce, address _sender, bytes calldata _message, - uint256 _value + uint256 _value, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time ) external { - // Mock the CrossL2Inbox origin to return the L2ToL2CrossDomainMessenger contract - vm.mockCall({ - callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeWithSelector(CrossL2Inbox.origin.selector), - returnData: abi.encode(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) - }); - - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); - // Expect a revert with the MessageTargetL2ToL2CrossDomainMessenger selector vm.expectRevert(MessageTargetL2ToL2CrossDomainMessenger.selector); // Call `relayMessage` with L2ToL2CrossDomainMessenger as the target to provoke revert. The current chain is the // destination to prevent revert due to invalid destination - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: block.chainid, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, - _message: _message + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNum, _logIndex, _time, _source); + bytes memory sentMessage = abi.encodePacked( + abi.encode( + L2ToL2CrossDomainMessenger.SentMessage.selector, + block.chainid, + Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, + _nonce + ), // topics + abi.encode(_sender, _message) // data + ); + + // Ensure the CrossL2Inbox validates this message + vm.mockCall({ + callee: Predeploys.CROSS_L2_INBOX, + data: abi.encodeWithSelector(CrossL2Inbox.validateMessage.selector, id, sentMessage), + returnData: "" }); + + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); } /// @dev Tests that the `relayMessage` 
function reverts when the message has already been relayed. @@ -606,7 +598,10 @@ contract L2ToL2CrossDomainMessengerTest is Test { address _sender, address _target, bytes calldata _message, - uint256 _value + uint256 _value, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time ) external { @@ -622,48 +617,37 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Ensure that the target contract does not revert vm.mockCall({ callee: _target, msgValue: _value, data: _message, returnData: abi.encode(true) }); - // Mock the CrossL2Inbox origin to return the L2ToL2CrossDomainMessenger contract - vm.mockCall({ - callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeWithSelector(CrossL2Inbox.origin.selector), - returnData: abi.encode(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) - }); - - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); - // Look for correct emitted event for first call. vm.expectEmit(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); emit L2ToL2CrossDomainMessenger.RelayedMessage( - keccak256(abi.encode(block.chainid, _source, _nonce, _sender, _target, _message)) + _source, _nonce, keccak256(abi.encode(block.chainid, _source, _nonce, _sender, _target, _message)) ); - // First call to `relayMessage` should succeed. 
The current chain is the destination to prevent revert due to - // invalid destination - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: block.chainid, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: _target, - _message: _message + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNum, _logIndex, _time, _source); + bytes memory sentMessage = abi.encodePacked( + abi.encode(L2ToL2CrossDomainMessenger.SentMessage.selector, block.chainid, _target, _nonce), // topics + abi.encode(_sender, _message) // data + ); + + // Ensure the CrossL2Inbox validates this message + vm.mockCall({ + callee: Predeploys.CROSS_L2_INBOX, + data: abi.encodeWithSelector(CrossL2Inbox.validateMessage.selector, id, sentMessage), + returnData: "" }); - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); + // First call to `relayMessage` should succeed. The current chain is the destination to prevent revert due to + // invalid destination + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); // Second call should fail with MessageAlreadyRelayed selector vm.expectRevert(MessageAlreadyRelayed.selector); // Call `relayMessage` again. The current chain is the destination to prevent revert due to invalid destination - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: block.chainid, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: _target, - _message: _message - }); + hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); } /// @dev Tests that the `relayMessage` function reverts when the target call fails. 
@@ -673,7 +657,10 @@ contract L2ToL2CrossDomainMessengerTest is Test { address _sender, address _target, bytes calldata _message, - uint256 _value + uint256 _value, + uint256 _blockNum, + uint256 _logIndex, + uint256 _time ) external { @@ -686,30 +673,28 @@ contract L2ToL2CrossDomainMessengerTest is Test { // Ensure that the target contract reverts vm.mockCallRevert({ callee: _target, msgValue: _value, data: _message, revertData: abi.encode(false) }); - // Mock the CrossL2Inbox origin to return the L2ToL2CrossDomainMessenger contract - vm.mockCall({ - callee: Predeploys.CROSS_L2_INBOX, - data: abi.encodeWithSelector(CrossL2Inbox.origin.selector), - returnData: abi.encode(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER) - }); - - // Ensure caller is CrossL2Inbox to prevent a revert from the caller check and that it has sufficient value - hoax(Predeploys.CROSS_L2_INBOX, _value); - // Look for correct emitted event vm.expectEmit(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); emit L2ToL2CrossDomainMessenger.FailedRelayedMessage( - keccak256(abi.encode(block.chainid, _source, _nonce, _sender, _target, _message)) + _source, _nonce, keccak256(abi.encode(block.chainid, _source, _nonce, _sender, _target, _message)) ); - l2ToL2CrossDomainMessenger.relayMessage{ value: _value }({ - _destination: block.chainid, - _source: _source, - _nonce: _nonce, - _sender: _sender, - _target: _target, - _message: _message + ICrossL2Inbox.Identifier memory id = + ICrossL2Inbox.Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _blockNum, _logIndex, _time, _source); + bytes memory sentMessage = abi.encodePacked( + abi.encode(L2ToL2CrossDomainMessenger.SentMessage.selector, block.chainid, _target, _nonce), // topics + abi.encode(_sender, _message) // data + ); + + // Ensure the CrossL2Inbox validates this message + vm.mockCall({ + callee: Predeploys.CROSS_L2_INBOX, + data: abi.encodeWithSelector(CrossL2Inbox.validateMessage.selector, id, sentMessage), + returnData: "" }); + + 
hoax(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, _value); + l2ToL2CrossDomainMessenger.relayMessage{ value: _value }(id, sentMessage); } /// @dev Tests that the `crossDomainMessageSender` function returns the correct value. From 644dc2b545dd40d62cc9c07697a9c1506663a9f7 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Fri, 27 Sep 2024 15:23:16 -0600 Subject: [PATCH 070/116] Cleanups for production, add standard addresses (#12169) * Cleanups for production, add standard addresses This PR: - Cleans up the intent file to be more suitable for production deployments - Fixes various bugs encountered while preparing `op-deployer` for use against predeployed OPCM contracts - Adds a new CLI command to bootstrap a new OPCM deployment against an existing set of implementation contracts Note on Solidity changes: - Since the code for custom gas tokens is in the monorepo but isn't included in an official contracts release yet, we had to add interfaces for the pre-CGT contracts to the Solidity codebase. - The `DeployImplementations` script looks at the release identifier to determine whether or not it should use the pre- or post-CGT interfaces. 
* goimports * lints * fix tests * revert tx manger changes * Update packages/contracts-bedrock/src/L1/interfaces/IL1StandardBridgeV160.sol Co-authored-by: Maurelian * Update packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessengerV160.sol Co-authored-by: Maurelian * use new opcm * fix test * semver * semver * bump semver * update manager deployment * natspec * SEMVER --------- Co-authored-by: Maurelian --- op-chain-ops/cmd/op-deployer/main.go | 7 + op-chain-ops/deployer/bootstrap/bootstrap.go | 206 ++++++++++++++++++ op-chain-ops/deployer/bootstrap/flags.go | 41 ++++ op-chain-ops/deployer/broadcaster/keyed.go | 2 +- op-chain-ops/deployer/flags.go | 11 +- op-chain-ops/deployer/init.go | 5 +- .../deployer/integration_test/apply_test.go | 2 - op-chain-ops/deployer/opcm/implementations.go | 2 +- op-chain-ops/deployer/opcm/opchain.go | 2 + op-chain-ops/deployer/opcm/standard.go | 53 ++++- .../deployer/pipeline/implementations.go | 17 +- op-chain-ops/deployer/pipeline/init.go | 53 ++--- op-chain-ops/deployer/pipeline/opchain.go | 71 ++---- op-chain-ops/deployer/state/deploy_config.go | 7 + op-chain-ops/deployer/state/intent.go | 50 +++-- op-chain-ops/deployer/state/state.go | 4 +- op-chain-ops/interopgen/deploy.go | 2 +- op-chain-ops/script/script.go | 22 +- op-service/txmgr/txmgr.go | 6 + .../scripts/DeployImplementations.s.sol | 75 +++++-- .../scripts/DeployOPChain.s.sol | 4 +- packages/contracts-bedrock/semver-lock.json | 4 +- .../src/L1/OPContractsManager.sol | 6 +- .../IL1CrossDomainMessengerV160.sol | 21 ++ .../L1/interfaces/IL1StandardBridgeV160.sol | 75 +++++++ .../src/L1/interfaces/ISystemConfigV160.sol | 4 +- .../test/opcm/DeployImplementations.t.sol | 27 +-- .../test/opcm/DeployOPChain.t.sol | 1 + 28 files changed, 595 insertions(+), 185 deletions(-) create mode 100644 op-chain-ops/deployer/bootstrap/bootstrap.go create mode 100644 op-chain-ops/deployer/bootstrap/flags.go create mode 100644 
packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessengerV160.sol create mode 100644 packages/contracts-bedrock/src/L1/interfaces/IL1StandardBridgeV160.sol diff --git a/op-chain-ops/cmd/op-deployer/main.go b/op-chain-ops/cmd/op-deployer/main.go index 023d8adca39d..d6daf959c103 100644 --- a/op-chain-ops/cmd/op-deployer/main.go +++ b/op-chain-ops/cmd/op-deployer/main.go @@ -4,6 +4,8 @@ import ( "fmt" "os" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/bootstrap" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/version" opservice "github.com/ethereum-optimism/optimism/op-service" @@ -41,6 +43,11 @@ func main() { Flags: cliapp.ProtectFlags(deployer.ApplyFlags), Action: deployer.ApplyCLI(), }, + { + Name: "bootstrap", + Usage: "bootstraps global contract instances", + Subcommands: bootstrap.Commands, + }, { Name: "inspect", Usage: "inspects the state of a deployment", diff --git a/op-chain-ops/deployer/bootstrap/bootstrap.go b/op-chain-ops/deployer/bootstrap/bootstrap.go new file mode 100644 index 000000000000..5f1fc7db254e --- /dev/null +++ b/op-chain-ops/deployer/bootstrap/bootstrap.go @@ -0,0 +1,206 @@ +package bootstrap + +import ( + "context" + "crypto/ecdsa" + "crypto/rand" + "fmt" + "math/big" + "strings" + + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opcm" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/pipeline" + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/state" + "github.com/ethereum-optimism/optimism/op-chain-ops/script" + opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto" + "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/op-service/jsonutil" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum/go-ethereum/common" + 
"github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" + "github.com/urfave/cli/v2" +) + +type OPCMConfig struct { + L1RPCUrl string + PrivateKey string + Logger log.Logger + ArtifactsURL *state.ArtifactsURL + ContractsRelease string + + privateKeyECDSA *ecdsa.PrivateKey +} + +func (c *OPCMConfig) Check() error { + if c.L1RPCUrl == "" { + return fmt.Errorf("l1RPCUrl must be specified") + } + + if c.PrivateKey == "" { + return fmt.Errorf("private key must be specified") + } + + privECDSA, err := crypto.HexToECDSA(strings.TrimPrefix(c.PrivateKey, "0x")) + if err != nil { + return fmt.Errorf("failed to parse private key: %w", err) + } + c.privateKeyECDSA = privECDSA + + if c.Logger == nil { + return fmt.Errorf("logger must be specified") + } + + if c.ArtifactsURL == nil { + return fmt.Errorf("artifacts URL must be specified") + } + + if c.ContractsRelease == "" { + return fmt.Errorf("contracts release must be specified") + } + + return nil +} + +func OPCMCLI(cliCtx *cli.Context) error { + logCfg := oplog.ReadCLIConfig(cliCtx) + l := oplog.NewLogger(oplog.AppOut(cliCtx), logCfg) + oplog.SetGlobalLogHandler(l.Handler()) + + l1RPCUrl := cliCtx.String(deployer.L1RPCURLFlagName) + privateKey := cliCtx.String(deployer.PrivateKeyFlagName) + artifactsURLStr := cliCtx.String(ArtifactsURLFlagName) + artifactsURL := new(state.ArtifactsURL) + if err := artifactsURL.UnmarshalText([]byte(artifactsURLStr)); err != nil { + return fmt.Errorf("failed to parse artifacts URL: %w", err) + } + contractsRelease := cliCtx.String(ContractsReleaseFlagName) + + ctx := ctxinterrupt.WithCancelOnInterrupt(cliCtx.Context) + + return OPCM(ctx, OPCMConfig{ + L1RPCUrl: l1RPCUrl, + PrivateKey: privateKey, + Logger: l, + ArtifactsURL: artifactsURL, + ContractsRelease: contractsRelease, + }) +} + +func OPCM(ctx context.Context, cfg OPCMConfig) error { + if err := cfg.Check(); err != nil 
{ + return fmt.Errorf("invalid config for OPCM: %w", err) + } + + lgr := cfg.Logger + progressor := func(curr, total int64) { + lgr.Info("artifacts download progress", "current", curr, "total", total) + } + + artifactsFS, cleanup, err := pipeline.DownloadArtifacts(ctx, cfg.ArtifactsURL, progressor) + if err != nil { + return fmt.Errorf("failed to download artifacts: %w", err) + } + defer func() { + if err := cleanup(); err != nil { + lgr.Warn("failed to clean up artifacts", "err", err) + } + }() + + l1Client, err := ethclient.Dial(cfg.L1RPCUrl) + if err != nil { + return fmt.Errorf("failed to connect to L1 RPC: %w", err) + } + + chainID, err := l1Client.ChainID(ctx) + if err != nil { + return fmt.Errorf("failed to get chain ID: %w", err) + } + chainIDU64 := chainID.Uint64() + + superCfg, err := opcm.SuperchainFor(chainIDU64) + if err != nil { + return fmt.Errorf("error getting superchain config: %w", err) + } + standardVersionsTOML, err := opcm.StandardVersionsFor(chainIDU64) + if err != nil { + return fmt.Errorf("error getting standard versions TOML: %w", err) + } + opcmProxyOwnerAddr, err := opcm.ManagerOwnerAddrFor(chainIDU64) + if err != nil { + return fmt.Errorf("error getting superchain proxy admin: %w", err) + } + + signer := opcrypto.SignerFnFromBind(opcrypto.PrivateKeySignerFn(cfg.privateKeyECDSA, chainID)) + chainDeployer := crypto.PubkeyToAddress(cfg.privateKeyECDSA.PublicKey) + + lgr.Info("deploying OPCM", "release", cfg.ContractsRelease) + + var dio opcm.DeployImplementationsOutput + err = pipeline.CallScriptBroadcast( + ctx, + pipeline.CallScriptBroadcastOpts{ + L1ChainID: chainID, + Logger: lgr, + ArtifactsFS: artifactsFS, + Deployer: chainDeployer, + Signer: signer, + Client: l1Client, + Broadcaster: pipeline.KeyedBroadcaster, + Handler: func(host *script.Host) error { + // We need to etch the Superchain addresses so that they have nonzero code + // and the checks in the OPCM constructor pass. 
+ superchainConfigAddr := common.Address(*superCfg.Config.SuperchainConfigAddr) + protocolVersionsAddr := common.Address(*superCfg.Config.ProtocolVersionsAddr) + addresses := []common.Address{ + superchainConfigAddr, + protocolVersionsAddr, + } + for _, addr := range addresses { + host.ImportAccount(addr, types.Account{ + Code: []byte{0x00}, + }) + } + + var salt common.Hash + _, err = rand.Read(salt[:]) + if err != nil { + return fmt.Errorf("failed to generate CREATE2 salt: %w", err) + } + + dio, err = opcm.DeployImplementations( + host, + opcm.DeployImplementationsInput{ + Salt: salt, + WithdrawalDelaySeconds: big.NewInt(604800), + MinProposalSizeBytes: big.NewInt(126000), + ChallengePeriodSeconds: big.NewInt(86400), + ProofMaturityDelaySeconds: big.NewInt(604800), + DisputeGameFinalityDelaySeconds: big.NewInt(302400), + Release: cfg.ContractsRelease, + SuperchainConfigProxy: superchainConfigAddr, + ProtocolVersionsProxy: protocolVersionsAddr, + OpcmProxyOwner: opcmProxyOwnerAddr, + StandardVersionsToml: standardVersionsTOML, + UseInterop: false, + }, + ) + return err + }, + }, + ) + if err != nil { + return fmt.Errorf("error deploying implementations: %w", err) + } + + lgr.Info("deployed implementations") + + if err := jsonutil.WriteJSON(dio, ioutil.ToStdOut()); err != nil { + return fmt.Errorf("failed to write output: %w", err) + } + return nil +} diff --git a/op-chain-ops/deployer/bootstrap/flags.go b/op-chain-ops/deployer/bootstrap/flags.go new file mode 100644 index 000000000000..edb784da9fce --- /dev/null +++ b/op-chain-ops/deployer/bootstrap/flags.go @@ -0,0 +1,41 @@ +package bootstrap + +import ( + "github.com/ethereum-optimism/optimism/op-chain-ops/deployer" + "github.com/ethereum-optimism/optimism/op-service/cliapp" + "github.com/urfave/cli/v2" +) + +const ( + ArtifactsURLFlagName = "artifacts-url" + ContractsReleaseFlagName = "contracts-release" +) + +var ( + ArtifactsURLFlag = &cli.StringFlag{ + Name: ArtifactsURLFlagName, + Usage: "URL to the 
artifacts directory.", + EnvVars: deployer.PrefixEnvVar("ARTIFACTS_URL"), + } + ContractsReleaseFlag = &cli.StringFlag{ + Name: ContractsReleaseFlagName, + Usage: "Release of the contracts to deploy.", + EnvVars: deployer.PrefixEnvVar("CONTRACTS_RELEASE"), + } +) + +var OPCMFlags = []cli.Flag{ + deployer.L1RPCURLFlag, + deployer.PrivateKeyFlag, + ArtifactsURLFlag, + ContractsReleaseFlag, +} + +var Commands = []*cli.Command{ + { + Name: "opcm", + Usage: "Bootstrap an instance of OPCM.", + Flags: cliapp.ProtectFlags(OPCMFlags), + Action: OPCMCLI, + }, +} diff --git a/op-chain-ops/deployer/broadcaster/keyed.go b/op-chain-ops/deployer/broadcaster/keyed.go index 63b72010042b..879d38b329b9 100644 --- a/op-chain-ops/deployer/broadcaster/keyed.go +++ b/op-chain-ops/deployer/broadcaster/keyed.go @@ -66,7 +66,7 @@ func NewKeyedBroadcaster(cfg KeyedBroadcasterOpts) (*KeyedBroadcaster, error) { mgrCfg.FeeLimitMultiplier.Store(5) mgrCfg.FeeLimitThreshold.Store(big.NewInt(100)) mgrCfg.MinTipCap.Store(minTipCap) - mgrCfg.MinTipCap.Store(minBaseFee) + mgrCfg.MinBaseFee.Store(minBaseFee) txmLogger := log.NewLogger(log.DiscardHandler()) if cfg.TXManagerLogger != nil { diff --git a/op-chain-ops/deployer/flags.go b/op-chain-ops/deployer/flags.go index e0ab864bdada..c0f2ba92f14b 100644 --- a/op-chain-ops/deployer/flags.go +++ b/op-chain-ops/deployer/flags.go @@ -30,28 +30,27 @@ var ( L1ChainIDFlag = &cli.Uint64Flag{ Name: L1ChainIDFlagName, Usage: "Chain ID of the L1 chain.", - EnvVars: prefixEnvVar("L1_CHAIN_ID"), + EnvVars: PrefixEnvVar("L1_CHAIN_ID"), Value: 900, } L2ChainIDsFlag = &cli.StringFlag{ Name: L2ChainIDsFlagName, Usage: "Comma-separated list of L2 chain IDs to deploy.", - EnvVars: prefixEnvVar("L2_CHAIN_IDS"), + EnvVars: PrefixEnvVar("L2_CHAIN_IDS"), } WorkdirFlag = &cli.StringFlag{ Name: WorkdirFlagName, Usage: "Directory storing intent and stage. 
Defaults to the current directory.", - EnvVars: prefixEnvVar("WORKDIR"), + EnvVars: PrefixEnvVar("WORKDIR"), Value: cwd(), Aliases: []string{ OutdirFlagName, }, } - PrivateKeyFlag = &cli.StringFlag{ Name: PrivateKeyFlagName, Usage: "Private key of the deployer account.", - EnvVars: prefixEnvVar("PRIVATE_KEY"), + EnvVars: PrefixEnvVar("PRIVATE_KEY"), } ) @@ -69,7 +68,7 @@ var ApplyFlags = []cli.Flag{ PrivateKeyFlag, } -func prefixEnvVar(name string) []string { +func PrefixEnvVar(name string) []string { return op_service.PrefixEnvVar(EnvVarPrefix, name) } diff --git a/op-chain-ops/deployer/init.go b/op-chain-ops/deployer/init.go index bd79f980cdff..a74f7ffa69bd 100644 --- a/op-chain-ops/deployer/init.go +++ b/op-chain-ops/deployer/init.go @@ -41,8 +41,8 @@ func InitCLI() func(ctx *cli.Context) error { outdir := ctx.String(OutdirFlagName) l2ChainIDsRaw := ctx.String(L2ChainIDsFlagName) - l2ChainIDsStr := strings.Split(l2ChainIDsRaw, ",") - l2ChainIDs := make([]common.Hash, 0, len(l2ChainIDsStr)) + l2ChainIDsStr := strings.Split(strings.TrimSpace(l2ChainIDsRaw), ",") + l2ChainIDs := make([]common.Hash, len(l2ChainIDsStr)) for _, idStr := range l2ChainIDsStr { id, err := op_service.Parse256BitChainID(idStr) if err != nil { @@ -66,7 +66,6 @@ func Init(cfg InitConfig) error { intent := &state.Intent{ L1ChainID: cfg.L1ChainID, - UseFaultProofs: true, FundDevAccounts: true, ContractsRelease: "dev", } diff --git a/op-chain-ops/deployer/integration_test/apply_test.go b/op-chain-ops/deployer/integration_test/apply_test.go index a9425f36d670..be4ef80e6374 100644 --- a/op-chain-ops/deployer/integration_test/apply_test.go +++ b/op-chain-ops/deployer/integration_test/apply_test.go @@ -198,7 +198,6 @@ func makeIntent( ProtocolVersionsOwner: addrFor(devkeys.SuperchainDeployerKey.Key(l1ChainID)), Guardian: addrFor(devkeys.SuperchainConfigGuardianKey.Key(l1ChainID)), }, - UseFaultProofs: true, FundDevAccounts: true, ContractArtifactsURL: (*state.ArtifactsURL)(artifactsURL), 
ContractsRelease: "dev", @@ -239,7 +238,6 @@ func validateOPChainDeployment(t *testing.T, ctx context.Context, l1Client *ethc {"OptimismPortalProxyAddress", chainState.OptimismPortalProxyAddress}, {"DisputeGameFactoryProxyAddress", chainState.DisputeGameFactoryProxyAddress}, {"AnchorStateRegistryProxyAddress", chainState.AnchorStateRegistryProxyAddress}, - {"AnchorStateRegistryImplAddress", chainState.AnchorStateRegistryImplAddress}, {"FaultDisputeGameAddress", chainState.FaultDisputeGameAddress}, {"PermissionedDisputeGameAddress", chainState.PermissionedDisputeGameAddress}, {"DelayedWETHPermissionedGameProxyAddress", chainState.DelayedWETHPermissionedGameProxyAddress}, diff --git a/op-chain-ops/deployer/opcm/implementations.go b/op-chain-ops/deployer/opcm/implementations.go index 1d88c9b74398..0c61658dc429 100644 --- a/op-chain-ops/deployer/opcm/implementations.go +++ b/op-chain-ops/deployer/opcm/implementations.go @@ -22,7 +22,7 @@ type DeployImplementationsInput struct { ProtocolVersionsProxy common.Address UseInterop bool // if true, deploy Interop implementations - SuperchainProxyAdmin common.Address + OpcmProxyOwner common.Address StandardVersionsToml string // contents of 'standard-versions-mainnet.toml' or 'standard-versions-sepolia.toml' file } diff --git a/op-chain-ops/deployer/opcm/opchain.go b/op-chain-ops/deployer/opcm/opchain.go index 512b133c5876..ac118302e5a9 100644 --- a/op-chain-ops/deployer/opcm/opchain.go +++ b/op-chain-ops/deployer/opcm/opchain.go @@ -123,6 +123,7 @@ type opcmDeployInput struct { BlobBasefeeScalar uint32 L2ChainId *big.Int StartingAnchorRoots []byte + SaltMixer string } // decodeOutputABIJSON defines an ABI for a fake method called "decodeOutput" that returns the @@ -241,6 +242,7 @@ func DeployOPChainRaw( BlobBasefeeScalar: input.BlobBaseFeeScalar, L2ChainId: input.L2ChainId, StartingAnchorRoots: input.StartingAnchorRoots(), + SaltMixer: input.SaltMixer, }) if err != nil { return out, fmt.Errorf("failed to pack deploy input: 
%w", err) diff --git a/op-chain-ops/deployer/opcm/standard.go b/op-chain-ops/deployer/opcm/standard.go index c82e5de12a32..51de8a483fa7 100644 --- a/op-chain-ops/deployer/opcm/standard.go +++ b/op-chain-ops/deployer/opcm/standard.go @@ -1,6 +1,12 @@ package opcm -import "embed" +import ( + "embed" + "fmt" + + "github.com/ethereum-optimism/superchain-registry/superchain" + "github.com/ethereum/go-ethereum/common" +) //go:embed standard-versions-mainnet.toml var StandardVersionsMainnetData string @@ -9,3 +15,48 @@ var StandardVersionsMainnetData string var StandardVersionsSepoliaData string var _ embed.FS + +func StandardVersionsFor(chainID uint64) (string, error) { + switch chainID { + case 1: + return StandardVersionsMainnetData, nil + case 11155111: + return StandardVersionsSepoliaData, nil + default: + return "", fmt.Errorf("unsupported chain ID: %d", chainID) + } +} + +func SuperchainFor(chainID uint64) (*superchain.Superchain, error) { + switch chainID { + case 1: + return superchain.Superchains["mainnet"], nil + case 11155111: + return superchain.Superchains["sepolia"], nil + default: + return nil, fmt.Errorf("unsupported chain ID: %d", chainID) + } +} + +func ManagerImplementationAddrFor(chainID uint64) (common.Address, error) { + switch chainID { + case 11155111: + // Generated using the bootstrap command on 09/26/2024. 
+ return common.HexToAddress("0x0dc727671d5c08e4e41e8909983ebfa6f57aa0bf"), nil + default: + return common.Address{}, fmt.Errorf("unsupported chain ID: %d", chainID) + } +} + +func ManagerOwnerAddrFor(chainID uint64) (common.Address, error) { + switch chainID { + case 1: + // Set to superchain proxy admin + return common.HexToAddress("0x543bA4AADBAb8f9025686Bd03993043599c6fB04"), nil + case 11155111: + // Set to development multisig + return common.HexToAddress("0xDEe57160aAfCF04c34C887B5962D0a69676d3C8B"), nil + default: + return common.Address{}, fmt.Errorf("unsupported chain ID: %d", chainID) + } +} diff --git a/op-chain-ops/deployer/pipeline/implementations.go b/op-chain-ops/deployer/pipeline/implementations.go index d54d64abc564..12000be720ec 100644 --- a/op-chain-ops/deployer/pipeline/implementations.go +++ b/op-chain-ops/deployer/pipeline/implementations.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "math/big" + "strings" "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opcm" "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/state" @@ -21,9 +22,17 @@ func DeployImplementations(ctx context.Context, env *Env, artifactsFS foundry.St lgr.Info("deploying implementations") + var standardVersionsTOML string + var err error + if strings.HasPrefix(intent.ContractsRelease, "op-contracts") { + standardVersionsTOML, err = opcm.StandardVersionsFor(intent.L1ChainID) + if err != nil { + return fmt.Errorf("error getting standard versions TOML: %w", err) + } + } + var dump *foundry.ForgeAllocs var dio opcm.DeployImplementationsOutput - var err error err = CallScriptBroadcast( ctx, CallScriptBroadcastOpts{ @@ -35,8 +44,8 @@ func DeployImplementations(ctx context.Context, env *Env, artifactsFS foundry.St Client: env.L1Client, Broadcaster: KeyedBroadcaster, Handler: func(host *script.Host) error { - host.SetEnvVar("IMPL_SALT", st.Create2Salt.Hex()[2:]) host.ImportState(st.SuperchainDeployment.StateDump) + dio, err = opcm.DeployImplementations( host, 
opcm.DeployImplementationsInput{ @@ -49,8 +58,8 @@ func DeployImplementations(ctx context.Context, env *Env, artifactsFS foundry.St Release: intent.ContractsRelease, SuperchainConfigProxy: st.SuperchainDeployment.SuperchainConfigProxyAddress, ProtocolVersionsProxy: st.SuperchainDeployment.ProtocolVersionsProxyAddress, - SuperchainProxyAdmin: st.SuperchainDeployment.ProxyAdminAddress, - StandardVersionsToml: opcm.StandardVersionsMainnetData, + OpcmProxyOwner: st.SuperchainDeployment.ProxyAdminAddress, + StandardVersionsToml: standardVersionsTOML, UseInterop: false, }, ) diff --git a/op-chain-ops/deployer/pipeline/init.go b/op-chain-ops/deployer/pipeline/init.go index a680c7fdb48f..d7009e117269 100644 --- a/op-chain-ops/deployer/pipeline/init.go +++ b/op-chain-ops/deployer/pipeline/init.go @@ -4,6 +4,7 @@ import ( "context" "crypto/rand" "fmt" + "strings" "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opcm" "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" @@ -19,7 +20,7 @@ func IsSupportedStateVersion(version int) bool { return version == 1 } -func Init(ctx context.Context, env *Env, artifactsFS foundry.StatDirFs, intent *state.Intent, st *state.State) error { +func Init(ctx context.Context, env *Env, _ foundry.StatDirFs, intent *state.Intent, st *state.State) error { lgr := env.Logger.New("stage", "init") lgr.Info("initializing pipeline") @@ -35,37 +36,31 @@ func Init(ctx context.Context, env *Env, artifactsFS foundry.StatDirFs, intent * } } - if intent.OPCMAddress != (common.Address{}) { - env.Logger.Info("using provided OPCM address, populating state", "address", intent.OPCMAddress.Hex()) - - if intent.ContractsRelease == "dev" { - env.Logger.Warn("using dev release with existing OPCM, this field will be ignored") - } - - opcmContract := opcm.NewContract(intent.OPCMAddress, env.L1Client) - protocolVersions, err := opcmContract.ProtocolVersions(ctx) + if strings.HasPrefix(intent.ContractsRelease, "op-contracts") { + superCfg, err := 
opcm.SuperchainFor(intent.L1ChainID) if err != nil { - return fmt.Errorf("error getting protocol versions address: %w", err) + return fmt.Errorf("error getting superchain config: %w", err) } - superchainConfig, err := opcmContract.SuperchainConfig(ctx) + + proxyAdmin, err := opcm.ManagerOwnerAddrFor(intent.L1ChainID) if err != nil { - return fmt.Errorf("error getting superchain config address: %w", err) + return fmt.Errorf("error getting superchain proxy admin address: %w", err) } - env.Logger.Debug( - "populating protocol versions and superchain config addresses", - "protocolVersions", protocolVersions.Hex(), - "superchainConfig", superchainConfig.Hex(), - ) - - // The below fields are the only ones required to perform an OP Chain - // deployment via an existing OPCM contract. All the others are used - // for deploying the OPCM itself, which isn't necessary in this case. + + // Have to do this weird pointer thing below because the Superchain Registry defines its + // own Address type. st.SuperchainDeployment = &state.SuperchainDeployment{ - ProtocolVersionsProxyAddress: protocolVersions, - SuperchainConfigProxyAddress: superchainConfig, + ProxyAdminAddress: proxyAdmin, + ProtocolVersionsProxyAddress: common.Address(*superCfg.Config.ProtocolVersionsAddr), + SuperchainConfigProxyAddress: common.Address(*superCfg.Config.SuperchainConfigAddr), + } + + opcmProxy, err := opcm.ManagerImplementationAddrFor(intent.L1ChainID) + if err != nil { + return fmt.Errorf("error getting OPCM proxy address: %w", err) } st.ImplementationsDeployment = &state.ImplementationsDeployment{ - OpcmProxyAddress: intent.OPCMAddress, + OpcmProxyAddress: opcmProxy, } } @@ -81,14 +76,6 @@ func Init(ctx context.Context, env *Env, artifactsFS foundry.StatDirFs, intent * return immutableErr("L1ChainID", st.AppliedIntent.L1ChainID, intent.L1ChainID) } - if st.AppliedIntent.UseFaultProofs != intent.UseFaultProofs { - return immutableErr("useFaultProofs", st.AppliedIntent.UseFaultProofs, 
intent.UseFaultProofs) - } - - if st.AppliedIntent.UseAltDA != intent.UseAltDA { - return immutableErr("useAltDA", st.AppliedIntent.UseAltDA, intent.UseAltDA) - } - if st.AppliedIntent.FundDevAccounts != intent.FundDevAccounts { return immutableErr("fundDevAccounts", st.AppliedIntent.FundDevAccounts, intent.FundDevAccounts) } diff --git a/op-chain-ops/deployer/pipeline/opchain.go b/op-chain-ops/deployer/pipeline/opchain.go index a7bb0d6a96b8..cc375382b2f7 100644 --- a/op-chain-ops/deployer/pipeline/opchain.go +++ b/op-chain-ops/deployer/pipeline/opchain.go @@ -10,7 +10,6 @@ import ( "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/opcm" "github.com/ethereum-optimism/optimism/op-chain-ops/deployer/state" "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" - "github.com/ethereum-optimism/optimism/op-chain-ops/script" "github.com/ethereum/go-ethereum/common" ) @@ -44,55 +43,27 @@ func DeployOPChain(ctx context.Context, env *Env, artifactsFS foundry.StatDirFs, } var dco opcm.DeployOPChainOutput - if intent.OPCMAddress == (common.Address{}) { - err = CallScriptBroadcast( - ctx, - CallScriptBroadcastOpts{ - L1ChainID: big.NewInt(int64(intent.L1ChainID)), - Logger: lgr, - ArtifactsFS: artifactsFS, - Deployer: env.Deployer, - Signer: env.Signer, - Client: env.L1Client, - Broadcaster: KeyedBroadcaster, - Handler: func(host *script.Host) error { - host.ImportState(st.ImplementationsDeployment.StateDump) - - dco, err = opcm.DeployOPChain( - host, - input, - ) - return err - }, - }, - ) - if err != nil { - return fmt.Errorf("error deploying OP chain: %w", err) - } - } else { - lgr.Info("deploying using existing OPCM", "address", intent.OPCMAddress.Hex()) - - bcaster, err := broadcaster.NewKeyedBroadcaster(broadcaster.KeyedBroadcasterOpts{ - Logger: lgr, - ChainID: big.NewInt(int64(intent.L1ChainID)), - Client: env.L1Client, - Signer: env.Signer, - From: env.Deployer, - }) - if err != nil { - return fmt.Errorf("failed to create broadcaster: %w", err) - } - 
dco, err = opcm.DeployOPChainRaw( - ctx, - env.L1Client, - bcaster, - env.Deployer, - artifactsFS, - input, - ) - if err != nil { - return fmt.Errorf("error deploying OP chain: %w", err) - } + lgr.Info("deploying using existing OPCM", "address", st.ImplementationsDeployment.OpcmProxyAddress.Hex()) + bcaster, err := broadcaster.NewKeyedBroadcaster(broadcaster.KeyedBroadcasterOpts{ + Logger: lgr, + ChainID: big.NewInt(int64(intent.L1ChainID)), + Client: env.L1Client, + Signer: env.Signer, + From: env.Deployer, + }) + if err != nil { + return fmt.Errorf("failed to create broadcaster: %w", err) + } + dco, err = opcm.DeployOPChainRaw( + ctx, + env.L1Client, + bcaster, + env.Deployer, + artifactsFS, + input, + ) + if err != nil { + return fmt.Errorf("error deploying OP chain: %w", err) } st.Chains = append(st.Chains, &state.ChainState{ diff --git a/op-chain-ops/deployer/state/deploy_config.go b/op-chain-ops/deployer/state/deploy_config.go index 81801e5865cb..5ea8590f537f 100644 --- a/op-chain-ops/deployer/state/deploy_config.go +++ b/op-chain-ops/deployer/state/deploy_config.go @@ -65,6 +65,13 @@ func DefaultDeployConfig() genesis.DeployConfig { SystemConfigStartBlock: 0, }, }, + FaultProofDeployConfig: genesis.FaultProofDeployConfig{ + FaultGameWithdrawalDelay: 604800, + PreimageOracleMinProposalSize: 126000, + PreimageOracleChallengePeriod: 86400, + ProofMaturityDelaySeconds: 604800, + DisputeGameFinalityDelaySeconds: 302400, + }, } } diff --git a/op-chain-ops/deployer/state/intent.go b/op-chain-ops/deployer/state/intent.go index 755ad6bbba54..b07a6c2acff4 100644 --- a/op-chain-ops/deployer/state/intent.go +++ b/op-chain-ops/deployer/state/intent.go @@ -17,17 +17,11 @@ type Intent struct { SuperchainRoles SuperchainRoles `json:"superchainRoles" toml:"superchainRoles"` - UseFaultProofs bool `json:"useFaultProofs" toml:"useFaultProofs"` - - UseAltDA bool `json:"useAltDA" toml:"useAltDA"` - FundDevAccounts bool `json:"fundDevAccounts" toml:"fundDevAccounts"` 
ContractArtifactsURL *ArtifactsURL `json:"contractArtifactsURL" toml:"contractArtifactsURL"` - ContractsRelease string `json:"contractsVersion" toml:"contractsVersion"` - - OPCMAddress common.Address `json:"opcmAddress" toml:"opcmAddress"` + ContractsRelease string `json:"contractsRelease" toml:"contractsRelease"` Chains []*ChainIntent `json:"chains" toml:"chains"` @@ -43,10 +37,28 @@ func (c *Intent) Check() error { return fmt.Errorf("l1ChainID must be set") } - if c.UseFaultProofs && c.UseAltDA { - return fmt.Errorf("cannot use both fault proofs and alt-DA") + if c.ContractsRelease == "dev" { + return c.checkDev() } + return c.checkProd() +} + +func (c *Intent) Chain(id common.Hash) (*ChainIntent, error) { + for i := range c.Chains { + if c.Chains[i].ID == id { + return c.Chains[i], nil + } + } + + return nil, fmt.Errorf("chain %d not found", id) +} + +func (c *Intent) WriteToFile(path string) error { + return jsonutil.WriteTOML(c, ioutil.ToAtomicFile(path, 0o755)) +} + +func (c *Intent) checkDev() error { if c.SuperchainRoles.ProxyAdminOwner == emptyAddress { return fmt.Errorf("proxyAdminOwner must be set") } @@ -60,28 +72,18 @@ func (c *Intent) Check() error { } if c.ContractArtifactsURL == nil { - return fmt.Errorf("contractArtifactsURL must be set") - } - - if c.ContractsRelease != "dev" && !strings.HasPrefix(c.ContractsRelease, "op-contracts/") { - return fmt.Errorf("contractsVersion must be either the literal \"dev\" or start with \"op-contracts/\"") + return fmt.Errorf("contractArtifactsURL must be set in dev mode") } return nil } -func (c *Intent) Chain(id common.Hash) (*ChainIntent, error) { - for i := range c.Chains { - if c.Chains[i].ID == id { - return c.Chains[i], nil - } +func (c *Intent) checkProd() error { + if !strings.HasPrefix(c.ContractsRelease, "op-contracts/") { + return fmt.Errorf("contractsVersion must be either the literal \"dev\" or start with \"op-contracts/\"") } - return nil, fmt.Errorf("chain %d not found", id) -} - -func (c *Intent) 
WriteToFile(path string) error { - return jsonutil.WriteTOML(c, ioutil.ToAtomicFile(path, 0o755)) + return nil } type SuperchainRoles struct { diff --git a/op-chain-ops/deployer/state/state.go b/op-chain-ops/deployer/state/state.go index 674e06d743a0..bc4d4c6f50e4 100644 --- a/op-chain-ops/deployer/state/state.go +++ b/op-chain-ops/deployer/state/state.go @@ -61,7 +61,7 @@ type SuperchainDeployment struct { SuperchainConfigImplAddress common.Address `json:"superchainConfigImplAddress"` ProtocolVersionsProxyAddress common.Address `json:"protocolVersionsProxyAddress"` ProtocolVersionsImplAddress common.Address `json:"protocolVersionsImplAddress"` - StateDump *foundry.ForgeAllocs `json:"stateDump"` + StateDump *foundry.ForgeAllocs `json:"-"` } type ImplementationsDeployment struct { @@ -76,7 +76,7 @@ type ImplementationsDeployment struct { L1StandardBridgeImplAddress common.Address `json:"l1StandardBridgeImplAddress"` OptimismMintableERC20FactoryImplAddress common.Address `json:"optimismMintableERC20FactoryImplAddress"` DisputeGameFactoryImplAddress common.Address `json:"disputeGameFactoryImplAddress"` - StateDump *foundry.ForgeAllocs `json:"stateDump"` + StateDump *foundry.ForgeAllocs `json:"-"` } type ChainState struct { diff --git a/op-chain-ops/interopgen/deploy.go b/op-chain-ops/interopgen/deploy.go index 6701e9c940c2..7550b599aa94 100644 --- a/op-chain-ops/interopgen/deploy.go +++ b/op-chain-ops/interopgen/deploy.go @@ -169,7 +169,7 @@ func DeploySuperchainToL1(l1Host *script.Host, superCfg *SuperchainConfig) (*Sup Release: superCfg.Implementations.Release, SuperchainConfigProxy: superDeployment.SuperchainConfigProxy, ProtocolVersionsProxy: superDeployment.ProtocolVersionsProxy, - SuperchainProxyAdmin: superDeployment.SuperchainProxyAdmin, + OpcmProxyOwner: superDeployment.SuperchainProxyAdmin, UseInterop: superCfg.Implementations.UseInterop, StandardVersionsToml: opcm.StandardVersionsMainnetData, }) diff --git a/op-chain-ops/script/script.go 
b/op-chain-ops/script/script.go index d156adb67187..8402418ba788 100644 --- a/op-chain-ops/script/script.go +++ b/op-chain-ops/script/script.go @@ -391,12 +391,22 @@ func (h *Host) GetNonce(addr common.Address) uint64 { // when importing. func (h *Host) ImportState(allocs *foundry.ForgeAllocs) { for addr, alloc := range allocs.Accounts { - h.state.SetBalance(addr, uint256.MustFromBig(alloc.Balance), tracing.BalanceChangeUnspecified) - h.state.SetNonce(addr, alloc.Nonce) - h.state.SetCode(addr, alloc.Code) - for key, value := range alloc.Storage { - h.state.SetState(addr, key, value) - } + h.ImportAccount(addr, alloc) + } +} + +func (h *Host) ImportAccount(addr common.Address, account types.Account) { + var balance *uint256.Int + if account.Balance == nil { + balance = uint256.NewInt(0) + } else { + balance = uint256.MustFromBig(account.Balance) + } + h.state.SetBalance(addr, balance, tracing.BalanceChangeUnspecified) + h.state.SetNonce(addr, account.Nonce) + h.state.SetCode(addr, account.Code) + for key, value := range account.Storage { + h.state.SetState(addr, key, value) } } diff --git a/op-service/txmgr/txmgr.go b/op-service/txmgr/txmgr.go index e8e8ae1d1883..4e4c3e633f87 100644 --- a/op-service/txmgr/txmgr.go +++ b/op-service/txmgr/txmgr.go @@ -819,6 +819,12 @@ func (m *SimpleTxManager) increaseGasPrice(ctx context.Context, tx *types.Transa "gasFeeCap", bumpedFee, "gasTipCap", bumpedTip) } + if tx.Gas() > gas { + // Don't bump the gas limit down if the passed-in gas limit is higher than + // what was originally specified. 
+ gas = tx.Gas() + } + var newTx *types.Transaction if tx.Type() == types.BlobTxType { // Blob transactions have an additional blob gas price we must specify, so we must make sure it is diff --git a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol index 5a0e6af0005e..12659dbdc814 100644 --- a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol +++ b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol @@ -8,6 +8,8 @@ import { LibString } from "@solady/utils/LibString.sol"; import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; import { ISystemConfigV160 } from "src/L1/interfaces/ISystemConfigV160.sol"; +import { IL1CrossDomainMessengerV160 } from "src/L1/interfaces/IL1CrossDomainMessengerV160.sol"; +import { IL1StandardBridgeV160 } from "src/L1/interfaces/IL1StandardBridgeV160.sol"; import { Constants } from "src/libraries/Constants.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; @@ -65,6 +67,8 @@ contract DeployImplementationsInput is BaseDeployIO { string internal _standardVersionsToml; + address internal _opcmProxyOwner; + function set(bytes4 _sel, uint256 _value) public { require(_value != 0, "DeployImplementationsInput: cannot set zero value"); @@ -95,6 +99,7 @@ contract DeployImplementationsInput is BaseDeployIO { require(_addr != address(0), "DeployImplementationsInput: cannot set zero address"); if (_sel == this.superchainConfigProxy.selector) _superchainConfigProxy = SuperchainConfig(_addr); else if (_sel == this.protocolVersionsProxy.selector) _protocolVersionsProxy = ProtocolVersions(_addr); + else if (_sel == this.opcmProxyOwner.selector) _opcmProxyOwner = _addr; else revert("DeployImplementationsInput: unknown selector"); } @@ -156,13 +161,9 @@ contract DeployImplementationsInput is BaseDeployIO { return _protocolVersionsProxy; } - function 
superchainProxyAdmin() public returns (ProxyAdmin) { - SuperchainConfig proxy = this.superchainConfigProxy(); - // Can infer the superchainProxyAdmin from the superchainConfigProxy. - vm.prank(address(0)); - ProxyAdmin proxyAdmin = ProxyAdmin(Proxy(payable(address(proxy))).admin()); - require(address(proxyAdmin) != address(0), "DeployImplementationsInput: not set"); - return proxyAdmin; + function opcmProxyOwner() public view returns (address) { + require(address(_opcmProxyOwner) != address(0), "DeployImplementationsInput: not set"); + return _opcmProxyOwner; } } @@ -308,7 +309,7 @@ contract DeployImplementationsOutput is BaseDeployIO { Proxy proxy = Proxy(payable(address(opcmProxy()))); vm.prank(address(0)); address admin = proxy.admin(); - require(admin == address(_dii.superchainProxyAdmin()), "OPCMP-10"); + require(admin == address(_dii.opcmProxyOwner()), "OPCMP-10"); // Then we check the proxy as OPCM. DeployUtils.assertInitialized({ _contractAddress: address(opcmProxy()), _slot: 0, _offset: 0 }); @@ -504,6 +505,42 @@ contract DeployImplementations is Script { }); } + function l1CrossDomainMessengerConfigSetter( + DeployImplementationsInput _dii, + DeployImplementationsOutput _dio + ) + internal + view + virtual + returns (OPContractsManager.ImplementationSetter memory) + { + bytes4 selector = LibString.eq(_dii.release(), "op-contracts/v1.6.0") + ? IL1CrossDomainMessengerV160.initialize.selector + : L1CrossDomainMessenger.initialize.selector; + return OPContractsManager.ImplementationSetter({ + name: "L1CrossDomainMessenger", + info: OPContractsManager.Implementation(address(_dio.l1CrossDomainMessengerImpl()), selector) + }); + } + + function l1StandardBridgeConfigSetter( + DeployImplementationsInput _dii, + DeployImplementationsOutput _dio + ) + internal + view + virtual + returns (OPContractsManager.ImplementationSetter memory) + { + bytes4 selector = LibString.eq(_dii.release(), "op-contracts/v1.6.0") + ? 
IL1StandardBridgeV160.initialize.selector + : L1StandardBridge.initialize.selector; + return OPContractsManager.ImplementationSetter({ + name: "L1StandardBridge", + info: OPContractsManager.Implementation(address(_dio.l1StandardBridgeImpl()), selector) + }); + } + // Deploy and initialize a proxied OPContractsManager. function createOPCMContract( DeployImplementationsInput _dii, @@ -516,7 +553,7 @@ contract DeployImplementations is Script { virtual returns (OPContractsManager opcmProxy_) { - ProxyAdmin proxyAdmin = _dii.superchainProxyAdmin(); + address opcmProxyOwner = _dii.opcmProxyOwner(); vm.broadcast(msg.sender); Proxy proxy = new Proxy(address(msg.sender)); @@ -532,7 +569,7 @@ contract DeployImplementations is Script { address(opcmImpl), abi.encodeWithSelector(opcmImpl.initialize.selector, initializerInputs) ); - proxy.changeAdmin(address(proxyAdmin)); // transfer ownership of Proxy contract to the ProxyAdmin contract + proxy.changeAdmin(address(opcmProxyOwner)); // transfer ownership of Proxy contract to the ProxyAdmin contract vm.stopBroadcast(); opcmProxy_ = OPContractsManager(address(proxy)); @@ -579,18 +616,8 @@ contract DeployImplementations is Script { address(_dio.optimismMintableERC20FactoryImpl()), OptimismMintableERC20Factory.initialize.selector ) }); - setters[4] = OPContractsManager.ImplementationSetter({ - name: "L1CrossDomainMessenger", - info: OPContractsManager.Implementation( - address(_dio.l1CrossDomainMessengerImpl()), L1CrossDomainMessenger.initialize.selector - ) - }); - setters[5] = OPContractsManager.ImplementationSetter({ - name: "L1StandardBridge", - info: OPContractsManager.Implementation( - address(_dio.l1StandardBridgeImpl()), L1StandardBridge.initialize.selector - ) - }); + setters[4] = l1CrossDomainMessengerConfigSetter(_dii, _dio); + setters[5] = l1StandardBridgeConfigSetter(_dii, _dio); setters[6] = OPContractsManager.ImplementationSetter({ name: "DisputeGameFactory", info: OPContractsManager.Implementation( @@ -1036,7 +1063,7 
@@ contract DeployImplementationsInterop is DeployImplementations { override returns (OPContractsManager opcmProxy_) { - ProxyAdmin proxyAdmin = _dii.superchainProxyAdmin(); + address opcmProxyOwner = _dii.opcmProxyOwner(); vm.broadcast(msg.sender); Proxy proxy = new Proxy(address(msg.sender)); @@ -1052,7 +1079,7 @@ contract DeployImplementationsInterop is DeployImplementations { address(opcmImpl), abi.encodeWithSelector(opcmImpl.initialize.selector, initializerInputs) ); - proxy.changeAdmin(address(proxyAdmin)); // transfer ownership of Proxy contract to the ProxyAdmin contract + proxy.changeAdmin(opcmProxyOwner); // transfer ownership of Proxy contract to the ProxyAdmin contract vm.stopBroadcast(); opcmProxy_ = OPContractsManagerInterop(address(proxy)); diff --git a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol index ddf9f39f3023..d4de3636d245 100644 --- a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol @@ -471,7 +471,7 @@ contract DeployOPChainOutput is BaseDeployIO { require(vm.load(address(portal), bytes32(uint256(61))) == bytes32(0)); } - function assertValidDisputeGameFactory(DeployOPChainInput) internal view { + function assertValidDisputeGameFactory(DeployOPChainInput _doi) internal view { DisputeGameFactory factory = disputeGameFactoryProxy(); DeployUtils.assertInitialized({ _contractAddress: address(factory), _slot: 0, _offset: 0 }); @@ -479,7 +479,7 @@ contract DeployOPChainOutput is BaseDeployIO { require( address(factory.gameImpls(GameTypes.PERMISSIONED_CANNON)) == address(permissionedDisputeGame()), "DF-10" ); - require(factory.owner() == address(opChainProxyAdmin()), "DF-20"); + require(factory.owner() == address(_doi.opChainProxyAdminOwner()), "DF-20"); } function assertValidDelayedWETHs(DeployOPChainInput) internal view { diff --git a/packages/contracts-bedrock/semver-lock.json 
b/packages/contracts-bedrock/semver-lock.json index 87d291b2031a..cbd9611d6800 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -32,8 +32,8 @@ "sourceCodeHash": "0xde4df0f9633dc0cdb1c9f634003ea5b0f7c5c1aebc407bc1b2f44c0ecf938649" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0x944deadee322fdbae8a8fffd16deceb3766509cfb54da06adb8aa84473f79f53", - "sourceCodeHash": "0x1a48119cbc0b778a4dd3454179060b71361ba44b61af1ac6398cc9274bb5e89f" + "initCodeHash": "0x7c5d90928ce882ed5360939722271e9af36e81c394e4110ba32864b14c3d78be", + "sourceCodeHash": "0x25372ad554eaeb64d7512e19642210bb3736e4047ea97518b2992b3ab67e1a5d" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xbe2c0c81b3459014f287d8c89cdc0d27dde5d1f44e5d024fa1e4773ddc47c190", diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index 7107fd4fe091..e2707b4204d1 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -128,8 +128,8 @@ contract OPContractsManager is ISemver, Initializable { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.14 - string public constant version = "1.0.0-beta.14"; + /// @custom:semver 1.0.0-beta.15 + string public constant version = "1.0.0-beta.15"; /// @notice Represents the interface version so consumers know how to decode the DeployOutput struct /// that's emitted in the `Deployed` event. Whenever that struct changes, a new version should be used. 
@@ -342,7 +342,7 @@ contract OPContractsManager is ISemver, Initializable { output.disputeGameFactoryProxy.setImplementation( GameTypes.PERMISSIONED_CANNON, IDisputeGame(address(output.permissionedDisputeGame)) ); - output.disputeGameFactoryProxy.transferOwnership(address(output.opChainProxyAdmin)); + output.disputeGameFactoryProxy.transferOwnership(address(_input.roles.opChainProxyAdminOwner)); impl.logic = address(output.anchorStateRegistryImpl); impl.initializer = AnchorStateRegistry.initialize.selector; diff --git a/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessengerV160.sol b/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessengerV160.sol new file mode 100644 index 000000000000..a1023100d92d --- /dev/null +++ b/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessengerV160.sol @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; +import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; +import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; + +/// @notice This interface corresponds to the op-contracts/v1.6.0 release of the L1CrossDomainMessenger +/// contract, which has a semver of 2.3.0 as specified in +/// https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.6.0 +interface IL1CrossDomainMessengerV160 is ICrossDomainMessenger { + function PORTAL() external view returns (address); + function initialize(ISuperchainConfig _superchainConfig, IOptimismPortal _portal) external; + function portal() external view returns (address); + function superchainConfig() external view returns (address); + function systemConfig() external view returns (address); + function version() external view returns (string memory); + + function __constructor__() external; +} diff --git 
a/packages/contracts-bedrock/src/L1/interfaces/IL1StandardBridgeV160.sol b/packages/contracts-bedrock/src/L1/interfaces/IL1StandardBridgeV160.sol new file mode 100644 index 000000000000..b382c4f1ad6d --- /dev/null +++ b/packages/contracts-bedrock/src/L1/interfaces/IL1StandardBridgeV160.sol @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { IStandardBridge } from "src/universal/interfaces/IStandardBridge.sol"; +import { ICrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; +import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; + +/// @notice This interface corresponds to the op-contracts/v1.6.0 release of the L1StandardBridge +/// contract, which has a semver of 2.1.0 as specified in +/// https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.6.0 +interface IL1StandardBridgeV160 is IStandardBridge { + event ERC20DepositInitiated( + address indexed l1Token, + address indexed l2Token, + address indexed from, + address to, + uint256 amount, + bytes extraData + ); + event ERC20WithdrawalFinalized( + address indexed l1Token, + address indexed l2Token, + address indexed from, + address to, + uint256 amount, + bytes extraData + ); + event ETHDepositInitiated(address indexed from, address indexed to, uint256 amount, bytes extraData); + event ETHWithdrawalFinalized(address indexed from, address indexed to, uint256 amount, bytes extraData); + + function depositERC20( + address _l1Token, + address _l2Token, + uint256 _amount, + uint32 _minGasLimit, + bytes memory _extraData + ) + external; + function depositERC20To( + address _l1Token, + address _l2Token, + address _to, + uint256 _amount, + uint32 _minGasLimit, + bytes memory _extraData + ) + external; + function depositETH(uint32 _minGasLimit, bytes memory _extraData) external payable; + function depositETHTo(address _to, uint32 _minGasLimit, bytes 
memory _extraData) external payable; + function finalizeERC20Withdrawal( + address _l1Token, + address _l2Token, + address _from, + address _to, + uint256 _amount, + bytes memory _extraData + ) + external; + function finalizeETHWithdrawal( + address _from, + address _to, + uint256 _amount, + bytes memory _extraData + ) + external + payable; + function initialize(ICrossDomainMessenger _messenger, ISuperchainConfig _superchainConfig) external; + function l2TokenBridge() external view returns (address); + function superchainConfig() external view returns (ISuperchainConfig); + function systemConfig() external view returns (ISystemConfig); + function version() external view returns (string memory); + + function __constructor__() external; +} diff --git a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigV160.sol b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigV160.sol index 6bf3e9a9cb6a..deb0dd2c52ad 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigV160.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigV160.sol @@ -48,8 +48,8 @@ interface ISystemConfigV160 { function gasPayingTokenSymbol() external view returns (string memory symbol_); function initialize( address _owner, - uint32 _basefeeScalar, - uint32 _blobbasefeeScalar, + uint256 _basefeeScalar, + uint256 _blobbasefeeScalar, bytes32 _batcherHash, uint64 _gasLimit, address _unsafeBlockSigner, diff --git a/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol index 1dca71b4ec70..7dd603a55c9d 100644 --- a/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol @@ -69,31 +69,21 @@ contract DeployImplementationsInput_Test is Test { dii.protocolVersionsProxy(); vm.expectRevert("DeployImplementationsInput: not set"); - dii.superchainProxyAdmin(); + dii.opcmProxyOwner(); 
vm.expectRevert("DeployImplementationsInput: not set"); dii.standardVersionsToml(); } - function test_superchainProxyAdmin_whenNotSet_reverts() public { + function test_opcmProxyOwner_whenNotSet_reverts() public { vm.expectRevert("DeployImplementationsInput: not set"); - dii.superchainProxyAdmin(); - - dii.set(dii.superchainConfigProxy.selector, address(superchainConfigProxy)); - vm.expectRevert(); - dii.superchainProxyAdmin(); - - Proxy noAdminProxy = new Proxy(address(0)); - dii.set(dii.superchainConfigProxy.selector, address(noAdminProxy)); - vm.expectRevert("DeployImplementationsInput: not set"); - dii.superchainProxyAdmin(); + dii.opcmProxyOwner(); } - function test_superchainProxyAdmin_succeeds() public { - Proxy proxyWithAdminSet = new Proxy(msg.sender); - dii.set(dii.superchainConfigProxy.selector, address(proxyWithAdminSet)); - ProxyAdmin proxyAdmin = dii.superchainProxyAdmin(); - assertEq(address(msg.sender), address(proxyAdmin), "100"); + function test_opcmProxyOwner_succeeds() public { + dii.set(dii.opcmProxyOwner.selector, address(msg.sender)); + address opcmProxyOwner = dii.opcmProxyOwner(); + assertEq(address(msg.sender), address(opcmProxyOwner), "100"); } } @@ -433,6 +423,7 @@ contract DeployImplementations_Test is Test { dii.set(dii.release.selector, release); dii.set(dii.superchainConfigProxy.selector, address(superchainConfigProxy)); dii.set(dii.protocolVersionsProxy.selector, address(protocolVersionsProxy)); + dii.set(dii.opcmProxyOwner.selector, msg.sender); deployImplementations.run(dii, dio); @@ -445,7 +436,7 @@ contract DeployImplementations_Test is Test { assertEq(release, dii.release(), "525"); assertEq(address(superchainConfigProxy), address(dii.superchainConfigProxy()), "550"); assertEq(address(protocolVersionsProxy), address(dii.protocolVersionsProxy()), "575"); - assertEq(address(superchainProxyAdmin), address(dii.superchainProxyAdmin()), "580"); + assertEq(msg.sender, dii.opcmProxyOwner(), "580"); // Architecture assertions. 
assertEq(address(dio.mipsSingleton().oracle()), address(dio.preimageOracleSingleton()), "600"); diff --git a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol index 26c3977cb5dd..bd17e43bd26b 100644 --- a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol @@ -417,6 +417,7 @@ contract DeployOPChain_TestBase is Test { string.concat(vm.projectRoot(), "/test/fixtures/standard-versions.toml"); string memory standardVersionsToml = vm.readFile(standardVersionsTomlPath); dii.set(dii.standardVersionsToml.selector, standardVersionsToml); + dii.set(dii.opcmProxyOwner.selector, address(1)); deployImplementations.run(dii, dio); // Deploy DeployOpChain, but defer populating the input values to the test suites inheriting this contract. From 289cd71be84c19d441e1def19c4e3a7aac7cbd7d Mon Sep 17 00:00:00 2001 From: Axel Kingsley Date: Fri, 27 Sep 2024 17:12:41 -0500 Subject: [PATCH 071/116] Supervisor: Safety Index (#12154) * fixes * op-supervisor: head db init fix, logging, op-node debug logging * interop: track recent safety data * Early integration and refactor of Views and SafetyIndex * update for rebase * rename RecentSafetyIndex ; reorganize * refactor Pointer method on iterator * logging * Delete unused Tracking Code ; New ChainsDB.Safest * fix naming miss * fix mistaken line deletion * Update op-supervisor/supervisor/backend/safety/safety.go Co-authored-by: protolambda * Add issue numbers to TODO ; Address Proto Comments --------- Co-authored-by: protolambda --- op-e2e/interop/interop_test.go | 2 +- op-e2e/interop/supersystem.go | 2 +- op-node/rollup/interop/interop.go | 3 + op-node/rollup/status/status.go | 4 + op-supervisor/supervisor/backend/backend.go | 56 +--- op-supervisor/supervisor/backend/db/db.go | 261 ++++------------- .../supervisor/backend/db/heads/types.go | 1 + .../supervisor/backend/db/logs/iterator.go | 28 ++ 
.../supervisor/backend/db/logs/state.go | 13 + .../supervisor/backend/db/safety_checkers.go | 153 ---------- .../backend/db/safety_checkers_test.go | 215 -------------- .../supervisor/backend/safety/safety.go | 270 ++++++++++++++++++ .../supervisor/backend/safety/views.go | 91 ++++++ .../supervisor/backend/source/chain.go | 15 +- .../backend/source/chain_processor.go | 10 +- .../backend/source/log_processor.go | 11 +- .../backend/source/log_processor_test.go | 10 +- 17 files changed, 497 insertions(+), 648 deletions(-) delete mode 100644 op-supervisor/supervisor/backend/db/safety_checkers.go delete mode 100644 op-supervisor/supervisor/backend/db/safety_checkers_test.go create mode 100644 op-supervisor/supervisor/backend/safety/safety.go create mode 100644 op-supervisor/supervisor/backend/safety/views.go diff --git a/op-e2e/interop/interop_test.go b/op-e2e/interop/interop_test.go index 65265c22e7c2..0d593673ecce 100644 --- a/op-e2e/interop/interop_test.go +++ b/op-e2e/interop/interop_test.go @@ -95,6 +95,6 @@ func TestInteropTrivial(t *testing.T) { fmt.Println("Result of emitting event:", rec) - time.Sleep(10 * time.Second) + time.Sleep(60 * time.Second) } diff --git a/op-e2e/interop/supersystem.go b/op-e2e/interop/supersystem.go index ffa91bef97f3..3630b87dc896 100644 --- a/op-e2e/interop/supersystem.go +++ b/op-e2e/interop/supersystem.go @@ -471,7 +471,7 @@ func (s *interopE2ESystem) SupervisorClient() *sources.SupervisorClient { // their creation can't be safely skipped or reordered at this time func (s *interopE2ESystem) prepare(t *testing.T, w worldResourcePaths) { s.t = t - s.logger = testlog.Logger(s.t, log.LevelInfo) + s.logger = testlog.Logger(s.t, log.LevelDebug) s.hdWallet = s.prepareHDWallet() s.worldDeployment, s.worldOutput = s.prepareWorld(w) diff --git a/op-node/rollup/interop/interop.go b/op-node/rollup/interop/interop.go index 35b1a86e9635..152020f09c70 100644 --- a/op-node/rollup/interop/interop.go +++ b/op-node/rollup/interop/interop.go @@ -107,6 
+107,7 @@ func (d *InteropDeriver) OnEvent(ev event.Event) bool { d.emitter.Emit(engine.PromoteCrossUnsafeEvent{Ref: candidate}) } case engine.LocalSafeUpdateEvent: + d.log.Debug("Local safe update event", "block", x.Ref.Hash, "derivedFrom", x.DerivedFrom) d.derivedFrom[x.Ref.Hash] = x.DerivedFrom d.emitter.Emit(engine.RequestCrossSafeEvent{}) case engine.CrossSafeUpdateEvent: @@ -132,10 +133,12 @@ func (d *InteropDeriver) OnEvent(ev event.Event) bool { } derivedFrom, ok := d.derivedFrom[candidate.Hash] if !ok { + d.log.Warn("Unknown block candidate source, cannot promote block safety", "block", candidate, "safety", blockSafety) break } switch blockSafety { case types.CrossSafe: + d.log.Info("Verified cross-safe block", "block", candidate, "derivedFrom", derivedFrom) // TODO(#11673): once we have interop reorg support, we need to clean stale blocks also. delete(d.derivedFrom, candidate.Hash) d.emitter.Emit(engine.PromoteSafeEvent{ diff --git a/op-node/rollup/status/status.go b/op-node/rollup/status/status.go index 65121b1294aa..26e9ddbc2197 100644 --- a/op-node/rollup/status/status.go +++ b/op-node/rollup/status/status.go @@ -63,6 +63,7 @@ func (st *StatusTracker) OnEvent(ev event.Event) bool { switch x := ev.(type) { case engine.ForkchoiceUpdateEvent: + st.log.Debug("Forkchoice update", "unsafe", x.UnsafeL2Head, "safe", x.SafeL2Head, "finalized", x.FinalizedL2Head) st.data.UnsafeL2 = x.UnsafeL2Head st.data.SafeL2 = x.SafeL2Head st.data.FinalizedL2 = x.FinalizedL2Head @@ -70,11 +71,14 @@ func (st *StatusTracker) OnEvent(ev event.Event) bool { st.data.UnsafeL2 = x.Unsafe st.data.PendingSafeL2 = x.PendingSafe case engine.CrossUnsafeUpdateEvent: + st.log.Debug("Cross unsafe head updated", "cross_unsafe", x.CrossUnsafe, "local_unsafe", x.LocalUnsafe) st.data.CrossUnsafeL2 = x.CrossUnsafe st.data.UnsafeL2 = x.LocalUnsafe case engine.LocalSafeUpdateEvent: + st.log.Debug("Local safe head updated", "local_safe", x.Ref) st.data.LocalSafeL2 = x.Ref case 
engine.CrossSafeUpdateEvent: + st.log.Debug("Cross safe head updated", "cross_safe", x.CrossSafe, "local_safe", x.LocalSafe) st.data.SafeL2 = x.CrossSafe st.data.LocalSafeL2 = x.LocalSafe case derive.DeriverL1StatusEvent: diff --git a/op-supervisor/supervisor/backend/backend.go b/op-supervisor/supervisor/backend/backend.go index 1f020889f2f1..8216eaa9c0b5 100644 --- a/op-supervisor/supervisor/backend/backend.go +++ b/op-supervisor/supervisor/backend/backend.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "path/filepath" "sync/atomic" "time" @@ -18,7 +17,6 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/config" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/source" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/frontend" @@ -33,8 +31,6 @@ type SupervisorBackend struct { chainMonitors map[types.ChainID]*source.ChainMonitor db *db.ChainsDB - - maintenanceCancel context.CancelFunc } var _ frontend.Backend = (*SupervisorBackend)(nil) @@ -47,14 +43,8 @@ func NewSupervisorBackend(ctx context.Context, logger log.Logger, m Metrics, cfg return nil, err } - // create the head tracker - headTracker, err := heads.NewHeadTracker(logger, filepath.Join(cfg.Datadir, "heads.json")) - if err != nil { - return nil, fmt.Errorf("failed to load existing heads: %w", err) - } - // create the chains db - db := db.NewChainsDB(map[types.ChainID]db.LogStorage{}, headTracker, logger) + db := db.NewChainsDB(map[types.ChainID]db.LogStorage{}, logger) // create an empty map of chain monitors chainMonitors := make(map[types.ChainID]*source.ChainMonitor, len(cfg.L2RPCs)) @@ -145,10 +135,6 @@ func (su *SupervisorBackend) Start(ctx context.Context) 
error { return fmt.Errorf("failed to start chain monitor: %w", err) } } - // start db maintenance loop - maintenanceCtx, cancel := context.WithCancel(context.Background()) - su.db.StartCrossHeadMaintenance(maintenanceCtx) - su.maintenanceCancel = cancel return nil } @@ -158,8 +144,6 @@ func (su *SupervisorBackend) Stop(ctx context.Context) error { if !su.started.CompareAndSwap(true, false) { return errAlreadyStopped } - // signal the maintenance loop to stop - su.maintenanceCancel() // collect errors from stopping chain monitors var errs error for _, monitor := range su.chainMonitors { @@ -200,24 +184,7 @@ func (su *SupervisorBackend) CheckMessage(identifier types.Identifier, payloadHa if err != nil { return types.Invalid, fmt.Errorf("failed to check log: %w", err) } - safest := types.CrossUnsafe - // at this point we have the log entry, and we can check if it is safe by various criteria - for _, checker := range []db.SafetyChecker{ - db.NewSafetyChecker(db.Unsafe, su.db), - db.NewSafetyChecker(db.Safe, su.db), - db.NewSafetyChecker(db.Finalized, su.db), - } { - // check local safety limit first as it's more permissive - localPtr := checker.LocalHead(chainID) - if localPtr.WithinRange(blockNum, uint32(logIdx)) { - safest = checker.LocalSafetyLevel() - } - // check cross safety level - crossPtr := checker.CrossHead(chainID) - if crossPtr.WithinRange(blockNum, uint32(logIdx)) { - safest = checker.CrossSafetyLevel() - } - } + safest := su.db.Safest(chainID, blockNum, uint32(logIdx)) return safest, nil } @@ -243,7 +210,6 @@ func (su *SupervisorBackend) CheckMessages( // The block is considered safe if all logs in the block are safe // this is decided by finding the last log in the block and func (su *SupervisorBackend) CheckBlock(chainID *hexutil.U256, blockHash common.Hash, blockNumber hexutil.Uint64) (types.SafetyLevel, error) { - safest := types.CrossUnsafe // find the last log index in the block id := eth.BlockID{Hash: blockHash, Number: uint64(blockNumber)} _, err 
:= su.db.FindSealedBlock(types.ChainID(*chainID), id) @@ -257,22 +223,6 @@ func (su *SupervisorBackend) CheckBlock(chainID *hexutil.U256, blockHash common. su.logger.Error("failed to scan block", "err", err) return "", err } - // at this point we have the extent of the block, and we can check if it is safe by various criteria - for _, checker := range []db.SafetyChecker{ - db.NewSafetyChecker(db.Unsafe, su.db), - db.NewSafetyChecker(db.Safe, su.db), - db.NewSafetyChecker(db.Finalized, su.db), - } { - // check local safety limit first as it's more permissive - localPtr := checker.LocalHead(types.ChainID(*chainID)) - if localPtr.IsSealed(uint64(blockNumber)) { - safest = checker.LocalSafetyLevel() - } - // check cross safety level - crossPtr := checker.CrossHead(types.ChainID(*chainID)) - if crossPtr.IsSealed(uint64(blockNumber)) { - safest = checker.CrossSafetyLevel() - } - } + safest := su.db.Safest(types.ChainID(*chainID), uint64(blockNumber), 0) return safest, nil } diff --git a/op-supervisor/supervisor/backend/db/db.go b/op-supervisor/supervisor/backend/db/db.go index 6c5e354dd0ab..8459266c0704 100644 --- a/op-supervisor/supervisor/backend/db/db.go +++ b/op-supervisor/supervisor/backend/db/db.go @@ -1,19 +1,17 @@ package db import ( - "context" "errors" "fmt" "io" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/safety" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) @@ -49,39 +47,21 @@ type LogStorage interface { var _ LogStorage = (*logs.DB)(nil) -type HeadsStorage interface { - CrossUnsafe(id types.ChainID) heads.HeadPointer 
- CrossSafe(id types.ChainID) heads.HeadPointer - CrossFinalized(id types.ChainID) heads.HeadPointer - LocalUnsafe(id types.ChainID) heads.HeadPointer - LocalSafe(id types.ChainID) heads.HeadPointer - LocalFinalized(id types.ChainID) heads.HeadPointer - - UpdateCrossUnsafe(id types.ChainID, pointer heads.HeadPointer) error - UpdateCrossSafe(id types.ChainID, pointer heads.HeadPointer) error - UpdateCrossFinalized(id types.ChainID, pointer heads.HeadPointer) error - - UpdateLocalUnsafe(id types.ChainID, pointer heads.HeadPointer) error - UpdateLocalSafe(id types.ChainID, pointer heads.HeadPointer) error - UpdateLocalFinalized(id types.ChainID, pointer heads.HeadPointer) error -} - // ChainsDB is a database that stores logs and heads for multiple chains. // it implements the ChainsStorage interface. type ChainsDB struct { - logDBs map[types.ChainID]LogStorage - heads HeadsStorage - maintenanceReady chan struct{} - logger log.Logger + logDBs map[types.ChainID]LogStorage + safetyIndex safety.SafetyIndex + logger log.Logger } -func NewChainsDB(logDBs map[types.ChainID]LogStorage, heads HeadsStorage, l log.Logger) *ChainsDB { - return &ChainsDB{ - logDBs: logDBs, - heads: heads, - logger: l, - maintenanceReady: make(chan struct{}, 1), +func NewChainsDB(logDBs map[types.ChainID]LogStorage, l log.Logger) *ChainsDB { + ret := &ChainsDB{ + logDBs: logDBs, + logger: l, } + ret.safetyIndex = safety.NewSafetyIndex(l, ret) + return ret } func (db *ChainsDB) AddLogDB(chain types.ChainID, logDB LogStorage) { @@ -91,6 +71,14 @@ func (db *ChainsDB) AddLogDB(chain types.ChainID, logDB LogStorage) { db.logDBs[chain] = logDB } +func (db *ChainsDB) IteratorStartingAt(chain types.ChainID, sealedNum uint64, logIndex uint32) (logs.Iterator, error) { + logDB, ok := db.logDBs[chain] + if !ok { + return nil, fmt.Errorf("%w: %v", ErrUnknownChain, chain) + } + return logDB.IteratorStartingAt(sealedNum, logIndex) +} + // ResumeFromLastSealedBlock prepares the chains db to resume recording events 
after a restart. // It rewinds the database to the last block that is guaranteed to have been fully recorded to the database, // to ensure it can resume recording from the first log of the next block. @@ -110,187 +98,39 @@ func (db *ChainsDB) ResumeFromLastSealedBlock() error { return nil } -// StartCrossHeadMaintenance starts a background process that maintains the cross-heads of the chains -// for now it does not prevent multiple instances of this process from running -func (db *ChainsDB) StartCrossHeadMaintenance(ctx context.Context) { - go func() { - db.logger.Info("cross-head maintenance loop started") - // run the maintenance loop every 1 seconds for now - ticker := time.NewTicker(time.Second * 1) - for { - select { - case <-ctx.Done(): - db.logger.Warn("context cancelled, stopping maintenance loop") - return - case <-ticker.C: - db.logger.Debug("regular maintenance requested") - db.RequestMaintenance() - case <-db.maintenanceReady: - db.logger.Debug("running maintenance") - if err := db.updateAllHeads(); err != nil { - db.logger.Error("failed to update cross-heads", "err", err) - } - } - } - }() -} - // Check calls the underlying logDB to determine if the given log entry is safe with respect to the checker's criteria. 
-func (db *ChainsDB) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (entrydb.EntryIdx, error) { +func (db *ChainsDB) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (common.Hash, error) { logDB, ok := db.logDBs[chain] if !ok { - return 0, fmt.Errorf("%w: %v", ErrUnknownChain, chain) - } - return logDB.Contains(blockNum, logIdx, logHash) -} - -// RequestMaintenance requests that the maintenance loop update the cross-heads -// it does not block if maintenance is already scheduled -func (db *ChainsDB) RequestMaintenance() { - select { - case db.maintenanceReady <- struct{}{}: - return - default: - return + return common.Hash{}, fmt.Errorf("%w: %v", ErrUnknownChain, chain) } -} - -// updateAllHeads updates the cross-heads of all safety levels -// it is called by the maintenance loop -func (db *ChainsDB) updateAllHeads() error { - // create three safety checkers, one for each safety level - unsafeChecker := NewSafetyChecker(Unsafe, db) - safeChecker := NewSafetyChecker(Safe, db) - finalizedChecker := NewSafetyChecker(Finalized, db) - for _, checker := range []SafetyChecker{ - unsafeChecker, - safeChecker, - finalizedChecker} { - if err := db.UpdateCrossHeads(checker); err != nil { - return fmt.Errorf("failed to update cross-heads for safety level %s: %w", checker, err) - } + _, err := logDB.Contains(blockNum, logIdx, logHash) + if err != nil { + return common.Hash{}, err } - return nil + // TODO(#11693): need to get the actual block hash for this log entry for reorg detection + return common.Hash{}, nil } -// UpdateCrossHeadsForChain updates the cross-head for a single chain. -// the provided checker controls which heads are considered. 
-func (db *ChainsDB) UpdateCrossHeadsForChain(chainID types.ChainID, checker SafetyChecker) error { - // start with the xsafe head of the chain - xHead := checker.CrossHead(chainID) - // advance as far as the local head - localHead := checker.LocalHead(chainID) - // get an iterator for the next item - iter, err := db.logDBs[chainID].IteratorStartingAt(xHead.LastSealedBlockNum, xHead.LogsSince) - if err != nil { - return fmt.Errorf("failed to open iterator at sealed block %d logsSince %d for chain %v: %w", - xHead.LastSealedBlockNum, xHead.LogsSince, chainID, err) +// Safest returns the strongest safety level that can be guaranteed for the given log entry. +// it assumes the log entry has already been checked and is valid, this funcion only checks safety levels. +func (db *ChainsDB) Safest(chainID types.ChainID, blockNum uint64, index uint32) (safest types.SafetyLevel) { + safest = types.LocalUnsafe + if crossUnsafe, err := db.safetyIndex.CrossUnsafeL2(chainID); err == nil && crossUnsafe.WithinRange(blockNum, index) { + safest = types.CrossUnsafe } - // track if we updated the cross-head - updated := false - // advance the logDB through all executing messages we can - // this loop will break: - // - when we reach the local head - // - when we reach a message that is not safe - // - if an error occurs - for { - if err := iter.NextInitMsg(); errors.Is(err, logs.ErrFuture) { - // We ran out of events, but there can still be empty blocks. - // Take the last block we've processed, and try to update the x-head with it. - sealedBlockHash, sealedBlockNum, ok := iter.SealedBlock() - if !ok { - break - } - // We can only drop the logsSince value to 0 if the block is not seen. 
- if sealedBlockNum > xHead.LastSealedBlockNum { - // if we would exceed the local head, then abort - if !localHead.WithinRange(sealedBlockNum, 0) { - break - } - xHead = heads.HeadPointer{ - LastSealedBlockHash: sealedBlockHash, - LastSealedBlockNum: sealedBlockNum, - LogsSince: 0, - } - updated = true - } - break - } else if err != nil { - return fmt.Errorf("failed to read next executing message for chain %v: %w", chainID, err) - } - - sealedBlockHash, sealedBlockNum, ok := iter.SealedBlock() - if !ok { - break - } - _, logIdx, ok := iter.InitMessage() - if !ok { - break - } - // if we would exceed the local head, then abort - if !localHead.WithinRange(sealedBlockNum, logIdx) { - break - } - - // Check the executing message, if any - exec := iter.ExecMessage() - if exec != nil { - // Use the checker to determine if this message exists in the canonical chain, - // within the view of the checker's safety level - if err := checker.CheckCross( - types.ChainIDFromUInt64(uint64(exec.Chain)), - exec.BlockNum, - exec.LogIdx, - exec.Hash); err != nil { - if errors.Is(err, logs.ErrConflict) { - db.logger.Error("Bad executing message!", "err", err) - } else if errors.Is(err, logs.ErrFuture) { - db.logger.Warn("Executing message references future message", "err", err) - } else { - db.logger.Error("Failed to check executing message") - } - break - } - } - // if all is well, prepare the x-head update to this point - xHead = heads.HeadPointer{ - LastSealedBlockHash: sealedBlockHash, - LastSealedBlockNum: sealedBlockNum, - LogsSince: logIdx + 1, - } - updated = true + if localSafe, err := db.safetyIndex.LocalSafeL2(chainID); err == nil && localSafe.WithinRange(blockNum, index) { + safest = types.LocalSafe } - // if any chain was updated, we can trigger a maintenance request - // this allows for the maintenance loop to handle cascading updates - // instead of waiting for the next scheduled update - if updated { - db.logger.Info("Promoting cross-head", "chain", chainID, "head", 
xHead, "safety-level", checker.CrossSafetyLevel()) - err = checker.UpdateCross(chainID, xHead) - if err != nil { - return fmt.Errorf("failed to update cross-head for chain %v: %w", chainID, err) - } - db.RequestMaintenance() - } else { - db.logger.Debug("No cross-head update", "chain", chainID, "head", xHead, "safety-level", checker.CrossSafetyLevel()) + if crossSafe, err := db.safetyIndex.LocalSafeL2(chainID); err == nil && crossSafe.WithinRange(blockNum, index) { + safest = types.CrossSafe } - return nil -} - -func (db *ChainsDB) Heads() HeadsStorage { - return db.heads -} - -// UpdateCrossHeads updates the cross-heads of all chains -// based on the provided SafetyChecker. The SafetyChecker is used to determine -// the safety of each log entry in the database, and the cross-head associated with it. -func (db *ChainsDB) UpdateCrossHeads(checker SafetyChecker) error { - for chainID := range db.logDBs { - err := db.UpdateCrossHeadsForChain(chainID, checker) - if err != nil { - return err + if finalized, err := db.safetyIndex.FinalizedL2(chainID); err == nil { + if finalized.Number >= blockNum { + safest = types.Finalized } } - return nil + return } func (db *ChainsDB) FindSealedBlock(chain types.ChainID, block eth.BlockID) (nextEntry entrydb.EntryIdx, err error) { @@ -312,20 +152,35 @@ func (db *ChainsDB) LatestBlockNum(chain types.ChainID) (num uint64, ok bool) { return logDB.LatestSealedBlockNum() } -func (db *ChainsDB) SealBlock(chain types.ChainID, parentHash common.Hash, block eth.BlockID, timestamp uint64) error { +func (db *ChainsDB) AddLog( + chain types.ChainID, + logHash common.Hash, + parentBlock eth.BlockID, + logIdx uint32, + execMsg *types.ExecutingMessage) error { logDB, ok := db.logDBs[chain] if !ok { return fmt.Errorf("%w: %v", ErrUnknownChain, chain) } - return logDB.SealBlock(parentHash, block, timestamp) + return logDB.AddLog(logHash, parentBlock, logIdx, execMsg) } -func (db *ChainsDB) AddLog(chain types.ChainID, logHash common.Hash, parentBlock 
eth.BlockID, logIdx uint32, execMsg *types.ExecutingMessage) error { +func (db *ChainsDB) SealBlock( + chain types.ChainID, + block eth.L2BlockRef) error { logDB, ok := db.logDBs[chain] if !ok { return fmt.Errorf("%w: %v", ErrUnknownChain, chain) } - return logDB.AddLog(logHash, parentBlock, logIdx, execMsg) + err := logDB.SealBlock(block.ParentHash, block.ID(), block.Time) + if err != nil { + return fmt.Errorf("failed to seal block %v: %w", block, err) + } + err = db.safetyIndex.UpdateLocalUnsafe(chain, block) + if err != nil { + return fmt.Errorf("failed to update local-unsafe: %w", err) + } + return nil } func (db *ChainsDB) Rewind(chain types.ChainID, headBlockNum uint64) error { diff --git a/op-supervisor/supervisor/backend/db/heads/types.go b/op-supervisor/supervisor/backend/db/heads/types.go index 3e54593e33c7..7db0bff2d106 100644 --- a/op-supervisor/supervisor/backend/db/heads/types.go +++ b/op-supervisor/supervisor/backend/db/heads/types.go @@ -13,6 +13,7 @@ type HeadPointer struct { // LastSealedBlockHash is the last fully-processed block LastSealedBlockHash common.Hash LastSealedBlockNum uint64 + LastSealedTimestamp uint64 // Number of logs that have been verified since the LastSealedBlock. // These logs are contained in the block that builds on top of the LastSealedBlock. 
diff --git a/op-supervisor/supervisor/backend/db/logs/iterator.go b/op-supervisor/supervisor/backend/db/logs/iterator.go index 4b3bd1b65908..f9e65c41e890 100644 --- a/op-supervisor/supervisor/backend/db/logs/iterator.go +++ b/op-supervisor/supervisor/backend/db/logs/iterator.go @@ -8,11 +8,13 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) type IteratorState interface { NextIndex() entrydb.EntryIdx + HeadPointer() (heads.HeadPointer, error) SealedBlock() (hash common.Hash, num uint64, ok bool) InitMessage() (hash common.Hash, logIndex uint32, ok bool) ExecMessage() *types.ExecutingMessage @@ -23,6 +25,7 @@ type Iterator interface { NextInitMsg() error NextExecMsg() error NextBlock() error + TraverseConditional(traverseConditionalFn) error IteratorState } @@ -32,6 +35,8 @@ type iterator struct { entriesRead int64 } +type traverseConditionalFn func(state IteratorState) error + // End traverses the iterator to the end of the DB. // It does not return io.EOF or ErrFuture. func (i *iterator) End() error { @@ -105,6 +110,25 @@ func (i *iterator) NextBlock() error { } } +func (i *iterator) TraverseConditional(fn traverseConditionalFn) error { + var snapshot logContext + for { + snapshot = i.current // copy the iterator state + _, err := i.next() + if err != nil { + i.current = snapshot + return err + } + if i.current.need != 0 { // skip intermediate states + continue + } + if err := fn(&i.current); err != nil { + i.current = snapshot + return err + } + } +} + // Read and apply the next entry. 
func (i *iterator) next() (entrydb.EntryType, error) { index := i.current.nextEntryIndex @@ -142,3 +166,7 @@ func (i *iterator) InitMessage() (hash common.Hash, logIndex uint32, ok bool) { func (i *iterator) ExecMessage() *types.ExecutingMessage { return i.current.ExecMessage() } + +func (i *iterator) HeadPointer() (heads.HeadPointer, error) { + return i.current.HeadPointer() +} diff --git a/op-supervisor/supervisor/backend/db/logs/state.go b/op-supervisor/supervisor/backend/db/logs/state.go index bb00762acc2e..df63f96e3599 100644 --- a/op-supervisor/supervisor/backend/db/logs/state.go +++ b/op-supervisor/supervisor/backend/db/logs/state.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) @@ -126,6 +127,18 @@ func (l *logContext) ExecMessage() *types.ExecutingMessage { return nil } +func (l *logContext) HeadPointer() (heads.HeadPointer, error) { + if l.need != 0 { + return heads.HeadPointer{}, errors.New("cannot provide head pointer while state is incomplete") + } + return heads.HeadPointer{ + LastSealedBlockHash: l.blockHash, + LastSealedBlockNum: l.blockNum, + LastSealedTimestamp: l.timestamp, + LogsSince: l.logsSince, + }, nil +} + // ApplyEntry applies an entry on top of the current state. 
func (l *logContext) ApplyEntry(entry entrydb.Entry) error { // Wrap processEntry to add common useful error message info diff --git a/op-supervisor/supervisor/backend/db/safety_checkers.go b/op-supervisor/supervisor/backend/db/safety_checkers.go deleted file mode 100644 index cbf4e3ddd6d7..000000000000 --- a/op-supervisor/supervisor/backend/db/safety_checkers.go +++ /dev/null @@ -1,153 +0,0 @@ -package db - -import ( - "fmt" - - "github.com/ethereum/go-ethereum/common" - - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" -) - -const ( - Unsafe = "unsafe" - Safe = "safe" - Finalized = "finalized" -) - -// SafetyChecker is an interface for checking the safety of a log entry -// it maintains a consistent view between local and cross chain for a given safety level -type SafetyChecker interface { - LocalHead(chainID types.ChainID) heads.HeadPointer - CrossHead(chainID types.ChainID) heads.HeadPointer - CheckLocal(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error - CheckCross(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error - UpdateLocal(chain types.ChainID, pointer heads.HeadPointer) error - UpdateCross(chain types.ChainID, pointer heads.HeadPointer) error - String() string - LocalSafetyLevel() types.SafetyLevel - CrossSafetyLevel() types.SafetyLevel -} - -// NewSafetyChecker creates a new SafetyChecker of the given type -func NewSafetyChecker(t types.SafetyLevel, chainsDB *ChainsDB) SafetyChecker { - return NewChecker(t, chainsDB) -} - -// check checks if the log entry is safe, provided a local head for the chain -// it is used by the individual SafetyCheckers to determine if a log entry is safe -func check( - chainsDB *ChainsDB, - head heads.HeadPointer, - chain types.ChainID, - blockNum uint64, - logIdx uint32, - logHash 
common.Hash) error { - - // for the Check to be valid, the log must: - // 1. have the expected logHash at the indicated blockNum and logIdx - _, err := chainsDB.logDBs[chain].Contains(blockNum, logIdx, logHash) - if err != nil { - return err - } - // 2. be within the range of the given head - if !head.WithinRange(blockNum, logIdx) { - return logs.ErrFuture - } - return nil -} - -// checker is a composition of accessor and update functions for a given safety level. -// they implement the SafetyChecker interface. -// checkers can be made with NewChecker. -type checker struct { - chains *ChainsDB - localSafety types.SafetyLevel - crossSafety types.SafetyLevel - updateCross func(chain types.ChainID, pointer heads.HeadPointer) error - updateLocal func(chain types.ChainID, pointer heads.HeadPointer) error - localHead func(chain types.ChainID) heads.HeadPointer - crossHead func(chain types.ChainID) heads.HeadPointer - checkCross func(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error - checkLocal func(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error -} - -func (c *checker) String() string { - return fmt.Sprintf("%s+%s", c.localSafety.String(), c.crossSafety.String()) -} - -func (c *checker) LocalSafetyLevel() types.SafetyLevel { - return c.localSafety -} - -func (c *checker) CrossSafetyLevel() types.SafetyLevel { - return c.crossSafety -} - -func (c *checker) UpdateCross(chain types.ChainID, pointer heads.HeadPointer) error { - return c.updateCross(chain, pointer) -} -func (c *checker) UpdateLocal(chain types.ChainID, pointer heads.HeadPointer) error { - return c.updateLocal(chain, pointer) -} -func (c *checker) LocalHead(chain types.ChainID) heads.HeadPointer { - return c.localHead(chain) -} -func (c *checker) CrossHead(chain types.ChainID) heads.HeadPointer { - return c.crossHead(chain) -} -func (c *checker) CheckCross(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error { - return 
c.checkCross(chain, blockNum, logIdx, logHash) -} -func (c *checker) CheckLocal(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error { - return c.checkLocal(chain, blockNum, logIdx, logHash) -} - -func NewChecker(t types.SafetyLevel, c *ChainsDB) SafetyChecker { - // checkWith creates a function which takes a chain-getter and returns a function that returns the head for the chain - checkWith := func(getHead func(chain types.ChainID) heads.HeadPointer) func(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error { - return func(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error { - return check(c, getHead(chain), chain, blockNum, logIdx, logHash) - } - } - switch t { - case Unsafe: - return &checker{ - chains: c, - localSafety: types.LocalUnsafe, - crossSafety: types.CrossUnsafe, - updateCross: c.heads.UpdateCrossUnsafe, - updateLocal: c.heads.UpdateLocalUnsafe, - crossHead: c.heads.CrossUnsafe, - localHead: c.heads.LocalUnsafe, - checkCross: checkWith(c.heads.CrossUnsafe), - checkLocal: checkWith(c.heads.LocalUnsafe), - } - case Safe: - return &checker{ - chains: c, - localSafety: types.LocalSafe, - crossSafety: types.CrossSafe, - updateCross: c.heads.UpdateCrossSafe, - updateLocal: c.heads.UpdateLocalSafe, - crossHead: c.heads.CrossSafe, - localHead: c.heads.LocalSafe, - checkCross: checkWith(c.heads.CrossSafe), - checkLocal: checkWith(c.heads.LocalSafe), - } - case Finalized: - return &checker{ - chains: c, - localSafety: types.Finalized, - crossSafety: types.Finalized, - updateCross: c.heads.UpdateCrossFinalized, - updateLocal: c.heads.UpdateLocalFinalized, - crossHead: c.heads.CrossFinalized, - localHead: c.heads.LocalFinalized, - checkCross: checkWith(c.heads.CrossFinalized), - checkLocal: checkWith(c.heads.LocalFinalized), - } - } - return &checker{} -} diff --git a/op-supervisor/supervisor/backend/db/safety_checkers_test.go 
b/op-supervisor/supervisor/backend/db/safety_checkers_test.go deleted file mode 100644 index fa0954bc6b65..000000000000 --- a/op-supervisor/supervisor/backend/db/safety_checkers_test.go +++ /dev/null @@ -1,215 +0,0 @@ -package db - -/* -import ( - "errors" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-service/testlog" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" -) - -// TestHeadsForChain tests the heads for a chain, -// confirming the Unsafe, Safe and Finalized all return the correct head for the chain. -// and confirming that the chainID matters when finding the value -func TestHeadsForChain(t *testing.T) { - h := heads.NewHeads() - chainHeads := heads.ChainHeads{ - Unsafe: entrydb.EntryIdx(1), - CrossUnsafe: entrydb.EntryIdx(2), - LocalSafe: entrydb.EntryIdx(3), - CrossSafe: entrydb.EntryIdx(4), - LocalFinalized: entrydb.EntryIdx(5), - CrossFinalized: entrydb.EntryIdx(6), - } - h.Put(types.ChainIDFromUInt64(1), chainHeads) - chainsDB := NewChainsDB(nil, &stubHeadStorage{h}, testlog.Logger(t, log.LevelDebug)) - tcases := []struct { - name string - chainID types.ChainID - checkerType types.SafetyLevel - expectedLocal entrydb.EntryIdx - expectedCross entrydb.EntryIdx - }{ - { - "Unsafe Head", - types.ChainIDFromUInt64(1), - Unsafe, - entrydb.EntryIdx(1), - entrydb.EntryIdx(2), - }, - { - "Safe Head", - types.ChainIDFromUInt64(1), - Safe, - entrydb.EntryIdx(3), - entrydb.EntryIdx(4), - }, - { - "Finalized Head", - types.ChainIDFromUInt64(1), - Finalized, - entrydb.EntryIdx(5), - entrydb.EntryIdx(6), - }, - { - "Incorrect Chain", - 
types.ChainIDFromUInt64(100), - Safe, - entrydb.EntryIdx(0), - entrydb.EntryIdx(0), - }, - } - - for _, c := range tcases { - t.Run(c.name, func(t *testing.T) { - checker := NewSafetyChecker(c.checkerType, chainsDB) - localHead := checker.LocalHeadForChain(c.chainID) - crossHead := checker.CrossHeadForChain(c.chainID) - require.Equal(t, c.expectedLocal, localHead) - require.Equal(t, c.expectedCross, crossHead) - }) - } -} - -func TestCheck(t *testing.T) { - h := heads.NewHeads() - chainHeads := heads.ChainHeads{ - Unsafe: entrydb.EntryIdx(6), - CrossUnsafe: entrydb.EntryIdx(5), - LocalSafe: entrydb.EntryIdx(4), - CrossSafe: entrydb.EntryIdx(3), - LocalFinalized: entrydb.EntryIdx(2), - CrossFinalized: entrydb.EntryIdx(1), - } - h.Put(types.ChainIDFromUInt64(1), chainHeads) - - // the logStore contains just a single stubbed log DB - logDB := &stubLogDB{} - logsStore := map[types.ChainID]LogStorage{ - types.ChainIDFromUInt64(1): logDB, - } - - chainsDB := NewChainsDB(logsStore, &stubHeadStorage{h}, testlog.Logger(t, log.LevelDebug)) - - tcases := []struct { - name string - checkerType types.SafetyLevel - chainID types.ChainID - blockNum uint64 - logIdx uint32 - loghash common.Hash - containsResponse containsResponse - expected bool - }{ - { - // confirm that checking Unsafe uses the unsafe head, - // and that we can find logs even *at* the unsafe head index - "Unsafe Log at Head", - Unsafe, - types.ChainIDFromUInt64(1), - 1, - 1, - common.Hash{1, 2, 3}, - containsResponse{entrydb.EntryIdx(6), nil}, - true, - }, - { - // confirm that checking the Safe head works - "Safe Log", - Safe, - types.ChainIDFromUInt64(1), - 1, - 1, - common.Hash{1, 2, 3}, - containsResponse{entrydb.EntryIdx(3), nil}, - true, - }, - { - // confirm that checking the Finalized head works - "Finalized Log", - Finalized, - types.ChainIDFromUInt64(1), - 1, - 1, - common.Hash{1, 2, 3}, - containsResponse{entrydb.EntryIdx(1), nil}, - true, - }, - { - // confirm that when exists is false, we return 
false - "Does not Exist", - Safe, - types.ChainIDFromUInt64(1), - 1, - 1, - common.Hash{1, 2, 3}, - containsResponse{entrydb.EntryIdx(1), logs.ErrConflict}, - false, - }, - { - // confirm that when a head is out of range, we return false - "Unsafe Out of Range", - Unsafe, - types.ChainIDFromUInt64(1), - 1, - 1, - common.Hash{1, 2, 3}, - containsResponse{entrydb.EntryIdx(100), nil}, - false, - }, - { - // confirm that when a head is out of range, we return false - "Safe Out of Range", - Safe, - types.ChainIDFromUInt64(1), - 1, - 1, - common.Hash{1, 2, 3}, - containsResponse{entrydb.EntryIdx(5), nil}, - false, - }, - { - // confirm that when a head is out of range, we return false - "Finalized Out of Range", - Finalized, - types.ChainIDFromUInt64(1), - 1, - 1, - common.Hash{1, 2, 3}, - containsResponse{entrydb.EntryIdx(3), nil}, - false, - }, - { - // confirm that when Contains returns an error, we return false - "Error", - Safe, - types.ChainIDFromUInt64(1), - 1, - 1, - common.Hash{1, 2, 3}, - containsResponse{entrydb.EntryIdx(0), errors.New("error")}, - false, - }, - } - - for _, c := range tcases { - t.Run(c.name, func(t *testing.T) { - // rig the logStore to return the expected response - logDB.containsResponse = c.containsResponse - checker := NewSafetyChecker(c.checkerType, chainsDB) - r := checker.Check(c.chainID, c.blockNum, c.logIdx, c.loghash) - // confirm that the expected outcome is correct - require.Equal(t, c.expected, r) - }) - } -} -*/ diff --git a/op-supervisor/supervisor/backend/safety/safety.go b/op-supervisor/supervisor/backend/safety/safety.go new file mode 100644 index 000000000000..c7828336ba57 --- /dev/null +++ b/op-supervisor/supervisor/backend/safety/safety.go @@ -0,0 +1,270 @@ +package safety + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" + 
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +type SafetyIndex interface { + // Updaters for the latest local safety status of each chain + UpdateLocalUnsafe(chainID types.ChainID, ref eth.L2BlockRef) error + UpdateLocalSafe(chainID types.ChainID, at eth.L1BlockRef, ref eth.L2BlockRef) error + UpdateFinalizeL1(ref eth.L1BlockRef) error + + // Getters for the latest safety status of each chain + UnsafeL2(chainID types.ChainID) (heads.HeadPointer, error) + CrossUnsafeL2(chainID types.ChainID) (heads.HeadPointer, error) + LocalSafeL2(chainID types.ChainID) (heads.HeadPointer, error) + CrossSafeL2(chainID types.ChainID) (heads.HeadPointer, error) + // We only finalize on full L2 block boundaries, hence not a heads.HeadPointer return. + FinalizedL2(chainId types.ChainID) (eth.BlockID, error) +} + +type ChainsDBClient interface { + IteratorStartingAt(chainID types.ChainID, sealedNum uint64, logIndex uint32) (logs.Iterator, error) + Check(chainID types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (h common.Hash, err error) +} + +type safetyIndex struct { + log log.Logger + + chains ChainsDBClient + + unsafe map[types.ChainID]*View + safe map[types.ChainID]*View + finalized map[types.ChainID]eth.BlockID + + // remember what each non-finalized L2 block is derived from + derivedFrom map[types.ChainID]map[common.Hash]eth.L1BlockRef + + // the last received L1 finality signal. 
+ finalizedL1 eth.L1BlockRef +} + +func NewSafetyIndex(log log.Logger, chains ChainsDBClient) *safetyIndex { + return &safetyIndex{ + log: log, + chains: chains, + unsafe: make(map[types.ChainID]*View), + safe: make(map[types.ChainID]*View), + finalized: make(map[types.ChainID]eth.BlockID), + derivedFrom: make(map[types.ChainID]map[common.Hash]eth.L1BlockRef), + } +} + +// UpdateLocalUnsafe updates the local-unsafe view for the given chain, and advances the cross-unsafe status. +func (r *safetyIndex) UpdateLocalUnsafe(chainID types.ChainID, ref eth.L2BlockRef) error { + view, ok := r.safe[chainID] + if !ok { + iter, err := r.chains.IteratorStartingAt(chainID, ref.Number, 0) + if err != nil { + return fmt.Errorf("failed to open iterator for chain %s block %d", chainID, ref.Number) + } + view = &View{ + chainID: chainID, + iter: iter, + localView: heads.HeadPointer{ + LastSealedBlockHash: ref.Hash, + LastSealedBlockNum: ref.Number, + LastSealedTimestamp: ref.Time, + LogsSince: 0, + }, + localDerivedFrom: eth.L1BlockRef{}, + validWithinView: r.ValidWithinUnsafeView, + } + r.unsafe[chainID] = view + } else if err := view.UpdateLocal(eth.L1BlockRef{}, ref); err != nil { + return fmt.Errorf("failed to update local-unsafe: %w", err) + } + local, _ := r.unsafe[chainID].Local() + r.log.Debug("Updated local unsafe head", "chainID", chainID, "local", local) + r.advanceCrossUnsafe() + return nil +} + +// advanceCrossUnsafe calls Process on all cross-unsafe views. +func (r *safetyIndex) advanceCrossUnsafe() { + for chainID, view := range r.unsafe { + if err := view.Process(); err != nil { + r.log.Error("Failed to update cross-unsafe view", "chain", chainID, "err", err) + } + cross, _ := r.unsafe[chainID].Cross() + r.log.Debug("Updated cross unsafe head", "chainID", chainID, "cross", cross) + } +} + +// UpdateLocalSafe updates the local-safe view for the given chain, and advances the cross-safe status. 
+func (r *safetyIndex) UpdateLocalSafe( + chainID types.ChainID, at eth.L1BlockRef, ref eth.L2BlockRef) error { + view, ok := r.safe[chainID] + if !ok { + iter, err := r.chains.IteratorStartingAt(chainID, ref.Number, 0) + if err != nil { + return fmt.Errorf("failed to open iterator for chain %s block %d", chainID, ref.Number) + } + view = &View{ + chainID: chainID, + iter: iter, + localView: heads.HeadPointer{ + LastSealedBlockHash: ref.Hash, + LastSealedBlockNum: ref.Number, + LastSealedTimestamp: ref.Time, + LogsSince: 0, + }, + localDerivedFrom: at, + validWithinView: r.ValidWithinSafeView, + } + r.safe[chainID] = view + } else if err := view.UpdateLocal(at, ref); err != nil { + return fmt.Errorf("failed to update local-safe: %w", err) + } + + // register what this L2 block is derived from + m, ok := r.derivedFrom[chainID] + if !ok { + m = make(map[common.Hash]eth.L1BlockRef) + r.derivedFrom[chainID] = m + } + m[ref.Hash] = at + local, _ := r.safe[chainID].Local() + r.log.Debug("Updated local safe head", "chainID", chainID, "local", local) + r.advanceCrossSafe() + return nil +} + +// advanceCrossSafe calls Process on all cross-safe views, and advances the finalized safety status. +func (r *safetyIndex) advanceCrossSafe() { + for chainID, view := range r.safe { + if err := view.Process(); err != nil { + r.log.Error("Failed to update cross-safe view", "chain", chainID, "err", err) + } + cross, _ := r.safe[chainID].Cross() + r.log.Debug("Updated local safe head", "chainID", chainID, "cross", cross) + } + r.advanceFinalized() +} + +// UpdateFinalizeL1 updates the finalized L1 block, and advances the finalized safety status. 
+func (r *safetyIndex) UpdateFinalizeL1(ref eth.L1BlockRef) error { + if ref.Number <= r.finalizedL1.Number { + return fmt.Errorf("ignoring old L1 finality signal of %s, already have %s", ref, r.finalizedL1) + } + r.finalizedL1 = ref + r.log.Debug("Updated L1 finalized head", "L1finalized", ref) + r.advanceFinalized() + return nil +} + +// advanceFinalized should be called whenever the finalized L1 block, or the cross-safe history, changes. +// This then promotes the irreversible cross-safe L2 blocks to a finalized safety status. +func (r *safetyIndex) advanceFinalized() { + // Whatever was considered cross-safe at the finalized block-height can + // now be considered finalized, since the inputs have become irreversible. + for chainID, view := range r.safe { + crossSafe, err := view.Cross() + if err != nil { + r.log.Info("Failed to get cross-safe data, cannot finalize", "chain", chainID, "err", err) + continue + } + // TODO(#12184): we need to consider older cross-safe data, + // if we want to finalize something at all on longer lagging finality signal. + // Could consider just iterating over all derivedFrom contents? + l1Dep := r.derivedFrom[chainID][crossSafe.LastSealedBlockHash] + if l1Dep.Number < r.finalizedL1.Number { + r.finalized[chainID] = eth.BlockID{Hash: crossSafe.LastSealedBlockHash, Number: crossSafe.LastSealedBlockNum} + finalized := r.finalized[chainID] + r.log.Debug("Updated finalized head", "chainID", chainID, "finalized", finalized) + } + } +} + +// UnsafeL2 returns the latest unsafe L2 block of the given chain. +func (r *safetyIndex) UnsafeL2(chainID types.ChainID) (heads.HeadPointer, error) { + view, ok := r.unsafe[chainID] + if !ok { + return heads.HeadPointer{}, fmt.Errorf("no unsafe data for chain %s", chainID) + } + return view.Local() +} + +// CrossUnsafeL2 returns the latest cross-unsafe L2 block of the given chain. 
+func (r *safetyIndex) CrossUnsafeL2(chainID types.ChainID) (heads.HeadPointer, error) { + view, ok := r.unsafe[chainID] + if !ok { + return heads.HeadPointer{}, fmt.Errorf("no cross-unsafe data for chain %s", chainID) + } + return view.Cross() +} + +// LocalSafeL2 returns the latest local-safe L2 block of the given chain. +func (r *safetyIndex) LocalSafeL2(chainID types.ChainID) (heads.HeadPointer, error) { + view, ok := r.safe[chainID] + if !ok { + return heads.HeadPointer{}, fmt.Errorf("no local-safe data for chain %s", chainID) + } + return view.Local() +} + +// CrossSafeL2 returns the latest cross-safe L2 block of the given chain. +func (r *safetyIndex) CrossSafeL2(chainID types.ChainID) (heads.HeadPointer, error) { + view, ok := r.safe[chainID] + if !ok { + return heads.HeadPointer{}, fmt.Errorf("no cross-safe data for chain %s", chainID) + } + return view.Cross() +} + +// FinalizedL2 returns the latest finalized L2 block of the given chain. +func (r *safetyIndex) FinalizedL2(chainId types.ChainID) (eth.BlockID, error) { + finalized, ok := r.finalized[chainId] + if !ok { + return eth.BlockID{}, fmt.Errorf("not seen finalized data of chain %s at finalized L1 block %s", chainId, r.finalizedL1) + } + return finalized, nil +} + +// ValidWithinUnsafeView checks if the given executing message is in the database. +// unsafe view is meant to represent all of the database, and so no boundary checks are needed. +func (r *safetyIndex) ValidWithinUnsafeView(_ uint64, execMsg *types.ExecutingMessage) error { + execChainID := types.ChainIDFromUInt64(uint64(execMsg.Chain)) + _, err := r.chains.Check(execChainID, execMsg.BlockNum, execMsg.LogIdx, execMsg.Hash) + return err +} + +// ValidWithinSafeView checks if the given executing message is within the database, +// and within the L1 view of the caller. 
+func (r *safetyIndex) ValidWithinSafeView(l1View uint64, execMsg *types.ExecutingMessage) error { + execChainID := types.ChainIDFromUInt64(uint64(execMsg.Chain)) + + // Check that the initiating message, which was pulled in by the executing message, + // does indeed exist. And in which L2 block it exists (if any). + l2BlockHash, err := r.chains.Check(execChainID, execMsg.BlockNum, execMsg.LogIdx, execMsg.Hash) + if err != nil { + return err + } + // if the executing message falls within the execFinalized range, then nothing to check + execFinalized, ok := r.finalized[execChainID] + if ok && execFinalized.Number > execMsg.BlockNum { + return nil + } + // check if the L1 block of the executing message is known + execL1Block, ok := r.derivedFrom[execChainID][l2BlockHash] + if !ok { + return logs.ErrFuture // TODO(#12185) need to distinguish between same-data future, and new-data future + } + // check if the L1 block is within the view + if execL1Block.Number > l1View { + return fmt.Errorf("exec message depends on L2 block %s:%d, derived from L1 block %s, not within view yet: %w", + l2BlockHash, execMsg.BlockNum, execL1Block, logs.ErrFuture) + } + return nil +} + +var _ SafetyIndex = (*safetyIndex)(nil) diff --git a/op-supervisor/supervisor/backend/safety/views.go b/op-supervisor/supervisor/backend/safety/views.go new file mode 100644 index 000000000000..c9393758fad5 --- /dev/null +++ b/op-supervisor/supervisor/backend/safety/views.go @@ -0,0 +1,91 @@ +package safety + +import ( + "errors" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +type View struct { + chainID types.ChainID + + iter logs.Iterator + + localView heads.HeadPointer + localDerivedFrom eth.L1BlockRef + + validWithinView func(l1View uint64, execMsg 
*types.ExecutingMessage) error +} + +func (vi *View) Cross() (heads.HeadPointer, error) { + return vi.iter.HeadPointer() +} + +func (vi *View) Local() (heads.HeadPointer, error) { + if vi.localView == (heads.HeadPointer{}) { + return heads.HeadPointer{}, logs.ErrFuture + } + return vi.localView, nil +} + +func (vi *View) UpdateLocal(at eth.L1BlockRef, ref eth.L2BlockRef) error { + vi.localView = heads.HeadPointer{ + LastSealedBlockHash: ref.Hash, + LastSealedBlockNum: ref.Number, + //LastSealedTimestamp: ref.Time, + LogsSince: 0, + } + vi.localDerivedFrom = at + + // TODO(#11693): reorg check against existing DB + // TODO(#12186): localView may be larger than what DB contents we have + return nil +} + +func (vi *View) Process() error { + err := vi.iter.TraverseConditional(func(state logs.IteratorState) error { + hash, num, ok := state.SealedBlock() + if !ok { + return logs.ErrFuture // maybe a more specific error for no-genesis case? + } + // TODO(#11693): reorg check in the future. To make sure that what we traverse is still canonical. + _ = hash + // check if L2 block is within view + if !vi.localView.WithinRange(num, 0) { + return logs.ErrFuture + } + _, initLogIndex, ok := state.InitMessage() + if !ok { + return nil // no readable message, just an empty block + } + // check if the message is within view + if !vi.localView.WithinRange(num, initLogIndex) { + return logs.ErrFuture + } + // check if it is an executing message. If so, check the dependency + if execMsg := state.ExecMessage(); execMsg == nil { + // Check if executing message is within cross L2 view, + // relative to the L1 view of current message. + // And check if the message is valid to execute at all + // (i.e. if it exists on the initiating side). + // TODO(#12187): it's inaccurate to check with the view of the local-unsafe + // it should be limited to the L1 view at the time of the inclusion of execution of the message. 
+ err := vi.validWithinView(vi.localDerivedFrom.Number, execMsg) + if err != nil { + return err + } + } + return nil + }) + if err == nil { + panic("expected reader to complete with an exit-error") + } + if errors.Is(err, logs.ErrFuture) { + // register the new cross-safe block as cross-safe up to the current L1 view + return nil + } + return err +} diff --git a/op-supervisor/supervisor/backend/source/chain.go b/op-supervisor/supervisor/backend/source/chain.go index 03286b1a4160..383a5fb74de8 100644 --- a/op-supervisor/supervisor/backend/source/chain.go +++ b/op-supervisor/supervisor/backend/source/chain.go @@ -10,7 +10,6 @@ import ( "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum-optimism/optimism/op-service/sources/caching" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) @@ -25,8 +24,7 @@ type Metrics interface { } type Storage interface { - LogStorage - Heads() db.HeadsStorage + ChainsDBClientForLogProcessor DatabaseRewinder LatestBlockNum(chainID types.ChainID) (num uint64, ok bool) } @@ -50,16 +48,9 @@ func NewChainMonitor(ctx context.Context, logger log.Logger, m Metrics, chainID processLogs := newLogProcessor(chainID, store) unsafeBlockProcessor := NewChainProcessor(logger, cl, chainID, processLogs, store) - // create head processors which only update the head - unsafeHeadProcessor := OnNewHead(chainID, store.Heads().UpdateLocalUnsafe) - safeHeadProcessor := OnNewHead(chainID, store.Heads().UpdateLocalSafe) - finalizedHeadProcessor := OnNewHead(chainID, store.Heads().UpdateLocalFinalized) + unsafeProcessors := []HeadProcessor{unsafeBlockProcessor} - unsafeProcessors := []HeadProcessor{unsafeBlockProcessor, unsafeHeadProcessor} - safeProcessors := []HeadProcessor{safeHeadProcessor} - finalizedProcessors := []HeadProcessor{finalizedHeadProcessor} - - callback := 
newHeadUpdateProcessor(logger, unsafeProcessors, safeProcessors, finalizedProcessors) + callback := newHeadUpdateProcessor(logger, unsafeProcessors, nil, nil) headMonitor := NewHeadMonitor(logger, epochPollInterval, cl, callback) return &ChainMonitor{ diff --git a/op-supervisor/supervisor/backend/source/chain_processor.go b/op-supervisor/supervisor/backend/source/chain_processor.go index 4c7895b0cdf3..60568fe296fb 100644 --- a/op-supervisor/supervisor/backend/source/chain_processor.go +++ b/op-supervisor/supervisor/backend/source/chain_processor.go @@ -21,7 +21,7 @@ type Source interface { } type LogProcessor interface { - ProcessLogs(ctx context.Context, block eth.L1BlockRef, receipts gethtypes.Receipts) error + ProcessLogs(ctx context.Context, block eth.L2BlockRef, receipts gethtypes.Receipts) error } type DatabaseRewinder interface { @@ -130,7 +130,13 @@ func (s *ChainProcessor) worker() { func (s *ChainProcessor) update(nextNum uint64) error { ctx, cancel := context.WithTimeout(s.ctx, time.Second*10) - next, err := s.client.L1BlockRefByNumber(ctx, nextNum) + nextL1, err := s.client.L1BlockRefByNumber(ctx, nextNum) + next := eth.L2BlockRef{ + Hash: nextL1.Hash, + ParentHash: nextL1.ParentHash, + Number: nextL1.Number, + Time: nextL1.Time, + } cancel() if err != nil { return fmt.Errorf("failed to fetch next block: %w", err) diff --git a/op-supervisor/supervisor/backend/source/log_processor.go b/op-supervisor/supervisor/backend/source/log_processor.go index 1c20f8c4530a..8a815f7ca9e9 100644 --- a/op-supervisor/supervisor/backend/source/log_processor.go +++ b/op-supervisor/supervisor/backend/source/log_processor.go @@ -15,7 +15,12 @@ import ( ) type LogStorage interface { - SealBlock(chain types.ChainID, parentHash common.Hash, block eth.BlockID, timestamp uint64) error + SealBlock(chain types.ChainID, block eth.L2BlockRef) error + AddLog(chain types.ChainID, logHash common.Hash, parentBlock eth.BlockID, logIdx uint32, execMsg *types.ExecutingMessage) error +} + 
+type ChainsDBClientForLogProcessor interface { + SealBlock(chain types.ChainID, block eth.L2BlockRef) error AddLog(chain types.ChainID, logHash common.Hash, parentBlock eth.BlockID, logIdx uint32, execMsg *types.ExecutingMessage) error } @@ -39,7 +44,7 @@ func newLogProcessor(chain types.ChainID, logStore LogStorage) *logProcessor { // ProcessLogs processes logs from a block and stores them in the log storage // for any logs that are related to executing messages, they are decoded and stored -func (p *logProcessor) ProcessLogs(_ context.Context, block eth.L1BlockRef, rcpts ethTypes.Receipts) error { +func (p *logProcessor) ProcessLogs(_ context.Context, block eth.L2BlockRef, rcpts ethTypes.Receipts) error { for _, rcpt := range rcpts { for _, l := range rcpt.Logs { // log hash represents the hash of *this* log as a potentially initiating message @@ -60,7 +65,7 @@ func (p *logProcessor) ProcessLogs(_ context.Context, block eth.L1BlockRef, rcpt } } } - if err := p.logStore.SealBlock(p.chain, block.ParentHash, block.ID(), block.Time); err != nil { + if err := p.logStore.SealBlock(p.chain, block); err != nil { return fmt.Errorf("failed to seal block %s: %w", block.ID(), err) } return nil diff --git a/op-supervisor/supervisor/backend/source/log_processor_test.go b/op-supervisor/supervisor/backend/source/log_processor_test.go index bd7aa7abc3d1..6e96d731fcff 100644 --- a/op-supervisor/supervisor/backend/source/log_processor_test.go +++ b/op-supervisor/supervisor/backend/source/log_processor_test.go @@ -17,7 +17,7 @@ var logProcessorChainID = types.ChainIDFromUInt64(4) func TestLogProcessor(t *testing.T) { ctx := context.Background() - block1 := eth.L1BlockRef{ + block1 := eth.L2BlockRef{ ParentHash: common.Hash{0x42}, Number: 100, Hash: common.Hash{0x11}, @@ -205,14 +205,14 @@ type stubLogStorage struct { seals []storedSeal } -func (s *stubLogStorage) SealBlock(chainID types.ChainID, parentHash common.Hash, block eth.BlockID, timestamp uint64) error { +func (s 
*stubLogStorage) SealBlock(chainID types.ChainID, block eth.L2BlockRef) error { if logProcessorChainID != chainID { return fmt.Errorf("chain id mismatch, expected %v but got %v", logProcessorChainID, chainID) } s.seals = append(s.seals, storedSeal{ - parent: parentHash, - block: block, - timestamp: timestamp, + parent: block.ParentHash, + block: block.ID(), + timestamp: block.Time, }) return nil } From b1dfd7431d1172927d67b61eb01d237d83c1ecfa Mon Sep 17 00:00:00 2001 From: Matt Solomon Date: Fri, 27 Sep 2024 15:25:47 -0700 Subject: [PATCH 072/116] OPCM: Adds a test, cleans up some TODOs, new justfile command (#12182) * add missing assertion and remove unneeded todo comments * chore: semver lock + pre-pr-no-build * chore: rename function --- packages/contracts-bedrock/justfile | 6 +++++- .../contracts-bedrock/scripts/DeployOPChain.s.sol | 13 ++++++++++--- .../scripts/DeploySuperchain.s.sol | 1 - packages/contracts-bedrock/semver-lock.json | 4 ++-- .../contracts-bedrock/src/L1/OPContractsManager.sol | 10 ++++------ 5 files changed, 21 insertions(+), 13 deletions(-) diff --git a/packages/contracts-bedrock/justfile b/packages/contracts-bedrock/justfile index 901ce17daa68..b5bce1e55b7a 100644 --- a/packages/contracts-bedrock/justfile +++ b/packages/contracts-bedrock/justfile @@ -197,7 +197,11 @@ check: gas-snapshot-check-no-build kontrol-deployment-check snapshots-check-no-b ######################################################## # Cleans, builds, lints, and runs all checks. -pre-pr: clean build-go-ffi build lint gas-snapshot-no-build snapshots-no-build semver-lock check +pre-pr: clean pre-pr-no-build + +# Builds, lints, and runs all checks. Sometimes a bad cache causes issues, in which case the above +# `pre-pr` is preferred. But in most cases this will be sufficient and much faster then a full build. +pre-pr-no-build: build-go-ffi build lint gas-snapshot-no-build snapshots-no-build semver-lock check # Fixes linting errors. 
lint-fix: diff --git a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol index d4de3636d245..152885170cb0 100644 --- a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol @@ -307,7 +307,7 @@ contract DeployOPChainOutput is BaseDeployIO { function assertValidDeploy(DeployOPChainInput _doi) internal { assertValidAnchorStateRegistryImpl(_doi); assertValidAnchorStateRegistryProxy(_doi); - assertValidDelayedWETHs(_doi); + assertValidDelayedWETH(_doi); assertValidDisputeGameFactory(_doi); assertValidL1CrossDomainMessenger(_doi); assertValidL1ERC721Bridge(_doi); @@ -482,8 +482,15 @@ contract DeployOPChainOutput is BaseDeployIO { require(factory.owner() == address(_doi.opChainProxyAdminOwner()), "DF-20"); } - function assertValidDelayedWETHs(DeployOPChainInput) internal view { - // TODO add in once FP support is added. + function assertValidDelayedWETH(DeployOPChainInput _doi) internal { + DelayedWETH permissioned = delayedWETHPermissionedGameProxy(); + + require(permissioned.owner() == address(_doi.opChainProxyAdminOwner()), "DWETH-10"); + + Proxy proxy = Proxy(payable(address(permissioned))); + vm.prank(address(0)); + address admin = proxy.admin(); + require(admin == address(opChainProxyAdmin()), "DWETH-20"); } } diff --git a/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol b/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol index b26755c755ec..c9e1b23bf230 100644 --- a/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol +++ b/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol @@ -195,7 +195,6 @@ contract DeploySuperchainOutput is BaseDeployIO { require(actualSuperchainConfigImpl == address(_superchainConfigImpl), "100"); require(actualProtocolVersionsImpl == address(_protocolVersionsImpl), "200"); - // TODO Also add the assertions for the implementation contracts from ChainAssertions.sol assertValidDeploy(_dsi); } 
diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index cbd9611d6800..af623c25f487 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -32,8 +32,8 @@ "sourceCodeHash": "0xde4df0f9633dc0cdb1c9f634003ea5b0f7c5c1aebc407bc1b2f44c0ecf938649" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0x7c5d90928ce882ed5360939722271e9af36e81c394e4110ba32864b14c3d78be", - "sourceCodeHash": "0x25372ad554eaeb64d7512e19642210bb3736e4047ea97518b2992b3ab67e1a5d" + "initCodeHash": "0xf64b41f5ec9a94ddbac484f2bdcb5792bf71e66ed6197d172cfd4592311f9b85", + "sourceCodeHash": "0x0ac3668c5f1ccbd49713a4e079f0dafb5cc45eca8de33e0faf0f01b1b989da89" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xbe2c0c81b3459014f287d8c89cdc0d27dde5d1f44e5d024fa1e4773ddc47c190", diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index e2707b4204d1..7cf5639b2911 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -128,8 +128,8 @@ contract OPContractsManager is ISemver, Initializable { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.15 - string public constant version = "1.0.0-beta.15"; + /// @custom:semver 1.0.0-beta.16 + string public constant version = "1.0.0-beta.16"; /// @notice Represents the interface version so consumers know how to decode the DeployOutput struct /// that's emitted in the `Deployed` event. Whenever that struct changes, a new version should be used. 
@@ -433,8 +433,6 @@ contract OPContractsManager is ISemver, Initializable { virtual returns (bytes memory) { - _output; - // TODO make GameTypes.CANNON an input once FPs are supported return abi.encodeWithSelector( _selector, _output.disputeGameFactoryProxy, @@ -473,7 +471,7 @@ contract OPContractsManager is ISemver, Initializable { _input.basefeeScalar, _input.blobBasefeeScalar, bytes32(uint256(uint160(_input.roles.batcher))), // batcherHash - 30_000_000, // gasLimit, TODO should this be an input? + 30_000_000, _input.roles.unsafeBlockSigner, referenceResourceConfig, chainIdToBatchInboxAddress(_input.l2ChainId), @@ -490,7 +488,7 @@ contract OPContractsManager is ISemver, Initializable { _input.basefeeScalar, _input.blobBasefeeScalar, bytes32(uint256(uint160(_input.roles.batcher))), // batcherHash - 30_000_000, // gasLimit, TODO should this be an input? + 30_000_000, _input.roles.unsafeBlockSigner, referenceResourceConfig, chainIdToBatchInboxAddress(_input.l2ChainId), From 5eaac1de545da1a3bc13d7ac78c03681a8f6e081 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Fri, 27 Sep 2024 18:59:10 -0400 Subject: [PATCH 073/116] maint(ct): remove Kontrol interfaces (#12178) We no longer need these interfaces now that we have the actual interfaces. 
--- .../scripts/checks/check-interfaces.sh | 34 ++++---- .../L1/interfaces/IL1CrossDomainMessenger.sol | 8 +- .../contracts-bedrock/test/kontrol/README.md | 15 ++-- .../proofs/L1CrossDomainMessenger.k.sol | 6 +- .../test/kontrol/proofs/L1ERC721Bridge.k.sol | 8 +- .../kontrol/proofs/L1StandardBridge.k.sol | 8 +- .../test/kontrol/proofs/OptimismPortal.k.sol | 6 +- .../test/kontrol/proofs/OptimismPortal2.k.sol | 6 +- .../proofs/interfaces/KontrolInterfaces.sol | 83 ------------------- 9 files changed, 40 insertions(+), 134 deletions(-) delete mode 100644 packages/contracts-bedrock/test/kontrol/proofs/interfaces/KontrolInterfaces.sol diff --git a/packages/contracts-bedrock/scripts/checks/check-interfaces.sh b/packages/contracts-bedrock/scripts/checks/check-interfaces.sh index 2df1045ef101..2a4a566f34e9 100755 --- a/packages/contracts-bedrock/scripts/checks/check-interfaces.sh +++ b/packages/contracts-bedrock/scripts/checks/check-interfaces.sh @@ -45,29 +45,31 @@ EXCLUDE_CONTRACTS=( "ISchemaResolver" "ISchemaRegistry" - # Kontrol - "KontrolCheatsBase" + # TODO: Interfaces that need to be fixed are below this line + # ---------------------------------------------------------- - # TODO: Interfaces that need to be fixed - "IOptimismSuperchainERC20" - "IOptimismMintableERC721" - "IOptimismMintableERC20" - "ILegacyMintableERC20" + # Inlined interface, needs to be replaced. "IInitializable" + + # Missing various functions. "IPreimageOracle" - "ICrossL2Inbox" - "IL2ToL2CrossDomainMessenger" + "ILegacyMintableERC20" + "IOptimismMintableERC20" + "IOptimismMintableERC721" + "IOptimismSuperchainERC20" + + # Doesn't start with "I" "MintableAndBurnable" + "KontrolCheatsBase" + + # Currently inherit from interface, needs to be fixed. 
"IWETH" "IDelayedWETH" - "IResolvedDelegateProxy" + "IL2ToL2CrossDomainMessenger" + "ICrossL2Inbox" - # TODO: Kontrol interfaces that need to be removed - "IL1ERC721Bridge" - "IL1StandardBridge" - "IL1CrossDomainMessenger" - "ISuperchainConfig" - "IOptimismPortal" + # Solidity complains about receive but contract doens't have it. + "IResolvedDelegateProxy" ) # Find all JSON files in the forge-artifacts folder diff --git a/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessenger.sol b/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessenger.sol index b8b7e3403d29..8a6de84e2c9d 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/IL1CrossDomainMessenger.sol @@ -7,16 +7,16 @@ import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; interface IL1CrossDomainMessenger is ICrossDomainMessenger { - function PORTAL() external view returns (address); + function PORTAL() external view returns (IOptimismPortal); function initialize( ISuperchainConfig _superchainConfig, IOptimismPortal _portal, ISystemConfig _systemConfig ) external; - function portal() external view returns (address); - function superchainConfig() external view returns (address); - function systemConfig() external view returns (address); + function portal() external view returns (IOptimismPortal); + function superchainConfig() external view returns (ISuperchainConfig); + function systemConfig() external view returns (ISystemConfig); function version() external view returns (string memory); function __constructor__() external; diff --git a/packages/contracts-bedrock/test/kontrol/README.md b/packages/contracts-bedrock/test/kontrol/README.md index 0a6dcec7c79b..c0e3a39349b8 100644 --- a/packages/contracts-bedrock/test/kontrol/README.md +++ b/packages/contracts-bedrock/test/kontrol/README.md @@ -122,23 +122,19 @@ The next 
step is to include tests for the newly included state updates in [`Depl It might be necessary to set some of the existing tests from [`test`](../L1) as virtual because they can't be executed as is. See [`DeploymentSummary.t.sol`](deployment/DeploymentSummary.t.sol) for more concrete examples. -#### Add function signatures to [`KontrolInterfaces`](./proofs/interfaces/KontrolInterfaces.sol) - -So far we've got all the state updates ready to be added to the initial configuration of each proof, but we cannot yet write any proof about the function. We still need to add the relevant signatures into `KontrolInterfaces`. The reason for having `KontrolInterfaces` instead of using directly the contracts is to reduce the amount of compiled contracts by Kontrol. -In the future there might interfaces for all contracts under `contracts-bedrock`, which would imply the removal of `KontrolInterfaces`. - #### Write the proof Write your proof in a `.k.sol` file in the [`proofs`](./proofs/) folder, which is the `test` directory used by the `kprove` profile to run the proofs (see [Deployment Summary Process](#deployment-summary-process)). The name of the new proofs should start with `prove` (or `check`) instead of `test` to avoid `forge test` running them. The reason for this is that if Kontrol cheatcodes (see [Kontrol's own cheatcodes](https://github.com/runtimeverification/kontrol-cheatcodes/blob/master/src/KontrolCheats.sol)) are used in a test, it will not be runnable by `forge`. Currently, none of the tests are using custom Kontrol cheatcodes, but this is something to bear in mind. 
To reference the correct addresses for writing the tests, first import the signatures as in this example: + ```solidity -import { - IOptimismPortal as OptimismPortal, - ISuperchainConfig as SuperchainConfig -} from "./interfaces/KontrolInterfaces.sol"; +import { IOptimismPortal as OptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; +import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; ``` + Declare the correspondent variables and cast the correct signatures to the correct addresses: + ```solidity OptimismPortal optimismPortal; SuperchainConfig superchainConfig; @@ -148,6 +144,7 @@ function setUp() public { superchainConfig = SuperchainConfig(superchainConfigProxyAddress); } ``` + Note that the names of the addresses come from [`DeploymentSummary.t.sol`](deployment/DeploymentSummary.t.sol) and are automatically generated by the [`make-summary-deployment.sh`](./scripts/make-summary-deployment.sh) script. #### Add your test to [`run-kontrol.sh`](./scripts/run-kontrol.sh) diff --git a/packages/contracts-bedrock/test/kontrol/proofs/L1CrossDomainMessenger.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/L1CrossDomainMessenger.k.sol index d748cd24b4a7..60edd1dc4655 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/L1CrossDomainMessenger.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/L1CrossDomainMessenger.k.sol @@ -3,10 +3,8 @@ pragma solidity ^0.8.13; import { DeploymentSummary } from "./utils/DeploymentSummary.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; -import { - IL1CrossDomainMessenger as L1CrossDomainMessenger, - ISuperchainConfig as SuperchainConfig -} from "./interfaces/KontrolInterfaces.sol"; +import { IL1CrossDomainMessenger as L1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; +import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; contract L1CrossDomainMessengerKontrol is DeploymentSummary, 
KontrolUtils { L1CrossDomainMessenger l1CrossDomainMessenger; diff --git a/packages/contracts-bedrock/test/kontrol/proofs/L1ERC721Bridge.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/L1ERC721Bridge.k.sol index 43803f31a3e8..f7887f0f1a71 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/L1ERC721Bridge.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/L1ERC721Bridge.k.sol @@ -4,11 +4,9 @@ pragma solidity ^0.8.13; import { DeploymentSummary } from "./utils/DeploymentSummary.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; import { Types } from "src/libraries/Types.sol"; -import { - IL1ERC721Bridge as L1ERC721Bridge, - IL1CrossDomainMessenger as CrossDomainMessenger, - ISuperchainConfig as SuperchainConfig -} from "./interfaces/KontrolInterfaces.sol"; +import { IL1ERC721Bridge as L1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; +import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { ICrossDomainMessenger as CrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; contract L1ERC721BridgeKontrol is DeploymentSummary, KontrolUtils { L1ERC721Bridge l1ERC721Bridge; diff --git a/packages/contracts-bedrock/test/kontrol/proofs/L1StandardBridge.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/L1StandardBridge.k.sol index 0b710fc01e51..8cefd5546e9e 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/L1StandardBridge.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/L1StandardBridge.k.sol @@ -4,11 +4,9 @@ pragma solidity ^0.8.13; import { DeploymentSummary } from "./utils/DeploymentSummary.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; import { Types } from "src/libraries/Types.sol"; -import { - IL1StandardBridge as L1StandardBridge, - IL1CrossDomainMessenger as CrossDomainMessenger, - ISuperchainConfig as SuperchainConfig -} from "./interfaces/KontrolInterfaces.sol"; +import { IL1StandardBridge as 
L1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; +import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { ICrossDomainMessenger as CrossDomainMessenger } from "src/universal/interfaces/ICrossDomainMessenger.sol"; contract L1StandardBridgeKontrol is DeploymentSummary, KontrolUtils { L1StandardBridge l1standardBridge; diff --git a/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal.k.sol index 969c69349ae4..f0cf6cac7734 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal.k.sol @@ -4,10 +4,8 @@ pragma solidity ^0.8.13; import { DeploymentSummary } from "./utils/DeploymentSummary.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; import { Types } from "src/libraries/Types.sol"; -import { - IOptimismPortal as OptimismPortal, - ISuperchainConfig as SuperchainConfig -} from "./interfaces/KontrolInterfaces.sol"; +import { IOptimismPortal as OptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; +import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; import "src/libraries/PortalErrors.sol"; contract OptimismPortalKontrol is DeploymentSummary, KontrolUtils { diff --git a/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal2.k.sol b/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal2.k.sol index 18a1b579417a..d561b8b85092 100644 --- a/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal2.k.sol +++ b/packages/contracts-bedrock/test/kontrol/proofs/OptimismPortal2.k.sol @@ -4,10 +4,8 @@ pragma solidity ^0.8.13; import { DeploymentSummaryFaultProofs } from "./utils/DeploymentSummaryFaultProofs.sol"; import { KontrolUtils } from "./utils/KontrolUtils.sol"; import { Types } from "src/libraries/Types.sol"; -import { - IOptimismPortal as OptimismPortal, 
- ISuperchainConfig as SuperchainConfig -} from "./interfaces/KontrolInterfaces.sol"; +import { IOptimismPortal as OptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; +import { ISuperchainConfig as SuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; import "src/libraries/PortalErrors.sol"; contract OptimismPortal2Kontrol is DeploymentSummaryFaultProofs, KontrolUtils { diff --git a/packages/contracts-bedrock/test/kontrol/proofs/interfaces/KontrolInterfaces.sol b/packages/contracts-bedrock/test/kontrol/proofs/interfaces/KontrolInterfaces.sol deleted file mode 100644 index 831d208b9ac6..000000000000 --- a/packages/contracts-bedrock/test/kontrol/proofs/interfaces/KontrolInterfaces.sol +++ /dev/null @@ -1,83 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -import { Types } from "src/libraries/Types.sol"; - -interface IOptimismPortal { - function guardian() external view returns (address); - - function paused() external view returns (bool paused_); - - function proveWithdrawalTransaction( - Types.WithdrawalTransaction memory _tx, - uint256 _l2OutputIndex, - Types.OutputRootProof calldata _outputRootProof, - bytes[] calldata _withdrawalProof - ) - external; - - function finalizeWithdrawalTransaction(Types.WithdrawalTransaction memory _tx) external; -} - -interface ISuperchainConfig { - function guardian() external view returns (address); - - function paused() external view returns (bool paused_); - - function pause(string memory _identifier) external; - - function unpause() external; -} - -interface IL1StandardBridge { - function paused() external view returns (bool); - - function messenger() external view returns (IL1CrossDomainMessenger); - - function otherBridge() external view returns (IL1StandardBridge); - - function finalizeBridgeERC20( - address _localToken, - address _remoteToken, - address _from, - address _to, - uint256 _amount, - bytes calldata _extraData - ) - external; - - function finalizeBridgeETH(address _from, 
address _to, uint256 _amount, bytes calldata _extraData) external; -} - -interface IL1ERC721Bridge { - function paused() external view returns (bool); - - function messenger() external view returns (IL1CrossDomainMessenger); - - function otherBridge() external view returns (IL1StandardBridge); - - function finalizeBridgeERC721( - address _localToken, - address _remoteToken, - address _from, - address _to, - uint256 _amount, - bytes calldata _extraData - ) - external; -} - -interface IL1CrossDomainMessenger { - function relayMessage( - uint256 _nonce, - address _sender, - address _target, - uint256 _value, - uint256 _minGasLimit, - bytes calldata _message - ) - external - payable; - - function xDomainMessageSender() external view returns (address); -} From 6ba2ac0d25d6ab47e4247f328db09fd3d34e0fce Mon Sep 17 00:00:00 2001 From: Maurelian Date: Fri, 27 Sep 2024 20:32:07 -0400 Subject: [PATCH 074/116] Interfaces in OPCM and OPCMInterop (#12188) * Updates to OPCM and OPCMInterop * Update DeployImpls and OPCM Tests * remove some unused imports * Undo mapping type change * Undo DeployOutput type change * Semver and snapshots --- .../scripts/DeployImplementations.s.sol | 23 ++-- packages/contracts-bedrock/semver-lock.json | 4 +- .../snapshots/abi/OPContractsManager.json | 36 +++--- .../abi/OPContractsManagerInterop.json | 36 +++--- .../storageLayout/OPContractsManager.json | 2 +- .../OPContractsManagerInterop.json | 2 +- .../src/L1/OPContractsManager.sol | 112 +++++++++--------- .../src/L1/OPContractsManagerInterop.sol | 16 +-- .../test/L1/OPContractsManager.t.sol | 10 +- 9 files changed, 120 insertions(+), 121 deletions(-) diff --git a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol index 12659dbdc814..8e7a38ca2eb5 100644 --- a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol +++ b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol @@ -7,6 +7,7 @@ import { LibString } 
from "@solady/utils/LibString.sol"; import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; import { ISystemConfigV160 } from "src/L1/interfaces/ISystemConfigV160.sol"; import { IL1CrossDomainMessengerV160 } from "src/L1/interfaces/IL1CrossDomainMessengerV160.sol"; import { IL1StandardBridgeV160 } from "src/L1/interfaces/IL1StandardBridgeV160.sol"; @@ -29,8 +30,6 @@ import { DisputeGameFactory } from "src/dispute/DisputeGameFactory.sol"; import { AnchorStateRegistry } from "src/dispute/AnchorStateRegistry.sol"; import { PermissionedDisputeGame } from "src/dispute/PermissionedDisputeGame.sol"; -import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; -import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; import { OptimismPortal2 } from "src/L1/OptimismPortal2.sol"; import { SystemConfig } from "src/L1/SystemConfig.sol"; @@ -62,8 +61,8 @@ contract DeployImplementationsInput is BaseDeployIO { string internal _release; // Outputs from DeploySuperchain.s.sol. 
- SuperchainConfig internal _superchainConfigProxy; - ProtocolVersions internal _protocolVersionsProxy; + ISuperchainConfig internal _superchainConfigProxy; + IProtocolVersions internal _protocolVersionsProxy; string internal _standardVersionsToml; @@ -97,8 +96,8 @@ contract DeployImplementationsInput is BaseDeployIO { function set(bytes4 _sel, address _addr) public { require(_addr != address(0), "DeployImplementationsInput: cannot set zero address"); - if (_sel == this.superchainConfigProxy.selector) _superchainConfigProxy = SuperchainConfig(_addr); - else if (_sel == this.protocolVersionsProxy.selector) _protocolVersionsProxy = ProtocolVersions(_addr); + if (_sel == this.superchainConfigProxy.selector) _superchainConfigProxy = ISuperchainConfig(_addr); + else if (_sel == this.protocolVersionsProxy.selector) _protocolVersionsProxy = IProtocolVersions(_addr); else if (_sel == this.opcmProxyOwner.selector) _opcmProxyOwner = _addr; else revert("DeployImplementationsInput: unknown selector"); } @@ -151,12 +150,12 @@ contract DeployImplementationsInput is BaseDeployIO { return _standardVersionsToml; } - function superchainConfigProxy() public view returns (SuperchainConfig) { + function superchainConfigProxy() public view returns (ISuperchainConfig) { require(address(_superchainConfigProxy) != address(0), "DeployImplementationsInput: not set"); return _superchainConfigProxy; } - function protocolVersionsProxy() public view returns (ProtocolVersions) { + function protocolVersionsProxy() public view returns (IProtocolVersions) { require(address(_protocolVersionsProxy) != address(0), "DeployImplementationsInput: not set"); return _protocolVersionsProxy; } @@ -777,8 +776,8 @@ contract DeployImplementations is Script { public virtual { - SuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); - ProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); + ISuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); + IProtocolVersions 
protocolVersionsProxy = _dii.protocolVersionsProxy(); vm.broadcast(msg.sender); // TODO: Eventually we will want to select the correct implementation based on the release. @@ -1147,8 +1146,8 @@ contract DeployImplementationsInterop is DeployImplementations { public override { - SuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); - ProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); + ISuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); + IProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); vm.broadcast(msg.sender); // TODO: Eventually we will want to select the correct implementation based on the release. diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index af623c25f487..30b546e24ca9 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -32,8 +32,8 @@ "sourceCodeHash": "0xde4df0f9633dc0cdb1c9f634003ea5b0f7c5c1aebc407bc1b2f44c0ecf938649" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0xf64b41f5ec9a94ddbac484f2bdcb5792bf71e66ed6197d172cfd4592311f9b85", - "sourceCodeHash": "0x0ac3668c5f1ccbd49713a4e079f0dafb5cc45eca8de33e0faf0f01b1b989da89" + "initCodeHash": "0xa0c1139a01cef2445266c71175eff2d36e4b3a7584b198835ed8cba4f7143704", + "sourceCodeHash": "0x67f9846a215d0817a75b4beee50925861d14da2cab1b699bb4e8ae89fa12d01b" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xbe2c0c81b3459014f287d8c89cdc0d27dde5d1f44e5d024fa1e4773ddc47c190", diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json index 2ff2826881f5..dc4ef6142c57 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json @@ -2,12 +2,12 @@ { "inputs": [ { - "internalType": "contract SuperchainConfig", + "internalType": "contract 
ISuperchainConfig", "name": "_superchainConfig", "type": "address" }, { - "internalType": "contract ProtocolVersions", + "internalType": "contract IProtocolVersions", "name": "_protocolVersions", "type": "address" } @@ -184,17 +184,17 @@ "type": "address" }, { - "internalType": "contract AddressManager", + "internalType": "contract IAddressManager", "name": "addressManager", "type": "address" }, { - "internalType": "contract L1ERC721Bridge", + "internalType": "contract IL1ERC721Bridge", "name": "l1ERC721BridgeProxy", "type": "address" }, { - "internalType": "contract SystemConfig", + "internalType": "contract ISystemConfig", "name": "systemConfigProxy", "type": "address" }, @@ -204,52 +204,52 @@ "type": "address" }, { - "internalType": "contract L1StandardBridge", + "internalType": "contract IL1StandardBridge", "name": "l1StandardBridgeProxy", "type": "address" }, { - "internalType": "contract L1CrossDomainMessenger", + "internalType": "contract IL1CrossDomainMessenger", "name": "l1CrossDomainMessengerProxy", "type": "address" }, { - "internalType": "contract OptimismPortal2", + "internalType": "contract IOptimismPortal2", "name": "optimismPortalProxy", "type": "address" }, { - "internalType": "contract DisputeGameFactory", + "internalType": "contract IDisputeGameFactory", "name": "disputeGameFactoryProxy", "type": "address" }, { - "internalType": "contract AnchorStateRegistry", + "internalType": "contract IAnchorStateRegistry", "name": "anchorStateRegistryProxy", "type": "address" }, { - "internalType": "contract AnchorStateRegistry", + "internalType": "contract IAnchorStateRegistry", "name": "anchorStateRegistryImpl", "type": "address" }, { - "internalType": "contract FaultDisputeGame", + "internalType": "contract IFaultDisputeGame", "name": "faultDisputeGame", "type": "address" }, { - "internalType": "contract PermissionedDisputeGame", + "internalType": "contract IPermissionedDisputeGame", "name": "permissionedDisputeGame", "type": "address" }, { - 
"internalType": "contract DelayedWETH", + "internalType": "contract IDelayedWETH", "name": "delayedWETHPermissionedGameProxy", "type": "address" }, { - "internalType": "contract DelayedWETH", + "internalType": "contract IDelayedWETH", "name": "delayedWETHPermissionlessGameProxy", "type": "address" } @@ -410,7 +410,7 @@ "name": "protocolVersions", "outputs": [ { - "internalType": "contract ProtocolVersions", + "internalType": "contract IProtocolVersions", "name": "", "type": "address" } @@ -423,7 +423,7 @@ "name": "superchainConfig", "outputs": [ { - "internalType": "contract SuperchainConfig", + "internalType": "contract ISuperchainConfig", "name": "", "type": "address" } @@ -442,7 +442,7 @@ "name": "systemConfigs", "outputs": [ { - "internalType": "contract SystemConfig", + "internalType": "contract ISystemConfig", "name": "", "type": "address" } diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json index 2ff2826881f5..dc4ef6142c57 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json @@ -2,12 +2,12 @@ { "inputs": [ { - "internalType": "contract SuperchainConfig", + "internalType": "contract ISuperchainConfig", "name": "_superchainConfig", "type": "address" }, { - "internalType": "contract ProtocolVersions", + "internalType": "contract IProtocolVersions", "name": "_protocolVersions", "type": "address" } @@ -184,17 +184,17 @@ "type": "address" }, { - "internalType": "contract AddressManager", + "internalType": "contract IAddressManager", "name": "addressManager", "type": "address" }, { - "internalType": "contract L1ERC721Bridge", + "internalType": "contract IL1ERC721Bridge", "name": "l1ERC721BridgeProxy", "type": "address" }, { - "internalType": "contract SystemConfig", + "internalType": "contract ISystemConfig", "name": "systemConfigProxy", "type": 
"address" }, @@ -204,52 +204,52 @@ "type": "address" }, { - "internalType": "contract L1StandardBridge", + "internalType": "contract IL1StandardBridge", "name": "l1StandardBridgeProxy", "type": "address" }, { - "internalType": "contract L1CrossDomainMessenger", + "internalType": "contract IL1CrossDomainMessenger", "name": "l1CrossDomainMessengerProxy", "type": "address" }, { - "internalType": "contract OptimismPortal2", + "internalType": "contract IOptimismPortal2", "name": "optimismPortalProxy", "type": "address" }, { - "internalType": "contract DisputeGameFactory", + "internalType": "contract IDisputeGameFactory", "name": "disputeGameFactoryProxy", "type": "address" }, { - "internalType": "contract AnchorStateRegistry", + "internalType": "contract IAnchorStateRegistry", "name": "anchorStateRegistryProxy", "type": "address" }, { - "internalType": "contract AnchorStateRegistry", + "internalType": "contract IAnchorStateRegistry", "name": "anchorStateRegistryImpl", "type": "address" }, { - "internalType": "contract FaultDisputeGame", + "internalType": "contract IFaultDisputeGame", "name": "faultDisputeGame", "type": "address" }, { - "internalType": "contract PermissionedDisputeGame", + "internalType": "contract IPermissionedDisputeGame", "name": "permissionedDisputeGame", "type": "address" }, { - "internalType": "contract DelayedWETH", + "internalType": "contract IDelayedWETH", "name": "delayedWETHPermissionedGameProxy", "type": "address" }, { - "internalType": "contract DelayedWETH", + "internalType": "contract IDelayedWETH", "name": "delayedWETHPermissionlessGameProxy", "type": "address" } @@ -410,7 +410,7 @@ "name": "protocolVersions", "outputs": [ { - "internalType": "contract ProtocolVersions", + "internalType": "contract IProtocolVersions", "name": "", "type": "address" } @@ -423,7 +423,7 @@ "name": "superchainConfig", "outputs": [ { - "internalType": "contract SuperchainConfig", + "internalType": "contract ISuperchainConfig", "name": "", "type": "address" } @@ 
-442,7 +442,7 @@ "name": "systemConfigs", "outputs": [ { - "internalType": "contract SystemConfig", + "internalType": "contract ISystemConfig", "name": "", "type": "address" } diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json index cbb977f214b4..aeef539c5c20 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json @@ -32,7 +32,7 @@ "label": "systemConfigs", "offset": 0, "slot": "3", - "type": "mapping(uint256 => contract SystemConfig)" + "type": "mapping(uint256 => contract ISystemConfig)" }, { "bytes": "256", diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json index cbb977f214b4..aeef539c5c20 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json @@ -32,7 +32,7 @@ "label": "systemConfigs", "offset": 0, "slot": "3", - "type": "mapping(uint256 => contract SystemConfig)" + "type": "mapping(uint256 => contract ISystemConfig)" }, { "bytes": "256", diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index 7cf5639b2911..19c5283cc332 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -15,30 +15,26 @@ import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; import { ISystemConfigV160 } from "src/L1/interfaces/ISystemConfigV160.sol"; import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; -import { Proxy } from "src/universal/Proxy.sol"; import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; -import { 
SuperchainConfig } from "src/L1/SuperchainConfig.sol"; -import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; import { L1ChugSplashProxy } from "src/legacy/L1ChugSplashProxy.sol"; import { ResolvedDelegateProxy } from "src/legacy/ResolvedDelegateProxy.sol"; -import { AddressManager } from "src/legacy/AddressManager.sol"; -import { DelayedWETH } from "src/dispute/DelayedWETH.sol"; -import { DisputeGameFactory } from "src/dispute/DisputeGameFactory.sol"; -import { AnchorStateRegistry } from "src/dispute/AnchorStateRegistry.sol"; -import { FaultDisputeGame } from "src/dispute/FaultDisputeGame.sol"; -import { PermissionedDisputeGame } from "src/dispute/PermissionedDisputeGame.sol"; +import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; +import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; +import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; +import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; +import { IPermissionedDisputeGame } from "src/dispute/interfaces/IPermissionedDisputeGame.sol"; import { Claim, Duration, GameType, GameTypes } from "src/dispute/lib/Types.sol"; -import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; -import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; -import { OptimismPortal2 } from "src/L1/OptimismPortal2.sol"; -import { SystemConfig } from "src/L1/SystemConfig.sol"; -import { ResourceMetering } from "src/L1/ResourceMetering.sol"; -import { L1CrossDomainMessenger } from "src/L1/L1CrossDomainMessenger.sol"; -import { L1ERC721Bridge } from "src/L1/L1ERC721Bridge.sol"; -import { L1StandardBridge } from "src/L1/L1StandardBridge.sol"; +import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; +import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; +import { ISystemConfig } from 
"src/L1/interfaces/ISystemConfig.sol"; +import { ISystemConfigV160 } from "src/L1/interfaces/ISystemConfigV160.sol"; +import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; +import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; +import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; import { OptimismMintableERC20Factory } from "src/universal/OptimismMintableERC20Factory.sol"; /// @custom:proxied true @@ -71,21 +67,21 @@ contract OPContractsManager is ISemver, Initializable { /// @notice The full set of outputs from deploying a new OP Stack chain. struct DeployOutput { ProxyAdmin opChainProxyAdmin; - AddressManager addressManager; - L1ERC721Bridge l1ERC721BridgeProxy; - SystemConfig systemConfigProxy; + IAddressManager addressManager; + IL1ERC721Bridge l1ERC721BridgeProxy; + ISystemConfig systemConfigProxy; OptimismMintableERC20Factory optimismMintableERC20FactoryProxy; - L1StandardBridge l1StandardBridgeProxy; - L1CrossDomainMessenger l1CrossDomainMessengerProxy; + IL1StandardBridge l1StandardBridgeProxy; + IL1CrossDomainMessenger l1CrossDomainMessengerProxy; // Fault proof contracts below. 
- OptimismPortal2 optimismPortalProxy; - DisputeGameFactory disputeGameFactoryProxy; - AnchorStateRegistry anchorStateRegistryProxy; - AnchorStateRegistry anchorStateRegistryImpl; - FaultDisputeGame faultDisputeGame; - PermissionedDisputeGame permissionedDisputeGame; - DelayedWETH delayedWETHPermissionedGameProxy; - DelayedWETH delayedWETHPermissionlessGameProxy; + IOptimismPortal2 optimismPortalProxy; + IDisputeGameFactory disputeGameFactoryProxy; + IAnchorStateRegistry anchorStateRegistryProxy; + IAnchorStateRegistry anchorStateRegistryImpl; + IFaultDisputeGame faultDisputeGame; + IPermissionedDisputeGame permissionedDisputeGame; + IDelayedWETH delayedWETHPermissionedGameProxy; + IDelayedWETH delayedWETHPermissionlessGameProxy; } /// @notice The logic address and initializer selector for an implementation contract. @@ -128,18 +124,18 @@ contract OPContractsManager is ISemver, Initializable { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.16 - string public constant version = "1.0.0-beta.16"; + /// @custom:semver 1.0.0-beta.17 + string public constant version = "1.0.0-beta.17"; /// @notice Represents the interface version so consumers know how to decode the DeployOutput struct /// that's emitted in the `Deployed` event. Whenever that struct changes, a new version should be used. uint256 public constant OUTPUT_VERSION = 0; /// @notice Address of the SuperchainConfig contract shared by all chains. - SuperchainConfig public immutable superchainConfig; + ISuperchainConfig public immutable superchainConfig; /// @notice Address of the ProtocolVersions contract shared by all chains. - ProtocolVersions public immutable protocolVersions; + IProtocolVersions public immutable protocolVersions; /// @notice The latest release of the OP Contracts Manager, as a string of the format `op-contracts/vX.Y.Z`. 
string public latestRelease; @@ -148,7 +144,7 @@ contract OPContractsManager is ISemver, Initializable { mapping(string => mapping(string => Implementation)) public implementations; /// @notice Maps an L2 Chain ID to the SystemConfig for that chain. - mapping(uint256 => SystemConfig) public systemConfigs; + mapping(uint256 => ISystemConfig) public systemConfigs; /// @notice Addresses of the Blueprint contracts. /// This is internal because if public the autogenerated getter method would return a tuple of @@ -198,7 +194,7 @@ contract OPContractsManager is ISemver, Initializable { /// @notice OPCM is proxied. Therefore the `initialize` function replaces most constructor logic for this contract. - constructor(SuperchainConfig _superchainConfig, ProtocolVersions _protocolVersions) { + constructor(ISuperchainConfig _superchainConfig, IProtocolVersions _protocolVersions) { assertValidContractAddress(address(_superchainConfig)); assertValidContractAddress(address(_protocolVersions)); superchainConfig = _superchainConfig; @@ -238,36 +234,36 @@ contract OPContractsManager is ISemver, Initializable { // this contract, and then transfer ownership to the specified owner at the end of deployment. // The AddressManager is used to store the implementation for the L1CrossDomainMessenger // due to it's usage of the legacy ResolvedDelegateProxy. - output.addressManager = AddressManager(Blueprint.deployFrom(blueprint.addressManager, salt)); + output.addressManager = IAddressManager(Blueprint.deployFrom(blueprint.addressManager, salt)); output.opChainProxyAdmin = ProxyAdmin(Blueprint.deployFrom(blueprint.proxyAdmin, salt, abi.encode(address(this)))); - output.opChainProxyAdmin.setAddressManager(IAddressManager(address(output.addressManager))); + output.opChainProxyAdmin.setAddressManager(output.addressManager); // -------- Deploy Proxy Contracts -------- // Deploy ERC-1967 proxied contracts. 
output.l1ERC721BridgeProxy = - L1ERC721Bridge(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "L1ERC721Bridge")); + IL1ERC721Bridge(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "L1ERC721Bridge")); output.optimismPortalProxy = - OptimismPortal2(payable(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "OptimismPortal"))); + IOptimismPortal2(payable(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "OptimismPortal"))); output.systemConfigProxy = - SystemConfig(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "SystemConfig")); + ISystemConfig(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "SystemConfig")); output.optimismMintableERC20FactoryProxy = OptimismMintableERC20Factory( deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "OptimismMintableERC20Factory") ); output.disputeGameFactoryProxy = - DisputeGameFactory(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "DisputeGameFactory")); + IDisputeGameFactory(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "DisputeGameFactory")); output.anchorStateRegistryProxy = - AnchorStateRegistry(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "AnchorStateRegistry")); + IAnchorStateRegistry(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "AnchorStateRegistry")); // Deploy legacy proxied contracts. 
- output.l1StandardBridgeProxy = L1StandardBridge( + output.l1StandardBridgeProxy = IL1StandardBridge( payable(Blueprint.deployFrom(blueprint.l1ChugSplashProxy, salt, abi.encode(output.opChainProxyAdmin))) ); output.opChainProxyAdmin.setProxyType(address(output.l1StandardBridgeProxy), ProxyAdmin.ProxyType.CHUGSPLASH); string memory contractName = "OVM_L1CrossDomainMessenger"; - output.l1CrossDomainMessengerProxy = L1CrossDomainMessenger( + output.l1CrossDomainMessengerProxy = IL1CrossDomainMessenger( Blueprint.deployFrom(blueprint.resolvedDelegateProxy, salt, abi.encode(output.addressManager, contractName)) ); output.opChainProxyAdmin.setProxyType( @@ -280,17 +276,17 @@ contract OPContractsManager is ISemver, Initializable { // The AnchorStateRegistry Implementation is not MCP Ready, and therefore requires an implementation per chain. // It must be deployed after the DisputeGameFactoryProxy so that it can be provided as a constructor argument. - output.anchorStateRegistryImpl = AnchorStateRegistry( + output.anchorStateRegistryImpl = IAnchorStateRegistry( Blueprint.deployFrom(blueprint.anchorStateRegistry, salt, abi.encode(output.disputeGameFactoryProxy)) ); // Eventually we will switch from DelayedWETHPermissionedGameProxy to DelayedWETHPermissionlessGameProxy. - output.delayedWETHPermissionedGameProxy = DelayedWETH( + output.delayedWETHPermissionedGameProxy = IDelayedWETH( payable(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "DelayedWETHPermissionedGame")) ); // While not a proxy, we deploy the PermissionedDisputeGame here as well because it's bespoke per chain. 
- output.permissionedDisputeGame = PermissionedDisputeGame( + output.permissionedDisputeGame = IPermissionedDisputeGame( Blueprint.deployFrom( blueprint.permissionedDisputeGame1, blueprint.permissionedDisputeGame2, @@ -345,7 +341,7 @@ contract OPContractsManager is ISemver, Initializable { output.disputeGameFactoryProxy.transferOwnership(address(_input.roles.opChainProxyAdminOwner)); impl.logic = address(output.anchorStateRegistryImpl); - impl.initializer = AnchorStateRegistry.initialize.selector; + impl.initializer = IAnchorStateRegistry.initialize.selector; data = encodeAnchorStateRegistryInitializer(impl.initializer, _input); upgradeAndCall(output.opChainProxyAdmin, address(output.anchorStateRegistryProxy), impl.logic, data); @@ -461,7 +457,7 @@ contract OPContractsManager is ISemver, Initializable { if (keccak256(abi.encode(semver)) == keccak256(abi.encode(string("2.2.0")))) { // We are using the op-contracts/v1.6.0 SystemConfig contract. ( - ResourceMetering.ResourceConfig memory referenceResourceConfig, + IResourceMetering.ResourceConfig memory referenceResourceConfig, ISystemConfigV160.Addresses memory opChainAddrs ) = defaultSystemConfigV160Params(_selector, _input, _output); @@ -479,8 +475,10 @@ contract OPContractsManager is ISemver, Initializable { ); } else { // We are using the latest SystemConfig contract from the repo. 
- (ResourceMetering.ResourceConfig memory referenceResourceConfig, SystemConfig.Addresses memory opChainAddrs) - = defaultSystemConfigParams(_selector, _input, _output); + ( + IResourceMetering.ResourceConfig memory referenceResourceConfig, + ISystemConfig.Addresses memory opChainAddrs + ) = defaultSystemConfigParams(_selector, _input, _output); return abi.encodeWithSelector( _selector, @@ -563,8 +561,8 @@ contract OPContractsManager is ISemver, Initializable { returns (bytes memory) { // this line fails in the op-deployer tests because it is not passing in any data - AnchorStateRegistry.StartingAnchorRoot[] memory startingAnchorRoots = - abi.decode(_input.startingAnchorRoots, (AnchorStateRegistry.StartingAnchorRoot[])); + IAnchorStateRegistry.StartingAnchorRoot[] memory startingAnchorRoots = + abi.decode(_input.startingAnchorRoots, (IAnchorStateRegistry.StartingAnchorRoot[])); return abi.encodeWithSelector(_selector, startingAnchorRoots, superchainConfig); } @@ -615,7 +613,7 @@ contract OPContractsManager is ISemver, Initializable { internal view virtual - returns (ResourceMetering.ResourceConfig memory resourceConfig_, SystemConfig.Addresses memory opChainAddrs_) + returns (IResourceMetering.ResourceConfig memory resourceConfig_, ISystemConfig.Addresses memory opChainAddrs_) { // We use assembly to easily convert from IResourceMetering.ResourceConfig to ResourceMetering.ResourceConfig. // This is required because we have not yet fully migrated the codebase to be interface-based. 
@@ -624,7 +622,7 @@ contract OPContractsManager is ISemver, Initializable { resourceConfig_ := resourceConfig } - opChainAddrs_ = SystemConfig.Addresses({ + opChainAddrs_ = ISystemConfig.Addresses({ l1CrossDomainMessenger: address(_output.l1CrossDomainMessengerProxy), l1ERC721Bridge: address(_output.l1ERC721BridgeProxy), l1StandardBridge: address(_output.l1StandardBridgeProxy), @@ -653,7 +651,7 @@ contract OPContractsManager is ISemver, Initializable { view virtual returns ( - ResourceMetering.ResourceConfig memory resourceConfig_, + IResourceMetering.ResourceConfig memory resourceConfig_, ISystemConfigV160.Addresses memory opChainAddrs_ ) { diff --git a/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol b/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol index ae7ac71c2ae9..133f2d629a5e 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol @@ -2,17 +2,17 @@ pragma solidity 0.8.15; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; -import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; -import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; -import { ResourceMetering } from "src/L1/ResourceMetering.sol"; -import { SystemConfig } from "src/L1/SystemConfig.sol"; -import { SystemConfigInterop } from "src/L1/SystemConfigInterop.sol"; +import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; +import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; +import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; +import { ISystemConfigInterop } from "src/L1/interfaces/ISystemConfigInterop.sol"; /// @custom:proxied true contract OPContractsManagerInterop is OPContractsManager { constructor( - SuperchainConfig _superchainConfig, - ProtocolVersions _protocolVersions + ISuperchainConfig 
_superchainConfig, + IProtocolVersions _protocolVersions ) OPContractsManager(_superchainConfig, _protocolVersions) { } @@ -30,7 +30,7 @@ contract OPContractsManagerInterop is OPContractsManager { override returns (bytes memory) { - (ResourceMetering.ResourceConfig memory referenceResourceConfig, SystemConfig.Addresses memory opChainAddrs) = + (IResourceMetering.ResourceConfig memory referenceResourceConfig, ISystemConfig.Addresses memory opChainAddrs) = defaultSystemConfigParams(_selector, _input, _output); // TODO For now we assume that the dependency manager is the same as the proxy admin owner. diff --git a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol index 8edc33f47772..52c49e4dccf1 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol @@ -9,13 +9,15 @@ import { DeployOPChain_TestBase } from "test/opcm/DeployOPChain.t.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; +import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; import { SystemConfig } from "src/L1/SystemConfig.sol"; // Exposes internal functions for testing. 
contract OPContractsManager_Harness is OPContractsManager { constructor( - SuperchainConfig _superchainConfig, - ProtocolVersions _protocolVersions + ISuperchainConfig _superchainConfig, + IProtocolVersions _protocolVersions ) OPContractsManager(_superchainConfig, _protocolVersions) { } @@ -100,8 +102,8 @@ contract OPContractsManager_InternalMethods_Test is Test { OPContractsManager_Harness opcmHarness; function setUp() public { - SuperchainConfig superchainConfigProxy = SuperchainConfig(makeAddr("superchainConfig")); - ProtocolVersions protocolVersionsProxy = ProtocolVersions(makeAddr("protocolVersions")); + ISuperchainConfig superchainConfigProxy = ISuperchainConfig(makeAddr("superchainConfig")); + IProtocolVersions protocolVersionsProxy = IProtocolVersions(makeAddr("protocolVersions")); vm.etch(address(superchainConfigProxy), hex"01"); vm.etch(address(protocolVersionsProxy), hex"01"); From cd7e9d40414741ef27ce785a6c305a89ab035e4a Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Sat, 28 Sep 2024 19:50:51 -0600 Subject: [PATCH 075/116] feat: Rewrite natspec checker in Go (#12191) * feat: Rewrite natspec checker in Go Rewrites the `semver-natspec-check-no-build` Just command in Go to reduce runtime. This PR reduces runtime for this check from ~1m30s to about 3 seconds post-compilation. 
* remove old script * add unit tests * rename test * review updates --- .circleci/config.yml | 3 + packages/contracts-bedrock/justfile | 2 +- .../checks/check-semver-natspec-match.sh | 74 ------ .../scripts/checks/semver-natspec/main.go | 215 ++++++++++++++++++ .../checks/semver-natspec/main_test.go | 124 ++++++++++ 5 files changed, 343 insertions(+), 75 deletions(-) delete mode 100755 packages/contracts-bedrock/scripts/checks/check-semver-natspec-match.sh create mode 100644 packages/contracts-bedrock/scripts/checks/semver-natspec/main.go create mode 100644 packages/contracts-bedrock/scripts/checks/semver-natspec/main_test.go diff --git a/.circleci/config.yml b/.circleci/config.yml index 6930b4ddd1e5..73ac7d81b3fd 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1458,6 +1458,9 @@ workflows: - op-program - op-service - op-supervisor + - go-test: + name: semver-natspec-tests + module: packages/contracts-bedrock/scripts/checks/semver-natspec - go-test-kurtosis: name: op-chain-ops-integration module: op-chain-ops diff --git a/packages/contracts-bedrock/justfile b/packages/contracts-bedrock/justfile index b5bce1e55b7a..a9c621cf240c 100644 --- a/packages/contracts-bedrock/justfile +++ b/packages/contracts-bedrock/justfile @@ -163,7 +163,7 @@ semver-diff-check: build semver-diff-check-no-build # Checks that semver natspec is equal to the actual semver version. # Does not build contracts. semver-natspec-check-no-build: - ./scripts/checks/check-semver-natspec-match.sh + go run ./scripts/checks/semver-natspec # Checks that semver natspec is equal to the actual semver version. 
semver-natspec-check: build semver-natspec-check-no-build diff --git a/packages/contracts-bedrock/scripts/checks/check-semver-natspec-match.sh b/packages/contracts-bedrock/scripts/checks/check-semver-natspec-match.sh deleted file mode 100755 index de4de3f8497a..000000000000 --- a/packages/contracts-bedrock/scripts/checks/check-semver-natspec-match.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# Grab the directory of the contracts-bedrock package -SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -CONTRACTS_BASE=$(dirname "$(dirname "$SCRIPT_DIR")") -ARTIFACTS_DIR="$CONTRACTS_BASE/forge-artifacts" -CONTRACTS_DIR="$CONTRACTS_BASE/src" - -# Load semver-utils -# shellcheck source=/dev/null -source "$SCRIPT_DIR/utils/semver-utils.sh" - -# Flag to track if any errors are detected -has_errors=false - -# Iterate through each artifact file -for artifact_file in "$ARTIFACTS_DIR"/**/*.json; do - # Get the contract name and find the corresponding source file - contract_name=$(basename "$artifact_file" .json) - contract_file=$(find "$CONTRACTS_DIR" -name "$contract_name.sol") - - # Try to extract version as a constant - raw_metadata=$(jq -r '.rawMetadata' "$artifact_file") - artifact_version=$(echo "$raw_metadata" | jq -r '.output.devdoc.stateVariables.version."custom:semver"') - - is_constant=true - if [ "$artifact_version" = "null" ]; then - # If not found as a constant, try to extract as a function - artifact_version=$(echo "$raw_metadata" | jq -r '.output.devdoc.methods."version()"."custom:semver"') - is_constant=false - fi - - # If @custom:semver is not found in either location, skip this file - if [ "$artifact_version" = "null" ]; then - continue - fi - - # If source file is not found, report an error - if [ -z "$contract_file" ]; then - echo "❌ $contract_name: Source file not found" - continue - fi - - # Extract version from source based on whether it's a constant or function - if [ "$is_constant" = true ]; 
then - source_version=$(extract_constant_version "$contract_file") - else - source_version=$(extract_function_version "$contract_file") - fi - - # If source version is not found, report an error - if [ "$source_version" = "" ]; then - echo "❌ Error: failed to find version string for $contract_name" - echo " this is probably a bug in check-contract-semver.sh" - echo " please report or fix the issue if possible" - has_errors=true - fi - - # Compare versions - if [ "$source_version" != "$artifact_version" ]; then - echo "❌ Error: $contract_name has different semver in code and devdoc" - echo " Code: $source_version" - echo " Devdoc: $artifact_version" - has_errors=true - else - echo "✅ $contract_name: code: $source_version, devdoc: $artifact_version" - fi -done - -# If any errors were detected, exit with a non-zero status -if [ "$has_errors" = true ]; then - exit 1 -fi diff --git a/packages/contracts-bedrock/scripts/checks/semver-natspec/main.go b/packages/contracts-bedrock/scripts/checks/semver-natspec/main.go new file mode 100644 index 000000000000..d1e2153c02ef --- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/semver-natspec/main.go @@ -0,0 +1,215 @@ +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "runtime" + "strings" + "sync" + "sync/atomic" +) + +type ArtifactsWrapper struct { + RawMetadata string `json:"rawMetadata"` +} + +type Artifacts struct { + Output struct { + Devdoc struct { + StateVariables struct { + Version struct { + Semver string `json:"custom:semver"` + } `json:"version"` + } `json:"stateVariables,omitempty"` + Methods struct { + Version struct { + Semver string `json:"custom:semver"` + } `json:"version()"` + } `json:"methods,omitempty"` + } `json:"devdoc"` + } `json:"output"` +} + +var ConstantVersionPattern = regexp.MustCompile(`string.*constant.*version\s+=\s+"([^"]+)";`) + +var FunctionVersionPattern = 
regexp.MustCompile(`^\s+return\s+"((?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)(?:-(?P(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?)";$`) + +var InteropVersionPattern = regexp.MustCompile(`^\s+return\s+string\.concat\(super\.version\(\), "((.*)\+interop(.*)?)"\);`) + +func main() { + if err := run(); err != nil { + writeStderr("an error occurred: %v", err) + os.Exit(1) + } +} + +func writeStderr(msg string, args ...any) { + _, _ = fmt.Fprintf(os.Stderr, msg+"\n", args...) +} + +func run() error { + cwd, err := os.Getwd() + if err != nil { + return fmt.Errorf("failed to get current working directory: %w", err) + } + + writeStderr("working directory: %s", cwd) + + artifactsDir := filepath.Join(cwd, "forge-artifacts") + srcDir := filepath.Join(cwd, "src") + + artifactFiles, err := glob(artifactsDir, ".json") + if err != nil { + return fmt.Errorf("failed to get artifact files: %w", err) + } + contractFiles, err := glob(srcDir, ".sol") + if err != nil { + return fmt.Errorf("failed to get contract files: %w", err) + } + + var hasErr int32 + var outMtx sync.Mutex + fail := func(msg string, args ...any) { + outMtx.Lock() + writeStderr("❌ "+msg, args...) 
+ outMtx.Unlock() + atomic.StoreInt32(&hasErr, 1) + } + + sem := make(chan struct{}, runtime.NumCPU()) + for contractName, artifactPath := range artifactFiles { + contractName := contractName + artifactPath := artifactPath + + sem <- struct{}{} + + go func() { + defer func() { + <-sem + }() + + af, err := os.Open(artifactPath) + if err != nil { + fail("%s: failed to open contract artifact: %v", contractName, err) + return + } + defer af.Close() + + var wrapper ArtifactsWrapper + if err := json.NewDecoder(af).Decode(&wrapper); err != nil { + fail("%s: failed to parse artifact file: %v", contractName, err) + return + } + + if wrapper.RawMetadata == "" { + return + } + + var artifactData Artifacts + if err := json.Unmarshal([]byte(wrapper.RawMetadata), &artifactData); err != nil { + fail("%s: failed to unwrap artifact metadata: %v", contractName, err) + return + } + + artifactVersion := artifactData.Output.Devdoc.StateVariables.Version.Semver + + isConstant := true + if artifactData.Output.Devdoc.StateVariables.Version.Semver == "" { + artifactVersion = artifactData.Output.Devdoc.Methods.Version.Semver + isConstant = false + } + + if artifactVersion == "" { + return + } + + contractPath := contractFiles[contractName] + if contractPath == "" { + fail("%s: Source file not found", contractName) + return + } + + cf, err := os.Open(contractPath) + if err != nil { + fail("%s: failed to open contract source: %v", contractName, err) + return + } + defer cf.Close() + + sourceData, err := io.ReadAll(cf) + if err != nil { + fail("%s: failed to read contract source: %v", contractName, err) + return + } + + var sourceVersion string + + if isConstant { + sourceVersion = findLine(sourceData, ConstantVersionPattern) + } else { + sourceVersion = findLine(sourceData, FunctionVersionPattern) + } + + // Need to define a special case for interop contracts since they technically + // use an invalid semver format. 
Checking for sourceVersion == "" allows the + // team to update the format to a valid semver format in the future without + // needing to change this program. + if sourceVersion == "" && strings.HasSuffix(contractName, "Interop") { + sourceVersion = findLine(sourceData, InteropVersionPattern) + } + + if sourceVersion == "" { + fail("%s: version not found in source", contractName) + return + } + + if sourceVersion != artifactVersion { + fail("%s: version mismatch: source=%s, artifact=%s", contractName, sourceVersion, artifactVersion) + return + } + + _, _ = fmt.Fprintf(os.Stderr, "✅ %s: code: %s, artifact: %s\n", contractName, sourceVersion, artifactVersion) + }() + } + + for i := 0; i < cap(sem); i++ { + sem <- struct{}{} + } + + if atomic.LoadInt32(&hasErr) == 1 { + return fmt.Errorf("semver check failed, see logs above") + } + + return nil +} + +func glob(dir string, ext string) (map[string]string, error) { + out := make(map[string]string) + err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if !info.IsDir() && filepath.Ext(path) == ext { + out[strings.TrimSuffix(filepath.Base(path), ext)] = path + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to walk directory: %w", err) + } + return out, nil +} + +func findLine(in []byte, pattern *regexp.Regexp) string { + scanner := bufio.NewScanner(bytes.NewReader(in)) + for scanner.Scan() { + match := pattern.FindStringSubmatch(scanner.Text()) + if len(match) > 0 { + return match[1] + } + } + return "" +} diff --git a/packages/contracts-bedrock/scripts/checks/semver-natspec/main_test.go b/packages/contracts-bedrock/scripts/checks/semver-natspec/main_test.go new file mode 100644 index 000000000000..7a8872d76d78 --- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/semver-natspec/main_test.go @@ -0,0 +1,124 @@ +package main + +import ( + "regexp" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRegexes(t *testing.T) { + 
t.Run("ConstantVersionPattern", func(t *testing.T) { + testRegex(t, ConstantVersionPattern, []regexTest{ + { + name: "constant version", + input: `string constant version = "1.2.3";`, + capture: "1.2.3", + }, + { + name: "constant version with weird spaces", + input: ` string constant version = "1.2.3";`, + capture: "1.2.3", + }, + { + name: "constant version with visibility", + input: `string public constant version = "1.2.3";`, + capture: "1.2.3", + }, + { + name: "different variable name", + input: `string constant VERSION = "1.2.3";`, + capture: "", + }, + { + name: "different type", + input: `uint constant version = 1;`, + capture: "", + }, + { + name: "not constant", + input: `string version = "1.2.3";`, + capture: "", + }, + { + name: "unterminated", + input: `string constant version = "1.2.3"`, + capture: "", + }, + }) + }) + + t.Run("FunctionVersionPattern", func(t *testing.T) { + testRegex(t, FunctionVersionPattern, []regexTest{ + { + name: "function version", + input: ` return "1.2.3";`, + capture: "1.2.3", + }, + { + name: "function version with weird spaces", + input: ` return "1.2.3";`, + capture: "1.2.3", + }, + { + name: "function version with prerelease", + input: ` return "1.2.3-alpha.1";`, + capture: "1.2.3-alpha.1", + }, + { + name: "invalid semver", + input: ` return "1.2.cabdab";`, + capture: "", + }, + { + name: "not a return statement", + input: `function foo()`, + capture: "", + }, + }) + }) + + t.Run("InteropVersionPattern", func(t *testing.T) { + testRegex(t, InteropVersionPattern, []regexTest{ + { + name: "interop version", + input: ` return string.concat(super.version(), "+interop");`, + capture: "+interop", + }, + { + name: "interop version but as a valid semver", + input: ` return string.concat(super.version(), "0.0.0+interop");`, + capture: "0.0.0+interop", + }, + { + name: "not an interop version", + input: ` return string.concat(super.version(), "hello!");`, + capture: "", + }, + { + name: "invalid syntax", + input: ` return 
string.concat(super.version(), "0.0.0+interop`, + capture: "", + }, + { + name: "something else is concatted", + input: ` return string.concat("superduper", "mart");`, + capture: "", + }, + }) + }) +} + +type regexTest struct { + name string + input string + capture string +} + +func testRegex(t *testing.T, re *regexp.Regexp, tests []regexTest) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require.Equal(t, test.capture, findLine([]byte(test.input), re)) + }) + } +} From f2d5e32bc6cd4ffbe95819b6b5455f7bd6c7454a Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Mon, 30 Sep 2024 07:48:16 +0800 Subject: [PATCH 076/116] add `retry.Do0` (#12194) * add retry.Do0 * update code to use Do0 --- op-node/node/conductor.go | 5 ++--- op-node/node/node.go | 6 +++--- op-service/retry/operation.go | 23 ++++++++++++++++++----- 3 files changed, 23 insertions(+), 11 deletions(-) diff --git a/op-node/node/conductor.go b/op-node/node/conductor.go index 93bde641a453..ff5723889b95 100644 --- a/op-node/node/conductor.go +++ b/op-node/node/conductor.go @@ -91,12 +91,11 @@ func (c *ConductorClient) CommitUnsafePayload(ctx context.Context, payload *eth. ctx, cancel := context.WithTimeout(ctx, c.cfg.ConductorRpcTimeout) defer cancel() - // extra bool return value is required for the generic, can be ignored. 
- _, err := retry.Do(ctx, 2, retry.Fixed(50*time.Millisecond), func() (bool, error) { + err := retry.Do0(ctx, 2, retry.Fixed(50*time.Millisecond), func() error { record := c.metrics.RecordRPCClientRequest("conductor_commitUnsafePayload") err := c.apiClient.CommitUnsafePayload(ctx, payload) record(err) - return true, err + return err }) return err } diff --git a/op-node/node/node.go b/op-node/node/node.go index 9d9f6a4343ac..298c98aa2b18 100644 --- a/op-node/node/node.go +++ b/op-node/node/node.go @@ -262,12 +262,12 @@ func (n *OpNode) initRuntimeConfig(ctx context.Context, cfg *Config) error { } // initialize the runtime config before unblocking - if _, err := retry.Do(ctx, 5, retry.Fixed(time.Second*10), func() (eth.L1BlockRef, error) { - ref, err := reload(ctx) + if err := retry.Do0(ctx, 5, retry.Fixed(time.Second*10), func() error { + _, err := reload(ctx) if errors.Is(err, errNodeHalt) { // don't retry on halt error err = nil } - return ref, err + return err }); err != nil { return fmt.Errorf("failed to load runtime configuration repeatedly, last error: %w", err) } diff --git a/op-service/retry/operation.go b/op-service/retry/operation.go index 4f0142cde946..95925296811d 100644 --- a/op-service/retry/operation.go +++ b/op-service/retry/operation.go @@ -40,25 +40,38 @@ func Do2[T, U any](ctx context.Context, maxAttempts int, strategy Strategy, op f // Strategy. 
func Do[T any](ctx context.Context, maxAttempts int, strategy Strategy, op func() (T, error)) (T, error) { var empty, ret T + f := func() (err error) { + ret, err = op() + return + } + err := Do0(ctx, maxAttempts, strategy, f) + if err != nil { + return empty, err + } + return ret, err +} + +// Do0 is similar to Do and Do2, execept that `op` only returns an error +func Do0(ctx context.Context, maxAttempts int, strategy Strategy, op func() error) error { var err error if maxAttempts < 1 { - return empty, fmt.Errorf("need at least 1 attempt to run op, but have %d max attempts", maxAttempts) + return fmt.Errorf("need at least 1 attempt to run op, but have %d max attempts", maxAttempts) } for i := 0; i < maxAttempts; i++ { if ctx.Err() != nil { - return empty, ctx.Err() + return ctx.Err() } - ret, err = op() + err = op() if err == nil { - return ret, nil + return nil } // Don't sleep when we are about to exit the loop & return ErrFailedPermanently if i != maxAttempts-1 { time.Sleep(strategy.Duration(i)) } } - return empty, &ErrFailedPermanently{ + return &ErrFailedPermanently{ attempts: maxAttempts, LastErr: err, } From d3402628002b7e86bf8f7e2d484e0c2f1c6ddf27 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Mon, 30 Sep 2024 14:05:27 +1000 Subject: [PATCH 077/116] op-program: Switch fpp-verify back to running in separate process. (#12201) Trying to reproduce the missing prestate failure. Also now runs hourly instead of 4 hourly to increase chances of finding a failing case. 
--- .circleci/config.yml | 2 +- op-program/verify/verify.go | 80 +++++++++++++++++++++++++++---------- 2 files changed, 59 insertions(+), 23 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 73ac7d81b3fd..961dfe6bb5d0 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1677,7 +1677,7 @@ workflows: scheduled-fpp: when: - equal: [ build_four_hours, <> ] + equal: [ build_hourly, <> ] jobs: - fpp-verify: context: diff --git a/op-program/verify/verify.go b/op-program/verify/verify.go index 43150cfbc2b4..a04a725abe66 100644 --- a/op-program/verify/verify.go +++ b/op-program/verify/verify.go @@ -5,6 +5,7 @@ import ( "fmt" "math/big" "os" + "os/exec" "path/filepath" "strconv" "strings" @@ -26,6 +27,8 @@ import ( "github.com/ethereum/go-ethereum/rpc" ) +const runInProcess = false + type Runner struct { l1RpcUrl string l1RpcKind string @@ -99,7 +102,7 @@ func (r *Runner) RunBetweenBlocks(ctx context.Context, l1Head common.Hash, start return fmt.Errorf("failed to find ending block info: %w", err) } - return r.run(l1Head, agreedBlockInfo, agreedOutputRoot, claimedOutputRoot, claimedBlockInfo) + return r.run(ctx, l1Head, agreedBlockInfo, agreedOutputRoot, claimedOutputRoot, claimedBlockInfo) } func (r *Runner) createL2Client(ctx context.Context) (*sources.L2Client, error) { @@ -157,10 +160,10 @@ func (r *Runner) RunToFinalized(ctx context.Context) error { return fmt.Errorf("failed to find ending block info: %w", err) } - return r.run(l1Head.Hash(), agreedBlockInfo, agreedOutputRoot, claimedOutputRoot, claimedBlockInfo) + return r.run(ctx, l1Head.Hash(), agreedBlockInfo, agreedOutputRoot, claimedOutputRoot, claimedBlockInfo) } -func (r *Runner) run(l1Head common.Hash, agreedBlockInfo eth.BlockInfo, agreedOutputRoot common.Hash, claimedOutputRoot common.Hash, claimedBlockInfo eth.BlockInfo) error { +func (r *Runner) run(ctx context.Context, l1Head common.Hash, agreedBlockInfo eth.BlockInfo, agreedOutputRoot common.Hash, claimedOutputRoot 
common.Hash, claimedBlockInfo eth.BlockInfo) error { var err error if r.dataDir == "" { r.dataDir, err = os.MkdirTemp("", "oracledata") @@ -199,31 +202,64 @@ func (r *Runner) run(l1Head common.Hash, agreedBlockInfo eth.BlockInfo, agreedOu } fmt.Printf("Configuration: %s\n", argsStr) - offlineCfg := config.NewConfig( - r.rollupCfg, r.chainCfg, l1Head, agreedBlockInfo.Hash(), agreedOutputRoot, claimedOutputRoot, claimedBlockInfo.NumberU64()) - offlineCfg.DataDir = r.dataDir - onlineCfg := *offlineCfg - onlineCfg.L1URL = r.l1RpcUrl - onlineCfg.L1BeaconURL = r.l1BeaconUrl - onlineCfg.L2URL = r.l2RpcUrl - if r.l1RpcKind != "" { - onlineCfg.L1RPCKind = sources.RPCProviderKind(r.l1RpcKind) - } + if runInProcess { + offlineCfg := config.NewConfig( + r.rollupCfg, r.chainCfg, l1Head, agreedBlockInfo.Hash(), agreedOutputRoot, claimedOutputRoot, claimedBlockInfo.NumberU64()) + offlineCfg.DataDir = r.dataDir - fmt.Println("Running in online mode") - err = host.Main(oplog.NewLogger(os.Stderr, r.logCfg), &onlineCfg) - if err != nil { - return fmt.Errorf("online mode failed: %w", err) - } + onlineCfg := *offlineCfg + onlineCfg.L1URL = r.l1RpcUrl + onlineCfg.L1BeaconURL = r.l1BeaconUrl + onlineCfg.L2URL = r.l2RpcUrl + if r.l1RpcKind != "" { + onlineCfg.L1RPCKind = sources.RPCProviderKind(r.l1RpcKind) + } - fmt.Println("Running in offline mode") - err = host.Main(oplog.NewLogger(os.Stderr, r.logCfg), offlineCfg) - if err != nil { - return fmt.Errorf("offline mode failed: %w", err) + fmt.Println("Running in online mode") + err = host.Main(oplog.NewLogger(os.Stderr, r.logCfg), &onlineCfg) + if err != nil { + return fmt.Errorf("online mode failed: %w", err) + } + + fmt.Println("Running in offline mode") + err = host.Main(oplog.NewLogger(os.Stderr, r.logCfg), offlineCfg) + if err != nil { + return fmt.Errorf("offline mode failed: %w", err) + } + } else { + fmt.Println("Running in online mode") + onlineArgs := make([]string, len(args)) + copy(onlineArgs, args) + onlineArgs = 
append(onlineArgs, + "--l1", r.l1RpcUrl, + "--l1.beacon", r.l1BeaconUrl, + "--l2", r.l2RpcUrl) + if r.l1RpcKind != "" { + onlineArgs = append(onlineArgs, "--l1.rpckind", r.l1RpcKind) + } + err = runFaultProofProgram(ctx, onlineArgs) + if err != nil { + return fmt.Errorf("online mode failed: %w", err) + } + + fmt.Println("Running in offline mode") + err = runFaultProofProgram(ctx, args) + if err != nil { + return fmt.Errorf("offline mode failed: %w", err) + } } return nil } +func runFaultProofProgram(ctx context.Context, args []string) error { + ctx, cancel := context.WithTimeout(ctx, 60*time.Minute) + defer cancel() + cmd := exec.CommandContext(ctx, "./bin/op-program", args...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} + func outputAtBlockNum(ctx context.Context, l2Client *sources.L2Client, blockNum uint64) (eth.BlockInfo, common.Hash, error) { startBlockInfo, err := l2Client.InfoByNumber(ctx, blockNum) if err != nil { From cd1cc1039b58ce5146b49d1a018bc303cdaa56b2 Mon Sep 17 00:00:00 2001 From: Maurelian Date: Mon, 30 Sep 2024 09:41:31 -0400 Subject: [PATCH 078/116] test: Transfer to finalSystemOwner rather than SystemOwnerSafe (#12116) * test: Transfer to finalSystemOwner rather than SystemOwnerSafe * feat: Fix comment references to ProxyAdmin --- op-e2e/system/gastoken/gastoken_test.go | 57 +--- .../scripts/deploy/Deploy.s.sol | 271 +++++------------- .../scripts/deploy/DeployOwnership.s.sol | 113 +++++++- .../scripts/fpac/FPACOPS.s.sol | 34 +-- .../scripts/fpac/FPACOPS2.s.sol | 42 +-- .../contracts-bedrock/scripts/fpac/Makefile | 4 +- .../contracts-bedrock/scripts/fpac/README.md | 2 +- .../test/dispute/DelayedWETH.t.sol | 2 +- .../test/dispute/DisputeGameFactory.t.sol | 2 +- 9 files changed, 238 insertions(+), 289 deletions(-) diff --git a/op-e2e/system/gastoken/gastoken_test.go b/op-e2e/system/gastoken/gastoken_test.go index 445f672743cf..839f33634046 100644 --- a/op-e2e/system/gastoken/gastoken_test.go +++ 
b/op-e2e/system/gastoken/gastoken_test.go @@ -2,7 +2,6 @@ package gastoken import ( "context" - "fmt" "math/big" "testing" "time" @@ -440,34 +439,6 @@ func TestCustomGasToken(t *testing.T) { checkFeeWithdrawal(t, enabled) } -// callViaSafe will use the Safe smart account at safeAddress to send a transaction to target using the provided data. The transaction signature is constructed from -// the supplied opts. -func callViaSafe(opts *bind.TransactOpts, client *ethclient.Client, safeAddress common.Address, target common.Address, data []byte) (*types.Transaction, error) { - signature := [65]byte{} - copy(signature[12:], opts.From[:]) - signature[64] = uint8(1) - - safe, err := bindings.NewSafe(safeAddress, client) - if err != nil { - return nil, err - } - - owners, err := safe.GetOwners(&bind.CallOpts{}) - if err != nil { - return nil, err - } - - isOwner, err := safe.IsOwner(&bind.CallOpts{}, opts.From) - if err != nil { - return nil, err - } - if !isOwner { - return nil, fmt.Errorf("address %s is not in owners list %s", opts.From, owners) - } - - return safe.ExecTransaction(opts, target, big.NewInt(0), data, 0, big.NewInt(0), big.NewInt(0), big.NewInt(0), common.Address{}, common.Address{}, signature[:]) -} - // setCustomGasToeken enables the Custom Gas Token feature on a chain where it wasn't enabled at genesis. // It reads existing parameters from the SystemConfig contract, inserts the supplied cgtAddress and reinitializes that contract. // To do this it uses the ProxyAdmin and StorageSetter from the supplied cfg. 
@@ -518,27 +489,18 @@ func setCustomGasToken(t *testing.T, cfg e2esys.SystemConfig, sys *e2esys.System proxyAdmin, err := bindings.NewProxyAdmin(cfg.L1Deployments.ProxyAdmin, l1Client) require.NoError(t, err) - // Compute Proxy Admin Owner (this is a SAFE with 1 owner) - proxyAdminOwner, err := proxyAdmin.Owner(&bind.CallOpts{}) - require.NoError(t, err) - // Deploy a new StorageSetter contract storageSetterAddr, tx, _, err := bindings.DeployStorageSetter(deployerOpts, l1Client) waitForTx(t, tx, err, l1Client) - // Set up a signer which controls the Proxy Admin Owner SAFE - safeOwnerOpts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.Deployer, cfg.L1ChainIDBig()) - require.NoError(t, err) - - // Encode calldata for upgrading SystemConfigProxy to the StorageSetter implementation - proxyAdminABI, err := bindings.ProxyAdminMetaData.GetAbi() - require.NoError(t, err) - encodedUpgradeCall, err := proxyAdminABI.Pack("upgrade", - cfg.L1Deployments.SystemConfigProxy, storageSetterAddr) + // Set up a signer which controls the Proxy Admin. + // The deploy config's finalSystemOwner is the owner of the ProxyAdmin as well as the SystemConfig, + // so we can use that address for the proxy admin owner. 
+ proxyAdminOwnerOpts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.SysCfgOwner, cfg.L1ChainIDBig()) require.NoError(t, err) - // Execute the upgrade SystemConfigProxy -> StorageSetter - tx, err = callViaSafe(safeOwnerOpts, l1Client, proxyAdminOwner, cfg.L1Deployments.ProxyAdmin, encodedUpgradeCall) + // Execute the upgrade SystemConfigProxy -> StorageSetter via ProxyAdmin + tx, err = proxyAdmin.Upgrade(proxyAdminOwnerOpts, cfg.L1Deployments.SystemConfigProxy, storageSetterAddr) waitForTx(t, tx, err, l1Client) // Bind a StorageSetter to the SystemConfigProxy address @@ -554,13 +516,8 @@ func setCustomGasToken(t *testing.T, cfg e2esys.SystemConfig, sys *e2esys.System require.NoError(t, err) require.Equal(t, currentSlotValue, [32]byte{0}) - // Prepare calldata for SystemConfigProxy -> SystemConfig upgrade - encodedUpgradeCall, err = proxyAdminABI.Pack("upgrade", - cfg.L1Deployments.SystemConfigProxy, cfg.L1Deployments.SystemConfig) - require.NoError(t, err) - // Execute SystemConfigProxy -> SystemConfig upgrade - tx, err = callViaSafe(safeOwnerOpts, l1Client, proxyAdminOwner, cfg.L1Deployments.ProxyAdmin, encodedUpgradeCall) + tx, err = proxyAdmin.Upgrade(proxyAdminOwnerOpts, cfg.L1Deployments.SystemConfigProxy, cfg.L1Deployments.SystemConfig) waitForTx(t, tx, err, l1Client) // Reinitialise with existing initializer values but with custom gas token set diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index eda0a5695eb2..6d8004b5298a 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -9,12 +9,6 @@ import { stdJson } from "forge-std/StdJson.sol"; import { AlphabetVM } from "test/mocks/AlphabetVM.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; -// Safe -import { GnosisSafe as Safe } from "safe-contracts/GnosisSafe.sol"; -import { OwnerManager } from 
"safe-contracts/base/OwnerManager.sol"; -import { GnosisSafeProxyFactory as SafeProxyFactory } from "safe-contracts/proxies/GnosisSafeProxyFactory.sol"; -import { Enum as SafeOps } from "safe-contracts/common/Enum.sol"; - // Scripts import { Deployer } from "scripts/deploy/Deployer.sol"; import { Chains } from "scripts/libraries/Chains.sol"; @@ -196,68 +190,15 @@ contract Deploy is Deployer { // State Changing Helper Functions // //////////////////////////////////////////////////////////////// - /// @notice Gets the address of the SafeProxyFactory and Safe singleton for use in deploying a new GnosisSafe. - function _getSafeFactory() internal returns (SafeProxyFactory safeProxyFactory_, Safe safeSingleton_) { - if (getAddress("SafeProxyFactory") != address(0)) { - // The SafeProxyFactory is already saved, we can just use it. - safeProxyFactory_ = SafeProxyFactory(getAddress("SafeProxyFactory")); - safeSingleton_ = Safe(getAddress("SafeSingleton")); - return (safeProxyFactory_, safeSingleton_); - } - - // These are the standard create2 deployed contracts. First we'll check if they are deployed, - // if not we'll deploy new ones, though not at these addresses. - address safeProxyFactory = 0xa6B71E26C5e0845f74c812102Ca7114b6a896AB2; - address safeSingleton = 0xd9Db270c1B5E3Bd161E8c8503c55cEABeE709552; - - safeProxyFactory.code.length == 0 - ? safeProxyFactory_ = new SafeProxyFactory() - : safeProxyFactory_ = SafeProxyFactory(safeProxyFactory); - - safeSingleton.code.length == 0 ? safeSingleton_ = new Safe() : safeSingleton_ = Safe(payable(safeSingleton)); - - save("SafeProxyFactory", address(safeProxyFactory_)); - save("SafeSingleton", address(safeSingleton_)); - } - - /// @notice Make a call from the Safe contract to an arbitrary address with arbitrary data - function _callViaSafe(Safe _safe, address _target, bytes memory _data) internal { - // This is the signature format used when the caller is also the signer. 
- bytes memory signature = abi.encodePacked(uint256(uint160(msg.sender)), bytes32(0), uint8(1)); - - _safe.execTransaction({ - to: _target, - value: 0, - data: _data, - operation: SafeOps.Operation.Call, - safeTxGas: 0, - baseGas: 0, - gasPrice: 0, - gasToken: address(0), - refundReceiver: payable(address(0)), - signatures: signature - }); - } - - /// @notice Call from the Safe contract to the Proxy Admin's upgrade and call method - function _upgradeAndCallViaSafe(address _proxy, address _implementation, bytes memory _innerCallData) internal { - address proxyAdmin = mustGetAddress("ProxyAdmin"); - - bytes memory data = - abi.encodeCall(IProxyAdmin.upgradeAndCall, (payable(_proxy), _implementation, _innerCallData)); - - Safe safe = Safe(mustGetAddress("SystemOwnerSafe")); - _callViaSafe({ _safe: safe, _target: proxyAdmin, _data: data }); - } - /// @notice Transfer ownership of the ProxyAdmin contract to the final system owner function transferProxyAdminOwnership() public broadcast { IProxyAdmin proxyAdmin = IProxyAdmin(mustGetAddress("ProxyAdmin")); address owner = proxyAdmin.owner(); - address safe = mustGetAddress("SystemOwnerSafe"); - if (owner != safe) { - proxyAdmin.transferOwnership(safe); - console.log("ProxyAdmin ownership transferred to Safe at: %s", safe); + + address finalSystemOwner = cfg.finalSystemOwner(); + if (owner != finalSystemOwner) { + proxyAdmin.transferOwnership(finalSystemOwner); + console.log("ProxyAdmin ownership transferred to final system owner at: %s", finalSystemOwner); } } @@ -334,8 +275,6 @@ contract Deploy is Deployer { /// @notice Internal function containing the deploy logic. 
function _run(bool _needsSuperchain) internal { console.log("start of L1 Deploy!"); - deploySafe("SystemOwnerSafe"); - console.log("deployed Safe!"); // Deploy a new ProxyAdmin and AddressManager // This proxy will be used on the SuperchainConfig and ProtocolVersions contracts, as well as the contracts @@ -366,7 +305,6 @@ contract Deploy is Deployer { function setupAdmin() public { deployAddressManager(); deployProxyAdmin(); - transferProxyAdminOwnership(); } /// @notice Deploy a full system with a new SuperchainConfig @@ -393,7 +331,6 @@ contract Deploy is Deployer { // Ensure that the requisite contracts are deployed mustGetAddress("SuperchainConfigProxy"); - mustGetAddress("SystemOwnerSafe"); mustGetAddress("AddressManager"); mustGetAddress("ProxyAdmin"); @@ -409,6 +346,7 @@ contract Deploy is Deployer { transferDisputeGameFactoryOwnership(); transferDelayedWETHOwnership(); + transferProxyAdminOwnership(); } /// @notice Deploy all of the OP Chain specific contracts @@ -491,70 +429,6 @@ contract Deploy is Deployer { // Non-Proxied Deployment Functions // //////////////////////////////////////////////////////////////// - /// @notice Deploy the Safe - function deploySafe(string memory _name) public broadcast returns (address addr_) { - address[] memory owners = new address[](0); - addr_ = deploySafe(_name, owners, 1, true); - } - - /// @notice Deploy a new Safe contract. If the keepDeployer option is used to enable further setup actions, then - /// the removeDeployerFromSafe() function should be called on that safe after setup is complete. - /// Note this function does not have the broadcast modifier. - /// @param _name The name of the Safe to deploy. - /// @param _owners The owners of the Safe. - /// @param _threshold The threshold of the Safe. - /// @param _keepDeployer Wether or not the deployer address will be added as an owner of the Safe. 
- function deploySafe( - string memory _name, - address[] memory _owners, - uint256 _threshold, - bool _keepDeployer - ) - public - returns (address addr_) - { - bytes32 salt = keccak256(abi.encode(_name, _implSalt())); - console.log("Deploying safe: %s with salt %s", _name, vm.toString(salt)); - (SafeProxyFactory safeProxyFactory, Safe safeSingleton) = _getSafeFactory(); - - if (_keepDeployer) { - address[] memory expandedOwners = new address[](_owners.length + 1); - // By always adding msg.sender first we know that the previousOwner will be SENTINEL_OWNERS, which makes it - // easier to call removeOwner later. - expandedOwners[0] = msg.sender; - for (uint256 i = 0; i < _owners.length; i++) { - expandedOwners[i + 1] = _owners[i]; - } - _owners = expandedOwners; - } - - bytes memory initData = abi.encodeCall( - Safe.setup, (_owners, _threshold, address(0), hex"", address(0), address(0), 0, payable(address(0))) - ); - addr_ = address(safeProxyFactory.createProxyWithNonce(address(safeSingleton), initData, uint256(salt))); - - save(_name, addr_); - console.log("New safe: %s deployed at %s\n Note that this safe is owned by the deployer key", _name, addr_); - } - - /// @notice If the keepDeployer option was used with deploySafe(), this function can be used to remove the deployer. - /// Note this function does not have the broadcast modifier. - function removeDeployerFromSafe(string memory _name, uint256 _newThreshold) public { - Safe safe = Safe(mustGetAddress(_name)); - - // The sentinel address is used to mark the start and end of the linked list of owners in the Safe. - address sentinelOwners = address(0x1); - - // Because deploySafe() always adds msg.sender first (if keepDeployer is true), we know that the previousOwner - // will be sentinelOwners. 
- _callViaSafe({ - _safe: safe, - _target: address(safe), - _data: abi.encodeCall(OwnerManager.removeOwner, (sentinelOwners, msg.sender, _newThreshold)) - }); - console.log("Removed deployer owner from ", _name); - } - /// @notice Deploy the AddressManager function deployAddressManager() public broadcast returns (address addr_) { console.log("Deploying AddressManager"); @@ -1025,7 +899,7 @@ contract Deploy is Deployer { /// @notice Transfer ownership of the address manager to the ProxyAdmin function transferAddressManagerOwnership() public broadcast { - console.log("Transferring AddressManager ownership to ProxyAdmin"); + console.log("Transferring AddressManager ownership to IProxyAdmin"); IAddressManager addressManager = IAddressManager(mustGetAddress("AddressManager")); address owner = addressManager.owner(); address proxyAdmin = mustGetAddress("ProxyAdmin"); @@ -1058,10 +932,12 @@ contract Deploy is Deployer { function initializeSuperchainConfig() public broadcast { address payable superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); address payable superchainConfig = mustGetAddress("SuperchainConfig"); - _upgradeAndCallViaSafe({ + + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: superchainConfigProxy, _implementation: superchainConfig, - _innerCallData: abi.encodeCall(ISuperchainConfig.initialize, (cfg.superchainConfigGuardian(), false)) + _data: abi.encodeCall(ISuperchainConfig.initialize, (cfg.superchainConfigGuardian(), false)) }); ChainAssertions.checkSuperchainConfig({ _contracts: _proxiesUnstrict(), _cfg: cfg, _isPaused: false }); @@ -1073,10 +949,11 @@ contract Deploy is Deployer { address disputeGameFactoryProxy = mustGetAddress("DisputeGameFactoryProxy"); address disputeGameFactory = mustGetAddress("DisputeGameFactory"); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: 
payable(disputeGameFactoryProxy), _implementation: disputeGameFactory, - _innerCallData: abi.encodeCall(IDisputeGameFactory.initialize, (msg.sender)) + _data: abi.encodeCall(IDisputeGameFactory.initialize, (msg.sender)) }); string memory version = IDisputeGameFactory(disputeGameFactoryProxy).version(); @@ -1091,10 +968,11 @@ contract Deploy is Deployer { address delayedWETH = mustGetAddress("DelayedWETH"); address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(delayedWETHProxy), _implementation: delayedWETH, - _innerCallData: abi.encodeCall(IDelayedWETH.initialize, (msg.sender, ISuperchainConfig(superchainConfigProxy))) + _data: abi.encodeCall(IDelayedWETH.initialize, (msg.sender, ISuperchainConfig(superchainConfigProxy))) }); string memory version = IDelayedWETH(payable(delayedWETHProxy)).version(); @@ -1114,10 +992,11 @@ contract Deploy is Deployer { address delayedWETH = mustGetAddress("DelayedWETH"); address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(delayedWETHProxy), _implementation: delayedWETH, - _innerCallData: abi.encodeCall(IDelayedWETH.initialize, (msg.sender, ISuperchainConfig(superchainConfigProxy))) + _data: abi.encodeCall(IDelayedWETH.initialize, (msg.sender, ISuperchainConfig(superchainConfigProxy))) }); string memory version = IDelayedWETH(payable(delayedWETHProxy)).version(); @@ -1174,10 +1053,11 @@ contract Deploy is Deployer { }) }); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(anchorStateRegistryProxy), _implementation: anchorStateRegistry, - _innerCallData: 
abi.encodeCall(IAnchorStateRegistry.initialize, (roots, superchainConfig)) + _data: abi.encodeCall(IAnchorStateRegistry.initialize, (roots, superchainConfig)) }); string memory version = IAnchorStateRegistry(payable(anchorStateRegistryProxy)).version(); @@ -1197,10 +1077,11 @@ contract Deploy is Deployer { customGasTokenAddress = cfg.customGasTokenAddress(); } - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(systemConfigProxy), _implementation: systemConfig, - _innerCallData: abi.encodeCall( + _data: abi.encodeCall( ISystemConfig.initialize, ( cfg.finalSystemOwner(), @@ -1242,20 +1123,15 @@ contract Deploy is Deployer { address systemConfigProxy = mustGetAddress("SystemConfigProxy"); uint256 proxyType = uint256(proxyAdmin.proxyType(l1StandardBridgeProxy)); - Safe safe = Safe(mustGetAddress("SystemOwnerSafe")); if (proxyType != uint256(IProxyAdmin.ProxyType.CHUGSPLASH)) { - _callViaSafe({ - _safe: safe, - _target: address(proxyAdmin), - _data: abi.encodeCall(IProxyAdmin.setProxyType, (l1StandardBridgeProxy, IProxyAdmin.ProxyType.CHUGSPLASH)) - }); + proxyAdmin.setProxyType(l1StandardBridgeProxy, IProxyAdmin.ProxyType.CHUGSPLASH); } require(uint256(proxyAdmin.proxyType(l1StandardBridgeProxy)) == uint256(IProxyAdmin.ProxyType.CHUGSPLASH)); - _upgradeAndCallViaSafe({ + proxyAdmin.upgradeAndCall({ _proxy: payable(l1StandardBridgeProxy), _implementation: l1StandardBridge, - _innerCallData: abi.encodeCall( + _data: abi.encodeCall( IL1StandardBridge.initialize, ( ICrossDomainMessenger(l1CrossDomainMessengerProxy), @@ -1279,10 +1155,11 @@ contract Deploy is Deployer { address l1CrossDomainMessengerProxy = mustGetAddress("L1CrossDomainMessengerProxy"); address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: 
payable(l1ERC721BridgeProxy), _implementation: l1ERC721Bridge, - _innerCallData: abi.encodeCall( + _data: abi.encodeCall( IL1ERC721Bridge.initialize, (ICrossDomainMessenger(payable(l1CrossDomainMessengerProxy)), ISuperchainConfig(superchainConfigProxy)) ) @@ -1302,10 +1179,11 @@ contract Deploy is Deployer { address optimismMintableERC20Factory = mustGetAddress("OptimismMintableERC20Factory"); address l1StandardBridgeProxy = mustGetAddress("L1StandardBridgeProxy"); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(optimismMintableERC20FactoryProxy), _implementation: optimismMintableERC20Factory, - _innerCallData: abi.encodeCall(IOptimismMintableERC20Factory.initialize, (l1StandardBridgeProxy)) + _data: abi.encodeCall(IOptimismMintableERC20Factory.initialize, (l1StandardBridgeProxy)) }); IOptimismMintableERC20Factory factory = IOptimismMintableERC20Factory(optimismMintableERC20FactoryProxy); @@ -1326,36 +1204,25 @@ contract Deploy is Deployer { address systemConfigProxy = mustGetAddress("SystemConfigProxy"); uint256 proxyType = uint256(proxyAdmin.proxyType(l1CrossDomainMessengerProxy)); - Safe safe = Safe(mustGetAddress("SystemOwnerSafe")); if (proxyType != uint256(IProxyAdmin.ProxyType.RESOLVED)) { - _callViaSafe({ - _safe: safe, - _target: address(proxyAdmin), - _data: abi.encodeCall( - IProxyAdmin.setProxyType, (l1CrossDomainMessengerProxy, IProxyAdmin.ProxyType.RESOLVED) - ) - }); + proxyAdmin.setProxyType(l1CrossDomainMessengerProxy, IProxyAdmin.ProxyType.RESOLVED); } require(uint256(proxyAdmin.proxyType(l1CrossDomainMessengerProxy)) == uint256(IProxyAdmin.ProxyType.RESOLVED)); string memory contractName = "OVM_L1CrossDomainMessenger"; string memory implName = proxyAdmin.implementationName(l1CrossDomainMessenger); if (keccak256(bytes(contractName)) != keccak256(bytes(implName))) { - _callViaSafe({ - _safe: safe, - _target: address(proxyAdmin), - _data: 
abi.encodeCall(IProxyAdmin.setImplementationName, (l1CrossDomainMessengerProxy, contractName)) - }); + proxyAdmin.setImplementationName(l1CrossDomainMessengerProxy, contractName); } require( keccak256(bytes(proxyAdmin.implementationName(l1CrossDomainMessengerProxy))) == keccak256(bytes(contractName)) ); - _upgradeAndCallViaSafe({ + proxyAdmin.upgradeAndCall({ _proxy: payable(l1CrossDomainMessengerProxy), _implementation: l1CrossDomainMessenger, - _innerCallData: abi.encodeCall( + _data: abi.encodeCall( IL1CrossDomainMessenger.initialize, ( ISuperchainConfig(superchainConfigProxy), @@ -1378,10 +1245,11 @@ contract Deploy is Deployer { address l2OutputOracleProxy = mustGetAddress("L2OutputOracleProxy"); address l2OutputOracle = mustGetAddress("L2OutputOracle"); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(l2OutputOracleProxy), _implementation: l2OutputOracle, - _innerCallData: abi.encodeCall( + _data: abi.encodeCall( IL2OutputOracle.initialize, ( cfg.l2OutputOracleSubmissionInterval(), @@ -1416,10 +1284,11 @@ contract Deploy is Deployer { address systemConfigProxy = mustGetAddress("SystemConfigProxy"); address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(optimismPortalProxy), _implementation: optimismPortal, - _innerCallData: abi.encodeCall( + _data: abi.encodeCall( IOptimismPortal.initialize, ( IL2OutputOracle(l2OutputOracleProxy), @@ -1445,10 +1314,11 @@ contract Deploy is Deployer { address systemConfigProxy = mustGetAddress("SystemConfigProxy"); address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(optimismPortalProxy), 
_implementation: optimismPortal2, - _innerCallData: abi.encodeCall( + _data: abi.encodeCall( IOptimismPortal2.initialize, ( IDisputeGameFactory(disputeGameFactoryProxy), @@ -1475,10 +1345,11 @@ contract Deploy is Deployer { uint256 requiredProtocolVersion = cfg.requiredProtocolVersion(); uint256 recommendedProtocolVersion = cfg.recommendedProtocolVersion(); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(protocolVersionsProxy), _implementation: protocolVersions, - _innerCallData: abi.encodeCall( + _data: abi.encodeCall( IProtocolVersions.initialize, ( finalSystemOwner, @@ -1500,13 +1371,13 @@ contract Deploy is Deployer { console.log("Transferring DisputeGameFactory ownership to Safe"); IDisputeGameFactory disputeGameFactory = IDisputeGameFactory(mustGetAddress("DisputeGameFactoryProxy")); address owner = disputeGameFactory.owner(); + address finalSystemOwner = cfg.finalSystemOwner(); - address safe = mustGetAddress("SystemOwnerSafe"); - if (owner != safe) { - disputeGameFactory.transferOwnership(safe); - console.log("DisputeGameFactory ownership transferred to Safe at: %s", safe); + if (owner != finalSystemOwner) { + disputeGameFactory.transferOwnership(finalSystemOwner); + console.log("DisputeGameFactory ownership transferred to final system owner at: %s", finalSystemOwner); } - ChainAssertions.checkDisputeGameFactory({ _contracts: _proxies(), _expectedOwner: safe }); + ChainAssertions.checkDisputeGameFactory({ _contracts: _proxies(), _expectedOwner: finalSystemOwner }); } /// @notice Transfer ownership of the DelayedWETH contract to the final system owner @@ -1515,12 +1386,17 @@ contract Deploy is Deployer { IDelayedWETH weth = IDelayedWETH(mustGetAddress("DelayedWETHProxy")); address owner = weth.owner(); - address safe = mustGetAddress("SystemOwnerSafe"); - if (owner != safe) { - weth.transferOwnership(safe); - console.log("DelayedWETH ownership transferred 
to Safe at: %s", safe); + address finalSystemOwner = cfg.finalSystemOwner(); + if (owner != finalSystemOwner) { + weth.transferOwnership(finalSystemOwner); + console.log("DelayedWETH ownership transferred to final system owner at: %s", finalSystemOwner); } - ChainAssertions.checkDelayedWETH({ _contracts: _proxies(), _cfg: cfg, _isProxy: true, _expectedOwner: safe }); + ChainAssertions.checkDelayedWETH({ + _contracts: _proxies(), + _cfg: cfg, + _isProxy: true, + _expectedOwner: finalSystemOwner + }); } /// @notice Transfer ownership of the permissioned DelayedWETH contract to the final system owner @@ -1529,16 +1405,16 @@ contract Deploy is Deployer { IDelayedWETH weth = IDelayedWETH(mustGetAddress("PermissionedDelayedWETHProxy")); address owner = weth.owner(); - address safe = mustGetAddress("SystemOwnerSafe"); - if (owner != safe) { - weth.transferOwnership(safe); - console.log("DelayedWETH ownership transferred to Safe at: %s", safe); + address finalSystemOwner = cfg.finalSystemOwner(); + if (owner != finalSystemOwner) { + weth.transferOwnership(finalSystemOwner); + console.log("DelayedWETH ownership transferred to final system owner at: %s", finalSystemOwner); } ChainAssertions.checkPermissionedDelayedWETH({ _contracts: _proxies(), _cfg: cfg, _isProxy: true, - _expectedOwner: safe + _expectedOwner: finalSystemOwner }); } @@ -1812,10 +1688,11 @@ contract Deploy is Deployer { uint256 daBondSize = cfg.daBondSize(); uint256 daResolverRefundPercentage = cfg.daResolverRefundPercentage(); - _upgradeAndCallViaSafe({ + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + proxyAdmin.upgradeAndCall({ _proxy: payable(dataAvailabilityChallengeProxy), _implementation: dataAvailabilityChallenge, - _innerCallData: abi.encodeCall( + _data: abi.encodeCall( IDataAvailabilityChallenge.initialize, (finalSystemOwner, daChallengeWindow, daResolveWindow, daBondSize, daResolverRefundPercentage) ) diff --git 
a/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol index 252b4703b203..2463576fb41a 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol @@ -5,9 +5,11 @@ import { console2 as console } from "forge-std/console2.sol"; import { stdJson } from "forge-std/StdJson.sol"; import { GnosisSafe as Safe } from "safe-contracts/GnosisSafe.sol"; +import { GnosisSafeProxyFactory as SafeProxyFactory } from "safe-contracts/proxies/GnosisSafeProxyFactory.sol"; import { OwnerManager } from "safe-contracts/base/OwnerManager.sol"; import { ModuleManager } from "safe-contracts/base/ModuleManager.sol"; import { GuardManager } from "safe-contracts/base/GuardManager.sol"; +import { Enum as SafeOps } from "safe-contracts/common/Enum.sol"; import { Deployer } from "scripts/deploy/Deployer.sol"; @@ -17,8 +19,8 @@ import { DeputyGuardianModule } from "src/safe/DeputyGuardianModule.sol"; import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; import { Deploy } from "./Deploy.s.sol"; - /// @notice Configuration for a Safe + struct SafeConfig { uint256 threshold; address[] owners; @@ -50,7 +52,7 @@ struct GuardianConfig { DeputyGuardianModuleConfig deputyGuardianModuleConfig; } -/// @title Deploy +/// @title DeployOwnership /// @notice Script used to deploy and configure the Safe contracts which are used to manage the Superchain, /// as the ProxyAdminOwner and other roles in the system. Note that this script is not executable in a /// production environment as some steps depend on having a quorum of signers available. 
This script is meant to @@ -112,6 +114,113 @@ contract DeployOwnership is Deploy { }); } + /// @notice Make a call from the Safe contract to an arbitrary address with arbitrary data + function _callViaSafe(Safe _safe, address _target, bytes memory _data) internal { + // This is the signature format used when the caller is also the signer. + bytes memory signature = abi.encodePacked(uint256(uint160(msg.sender)), bytes32(0), uint8(1)); + + _safe.execTransaction({ + to: _target, + value: 0, + data: _data, + operation: SafeOps.Operation.Call, + safeTxGas: 0, + baseGas: 0, + gasPrice: 0, + gasToken: address(0), + refundReceiver: payable(address(0)), + signatures: signature + }); + } + + /// @notice Deploy the Safe + function deploySafe(string memory _name) public broadcast returns (address addr_) { + address[] memory owners = new address[](0); + addr_ = deploySafe(_name, owners, 1, true); + } + + /// @notice Deploy a new Safe contract. If the keepDeployer option is used to enable further setup actions, then + /// the removeDeployerFromSafe() function should be called on that safe after setup is complete. + /// Note this function does not have the broadcast modifier. + /// @param _name The name of the Safe to deploy. + /// @param _owners The owners of the Safe. + /// @param _threshold The threshold of the Safe. + /// @param _keepDeployer Wether or not the deployer address will be added as an owner of the Safe. 
+ function deploySafe( + string memory _name, + address[] memory _owners, + uint256 _threshold, + bool _keepDeployer + ) + public + returns (address addr_) + { + bytes32 salt = keccak256(abi.encode(_name, _implSalt())); + console.log("Deploying safe: %s with salt %s", _name, vm.toString(salt)); + (SafeProxyFactory safeProxyFactory, Safe safeSingleton) = _getSafeFactory(); + + if (_keepDeployer) { + address[] memory expandedOwners = new address[](_owners.length + 1); + // By always adding msg.sender first we know that the previousOwner will be SENTINEL_OWNERS, which makes it + // easier to call removeOwner later. + expandedOwners[0] = msg.sender; + for (uint256 i = 0; i < _owners.length; i++) { + expandedOwners[i + 1] = _owners[i]; + } + _owners = expandedOwners; + } + + bytes memory initData = abi.encodeCall( + Safe.setup, (_owners, _threshold, address(0), hex"", address(0), address(0), 0, payable(address(0))) + ); + addr_ = address(safeProxyFactory.createProxyWithNonce(address(safeSingleton), initData, uint256(salt))); + + save(_name, addr_); + console.log("New safe: %s deployed at %s\n Note that this safe is owned by the deployer key", _name, addr_); + } + + /// @notice If the keepDeployer option was used with deploySafe(), this function can be used to remove the deployer. + /// Note this function does not have the broadcast modifier. + function removeDeployerFromSafe(string memory _name, uint256 _newThreshold) public { + Safe safe = Safe(mustGetAddress(_name)); + + // The sentinel address is used to mark the start and end of the linked list of owners in the Safe. + address sentinelOwners = address(0x1); + + // Because deploySafe() always adds msg.sender first (if keepDeployer is true), we know that the previousOwner + // will be sentinelOwners. 
+ _callViaSafe({ + _safe: safe, + _target: address(safe), + _data: abi.encodeCall(OwnerManager.removeOwner, (sentinelOwners, msg.sender, _newThreshold)) + }); + console.log("Removed deployer owner from ", _name); + } + + /// @notice Gets the address of the SafeProxyFactory and Safe singleton for use in deploying a new GnosisSafe. + function _getSafeFactory() internal returns (SafeProxyFactory safeProxyFactory_, Safe safeSingleton_) { + if (getAddress("SafeProxyFactory") != address(0)) { + // The SafeProxyFactory is already saved, we can just use it. + safeProxyFactory_ = SafeProxyFactory(getAddress("SafeProxyFactory")); + safeSingleton_ = Safe(getAddress("SafeSingleton")); + return (safeProxyFactory_, safeSingleton_); + } + + // These are the standard create2 deployed contracts. First we'll check if they are deployed, + // if not we'll deploy new ones, though not at these addresses. + address safeProxyFactory = 0xa6B71E26C5e0845f74c812102Ca7114b6a896AB2; + address safeSingleton = 0xd9Db270c1B5E3Bd161E8c8503c55cEABeE709552; + + safeProxyFactory.code.length == 0 + ? safeProxyFactory_ = new SafeProxyFactory() + : safeProxyFactory_ = SafeProxyFactory(safeProxyFactory); + + safeSingleton.code.length == 0 ? safeSingleton_ = new Safe() : safeSingleton_ = Safe(payable(safeSingleton)); + + save("SafeProxyFactory", address(safeProxyFactory_)); + save("SafeSingleton", address(safeSingleton_)); + } + /// @notice Deploys a Safe with a configuration similar to that of the Foundation Safe on Mainnet. 
function deployFoundationOperationsSafe() public broadcast returns (address addr_) { SafeConfig memory exampleFoundationConfig = _getExampleFoundationConfig(); diff --git a/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol b/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol index a5064cdd0a17..c038c27a5683 100644 --- a/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol +++ b/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol @@ -22,11 +22,11 @@ contract FPACOPS is Deploy, StdAssertions { // ENTRYPOINTS // //////////////////////////////////////////////////////////////// - function deployFPAC(address _proxyAdmin, address _systemOwnerSafe, address _superchainConfigProxy) public { + function deployFPAC(address _proxyAdmin, address _finalSystemOwner, address _superchainConfigProxy) public { console.log("Deploying a fresh FPAC system and OptimismPortal2 implementation."); prankDeployment("ProxyAdmin", msg.sender); - prankDeployment("SystemOwnerSafe", msg.sender); + prankDeployment("FinalSystemOwner", msg.sender); prankDeployment("SuperchainConfigProxy", _superchainConfigProxy); // Deploy the proxies. @@ -54,14 +54,14 @@ contract FPACOPS is Deploy, StdAssertions { // Deploy the Permissioned Cannon Fault game implementation and set it as game ID = 1. setPermissionedCannonFaultGameImplementation({ _allowUpgrade: false }); - // Transfer ownership of the DisputeGameFactory to the SystemOwnerSafe, and transfer the administrative rights + // Transfer ownership of the DisputeGameFactory to the FinalSystemOwner, and transfer the administrative rights // of the DisputeGameFactoryProxy to the ProxyAdmin. 
- transferDGFOwnershipFinal({ _proxyAdmin: _proxyAdmin, _systemOwnerSafe: _systemOwnerSafe }); - transferWethOwnershipFinal({ _proxyAdmin: _proxyAdmin, _systemOwnerSafe: _systemOwnerSafe }); + transferDGFOwnershipFinal({ _proxyAdmin: _proxyAdmin, _finalSystemOwner: _finalSystemOwner }); + transferWethOwnershipFinal({ _proxyAdmin: _proxyAdmin, _finalSystemOwner: _finalSystemOwner }); transferAnchorStateOwnershipFinal({ _proxyAdmin: _proxyAdmin }); // Run post-deployment assertions. - postDeployAssertions({ _proxyAdmin: _proxyAdmin, _systemOwnerSafe: _systemOwnerSafe }); + postDeployAssertions({ _proxyAdmin: _proxyAdmin, _finalSystemOwner: _finalSystemOwner }); // Print overview printConfigReview(); @@ -126,12 +126,12 @@ contract FPACOPS is Deploy, StdAssertions { } /// @notice Transfers admin rights of the `DisputeGameFactoryProxy` to the `ProxyAdmin` and sets the - /// `DisputeGameFactory` owner to the `SystemOwnerSafe`. - function transferDGFOwnershipFinal(address _proxyAdmin, address _systemOwnerSafe) internal broadcast { + /// `DisputeGameFactory` owner to the `FinalSystemOwner`. + function transferDGFOwnershipFinal(address _proxyAdmin, address _finalSystemOwner) internal broadcast { IDisputeGameFactory dgf = IDisputeGameFactory(mustGetAddress("DisputeGameFactoryProxy")); - // Transfer the ownership of the DisputeGameFactory to the SystemOwnerSafe. - dgf.transferOwnership(_systemOwnerSafe); + // Transfer the ownership of the DisputeGameFactory to the FinalSystemOwner. + dgf.transferOwnership(_finalSystemOwner); // Transfer the admin rights of the DisputeGameFactoryProxy to the ProxyAdmin. IProxy prox = IProxy(payable(address(dgf))); @@ -139,12 +139,12 @@ contract FPACOPS is Deploy, StdAssertions { } /// @notice Transfers admin rights of the `DelayedWETHProxy` to the `ProxyAdmin` and sets the - /// `DelayedWETH` owner to the `SystemOwnerSafe`. 
- function transferWethOwnershipFinal(address _proxyAdmin, address _systemOwnerSafe) internal broadcast { + /// `DelayedWETH` owner to the `FinalSystemOwner`. + function transferWethOwnershipFinal(address _proxyAdmin, address _finalSystemOwner) internal broadcast { IDelayedWETH weth = IDelayedWETH(mustGetAddress("DelayedWETHProxy")); - // Transfer the ownership of the DelayedWETH to the SystemOwnerSafe. - weth.transferOwnership(_systemOwnerSafe); + // Transfer the ownership of the DelayedWETH to the FinalSystemOwner. + weth.transferOwnership(_finalSystemOwner); // Transfer the admin rights of the DelayedWETHProxy to the ProxyAdmin. IProxy prox = IProxy(payable(address(weth))); @@ -161,7 +161,7 @@ contract FPACOPS is Deploy, StdAssertions { } /// @notice Checks that the deployed system is configured correctly. - function postDeployAssertions(address _proxyAdmin, address _systemOwnerSafe) internal view { + function postDeployAssertions(address _proxyAdmin, address _finalSystemOwner) internal view { Types.ContractSet memory contracts = _proxiesUnstrict(); contracts.OptimismPortal2 = mustGetAddress("OptimismPortal2"); @@ -172,10 +172,10 @@ contract FPACOPS is Deploy, StdAssertions { address dgfProxyAddr = mustGetAddress("DisputeGameFactoryProxy"); IDisputeGameFactory dgfProxy = IDisputeGameFactory(dgfProxyAddr); assertEq(address(uint160(uint256(vm.load(dgfProxyAddr, Constants.PROXY_OWNER_ADDRESS)))), _proxyAdmin); - ChainAssertions.checkDisputeGameFactory(contracts, _systemOwnerSafe); + ChainAssertions.checkDisputeGameFactory(contracts, _finalSystemOwner); address wethProxyAddr = mustGetAddress("DelayedWETHProxy"); assertEq(address(uint160(uint256(vm.load(wethProxyAddr, Constants.PROXY_OWNER_ADDRESS)))), _proxyAdmin); - ChainAssertions.checkDelayedWETH(contracts, cfg, true, _systemOwnerSafe); + ChainAssertions.checkDelayedWETH(contracts, cfg, true, _finalSystemOwner); // Check the config elements in the deployed contracts. 
ChainAssertions.checkOptimismPortal2(contracts, cfg, false); diff --git a/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol b/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol index f28bc214cd2a..7db0de4c3fce 100644 --- a/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol +++ b/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol @@ -30,13 +30,13 @@ contract FPACOPS2 is Deploy, StdAssertions { /// AnchorStateRegistry. Does not deploy a new DisputeGameFactory. System /// Owner is responsible for updating implementations later. /// @param _proxyAdmin Address of the ProxyAdmin contract to transfer ownership to. - /// @param _systemOwnerSafe Address of the SystemOwner. + /// @param _finalSystemOwner Address of the SystemOwner. /// @param _superchainConfigProxy Address of the SuperchainConfig proxy contract. /// @param _disputeGameFactoryProxy Address of the DisputeGameFactory proxy contract. /// @param _anchorStateRegistryProxy Address of the AnchorStateRegistry proxy contract. function deployFPAC2( address _proxyAdmin, - address _systemOwnerSafe, + address _finalSystemOwner, address _superchainConfigProxy, address _disputeGameFactoryProxy, address _anchorStateRegistryProxy @@ -47,7 +47,7 @@ contract FPACOPS2 is Deploy, StdAssertions { // Prank required deployments. prankDeployment("ProxyAdmin", msg.sender); - prankDeployment("SystemOwnerSafe", msg.sender); + prankDeployment("FinalSystemOwner", msg.sender); prankDeployment("SuperchainConfigProxy", _superchainConfigProxy); prankDeployment("DisputeGameFactoryProxy", _disputeGameFactoryProxy); prankDeployment("AnchorStateRegistryProxy", _anchorStateRegistryProxy); @@ -71,11 +71,11 @@ contract FPACOPS2 is Deploy, StdAssertions { deployPermissionedDisputeGame(); // Transfer ownership of DelayedWETH to ProxyAdmin. 
- transferWethOwnershipFinal({ _proxyAdmin: _proxyAdmin, _systemOwnerSafe: _systemOwnerSafe }); - transferPermissionedWETHOwnershipFinal({ _proxyAdmin: _proxyAdmin, _systemOwnerSafe: _systemOwnerSafe }); + transferWethOwnershipFinal({ _proxyAdmin: _proxyAdmin, _finalSystemOwner: _finalSystemOwner }); + transferPermissionedWETHOwnershipFinal({ _proxyAdmin: _proxyAdmin, _finalSystemOwner: _finalSystemOwner }); // Run post-deployment assertions. - postDeployAssertions({ _proxyAdmin: _proxyAdmin, _systemOwnerSafe: _systemOwnerSafe }); + postDeployAssertions({ _proxyAdmin: _proxyAdmin, _finalSystemOwner: _finalSystemOwner }); // Print overview. printConfigReview(); @@ -169,14 +169,14 @@ contract FPACOPS2 is Deploy, StdAssertions { } /// @notice Transfers admin rights of the `DelayedWETHProxy` to the `ProxyAdmin` and sets the - /// `DelayedWETH` owner to the `SystemOwnerSafe`. - function transferWethOwnershipFinal(address _proxyAdmin, address _systemOwnerSafe) internal broadcast { + /// `DelayedWETH` owner to the `FinalSystemOwner`. + function transferWethOwnershipFinal(address _proxyAdmin, address _finalSystemOwner) internal broadcast { console.log("Transferring ownership of DelayedWETHProxy"); IDelayedWETH weth = IDelayedWETH(mustGetAddress("DelayedWETHProxy")); - // Transfer the ownership of the DelayedWETH to the SystemOwnerSafe. - weth.transferOwnership(_systemOwnerSafe); + // Transfer the ownership of the DelayedWETH to the FinalSystemOwner. + weth.transferOwnership(_finalSystemOwner); // Transfer the admin rights of the DelayedWETHProxy to the ProxyAdmin. IProxy prox = IProxy(payable(address(weth))); @@ -184,14 +184,20 @@ contract FPACOPS2 is Deploy, StdAssertions { } /// @notice Transfers admin rights of the permissioned `DelayedWETHProxy` to the `ProxyAdmin` - /// and sets the `DelayedWETH` owner to the `SystemOwnerSafe`. 
- function transferPermissionedWETHOwnershipFinal(address _proxyAdmin, address _systemOwnerSafe) internal broadcast { + /// and sets the `DelayedWETH` owner to the `FinalSystemOwner`. + function transferPermissionedWETHOwnershipFinal( + address _proxyAdmin, + address _finalSystemOwner + ) + internal + broadcast + { console.log("Transferring ownership of permissioned DelayedWETHProxy"); IDelayedWETH weth = IDelayedWETH(mustGetAddress("PermissionedDelayedWETHProxy")); - // Transfer the ownership of the DelayedWETH to the SystemOwnerSafe. - weth.transferOwnership(_systemOwnerSafe); + // Transfer the ownership of the DelayedWETH to the FinalSystemOwner. + weth.transferOwnership(_finalSystemOwner); // Transfer the admin rights of the DelayedWETHProxy to the ProxyAdmin. IProxy prox = IProxy(payable(address(weth))); @@ -199,7 +205,7 @@ contract FPACOPS2 is Deploy, StdAssertions { } /// @notice Checks that the deployed system is configured correctly. - function postDeployAssertions(address _proxyAdmin, address _systemOwnerSafe) internal view { + function postDeployAssertions(address _proxyAdmin, address _finalSystemOwner) internal view { Types.ContractSet memory contracts = _proxiesUnstrict(); // Ensure that `useFaultProofs` is set to `true`. @@ -218,9 +224,9 @@ contract FPACOPS2 is Deploy, StdAssertions { assertEq(address(uint160(uint256(vm.load(soyWethProxyAddr, Constants.PROXY_OWNER_ADDRESS)))), _proxyAdmin); // Run standard assertions for DGF and DelayedWETH. - ChainAssertions.checkDisputeGameFactory(contracts, _systemOwnerSafe); - ChainAssertions.checkDelayedWETH(contracts, cfg, true, _systemOwnerSafe); - ChainAssertions.checkPermissionedDelayedWETH(contracts, cfg, true, _systemOwnerSafe); + ChainAssertions.checkDisputeGameFactory(contracts, _finalSystemOwner); + ChainAssertions.checkDelayedWETH(contracts, cfg, true, _finalSystemOwner); + ChainAssertions.checkPermissionedDelayedWETH(contracts, cfg, true, _finalSystemOwner); // Verify PreimageOracle configuration. 
IPreimageOracle oracle = IPreimageOracle(mustGetAddress("PreimageOracle")); diff --git a/packages/contracts-bedrock/scripts/fpac/Makefile b/packages/contracts-bedrock/scripts/fpac/Makefile index dbdea4a62cb2..0399666e4e27 100644 --- a/packages/contracts-bedrock/scripts/fpac/Makefile +++ b/packages/contracts-bedrock/scripts/fpac/Makefile @@ -23,9 +23,9 @@ cannon-prestate: # Generate the cannon prestate, and tar the `op-program` + `can .PHONY: deploy-fresh deploy-fresh: cannon-prestate # Deploy a fresh version of the FPAC contracts. Pass `--broadcast` to send to the network. - forge script FPACOPS.s.sol --sig "deployFPAC(address,address,address)" $(proxy-admin) $(system-owner-safe) $(superchain-config-proxy) --chain $(chain) -vvv $(args) + forge script FPACOPS.s.sol --sig "deployFPAC(address,address,address)" $(proxy-admin) $(final-system-owner) $(superchain-config-proxy) --chain $(chain) -vvv $(args) # TODO: Convert this whole file to a justfile .PHONY: deploy-upgrade deploy-upgrade: cannon-prestate # Deploy upgraded FP contracts. Pass `--broadcast` to send to the network. - forge script FPACOPS2.s.sol --sig "deployFPAC2(address,address,address,address,address)" $(proxy-admin) $(system-owner-safe) $(superchain-config-proxy) $(dispute-game-factory-proxy) $(anchor-state-registry-proxy) --chain $(chain) -vvv $(args) + forge script FPACOPS2.s.sol --sig "deployFPAC2(address,address,address,address,address)" $(proxy-admin) $(final-system-owner) $(superchain-config-proxy) $(dispute-game-factory-proxy) $(anchor-state-registry-proxy) --chain $(chain) -vvv $(args) diff --git a/packages/contracts-bedrock/scripts/fpac/README.md b/packages/contracts-bedrock/scripts/fpac/README.md index a5d981172b2a..a3d309a4871c 100644 --- a/packages/contracts-bedrock/scripts/fpac/README.md +++ b/packages/contracts-bedrock/scripts/fpac/README.md @@ -17,5 +17,5 @@ make cannon-prestate chain= _Description_: Deploys a fully fresh FPAC system to the passed chain. 
All args after the `args=` are forwarded to `forge script`. ```sh -make deploy-fresh chain= proxy-admin= system-owner-safe= [args=] +make deploy-fresh chain= proxy-admin= final-system-owner= [args=] ``` diff --git a/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol b/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol index bde1e1b9f893..8982eae96bf6 100644 --- a/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol +++ b/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol @@ -22,7 +22,7 @@ contract DelayedWETH_Init is CommonTest { super.setUp(); // Transfer ownership of delayed WETH to the test contract. - vm.prank(deploy.mustGetAddress("SystemOwnerSafe")); + vm.prank(delayedWeth.owner()); delayedWeth.transferOwnership(address(this)); } } diff --git a/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol b/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol index 6c3ed2a18944..9619832135e5 100644 --- a/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol +++ b/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol @@ -29,7 +29,7 @@ contract DisputeGameFactory_Init is CommonTest { fakeClone = new FakeClone(); // Transfer ownership of the factory to the test contract. - vm.prank(deploy.mustGetAddress("SystemOwnerSafe")); + vm.prank(disputeGameFactory.owner()); disputeGameFactory.transferOwnership(address(this)); } } From fa9ab5e607c7857de045ae530971fc565b85ddc9 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Mon, 30 Sep 2024 12:27:54 -0400 Subject: [PATCH 079/116] fix(ct): kontrol summary bug (#12193) Fixes an annoying bug inside of make-summary-deployment that only became apparent with a recent commit that caused state diffs to start to appear under the address of Deploy.s.sol. 
--- .../test/kontrol/scripts/make-summary-deployment.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packages/contracts-bedrock/test/kontrol/scripts/make-summary-deployment.sh b/packages/contracts-bedrock/test/kontrol/scripts/make-summary-deployment.sh index ea7abbf4cc3c..7d7b8da150f3 100755 --- a/packages/contracts-bedrock/test/kontrol/scripts/make-summary-deployment.sh +++ b/packages/contracts-bedrock/test/kontrol/scripts/make-summary-deployment.sh @@ -56,9 +56,12 @@ if [ "$KONTROL_FP_DEPLOYMENT" = true ]; then SCRIPT_SIG="runKontrolDeploymentFaultProofs()" fi +# Sender just needs to be anything but the default sender (0x1804c8AB1F12E6bbf3894d4083f33e07309d1f38) +# Otherwise state changes inside of Deploy.s.sol get stored in the state diff under the default script address (0x7FA9385bE102ac3EAc297483Dd6233D62b3e1496) +# Conflicts with other stuff that happens inside of Kontrol and leads to errors that are hard to debug DEPLOY_CONFIG_PATH=deploy-config/hardhat.json \ DEPLOYMENT_OUTFILE="$CONTRACT_NAMES" \ - forge script -vvv test/kontrol/deployment/KontrolDeployment.sol:KontrolDeployment --sig $SCRIPT_SIG + forge script --sender 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 -vvv test/kontrol/deployment/KontrolDeployment.sol:KontrolDeployment --sig $SCRIPT_SIG echo "Created state diff json" # Clean and store the state diff json in snapshots/state-diff/Kontrol-Deploy.json From 3aabfe673c053a48bc1fa16b53bd7ec94f237e48 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Mon, 30 Sep 2024 14:49:57 -0400 Subject: [PATCH 080/116] fix(ct): give kontrol access to full src (#12181) Kontrol profile was originally just looking at the L1 contracts which caused issues now that we're deploying via vm.getCode. 
--- packages/contracts-bedrock/foundry.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/contracts-bedrock/foundry.toml b/packages/contracts-bedrock/foundry.toml index cef9f85bbaeb..273cbb40ff50 100644 --- a/packages/contracts-bedrock/foundry.toml +++ b/packages/contracts-bedrock/foundry.toml @@ -87,7 +87,7 @@ optimizer = false # See test/kontrol/README.md for an explanation of how the profiles are configured [profile.kdeploy] -src = 'src/L1' +src = 'src' out = 'kout-deployment' test = 'test/kontrol' script = 'scripts-kontrol' From 52d0e60c16498ad4efec8798e3fc1b36b13f46a2 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Tue, 1 Oct 2024 05:40:12 +1000 Subject: [PATCH 081/116] cannon: Support F_GETFD cmd to fcntl (#12050) * cannon: Support F_GETFD cmd to fcntl. * cannon: Update fuzz test expectations. * cannon: Update MIPS.t.sol * cannon: Introduce a new state version for supporting get_fd. Switches singlethreaded prestate to use .bin.gz instead of json since it now needs to detect the new state version. * cannon: Don't override the cannon version. * Update semver-lock. * cannon: Update tests to detect old versions but only check writing and parsing for the currently supported versions. * cannon: Load old version from cannon docker image * cannon: Improve logging. * cannon: Restore cannon version arg. * Fix contrac semvers. * cannon: Rename singlethreaded-getfd to just singlethreaded-2. We could just go to using the state version number directly, but particularly the difference between singlethreaded and multithreaded feels useful to keep. * cannon: Fix comment. * Update semver again. 
--- .circleci/config.yml | 4 +- Makefile | 6 +-- cannon/.gitignore | 3 -- cannon/Makefile | 2 +- cannon/cmd/load_elf.go | 2 +- cannon/cmd/run.go | 1 + cannon/mipsevm/exec/mips_syscalls.go | 10 +++- cannon/mipsevm/multithreaded/state.go | 1 - cannon/mipsevm/singlethreaded/state.go | 1 - cannon/mipsevm/tests/fuzz_evm_common_test.go | 12 ++++- cannon/mipsevm/versions/detect.go | 2 +- cannon/mipsevm/versions/detect_test.go | 39 +++++++++++--- cannon/mipsevm/versions/state.go | 12 +++-- cannon/mipsevm/versions/state_test.go | 41 ++++++++------- .../mipsevm/versions/testdata/states/0.bin.gz | Bin 0 -> 32 bytes .../mipsevm/versions/testdata/states/0.json | 48 ++++++++++++++++++ .../mipsevm/versions/testdata/states/1.bin.gz | Bin 0 -> 46 bytes .../mipsevm/versions/testdata/states/2.bin.gz | Bin 0 -> 33 bytes cannon/multicannon/exec.go | 2 +- op-challenger/README.md | 2 +- op-e2e/e2eutils/challenger/helper.go | 2 +- op-program/Dockerfile.repro | 6 +-- op-program/README.md | 2 +- op-program/scripts/build-prestates.sh | 8 ++- ops-bedrock/docker-compose.yml | 2 +- ops/docker/op-stack-go/Dockerfile | 7 +-- packages/contracts-bedrock/semver-lock.json | 8 +-- .../contracts-bedrock/src/cannon/MIPS.sol | 4 +- .../contracts-bedrock/src/cannon/MIPS2.sol | 4 +- .../src/cannon/libraries/MIPSSyscalls.sol | 17 +++++-- .../contracts-bedrock/test/cannon/MIPS.t.sol | 21 +++++++- 31 files changed, 199 insertions(+), 70 deletions(-) create mode 100644 cannon/mipsevm/versions/testdata/states/0.bin.gz create mode 100644 cannon/mipsevm/versions/testdata/states/0.json create mode 100644 cannon/mipsevm/versions/testdata/states/1.bin.gz create mode 100644 cannon/mipsevm/versions/testdata/states/2.bin.gz diff --git a/.circleci/config.yml b/.circleci/config.yml index 961dfe6bb5d0..980fbc26bf8d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1058,7 +1058,7 @@ jobs: key: cannon-prestate-{{ checksum "./cannon/bin/cannon" }}-{{ checksum "op-program/bin/op-program-client.elf" }} name: 
Save Cannon prestate to cache paths: - - "op-program/bin/prestate.json" + - "op-program/bin/prestate.bin.gz" - "op-program/bin/meta.json" - "op-program/bin/prestate-proof.json" - run: @@ -1079,7 +1079,7 @@ jobs: - persist_to_workspace: root: . paths: - - "op-program/bin/prestate.json" + - "op-program/bin/prestate.bin.gz" - "op-program/bin/meta.json" - "op-program/bin/prestate-proof.json" diff --git a/Makefile b/Makefile index 4f329a4241e2..072375e728bf 100644 --- a/Makefile +++ b/Makefile @@ -134,7 +134,7 @@ reproducible-prestate: ## Builds reproducible-prestate binary .PHONY: reproducible-prestate # Include any files required for the devnet to build and run. -DEVNET_CANNON_PRESTATE_FILES := op-program/bin/prestate-proof.json op-program/bin/prestate.json op-program/bin/prestate-proof-mt.json op-program/bin/prestate-mt.bin.gz +DEVNET_CANNON_PRESTATE_FILES := op-program/bin/prestate-proof.json op-program/bin/prestate.bin.gz op-program/bin/prestate-proof-mt.json op-program/bin/prestate-mt.bin.gz $(DEVNET_CANNON_PRESTATE_FILES): @@ -142,8 +142,8 @@ $(DEVNET_CANNON_PRESTATE_FILES): make cannon-prestate-mt cannon-prestate: op-program cannon ## Generates prestate using cannon and op-program - ./cannon/bin/cannon load-elf --type singlethreaded --path op-program/bin/op-program-client.elf --out op-program/bin/prestate.json --meta op-program/bin/meta.json - ./cannon/bin/cannon run --proof-at '=0' --stop-at '=1' --input op-program/bin/prestate.json --meta op-program/bin/meta.json --proof-fmt 'op-program/bin/%d.json' --output "" + ./cannon/bin/cannon load-elf --type singlethreaded-2 --path op-program/bin/op-program-client.elf --out op-program/bin/prestate.bin.gz --meta op-program/bin/meta.json + ./cannon/bin/cannon run --proof-at '=0' --stop-at '=1' --input op-program/bin/prestate.bin.gz --meta op-program/bin/meta.json --proof-fmt 'op-program/bin/%d.json' --output "" mv op-program/bin/0.json op-program/bin/prestate-proof.json .PHONY: cannon-prestate diff --git 
a/cannon/.gitignore b/cannon/.gitignore index 68424370890f..c9a7f170c14d 100644 --- a/cannon/.gitignore +++ b/cannon/.gitignore @@ -7,9 +7,6 @@ venv *.log testdata/example/bin contracts/out -state.json -*.json -*.json.gz *.pprof *.out bin diff --git a/cannon/Makefile b/cannon/Makefile index 6a0275e16aca..408700613d3d 100644 --- a/cannon/Makefile +++ b/cannon/Makefile @@ -19,7 +19,7 @@ cannon-impl: env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/cannon-impl . cannon-embeds: cannon-impl - @cp bin/cannon-impl ./multicannon/embeds/cannon-0 + @cp bin/cannon-impl ./multicannon/embeds/cannon-2 @cp bin/cannon-impl ./multicannon/embeds/cannon-1 cannon: cannon-embeds diff --git a/cannon/cmd/load_elf.go b/cannon/cmd/load_elf.go index 816eb7c02e46..c76626941e9b 100644 --- a/cannon/cmd/load_elf.go +++ b/cannon/cmd/load_elf.go @@ -69,7 +69,7 @@ func LoadELF(ctx *cli.Context) error { return err } switch ver { - case versions.VersionSingleThreaded: + case versions.VersionSingleThreaded2: createInitialState = func(f *elf.File) (mipsevm.FPVMState, error) { return program.LoadELF(f, singlethreaded.CreateInitialState) } diff --git a/cannon/cmd/run.go b/cannon/cmd/run.go index 21f4f7c29825..6d536eeeceac 100644 --- a/cannon/cmd/run.go +++ b/cannon/cmd/run.go @@ -373,6 +373,7 @@ func Run(ctx *cli.Context) error { if err != nil { return fmt.Errorf("failed to load state: %w", err) } + l.Info("Loaded input state", "version", state.Version) vm := state.CreateVM(l, po, outLog, errLog, meta) debugProgram := ctx.Bool(RunDebugFlag.Name) if debugProgram { diff --git a/cannon/mipsevm/exec/mips_syscalls.go b/cannon/mipsevm/exec/mips_syscalls.go index caf3b9bec630..57df29d760ac 100644 --- a/cannon/mipsevm/exec/mips_syscalls.go +++ b/cannon/mipsevm/exec/mips_syscalls.go @@ -286,7 +286,15 @@ func HandleSysFcntl(a0, a1 uint32) (v0, v1 uint32) { // args: a0 = fd, a1 = cmd v1 = uint32(0) - if a1 == 3 { // F_GETFL: get file descriptor flags + if a1 == 1 { // 
F_GETFD: get file descriptor flags + switch a0 { + case FdStdin, FdStdout, FdStderr, FdPreimageRead, FdHintRead, FdPreimageWrite, FdHintWrite: + v0 = 0 // No flags set + default: + v0 = 0xFFffFFff + v1 = MipsEBADF + } + } else if a1 == 3 { // F_GETFL: get file status flags switch a0 { case FdStdin, FdPreimageRead, FdHintRead: v0 = 0 // O_RDONLY diff --git a/cannon/mipsevm/multithreaded/state.go b/cannon/mipsevm/multithreaded/state.go index f93a99564958..7b4d545396a9 100644 --- a/cannon/mipsevm/multithreaded/state.go +++ b/cannon/mipsevm/multithreaded/state.go @@ -97,7 +97,6 @@ func CreateInitialState(pc, heapStart uint32) *State { } func (s *State) CreateVM(logger log.Logger, po mipsevm.PreimageOracle, stdOut, stdErr io.Writer, meta mipsevm.Metadata) mipsevm.FPVM { - logger.Info("Using cannon multithreaded VM") return NewInstrumentedState(s, po, stdOut, stdErr, logger, meta) } diff --git a/cannon/mipsevm/singlethreaded/state.go b/cannon/mipsevm/singlethreaded/state.go index e0be88d99857..b7320131fb97 100644 --- a/cannon/mipsevm/singlethreaded/state.go +++ b/cannon/mipsevm/singlethreaded/state.go @@ -69,7 +69,6 @@ func CreateInitialState(pc, heapStart uint32) *State { } func (s *State) CreateVM(logger log.Logger, po mipsevm.PreimageOracle, stdOut, stdErr io.Writer, meta mipsevm.Metadata) mipsevm.FPVM { - logger.Info("Using cannon VM") return NewInstrumentedState(s, po, stdOut, stdErr, meta) } diff --git a/cannon/mipsevm/tests/fuzz_evm_common_test.go b/cannon/mipsevm/tests/fuzz_evm_common_test.go index 2b85727679b1..15b29a2b9e50 100644 --- a/cannon/mipsevm/tests/fuzz_evm_common_test.go +++ b/cannon/mipsevm/tests/fuzz_evm_common_test.go @@ -150,7 +150,17 @@ func FuzzStateSyscallFcntl(f *testing.F) { expected.Step += 1 expected.PC = state.GetCpu().NextPC expected.NextPC = state.GetCpu().NextPC + 4 - if cmd == 3 { + if cmd == 1 { + switch fd { + case exec.FdStdin, exec.FdStdout, exec.FdStderr, + exec.FdPreimageRead, exec.FdHintRead, exec.FdPreimageWrite, 
exec.FdHintWrite: + expected.Registers[2] = 0 + expected.Registers[7] = 0 + default: + expected.Registers[2] = 0xFF_FF_FF_FF + expected.Registers[7] = exec.MipsEBADF + } + } else if cmd == 3 { switch fd { case exec.FdStdin, exec.FdPreimageRead, exec.FdHintRead: expected.Registers[2] = 0 diff --git a/cannon/mipsevm/versions/detect.go b/cannon/mipsevm/versions/detect.go index ca4b9be9c51d..cb1efcc06eb3 100644 --- a/cannon/mipsevm/versions/detect.go +++ b/cannon/mipsevm/versions/detect.go @@ -27,7 +27,7 @@ func DetectVersion(path string) (StateVersion, error) { } switch ver { - case VersionSingleThreaded, VersionMultiThreaded: + case VersionSingleThreaded, VersionMultiThreaded, VersionSingleThreaded2: return ver, nil default: return 0, fmt.Errorf("%w: %d", ErrUnknownVersion, ver) diff --git a/cannon/mipsevm/versions/detect_test.go b/cannon/mipsevm/versions/detect_test.go index 38a90f178694..993fb4adcb81 100644 --- a/cannon/mipsevm/versions/detect_test.go +++ b/cannon/mipsevm/versions/detect_test.go @@ -1,8 +1,10 @@ package versions import ( + "embed" "os" "path/filepath" + "strconv" "testing" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" @@ -11,23 +13,46 @@ import ( "github.com/stretchr/testify/require" ) +const statesPath = "testdata/states" + +//go:embed testdata/states +var historicStates embed.FS + func TestDetectVersion(t *testing.T) { - t.Run("SingleThreadedJSON", func(t *testing.T) { - state, err := NewFromState(singlethreaded.CreateEmptyState()) + testDetection := func(t *testing.T, version StateVersion, ext string) { + filename := strconv.Itoa(int(version)) + ext + dir := t.TempDir() + path := filepath.Join(dir, filename) + in, err := historicStates.ReadFile(filepath.Join(statesPath, filename)) require.NoError(t, err) - path := writeToFile(t, "state.json", state) - version, err := DetectVersion(path) + require.NoError(t, os.WriteFile(path, in, 0o644)) + + detectedVersion, err := DetectVersion(path) require.NoError(t, err) - 
require.Equal(t, VersionSingleThreaded, version) - }) + require.Equal(t, version, detectedVersion) + } + // Iterate all known versions to ensure we have a test case to detect every state version + for _, version := range StateVersionTypes { + version := version + t.Run(version.String(), func(t *testing.T) { + testDetection(t, version, ".bin.gz") + }) + + if version == VersionSingleThreaded { + t.Run(version.String()+".json", func(t *testing.T) { + testDetection(t, version, ".json") + }) + } + } + // Additionally, check that the latest supported versions write new states in a way that is detected correctly t.Run("SingleThreadedBinary", func(t *testing.T) { state, err := NewFromState(singlethreaded.CreateEmptyState()) require.NoError(t, err) path := writeToFile(t, "state.bin.gz", state) version, err := DetectVersion(path) require.NoError(t, err) - require.Equal(t, VersionSingleThreaded, version) + require.Equal(t, VersionSingleThreaded2, version) }) t.Run("MultiThreadedBinary", func(t *testing.T) { diff --git a/cannon/mipsevm/versions/state.go b/cannon/mipsevm/versions/state.go index afd2a94204b3..97fceadd43e4 100644 --- a/cannon/mipsevm/versions/state.go +++ b/cannon/mipsevm/versions/state.go @@ -19,6 +19,8 @@ const ( // VersionSingleThreaded is the version of the Cannon STF found in op-contracts/v1.6.0 - https://github.com/ethereum-optimism/optimism/blob/op-contracts/v1.6.0/packages/contracts-bedrock/src/cannon/MIPS.sol VersionSingleThreaded StateVersion = iota VersionMultiThreaded + // VersionSingleThreaded2 is based on VersionSingleThreaded with the addition of support for fcntl(F_GETFD) syscall + VersionSingleThreaded2 ) var ( @@ -26,7 +28,7 @@ var ( ErrJsonNotSupported = errors.New("json not supported") ) -var StateVersionTypes = []StateVersion{VersionSingleThreaded, VersionMultiThreaded} +var StateVersionTypes = []StateVersion{VersionSingleThreaded, VersionMultiThreaded, VersionSingleThreaded2} func LoadStateFromFile(path string) (*VersionedState, error) { if 
!serialize.IsBinaryFile(path) { @@ -44,7 +46,7 @@ func NewFromState(state mipsevm.FPVMState) (*VersionedState, error) { switch state := state.(type) { case *singlethreaded.State: return &VersionedState{ - Version: VersionSingleThreaded, + Version: VersionSingleThreaded2, FPVMState: state, }, nil case *multithreaded.State: @@ -79,7 +81,7 @@ func (s *VersionedState) Deserialize(in io.Reader) error { } switch s.Version { - case VersionSingleThreaded: + case VersionSingleThreaded2: state := &singlethreaded.State{} if err := state.Deserialize(in); err != nil { return err @@ -113,6 +115,8 @@ func (s StateVersion) String() string { return "singlethreaded" case VersionMultiThreaded: return "multithreaded" + case VersionSingleThreaded2: + return "singlethreaded-2" default: return "unknown" } @@ -124,6 +128,8 @@ func ParseStateVersion(ver string) (StateVersion, error) { return VersionSingleThreaded, nil case "multithreaded": return VersionMultiThreaded, nil + case "singlethreaded-2": + return VersionSingleThreaded2, nil default: return StateVersion(0), errors.New("unknown state version") } diff --git a/cannon/mipsevm/versions/state_test.go b/cannon/mipsevm/versions/state_test.go index 7fb36cd5734c..8740d51d2929 100644 --- a/cannon/mipsevm/versions/state_test.go +++ b/cannon/mipsevm/versions/state_test.go @@ -4,6 +4,7 @@ import ( "path/filepath" "testing" + "github.com/ethereum-optimism/optimism/cannon/mipsevm" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" "github.com/ethereum-optimism/optimism/cannon/mipsevm/singlethreaded" "github.com/ethereum-optimism/optimism/cannon/serialize" @@ -11,11 +12,11 @@ import ( ) func TestNewFromState(t *testing.T) { - t.Run("singlethreaded", func(t *testing.T) { + t.Run("singlethreaded-2", func(t *testing.T) { actual, err := NewFromState(singlethreaded.CreateEmptyState()) require.NoError(t, err) require.IsType(t, &singlethreaded.State{}, actual.FPVMState) - require.Equal(t, VersionSingleThreaded, actual.Version) + 
require.Equal(t, VersionSingleThreaded2, actual.Version) }) t.Run("multithreaded", func(t *testing.T) { @@ -27,16 +28,6 @@ func TestNewFromState(t *testing.T) { } func TestLoadStateFromFile(t *testing.T) { - t.Run("SinglethreadedFromJSON", func(t *testing.T) { - expected, err := NewFromState(singlethreaded.CreateEmptyState()) - require.NoError(t, err) - - path := writeToFile(t, "state.json", expected) - actual, err := LoadStateFromFile(path) - require.NoError(t, err) - require.Equal(t, expected, actual) - }) - t.Run("SinglethreadedFromBinary", func(t *testing.T) { expected, err := NewFromState(singlethreaded.CreateEmptyState()) require.NoError(t, err) @@ -58,14 +49,26 @@ func TestLoadStateFromFile(t *testing.T) { }) } -func TestMultithreadedDoesNotSupportJSON(t *testing.T) { - state, err := NewFromState(multithreaded.CreateEmptyState()) - require.NoError(t, err) +func TestVersionsOtherThanZeroDoNotSupportJSON(t *testing.T) { + tests := []struct { + version StateVersion + createState func() mipsevm.FPVMState + }{ + {VersionSingleThreaded2, func() mipsevm.FPVMState { return singlethreaded.CreateEmptyState() }}, + {VersionMultiThreaded, func() mipsevm.FPVMState { return multithreaded.CreateEmptyState() }}, + } + for _, test := range tests { + test := test + t.Run(test.version.String(), func(t *testing.T) { + state, err := NewFromState(test.createState()) + require.NoError(t, err) - dir := t.TempDir() - path := filepath.Join(dir, "test.json") - err = serialize.Write(path, state, 0o644) - require.ErrorIs(t, err, ErrJsonNotSupported) + dir := t.TempDir() + path := filepath.Join(dir, "test.json") + err = serialize.Write(path, state, 0o644) + require.ErrorIs(t, err, ErrJsonNotSupported) + }) + } } func writeToFile(t *testing.T, filename string, data serialize.Serializable) string { diff --git a/cannon/mipsevm/versions/testdata/states/0.bin.gz b/cannon/mipsevm/versions/testdata/states/0.bin.gz new file mode 100644 index 
0000000000000000000000000000000000000000..2a862e6e0c11b56feeafcde2064b2065856bdfef GIT binary patch literal 32 jcmb2|=3oGW|49mh8z!u3tYZ*hVEF%kgIV?G(?AITkbVkV literal 0 HcmV?d00001 diff --git a/cannon/mipsevm/versions/testdata/states/0.json b/cannon/mipsevm/versions/testdata/states/0.json new file mode 100644 index 000000000000..b45e978ea614 --- /dev/null +++ b/cannon/mipsevm/versions/testdata/states/0.json @@ -0,0 +1,48 @@ +{ + "memory": [], + "preimageKey": "0x0000000000000000000000000000000000000000000000000000000000000000", + "preimageOffset": 0, + "pc": 0, + "nextPC": 4, + "lo": 0, + "hi": 0, + "heap": 0, + "exit": 0, + "exited": false, + "step": 0, + "registers": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] +} + diff --git a/cannon/mipsevm/versions/testdata/states/1.bin.gz b/cannon/mipsevm/versions/testdata/states/1.bin.gz new file mode 100644 index 0000000000000000000000000000000000000000..fa6309bd09696cf8cd6cdaa3bd50e1b194d21e6e GIT binary patch literal 46 zcmb2|=3oGW|4Ate_ "${LOG_FILE}" 2>&1 + rm -rf "${BIN_DIR}" make reproducible-prestate >> "${LOG_FILE}" 2>&1 HASH=$(cat "${BIN_DIR}/prestate-proof.json" | jq -r .pre) - cp "${BIN_DIR}/prestate.json" "${STATES_DIR}/${HASH}.json" + if [ -f "${BIN_DIR}/prestate.bin.gz" ] + then + cp "${BIN_DIR}/prestate.bin.gz" "${STATES_DIR}/${HASH}.bin.gz" + else + cp "${BIN_DIR}/prestate.json" "${STATES_DIR}/${HASH}.json" + fi echo "Built ${VERSION}: ${HASH}" done diff --git a/ops-bedrock/docker-compose.yml b/ops-bedrock/docker-compose.yml index 1cc5626876bd..adcaea8f4d1b 100644 --- a/ops-bedrock/docker-compose.yml +++ b/ops-bedrock/docker-compose.yml @@ -233,7 +233,7 @@ services: OP_CHALLENGER_CANNON_L2_GENESIS: ./.devnet/genesis-l2.json OP_CHALLENGER_CANNON_BIN: ./cannon/bin/cannon OP_CHALLENGER_CANNON_SERVER: /op-program/op-program - OP_CHALLENGER_CANNON_PRESTATE: /op-program/prestate.json + 
OP_CHALLENGER_CANNON_PRESTATE: /op-program/prestate.bin.gz OP_CHALLENGER_L2_ETH_RPC: http://l2:8545 OP_CHALLENGER_MNEMONIC: test test test test test test test test test test test junk OP_CHALLENGER_HD_PATH: "m/44'/60'/0'/0/4" diff --git a/ops/docker/op-stack-go/Dockerfile b/ops/docker/op-stack-go/Dockerfile index 35f14d19a439..edb7597ec34f 100644 --- a/ops/docker/op-stack-go/Dockerfile +++ b/ops/docker/op-stack-go/Dockerfile @@ -47,14 +47,11 @@ ARG TARGETARCH # The "id" defaults to the value of "target", the cache will thus be reused during this build. # "sharing" defaults to "shared", the cache will thus be available to other concurrent docker builds. -# For now fetch the v1 cannon binary from the op-challenger image -#FROM --platform=$BUILDPLATFORM us-docker.pkg.dev/oplabs-tools-artifacts/images/op-challenger:v1.1.0 AS cannon-builder-0 +FROM --platform=$BUILDPLATFORM us-docker.pkg.dev/oplabs-tools-artifacts/images/cannon:v1.0.0-alpha.3 AS cannon-builder-0 FROM --platform=$BUILDPLATFORM builder AS cannon-builder ARG CANNON_VERSION=v0.0.0 -# uncomment these lines once there's a new Cannon version available -#COPY --from=cannon-builder-0 /usr/local/bin/cannon ./cannon/multicannon/embeds/cannon-0 -#COPY --from=cannon-builder-0 /usr/local/bin/cannon ./cannon/multicannon/embeds/cannon-1 +COPY --from=cannon-builder-0 /usr/local/bin/cannon ./cannon/multicannon/embeds/cannon-0 RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build cd cannon && make cannon \ GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$CANNON_VERSION" diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index 30b546e24ca9..4b946bdc4b50 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -140,12 +140,12 @@ "sourceCodeHash": "0x2ab6be69795109a1ee04c5693a34d6ce0ff90b62e404cdeb18178bab18d06784" }, "src/cannon/MIPS.sol": { - 
"initCodeHash": "0x4043f262804931bbbbecff64f87f2d0bdc4554b4d0a8b22df8fff940e8d239bf", - "sourceCodeHash": "0xba4674e1846afbbc708877332a38dfabd4b8d1e48ce07d8ebf0a45c9f27f16b0" + "initCodeHash": "0x3992081512da36af76b707aee7d8ef9e084c54fb1dc9f8ce9989ed16d1216f01", + "sourceCodeHash": "0x7630362c20fbca071452031b88c9384d3215c4f2cbee24c7989901de63b0c178" }, "src/cannon/MIPS2.sol": { - "initCodeHash": "0xbb8c2370460e66274210d16ae527a29cb432bb646ebdccc0db0b21e53a4e428c", - "sourceCodeHash": "0x50ed780b621521047ed36ffb260032f2e5ec287f3e1ab3d742c7de45febb280d" + "initCodeHash": "0x590be819d8f02a7f9eb04ddc447f93ccbfd8bc9339f7c2e65336f9805b6c9a66", + "sourceCodeHash": "0x5bc0ab24cf926953b2ea9eb40b929821e280a7181c6cb18e7954bc3f7dc59be1" }, "src/cannon/PreimageOracle.sol": { "initCodeHash": "0xa0b19e18561da9990c95ebea9750dd901f73147b32b8b234eca0f35073c5a970", diff --git a/packages/contracts-bedrock/src/cannon/MIPS.sol b/packages/contracts-bedrock/src/cannon/MIPS.sol index f1d216c8e6de..603ead867284 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS.sol +++ b/packages/contracts-bedrock/src/cannon/MIPS.sol @@ -45,8 +45,8 @@ contract MIPS is ISemver { } /// @notice The semantic version of the MIPS contract. - /// @custom:semver 1.1.1-beta.4 - string public constant version = "1.1.1-beta.4"; + /// @custom:semver 1.2.1-beta.1 + string public constant version = "1.2.1-beta.1"; /// @notice The preimage oracle contract. IPreimageOracle internal immutable ORACLE; diff --git a/packages/contracts-bedrock/src/cannon/MIPS2.sol b/packages/contracts-bedrock/src/cannon/MIPS2.sol index fb8409f6b41c..ebbf9302c1de 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS2.sol +++ b/packages/contracts-bedrock/src/cannon/MIPS2.sol @@ -57,8 +57,8 @@ contract MIPS2 is ISemver { } /// @notice The semantic version of the MIPS2 contract. 
- /// @custom:semver 1.0.0-beta.12 - string public constant version = "1.0.0-beta.12"; + /// @custom:semver 1.0.0-beta.13 + string public constant version = "1.0.0-beta.13"; /// @notice The preimage oracle contract. IPreimageOracle internal immutable ORACLE; diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol index a835b6feef58..1b5fddaba7fd 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol @@ -347,7 +347,7 @@ library MIPSSyscalls { /// retrieve the file-descriptor R/W flags. /// @param _a0 The file descriptor. /// @param _a1 The control command. - /// @param v0_ The file status flag (only supported command is F_GETFL), or -1 on error. + /// @param v0_ The file status flag (only supported commands are F_GETFD and F_GETFL), or -1 on error. /// @param v1_ An error number, or 0 if there is no error. function handleSysFcntl(uint32 _a0, uint32 _a1) internal pure returns (uint32 v0_, uint32 v1_) { unchecked { @@ -355,8 +355,19 @@ library MIPSSyscalls { v1_ = uint32(0); // args: _a0 = fd, _a1 = cmd - if (_a1 == 3) { - // F_GETFL: get file descriptor flags + if (_a1 == 1) { + // F_GETFD: get file descriptor flags + if ( + _a0 == FD_STDIN || _a0 == FD_STDOUT || _a0 == FD_STDERR || _a0 == FD_PREIMAGE_READ + || _a0 == FD_HINT_READ || _a0 == FD_PREIMAGE_WRITE || _a0 == FD_HINT_WRITE + ) { + v0_ = 0; // No flags set + } else { + v0_ = 0xFFffFFff; + v1_ = EBADF; + } + } else if (_a1 == 3) { + // F_GETFL: get file status flags if (_a0 == FD_STDIN || _a0 == FD_PREIMAGE_READ || _a0 == FD_HINT_READ) { v0_ = 0; // O_RDONLY } else if (_a0 == FD_STDOUT || _a0 == FD_STDERR || _a0 == FD_PREIMAGE_WRITE || _a0 == FD_HINT_WRITE) { diff --git a/packages/contracts-bedrock/test/cannon/MIPS.t.sol b/packages/contracts-bedrock/test/cannon/MIPS.t.sol index 2843b876e4ff..998bc4d4aa79 100644 --- 
a/packages/contracts-bedrock/test/cannon/MIPS.t.sol +++ b/packages/contracts-bedrock/test/cannon/MIPS.t.sol @@ -1613,7 +1613,7 @@ contract MIPS_Test is CommonTest { assertEq(postState, outputState(expect), "unexpected post state"); } - function test_fcntl_succeeds() external { + function test_fcntl_getfl_succeeds() external { uint32 insn = 0x0000000c; // syscall (MIPS.State memory state, bytes memory proof) = constructMIPSState(0, insn, 0x4, 0); state.registers[2] = 4055; // fcntl syscall @@ -1639,6 +1639,25 @@ contract MIPS_Test is CommonTest { assertEq(postState, outputState(expect), "unexpected post state"); } + function test_fcntl_getfd_succeeds() external { + uint32 insn = 0x0000000c; // syscall + (MIPS.State memory state, bytes memory proof) = constructMIPSState(0, insn, 0x4, 0); + state.registers[2] = 4055; // fcntl syscall + state.registers[4] = 0x0; // a0 + state.registers[5] = 0x1; // a1 + + MIPS.State memory expect; + expect.memRoot = state.memRoot; + expect.pc = state.nextPC; + expect.nextPC = state.nextPC + 4; + expect.step = state.step + 1; + expect.registers[2] = 0; + expect.registers[5] = state.registers[5]; + + bytes32 postState = mips.step(encodeState(state), proof, 0); + assertEq(postState, outputState(expect), "unexpected post state"); + } + function test_prestate_exited_succeeds() external { uint32 insn = 0x0000000c; // syscall (MIPS.State memory state, bytes memory proof) = constructMIPSState(0, insn, 0x4, 0); From de2d9fce97d2db2dbc9da5faa6ba7e8aabc7b6da Mon Sep 17 00:00:00 2001 From: Maurelian Date: Mon, 30 Sep 2024 16:47:53 -0400 Subject: [PATCH 082/116] test: Ensure all foundry tests are organized into subdirs (#12111) --- packages/contracts-bedrock/test/{ => L2}/L2Genesis.t.sol | 0 packages/contracts-bedrock/test/{ => L2}/Predeploys.t.sol | 0 packages/contracts-bedrock/test/{ => L2}/Preinstalls.t.sol | 0 .../contracts-bedrock/test/{ => universal}/BenchmarkTest.t.sol | 0 .../contracts-bedrock/test/{ => universal}/ExtendedPause.t.sol | 0 
packages/contracts-bedrock/test/{ => universal}/Specs.t.sol | 0 6 files changed, 0 insertions(+), 0 deletions(-) rename packages/contracts-bedrock/test/{ => L2}/L2Genesis.t.sol (100%) rename packages/contracts-bedrock/test/{ => L2}/Predeploys.t.sol (100%) rename packages/contracts-bedrock/test/{ => L2}/Preinstalls.t.sol (100%) rename packages/contracts-bedrock/test/{ => universal}/BenchmarkTest.t.sol (100%) rename packages/contracts-bedrock/test/{ => universal}/ExtendedPause.t.sol (100%) rename packages/contracts-bedrock/test/{ => universal}/Specs.t.sol (100%) diff --git a/packages/contracts-bedrock/test/L2Genesis.t.sol b/packages/contracts-bedrock/test/L2/L2Genesis.t.sol similarity index 100% rename from packages/contracts-bedrock/test/L2Genesis.t.sol rename to packages/contracts-bedrock/test/L2/L2Genesis.t.sol diff --git a/packages/contracts-bedrock/test/Predeploys.t.sol b/packages/contracts-bedrock/test/L2/Predeploys.t.sol similarity index 100% rename from packages/contracts-bedrock/test/Predeploys.t.sol rename to packages/contracts-bedrock/test/L2/Predeploys.t.sol diff --git a/packages/contracts-bedrock/test/Preinstalls.t.sol b/packages/contracts-bedrock/test/L2/Preinstalls.t.sol similarity index 100% rename from packages/contracts-bedrock/test/Preinstalls.t.sol rename to packages/contracts-bedrock/test/L2/Preinstalls.t.sol diff --git a/packages/contracts-bedrock/test/BenchmarkTest.t.sol b/packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol similarity index 100% rename from packages/contracts-bedrock/test/BenchmarkTest.t.sol rename to packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol diff --git a/packages/contracts-bedrock/test/ExtendedPause.t.sol b/packages/contracts-bedrock/test/universal/ExtendedPause.t.sol similarity index 100% rename from packages/contracts-bedrock/test/ExtendedPause.t.sol rename to packages/contracts-bedrock/test/universal/ExtendedPause.t.sol diff --git a/packages/contracts-bedrock/test/Specs.t.sol 
b/packages/contracts-bedrock/test/universal/Specs.t.sol similarity index 100% rename from packages/contracts-bedrock/test/Specs.t.sol rename to packages/contracts-bedrock/test/universal/Specs.t.sol From fe8f2ae79bcd745eb0ed4cc4d6a86a28f49a6106 Mon Sep 17 00:00:00 2001 From: Harper Date: Mon, 30 Sep 2024 22:58:45 +0200 Subject: [PATCH 083/116] Delete interface directory (#12208) --- packages/contracts-bedrock/test/kontrol/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/contracts-bedrock/test/kontrol/README.md b/packages/contracts-bedrock/test/kontrol/README.md index c0e3a39349b8..25660756963c 100644 --- a/packages/contracts-bedrock/test/kontrol/README.md +++ b/packages/contracts-bedrock/test/kontrol/README.md @@ -39,7 +39,6 @@ The directory is structured as follows │ ├── KontrolDeployment.sol: Deployment sequence for Kontrol proofs ├── proofs: Where the proofs (tests) themselves live │ ├── *.k.sol: Symbolic property tests for contracts -│ ├── interfaces: Interface files for src contracts, to avoid unnecessary compilation of contracts │ └── utils: Proof dependencies, including the autogenerated deployment summary contracts └── scripts: Where the scripts of the projects live ├── json: Data cleaning scripts for the output of KontrolDeployment.sol From 74679806b37c8f9a7ff17ef25a976099c073f4a0 Mon Sep 17 00:00:00 2001 From: Mark Tyneway Date: Mon, 30 Sep 2024 14:01:34 -0700 Subject: [PATCH 084/116] contracts-bedrock: bump forge-std (#12149) * contracts-bedrock: bump forge-std Bumps forge-std to the latest release https://github.com/foundry-rs/forge-std/tree/v1.9.3 This will help us to delete some solidity code and move to using cheatcodes in its place. 
* build: fix * gas-snapshot: update --- packages/contracts-bedrock/.gas-snapshot | 14 +++++++------- packages/contracts-bedrock/lib/forge-std | 2 +- .../scripts/ops/FeeVaultWithdrawal.s.sol | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/contracts-bedrock/.gas-snapshot b/packages/contracts-bedrock/.gas-snapshot index 3564748212d9..8e43cb748941 100644 --- a/packages/contracts-bedrock/.gas-snapshot +++ b/packages/contracts-bedrock/.gas-snapshot @@ -6,12 +6,12 @@ GasBenchMark_L1Block_SetValuesEcotone:test_setL1BlockValuesEcotone_benchmark() ( GasBenchMark_L1Block_SetValuesEcotone_Warm:test_setL1BlockValuesEcotone_benchmark() (gas: 7597) GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_0() (gas: 369242) GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_1() (gas: 2967382) -GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_0() (gas: 564362) -GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_1() (gas: 4076577) +GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_0() (gas: 564356) +GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_1() (gas: 4076571) GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_0() (gas: 467019) -GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_1() (gas: 3512701) +GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_1() (gas: 3512723) GasBenchMark_L1StandardBridge_Finalize:test_finalizeETHWithdrawal_benchmark() (gas: 72618) -GasBenchMark_L2OutputOracle:test_proposeL2Output_benchmark() (gas: 92970) -GasBenchMark_OptimismPortal:test_depositTransaction_benchmark() (gas: 68312) -GasBenchMark_OptimismPortal:test_depositTransaction_benchmark_1() (gas: 68943) -GasBenchMark_OptimismPortal:test_proveWithdrawalTransaction_benchmark() (gas: 155607) \ No newline at end of file +GasBenchMark_L2OutputOracle:test_proposeL2Output_benchmark() (gas: 92973) 
+GasBenchMark_OptimismPortal:test_depositTransaction_benchmark() (gas: 68357) +GasBenchMark_OptimismPortal:test_depositTransaction_benchmark_1() (gas: 68921) +GasBenchMark_OptimismPortal:test_proveWithdrawalTransaction_benchmark() (gas: 155610) \ No newline at end of file diff --git a/packages/contracts-bedrock/lib/forge-std b/packages/contracts-bedrock/lib/forge-std index 2d8b7b876a5b..8f24d6b04c92 160000 --- a/packages/contracts-bedrock/lib/forge-std +++ b/packages/contracts-bedrock/lib/forge-std @@ -1 +1 @@ -Subproject commit 2d8b7b876a5b328d6a73e13c4740ed7a0d72d5f4 +Subproject commit 8f24d6b04c92975e0795b5868aa0d783251cdeaa diff --git a/packages/contracts-bedrock/scripts/ops/FeeVaultWithdrawal.s.sol b/packages/contracts-bedrock/scripts/ops/FeeVaultWithdrawal.s.sol index e19cd7e994bd..5a7b48847614 100644 --- a/packages/contracts-bedrock/scripts/ops/FeeVaultWithdrawal.s.sol +++ b/packages/contracts-bedrock/scripts/ops/FeeVaultWithdrawal.s.sol @@ -65,7 +65,7 @@ contract FeeVaultWithdrawal is Script { } /// @notice Logs the information relevant to the user. 
- function log(uint256 _balance, address _recipient, address _vault) internal view { + function log(uint256 _balance, address _recipient, address _vault) internal pure { string memory logline = string.concat( "Withdrawing ", vm.toString(_balance), " to ", vm.toString(_recipient), " from ", vm.toString(_vault) ); From f0227a826d2874bf39950f2977a2e76d7c4aec24 Mon Sep 17 00:00:00 2001 From: Inphi Date: Mon, 30 Sep 2024 17:06:50 -0400 Subject: [PATCH 085/116] tag-tool: Add --prerelease option (#12210) --- ops/tag-service/tag-tool.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/ops/tag-service/tag-tool.py b/ops/tag-service/tag-tool.py index dedd3601fc8e..545f9b41571f 100644 --- a/ops/tag-service/tag-tool.py +++ b/ops/tag-service/tag-tool.py @@ -20,7 +20,7 @@ GIT_TAG_COMMAND = 'git tag -a {tag} -m "{message}"' GIT_PUSH_COMMAND = 'git push origin {tag}' -def new_tag(service, version, bump): +def new_tag(service, version, bump, pre_release): if bump == 'major': bumped = version.bump_major() elif bump == 'minor': @@ -28,11 +28,18 @@ def new_tag(service, version, bump): elif bump == 'patch': bumped = version.bump_patch() elif bump == 'prerelease': + if pre_release: + raise Exception('Cannot use --bump=prerelease with --pre-release') bumped = version.bump_prerelease() elif bump == 'finalize-prerelease': + if pre_release: + raise Exception('Cannot use --bump=finalize-prerelease with --pre-release') bumped = version.finalize_version() else: raise Exception('Invalid bump type: {}'.format(bump)) + + if pre_release: + bumped = bumped.bump_prerelease() return f'{service}/v{bumped}' def latest_version(service): @@ -57,6 +64,7 @@ def main(): parser = argparse.ArgumentParser(description='Create a new git tag for a service') parser.add_argument('--service', type=str, help='The name of the Service') parser.add_argument('--bump', type=str, help='The type of bump to apply to the version number') + parser.add_argument('--pre-release', help='Treat this tag 
as a pre-release', action='store_true') parser.add_argument('--message', type=str, help='Message to include in git tag', default='[tag-tool-release]') args = parser.parse_args() @@ -67,7 +75,7 @@ def main(): else: latest = latest_version(service) - bumped = new_tag(service, semver.VersionInfo.parse(latest), args.bump) + bumped = new_tag(service, semver.VersionInfo.parse(latest), args.bump, args.pre_release) print(f'latest tag: {latest}') print(f'new tag: {bumped}') From b127499e4e74941b62f3a63ecc2d097cee23ff8d Mon Sep 17 00:00:00 2001 From: Inphi Date: Mon, 30 Sep 2024 17:16:26 -0400 Subject: [PATCH 086/116] ci: Cannon v2 STF verify (#12211) * ci: Cannon v2 STF verify * ci: remove op-e2e-cannon-tests dep --- .circleci/config.yml | 5 ++++- cannon/Dockerfile.diff | 10 +++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 980fbc26bf8d..cdbe4fa8beb6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1171,6 +1171,8 @@ jobs: key: golang-build-cache-cannon-stf-verify-{{ checksum "go.sum" }} paths: - "/root/.cache/go-build" + - notify-failures-on-develop: + mentions: "@proofs-squad" semgrep-scan: parameters: @@ -1708,6 +1710,8 @@ workflows: - cannon-stf-verify: requires: - go-mod-download + context: + - slack - contracts-bedrock-build: skip_pattern: test context: @@ -1725,7 +1729,6 @@ workflows: requires: - contracts-bedrock-build - cannon-prestate - - cannon-stf-verify context: - slack diff --git a/cannon/Dockerfile.diff b/cannon/Dockerfile.diff index 78384fa30010..305dada244af 100644 --- a/cannon/Dockerfile.diff +++ b/cannon/Dockerfile.diff @@ -23,12 +23,12 @@ ARG GIT_DATE ARG TARGETOS TARGETARCH -FROM --platform=$BUILDPLATFORM us-docker.pkg.dev/oplabs-tools-artifacts/images/cannon:v1.0.0-alpha.2 AS cannon-v1 +FROM --platform=$BUILDPLATFORM us-docker.pkg.dev/oplabs-tools-artifacts/images/cannon:v1.1.0-alpha.1 AS cannon-v2 FROM --platform=$BUILDPLATFORM builder as cannon-verify -COPY 
--from=cannon-v1 /usr/local/bin/cannon /usr/local/bin/cannon-v1 -# verify the latest singlethreaded VM behavior against cannon-v1 -RUN cd cannon && make diff-singlethreaded-cannon -e OTHER_CANNON=/usr/local/bin/cannon-v1 +COPY --from=cannon-v2 /usr/local/bin/cannon /usr/local/bin/cannon-v2 +# verify the latest singlethreaded VM behavior against cannon-v2 +RUN cd cannon && make diff-singlethreaded-2-cannon -e OTHER_CANNON=/usr/local/bin/cannon-v2 RUN --mount=type=cache,target=/root/.cache/go-build cd cannon && \ - make diff-singlethreaded-cannon -e OTHER_CANNON=/usr/local/bin/cannon-v1 \ + make diff-singlethreaded-2-cannon -e OTHER_CANNON=/usr/local/bin/cannon-v2 \ GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE From b6e27b65a44a881e638ab7e60a4786830bb1c3e8 Mon Sep 17 00:00:00 2001 From: Maurelian Date: Mon, 30 Sep 2024 17:18:30 -0400 Subject: [PATCH 087/116] test(deploy): Deploy a distinct ProxyAdmin for Superchain contracts (#12130) * test(deploy): Deploy a distinct ProxyAdmin for Superchain contracts * fix: only set address manager for OP Chain's ProxyAdmin * fix: typo * test: fix create2 collision between ProxyAdmins * fix: lint * fix: move setupOpChainAdmin() before setupOpAltDA The DA challenge contract needs a proxy admin too * feat: use create2AndSave for ProxyAdmin * fix: Do not double save the superchain ProxyAdmin * Fix whitespace Co-authored-by: Matt Solomon --------- Co-authored-by: Matt Solomon --- .../scripts/deploy/Deploy.s.sol | 62 +++++++++---------- .../scripts/deploy/DeployOwnership.s.sol | 2 +- 2 files changed, 31 insertions(+), 33 deletions(-) diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index 6d8004b5298a..02a77678abfe 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -191,8 +191,9 @@ contract Deploy is Deployer { 
//////////////////////////////////////////////////////////////// /// @notice Transfer ownership of the ProxyAdmin contract to the final system owner - function transferProxyAdminOwnership() public broadcast { - IProxyAdmin proxyAdmin = IProxyAdmin(mustGetAddress("ProxyAdmin")); + function transferProxyAdminOwnership(bool _isSuperchain) public broadcast { + string memory proxyAdminName = _isSuperchain ? "SuperchainProxyAdmin" : "ProxyAdmin"; + IProxyAdmin proxyAdmin = IProxyAdmin(mustGetAddress(proxyAdminName)); address owner = proxyAdmin.owner(); address finalSystemOwner = cfg.finalSystemOwner(); @@ -202,17 +203,6 @@ contract Deploy is Deployer { } } - /// @notice Transfer ownership of a Proxy to the ProxyAdmin contract - /// This is expected to be used in conjusting with deployERC1967ProxyWithOwner after setup actions - /// have been performed on the proxy. - /// @param _name The name of the proxy to transfer ownership of. - function transferProxyToProxyAdmin(string memory _name) public broadcast { - IProxy proxy = IProxy(mustGetAddress(_name)); - address proxyAdmin = mustGetAddress("ProxyAdmin"); - proxy.changeAdmin(proxyAdmin); - console.log("Proxy %s ownership transferred to ProxyAdmin at: %s", _name, proxyAdmin); - } - //////////////////////////////////////////////////////////////// // SetUp and Run // //////////////////////////////////////////////////////////////// @@ -276,16 +266,13 @@ contract Deploy is Deployer { function _run(bool _needsSuperchain) internal { console.log("start of L1 Deploy!"); - // Deploy a new ProxyAdmin and AddressManager - // This proxy will be used on the SuperchainConfig and ProtocolVersions contracts, as well as the contracts - // in the OP Chain system. 
- setupAdmin(); - if (_needsSuperchain) { + deployProxyAdmin({ _isSuperchain: true }); setupSuperchain(); console.log("set up superchain!"); } + setupOpChainAdmin(); if (cfg.useAltDA()) { bytes32 typeHash = keccak256(bytes(cfg.daCommitmentType())); bytes32 keccakHash = keccak256(bytes("KeccakCommitment")); @@ -293,6 +280,7 @@ contract Deploy is Deployer { setupOpAltDA(); } } + setupOpChain(); console.log("set up op chain!"); } @@ -302,9 +290,9 @@ contract Deploy is Deployer { //////////////////////////////////////////////////////////////// /// @notice Deploy the address manager and proxy admin contracts. - function setupAdmin() public { + function setupOpChainAdmin() public { deployAddressManager(); - deployProxyAdmin(); + deployProxyAdmin({ _isSuperchain: false }); } /// @notice Deploy a full system with a new SuperchainConfig @@ -315,12 +303,12 @@ contract Deploy is Deployer { console.log("Setting up Superchain"); // Deploy the SuperchainConfigProxy - deployERC1967Proxy("SuperchainConfigProxy"); + deployERC1967ProxyWithOwner("SuperchainConfigProxy", mustGetAddress("SuperchainProxyAdmin")); deploySuperchainConfig(); initializeSuperchainConfig(); // Deploy the ProtocolVersionsProxy - deployERC1967Proxy("ProtocolVersionsProxy"); + deployERC1967ProxyWithOwner("ProtocolVersionsProxy", mustGetAddress("SuperchainProxyAdmin")); deployProtocolVersions(); initializeProtocolVersions(); } @@ -346,7 +334,7 @@ contract Deploy is Deployer { transferDisputeGameFactoryOwnership(); transferDelayedWETHOwnership(); - transferProxyAdminOwnership(); + transferProxyAdminOwnership({ _isSuperchain: false }); } /// @notice Deploy all of the OP Chain specific contracts @@ -441,23 +429,33 @@ contract Deploy is Deployer { } /// @notice Deploy the ProxyAdmin - function deployProxyAdmin() public broadcast returns (address addr_) { + function deployProxyAdmin(bool _isSuperchain) public broadcast returns (address addr_) { + string memory proxyAdminName = _isSuperchain ? 
"SuperchainProxyAdmin" : "ProxyAdmin"; + + console.log("Deploying %s", proxyAdminName); + + // Include the proxyAdminName in the salt to prevent a create2 collision when both the Superchain and an OP + // Chain are being setup. IProxyAdmin admin = IProxyAdmin( DeployUtils.create2AndSave({ _save: this, - _salt: _implSalt(), + _salt: keccak256(abi.encode(_implSalt(), proxyAdminName)), _name: "ProxyAdmin", + _nick: proxyAdminName, _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxyAdmin.__constructor__, (msg.sender))) }) ); require(admin.owner() == msg.sender); - IAddressManager addressManager = IAddressManager(mustGetAddress("AddressManager")); - if (admin.addressManager() != addressManager) { - admin.setAddressManager(addressManager); + // The AddressManager is only required for OP Chains + if (!_isSuperchain) { + IAddressManager addressManager = IAddressManager(mustGetAddress("AddressManager")); + if (admin.addressManager() != addressManager) { + admin.setAddressManager(addressManager); + } + require(admin.addressManager() == addressManager); } - - require(admin.addressManager() == addressManager); + console.log("%s deployed at %s", proxyAdminName, address(admin)); addr_ = address(admin); } @@ -933,7 +931,7 @@ contract Deploy is Deployer { address payable superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); address payable superchainConfig = mustGetAddress("SuperchainConfig"); - IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("SuperchainProxyAdmin"))); proxyAdmin.upgradeAndCall({ _proxy: superchainConfigProxy, _implementation: superchainConfig, @@ -1345,7 +1343,7 @@ contract Deploy is Deployer { uint256 requiredProtocolVersion = cfg.requiredProtocolVersion(); uint256 recommendedProtocolVersion = cfg.recommendedProtocolVersion(); - IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("ProxyAdmin"))); + IProxyAdmin proxyAdmin = 
IProxyAdmin(payable(mustGetAddress("SuperchainProxyAdmin"))); proxyAdmin.upgradeAndCall({ _proxy: payable(protocolVersionsProxy), _implementation: protocolVersions, diff --git a/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol index 2463576fb41a..5171a2066628 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol @@ -19,8 +19,8 @@ import { DeputyGuardianModule } from "src/safe/DeputyGuardianModule.sol"; import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; import { Deploy } from "./Deploy.s.sol"; -/// @notice Configuration for a Safe +/// @notice Configuration for a Safe struct SafeConfig { uint256 threshold; address[] owners; From 909d7fd2363867939e95a02837989c4d6e400567 Mon Sep 17 00:00:00 2001 From: Inphi Date: Mon, 30 Sep 2024 17:45:52 -0400 Subject: [PATCH 088/116] supervisor: Fix nil check (#12209) --- op-supervisor/supervisor/backend/safety/views.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/op-supervisor/supervisor/backend/safety/views.go b/op-supervisor/supervisor/backend/safety/views.go index c9393758fad5..98941dd7e6e9 100644 --- a/op-supervisor/supervisor/backend/safety/views.go +++ b/op-supervisor/supervisor/backend/safety/views.go @@ -66,7 +66,7 @@ func (vi *View) Process() error { return logs.ErrFuture } // check if it is an executing message. If so, check the dependency - if execMsg := state.ExecMessage(); execMsg == nil { + if execMsg := state.ExecMessage(); execMsg != nil { // Check if executing message is within cross L2 view, // relative to the L1 view of current message. 
// And check if the message is valid to execute at all From 5bd72f690bf09a6ae3bbe1802c4be60cf99628b3 Mon Sep 17 00:00:00 2001 From: Roberto Bayardo Date: Mon, 30 Sep 2024 14:50:45 -0700 Subject: [PATCH 089/116] Holocene extensions to L1Block.sol (#12096) --- packages/contracts-bedrock/.gas-snapshot | 6 +- packages/contracts-bedrock/semver-lock.json | 8 +- .../snapshots/abi/L1Block.json | 33 ++++++ .../snapshots/abi/L1BlockInterop.json | 33 ++++++ .../snapshots/storageLayout/L1Block.json | 14 +++ .../storageLayout/L1BlockInterop.json | 16 ++- packages/contracts-bedrock/src/L2/L1Block.sol | 63 +++++++++- .../src/L2/L1BlockInterop.sol | 4 +- .../src/L2/interfaces/IL1Block.sol | 3 + .../src/L2/interfaces/IL1BlockInterop.sol | 3 + .../src/libraries/Encoding.sol | 46 ++++++++ .../contracts-bedrock/test/L2/L1Block.t.sol | 110 ++++++++++++++++++ 12 files changed, 327 insertions(+), 12 deletions(-) diff --git a/packages/contracts-bedrock/.gas-snapshot b/packages/contracts-bedrock/.gas-snapshot index 8e43cb748941..700053bd8ab9 100644 --- a/packages/contracts-bedrock/.gas-snapshot +++ b/packages/contracts-bedrock/.gas-snapshot @@ -1,6 +1,6 @@ -GasBenchMark_L1BlockInterop_DepositsComplete:test_depositsComplete_benchmark() (gas: 7567) -GasBenchMark_L1BlockInterop_DepositsComplete_Warm:test_depositsComplete_benchmark() (gas: 5567) -GasBenchMark_L1BlockInterop_SetValuesInterop:test_setL1BlockValuesInterop_benchmark() (gas: 175677) +GasBenchMark_L1BlockInterop_DepositsComplete:test_depositsComplete_benchmark() (gas: 7589) +GasBenchMark_L1BlockInterop_DepositsComplete_Warm:test_depositsComplete_benchmark() (gas: 5589) +GasBenchMark_L1BlockInterop_SetValuesInterop:test_setL1BlockValuesInterop_benchmark() (gas: 175655) GasBenchMark_L1BlockInterop_SetValuesInterop_Warm:test_setL1BlockValuesInterop_benchmark() (gas: 5099) GasBenchMark_L1Block_SetValuesEcotone:test_setL1BlockValuesEcotone_benchmark() (gas: 158531) 
GasBenchMark_L1Block_SetValuesEcotone_Warm:test_setL1BlockValuesEcotone_benchmark() (gas: 7597) diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index 4b946bdc4b50..3ded81206c65 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -80,12 +80,12 @@ "sourceCodeHash": "0x4f21025d4b5c9c74cf7040db6f8e9ce605b82931e3012fee51d3f5d9fbd7b73f" }, "src/L2/L1Block.sol": { - "initCodeHash": "0xd12353c5bf71c6765cc9292eecf262f216e67f117f4ba6287796a5207dbca00f", - "sourceCodeHash": "0xfe3a9585d9bfca8428e12759cab68a3114374e5c37371cfe08bb1976a9a5a041" + "initCodeHash": "0x48d118de2a69fb0fbf6a8da4603025e12da1360da8fb70a5e56342ba64b3ff5f", + "sourceCodeHash": "0x04d25cbf0c4ea5025b0dd3f79f0a32f6623ddb869cff35649072ab3ad964b310" }, "src/L2/L1BlockInterop.sol": { - "initCodeHash": "0x77b3b2151fe14ea36a640469115a5e4de27f7654a9606a9d0701522c6a4ad887", - "sourceCodeHash": "0x7417677643e1df1ae1782513b94c7821097b9529d3f8626c3bcb8b3a9ae0d180" + "initCodeHash": "0x7f87e0b8be9801cb242c469ec7999eb80221f65063aedd4ca4923a5e0fb0e5a7", + "sourceCodeHash": "0x722071a9d08dcbeda9cdaadeb2dd679a8bc192563e4a0439f4cd74439fa75581" }, "src/L2/L1FeeVault.sol": { "initCodeHash": "0x3bfcd57e25ad54b66c374f63e24e33a6cf107044aa8f5f69ef21202c380b5c5b", diff --git a/packages/contracts-bedrock/snapshots/abi/L1Block.json b/packages/contracts-bedrock/snapshots/abi/L1Block.json index 020c9e942c75..6efa216b5bd6 100644 --- a/packages/contracts-bedrock/snapshots/abi/L1Block.json +++ b/packages/contracts-bedrock/snapshots/abi/L1Block.json @@ -77,6 +77,32 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [], + "name": "eip1559Denominator", + "outputs": [ + { + "internalType": "uint64", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "eip1559Elasticity", + "outputs": [ + { + "internalType": "uint64", + "name": "", + "type": 
"uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "gasPayingToken", @@ -282,6 +308,13 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [], + "name": "setL1BlockValuesHolocene", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, { "inputs": [], "name": "timestamp", diff --git a/packages/contracts-bedrock/snapshots/abi/L1BlockInterop.json b/packages/contracts-bedrock/snapshots/abi/L1BlockInterop.json index ab089f0cec55..ba871eb2086a 100644 --- a/packages/contracts-bedrock/snapshots/abi/L1BlockInterop.json +++ b/packages/contracts-bedrock/snapshots/abi/L1BlockInterop.json @@ -97,6 +97,32 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [], + "name": "eip1559Denominator", + "outputs": [ + { + "internalType": "uint64", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "eip1559Elasticity", + "outputs": [ + { + "internalType": "uint64", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "gasPayingToken", @@ -352,6 +378,13 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [], + "name": "setL1BlockValuesHolocene", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, { "inputs": [], "name": "setL1BlockValuesInterop", diff --git a/packages/contracts-bedrock/snapshots/storageLayout/L1Block.json b/packages/contracts-bedrock/snapshots/storageLayout/L1Block.json index 2928d2147b5c..5ee7d1e31942 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/L1Block.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/L1Block.json @@ -75,5 +75,19 @@ "offset": 0, "slot": "7", "type": "uint256" + }, + { + "bytes": "8", + "label": "eip1559Denominator", + "offset": 0, + "slot": "8", + "type": "uint64" + }, + { + "bytes": "8", + "label": "eip1559Elasticity", + 
"offset": 8, + "slot": "8", + "type": "uint64" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/L1BlockInterop.json b/packages/contracts-bedrock/snapshots/storageLayout/L1BlockInterop.json index 14ee2ff9609a..4f0eeb0e52d7 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/L1BlockInterop.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/L1BlockInterop.json @@ -76,11 +76,25 @@ "slot": "7", "type": "uint256" }, + { + "bytes": "8", + "label": "eip1559Denominator", + "offset": 0, + "slot": "8", + "type": "uint64" + }, + { + "bytes": "8", + "label": "eip1559Elasticity", + "offset": 8, + "slot": "8", + "type": "uint64" + }, { "bytes": "64", "label": "dependencySet", "offset": 0, - "slot": "8", + "slot": "9", "type": "struct EnumerableSet.UintSet" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/src/L2/L1Block.sol b/packages/contracts-bedrock/src/L2/L1Block.sol index c61f45b83629..feb9f18d1b89 100644 --- a/packages/contracts-bedrock/src/L2/L1Block.sol +++ b/packages/contracts-bedrock/src/L2/L1Block.sol @@ -57,9 +57,15 @@ contract L1Block is ISemver, IGasToken { /// @notice The latest L1 blob base fee. uint256 public blobBaseFee; - /// @custom:semver 1.5.1-beta.2 + /// @notice The eip-1550 base fee change denominator value. + uint64 public eip1559Denominator; + + /// @notice The eip-1550 base fee change elasticity value. + uint64 public eip1559Elasticity; + + /// @custom:semver 1.5.1-beta.3 function version() public pure virtual returns (string memory) { - return "1.5.1-beta.2"; + return "1.5.1-beta.3"; } /// @notice Returns the gas paying token, its decimals, name and symbol. @@ -168,6 +174,59 @@ contract L1Block is ISemver, IGasToken { } } + /// @notice Updates the L1 block values for a Holocene upgraded chain. + /// Params are packed and passed in as raw msg.data instead of ABI to reduce calldata size. + /// Params are expected to be in the following order: + /// 1. 
_baseFeeScalar L1 base fee scalar + /// 2. _blobBaseFeeScalar L1 blob base fee scalar + /// 3. _sequenceNumber Number of L2 blocks since epoch start. + /// 4. _timestamp L1 timestamp. + /// 5. _number L1 blocknumber. + /// 6. _basefee L1 base fee. + /// 7. _blobBaseFee L1 blob base fee. + /// 8. _hash L1 blockhash. + /// 9. _batcherHash Versioned hash to authenticate batcher by. + /// 10. _eip1559Elasticity EIP-1559 elasticity multiplier value. + /// 11. _eip1559Denominator EIP-1559 base fee change denominator value. + function setL1BlockValuesHolocene() public { + _setL1BlockValuesHolocene(); + } + + /// @notice Updates the L1 block values for a Holocene upgraded chain. + /// Params are packed and passed in as raw msg.data instead of ABI to reduce calldata size. + /// Params are expected to be in the following order: + /// 1. _baseFeeScalar L1 base fee scalar + /// 2. _blobBaseFeeScalar L1 blob base fee scalar + /// 3. _sequenceNumber Number of L2 blocks since epoch start. + /// 4. _timestamp L1 timestamp. + /// 5. _number L1 blocknumber. + /// 6. _basefee L1 base fee. + /// 7. _blobBaseFee L1 blob base fee. + /// 8. _hash L1 blockhash. + /// 9. _batcherHash Versioned hash to authenticate batcher by. + /// 10. _eip1559Elasticity EIP-1559 elasticity multiplier value. + /// 11. _eip1559Denominator EIP-1559 base fee change denominator value. + function _setL1BlockValuesHolocene() internal { + address depositor = DEPOSITOR_ACCOUNT(); + assembly { + // Revert if the caller is not the depositor account. 
+ if xor(caller(), depositor) { + mstore(0x00, 0x3cc50b45) // 0x3cc50b45 is the 4-byte selector of "NotDepositor()" + revert(0x1C, 0x04) // returns the stored 4-byte selector from above + } + // sequencenum (uint64), blobBaseFeeScalar (uint32), baseFeeScalar (uint32) + sstore(sequenceNumber.slot, shr(128, calldataload(4))) + // number (uint64) and timestamp (uint64) + sstore(number.slot, shr(128, calldataload(20))) + sstore(basefee.slot, calldataload(36)) // uint256 + sstore(blobBaseFee.slot, calldataload(68)) // uint256 + sstore(hash.slot, calldataload(100)) // bytes32 + sstore(batcherHash.slot, calldataload(132)) // bytes32 + // eip1559Denominator (uint64) and eip1559Elasticity (uint64) + sstore(eip1559Denominator.slot, shr(128, calldataload(164))) // uint64 + } + } + /// @notice Sets the gas paying token for the L2 system. Can only be called by the special /// depositor account. This function is not called on every L2 block but instead /// only called by specially crafted L1 deposit transactions. diff --git a/packages/contracts-bedrock/src/L2/L1BlockInterop.sol b/packages/contracts-bedrock/src/L2/L1BlockInterop.sol index 15ea67f5e6b3..189e0fe7d7d0 100644 --- a/packages/contracts-bedrock/src/L2/L1BlockInterop.sol +++ b/packages/contracts-bedrock/src/L2/L1BlockInterop.sol @@ -42,9 +42,9 @@ contract L1BlockInterop is L1Block { /// keccak256(abi.encode(uint256(keccak256("l1Block.identifier.isDeposit")) - 1)) & ~bytes32(uint256(0xff)) uint256 internal constant IS_DEPOSIT_SLOT = 0x921bd3a089295c6e5540e8fba8195448d253efd6f2e3e495b499b627dc36a300; - /// @custom:semver +interop + /// @custom:semver +interop-beta.1 function version() public pure override returns (string memory) { - return string.concat(super.version(), "+interop"); + return string.concat(super.version(), "+interop-beta.1"); } /// @notice Returns whether the call was triggered from a a deposit or not. 
diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL1Block.sol b/packages/contracts-bedrock/src/L2/interfaces/IL1Block.sol index a43b3c7c3963..0eba9a9973f3 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL1Block.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IL1Block.sol @@ -34,8 +34,11 @@ interface IL1Block { ) external; function setL1BlockValuesEcotone() external; + function setL1BlockValuesHolocene() external; function timestamp() external view returns (uint64); function version() external pure returns (string memory); + function eip1559Denominator() external view returns (uint64); + function eip1559Elasticity() external view returns (uint64); function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL1BlockInterop.sol b/packages/contracts-bedrock/src/L2/interfaces/IL1BlockInterop.sol index dd72e3fa6f89..31943804b961 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL1BlockInterop.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IL1BlockInterop.sol @@ -52,9 +52,12 @@ interface IL1BlockInterop { ) external; function setL1BlockValuesEcotone() external; + function setL1BlockValuesHolocene() external; function setL1BlockValuesInterop() external; function timestamp() external view returns (uint64); function version() external pure returns (string memory); + function eip1559Denominator() external view returns (uint64); + function eip1559Elasticity() external view returns (uint64); function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/libraries/Encoding.sol b/packages/contracts-bedrock/src/libraries/Encoding.sol index edcdd4ed75e2..896e60e5bae2 100644 --- a/packages/contracts-bedrock/src/libraries/Encoding.sol +++ b/packages/contracts-bedrock/src/libraries/Encoding.sol @@ -213,4 +213,50 @@ library Encoding { _batcherHash ); } + + /// @notice Returns an appropriately encoded call to L1Block.setL1BlockValuesHolocene + /// @param baseFeeScalar L1 base fee Scalar 
+ /// @param blobBaseFeeScalar L1 blob base fee Scalar + /// @param sequenceNumber Number of L2 blocks since epoch start. + /// @param timestamp L1 timestamp. + /// @param number L1 blocknumber. + /// @param baseFee L1 base fee. + /// @param blobBaseFee L1 blob base fee. + /// @param hash L1 blockhash. + /// @param batcherHash Versioned hash to authenticate batcher by. + /// @param eip1559Elasticity EIP-1559 elasticity parameter + /// @param eip1559Denominator EIP-1559 denominator parameter + function encodeSetL1BlockValuesHolocene( + uint32 baseFeeScalar, + uint32 blobBaseFeeScalar, + uint64 sequenceNumber, + uint64 timestamp, + uint64 number, + uint256 baseFee, + uint256 blobBaseFee, + bytes32 hash, + bytes32 batcherHash, + uint64 eip1559Elasticity, + uint64 eip1559Denominator + ) + internal + pure + returns (bytes memory) + { + bytes4 functionSignature = bytes4(keccak256("setL1BlockValuesHolocene()")); + return abi.encodePacked( + functionSignature, + baseFeeScalar, + blobBaseFeeScalar, + sequenceNumber, + timestamp, + number, + baseFee, + blobBaseFee, + hash, + batcherHash, + eip1559Elasticity, + eip1559Denominator + ); + } } diff --git a/packages/contracts-bedrock/test/L2/L1Block.t.sol b/packages/contracts-bedrock/test/L2/L1Block.t.sol index 762553a2ff2f..06de35f51c1d 100644 --- a/packages/contracts-bedrock/test/L2/L1Block.t.sol +++ b/packages/contracts-bedrock/test/L2/L1Block.t.sol @@ -165,6 +165,116 @@ contract L1BlockEcotone_Test is L1BlockTest { } } +contract L1BlockHolocene_Test is L1BlockTest { + /// @dev Tests that setL1BlockValuesHolocene updates the values appropriately. 
+ function testFuzz_setL1BlockValuesHolocene_succeeds( + uint32 baseFeeScalar, + uint32 blobBaseFeeScalar, + uint64 sequenceNumber, + uint64 timestamp, + uint64 number, + uint256 baseFee, + uint256 blobBaseFee, + bytes32 hash, + bytes32 batcherHash, + uint64 eip1559Elasticity, + uint64 eip1559Denominator + ) + external + { + bytes memory functionCallDataPacked = Encoding.encodeSetL1BlockValuesHolocene( + baseFeeScalar, + blobBaseFeeScalar, + sequenceNumber, + timestamp, + number, + baseFee, + blobBaseFee, + hash, + batcherHash, + eip1559Elasticity, + eip1559Denominator + ); + + vm.prank(depositor); + (bool success,) = address(l1Block).call(functionCallDataPacked); + assertTrue(success, "Function call failed"); + + assertEq(l1Block.baseFeeScalar(), baseFeeScalar); + assertEq(l1Block.blobBaseFeeScalar(), blobBaseFeeScalar); + assertEq(l1Block.sequenceNumber(), sequenceNumber); + assertEq(l1Block.timestamp(), timestamp); + assertEq(l1Block.number(), number); + assertEq(l1Block.basefee(), baseFee); + assertEq(l1Block.blobBaseFee(), blobBaseFee); + assertEq(l1Block.hash(), hash); + assertEq(l1Block.batcherHash(), batcherHash); + assertEq(l1Block.eip1559Denominator(), eip1559Denominator); + assertEq(l1Block.eip1559Elasticity(), eip1559Elasticity); + + // ensure we didn't accidentally pollute the 128 bits of the sequencenum+scalars slot that + // should be empty + bytes32 scalarsSlot = vm.load(address(l1Block), bytes32(uint256(3))); + bytes32 mask128 = hex"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF00000000000000000000000000000000"; + + assertEq(0, scalarsSlot & mask128); + + // ensure we didn't accidentally pollute the 128 bits of the number & timestamp slot that + // should be empty + bytes32 numberTimestampSlot = vm.load(address(l1Block), bytes32(uint256(0))); + assertEq(0, numberTimestampSlot & mask128); + + // ensure we didn't accidentally pollute the 128 bits of the eip-1559 parameters slot that + // should be empty + bytes32 eip1559ParamsSlot = vm.load(address(l1Block), 
bytes32(uint256(9))); + assertEq(0, eip1559ParamsSlot & mask128); + } + + /// @dev Tests that `setL1BlockValuesHolocene` succeeds if sender address is the depositor + function test_setL1BlockValuesHolocene_isDepositor_succeeds() external { + bytes memory functionCallDataPacked = Encoding.encodeSetL1BlockValuesHolocene( + type(uint32).max, + type(uint32).max, + type(uint64).max, + type(uint64).max, + type(uint64).max, + type(uint256).max, + type(uint256).max, + bytes32(type(uint256).max), + bytes32(type(uint256).max), + type(uint64).max, + type(uint64).max + ); + + vm.prank(depositor); + (bool success,) = address(l1Block).call(functionCallDataPacked); + assertTrue(success, "function call failed"); + } + + /// @dev Tests that `setL1BlockValuesEcotone` reverts if sender address is not the depositor + function test_setL1BlockValuesHolocene_notDepositor_reverts() external { + bytes memory functionCallDataPacked = Encoding.encodeSetL1BlockValuesHolocene( + type(uint32).max, + type(uint32).max, + type(uint64).max, + type(uint64).max, + type(uint64).max, + type(uint256).max, + type(uint256).max, + bytes32(type(uint256).max), + bytes32(type(uint256).max), + type(uint64).max, + type(uint64).max + ); + + (bool success, bytes memory data) = address(l1Block).call(functionCallDataPacked); + assertTrue(!success, "function call should have failed"); + // make sure return value is the expected function selector for "NotDepositor()" + bytes memory expReturn = hex"3cc50b45"; + assertEq(data, expReturn); + } +} + contract L1BlockCustomGasToken_Test is L1BlockTest { function testFuzz_setGasPayingToken_succeeds( address _token, From 75b0e66d0b663e0cde34b200ae63b2f008b0060d Mon Sep 17 00:00:00 2001 From: mbaxter Date: Mon, 30 Sep 2024 15:22:23 -0700 Subject: [PATCH 090/116] cannon: Bump go tests to go1.22 (#12214) * cannon: Update cannon go test program versions * cannon: Allow more steps for multithreaded program --- cannon/mipsevm/multithreaded/instrumented_test.go | 2 +- 
cannon/testdata/example/alloc/go.mod | 4 ++-- cannon/testdata/example/claim/go.mod | 4 ++-- cannon/testdata/example/entry/go.mod | 4 +++- cannon/testdata/example/hello/go.mod | 4 +++- cannon/testdata/example/multithreaded/go.mod | 4 +++- 6 files changed, 14 insertions(+), 8 deletions(-) diff --git a/cannon/mipsevm/multithreaded/instrumented_test.go b/cannon/mipsevm/multithreaded/instrumented_test.go index b3ce2d95eac3..20ce2b9cc0b0 100644 --- a/cannon/mipsevm/multithreaded/instrumented_test.go +++ b/cannon/mipsevm/multithreaded/instrumented_test.go @@ -41,7 +41,7 @@ func TestInstrumentedState_MultithreadedProgram(t *testing.T) { var stdOutBuf, stdErrBuf bytes.Buffer us := NewInstrumentedState(state, oracle, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger(), nil) - for i := 0; i < 1_000_000; i++ { + for i := 0; i < 2_000_000; i++ { if us.GetState().GetExited() { break } diff --git a/cannon/testdata/example/alloc/go.mod b/cannon/testdata/example/alloc/go.mod index d4d3c23faf2d..f0525fb68d5a 100644 --- a/cannon/testdata/example/alloc/go.mod +++ b/cannon/testdata/example/alloc/go.mod @@ -1,8 +1,8 @@ module alloc -go 1.21 +go 1.22 -toolchain go1.21.1 +toolchain go1.22.0 require github.com/ethereum-optimism/optimism v0.0.0 diff --git a/cannon/testdata/example/claim/go.mod b/cannon/testdata/example/claim/go.mod index c70d9906f06c..be3ddc7c0040 100644 --- a/cannon/testdata/example/claim/go.mod +++ b/cannon/testdata/example/claim/go.mod @@ -1,8 +1,8 @@ module claim -go 1.21 +go 1.22 -toolchain go1.21.1 +toolchain go1.22.0 require github.com/ethereum-optimism/optimism v0.0.0 diff --git a/cannon/testdata/example/entry/go.mod b/cannon/testdata/example/entry/go.mod index 2e4d29124f54..296b95426437 100644 --- a/cannon/testdata/example/entry/go.mod +++ b/cannon/testdata/example/entry/go.mod @@ -1,3 +1,5 @@ module entry -go 1.21 +go 1.22 + +toolchain go1.22.0 diff --git a/cannon/testdata/example/hello/go.mod 
b/cannon/testdata/example/hello/go.mod index da6c43db676b..b54bb78c6aee 100644 --- a/cannon/testdata/example/hello/go.mod +++ b/cannon/testdata/example/hello/go.mod @@ -1,3 +1,5 @@ module hello -go 1.20 +go 1.22 + +toolchain go1.22.0 diff --git a/cannon/testdata/example/multithreaded/go.mod b/cannon/testdata/example/multithreaded/go.mod index a075941f46c3..e1bdb77a9aff 100644 --- a/cannon/testdata/example/multithreaded/go.mod +++ b/cannon/testdata/example/multithreaded/go.mod @@ -1,3 +1,5 @@ module multithreaded -go 1.21 +go 1.22 + +toolchain go1.22.0 From 31e244c3152da815edab1cc659cac86aeec8fe15 Mon Sep 17 00:00:00 2001 From: Roberto Bayardo Date: Mon, 30 Sep 2024 17:27:35 -0700 Subject: [PATCH 091/116] - fix lint issue in Encoding.sol (#12215) - clarify that 'just update-foundry' needs to be run from the root of the repo in check-foundry script. Usually the user will be in contracts-bedrock/src when seeing this message, and the justfile there doesn't handle it. --- .../scripts/checks/check-foundry-install.sh | 2 +- .../src/libraries/Encoding.sol | 66 +++++++++---------- 2 files changed, 34 insertions(+), 34 deletions(-) diff --git a/packages/contracts-bedrock/scripts/checks/check-foundry-install.sh b/packages/contracts-bedrock/scripts/checks/check-foundry-install.sh index a0fa104d5bc8..a2093e936f3f 100755 --- a/packages/contracts-bedrock/scripts/checks/check-foundry-install.sh +++ b/packages/contracts-bedrock/scripts/checks/check-foundry-install.sh @@ -41,5 +41,5 @@ if [ "$INSTALLED_VERSION" = "$EXPECTED_VERSION" ]; then else echo "Mismatch between installed Foundry version ($INSTALLED_VERSION) and expected version ($EXPECTED_VERSION)." echo "Your version of Foundry may either not be up to date, or it could be a later version." - echo "Running just update-foundry will install the expected version." + echo "Running 'just update-foundry' from the repository root will install the expected version." 
fi diff --git a/packages/contracts-bedrock/src/libraries/Encoding.sol b/packages/contracts-bedrock/src/libraries/Encoding.sol index 896e60e5bae2..ea33f3ca50bf 100644 --- a/packages/contracts-bedrock/src/libraries/Encoding.sol +++ b/packages/contracts-bedrock/src/libraries/Encoding.sol @@ -215,29 +215,29 @@ library Encoding { } /// @notice Returns an appropriately encoded call to L1Block.setL1BlockValuesHolocene - /// @param baseFeeScalar L1 base fee Scalar - /// @param blobBaseFeeScalar L1 blob base fee Scalar - /// @param sequenceNumber Number of L2 blocks since epoch start. - /// @param timestamp L1 timestamp. - /// @param number L1 blocknumber. - /// @param baseFee L1 base fee. - /// @param blobBaseFee L1 blob base fee. - /// @param hash L1 blockhash. - /// @param batcherHash Versioned hash to authenticate batcher by. - /// @param eip1559Elasticity EIP-1559 elasticity parameter - /// @param eip1559Denominator EIP-1559 denominator parameter + /// @param _baseFeeScalar L1 base fee Scalar + /// @param _blobBaseFeeScalar L1 blob base fee Scalar + /// @param _sequenceNumber Number of L2 blocks since epoch start. + /// @param _timestamp L1 timestamp. + /// @param _number L1 blocknumber. + /// @param _baseFee L1 base fee. + /// @param _blobBaseFee L1 blob base fee. + /// @param _hash L1 blockhash. + /// @param _batcherHash Versioned hash to authenticate batcher by. 
+ /// @param _eip1559Elasticity EIP-1559 elasticity parameter + /// @param _eip1559Denominator EIP-1559 denominator parameter function encodeSetL1BlockValuesHolocene( - uint32 baseFeeScalar, - uint32 blobBaseFeeScalar, - uint64 sequenceNumber, - uint64 timestamp, - uint64 number, - uint256 baseFee, - uint256 blobBaseFee, - bytes32 hash, - bytes32 batcherHash, - uint64 eip1559Elasticity, - uint64 eip1559Denominator + uint32 _baseFeeScalar, + uint32 _blobBaseFeeScalar, + uint64 _sequenceNumber, + uint64 _timestamp, + uint64 _number, + uint256 _baseFee, + uint256 _blobBaseFee, + bytes32 _hash, + bytes32 _batcherHash, + uint64 _eip1559Elasticity, + uint64 _eip1559Denominator ) internal pure @@ -246,17 +246,17 @@ library Encoding { bytes4 functionSignature = bytes4(keccak256("setL1BlockValuesHolocene()")); return abi.encodePacked( functionSignature, - baseFeeScalar, - blobBaseFeeScalar, - sequenceNumber, - timestamp, - number, - baseFee, - blobBaseFee, - hash, - batcherHash, - eip1559Elasticity, - eip1559Denominator + _baseFeeScalar, + _blobBaseFeeScalar, + _sequenceNumber, + _timestamp, + _number, + _baseFee, + _blobBaseFee, + _hash, + _batcherHash, + _eip1559Elasticity, + _eip1559Denominator ); } } From b5b68decda1c7019130dcacf048ad86f19ac0123 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Mon, 30 Sep 2024 21:50:41 -0400 Subject: [PATCH 092/116] fix(ci): add files to semgrep ignore (#12219) FaultDisputeGame and PermissionedDisputeGame interface files are annoying because the semgrep ignore comment doesn't seem to work properly. --- .circleci/config.yml | 2 +- .semgrepignore | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index cdbe4fa8beb6..fe6228c106b8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1210,7 +1210,7 @@ jobs: # --timeout (in seconds) limits the time per rule and file. 
# SEMGREP_TIMEOUT is the same, but docs have conflicting defaults (5s in CLI flag, 1800 in some places) # https://semgrep.dev/docs/troubleshooting/semgrep-app#if-the-job-is-aborted-due-to-taking-too-long - command: semgrep ci --timeout=100 + command: semgrep ci --timeout=100 --no-suppress-errors # If semgrep hangs, stop the scan after 20m, to prevent a useless 5h job no_output_timeout: 20m - notify-failures-on-develop diff --git a/.semgrepignore b/.semgrepignore index 3208e2604b89..5d358263fb65 100644 --- a/.semgrepignore +++ b/.semgrepignore @@ -41,3 +41,5 @@ packages/contracts-bedrock/src/L2/SuperchainWETH.sol packages/contracts-bedrock/src/L2/interfaces/ISuperchainWETH.sol packages/contracts-bedrock/src/governance/GovernanceToken.sol packages/contracts-bedrock/src/governance/interfaces/IGovernanceToken.sol +packages/contracts-bedrock/src/dispute/interfaces/IFaultDisputeGame.sol +packages/contracts-bedrock/src/dispute/interfaces/IPermissionedDisputeGame.sol From d05fb505809717282d5cee7264a09d26002a4ddd Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Tue, 1 Oct 2024 12:37:04 +1000 Subject: [PATCH 093/116] cannon: Update version of go used in cannon stf-diff (#12218) --- cannon/Dockerfile.diff | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cannon/Dockerfile.diff b/cannon/Dockerfile.diff index 305dada244af..168b664a2baa 100644 --- a/cannon/Dockerfile.diff +++ b/cannon/Dockerfile.diff @@ -1,4 +1,4 @@ -FROM golang:1.21.3-alpine3.18 as builder +FROM golang:1.22.7-alpine3.20 as builder RUN apk add --no-cache make bash From c2dc0abfd3cc98cdb19587a0d079a889dd69f19e Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Tue, 1 Oct 2024 10:42:04 -0600 Subject: [PATCH 094/116] op-e2e: Support specifying allocs in tests (#12216) * op-e2e: Support specifying allocs in tests Previously, the E2E test suite read a hardcoded set of allocs from the `.devnet` directory. There could only be one logical set of allocs per test suite run. 
To get around this limitation, we added matrix jobs to the CI pipeline that swapped in different versions of the allocs for alt-DA, MT Cannon, fault proofs, and the L2OO. This was very inefficient and complex: most tests don't need to run against multiple versions of the allocs, and were running 4 separate build jobs when only one was needed. This PR makes it possible for an individual test to request a specific allocs file. We can now run tests for multiple different configurations of the OP Stack - e.g. alt-DA, L2OO and fault proofs - in a single test run. To make this work, I updated the test suite's initialization method to read alloc files from multiple suffixed `.devnet-` directories. I made a a new `make devnet-allocs-tests` task to generate them. The allocs are then added to a map, and the rest of the tests use getter methods in the `config` package to locate the appropriate configuration structs. This PR seems large, but most of the modified files contain limited changes to comply with the new API for selecting test configuration based on alloc type. For example, an `allocType` configuration parameter was added to various system config structs and the `DefaultRollupTestParams` variable was made into a method so that it always returns a copy for easy extension. The important logic happens in the following files: - Makefile - .circleci/config.yml - op-e2e/config/init.go As part of this PR, I also cleaned up a few issues: - I removed the external OP Geth shim. It wasn't used anywhere, and was introducing a lot of complexity for little gain. - I refactored some tests to use top-level test methods rather than subtests so that they could be more easily parallelized. - I removed references to `UseFaultProofs`, `UseAltDA`, and other environment-based test flags from test utilities. We shouldn't be reading from the environment in test utils. Instead, we should pass in the behavior we want to the test utils themselves. 
* code review updates * fix gastoken test * fixes --- .circleci/config.yml | 72 +----- .gitignore | 2 +- Makefile | 13 ++ go.mod | 9 - go.sum | 20 -- op-chain-ops/genesis/layer_two.go | 2 + op-e2e/actions/altda/altda_test.go | 28 +-- op-e2e/actions/batcher/eip4844_test.go | 2 +- op-e2e/actions/batcher/l2_batcher_test.go | 11 +- op-e2e/actions/derivation/batch_queue_test.go | 3 + op-e2e/actions/derivation/blocktime_test.go | 4 +- op-e2e/actions/derivation/l2_verifier_test.go | 3 + op-e2e/actions/derivation/reorg_test.go | 18 +- .../actions/derivation/system_config_test.go | 6 +- op-e2e/actions/helpers/l1_miner_test.go | 2 +- op-e2e/actions/helpers/l1_replica_test.go | 4 +- op-e2e/actions/helpers/l2_engine_test.go | 8 +- op-e2e/actions/helpers/l2_proposer.go | 14 +- op-e2e/actions/helpers/user.go | 25 ++- op-e2e/actions/helpers/user_test.go | 39 +++- op-e2e/actions/helpers/utils.go | 14 +- op-e2e/actions/interop/interop_test.go | 2 +- op-e2e/actions/proofs/helpers/env.go | 10 +- op-e2e/actions/proposer/l2_proposer_test.go | 48 ++-- op-e2e/actions/safedb/safedb_test.go | 2 +- op-e2e/actions/sequencer/l2_sequencer_test.go | 5 +- op-e2e/actions/sync/sync_test.go | 26 +-- op-e2e/actions/upgrades/dencun_fork_test.go | 14 +- op-e2e/actions/upgrades/ecotone_fork_test.go | 4 +- op-e2e/actions/upgrades/fjord_fork_test.go | 2 +- op-e2e/actions/upgrades/span_batch_test.go | 9 + op-e2e/config/init.go | 211 ++++++++++-------- op-e2e/devnet/devnet_test.go | 51 ----- op-e2e/devnet/setup.go | 105 --------- op-e2e/e2e.go | 27 --- op-e2e/e2eutils/addresses_test.go | 3 + op-e2e/e2eutils/challenger/helper.go | 15 +- op-e2e/e2eutils/disputegame/helper.go | 12 +- .../disputegame/output_cannon_helper.go | 4 +- .../disputegame/output_game_helper.go | 6 +- op-e2e/e2eutils/setup.go | 34 +-- op-e2e/e2eutils/setup_test.go | 4 +- op-e2e/external/config.go | 69 ------ op-e2e/external_geth/.gitignore | 1 - op-e2e/external_geth/Makefile | 8 - op-e2e/external_geth/README.md | 65 ------ 
op-e2e/external_geth/main.go | 205 ----------------- op-e2e/external_geth/main_test.go | 58 ----- op-e2e/external_geth/test_parms.json | 5 - op-e2e/external_geth/tools.go | 5 - op-e2e/faultproofs/multi_test.go | 4 +- op-e2e/faultproofs/permissioned_test.go | 4 +- op-e2e/faultproofs/precompile_test.go | 4 +- op-e2e/opgeth/op_geth.go | 22 +- op-e2e/system/bridge/validity_test.go | 22 +- op-e2e/system/bridge/withdrawal.go | 5 +- op-e2e/system/bridge/withdrawal_test.go | 17 +- op-e2e/system/da/brotli_batcher_test.go | 2 +- op-e2e/system/da/eip4844_test.go | 18 +- op-e2e/system/da/startstop_test.go | 47 ++-- op-e2e/system/e2esys/external.go | 147 ------------ op-e2e/system/e2esys/setup.go | 134 +++++------ op-e2e/system/gastoken/gastoken_test.go | 20 +- op-e2e/system/helpers/withdrawal_helper.go | 48 ++-- op-e2e/system/p2p/gossip_test.go | 9 +- op-e2e/system/proofs/proposer_fp_test.go | 7 +- op-e2e/system/proofs/proposer_l2oo_test.go | 5 +- 67 files changed, 561 insertions(+), 1263 deletions(-) delete mode 100644 op-e2e/devnet/devnet_test.go delete mode 100644 op-e2e/devnet/setup.go delete mode 100644 op-e2e/external/config.go delete mode 100644 op-e2e/external_geth/.gitignore delete mode 100644 op-e2e/external_geth/Makefile delete mode 100644 op-e2e/external_geth/README.md delete mode 100644 op-e2e/external_geth/main.go delete mode 100644 op-e2e/external_geth/main_test.go delete mode 100644 op-e2e/external_geth/test_parms.json delete mode 100644 op-e2e/external_geth/tools.go delete mode 100644 op-e2e/system/e2esys/external.go diff --git a/.circleci/config.yml b/.circleci/config.yml index fe6228c106b8..3d8c25faa5cc 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -255,28 +255,9 @@ jobs: FOUNDRY_PROFILE: ci working_directory: packages/contracts-bedrock - run: - name: Generate L2OO allocs + name: Generate allocs command: | - DEVNET_L2OO="true" make devnet-allocs - cp -r .devnet/ .devnet-l2oo/ - - run: - name: Generate AltDA allocs - command: | - 
DEVNET_ALTDA="true" make devnet-allocs - cp -r .devnet/ .devnet-altda/ - - run: - name: Generate Generic AltDA allocs - command: | - DEVNET_ALTDA="true" GENERIC_ALTDA="true" make devnet-allocs - cp -r .devnet/ .devnet-altda-generic/ - - run: - name: Generate MT-Cannon allocs - command: | - USE_MT_CANNON="true" make devnet-allocs - cp -r .devnet/ .devnet-mt-cannon/ - - run: - name: Generate default allocs - command: make devnet-allocs + make devnet-allocs-tests - save_cache: name: Save Go modules cache key: gomod-contracts-build-{{ checksum "go.sum" }} @@ -296,9 +277,10 @@ jobs: - "packages/contracts-bedrock/deploy-config/devnetL1.json" - "packages/contracts-bedrock/deployments/devnetL1" - ".devnet" + - ".devnet-standard" - ".devnet-l2oo" - - ".devnet-altda" - - ".devnet-altda-generic" + - ".devnet-alt-da" + - ".devnet-alt-da-generic" - ".devnet-mt-cannon" - notify-failures-on-develop @@ -929,39 +911,19 @@ jobs: description: Slack user or group to mention when notifying of failures type: string default: "" - environment: - DEVNET_L2OO: 'false' - OP_E2E_USE_L2OO: 'false' docker: - image: <> resource_class: xlarge parallelism: <> steps: - checkout - - when: - condition: - equal: ['-l2oo', <>] - steps: - - run: - name: Set DEVNET_L2OO = true - command: echo 'export DEVNET_L2OO=true' >> $BASH_ENV - - run: - name: Set OP_E2E_USE_L2OO = true - command: echo 'export OP_E2E_USE_L2OO=true' >> $BASH_ENV - - when: - condition: - equal: ['-altda', <>] - steps: - - run: - name: Set OP_E2E_USE_ALTDA = true - command: echo 'export OP_E2E_USE_ALTDA=true' >> $BASH_ENV - when: condition: equal: ['-mt-cannon', <>] steps: - run: - name: Set OP_E2E_USE_MT_CANNON = true - command: echo 'export OP_E2E_USE_MT_CANNON=true' >> $BASH_ENV + name: Set OP_E2E_ALLOC_TYPE = mt-cannon + command: echo 'export OP_E2E_ALLOC_TYPE=mt-cannon' >> $BASH_ENV - check-changed: patterns: op-(.+),cannon,contracts-bedrock - run: @@ -981,12 +943,7 @@ jobs: name: Load devnet-allocs and artifacts command: | mkdir 
-p .devnet - cp /tmp/workspace/.devnet<>/allocs-l2-delta.json .devnet/allocs-l2-delta.json - cp /tmp/workspace/.devnet<>/allocs-l2-ecotone.json .devnet/allocs-l2-ecotone.json - cp /tmp/workspace/.devnet<>/allocs-l2-fjord.json .devnet/allocs-l2-fjord.json - cp /tmp/workspace/.devnet<>/allocs-l2-granite.json .devnet/allocs-l2-granite.json - cp /tmp/workspace/.devnet<>/allocs-l1.json .devnet/allocs-l1.json - cp /tmp/workspace/.devnet<>/addresses.json .devnet/addresses.json + cp -r /tmp/workspace/.devnet* . cp -r /tmp/workspace/packages/contracts-bedrock/forge-artifacts packages/contracts-bedrock/forge-artifacts cp /tmp/workspace/packages/contracts-bedrock/deploy-config/devnetL1.json packages/contracts-bedrock/deploy-config/devnetL1.json cp -r /tmp/workspace/packages/contracts-bedrock/deployments/devnetL1 packages/contracts-bedrock/deployments/devnetL1 @@ -1470,21 +1427,15 @@ workflows: uses_artifacts: true requires: ["contracts-bedrock-build"] - go-e2e-test: - name: op-e2e-HTTP-tests<< matrix.variant >> - matrix: - parameters: - variant: ["", "-l2oo"] + name: op-e2e-HTTP-tests module: op-e2e target: test-http - parallelism: 4 + parallelism: 8 requires: - go-mod-download - contracts-bedrock-build - go-e2e-test: - name: op-e2e-action-tests<< matrix.variant >> - matrix: - parameters: - variant: ["", "-l2oo", "-altda"] + name: op-e2e-action-tests module: op-e2e target: test-actions parallelism: 1 @@ -1526,7 +1477,6 @@ workflows: - op-e2e-HTTP-tests - op-e2e-fault-proof-tests - op-e2e-action-tests - - op-e2e-action-tests-altda # Not needed for the devnet but we want to make sure they build successfully - cannon-docker-build - op-dispute-mon-docker-build diff --git a/.gitignore b/.gitignore index 9751cc608985..5fc198d02522 100644 --- a/.gitignore +++ b/.gitignore @@ -34,7 +34,7 @@ packages/contracts-bedrock/deployments/anvil !.envrc.example *.log -.devnet +.devnet* # Ignore local fuzzing results **/testdata/fuzz/ diff --git a/Makefile b/Makefile index 
072375e728bf..9ad5c846446a 100644 --- a/Makefile +++ b/Makefile @@ -206,6 +206,19 @@ devnet-allocs: pre-devnet ## Generates allocations for the local devnet PYTHONPATH=./bedrock-devnet $(PYTHON) ./bedrock-devnet/main.py --monorepo-dir=. --allocs .PHONY: devnet-allocs +devnet-allocs-tests: + DEVNET_L2OO=true make devnet-allocs + cp -r .devnet/ .devnet-l2oo/ + DEVNET_ALTDA=true make devnet-allocs + cp -r .devnet/ .devnet-alt-da/ + DEVNET_ALTDA=false GENERIC_ALTDA=true make devnet-allocs + cp -r .devnet/ .devnet-alt-da-generic/ + USE_MT_CANNON=true make devnet-allocs + cp -r .devnet/ .devnet-mt-cannon + make devnet-allocs + cp -r .devnet/ .devnet-standard/ +.PHONY: devnet-allocs-tests + devnet-logs: ## Displays logs for the local devnet @(cd ./ops-bedrock && docker compose logs -f) .PHONY: devnet-logs diff --git a/go.mod b/go.mod index 1e3d2fa2b43d..92a75cf442fa 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,6 @@ require ( github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 github.com/olekukonko/tablewriter v0.0.5 - github.com/onsi/gomega v1.34.1 github.com/pkg/errors v0.9.1 github.com/pkg/profile v1.7.0 github.com/prometheus/client_golang v1.20.4 @@ -86,7 +85,6 @@ require ( github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de // indirect github.com/dlclark/regexp2 v1.7.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 // indirect github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect github.com/dustin/go-humanize v1.0.1 // indirect @@ -136,9 +134,6 @@ require ( github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/goprocess v0.1.4 // indirect - github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect - github.com/karalabe/hid 
v1.0.1-0.20240306101548-573246063e52 // indirect - github.com/kilic/bls12-381 v0.1.0 // indirect github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/kr/pretty v0.3.1 // indirect @@ -206,9 +201,6 @@ require ( github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/protolambda/bls12-381-util v0.1.0 // indirect - github.com/protolambda/zrnt v0.32.2 // indirect - github.com/protolambda/ztyp v0.2.2 // indirect github.com/quic-go/qpack v0.4.0 // indirect github.com/quic-go/quic-go v0.46.0 // indirect github.com/quic-go/webtransport-go v0.8.0 // indirect @@ -234,7 +226,6 @@ require ( github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.etcd.io/bbolt v1.3.5 // indirect - go.uber.org/automaxprocs v1.5.2 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.22.2 // indirect go.uber.org/mock v0.4.0 // indirect diff --git a/go.sum b/go.sum index 922d0cf58a11..5cd91613853f 100644 --- a/go.sum +++ b/go.sum @@ -165,8 +165,6 @@ github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnm github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 h1:C7t6eeMaEQVy6e8CarIhscYQlNmw5e3G36y7l7Y21Ao= -github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0/go.mod h1:56wL82FO0bfMU5RvfXoIwSOP2ggqqxT+tAfNEIyxuHw= github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 h1:qwcF+vdFrvPSEUDSX5RVoRccG8a5DhOdWdQ4zN62zzo= github.com/dop251/goja 
v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= @@ -357,7 +355,6 @@ github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6w github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs= github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -394,8 +391,6 @@ github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABo github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= -github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= -github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -404,10 +399,6 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/karalabe/hid v1.0.1-0.20240306101548-573246063e52 h1:msKODTL1m0wigztaqILOtla9HeW1ciscYG4xjLtvk5I= -github.com/karalabe/hid v1.0.1-0.20240306101548-573246063e52/go.mod h1:qk1sX/IBgppQNcGCRoj90u6EGC056EBoIc1oEjCWla8= -github.com/kilic/bls12-381 v0.1.0 h1:encrdjqKMEvabVQ7qYOKu1OvhqpK4s47wDYtNiPtlp4= -github.com/kilic/bls12-381 v0.1.0/go.mod h1:vDTTHJONJ6G+P2R74EhnyotQDTliQDnFEwhdmfzw1ig= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -667,8 +658,6 @@ github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDj github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= -github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= @@ -694,14 +683,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= 
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/protolambda/bls12-381-util v0.1.0 h1:05DU2wJN7DTU7z28+Q+zejXkIsA/MF8JZQGhtBZZiWk= -github.com/protolambda/bls12-381-util v0.1.0/go.mod h1:cdkysJTRpeFeuUVx/TXGDQNMTiRAalk1vQw3TYTHcE4= github.com/protolambda/ctxlock v0.1.0 h1:rCUY3+vRdcdZXqT07iXgyr744J2DU2LCBIXowYAjBCE= github.com/protolambda/ctxlock v0.1.0/go.mod h1:vefhX6rIZH8rsg5ZpOJfEDYQOppZi19SfPiGOFrNnwM= -github.com/protolambda/zrnt v0.32.2 h1:KZ48T+3UhsPXNdtE/5QEvGc9DGjUaRI17nJaoznoIaM= -github.com/protolambda/zrnt v0.32.2/go.mod h1:A0fezkp9Tt3GBLATSPIbuY4ywYESyAuc/FFmPKg8Lqs= -github.com/protolambda/ztyp v0.2.2 h1:rVcL3vBu9W/aV646zF6caLS/dyn9BN8NYiuJzicLNyY= -github.com/protolambda/ztyp v0.2.2/go.mod h1:9bYgKGqg3wJqT9ac1gI2hnVb0STQq7p/1lapqrqY1dU= github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220714111606-acbb2962fb48 h1:cSo6/vk8YpvkLbk9v3FO97cakNmUoxwi2KMP8hd5WIw= github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220714111606-acbb2962fb48/go.mod h1:4pWaT30XoEx1j8KNJf3TV+E3mQkaufn7mf+jRNb/Fuk= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= @@ -827,8 +810,6 @@ go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME= -go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= @@ -964,7 +945,6 @@ 
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/op-chain-ops/genesis/layer_two.go b/op-chain-ops/genesis/layer_two.go index 5e79b55ce69d..c7c9765019e2 100644 --- a/op-chain-ops/genesis/layer_two.go +++ b/op-chain-ops/genesis/layer_two.go @@ -19,6 +19,8 @@ import ( type L2AllocsMode string +type L2AllocsModeMap map[L2AllocsMode]*foundry.ForgeAllocs + const ( L2AllocsDelta L2AllocsMode = "delta" L2AllocsEcotone L2AllocsMode = "ecotone" diff --git a/op-e2e/actions/altda/altda_test.go b/op-e2e/actions/altda/altda_test.go index ac122d9d999a..21dcbf6b9038 100644 --- a/op-e2e/actions/altda/altda_test.go +++ b/op-e2e/actions/altda/altda_test.go @@ -5,6 +5,8 @@ import ( "math/rand" "testing" + "github.com/ethereum-optimism/optimism/op-e2e/config" + "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" "github.com/stretchr/testify/require" @@ -54,6 +56,7 @@ func NewL2AltDA(t helpers.Testing, params ...AltDAParam) *L2AltDA { ChannelTimeout: 12, L1BlockTime: 12, UseAltDA: true, + AllocType: config.AllocTypeAltDA, } for _, apply := range params { apply(p) @@ -96,7 +99,7 @@ func NewL2AltDA(t helpers.Testing, params ...AltDAParam) *L2AltDA { AddressCorpora: addresses, Bindings: 
helpers.NewL2Bindings(t, cl, engine.GethClient()), } - alice := helpers.NewCrossLayerUser(log, dp.Secrets.Alice, rand.New(rand.NewSource(0xa57b))) + alice := helpers.NewCrossLayerUser(log, dp.Secrets.Alice, rand.New(rand.NewSource(0xa57b)), p.AllocType) alice.L2.SetUserEnv(l2UserEnv) contract, err := bindings.NewDataAvailabilityChallenge(sd.RollupCfg.AltDAConfig.DAChallengeAddress, l1Client) @@ -261,10 +264,6 @@ func (a *L2AltDA) ActL1Finalized(t helpers.Testing) { // Commitment is challenged but never resolved, chain reorgs when challenge window expires. func TestAltDA_ChallengeExpired(gt *testing.T) { - if !e2eutils.UseAltDA() { - gt.Skip("AltDA is not enabled") - } - t := helpers.NewDefaultTesting(gt) harness := NewL2AltDA(t) @@ -321,10 +320,6 @@ func TestAltDA_ChallengeExpired(gt *testing.T) { // Commitment is challenged after sequencer derived the chain but data disappears. A verifier // derivation pipeline stalls until the challenge is resolved and then resumes with data from the contract. func TestAltDA_ChallengeResolved(gt *testing.T) { - if !e2eutils.UseAltDA() { - gt.Skip("AltDA is not enabled") - } - t := helpers.NewDefaultTesting(gt) harness := NewL2AltDA(t) @@ -369,10 +364,6 @@ func TestAltDA_ChallengeResolved(gt *testing.T) { // DA storage service goes offline while sequencer keeps making blocks. When storage comes back online, it should be able to catch up. func TestAltDA_StorageError(gt *testing.T) { - if !e2eutils.UseAltDA() { - gt.Skip("AltDA is not enabled") - } - t := helpers.NewDefaultTesting(gt) harness := NewL2AltDA(t) @@ -398,10 +389,6 @@ func TestAltDA_StorageError(gt *testing.T) { // L1 chain reorgs a resolved challenge so it expires instead causing // the l2 chain to reorg as well. 
func TestAltDA_ChallengeReorg(gt *testing.T) { - if !e2eutils.UseAltDA() { - gt.Skip("AltDA is not enabled") - } - t := helpers.NewDefaultTesting(gt) harness := NewL2AltDA(t) @@ -446,10 +433,6 @@ func TestAltDA_ChallengeReorg(gt *testing.T) { // Sequencer stalls as data is not available, batcher keeps posting, untracked commitments are // challenged and resolved, then sequencer resumes and catches up. func TestAltDA_SequencerStalledMultiChallenges(gt *testing.T) { - if !e2eutils.UseAltDA() { - gt.Skip("AltDA is not enabled") - } - t := helpers.NewDefaultTesting(gt) a := NewL2AltDA(t) @@ -542,9 +525,6 @@ func TestAltDA_SequencerStalledMultiChallenges(gt *testing.T) { // Verify that finalization happens based on altDA windows. // based on l2_batcher_test.go L2Finalization func TestAltDA_Finalization(gt *testing.T) { - if !e2eutils.UseAltDA() { - gt.Skip("AltDA is not enabled") - } t := helpers.NewDefaultTesting(gt) a := NewL2AltDA(t) diff --git a/op-e2e/actions/batcher/eip4844_test.go b/op-e2e/actions/batcher/eip4844_test.go index 1447a07a2076..06f2a86f60c4 100644 --- a/op-e2e/actions/batcher/eip4844_test.go +++ b/op-e2e/actions/batcher/eip4844_test.go @@ -19,7 +19,7 @@ import ( ) func setupEIP4844Test(t helpers.Testing, log log.Logger) (*e2eutils.SetupData, *e2eutils.DeployParams, *helpers.L1Miner, *helpers.L2Sequencer, *helpers.L2Engine, *helpers.L2Verifier, *helpers.L2Engine) { - dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams()) genesisActivation := hexutil.Uint64(0) dp.DeployConfig.L1CancunTimeOffset = &genesisActivation dp.DeployConfig.L2GenesisCanyonTimeOffset = &genesisActivation diff --git a/op-e2e/actions/batcher/l2_batcher_test.go b/op-e2e/actions/batcher/l2_batcher_test.go index e0605f3fb121..8906dcbed4ea 100644 --- a/op-e2e/actions/batcher/l2_batcher_test.go +++ b/op-e2e/actions/batcher/l2_batcher_test.go @@ -6,6 +6,8 @@ import ( "math/rand" "testing" + 
"github.com/ethereum-optimism/optimism/op-e2e/config" + actionsHelpers "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" upgradesHelpers "github.com/ethereum-optimism/optimism/op-e2e/actions/upgrades/helpers" "github.com/ethereum/go-ethereum/common/hexutil" @@ -59,6 +61,7 @@ func NormalBatcher(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { SequencerWindowSize: 24, ChannelTimeout: 20, L1BlockTime: 12, + AllocType: config.AllocTypeStandard, } dp := e2eutils.MakeDeployParams(t, p) upgradesHelpers.ApplyDeltaTimeOffset(dp, deltaTimeOffset) @@ -129,7 +132,7 @@ func NormalBatcher(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { func L2Finalization(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) upgradesHelpers.ApplyDeltaTimeOffset(dp, deltaTimeOffset) sd := e2eutils.Setup(t, dp, actionsHelpers.DefaultAlloc) log := testlog.Logger(t, log.LevelDebug) @@ -226,7 +229,7 @@ func L2Finalization(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { // L2FinalizationWithSparseL1 tests that safe L2 blocks can be finalized even if we do not regularly get a L1 finalization signal func L2FinalizationWithSparseL1(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) upgradesHelpers.ApplyDeltaTimeOffset(dp, deltaTimeOffset) sd := e2eutils.Setup(t, dp, actionsHelpers.DefaultAlloc) log := testlog.Logger(t, log.LevelDebug) @@ -282,7 +285,7 @@ func L2FinalizationWithSparseL1(gt *testing.T, deltaTimeOffset *hexutil.Uint64) // and the safe L2 head should remain unaltered. 
func GarbageBatch(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { t := actionsHelpers.NewDefaultTesting(gt) - p := actionsHelpers.DefaultRollupTestParams + p := actionsHelpers.DefaultRollupTestParams() dp := e2eutils.MakeDeployParams(t, p) upgradesHelpers.ApplyDeltaTimeOffset(dp, deltaTimeOffset) for _, garbageKind := range actionsHelpers.GarbageKinds { @@ -363,6 +366,7 @@ func ExtendedTimeWithoutL1Batches(gt *testing.T, deltaTimeOffset *hexutil.Uint64 SequencerWindowSize: 24, ChannelTimeout: 20, L1BlockTime: 12, + AllocType: config.AllocTypeStandard, } dp := e2eutils.MakeDeployParams(t, p) upgradesHelpers.ApplyDeltaTimeOffset(dp, deltaTimeOffset) @@ -419,6 +423,7 @@ func BigL2Txs(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { SequencerWindowSize: 1000, ChannelTimeout: 200, // give enough space to buffer large amounts of data before submitting it L1BlockTime: 12, + AllocType: config.AllocTypeStandard, } dp := e2eutils.MakeDeployParams(t, p) upgradesHelpers.ApplyDeltaTimeOffset(dp, deltaTimeOffset) diff --git a/op-e2e/actions/derivation/batch_queue_test.go b/op-e2e/actions/derivation/batch_queue_test.go index af5b7231b884..9685cc58b3ac 100644 --- a/op-e2e/actions/derivation/batch_queue_test.go +++ b/op-e2e/actions/derivation/batch_queue_test.go @@ -3,6 +3,8 @@ package derivation import ( "testing" + "github.com/ethereum-optimism/optimism/op-e2e/config" + altda "github.com/ethereum-optimism/optimism/op-alt-da" batcherFlags "github.com/ethereum-optimism/optimism/op-batcher/flags" "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" @@ -29,6 +31,7 @@ func TestDeriveChainFromNearL1Genesis(gt *testing.T) { SequencerWindowSize: 24, ChannelTimeout: 20, L1BlockTime: 12, + AllocType: config.AllocTypeStandard, } dp := e2eutils.MakeDeployParams(t, p) // do not activate Delta hardfork for verifier diff --git a/op-e2e/actions/derivation/blocktime_test.go b/op-e2e/actions/derivation/blocktime_test.go index ec192d08ad78..1855013aad6d 100644 --- 
a/op-e2e/actions/derivation/blocktime_test.go +++ b/op-e2e/actions/derivation/blocktime_test.go @@ -47,7 +47,7 @@ func TestBlockTimeBatchType(t *testing.T) { // This is a regression test against the bug fixed in PR #4566 func BatchInLastPossibleBlocks(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) upgradesHelpers.ApplyDeltaTimeOffset(dp, deltaTimeOffset) dp.DeployConfig.SequencerWindowSize = 4 dp.DeployConfig.L2BlockTime = 2 @@ -158,7 +158,7 @@ func BatchInLastPossibleBlocks(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { // Note: It batches submits when possible. func LargeL1Gaps(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) dp.DeployConfig.L1BlockTime = 4 dp.DeployConfig.L2BlockTime = 2 dp.DeployConfig.SequencerWindowSize = 4 diff --git a/op-e2e/actions/derivation/l2_verifier_test.go b/op-e2e/actions/derivation/l2_verifier_test.go index 2f4fce628d53..afe28e7ad861 100644 --- a/op-e2e/actions/derivation/l2_verifier_test.go +++ b/op-e2e/actions/derivation/l2_verifier_test.go @@ -3,6 +3,8 @@ package derivation import ( "testing" + "github.com/ethereum-optimism/optimism/op-e2e/config" + "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -19,6 +21,7 @@ func TestL2Verifier_SequenceWindow(gt *testing.T) { SequencerWindowSize: 24, ChannelTimeout: 10, L1BlockTime: 15, + AllocType: config.AllocTypeStandard, } dp := e2eutils.MakeDeployParams(t, p) sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc) diff --git a/op-e2e/actions/derivation/reorg_test.go 
b/op-e2e/actions/derivation/reorg_test.go index 6551b314c1ea..10155a471a6f 100644 --- a/op-e2e/actions/derivation/reorg_test.go +++ b/op-e2e/actions/derivation/reorg_test.go @@ -6,6 +6,8 @@ import ( "path" "testing" + "github.com/ethereum-optimism/optimism/op-e2e/config" + actionsHelpers "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" upgradesHelpers "github.com/ethereum-optimism/optimism/op-e2e/actions/upgrades/helpers" "github.com/ethereum/go-ethereum/common" @@ -55,7 +57,7 @@ func TestReorgBatchType(t *testing.T) { func ReorgOrphanBlock(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { t := actionsHelpers.NewDefaultTesting(gt) - sd, _, miner, sequencer, _, verifier, verifierEng, batcher := actionsHelpers.SetupReorgTest(t, actionsHelpers.DefaultRollupTestParams, deltaTimeOffset) + sd, _, miner, sequencer, _, verifier, verifierEng, batcher := actionsHelpers.SetupReorgTest(t, actionsHelpers.DefaultRollupTestParams(), deltaTimeOffset) verifEngClient := verifierEng.EngineClient(t, sd.RollupCfg) sequencer.ActL2PipelineFull(t) @@ -123,7 +125,7 @@ func ReorgOrphanBlock(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { func ReorgFlipFlop(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { t := actionsHelpers.NewDefaultTesting(gt) - sd, _, miner, sequencer, _, verifier, verifierEng, batcher := actionsHelpers.SetupReorgTest(t, actionsHelpers.DefaultRollupTestParams, deltaTimeOffset) + sd, _, miner, sequencer, _, verifier, verifierEng, batcher := actionsHelpers.SetupReorgTest(t, actionsHelpers.DefaultRollupTestParams(), deltaTimeOffset) minerCl := miner.L1Client(t, sd.RollupCfg) verifEngClient := verifierEng.EngineClient(t, sd.RollupCfg) checkVerifEngine := func() { @@ -344,6 +346,7 @@ func DeepReorg(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { SequencerWindowSize: 20, ChannelTimeout: 120, L1BlockTime: 4, + AllocType: config.AllocTypeStandard, }, deltaTimeOffset) minerCl := miner.L1Client(t, sd.RollupCfg) l2Client := seqEngine.EthClient() @@ -363,7 +366,7 
@@ func DeepReorg(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { AddressCorpora: addresses, Bindings: actionsHelpers.NewL2Bindings(t, l2Client, seqEngine.GethClient()), } - alice := actionsHelpers.NewCrossLayerUser(log, dp.Secrets.Alice, rand.New(rand.NewSource(0xa57b))) + alice := actionsHelpers.NewCrossLayerUser(log, dp.Secrets.Alice, rand.New(rand.NewSource(0xa57b)), config.AllocTypeStandard) alice.L2.SetUserEnv(l2UserEnv) // Run one iteration of the L2 derivation pipeline @@ -579,7 +582,7 @@ func RestartOpGeth(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { nodeCfg.DataDir = dbPath return nil } - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) upgradesHelpers.ApplyDeltaTimeOffset(dp, deltaTimeOffset) sd := e2eutils.Setup(t, dp, actionsHelpers.DefaultAlloc) log := testlog.Logger(t, log.LevelDebug) @@ -667,7 +670,7 @@ func RestartOpGeth(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { // the alt block is not synced by the verifier, in unsafe and safe sync modes. 
func ConflictingL2Blocks(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) upgradesHelpers.ApplyDeltaTimeOffset(dp, deltaTimeOffset) sd := e2eutils.Setup(t, dp, actionsHelpers.DefaultAlloc) log := testlog.Logger(t, log.LevelDebug) @@ -694,7 +697,7 @@ func ConflictingL2Blocks(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { AddressCorpora: addresses, Bindings: actionsHelpers.NewL2Bindings(t, l2Cl, altSeqEng.GethClient()), } - alice := actionsHelpers.NewCrossLayerUser(log, dp.Secrets.Alice, rand.New(rand.NewSource(1234))) + alice := actionsHelpers.NewCrossLayerUser(log, dp.Secrets.Alice, rand.New(rand.NewSource(1234)), config.AllocTypeStandard) alice.L2.SetUserEnv(l2UserEnv) sequencer.ActL2PipelineFull(t) @@ -779,6 +782,7 @@ func SyncAfterReorg(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { SequencerWindowSize: 4, ChannelTimeout: 2, L1BlockTime: 12, + AllocType: config.AllocTypeStandard, } sd, dp, miner, sequencer, seqEngine, verifier, _, batcher := actionsHelpers.SetupReorgTest(t, &testingParams, deltaTimeOffset) l2Client := seqEngine.EthClient() @@ -790,7 +794,7 @@ func SyncAfterReorg(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { AddressCorpora: addresses, Bindings: actionsHelpers.NewL2Bindings(t, l2Client, seqEngine.GethClient()), } - alice := actionsHelpers.NewCrossLayerUser(log, dp.Secrets.Alice, rand.New(rand.NewSource(0xa57b))) + alice := actionsHelpers.NewCrossLayerUser(log, dp.Secrets.Alice, rand.New(rand.NewSource(0xa57b)), config.AllocTypeStandard) alice.L2.SetUserEnv(l2UserEnv) sequencer.ActL2PipelineFull(t) diff --git a/op-e2e/actions/derivation/system_config_test.go b/op-e2e/actions/derivation/system_config_test.go index bb62001de4a2..362c9f2dc854 100644 --- a/op-e2e/actions/derivation/system_config_test.go +++ 
b/op-e2e/actions/derivation/system_config_test.go @@ -53,7 +53,7 @@ func TestSystemConfigBatchType(t *testing.T) { func BatcherKeyRotation(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) dp.DeployConfig.L2BlockTime = 2 upgradesHelpers.ApplyDeltaTimeOffset(dp, deltaTimeOffset) sd := e2eutils.Setup(t, dp, actionsHelpers.DefaultAlloc) @@ -228,7 +228,7 @@ func BatcherKeyRotation(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { // and that the L1 data fees to the L2 transaction are applied correctly before, during and after the GPO update in L2. func GPOParamsChange(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) upgradesHelpers.ApplyDeltaTimeOffset(dp, deltaTimeOffset) // activating Delta only, not Ecotone and further: @@ -363,7 +363,7 @@ func GPOParamsChange(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { // the gas limit change event. And checks if a verifier node can reproduce the same gas limit change. 
func GasLimitChange(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) upgradesHelpers.ApplyDeltaTimeOffset(dp, deltaTimeOffset) sd := e2eutils.Setup(t, dp, actionsHelpers.DefaultAlloc) log := testlog.Logger(t, log.LevelDebug) diff --git a/op-e2e/actions/helpers/l1_miner_test.go b/op-e2e/actions/helpers/l1_miner_test.go index b102dcaf84f0..9c4e21885204 100644 --- a/op-e2e/actions/helpers/l1_miner_test.go +++ b/op-e2e/actions/helpers/l1_miner_test.go @@ -15,7 +15,7 @@ import ( func TestL1Miner_BuildBlock(gt *testing.T) { t := NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams()) sd := e2eutils.Setup(t, dp, DefaultAlloc) log := testlog.Logger(t, log.LevelDebug) miner := NewL1Miner(t, log, sd.L1Cfg) diff --git a/op-e2e/actions/helpers/l1_replica_test.go b/op-e2e/actions/helpers/l1_replica_test.go index 5bfe2212f59e..fbd3068d9792 100644 --- a/op-e2e/actions/helpers/l1_replica_test.go +++ b/op-e2e/actions/helpers/l1_replica_test.go @@ -24,7 +24,7 @@ import ( // Test if we can mock an RPC failure func TestL1Replica_ActL1RPCFail(gt *testing.T) { t := NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams()) sd := e2eutils.Setup(t, dp, DefaultAlloc) log := testlog.Logger(t, log.LevelDebug) replica := NewL1Replica(t, log, sd.L1Cfg) @@ -46,7 +46,7 @@ func TestL1Replica_ActL1RPCFail(gt *testing.T) { // Test if we can make the replica sync an artificial L1 chain, rewind it, and reorg it func TestL1Replica_ActL1Sync(gt *testing.T) { t := NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams()) 
dp.DeployConfig.L1CancunTimeOffset = nil sd := e2eutils.Setup(t, dp, DefaultAlloc) log := testlog.Logger(t, log.LevelDebug) diff --git a/op-e2e/actions/helpers/l2_engine_test.go b/op-e2e/actions/helpers/l2_engine_test.go index d74595d31621..b859b393b621 100644 --- a/op-e2e/actions/helpers/l2_engine_test.go +++ b/op-e2e/actions/helpers/l2_engine_test.go @@ -31,7 +31,7 @@ import ( func TestL2EngineAPI(gt *testing.T) { t := NewDefaultTesting(gt) jwtPath := e2eutils.WriteDefaultJWT(t) - dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams()) sd := e2eutils.Setup(t, dp, DefaultAlloc) log := testlog.Logger(t, log.LevelDebug) genesisBlock := sd.L2Cfg.ToBlock() @@ -107,7 +107,7 @@ func TestL2EngineAPI(gt *testing.T) { func TestL2EngineAPIBlockBuilding(gt *testing.T) { t := NewDefaultTesting(gt) jwtPath := e2eutils.WriteDefaultJWT(t) - dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams()) sd := e2eutils.Setup(t, dp, DefaultAlloc) log := testlog.Logger(t, log.LevelDebug) genesisBlock := sd.L2Cfg.ToBlock() @@ -208,7 +208,7 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) { func TestL2EngineAPIFail(gt *testing.T) { t := NewDefaultTesting(gt) jwtPath := e2eutils.WriteDefaultJWT(t) - dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams()) sd := e2eutils.Setup(t, dp, DefaultAlloc) log := testlog.Logger(t, log.LevelDebug) engine := NewL2Engine(t, log, sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath) @@ -228,7 +228,7 @@ func TestL2EngineAPIFail(gt *testing.T) { func TestEngineAPITests(t *testing.T) { test.RunEngineAPITests(t, func(t *testing.T) engineapi.EngineBackend { jwtPath := e2eutils.WriteDefaultJWT(t) - dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams()) sd := e2eutils.Setup(t, dp, DefaultAlloc) n, _, 
apiBackend := newBackend(t, sd.L2Cfg, jwtPath, nil) err := n.Start() diff --git a/op-e2e/actions/helpers/l2_proposer.go b/op-e2e/actions/helpers/l2_proposer.go index f1a0c4d0d634..c30a5006da80 100644 --- a/op-e2e/actions/helpers/l2_proposer.go +++ b/op-e2e/actions/helpers/l2_proposer.go @@ -7,6 +7,8 @@ import ( "math/big" "time" + "github.com/ethereum-optimism/optimism/op-e2e/config" + "github.com/ethereum-optimism/optimism/op-service/sources/batching" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -21,7 +23,6 @@ import ( "github.com/stretchr/testify/require" "github.com/ethereum-optimism/optimism/op-e2e/bindings" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-proposer/metrics" "github.com/ethereum-optimism/optimism/op-proposer/proposer" "github.com/ethereum-optimism/optimism/op-service/dial" @@ -38,6 +39,7 @@ type ProposerCfg struct { DisputeGameType uint32 ProposerKey *ecdsa.PrivateKey AllowNonFinalized bool + AllocType config.AllocType } type L2Proposer struct { @@ -51,6 +53,7 @@ type L2Proposer struct { address common.Address privKey *ecdsa.PrivateKey lastTx common.Hash + allocType config.AllocType } type fakeTxMgr struct { @@ -117,7 +120,7 @@ func NewL2Proposer(t Testing, log log.Logger, cfg *ProposerCfg, l1 *ethclient.Cl var l2OutputOracle *bindings.L2OutputOracleCaller var disputeGameFactory *bindings.DisputeGameFactoryCaller - if e2eutils.UseFaultProofs() { + if cfg.AllocType.UsesProofs() { disputeGameFactory, err = bindings.NewDisputeGameFactoryCaller(*cfg.DisputeGameFactoryAddr, l1) require.NoError(t, err) } else { @@ -138,6 +141,7 @@ func NewL2Proposer(t Testing, log log.Logger, cfg *ProposerCfg, l1 *ethclient.Cl disputeGameFactoryAddr: cfg.DisputeGameFactoryAddr, address: address, privKey: cfg.ProposerKey, + allocType: cfg.AllocType, } } @@ -154,7 +158,7 @@ func (p *L2Proposer) sendTx(t Testing, data []byte) { require.NoError(t, err) var addr common.Address - 
if e2eutils.UseFaultProofs() { + if p.allocType.UsesProofs() { addr = *p.disputeGameFactoryAddr } else { addr = *p.l2OutputOracleAddr @@ -222,7 +226,7 @@ func toCallArg(msg ethereum.CallMsg) interface{} { } func (p *L2Proposer) fetchNextOutput(t Testing) (*eth.OutputResponse, bool, error) { - if e2eutils.UseFaultProofs() { + if p.allocType.UsesProofs() { output, shouldPropose, err := p.driver.FetchDGFOutput(t.Ctx()) if err != nil || !shouldPropose { return nil, false, err @@ -258,7 +262,7 @@ func (p *L2Proposer) ActMakeProposalTx(t Testing) { } var txData []byte - if e2eutils.UseFaultProofs() { + if p.allocType.UsesProofs() { tx, err := p.driver.ProposeL2OutputDGFTxCandidate(context.Background(), output) require.NoError(t, err) txData = tx.TxData diff --git a/op-e2e/actions/helpers/user.go b/op-e2e/actions/helpers/user.go index 2acd6ccaf8e7..d7215b80650f 100644 --- a/op-e2e/actions/helpers/user.go +++ b/op-e2e/actions/helpers/user.go @@ -26,7 +26,6 @@ import ( "github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain" legacybindings "github.com/ethereum-optimism/optimism/op-e2e/bindings" "github.com/ethereum-optimism/optimism/op-e2e/config" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" e2ehelpers "github.com/ethereum-optimism/optimism/op-e2e/system/helpers" "github.com/ethereum-optimism/optimism/op-node/bindings" bindingspreview "github.com/ethereum-optimism/optimism/op-node/bindings/preview" @@ -43,17 +42,18 @@ type L1Bindings struct { DisputeGameFactory *bindings.DisputeGameFactory } -func NewL1Bindings(t Testing, l1Cl *ethclient.Client) *L1Bindings { - optimismPortal, err := bindings.NewOptimismPortal(config.L1Deployments.OptimismPortalProxy, l1Cl) +func NewL1Bindings(t Testing, l1Cl *ethclient.Client, allocType config.AllocType) *L1Bindings { + l1Deployments := config.L1Deployments(allocType) + optimismPortal, err := bindings.NewOptimismPortal(l1Deployments.OptimismPortalProxy, l1Cl) require.NoError(t, err) - l2OutputOracle, err := 
bindings.NewL2OutputOracle(config.L1Deployments.L2OutputOracleProxy, l1Cl) + l2OutputOracle, err := bindings.NewL2OutputOracle(l1Deployments.L2OutputOracleProxy, l1Cl) require.NoError(t, err) - optimismPortal2, err := bindingspreview.NewOptimismPortal2(config.L1Deployments.OptimismPortalProxy, l1Cl) + optimismPortal2, err := bindingspreview.NewOptimismPortal2(l1Deployments.OptimismPortalProxy, l1Cl) require.NoError(t, err) - disputeGameFactory, err := bindings.NewDisputeGameFactory(config.L1Deployments.DisputeGameFactoryProxy, l1Cl) + disputeGameFactory, err := bindings.NewDisputeGameFactory(l1Deployments.DisputeGameFactoryProxy, l1Cl) require.NoError(t, err) return &L1Bindings{ @@ -309,9 +309,11 @@ type CrossLayerUser struct { lastL1DepositTxHash common.Hash lastL2WithdrawalTxHash common.Hash + + allocType config.AllocType } -func NewCrossLayerUser(log log.Logger, priv *ecdsa.PrivateKey, rng *rand.Rand) *CrossLayerUser { +func NewCrossLayerUser(log log.Logger, priv *ecdsa.PrivateKey, rng *rand.Rand, allocType config.AllocType) *CrossLayerUser { addr := crypto.PubkeyToAddress(priv.PublicKey) return &CrossLayerUser{ L1: L1User{ @@ -330,6 +332,7 @@ func NewCrossLayerUser(log log.Logger, priv *ecdsa.PrivateKey, rng *rand.Rand) * address: addr, }, }, + allocType: allocType, } } @@ -427,7 +430,7 @@ func (s *CrossLayerUser) getLatestWithdrawalParams(t Testing) (*withdrawals.Prov var l2OutputBlockNr *big.Int var l2OutputBlock *types.Block - if e2eutils.UseFaultProofs() { + if s.allocType.UsesProofs() { latestGame, err := withdrawals.FindLatestGame(t.Ctx(), &s.L1.env.Bindings.DisputeGameFactory.DisputeGameFactoryCaller, &s.L1.env.Bindings.OptimismPortal2.OptimismPortal2Caller) require.NoError(t, err) l2OutputBlockNr = new(big.Int).SetBytes(latestGame.ExtraData[0:32]) @@ -444,7 +447,7 @@ func (s *CrossLayerUser) getLatestWithdrawalParams(t Testing) (*withdrawals.Prov return nil, fmt.Errorf("the latest L2 output is %d and is not past L2 block %d that includes the withdrawal 
yet, no withdrawal can be proved yet", l2OutputBlock.NumberU64(), l2WithdrawalBlock.NumberU64()) } - if !e2eutils.UseFaultProofs() { + if !s.allocType.UsesProofs() { finalizationPeriod, err := s.L1.env.Bindings.L2OutputOracle.FINALIZATIONPERIODSECONDS(&bind.CallOpts{}) require.NoError(t, err) l1Head, err := s.L1.env.EthCl.HeaderByNumber(t.Ctx(), nil) @@ -457,7 +460,7 @@ func (s *CrossLayerUser) getLatestWithdrawalParams(t Testing) (*withdrawals.Prov header, err := s.L2.env.EthCl.HeaderByNumber(t.Ctx(), l2OutputBlockNr) require.NoError(t, err) - params, err := e2ehelpers.ProveWithdrawalParameters(t.Ctx(), s.L2.env.Bindings.ProofClient, s.L2.env.EthCl, s.L2.env.EthCl, s.lastL2WithdrawalTxHash, header, &s.L1.env.Bindings.L2OutputOracle.L2OutputOracleCaller, &s.L1.env.Bindings.DisputeGameFactory.DisputeGameFactoryCaller, &s.L1.env.Bindings.OptimismPortal2.OptimismPortal2Caller) + params, err := e2ehelpers.ProveWithdrawalParameters(t.Ctx(), s.L2.env.Bindings.ProofClient, s.L2.env.EthCl, s.L2.env.EthCl, s.lastL2WithdrawalTxHash, header, &s.L1.env.Bindings.L2OutputOracle.L2OutputOracleCaller, &s.L1.env.Bindings.DisputeGameFactory.DisputeGameFactoryCaller, &s.L1.env.Bindings.OptimismPortal2.OptimismPortal2Caller, s.allocType) require.NoError(t, err) return ¶ms, nil @@ -473,7 +476,7 @@ func (s *CrossLayerUser) getDisputeGame(t Testing, params withdrawals.ProvenWith Data: params.Data, } - portal2, err := bindingspreview.NewOptimismPortal2(config.L1Deployments.OptimismPortalProxy, s.L1.env.EthCl) + portal2, err := bindingspreview.NewOptimismPortal2(config.L1Deployments(s.allocType).OptimismPortalProxy, s.L1.env.EthCl) require.Nil(t, err) wdHash, err := wd.Hash() diff --git a/op-e2e/actions/helpers/user_test.go b/op-e2e/actions/helpers/user_test.go index 8ee60aa680e3..8990ed5fdd96 100644 --- a/op-e2e/actions/helpers/user_test.go +++ b/op-e2e/actions/helpers/user_test.go @@ -6,6 +6,8 @@ import ( "testing" "time" + "github.com/ethereum-optimism/optimism/op-e2e/config" + 
bindingspreview "github.com/ethereum-optimism/optimism/op-node/bindings/preview" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common/hexutil" @@ -24,6 +26,7 @@ type hardforkScheduledTest struct { ecotoneTime *hexutil.Uint64 fjordTime *hexutil.Uint64 runToFork string + allocType config.AllocType } func (tc *hardforkScheduledTest) SetFork(fork string, v uint64) { @@ -51,6 +54,14 @@ func (tc *hardforkScheduledTest) fork(fork string) **hexutil.Uint64 { } } +func TestCrossLayerUser_Standard(t *testing.T) { + testCrossLayerUser(t, config.AllocTypeStandard) +} + +func TestCrossLayerUser_L2OO(t *testing.T) { + testCrossLayerUser(t, config.AllocTypeL2OO) +} + // TestCrossLayerUser tests that common actions of the CrossLayerUser actor work in various hardfork configurations: // - transact on L1 // - transact on L2 @@ -59,7 +70,7 @@ func (tc *hardforkScheduledTest) fork(fork string) **hexutil.Uint64 { // - prove tx on L1 // - wait 1 week + 1 second // - finalize withdrawal on L1 -func TestCrossLayerUser(t *testing.T) { +func testCrossLayerUser(t *testing.T, allocType config.AllocType) { futureTime := uint64(20) farFutureTime := uint64(2000) @@ -75,14 +86,18 @@ func TestCrossLayerUser(t *testing.T) { fork := fork t.Run("fork_"+fork, func(t *testing.T) { t.Run("at_genesis", func(t *testing.T) { - tc := hardforkScheduledTest{} + tc := hardforkScheduledTest{ + allocType: allocType, + } for _, f := range forks[:i+1] { // activate, all up to and incl this fork, at genesis tc.SetFork(f, 0) } runCrossLayerUserTest(t, tc) }) t.Run("after_genesis", func(t *testing.T) { - tc := hardforkScheduledTest{} + tc := hardforkScheduledTest{ + allocType: allocType, + } for _, f := range forks[:i] { // activate, all up to this fork, at genesis tc.SetFork(f, 0) } @@ -92,7 +107,9 @@ func TestCrossLayerUser(t *testing.T) { runCrossLayerUserTest(t, tc) }) t.Run("not_yet", func(t *testing.T) { - tc := hardforkScheduledTest{} + tc := hardforkScheduledTest{ + 
allocType: allocType, + } for _, f := range forks[:i] { // activate, all up to this fork, at genesis tc.SetFork(f, 0) } @@ -109,7 +126,9 @@ func TestCrossLayerUser(t *testing.T) { func runCrossLayerUserTest(gt *testing.T, test hardforkScheduledTest) { t := NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams) + params := DefaultRollupTestParams() + params.AllocType = test.allocType + dp := e2eutils.MakeDeployParams(t, params) // This overwrites all deploy-config settings, // so even when the deploy-config defaults change, we test the right transitions. dp.DeployConfig.L2GenesisRegolithTimeOffset = test.regolithTime @@ -136,7 +155,7 @@ func runCrossLayerUserTest(gt *testing.T, test hardforkScheduledTest) { seq.RollupClient(), miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg)) var proposer *L2Proposer - if e2eutils.UseFaultProofs() { + if test.allocType.UsesProofs() { optimismPortal2Contract, err := bindingspreview.NewOptimismPortal2(sd.DeploymentsL1.OptimismPortalProxy, miner.EthClient()) require.NoError(t, err) respectedGameType, err := optimismPortal2Contract.RespectedGameType(&bind.CallOpts{}) @@ -148,6 +167,7 @@ func runCrossLayerUserTest(gt *testing.T, test hardforkScheduledTest) { DisputeGameType: respectedGameType, ProposerKey: dp.Secrets.Proposer, AllowNonFinalized: true, + AllocType: test.allocType, }, miner.EthClient(), seq.RollupClient()) } else { proposer = NewL2Proposer(t, log, &ProposerCfg{ @@ -155,6 +175,7 @@ func runCrossLayerUserTest(gt *testing.T, test hardforkScheduledTest) { ProposerKey: dp.Secrets.Proposer, ProposalRetryInterval: 3 * time.Second, AllowNonFinalized: true, + AllocType: test.allocType, }, miner.EthClient(), seq.RollupClient()) } @@ -171,7 +192,7 @@ func runCrossLayerUserTest(gt *testing.T, test hardforkScheduledTest) { EthCl: l1Cl, Signer: types.LatestSigner(sd.L1Cfg.Config), AddressCorpora: addresses, - Bindings: NewL1Bindings(t, l1Cl), + Bindings: NewL1Bindings(t, 
l1Cl, test.allocType), } l2UserEnv := &BasicUserEnv[*L2Bindings]{ EthCl: l2Cl, @@ -180,7 +201,7 @@ func runCrossLayerUserTest(gt *testing.T, test hardforkScheduledTest) { Bindings: NewL2Bindings(t, l2Cl, l2ProofCl), } - alice := NewCrossLayerUser(log, dp.Secrets.Alice, rand.New(rand.NewSource(1234))) + alice := NewCrossLayerUser(log, dp.Secrets.Alice, rand.New(rand.NewSource(1234)), test.allocType) alice.L1.SetUserEnv(l1UserEnv) alice.L2.SetUserEnv(l2UserEnv) @@ -288,7 +309,7 @@ func runCrossLayerUserTest(gt *testing.T, test hardforkScheduledTest) { miner.ActL1EndBlock(t) // If using fault proofs we need to resolve the game - if e2eutils.UseFaultProofs() { + if test.allocType.UsesProofs() { // Resolve the root claim alice.ActResolveClaim(t) miner.ActL1StartBlock(12)(t) diff --git a/op-e2e/actions/helpers/utils.go b/op-e2e/actions/helpers/utils.go index f4f1b812cbaa..a4e3a65fc9b9 100644 --- a/op-e2e/actions/helpers/utils.go +++ b/op-e2e/actions/helpers/utils.go @@ -1,6 +1,7 @@ package helpers import ( + "github.com/ethereum-optimism/optimism/op-e2e/config" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-node/node/safedb" "github.com/ethereum-optimism/optimism/op-node/rollup/interop" @@ -10,11 +11,14 @@ import ( "github.com/ethereum/go-ethereum/p2p" ) -var DefaultRollupTestParams = &e2eutils.TestParams{ - MaxSequencerDrift: 40, - SequencerWindowSize: 120, - ChannelTimeout: 120, - L1BlockTime: 15, +func DefaultRollupTestParams() *e2eutils.TestParams { + return &e2eutils.TestParams{ + MaxSequencerDrift: 40, + SequencerWindowSize: 120, + ChannelTimeout: 120, + L1BlockTime: 15, + AllocType: config.DefaultAllocType, + } } var DefaultAlloc = &e2eutils.AllocParams{PrefundTestUsers: true} diff --git a/op-e2e/actions/interop/interop_test.go b/op-e2e/actions/interop/interop_test.go index 8badc474e944..57c77aaec0b0 100644 --- a/op-e2e/actions/interop/interop_test.go +++ b/op-e2e/actions/interop/interop_test.go @@ -20,7 +20,7 @@ 
var _ interop.InteropBackend = (*testutils.MockInteropBackend)(nil) func TestInteropVerifier(gt *testing.T) { t := helpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams()) sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc) // Temporary work-around: interop needs to be active, for cross-safety to not be instant. // The state genesis in this test is pre-interop however. diff --git a/op-e2e/actions/proofs/helpers/env.go b/op-e2e/actions/proofs/helpers/env.go index ca670acb228d..de18c8cbce93 100644 --- a/op-e2e/actions/proofs/helpers/env.go +++ b/op-e2e/actions/proofs/helpers/env.go @@ -4,6 +4,8 @@ import ( "context" "math/rand" + e2ecfg "github.com/ethereum-optimism/optimism/op-e2e/config" + altda "github.com/ethereum-optimism/optimism/op-alt-da" batcherFlags "github.com/ethereum-optimism/optimism/op-batcher/flags" "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" @@ -90,7 +92,7 @@ func NewL2FaultProofEnv[c any](t helpers.Testing, testCfg *TestCfg[c], tp *e2eut EthCl: l1EthCl, Signer: types.LatestSigner(sd.L1Cfg.Config), AddressCorpora: addresses, - Bindings: helpers.NewL1Bindings(t, l1EthCl), + Bindings: helpers.NewL1Bindings(t, l1EthCl, e2ecfg.AllocTypeStandard), } l2UserEnv := &helpers.BasicUserEnv[*helpers.L2Bindings]{ EthCl: l2EthCl, @@ -98,10 +100,10 @@ func NewL2FaultProofEnv[c any](t helpers.Testing, testCfg *TestCfg[c], tp *e2eut AddressCorpora: addresses, Bindings: helpers.NewL2Bindings(t, l2EthCl, engine.GethClient()), } - alice := helpers.NewCrossLayerUser(log, dp.Secrets.Alice, rand.New(rand.NewSource(0xa57b))) + alice := helpers.NewCrossLayerUser(log, dp.Secrets.Alice, rand.New(rand.NewSource(0xa57b)), e2ecfg.AllocTypeStandard) alice.L1.SetUserEnv(l1UserEnv) alice.L2.SetUserEnv(l2UserEnv) - bob := helpers.NewCrossLayerUser(log, dp.Secrets.Bob, rand.New(rand.NewSource(0xbeef))) + bob := helpers.NewCrossLayerUser(log, 
dp.Secrets.Bob, rand.New(rand.NewSource(0xbeef)), e2ecfg.AllocTypeStandard) bob.L1.SetUserEnv(l1UserEnv) bob.L2.SetUserEnv(l2UserEnv) @@ -204,7 +206,7 @@ func (env *L2FaultProofEnv) RunFaultProofProgram(t helpers.Testing, l2ClaimBlock type TestParam func(p *e2eutils.TestParams) func NewTestParams(params ...TestParam) *e2eutils.TestParams { - dfault := helpers.DefaultRollupTestParams + dfault := helpers.DefaultRollupTestParams() for _, apply := range params { apply(dfault) } diff --git a/op-e2e/actions/proposer/l2_proposer_test.go b/op-e2e/actions/proposer/l2_proposer_test.go index a75ece69b080..917fff2bafd5 100644 --- a/op-e2e/actions/proposer/l2_proposer_test.go +++ b/op-e2e/actions/proposer/l2_proposer_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + "github.com/ethereum-optimism/optimism/op-e2e/config" + actionsHelpers "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" upgradesHelpers "github.com/ethereum-optimism/optimism/op-e2e/actions/upgrades/helpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -23,31 +25,27 @@ import ( // TestProposerBatchType run each proposer-related test case in singular batch mode and span batch mode. 
func TestProposerBatchType(t *testing.T) { - tests := []struct { - name string - f func(gt *testing.T, deltaTimeOffset *hexutil.Uint64) - }{ - {"RunProposerTest", RunProposerTest}, - } - for _, test := range tests { - test := test - t.Run(test.name+"_SingularBatch", func(t *testing.T) { - test.f(t, nil) - }) - } - - deltaTimeOffset := hexutil.Uint64(0) - for _, test := range tests { - test := test - t.Run(test.name+"_SpanBatch", func(t *testing.T) { - test.f(t, &deltaTimeOffset) - }) - } + t.Run("SingularBatch/Standard", func(t *testing.T) { + runProposerTest(t, nil, config.AllocTypeStandard) + }) + t.Run("SingularBatch/L2OO", func(t *testing.T) { + runProposerTest(t, nil, config.AllocTypeL2OO) + }) + t.Run("SpanBatch/Standard", func(t *testing.T) { + deltaTimeOffset := hexutil.Uint64(0) + runProposerTest(t, &deltaTimeOffset, config.AllocTypeStandard) + }) + t.Run("SpanBatch/L2OO", func(t *testing.T) { + deltaTimeOffset := hexutil.Uint64(0) + runProposerTest(t, &deltaTimeOffset, config.AllocTypeL2OO) + }) } -func RunProposerTest(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { +func runProposerTest(gt *testing.T, deltaTimeOffset *hexutil.Uint64, allocType config.AllocType) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + params := actionsHelpers.DefaultRollupTestParams() + params.AllocType = allocType + dp := e2eutils.MakeDeployParams(t, params) upgradesHelpers.ApplyDeltaTimeOffset(dp, deltaTimeOffset) sd := e2eutils.Setup(t, dp, actionsHelpers.DefaultAlloc) log := testlog.Logger(t, log.LevelDebug) @@ -58,7 +56,7 @@ func RunProposerTest(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { rollupSeqCl, miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg)) var proposer *actionsHelpers.L2Proposer - if e2eutils.UseFaultProofs() { + if allocType.UsesProofs() { optimismPortal2Contract, err := bindingspreview.NewOptimismPortal2(sd.DeploymentsL1.OptimismPortalProxy, 
miner.EthClient()) require.NoError(t, err) respectedGameType, err := optimismPortal2Contract.RespectedGameType(&bind.CallOpts{}) @@ -70,6 +68,7 @@ func RunProposerTest(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { DisputeGameType: respectedGameType, ProposerKey: dp.Secrets.Proposer, AllowNonFinalized: true, + AllocType: allocType, }, miner.EthClient(), rollupSeqCl) } else { proposer = actionsHelpers.NewL2Proposer(t, log, &actionsHelpers.ProposerCfg{ @@ -77,6 +76,7 @@ func RunProposerTest(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { ProposerKey: dp.Secrets.Proposer, ProposalRetryInterval: 3 * time.Second, AllowNonFinalized: false, + AllocType: allocType, }, miner.EthClient(), rollupSeqCl) } @@ -118,7 +118,7 @@ func RunProposerTest(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { } // check that L1 stored the expected output root - if e2eutils.UseFaultProofs() { + if allocType.UsesProofs() { optimismPortal2Contract, err := bindingspreview.NewOptimismPortal2(sd.DeploymentsL1.OptimismPortalProxy, miner.EthClient()) require.NoError(t, err) respectedGameType, err := optimismPortal2Contract.RespectedGameType(&bind.CallOpts{}) diff --git a/op-e2e/actions/safedb/safedb_test.go b/op-e2e/actions/safedb/safedb_test.go index 369825e46263..f4a2a1767a1a 100644 --- a/op-e2e/actions/safedb/safedb_test.go +++ b/op-e2e/actions/safedb/safedb_test.go @@ -14,7 +14,7 @@ import ( func TestRecordSafeHeadUpdates(gt *testing.T) { t := actionsHelpers.NewDefaultTesting(gt) - sd, miner, sequencer, verifier, verifierEng, batcher := helpers.SetupSafeDBTest(t, actionsHelpers.DefaultRollupTestParams) + sd, miner, sequencer, verifier, verifierEng, batcher := helpers.SetupSafeDBTest(t, actionsHelpers.DefaultRollupTestParams()) verifEngClient := verifierEng.EngineClient(t, sd.RollupCfg) sequencer.ActL2PipelineFull(t) diff --git a/op-e2e/actions/sequencer/l2_sequencer_test.go b/op-e2e/actions/sequencer/l2_sequencer_test.go index 5192c25d7afd..bd8b0a40770a 100644 --- 
a/op-e2e/actions/sequencer/l2_sequencer_test.go +++ b/op-e2e/actions/sequencer/l2_sequencer_test.go @@ -4,6 +4,8 @@ import ( "math/big" "testing" + "github.com/ethereum-optimism/optimism/op-e2e/config" + "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" @@ -23,6 +25,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) { SequencerWindowSize: 24, ChannelTimeout: 20, L1BlockTime: 12, + AllocType: config.AllocTypeStandard, } dp := e2eutils.MakeDeployParams(t, p) sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc) @@ -92,7 +95,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) { // while the verifier-codepath only ever sees the valid post-reorg L1 chain. func TestL2Sequencer_SequencerOnlyReorg(gt *testing.T) { t := helpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams()) sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc) log := testlog.Logger(t, log.LevelDebug) miner, _, sequencer := helpers.SetupSequencerTest(t, sd, log) diff --git a/op-e2e/actions/sync/sync_test.go b/op-e2e/actions/sync/sync_test.go index 523b68517afb..af6d40408dda 100644 --- a/op-e2e/actions/sync/sync_test.go +++ b/op-e2e/actions/sync/sync_test.go @@ -67,7 +67,7 @@ func TestSyncBatchType(t *testing.T) { func DerivationWithFlakyL1RPC(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) upgradesHelpers.ApplyDeltaTimeOffset(dp, deltaTimeOffset) sd := e2eutils.Setup(t, dp, actionsHelpers.DefaultAlloc) log := testlog.Logger(t, log.LevelError) // mute all the temporary derivation errors that we forcefully create @@ -107,7 +107,7 @@ func DerivationWithFlakyL1RPC(gt *testing.T, deltaTimeOffset 
*hexutil.Uint64) { func FinalizeWhileSyncing(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) upgradesHelpers.ApplyDeltaTimeOffset(dp, deltaTimeOffset) sd := e2eutils.Setup(t, dp, actionsHelpers.DefaultAlloc) log := testlog.Logger(t, log.LevelError) // mute all the temporary derivation errors that we forcefully create @@ -153,7 +153,7 @@ func FinalizeWhileSyncing(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { // TestUnsafeSync tests that a verifier properly imports unsafe blocks via gossip. func TestUnsafeSync(gt *testing.T) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) sd := e2eutils.Setup(t, dp, actionsHelpers.DefaultAlloc) log := testlog.Logger(t, log.LevelInfo) @@ -181,7 +181,7 @@ func TestUnsafeSync(gt *testing.T) { func TestBackupUnsafe(gt *testing.T) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) minTs := hexutil.Uint64(0) // Activate Delta hardfork upgradesHelpers.ApplyDeltaTimeOffset(dp, &minTs) @@ -342,7 +342,7 @@ func TestBackupUnsafe(gt *testing.T) { func TestBackupUnsafeReorgForkChoiceInputError(gt *testing.T) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) minTs := hexutil.Uint64(0) // Activate Delta hardfork upgradesHelpers.ApplyDeltaTimeOffset(dp, &minTs) @@ -475,7 +475,7 @@ func TestBackupUnsafeReorgForkChoiceInputError(gt *testing.T) { func TestBackupUnsafeReorgForkChoiceNotInputError(gt *testing.T) 
{ t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) minTs := hexutil.Uint64(0) // Activate Delta hardfork upgradesHelpers.ApplyDeltaTimeOffset(dp, &minTs) @@ -694,7 +694,7 @@ func BatchSubmitBlock(t actionsHelpers.Testing, miner *actionsHelpers.L1Miner, s // when passed a single unsafe block. op-geth can either snap sync or full sync here. func TestELSync(gt *testing.T) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) sd := e2eutils.Setup(t, dp, actionsHelpers.DefaultAlloc) log := testlog.Logger(t, log.LevelInfo) @@ -747,7 +747,7 @@ func PrepareELSyncedNode(t actionsHelpers.Testing, miner *actionsHelpers.L1Miner // 8. Create 1 more block & batch submit everything & assert that the verifier picked up those blocks func TestELSyncTransitionstoCL(gt *testing.T) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) sd := e2eutils.Setup(t, dp, actionsHelpers.DefaultAlloc) logger := testlog.Logger(t, log.LevelInfo) @@ -804,7 +804,7 @@ func TestELSyncTransitionstoCL(gt *testing.T) { func TestELSyncTransitionsToCLSyncAfterNodeRestart(gt *testing.T) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) sd := e2eutils.Setup(t, dp, actionsHelpers.DefaultAlloc) logger := testlog.Logger(t, log.LevelInfo) @@ -846,7 +846,7 @@ func TestELSyncTransitionsToCLSyncAfterNodeRestart(gt *testing.T) { func TestForcedELSyncCLAfterNodeRestart(gt *testing.T) { t := actionsHelpers.NewDefaultTesting(gt) - dp := 
e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) sd := e2eutils.Setup(t, dp, actionsHelpers.DefaultAlloc) logger := testlog.Logger(t, log.LevelInfo) @@ -892,7 +892,7 @@ func TestForcedELSyncCLAfterNodeRestart(gt *testing.T) { func TestInvalidPayloadInSpanBatch(gt *testing.T) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) minTs := hexutil.Uint64(0) // Activate Delta hardfork upgradesHelpers.ApplyDeltaTimeOffset(dp, &minTs) @@ -997,7 +997,7 @@ func TestInvalidPayloadInSpanBatch(gt *testing.T) { func TestSpanBatchAtomicity_Consolidation(gt *testing.T) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) minTs := hexutil.Uint64(0) // Activate Delta hardfork upgradesHelpers.ApplyDeltaTimeOffset(dp, &minTs) @@ -1065,7 +1065,7 @@ func TestSpanBatchAtomicity_Consolidation(gt *testing.T) { func TestSpanBatchAtomicity_ForceAdvance(gt *testing.T) { t := actionsHelpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, actionsHelpers.DefaultRollupTestParams()) minTs := hexutil.Uint64(0) // Activate Delta hardfork upgradesHelpers.ApplyDeltaTimeOffset(dp, &minTs) diff --git a/op-e2e/actions/upgrades/dencun_fork_test.go b/op-e2e/actions/upgrades/dencun_fork_test.go index a9e3eb2cc256..b15634c78adf 100644 --- a/op-e2e/actions/upgrades/dencun_fork_test.go +++ b/op-e2e/actions/upgrades/dencun_fork_test.go @@ -19,7 +19,7 @@ import ( func TestDencunL1ForkAfterGenesis(gt *testing.T) { t := helpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams) + dp := 
e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams()) offset := hexutil.Uint64(24) dp.DeployConfig.L1CancunTimeOffset = &offset sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc) @@ -62,7 +62,7 @@ func TestDencunL1ForkAfterGenesis(gt *testing.T) { func TestDencunL1ForkAtGenesis(gt *testing.T) { t := helpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams()) require.Zero(t, *dp.DeployConfig.L1CancunTimeOffset) sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc) log := testlog.Logger(t, log.LevelDebug) @@ -119,7 +119,7 @@ func verifyEcotoneBlock(gt *testing.T, header *types.Header) { func TestDencunL2ForkAfterGenesis(gt *testing.T) { t := helpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams()) require.Zero(t, *dp.DeployConfig.L1CancunTimeOffset) // This test wil fork on the second block offset := hexutil.Uint64(dp.DeployConfig.L2BlockTime * 2) @@ -157,7 +157,7 @@ func TestDencunL2ForkAfterGenesis(gt *testing.T) { func TestDencunL2ForkAtGenesis(gt *testing.T) { t := helpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams()) require.Zero(t, *dp.DeployConfig.L2GenesisEcotoneTimeOffset) sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc) @@ -195,7 +195,7 @@ func newEngine(t helpers.Testing, sd *e2eutils.SetupData, log log.Logger) *helpe // TestDencunBlobTxRPC tries to send a Blob tx to the L2 engine via RPC, it should not be accepted. 
func TestDencunBlobTxRPC(gt *testing.T) { t := helpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams()) sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc) log := testlog.Logger(t, log.LevelDebug) @@ -209,7 +209,7 @@ func TestDencunBlobTxRPC(gt *testing.T) { // TestDencunBlobTxInTxPool tries to insert a blob tx directly into the tx pool, it should not be accepted. func TestDencunBlobTxInTxPool(gt *testing.T) { t := helpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams()) sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc) log := testlog.Logger(t, log.LevelDebug) @@ -222,7 +222,7 @@ func TestDencunBlobTxInTxPool(gt *testing.T) { // TestDencunBlobTxInclusion tries to send a Blob tx to the L2 engine, it should not be accepted. func TestDencunBlobTxInclusion(gt *testing.T) { t := helpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams()) sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc) log := testlog.Logger(t, log.LevelDebug) diff --git a/op-e2e/actions/upgrades/ecotone_fork_test.go b/op-e2e/actions/upgrades/ecotone_fork_test.go index c4135266e162..6b51b5b470a4 100644 --- a/op-e2e/actions/upgrades/ecotone_fork_test.go +++ b/op-e2e/actions/upgrades/ecotone_fork_test.go @@ -42,7 +42,7 @@ func verifyCodeHashMatches(t helpers.Testing, client *ethclient.Client, address func TestEcotoneNetworkUpgradeTransactions(gt *testing.T) { t := helpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams()) ecotoneOffset := hexutil.Uint64(4) log := testlog.Logger(t, log.LevelDebug) @@ -240,7 +240,7 @@ func TestEcotoneNetworkUpgradeTransactions(gt 
*testing.T) { // TestEcotoneBeforeL1 tests that the L2 Ecotone fork can activate before L1 Dencun does func TestEcotoneBeforeL1(gt *testing.T) { t := helpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams()) offset := hexutil.Uint64(0) farOffset := hexutil.Uint64(10000) dp.DeployConfig.L2GenesisRegolithTimeOffset = &offset diff --git a/op-e2e/actions/upgrades/fjord_fork_test.go b/op-e2e/actions/upgrades/fjord_fork_test.go index 9444fcfcb7ae..564ee49aa17d 100644 --- a/op-e2e/actions/upgrades/fjord_fork_test.go +++ b/op-e2e/actions/upgrades/fjord_fork_test.go @@ -31,7 +31,7 @@ var ( func TestFjordNetworkUpgradeTransactions(gt *testing.T) { t := helpers.NewDefaultTesting(gt) - dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams) + dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams()) genesisBlock := hexutil.Uint64(0) fjordOffset := hexutil.Uint64(2) diff --git a/op-e2e/actions/upgrades/span_batch_test.go b/op-e2e/actions/upgrades/span_batch_test.go index fc1707b158a0..3888cae8a5e0 100644 --- a/op-e2e/actions/upgrades/span_batch_test.go +++ b/op-e2e/actions/upgrades/span_batch_test.go @@ -6,6 +6,8 @@ import ( crand "crypto/rand" "fmt" + "github.com/ethereum-optimism/optimism/op-e2e/config" + "math/big" "math/rand" "testing" @@ -39,6 +41,7 @@ func TestDropSpanBatchBeforeHardfork(gt *testing.T) { SequencerWindowSize: 24, ChannelTimeout: 20, L1BlockTime: 12, + AllocType: config.AllocTypeStandard, } dp := e2eutils.MakeDeployParams(t, p) // do not activate Delta hardfork for verifier @@ -128,6 +131,7 @@ func TestHardforkMiddleOfSpanBatch(gt *testing.T) { SequencerWindowSize: 24, ChannelTimeout: 20, L1BlockTime: 12, + AllocType: config.AllocTypeStandard, } dp := e2eutils.MakeDeployParams(t, p) @@ -241,6 +245,7 @@ func TestAcceptSingularBatchAfterHardfork(gt *testing.T) { SequencerWindowSize: 24, ChannelTimeout: 20, L1BlockTime: 12, 
+ AllocType: config.AllocTypeStandard, } minTs := hexutil.Uint64(0) dp := e2eutils.MakeDeployParams(t, p) @@ -327,6 +332,7 @@ func TestMixOfBatchesAfterHardfork(gt *testing.T) { SequencerWindowSize: 24, ChannelTimeout: 20, L1BlockTime: 12, + AllocType: config.AllocTypeStandard, } minTs := hexutil.Uint64(0) dp := e2eutils.MakeDeployParams(t, p) @@ -418,6 +424,7 @@ func TestSpanBatchEmptyChain(gt *testing.T) { SequencerWindowSize: 24, ChannelTimeout: 20, L1BlockTime: 12, + AllocType: config.AllocTypeStandard, } dp := e2eutils.MakeDeployParams(t, p) minTs := hexutil.Uint64(0) @@ -481,6 +488,7 @@ func TestSpanBatchLowThroughputChain(gt *testing.T) { SequencerWindowSize: 24, ChannelTimeout: 20, L1BlockTime: 12, + AllocType: config.AllocTypeStandard, } dp := e2eutils.MakeDeployParams(t, p) minTs := hexutil.Uint64(0) @@ -595,6 +603,7 @@ func TestBatchEquivalence(gt *testing.T) { SequencerWindowSize: 24, ChannelTimeout: 20, L1BlockTime: 12, + AllocType: config.AllocTypeStandard, } // Delta activated deploy config dp := e2eutils.MakeDeployParams(t, p) diff --git a/op-e2e/config/init.go b/op-e2e/config/init.go index 526c7a96b389..6fe29126996a 100644 --- a/op-e2e/config/init.go +++ b/op-e2e/config/init.go @@ -1,14 +1,11 @@ package config import ( - "encoding/json" - "errors" - "flag" "fmt" "log/slog" "os" "path/filepath" - "testing" + "slices" "time" "github.com/ethereum/go-ethereum/common/hexutil" @@ -16,7 +13,6 @@ import ( "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" - "github.com/ethereum-optimism/optimism/op-e2e/external" op_service "github.com/ethereum-optimism/optimism/op-service" oplog "github.com/ethereum-optimism/optimism/op-service/log" ) @@ -32,6 +28,35 @@ const ( LegacyLevelTrace ) +type AllocType string + +const ( + AllocTypeStandard AllocType = "standard" + AllocTypeAltDA AllocType = "alt-da" + AllocTypeL2OO AllocType = "l2oo" + AllocTypeMTCannon AllocType = "mt-cannon" + + 
DefaultAllocType = AllocTypeStandard +) + +func (a AllocType) Check() error { + if !slices.Contains(allocTypes, a) { + return fmt.Errorf("unknown alloc type: %q", a) + } + return nil +} + +func (a AllocType) UsesProofs() bool { + switch a { + case AllocTypeStandard, AllocTypeMTCannon, AllocTypeAltDA: + return true + default: + return false + } +} + +var allocTypes = []AllocType{AllocTypeStandard, AllocTypeAltDA, AllocTypeL2OO, AllocTypeMTCannon} + var ( // All of the following variables are set in the init function // and read from JSON files on disk that are generated by the @@ -39,27 +64,56 @@ var ( // in end to end tests. // L1Allocs represents the L1 genesis block state. - L1Allocs *foundry.ForgeAllocs + l1AllocsByType = make(map[AllocType]*foundry.ForgeAllocs) // L1Deployments maps contract names to accounts in the L1 // genesis block state. - L1Deployments *genesis.L1Deployments + l1DeploymentsByType = make(map[AllocType]*genesis.L1Deployments) // l2Allocs represents the L2 allocs, by hardfork/mode (e.g. delta, ecotone, interop, other) - l2Allocs map[genesis.L2AllocsMode]*foundry.ForgeAllocs + l2AllocsByType = make(map[AllocType]genesis.L2AllocsModeMap) // DeployConfig represents the deploy config used by the system. - DeployConfig *genesis.DeployConfig - // ExternalL2Shim is the shim to use if external ethereum client testing is - // enabled - ExternalL2Shim string - // ExternalL2TestParms is additional metadata for executing external L2 - // tests. 
- ExternalL2TestParms external.TestParms + deployConfigsByType = make(map[AllocType]*genesis.DeployConfig) // EthNodeVerbosity is the (legacy geth) level of verbosity to output EthNodeVerbosity int ) -func init() { - var l1AllocsPath, l2AllocsDir, l1DeploymentsPath, deployConfigPath, externalL2 string +func L1Allocs(allocType AllocType) *foundry.ForgeAllocs { + allocs, ok := l1AllocsByType[allocType] + if !ok { + panic(fmt.Errorf("unknown L1 alloc type: %q", allocType)) + } + return allocs.Copy() +} +func L1Deployments(allocType AllocType) *genesis.L1Deployments { + deployments, ok := l1DeploymentsByType[allocType] + if !ok { + panic(fmt.Errorf("unknown L1 deployments type: %q", allocType)) + } + return deployments.Copy() +} + +func L2Allocs(allocType AllocType, mode genesis.L2AllocsMode) *foundry.ForgeAllocs { + allocsByType, ok := l2AllocsByType[allocType] + if !ok { + panic(fmt.Errorf("unknown L2 alloc type: %q", allocType)) + } + + allocs, ok := allocsByType[mode] + if !ok { + panic(fmt.Errorf("unknown L2 allocs mode: %q", mode)) + } + return allocs.Copy() +} + +func DeployConfig(allocType AllocType) *genesis.DeployConfig { + dc, ok := deployConfigsByType[allocType] + if !ok { + panic(fmt.Errorf("unknown deploy config type: %q", allocType)) + } + return dc.Copy() +} + +func init() { cwd, err := os.Getwd() if err != nil { panic(err) @@ -69,19 +123,9 @@ func init() { panic(err) } - defaultL1AllocsPath := filepath.Join(root, ".devnet", "allocs-l1.json") - defaultL2AllocsDir := filepath.Join(root, ".devnet") - defaultL1DeploymentsPath := filepath.Join(root, ".devnet", "addresses.json") - defaultDeployConfigPath := filepath.Join(root, "packages", "contracts-bedrock", "deploy-config", "devnetL1.json") - - flag.StringVar(&l1AllocsPath, "l1-allocs", defaultL1AllocsPath, "") - flag.StringVar(&l2AllocsDir, "l2-allocs-dir", defaultL2AllocsDir, "") - flag.StringVar(&l1DeploymentsPath, "l1-deployments", defaultL1DeploymentsPath, "") - flag.StringVar(&deployConfigPath, 
"deploy-config", defaultDeployConfigPath, "") - flag.StringVar(&externalL2, "externalL2", "", "Enable tests with external L2") - flag.IntVar(&EthNodeVerbosity, "ethLogVerbosity", LegacyLevelInfo, "The (legacy geth) level of verbosity to use for the eth node logs") - testing.Init() // Register test flags before parsing - flag.Parse() + for _, allocType := range allocTypes { + initAllocType(root, allocType) + } // Setup global logger lvl := log.FromLegacyLevel(EthNodeVerbosity) @@ -102,100 +146,81 @@ func init() { }) } oplog.SetGlobalLogHandler(handler) +} - if err := allExist(l1AllocsPath, l1DeploymentsPath, deployConfigPath); err != nil { +func initAllocType(root string, allocType AllocType) { + devnetDir := filepath.Join(root, fmt.Sprintf(".devnet-%s", allocType)) + l1AllocsPath := filepath.Join(devnetDir, "allocs-l1.json") + l2AllocsDir := devnetDir + l1DeploymentsPath := filepath.Join(devnetDir, "addresses.json") + deployConfigPath := filepath.Join(root, "packages", "contracts-bedrock", "deploy-config", "devnetL1.json") + + var missing bool + for _, fp := range []string{devnetDir, l1AllocsPath, l1DeploymentsPath} { + _, err := os.Stat(fp) + if os.IsNotExist(err) { + missing = true + break + } + if err != nil { + panic(err) + } + } + if missing { + log.Warn("allocs file not found, skipping", "allocType", allocType) return } - L1Allocs, err = foundry.LoadForgeAllocs(l1AllocsPath) + l1Allocs, err := foundry.LoadForgeAllocs(l1AllocsPath) if err != nil { panic(err) } - l2Allocs = make(map[genesis.L2AllocsMode]*foundry.ForgeAllocs) + l1AllocsByType[allocType] = l1Allocs + l2Alloc := make(map[genesis.L2AllocsMode]*foundry.ForgeAllocs) mustL2Allocs := func(mode genesis.L2AllocsMode) { name := "allocs-l2-" + string(mode) allocs, err := foundry.LoadForgeAllocs(filepath.Join(l2AllocsDir, name+".json")) if err != nil { panic(err) } - l2Allocs[mode] = allocs + l2Alloc[mode] = allocs } mustL2Allocs(genesis.L2AllocsGranite) mustL2Allocs(genesis.L2AllocsFjord) 
mustL2Allocs(genesis.L2AllocsEcotone) mustL2Allocs(genesis.L2AllocsDelta) - L1Deployments, err = genesis.NewL1Deployments(l1DeploymentsPath) + l2AllocsByType[allocType] = l2Alloc + l1Deployments, err := genesis.NewL1Deployments(l1DeploymentsPath) if err != nil { panic(err) } - DeployConfig, err = genesis.NewDeployConfig(deployConfigPath) + l1DeploymentsByType[allocType] = l1Deployments + dc, err := genesis.NewDeployConfig(deployConfigPath) if err != nil { panic(err) } // Do not use clique in the in memory tests. Otherwise block building // would be much more complex. - DeployConfig.L1UseClique = false + dc.L1UseClique = false // Set the L1 genesis block timestamp to now - DeployConfig.L1GenesisBlockTimestamp = hexutil.Uint64(time.Now().Unix()) - DeployConfig.FundDevAccounts = true + dc.L1GenesisBlockTimestamp = hexutil.Uint64(time.Now().Unix()) + dc.FundDevAccounts = true // Speed up the in memory tests - DeployConfig.L1BlockTime = 2 - DeployConfig.L2BlockTime = 1 - - if L1Deployments != nil { - DeployConfig.SetDeployments(L1Deployments) - } - - if externalL2 != "" { - if err := initExternalL2(externalL2); err != nil { - panic(fmt.Errorf("could not initialize external L2: %w", err)) - } - } -} - -func L2Allocs(mode genesis.L2AllocsMode) *foundry.ForgeAllocs { - allocs, ok := l2Allocs[mode] - if !ok { - panic(fmt.Errorf("unknown L2 allocs mode: %q", mode)) - } - return allocs.Copy() + dc.L1BlockTime = 2 + dc.L2BlockTime = 1 + dc.SetDeployments(l1Deployments) + deployConfigsByType[allocType] = dc } -func initExternalL2(externalL2 string) error { - var err error - ExternalL2Shim, err = filepath.Abs(filepath.Join(externalL2, "shim")) - if err != nil { - return fmt.Errorf("could not compute abs of externalL2Nodes shim: %w", err) - } - - _, err = os.Stat(ExternalL2Shim) - if err != nil { - return fmt.Errorf("failed to stat externalL2Nodes path: %w", err) - } - - file, err := os.Open(filepath.Join(externalL2, "test_parms.json")) - if err != nil { - if errors.Is(err, 
os.ErrNotExist) { - return nil - } - return fmt.Errorf("could not open external L2 test parms: %w", err) - } - defer file.Close() - - if err := json.NewDecoder(file).Decode(&ExternalL2TestParms); err != nil { - return fmt.Errorf("could not decode external L2 test parms: %w", err) +func AllocTypeFromEnv() AllocType { + allocType := os.Getenv("OP_E2E_ALLOC_TYPE") + if allocType == "" { + return DefaultAllocType } - - return nil -} - -func allExist(filenames ...string) error { - for _, filename := range filenames { - if _, err := os.Stat(filename); err != nil { - fmt.Printf("file %s does not exist, skipping genesis generation\n", filename) - return err - } + out := AllocType(allocType) + if err := out.Check(); err != nil { + panic(err) } - return nil + return out } diff --git a/op-e2e/devnet/devnet_test.go b/op-e2e/devnet/devnet_test.go deleted file mode 100644 index 8bf7bd7665da..000000000000 --- a/op-e2e/devnet/devnet_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package devnet - -import ( - "context" - "log/slog" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/op-e2e/system/bridge" - "github.com/ethereum-optimism/optimism/op-service/testlog" -) - -func TestDevnet(t *testing.T) { - lgr := testlog.Logger(t, slog.LevelDebug) - ctx, done := context.WithTimeout(context.Background(), time.Minute) - defer done() - - sys, err := NewSystem(ctx, lgr) - require.NoError(t, err) - - t.Run("SyncFinalized", func(t *testing.T) { - // SyncFinalized can run in parallel to Withdrawals test, because propopser - // already posts unfinalized output roots in devnet mode. 
- t.Parallel() - testSyncFinalized(t, sys) - }) - t.Run("Withdrawal", func(t *testing.T) { - t.Parallel() - bridge.RunWithdrawalsTest(t, sys) - }) -} - -func testSyncFinalized(t *testing.T, sys *System) { - const timeout = 4 * time.Minute - ctx, done := context.WithTimeout(context.Background(), timeout) - defer done() - - require.EventuallyWithT(t, func(tc *assert.CollectT) { - ss, err := sys.Rollup.SyncStatus(ctx) - assert.NoError(tc, err) - if err != nil { - t.Log(err) - return - } - t.Logf("SyncStatus: %+v", ss) - assert.NotZero(tc, ss.FinalizedL2.Number) - }, timeout, 2*time.Second) -} diff --git a/op-e2e/devnet/setup.go b/op-e2e/devnet/setup.go deleted file mode 100644 index 557caed0388b..000000000000 --- a/op-e2e/devnet/setup.go +++ /dev/null @@ -1,105 +0,0 @@ -package devnet - -import ( - "context" - "crypto/ecdsa" - "os" - "path/filepath" - - "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" - - "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" - "github.com/ethereum-optimism/optimism/op-e2e/config" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" - op_service "github.com/ethereum-optimism/optimism/op-service" - "github.com/ethereum-optimism/optimism/op-service/dial" - "github.com/ethereum-optimism/optimism/op-service/sources" - - "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/log" -) - -// TODO(#10968): read from docker-compose.yml -const ( - L1RPCURL = "http://127.0.0.1:8545" - L2RPCURL = "http://127.0.0.1:9545" - RollupURL = "http://127.0.0.1:7545" -) - -type System struct { - L1 *ethclient.Client - L2 *ethclient.Client - Rollup *sources.RollupClient - Cfg e2esys.SystemConfig -} - -func NewSystem(ctx context.Context, lgr log.Logger) (sys *System, err error) { - sys = new(System) - sys.L1, err = dial.DialEthClientWithTimeout(ctx, dial.DefaultDialTimeout, lgr, L1RPCURL) - if err != nil { - return nil, err - } - sys.L2, err = dial.DialEthClientWithTimeout(ctx, dial.DefaultDialTimeout, lgr, 
L2RPCURL) - if err != nil { - return nil, err - } - sys.Rollup, err = dial.DialRollupClientWithTimeout(ctx, dial.DefaultDialTimeout, lgr, RollupURL) - if err != nil { - return nil, err - } - - secrets, err := e2eutils.DefaultMnemonicConfig.Secrets() - if err != nil { - return nil, err - } - - // TODO(#10968): We need to re-read the deploy config because op-e2e/config.init() overwrites - // some deploy config variables. This will be fixed soon. - cwd, err := os.Getwd() - if err != nil { - return nil, err - } - root, err := op_service.FindMonorepoRoot(cwd) - if err != nil { - return nil, err - } - deployConfigPath := filepath.Join(root, "packages", "contracts-bedrock", "deploy-config", "devnetL1.json") - deployConfig, err := genesis.NewDeployConfig(deployConfigPath) - if err != nil { - return nil, err - } - - // Incomplete SystemConfig suffices for withdrawal test (only consumer right now) - sys.Cfg = e2esys.SystemConfig{ - DeployConfig: deployConfig, - L1Deployments: config.L1Deployments.Copy(), - Secrets: secrets, - } - return sys, nil -} - -func (s System) NodeClient(role string) *ethclient.Client { - switch role { - case e2esys.RoleL1: - return s.L1 - case e2esys.RoleSeq, e2esys.RoleVerif: - // we have only one L2 node - return s.L2 - default: - panic("devnet.System: unknown role: " + role) - } -} - -func (s System) RollupClient(string) *sources.RollupClient { - // we ignore role, have only one L2 rollup - return s.Rollup -} - -func (s System) Config() e2esys.SystemConfig { - return s.Cfg -} - -func (s System) TestAccount(idx int) *ecdsa.PrivateKey { - // first 12 indices are in use by the devnet - return s.Cfg.Secrets.AccountAtIdx(13 + idx) -} diff --git a/op-e2e/e2e.go b/op-e2e/e2e.go index ebcf5750e381..54533cf4fef9 100644 --- a/op-e2e/e2e.go +++ b/op-e2e/e2e.go @@ -2,30 +2,15 @@ package op_e2e import ( "crypto/md5" - "fmt" "os" - "runtime" "strconv" "strings" "testing" - "github.com/ethereum-optimism/optimism/op-e2e/config" 
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils" ) func RunMain(m *testing.M) { - if config.ExternalL2Shim != "" { - fmt.Println("Running tests with external L2 process adapter at ", config.ExternalL2Shim) - // As these are integration tests which launch many other processes, the - // default parallelism makes the tests flaky. This change aims to - // reduce the flakiness of these tests. - maxProcs := runtime.NumCPU() / 4 - if maxProcs == 0 { - maxProcs = 1 - } - runtime.GOMAXPROCS(maxProcs) - } - os.Exit(m.Run()) } @@ -67,18 +52,6 @@ func UsesCannon(t e2eutils.TestingBase) { } } -func SkipOnFaultProofs(t e2eutils.TestingBase) { - if e2eutils.UseFaultProofs() { - t.Skip("Skipping test for fault proofs") - } -} - -func SkipOnL2OO(t e2eutils.TestingBase) { - if e2eutils.UseL2OO() { - t.Skip("Skipping test for L2OO") - } -} - type executorInfo struct { total uint64 idx uint64 diff --git a/op-e2e/e2eutils/addresses_test.go b/op-e2e/e2eutils/addresses_test.go index c140a411937a..d45e9dbfc4d0 100644 --- a/op-e2e/e2eutils/addresses_test.go +++ b/op-e2e/e2eutils/addresses_test.go @@ -3,6 +3,8 @@ package e2eutils import ( "testing" + "github.com/ethereum-optimism/optimism/op-e2e/config" + "github.com/stretchr/testify/require" ) @@ -12,6 +14,7 @@ func TestCollectAddresses(t *testing.T) { SequencerWindowSize: 120, ChannelTimeout: 120, L1BlockTime: 15, + AllocType: config.AllocTypeStandard, } dp := MakeDeployParams(t, tp) alloc := &AllocParams{PrefundTestUsers: true} diff --git a/op-e2e/e2eutils/challenger/helper.go b/op-e2e/e2eutils/challenger/helper.go index d8fb223507ac..2d72e53ecc62 100644 --- a/op-e2e/e2eutils/challenger/helper.go +++ b/op-e2e/e2eutils/challenger/helper.go @@ -11,6 +11,8 @@ import ( "testing" "time" + e2econfig "github.com/ethereum-optimism/optimism/op-e2e/config" + "github.com/ethereum-optimism/optimism/op-service/crypto" "github.com/ethereum/go-ethereum/ethclient" @@ -23,7 +25,6 @@ import ( challenger 
"github.com/ethereum-optimism/optimism/op-challenger" "github.com/ethereum-optimism/optimism/op-challenger/config" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/cliapp" @@ -115,12 +116,12 @@ func FindMonorepoRoot(t *testing.T) string { return "" } -func applyCannonConfig(c *config.Config, t *testing.T, rollupCfg *rollup.Config, l2Genesis *core.Genesis) { +func applyCannonConfig(c *config.Config, t *testing.T, rollupCfg *rollup.Config, l2Genesis *core.Genesis, allocType e2econfig.AllocType) { require := require.New(t) root := FindMonorepoRoot(t) c.Cannon.VmBin = root + "cannon/bin/cannon" c.Cannon.Server = root + "op-program/bin/op-program" - if e2eutils.UseMTCannon() { + if allocType == e2econfig.AllocTypeMTCannon { t.Log("Using MT-Cannon absolute prestate") c.CannonAbsolutePreState = root + "op-program/bin/prestate-mt.bin.gz" } else { @@ -141,17 +142,17 @@ func applyCannonConfig(c *config.Config, t *testing.T, rollupCfg *rollup.Config, c.Cannon.RollupConfigPath = rollupFile } -func WithCannon(t *testing.T, rollupCfg *rollup.Config, l2Genesis *core.Genesis) Option { +func WithCannon(t *testing.T, rollupCfg *rollup.Config, l2Genesis *core.Genesis, allocType e2econfig.AllocType) Option { return func(c *config.Config) { c.TraceTypes = append(c.TraceTypes, types.TraceTypeCannon) - applyCannonConfig(c, t, rollupCfg, l2Genesis) + applyCannonConfig(c, t, rollupCfg, l2Genesis, allocType) } } -func WithPermissioned(t *testing.T, rollupCfg *rollup.Config, l2Genesis *core.Genesis) Option { +func WithPermissioned(t *testing.T, rollupCfg *rollup.Config, l2Genesis *core.Genesis, allocType e2econfig.AllocType) Option { return func(c *config.Config) { c.TraceTypes = append(c.TraceTypes, types.TraceTypePermissioned) - 
applyCannonConfig(c, t, rollupCfg, l2Genesis) + applyCannonConfig(c, t, rollupCfg, l2Genesis, allocType) } } diff --git a/op-e2e/e2eutils/disputegame/helper.go b/op-e2e/e2eutils/disputegame/helper.go index 7651d0941d67..536c0fb8e907 100644 --- a/op-e2e/e2eutils/disputegame/helper.go +++ b/op-e2e/e2eutils/disputegame/helper.go @@ -8,6 +8,8 @@ import ( "testing" "time" + "github.com/ethereum-optimism/optimism/op-e2e/config" + "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts/metrics" @@ -94,6 +96,7 @@ type FactoryHelper struct { PrivKey *ecdsa.PrivateKey FactoryAddr common.Address Factory *bindings.DisputeGameFactory + AllocType config.AllocType } type FactoryCfg struct { @@ -113,6 +116,10 @@ func NewFactoryHelper(t *testing.T, ctx context.Context, system DisputeSystem, o client := system.NodeClient("l1") chainID, err := client.ChainID(ctx) require.NoError(err) + + allocType := config.AllocTypeFromEnv() + require.True(allocType.UsesProofs(), "AllocType %v does not support proofs", allocType) + factoryCfg := &FactoryCfg{PrivKey: TestKey} for _, opt := range opts { opt(factoryCfg) @@ -134,6 +141,7 @@ func NewFactoryHelper(t *testing.T, ctx context.Context, system DisputeSystem, o PrivKey: factoryCfg.PrivKey, Factory: factory, FactoryAddr: factoryAddr, + AllocType: allocType, } } @@ -208,7 +216,7 @@ func (h *FactoryHelper) startOutputCannonGameOfType(ctx context.Context, l2Node provider := outputs.NewTraceProvider(logger, prestateProvider, rollupClient, l2Client, l1Head, splitDepth, prestateBlock, poststateBlock) return &OutputCannonGameHelper{ - OutputGameHelper: *NewOutputGameHelper(h.T, h.Require, h.Client, h.Opts, h.PrivKey, game, h.FactoryAddr, createdEvent.DisputeProxy, provider, h.System), + OutputGameHelper: *NewOutputGameHelper(h.T, h.Require, h.Client, h.Opts, h.PrivKey, game, h.FactoryAddr, 
createdEvent.DisputeProxy, provider, h.System, h.AllocType), } } @@ -262,7 +270,7 @@ func (h *FactoryHelper) StartOutputAlphabetGame(ctx context.Context, l2Node stri provider := outputs.NewTraceProvider(logger, prestateProvider, rollupClient, l2Client, l1Head, splitDepth, prestateBlock, poststateBlock) return &OutputAlphabetGameHelper{ - OutputGameHelper: *NewOutputGameHelper(h.T, h.Require, h.Client, h.Opts, h.PrivKey, game, h.FactoryAddr, createdEvent.DisputeProxy, provider, h.System), + OutputGameHelper: *NewOutputGameHelper(h.T, h.Require, h.Client, h.Opts, h.PrivKey, game, h.FactoryAddr, createdEvent.DisputeProxy, provider, h.System, h.AllocType), } } diff --git a/op-e2e/e2eutils/disputegame/output_cannon_helper.go b/op-e2e/e2eutils/disputegame/output_cannon_helper.go index edd3125dd766..5b0f923b0dc2 100644 --- a/op-e2e/e2eutils/disputegame/output_cannon_helper.go +++ b/op-e2e/e2eutils/disputegame/output_cannon_helper.go @@ -35,7 +35,7 @@ type OutputCannonGameHelper struct { func (g *OutputCannonGameHelper) StartChallenger(ctx context.Context, name string, options ...challenger.Option) *challenger.Helper { opts := []challenger.Option{ - challenger.WithCannon(g.T, g.System.RollupCfg(), g.System.L2Genesis()), + challenger.WithCannon(g.T, g.System.RollupCfg(), g.System.L2Genesis(), g.AllocType), challenger.WithFactoryAddress(g.FactoryAddr), challenger.WithGameAddress(g.Addr), } @@ -331,7 +331,7 @@ func (g *OutputCannonGameHelper) createCannonTraceProvider(ctx context.Context, func (g *OutputCannonGameHelper) defaultChallengerOptions() []challenger.Option { return []challenger.Option{ - challenger.WithCannon(g.T, g.System.RollupCfg(), g.System.L2Genesis()), + challenger.WithCannon(g.T, g.System.RollupCfg(), g.System.L2Genesis(), g.AllocType), challenger.WithFactoryAddress(g.FactoryAddr), challenger.WithGameAddress(g.Addr), } diff --git a/op-e2e/e2eutils/disputegame/output_game_helper.go b/op-e2e/e2eutils/disputegame/output_game_helper.go index 
b1d18392b258..3914b3399b14 100644 --- a/op-e2e/e2eutils/disputegame/output_game_helper.go +++ b/op-e2e/e2eutils/disputegame/output_game_helper.go @@ -9,6 +9,8 @@ import ( "testing" "time" + "github.com/ethereum-optimism/optimism/op-e2e/config" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/preimages" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/outputs" @@ -40,10 +42,11 @@ type OutputGameHelper struct { Addr common.Address CorrectOutputProvider *outputs.OutputTraceProvider System DisputeSystem + AllocType config.AllocType } func NewOutputGameHelper(t *testing.T, require *require.Assertions, client *ethclient.Client, opts *bind.TransactOpts, privKey *ecdsa.PrivateKey, - game contracts.FaultDisputeGameContract, factoryAddr common.Address, addr common.Address, correctOutputProvider *outputs.OutputTraceProvider, system DisputeSystem) *OutputGameHelper { + game contracts.FaultDisputeGameContract, factoryAddr common.Address, addr common.Address, correctOutputProvider *outputs.OutputTraceProvider, system DisputeSystem, allocType config.AllocType) *OutputGameHelper { return &OutputGameHelper{ T: t, Require: require, @@ -55,6 +58,7 @@ func NewOutputGameHelper(t *testing.T, require *require.Assertions, client *ethc Addr: addr, CorrectOutputProvider: correctOutputProvider, System: system, + AllocType: allocType, } } diff --git a/op-e2e/e2eutils/setup.go b/op-e2e/e2eutils/setup.go index 52dd6ec2d3ae..57c7c845672d 100644 --- a/op-e2e/e2eutils/setup.go +++ b/op-e2e/e2eutils/setup.go @@ -39,6 +39,7 @@ type DeployParams struct { MnemonicConfig *MnemonicConfig Secrets *Secrets Addresses *Addresses + AllocType config.AllocType } // TestParams parametrizes the most essential rollup configuration parameters @@ -48,6 +49,7 @@ type TestParams struct { ChannelTimeout uint64 L1BlockTime uint64 UseAltDA bool + AllocType config.AllocType } func MakeDeployParams(t 
require.TestingT, tp *TestParams) *DeployParams { @@ -56,7 +58,7 @@ func MakeDeployParams(t require.TestingT, tp *TestParams) *DeployParams { require.NoError(t, err) addresses := secrets.Addresses() - deployConfig := config.DeployConfig.Copy() + deployConfig := config.DeployConfig(tp.AllocType) deployConfig.MaxSequencerDrift = tp.MaxSequencerDrift deployConfig.SequencerWindowSize = tp.SequencerWindowSize deployConfig.ChannelTimeoutBedrock = tp.ChannelTimeout @@ -75,6 +77,7 @@ func MakeDeployParams(t require.TestingT, tp *TestParams) *DeployParams { MnemonicConfig: mnemonicCfg, Secrets: secrets, Addresses: addresses, + AllocType: tp.AllocType, } } @@ -110,10 +113,14 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) * logger := log.NewLogger(log.DiscardHandler()) require.NoError(t, deployConf.Check(logger)) - l1Deployments := config.L1Deployments.Copy() + l1Deployments := config.L1Deployments(deployParams.AllocType) require.NoError(t, l1Deployments.Check(deployConf)) - l1Genesis, err := genesis.BuildL1DeveloperGenesis(deployConf, config.L1Allocs, l1Deployments) + l1Genesis, err := genesis.BuildL1DeveloperGenesis( + deployConf, + config.L1Allocs(deployParams.AllocType), + l1Deployments, + ) require.NoError(t, err, "failed to create l1 genesis") if alloc.PrefundTestUsers { for _, addr := range deployParams.Addresses.All() { @@ -133,7 +140,7 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) * if ecotoneTime := deployConf.EcotoneTime(l1Block.Time()); ecotoneTime != nil && *ecotoneTime == 0 { allocsMode = genesis.L2AllocsEcotone } - l2Allocs := config.L2Allocs(allocsMode) + l2Allocs := config.L2Allocs(deployParams.AllocType, allocsMode) l2Genesis, err := genesis.BuildL2Genesis(deployConf, l2Allocs, l1Block.Header()) require.NoError(t, err, "failed to create l2 genesis") if alloc.PrefundTestUsers { @@ -235,22 +242,3 @@ func ApplyDeployConfigForks(deployConfig *genesis.DeployConfig) { 
deployConfig.L2GenesisCanyonTimeOffset = new(hexutil.Uint64) deployConfig.L2GenesisRegolithTimeOffset = new(hexutil.Uint64) } - -func UseFaultProofs() bool { - return !UseL2OO() -} - -func UseL2OO() bool { - return (os.Getenv("OP_E2E_USE_L2OO") == "true" || - os.Getenv("DEVNET_L2OO") == "true") -} - -func UseAltDA() bool { - return (os.Getenv("OP_E2E_USE_ALTDA") == "true" || - os.Getenv("DEVNET_ALTDA") == "true") -} - -func UseMTCannon() bool { - return (os.Getenv("OP_E2E_USE_MT_CANNON") == "true" || - os.Getenv("USE_MT_CANNON") == "true") -} diff --git a/op-e2e/e2eutils/setup_test.go b/op-e2e/e2eutils/setup_test.go index 6ce4176591e0..5a6e5dd2ddc0 100644 --- a/op-e2e/e2eutils/setup_test.go +++ b/op-e2e/e2eutils/setup_test.go @@ -24,6 +24,7 @@ func TestSetup(t *testing.T) { SequencerWindowSize: 120, ChannelTimeout: 120, L1BlockTime: 15, + AllocType: config.AllocTypeStandard, } dp := MakeDeployParams(t, tp) alloc := &AllocParams{PrefundTestUsers: true} @@ -34,6 +35,7 @@ func TestSetup(t *testing.T) { require.Contains(t, sd.L2Cfg.Alloc, dp.Addresses.Alice) require.Equal(t, sd.L2Cfg.Alloc[dp.Addresses.Alice].Balance, Ether(1e12)) - require.Contains(t, sd.L1Cfg.Alloc, config.L1Deployments.OptimismPortalProxy) + expAllocs := config.L1Deployments(config.DefaultAllocType) + require.Contains(t, sd.L1Cfg.Alloc, expAllocs.AddressManager) require.Contains(t, sd.L2Cfg.Alloc, predeploys.L1BlockAddr) } diff --git a/op-e2e/external/config.go b/op-e2e/external/config.go deleted file mode 100644 index 943abe6346f9..000000000000 --- a/op-e2e/external/config.go +++ /dev/null @@ -1,69 +0,0 @@ -package external - -import ( - "bytes" - "encoding/json" - "os" - "strings" - "testing" -) - -type Config struct { - DataDir string `json:"data_dir"` - JWTPath string `json:"jwt_path"` - ChainID uint64 `json:"chain_id"` - GasCeil uint64 `json:"gas_ceil"` - GenesisPath string `json:"genesis_path"` - Verbosity uint64 `json:"verbosity"` - - // EndpointsReadyPath is the location to write the 
endpoint configuration file. - // Note, this should be written atomically by writing the JSON, then moving - // it to this path to avoid races. A helper AtomicEncode is provided for - // golang clients. - EndpointsReadyPath string `json:"endpoints_ready_path"` -} - -// AtomicEncode json encodes val to path+".atomic" then moves the path+".atomic" -// file to path -func AtomicEncode(path string, val any) error { - atomicPath := path + ".atomic" - atomicFile, err := os.Create(atomicPath) - if err != nil { - return err - } - defer atomicFile.Close() - if err = json.NewEncoder(atomicFile).Encode(val); err != nil { - return err - } - return os.Rename(atomicPath, path) -} - -type Endpoints struct { - HTTPEndpoint string `json:"http_endpoint"` - WSEndpoint string `json:"ws_endpoint"` - HTTPAuthEndpoint string `json:"http_auth_endpoint"` - WSAuthEndpoint string `json:"ws_auth_endpoint"` -} - -type TestParms struct { - // SkipTests is a map from test name to skip message. The skip message may - // be arbitrary, but the test name should match the skipped test (either - // base, or a sub-test) exactly. Precisely, the skip name must match rune for - // rune starting with the first rune. If the skip name does not match all - // runes, the first mismatched rune must be a '/'. 
- SkipTests map[string]string `json:"skip_tests"` -} - -func (tp TestParms) SkipIfNecessary(t testing.TB) { - if len(tp.SkipTests) == 0 { - return - } - var base bytes.Buffer - for _, name := range strings.Split(t.Name(), "/") { - base.WriteString(name) - if msg, ok := tp.SkipTests[base.String()]; ok { - t.Skip(msg) - } - base.WriteRune('/') - } -} diff --git a/op-e2e/external_geth/.gitignore b/op-e2e/external_geth/.gitignore deleted file mode 100644 index f034fb8a9e82..000000000000 --- a/op-e2e/external_geth/.gitignore +++ /dev/null @@ -1 +0,0 @@ -op-geth diff --git a/op-e2e/external_geth/Makefile b/op-e2e/external_geth/Makefile deleted file mode 100644 index f0dd9130282d..000000000000 --- a/op-e2e/external_geth/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -default: shim op-geth - -op-geth: - go build -o op-geth "github.com/ethereum/go-ethereum/cmd/geth" -.PHONY: op-geth - -shim: main.go - go build -o shim . diff --git a/op-e2e/external_geth/README.md b/op-e2e/external_geth/README.md deleted file mode 100644 index 7438fb037f3b..000000000000 --- a/op-e2e/external_geth/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# external_geth shim - -This shim is an example of how to write an adapter for an external ethereum -client to allow for its use in the op-e2e tests. - -## Invocation - -Generally speaking, you can utilize this shim by simply executing: - -``` -make test-external-geth -``` - -The `Makefile` is structured such that if you duplicate this directory and -tweak this code, you may simply execute: - -``` -make test-external- -``` - -and the execution should happen as well. - -*NOTE:* Attempting to iterate for development requires explicit rebuilding of -the binary being shimmed. Most likely to accomplish this, you may want to add -initialization code to the TestMain of the e2e to build your binary, or use -some other technique like custom build scripts or IDE integrations which cause -the binary to be rebuilt before executing the tests. 
- -## Arguments - -*--config * The config path is a required argument, it points to a JSON -file that contains details of the L2 environment to bring up (including the -`genesis.json` path, the chain ID, the JWT path, and a ready file path). See -the data structures in `op-e2e/external/config.go` for more details. - -## Operation - -This shim will first execute a process to initialize the op-geth database. -Then, it will start the op-geth process itself. It watches the output of the -process and looks for the lines indicating that the HTTP server and Auth HTTP -server have started up. It then reads the ports which were allocated (because -the requested ports were passed in as ephemeral via the CLI arguments). - -## Skipping tests - -Although ideally, all tests would be structured such that they may execute -either with an in-process op-geth or with an extra-process ethereum client, -this is not always the case. You may optionally create a `test_parms.json` -file in the `external_` directory, as there is in the -`external_geth` directory which specifies a map of tests to skip, and -accompanying skip text. See the `op-e2e/external/config.go` file for more -details. - -## Generalization - -This shim is included to help document and demonstrates the usage of the -external ethereum process e2e test execution. It is configured to execute in -CI to help ensure that the tests remain compatible with external clients. - -To create your own external test client, these files can likely be used as a -starting point, changing the arguments, log scraping, and other details. Or, -depending on the client and your preference, any binary which is capable of -reading and writing the necessary JSON files should be sufficient (though -will be required to replicate some of the parsing and other logic encapsulated -here). 
diff --git a/op-e2e/external_geth/main.go b/op-e2e/external_geth/main.go deleted file mode 100644 index c8921b9b3ece..000000000000 --- a/op-e2e/external_geth/main.go +++ /dev/null @@ -1,205 +0,0 @@ -package main - -import ( - "encoding/json" - "errors" - "flag" - "fmt" - "io" - "os" - "os/exec" - "os/signal" - "path/filepath" - "strconv" - "syscall" - "time" - - "github.com/ethereum-optimism/optimism/op-e2e/external" - "github.com/onsi/gomega/gbytes" - "github.com/onsi/gomega/gexec" -) - -func main() { - var configPath string - flag.StringVar(&configPath, "config", "", "Execute based on the config in this file") - flag.Parse() - if err := run(configPath); err != nil { - fmt.Println(err.Error()) - os.Exit(1) - } - os.Exit(0) -} - -func run(configPath string) error { - if configPath == "" { - return fmt.Errorf("must supply a '--config ' flag") - } - - configFile, err := os.Open(configPath) - if err != nil { - return fmt.Errorf("could not open config: %w", err) - } - - var config external.Config - if err := json.NewDecoder(configFile).Decode(&config); err != nil { - return fmt.Errorf("could not decode config file: %w", err) - } - - binPath, err := filepath.Abs("op-geth") - if err != nil { - return fmt.Errorf("could not get absolute path of op-geth") - } - if _, err := os.Stat(binPath); err != nil { - return fmt.Errorf("could not locate op-geth in working directory, did you forget to run '--init'?") - } - - fmt.Printf("================== op-geth shim initializing chain config ==========================\n") - if err := initialize(binPath, config); err != nil { - return fmt.Errorf("could not initialize datadir: %s %w", binPath, err) - } - - fmt.Printf("================== op-geth shim executing op-geth ==========================\n") - sess, err := execute(binPath, config) - if err != nil { - return fmt.Errorf("could not execute geth: %w", err) - } - defer sess.Close() - - fmt.Printf("================== op-geth shim encoding ready-file ==========================\n") - if 
err := external.AtomicEncode(config.EndpointsReadyPath, sess.endpoints); err != nil { - return fmt.Errorf("could not encode endpoints") - } - - fmt.Printf("================== op-geth shim awaiting termination ==========================\n") - - sigs := make(chan os.Signal, 1) - defer signal.Stop(sigs) - signal.Notify(sigs, os.Interrupt, syscall.SIGTERM) - - select { - case <-sigs: - fmt.Printf("================== op-geth shim caught signal, killing ==========================\n") - sess.session.Terminate() - return awaitExit(sess.session) - case <-sess.session.Exited: - return fmt.Errorf("geth exited with code %d", sess.session.ExitCode()) - case <-time.After(30 * time.Minute): - fmt.Printf("================== op-geth shim timed out, killing ==========================\n") - sess.session.Terminate() - if err := awaitExit(sess.session); err != nil { - fmt.Printf("error killing geth: %v\n", err) - } - return errors.New("geth timed out after 30 minutes") - } -} - -func awaitExit(sess *gexec.Session) error { - select { - case <-sess.Exited: - return nil - case <-time.After(5 * time.Second): - sess.Kill() - select { - case <-sess.Exited: - return nil - case <-time.After(30 * time.Second): - return fmt.Errorf("exiting after 30 second timeout") - } - } -} - -func initialize(binPath string, config external.Config) error { - cmd := exec.Command( - binPath, - "--datadir", config.DataDir, - "--state.scheme=hash", - "init", config.GenesisPath, - ) - return cmd.Run() -} - -type gethSession struct { - session *gexec.Session - endpoints *external.Endpoints -} - -func (es *gethSession) Close() { - es.session.Terminate() - select { - case <-time.After(5 * time.Second): - es.session.Kill() - case <-es.session.Exited: - } -} - -func execute(binPath string, config external.Config) (*gethSession, error) { - if config.Verbosity < 2 { - return nil, fmt.Errorf("a minimum configured verbosity of 2 is required") - } - cmd := exec.Command( - binPath, - "--datadir", config.DataDir, - "--http", - 
"--http.addr", "127.0.0.1", - "--http.port", "0", - "--http.api", "web3,debug,eth,txpool,net,engine", - "--ws", - "--ws.addr", "127.0.0.1", - "--ws.port", "0", - "--ws.api", "debug,eth,txpool,net,engine", - "--syncmode=full", - "--state.scheme=hash", - "--nodiscover", - "--port", "0", - "--maxpeers", "0", - "--networkid", strconv.FormatUint(config.ChainID, 10), - "--authrpc.addr", "127.0.0.1", - "--authrpc.port", "0", - "--authrpc.jwtsecret", config.JWTPath, - "--gcmode=archive", - "--verbosity", strconv.FormatUint(config.Verbosity, 10), - ) - sess, err := gexec.Start(cmd, os.Stdout, os.Stderr) - if err != nil { - return nil, fmt.Errorf("could not start op-geth session: %w", err) - } - matcher := gbytes.Say("HTTP server started\\s*endpoint=127.0.0.1:") - var enginePort, httpPort int - for enginePort == 0 || httpPort == 0 { - match, err := matcher.Match(sess.Err) - if err != nil { - return nil, fmt.Errorf("could not execute matcher") - } - if !match { - if sess.Err.Closed() { - return nil, fmt.Errorf("op-geth exited before announcing http ports") - } - // Wait for a bit more output, then try again - time.Sleep(10 * time.Millisecond) - continue - } - var authString string - var port int - if _, err := fmt.Fscanf(sess.Err, "%d %s", &port, &authString); err != nil && !errors.Is(err, io.EOF) { - return nil, fmt.Errorf("error while reading auth string: %w", err) - } - switch authString { - case "auth=true": - enginePort = port - case "auth=false": - httpPort = port - default: - return nil, fmt.Errorf("unexpected auth string %q", authString) - } - } - - return &gethSession{ - session: sess, - endpoints: &external.Endpoints{ - HTTPEndpoint: fmt.Sprintf("http://127.0.0.1:%d/", httpPort), - WSEndpoint: fmt.Sprintf("ws://127.0.0.1:%d/", httpPort), - HTTPAuthEndpoint: fmt.Sprintf("http://127.0.0.1:%d/", enginePort), - WSAuthEndpoint: fmt.Sprintf("ws://127.0.0.1:%d/", enginePort), - }, - }, nil -} diff --git a/op-e2e/external_geth/main_test.go 
b/op-e2e/external_geth/main_test.go deleted file mode 100644 index b971057e0cc2..000000000000 --- a/op-e2e/external_geth/main_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package main - -import ( - "net" - "net/url" - "os" - "os/exec" - "path/filepath" - "testing" - "time" - - "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" - - "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/op-e2e/config" - "github.com/ethereum-optimism/optimism/op-service/endpoint" -) - -func TestShim(t *testing.T) { - shimPath, err := filepath.Abs("shim") - require.NoError(t, err) - cmd := exec.Command("go", "build", "-o", shimPath, ".") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err = cmd.Run() - require.NoError(t, err) - require.FileExists(t, "shim") - - opGethPath, err := filepath.Abs("op-geth") - require.NoError(t, err) - cmd = exec.Command("go", "build", "-o", opGethPath, "github.com/ethereum/go-ethereum/cmd/geth") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err = cmd.Run() - require.NoError(t, err) - require.FileExists(t, "op-geth") - - config.EthNodeVerbosity = config.LegacyLevelDebug - - ec := (&e2esys.ExternalRunner{ - Name: "TestShim", - BinPath: shimPath, - }).Run(t) - t.Cleanup(func() { _ = ec.Close() }) - - for _, rpcEndpoint := range []string{ - ec.UserRPC().(endpoint.HttpRPC).HttpRPC(), - ec.AuthRPC().(endpoint.HttpRPC).HttpRPC(), - ec.UserRPC().(endpoint.WsRPC).WsRPC(), - ec.AuthRPC().(endpoint.WsRPC).WsRPC(), - } { - plainURL, err := url.ParseRequestURI(rpcEndpoint) - require.NoError(t, err) - _, err = net.DialTimeout("tcp", plainURL.Host, time.Second) - require.NoError(t, err, "could not connect to HTTP port") - } -} diff --git a/op-e2e/external_geth/test_parms.json b/op-e2e/external_geth/test_parms.json deleted file mode 100644 index c00d8722658e..000000000000 --- a/op-e2e/external_geth/test_parms.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "skip_tests":{ - "TestPendingGasLimit":"This test requires directly modifying go 
structures and cannot be implemented with flags" - } -} diff --git a/op-e2e/external_geth/tools.go b/op-e2e/external_geth/tools.go deleted file mode 100644 index b78b4dd4a469..000000000000 --- a/op-e2e/external_geth/tools.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build tools - -package main - -import _ "github.com/ethereum/go-ethereum/cmd/geth" diff --git a/op-e2e/faultproofs/multi_test.go b/op-e2e/faultproofs/multi_test.go index 83b475d60e0d..2034e394842c 100644 --- a/op-e2e/faultproofs/multi_test.go +++ b/op-e2e/faultproofs/multi_test.go @@ -4,6 +4,8 @@ import ( "context" "testing" + "github.com/ethereum-optimism/optimism/op-e2e/config" + op_e2e "github.com/ethereum-optimism/optimism/op-e2e" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger" @@ -27,7 +29,7 @@ func TestMultipleGameTypes(t *testing.T) { // Start a challenger with both cannon and alphabet support gameFactory.StartChallenger(ctx, "TowerDefense", - challenger.WithCannon(t, sys.RollupConfig, sys.L2GenesisCfg), + challenger.WithCannon(t, sys.RollupConfig, sys.L2GenesisCfg, config.AllocTypeFromEnv()), challenger.WithAlphabet(), challenger.WithPrivKey(sys.Cfg.Secrets.Alice), ) diff --git a/op-e2e/faultproofs/permissioned_test.go b/op-e2e/faultproofs/permissioned_test.go index 09c4646fe8ce..98e4e2d9fc90 100644 --- a/op-e2e/faultproofs/permissioned_test.go +++ b/op-e2e/faultproofs/permissioned_test.go @@ -4,6 +4,8 @@ import ( "context" "testing" + "github.com/ethereum-optimism/optimism/op-e2e/config" + op_e2e "github.com/ethereum-optimism/optimism/op-e2e" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger" @@ -26,7 +28,7 @@ func TestPermissionedGameType(t *testing.T) { gameFactory.StartChallenger(ctx, "TowerDefense", challenger.WithValidPrestateRequired(), challenger.WithInvalidCannonPrestate(), - challenger.WithPermissioned(t, sys.RollupConfig, sys.L2GenesisCfg), + challenger.WithPermissioned(t, sys.RollupConfig, sys.L2GenesisCfg, config.AllocTypeFromEnv()), 
challenger.WithPrivKey(sys.Cfg.Secrets.Alice), ) diff --git a/op-e2e/faultproofs/precompile_test.go b/op-e2e/faultproofs/precompile_test.go index aebe6a8fd1a9..78fcff01fc49 100644 --- a/op-e2e/faultproofs/precompile_test.go +++ b/op-e2e/faultproofs/precompile_test.go @@ -7,6 +7,8 @@ import ( "path/filepath" "testing" + e2econfig "github.com/ethereum-optimism/optimism/op-e2e/config" + op_e2e "github.com/ethereum-optimism/optimism/op-e2e" "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" "github.com/ethereum-optimism/optimism/op-e2e/system/helpers" @@ -250,7 +252,7 @@ func runCannon(t *testing.T, ctx context.Context, sys *e2esys.System, inputs uti l1Beacon := sys.L1BeaconEndpoint().RestHTTP() rollupEndpoint := sys.RollupEndpoint("sequencer").RPC() l2Endpoint := sys.NodeEndpoint("sequencer").RPC() - cannonOpts := challenger.WithCannon(t, sys.RollupCfg(), sys.L2Genesis()) + cannonOpts := challenger.WithCannon(t, sys.RollupCfg(), sys.L2Genesis(), e2econfig.AllocTypeFromEnv()) dir := t.TempDir() proofsDir := filepath.Join(dir, "cannon-proofs") cfg := config.NewConfig(common.Address{}, l1Endpoint, l1Beacon, rollupEndpoint, l2Endpoint, dir) diff --git a/op-e2e/opgeth/op_geth.go b/op-e2e/opgeth/op_geth.go index 1e15eecbd08a..5a376c6a5e72 100644 --- a/op-e2e/opgeth/op_geth.go +++ b/op-e2e/opgeth/op_geth.go @@ -56,7 +56,7 @@ type OpGeth struct { func NewOpGeth(t testing.TB, ctx context.Context, cfg *e2esys.SystemConfig) (*OpGeth, error) { logger := testlog.Logger(t, log.LevelCrit) - l1Genesis, err := genesis.BuildL1DeveloperGenesis(cfg.DeployConfig, config.L1Allocs, config.L1Deployments) + l1Genesis, err := genesis.BuildL1DeveloperGenesis(cfg.DeployConfig, config.L1Allocs(config.AllocTypeStandard), config.L1Deployments(config.AllocTypeStandard)) require.NoError(t, err) l1Block := l1Genesis.ToBlock() @@ -69,7 +69,7 @@ func NewOpGeth(t testing.TB, ctx context.Context, cfg *e2esys.SystemConfig) (*Op } else if ecotoneTime := 
cfg.DeployConfig.EcotoneTime(l1Block.Time()); ecotoneTime != nil && *ecotoneTime <= 0 { allocsMode = genesis.L2AllocsEcotone } - l2Allocs := config.L2Allocs(allocsMode) + l2Allocs := config.L2Allocs(config.AllocTypeStandard, allocsMode) l2Genesis, err := genesis.BuildL2Genesis(cfg.DeployConfig, l2Allocs, l1Block.Header()) require.NoError(t, err) l2GenesisBlock := l2Genesis.ToBlock() @@ -88,20 +88,10 @@ func NewOpGeth(t testing.TB, ctx context.Context, cfg *e2esys.SystemConfig) (*Op } var node services.EthInstance - if cfg.ExternalL2Shim == "" { - gethNode, err := geth.InitL2("l2", l2Genesis, cfg.JWTFilePath) - require.NoError(t, err) - require.NoError(t, gethNode.Node.Start()) - node = gethNode - } else { - externalNode := (&e2esys.ExternalRunner{ - Name: "l2", - BinPath: cfg.ExternalL2Shim, - Genesis: l2Genesis, - JWTPath: cfg.JWTFilePath, - }).Run(t) - node = externalNode - } + gethNode, err := geth.InitL2("l2", l2Genesis, cfg.JWTFilePath) + require.NoError(t, err) + require.NoError(t, gethNode.Node.Start()) + node = gethNode auth := rpc.WithHTTPAuth(gn.NewJWTAuth(cfg.JWTSecret)) l2Node, err := client.NewRPC(ctx, logger, node.AuthRPC().RPC(), client.WithGethRPCOptions(auth)) diff --git a/op-e2e/system/bridge/validity_test.go b/op-e2e/system/bridge/validity_test.go index d79919015951..240751ab1c45 100644 --- a/op-e2e/system/bridge/validity_test.go +++ b/op-e2e/system/bridge/validity_test.go @@ -10,6 +10,8 @@ import ( "testing" "time" + "github.com/ethereum-optimism/optimism/op-e2e/config" + op_e2e "github.com/ethereum-optimism/optimism/op-e2e" "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" @@ -264,9 +266,17 @@ func TestMixedDepositValidity(t *testing.T) { } } +func TestMixedWithdrawalValidity_L2OO(t *testing.T) { + testMixedWithdrawalValidity(t, config.AllocTypeL2OO) +} + +func TestMixedWithdrawalValidity_Standard(t *testing.T) { + testMixedWithdrawalValidity(t, config.AllocTypeStandard) +} + // TestMixedWithdrawalValidity makes a number of 
withdrawal transactions and ensures ones with modified parameters are // rejected while unmodified ones are accepted. This runs test cases in different systems. -func TestMixedWithdrawalValidity(t *testing.T) { +func testMixedWithdrawalValidity(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t) // There are 7 different fields we try modifying to cause a failure, plus one "good" test result we test. @@ -279,7 +289,7 @@ func TestMixedWithdrawalValidity(t *testing.T) { op_e2e.InitParallel(t) // Create our system configuration, funding all accounts we created for L1/L2, and start it - cfg := e2esys.DefaultSystemConfig(t) + cfg := e2esys.DefaultSystemConfig(t, e2esys.WithAllocType(allocType)) cfg.Nodes["sequencer"].SafeDBPath = t.TempDir() cfg.DeployConfig.L2BlockTime = 2 require.LessOrEqual(t, cfg.DeployConfig.FinalizationPeriodSeconds, uint64(6)) @@ -421,7 +431,7 @@ func TestMixedWithdrawalValidity(t *testing.T) { // Wait for the finalization period, then we can finalize this withdrawal. 
require.NotEqual(t, cfg.L1Deployments.L2OutputOracleProxy, common.Address{}) var blockNumber uint64 - if e2eutils.UseFaultProofs() { + if allocType.UsesProofs() { blockNumber, err = wait.ForGamePublished(ctx, l1Client, cfg.L1Deployments.OptimismPortalProxy, cfg.L1Deployments.DisputeGameFactoryProxy, receipt.BlockNumber) } else { blockNumber, err = wait.ForOutputRootPublished(ctx, l1Client, cfg.L1Deployments.L2OutputOracleProxy, receipt.BlockNumber) @@ -438,7 +448,7 @@ func TestMixedWithdrawalValidity(t *testing.T) { blockCl := ethclient.NewClient(rpcClient) // Now create the withdrawal - params, err := helpers.ProveWithdrawalParameters(context.Background(), proofCl, receiptCl, blockCl, tx.Hash(), header, l2OutputOracle, disputeGameFactory, optimismPortal2) + params, err := helpers.ProveWithdrawalParameters(context.Background(), proofCl, receiptCl, blockCl, tx.Hash(), header, l2OutputOracle, disputeGameFactory, optimismPortal2, cfg.AllocType) require.Nil(t, err) // Obtain our withdrawal parameters @@ -527,7 +537,7 @@ func TestMixedWithdrawalValidity(t *testing.T) { } else { require.NoError(t, err) - if e2eutils.UseFaultProofs() { + if allocType.UsesProofs() { // Start a challenger to resolve claims and games once the clock expires factoryHelper := disputegame.NewFactoryHelper(t, ctx, sys) factoryHelper.StartChallenger(ctx, "Challenger", @@ -555,7 +565,7 @@ func TestMixedWithdrawalValidity(t *testing.T) { // Wait for finalization and then create the Finalized Withdrawal Transaction ctx, withdrawalCancel := context.WithTimeout(context.Background(), 60*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second) defer withdrawalCancel() - if e2eutils.UseFaultProofs() { + if allocType.UsesProofs() { err = wait.ForWithdrawalCheck(ctx, l1Client, withdrawal, cfg.L1Deployments.OptimismPortalProxy, transactor.Account.L1Opts.From) require.NoError(t, err) } else { diff --git a/op-e2e/system/bridge/withdrawal.go b/op-e2e/system/bridge/withdrawal.go index 29864db02dbf..fac12aa844a2 
100644 --- a/op-e2e/system/bridge/withdrawal.go +++ b/op-e2e/system/bridge/withdrawal.go @@ -17,7 +17,6 @@ import ( "github.com/stretchr/testify/require" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" "github.com/ethereum-optimism/optimism/op-service/sources" ) @@ -33,7 +32,7 @@ type CommonSystem interface { // balance changes on L1 and L2 and has to include gas fees in the balance checks. // It does not check that the withdrawal can be executed prior to the end of the finality period. func RunWithdrawalsTest(t *testing.T, sys CommonSystem) { - t.Logf("WithdrawalsTest: running with FP == %t", e2eutils.UseFaultProofs()) + t.Logf("WithdrawalsTest: running with allocType == %s", sys.Config().AllocType) cfg := sys.Config() l1Client := sys.NodeClient(e2esys.RoleL1) @@ -129,7 +128,7 @@ func RunWithdrawalsTest(t *testing.T, sys CommonSystem) { proveFee := new(big.Int).Mul(new(big.Int).SetUint64(proveReceipt.GasUsed), proveReceipt.EffectiveGasPrice) finalizeFee := new(big.Int).Mul(new(big.Int).SetUint64(finalizeReceipt.GasUsed), finalizeReceipt.EffectiveGasPrice) fees = new(big.Int).Add(proveFee, finalizeFee) - if e2eutils.UseFaultProofs() { + if sys.Config().AllocType.UsesProofs() { resolveClaimFee := new(big.Int).Mul(new(big.Int).SetUint64(resolveClaimReceipt.GasUsed), resolveClaimReceipt.EffectiveGasPrice) resolveFee := new(big.Int).Mul(new(big.Int).SetUint64(resolveReceipt.GasUsed), resolveReceipt.EffectiveGasPrice) fees = new(big.Int).Add(fees, resolveClaimFee) diff --git a/op-e2e/system/bridge/withdrawal_test.go b/op-e2e/system/bridge/withdrawal_test.go index 9f37240cbdbe..1f56fe4c4ade 100644 --- a/op-e2e/system/bridge/withdrawal_test.go +++ b/op-e2e/system/bridge/withdrawal_test.go @@ -4,18 +4,25 @@ import ( "testing" op_e2e "github.com/ethereum-optimism/optimism/op-e2e" - + "github.com/ethereum-optimism/optimism/op-e2e/config" "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" 
"github.com/stretchr/testify/require" ) -// TestWithdrawals checks that a deposit and then withdrawal execution succeeds. It verifies the +func TestWithdrawals_L2OO(t *testing.T) { + testWithdrawals(t, config.AllocTypeL2OO) +} + +func TestWithdrawals_Standard(t *testing.T) { + testWithdrawals(t, config.AllocTypeStandard) +} + +// testWithdrawals checks that a deposit and then withdrawal execution succeeds. It verifies the // balance changes on L1 and L2 and has to include gas fees in the balance checks. // It does not check that the withdrawal can be executed prior to the end of the finality period. -func TestWithdrawals(t *testing.T) { +func testWithdrawals(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t) - - cfg := e2esys.DefaultSystemConfig(t) + cfg := e2esys.DefaultSystemConfig(t, e2esys.WithAllocType(allocType)) cfg.DeployConfig.FinalizationPeriodSeconds = 2 // 2s finalization period cfg.L1FinalizedDistance = 2 // Finalize quick, don't make the proposer wait too long diff --git a/op-e2e/system/da/brotli_batcher_test.go b/op-e2e/system/da/brotli_batcher_test.go index b44bd5af1623..a55e5bced8ad 100644 --- a/op-e2e/system/da/brotli_batcher_test.go +++ b/op-e2e/system/da/brotli_batcher_test.go @@ -67,7 +67,7 @@ func TestBrotliBatcherFjord(t *testing.T) { cfg.DeployConfig.L2GenesisFjordTimeOffset = &genesisActivation // set up batcher to use brotli - sys, err := cfg.Start(t, e2esys.SystemConfigOption{Key: "compressionAlgo", Role: "brotli", Action: nil}) + sys, err := cfg.Start(t, e2esys.StartOption{Key: "compressionAlgo", Role: "brotli", Action: nil}) require.Nil(t, err, "Error starting up system") log := testlog.Logger(t, log.LevelInfo) diff --git a/op-e2e/system/da/eip4844_test.go b/op-e2e/system/da/eip4844_test.go index 27cc1db0d21d..f3cf8fc7f03f 100644 --- a/op-e2e/system/da/eip4844_test.go +++ b/op-e2e/system/da/eip4844_test.go @@ -34,13 +34,19 @@ import ( "github.com/ethereum/go-ethereum/params" ) -// TestSystem4844E2E runs the SystemE2E 
test with 4844 enabled on L1, and active on the rollup in +// TestSystem4844E2E* run the SystemE2E test with 4844 enabled on L1, and active on the rollup in // the op-batcher and verifier. It submits a txpool-blocking transaction before running // each test to ensure the batcher is able to clear it. -func TestSystem4844E2E(t *testing.T) { - t.Run("calldata", func(t *testing.T) { testSystem4844E2E(t, false, batcherFlags.CalldataType) }) - t.Run("single-blob", func(t *testing.T) { testSystem4844E2E(t, false, batcherFlags.BlobsType) }) - t.Run("multi-blob", func(t *testing.T) { testSystem4844E2E(t, true, batcherFlags.BlobsType) }) +func TestSystem4844E2E_Calldata(t *testing.T) { + testSystem4844E2E(t, false, batcherFlags.CalldataType) +} + +func TestSystem4844E2E_SingleBlob(t *testing.T) { + testSystem4844E2E(t, false, batcherFlags.BlobsType) +} + +func TestSystem4844E2E_MultiBlob(t *testing.T) { + testSystem4844E2E(t, true, batcherFlags.BlobsType) } func testSystem4844E2E(t *testing.T, multiBlob bool, daType batcherFlags.DataAvailabilityType) { @@ -68,7 +74,7 @@ func testSystem4844E2E(t *testing.T, multiBlob bool, daType batcherFlags.DataAva // is started, as is required by the function. var jamChan chan error jamCtx, jamCancel := context.WithTimeout(context.Background(), 20*time.Second) - action := e2esys.SystemConfigOption{ + action := e2esys.StartOption{ Key: "beforeBatcherStart", Action: func(cfg *e2esys.SystemConfig, s *e2esys.System) { driver := s.BatchSubmitter.TestDriver() diff --git a/op-e2e/system/da/startstop_test.go b/op-e2e/system/da/startstop_test.go index c15c1c039438..e085029983af 100644 --- a/op-e2e/system/da/startstop_test.go +++ b/op-e2e/system/da/startstop_test.go @@ -18,37 +18,28 @@ import ( "github.com/stretchr/testify/require" ) -// TestSystemBatchType run each system e2e test case in singular batch mode and span batch mode. +// TestSystemBatchType* run each system e2e test case in singular batch mode and span batch mode. 
// If the test case tests batch submission and advancing safe head, it should be tested in both singular and span batch mode. -func TestSystemBatchType(t *testing.T) { - tests := []struct { - name string - f func(*testing.T, func(*e2esys.SystemConfig)) - }{ - {"StopStartBatcher", StopStartBatcher}, - } - for _, test := range tests { - test := test - t.Run(test.name+"_SingularBatch", func(t *testing.T) { - test.f(t, func(sc *e2esys.SystemConfig) { - sc.BatcherBatchType = derive.SingularBatchType - }) - }) - t.Run(test.name+"_SpanBatch", func(t *testing.T) { - test.f(t, func(sc *e2esys.SystemConfig) { - sc.BatcherBatchType = derive.SpanBatchType - }) - }) - t.Run(test.name+"_SpanBatchMaxBlocks", func(t *testing.T) { - test.f(t, func(sc *e2esys.SystemConfig) { - sc.BatcherBatchType = derive.SpanBatchType - sc.BatcherMaxBlocksPerSpanBatch = 2 - }) - }) - } +func TestSystemBatchType_SingularBatch(t *testing.T) { + testStartStopBatcher(t, func(sc *e2esys.SystemConfig) { + sc.BatcherBatchType = derive.SingularBatchType + }) +} + +func TestSystemBatchType_SpanBatch(t *testing.T) { + testStartStopBatcher(t, func(sc *e2esys.SystemConfig) { + sc.BatcherBatchType = derive.SpanBatchType + }) +} + +func TestSystemBatchType_SpanBatchMaxBlocks(t *testing.T) { + testStartStopBatcher(t, func(sc *e2esys.SystemConfig) { + sc.BatcherBatchType = derive.SpanBatchType + sc.BatcherMaxBlocksPerSpanBatch = 2 + }) } -func StopStartBatcher(t *testing.T, cfgMod func(*e2esys.SystemConfig)) { +func testStartStopBatcher(t *testing.T, cfgMod func(*e2esys.SystemConfig)) { op_e2e.InitParallel(t) cfg := e2esys.DefaultSystemConfig(t) diff --git a/op-e2e/system/e2esys/external.go b/op-e2e/system/e2esys/external.go deleted file mode 100644 index cfdc4fcb88cc..000000000000 --- a/op-e2e/system/e2esys/external.go +++ /dev/null @@ -1,147 +0,0 @@ -package e2esys - -import ( - "encoding/json" - "errors" - "math/big" - "os" - "os/exec" - "path/filepath" - "testing" - "time" - - 
"github.com/ethereum-optimism/optimism/op-e2e/config" - "github.com/ethereum-optimism/optimism/op-e2e/external" - "github.com/ethereum-optimism/optimism/op-service/endpoint" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/params" - "github.com/onsi/gomega/gexec" - "github.com/stretchr/testify/require" -) - -type ExternalRunner struct { - Name string - BinPath string - Genesis *core.Genesis - JWTPath string - // 4844: a datadir specifically for tx-pool blobs - BlobPoolPath string -} - -type ExternalEthClient struct { - Session *gexec.Session - Endpoints external.Endpoints -} - -func (eec *ExternalEthClient) UserRPC() endpoint.RPC { - return endpoint.WsOrHttpRPC{ - WsURL: eec.Endpoints.WSEndpoint, - HttpURL: eec.Endpoints.HTTPEndpoint, - } -} - -func (eec *ExternalEthClient) AuthRPC() endpoint.RPC { - return endpoint.WsOrHttpRPC{ - WsURL: eec.Endpoints.WSAuthEndpoint, - HttpURL: eec.Endpoints.HTTPAuthEndpoint, - } -} - -func (eec *ExternalEthClient) Close() error { - eec.Session.Terminate() - select { - case <-time.After(5 * time.Second): - eec.Session.Kill() - select { - case <-time.After(30 * time.Second): - return errors.New("external client failed to terminate") - case <-eec.Session.Exited: - } - case <-eec.Session.Exited: - } - return nil -} - -func (er *ExternalRunner) Run(t testing.TB) *ExternalEthClient { - if er.BinPath == "" { - t.Error("no external bin path set") - } - - if er.JWTPath == "" { - er.JWTPath = writeDefaultJWT(t) - } - - if er.Genesis == nil { - er.Genesis = &core.Genesis{ - Alloc: types.GenesisAlloc{ - common.Address{1}: types.Account{Balance: big.NewInt(1)}, - }, - Config: params.OptimismTestConfig, - Difficulty: big.NewInt(0), - } - } - - workDir := t.TempDir() - - config := external.Config{ - DataDir: filepath.Join(workDir, "datadir"), - JWTPath: er.JWTPath, - ChainID: er.Genesis.Config.ChainID.Uint64(), - GenesisPath: 
filepath.Join(workDir, "genesis.json"), - EndpointsReadyPath: filepath.Join(workDir, "endpoints.json"), - Verbosity: uint64(config.EthNodeVerbosity), - } - - err := os.Mkdir(config.DataDir, 0o700) - require.NoError(t, err) - - genesisFile, err := os.Create(config.GenesisPath) - require.NoError(t, err) - err = json.NewEncoder(genesisFile).Encode(er.Genesis) - require.NoError(t, err) - - configPath := filepath.Join(workDir, "config.json") - configFile, err := os.Create(configPath) - require.NoError(t, err) - err = json.NewEncoder(configFile).Encode(config) - require.NoError(t, err) - - cmd := exec.Command(er.BinPath, "--config", configPath) - cmd.Dir = filepath.Dir(er.BinPath) - sess, err := gexec.Start( - cmd, - gexec.NewPrefixedWriter("[extout:"+er.Name+"]", os.Stdout), - gexec.NewPrefixedWriter("[exterr:"+er.Name+"]", os.Stderr), - ) - require.NoError(t, err) - - // 2 minutes may seem like a long timeout, and, it definitely is. That - // being said, when running these tests with high parallelism turned on, the - // node startup time can be substantial (remember, this usually is a - // multi-step process initializing the database and then starting the - // client). 
- require.Eventually( - t, - func() bool { - _, err := os.Stat(config.EndpointsReadyPath) - return err == nil - }, - 2*time.Minute, - 10*time.Millisecond, - "external runner did not create ready file at %s within timeout", - config.EndpointsReadyPath, - ) - - readyFile, err := os.Open(config.EndpointsReadyPath) - require.NoError(t, err) - var endpoints external.Endpoints - err = json.NewDecoder(readyFile).Decode(&endpoints) - require.NoError(t, err) - - return &ExternalEthClient{ - Session: sess, - Endpoints: endpoints, - } -} diff --git a/op-e2e/system/e2esys/setup.go b/op-e2e/system/e2esys/setup.go index bc4364de9ad4..d461cda61657 100644 --- a/op-e2e/system/e2esys/setup.go +++ b/op-e2e/system/e2esys/setup.go @@ -83,17 +83,34 @@ var ( genesisTime = hexutil.Uint64(0) ) -func DefaultSystemConfig(t testing.TB) SystemConfig { - config.ExternalL2TestParms.SkipIfNecessary(t) +type SystemConfigOpts struct { + AllocType config.AllocType +} + +type SystemConfigOpt func(s *SystemConfigOpts) + +func WithAllocType(allocType config.AllocType) SystemConfigOpt { + return func(s *SystemConfigOpts) { + s.AllocType = allocType + } +} + +func DefaultSystemConfig(t testing.TB, opts ...SystemConfigOpt) SystemConfig { + sco := &SystemConfigOpts{ + AllocType: config.DefaultAllocType, + } + for _, opt := range opts { + opt(sco) + } secrets, err := e2eutils.DefaultMnemonicConfig.Secrets() require.NoError(t, err) - deployConfig := config.DeployConfig.Copy() + deployConfig := config.DeployConfig(sco.AllocType) deployConfig.L1GenesisBlockTimestamp = hexutil.Uint64(time.Now().Unix()) e2eutils.ApplyDeployConfigForks(deployConfig) require.NoError(t, deployConfig.Check(testlog.Logger(t, log.LevelInfo)), "Deploy config is invalid, do you need to run make devnet-allocs?") - l1Deployments := config.L1Deployments.Copy() + l1Deployments := config.L1Deployments(sco.AllocType) require.NoError(t, l1Deployments.Check(deployConfig)) require.Equal(t, secrets.Addresses().Batcher, 
deployConfig.BatchSenderAddress) @@ -116,6 +133,7 @@ func DefaultSystemConfig(t testing.TB) SystemConfig { JWTSecret: testingJWTSecret, L1FinalizedDistance: 8, // Short, for faster tests. BlobsPath: t.TempDir(), + AllocType: sco.AllocType, Nodes: map[string]*rollupNode.Config{ RoleSeq: { Driver: driver.Config{ @@ -161,15 +179,14 @@ func DefaultSystemConfig(t testing.TB) SystemConfig { GethOptions: map[string][]geth.GethOption{}, P2PTopology: nil, // no P2P connectivity by default NonFinalizedProposals: false, - ExternalL2Shim: config.ExternalL2Shim, DataAvailabilityType: batcherFlags.CalldataType, BatcherMaxPendingTransactions: 1, BatcherTargetNumFrames: 1, } } -func RegolithSystemConfig(t *testing.T, regolithTimeOffset *hexutil.Uint64) SystemConfig { - cfg := DefaultSystemConfig(t) +func RegolithSystemConfig(t *testing.T, regolithTimeOffset *hexutil.Uint64, opts ...SystemConfigOpt) SystemConfig { + cfg := DefaultSystemConfig(t, opts...) cfg.DeployConfig.L2GenesisRegolithTimeOffset = regolithTimeOffset cfg.DeployConfig.L2GenesisCanyonTimeOffset = nil cfg.DeployConfig.L2GenesisDeltaTimeOffset = nil @@ -180,34 +197,34 @@ func RegolithSystemConfig(t *testing.T, regolithTimeOffset *hexutil.Uint64) Syst return cfg } -func CanyonSystemConfig(t *testing.T, canyonTimeOffset *hexutil.Uint64) SystemConfig { - cfg := RegolithSystemConfig(t, &genesisTime) +func CanyonSystemConfig(t *testing.T, canyonTimeOffset *hexutil.Uint64, opts ...SystemConfigOpt) SystemConfig { + cfg := RegolithSystemConfig(t, &genesisTime, opts...) cfg.DeployConfig.L2GenesisCanyonTimeOffset = canyonTimeOffset return cfg } -func DeltaSystemConfig(t *testing.T, deltaTimeOffset *hexutil.Uint64) SystemConfig { - cfg := CanyonSystemConfig(t, &genesisTime) +func DeltaSystemConfig(t *testing.T, deltaTimeOffset *hexutil.Uint64, opts ...SystemConfigOpt) SystemConfig { + cfg := CanyonSystemConfig(t, &genesisTime, opts...) 
cfg.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset return cfg } -func EcotoneSystemConfig(t *testing.T, ecotoneTimeOffset *hexutil.Uint64) SystemConfig { - cfg := DeltaSystemConfig(t, &genesisTime) +func EcotoneSystemConfig(t *testing.T, ecotoneTimeOffset *hexutil.Uint64, opts ...SystemConfigOpt) SystemConfig { + cfg := DeltaSystemConfig(t, &genesisTime, opts...) // from Ecotone onwards, activate L1 Cancun at genesis cfg.DeployConfig.L1CancunTimeOffset = &genesisTime cfg.DeployConfig.L2GenesisEcotoneTimeOffset = ecotoneTimeOffset return cfg } -func FjordSystemConfig(t *testing.T, fjordTimeOffset *hexutil.Uint64) SystemConfig { - cfg := EcotoneSystemConfig(t, &genesisTime) +func FjordSystemConfig(t *testing.T, fjordTimeOffset *hexutil.Uint64, opts ...SystemConfigOpt) SystemConfig { + cfg := EcotoneSystemConfig(t, &genesisTime, opts...) cfg.DeployConfig.L2GenesisFjordTimeOffset = fjordTimeOffset return cfg } -func GraniteSystemConfig(t *testing.T, graniteTimeOffset *hexutil.Uint64) SystemConfig { - cfg := FjordSystemConfig(t, &genesisTime) +func GraniteSystemConfig(t *testing.T, graniteTimeOffset *hexutil.Uint64, opts ...SystemConfigOpt) SystemConfig { + cfg := FjordSystemConfig(t, &genesisTime, opts...) 
cfg.DeployConfig.L2GenesisGraniteTimeOffset = graniteTimeOffset return cfg } @@ -300,6 +317,8 @@ type SystemConfig struct { // SupportL1TimeTravel determines if the L1 node supports quickly skipping forward in time SupportL1TimeTravel bool + + AllocType config.AllocType } type System struct { @@ -437,37 +456,37 @@ func (sys *System) Close() { type SystemConfigHook func(sCfg *SystemConfig, s *System) -type SystemConfigOption struct { +type StartOption struct { Key string Role string Action SystemConfigHook } -type SystemConfigOptions struct { +type startOptions struct { opts map[string]SystemConfigHook } -func NewSystemConfigOptions(_opts []SystemConfigOption) (SystemConfigOptions, error) { +func parseStartOptions(_opts []StartOption) (startOptions, error) { opts := make(map[string]SystemConfigHook) for _, opt := range _opts { if _, ok := opts[opt.Key+":"+opt.Role]; ok { - return SystemConfigOptions{}, fmt.Errorf("duplicate option for key %s and role %s", opt.Key, opt.Role) + return startOptions{}, fmt.Errorf("duplicate option for key %s and role %s", opt.Key, opt.Role) } opts[opt.Key+":"+opt.Role] = opt.Action } - return SystemConfigOptions{ + return startOptions{ opts: opts, }, nil } -func (s *SystemConfigOptions) Get(key, role string) (SystemConfigHook, bool) { +func (s *startOptions) Get(key, role string) (SystemConfigHook, bool) { v, ok := s.opts[key+":"+role] return v, ok } -func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*System, error) { - opts, err := NewSystemConfigOptions(_opts) +func (cfg SystemConfig) Start(t *testing.T, startOpts ...StartOption) (*System, error) { + parsedStartOpts, err := parseStartOptions(startOpts) if err != nil { return nil, err } @@ -493,7 +512,11 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste return nil, err } - l1Genesis, err := genesis.BuildL1DeveloperGenesis(cfg.DeployConfig, config.L1Allocs, config.L1Deployments) + l1Genesis, err := 
genesis.BuildL1DeveloperGenesis( + cfg.DeployConfig, + config.L1Allocs(cfg.AllocType), + config.L1Deployments(cfg.AllocType), + ) if err != nil { return nil, err } @@ -518,7 +541,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste allocsMode := cfg.DeployConfig.AllocMode(l1Block.Time()) t.Log("Generating L2 genesis", "l2_allocs_mode", string(allocsMode)) - l2Allocs := config.L2Allocs(allocsMode) + l2Allocs := config.L2Allocs(cfg.AllocType, allocsMode) l2Genesis, err := genesis.BuildL2Genesis(cfg.DeployConfig, l2Allocs, l1Block.Header()) if err != nil { return nil, err @@ -626,39 +649,22 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste for _, name := range l2Nodes { var ethClient services.EthInstance - if cfg.ExternalL2Shim == "" { - if name != RoleSeq && !cfg.DisableTxForwarder { - cfg.GethOptions[name] = append(cfg.GethOptions[name], func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error { - ethCfg.RollupSequencerHTTP = sys.EthInstances[RoleSeq].UserRPC().RPC() - return nil - }) - } - - l2Geth, err := geth.InitL2(name, l2Genesis, cfg.JWTFilePath, cfg.GethOptions[name]...) - if err != nil { - return nil, err - } - if err := l2Geth.Node.Start(); err != nil { - return nil, err - } - - ethClient = l2Geth - } else { - if len(cfg.GethOptions[name]) > 0 { - t.Skip("External L2 nodes do not support configuration through GethOptions") - } - - if name != RoleSeq && !cfg.DisableTxForwarder { - cfg.Loggers[name].Warn("External L2 nodes do not support `RollupSequencerHTTP` configuration. 
No tx forwarding support.") - } + if name != RoleSeq && !cfg.DisableTxForwarder { + cfg.GethOptions[name] = append(cfg.GethOptions[name], func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error { + ethCfg.RollupSequencerHTTP = sys.EthInstances[RoleSeq].UserRPC().RPC() + return nil + }) + } - ethClient = (&ExternalRunner{ - Name: name, - BinPath: cfg.ExternalL2Shim, - Genesis: l2Genesis, - JWTPath: cfg.JWTFilePath, - }).Run(t) + l2Geth, err := geth.InitL2(name, l2Genesis, cfg.JWTFilePath, cfg.GethOptions[name]...) + if err != nil { + return nil, err } + if err := l2Geth.Node.Start(); err != nil { + return nil, err + } + + ethClient = l2Geth sys.EthInstances[name] = ethClient } @@ -758,7 +764,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste sys.RollupNodes[name] = n - if action, ok := opts.Get("afterRollupNodeStart", name); ok { + if action, ok := parsedStartOpts.Get("afterRollupNodeStart", name); ok { action(&cfg, sys) } } @@ -791,11 +797,11 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste // L2Output Submitter var proposerCLIConfig *l2os.CLIConfig - if e2eutils.UseFaultProofs() { + if cfg.AllocType.UsesProofs() { proposerCLIConfig = &l2os.CLIConfig{ L1EthRpc: sys.EthInstances[RoleL1].UserRPC().RPC(), RollupRpc: sys.RollupNodes[RoleSeq].UserRPC().RPC(), - DGFAddress: config.L1Deployments.DisputeGameFactoryProxy.Hex(), + DGFAddress: config.L1Deployments(cfg.AllocType).DisputeGameFactoryProxy.Hex(), ProposalInterval: 6 * time.Second, DisputeGameType: 254, // Fast game type PollInterval: 500 * time.Millisecond, @@ -810,7 +816,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste proposerCLIConfig = &l2os.CLIConfig{ L1EthRpc: sys.EthInstances[RoleL1].UserRPC().RPC(), RollupRpc: sys.RollupNodes[RoleSeq].UserRPC().RPC(), - L2OOAddress: config.L1Deployments.L2OutputOracleProxy.Hex(), + L2OOAddress: config.L1Deployments(cfg.AllocType).L2OutputOracleProxy.Hex(), 
PollInterval: 500 * time.Millisecond, TxMgrConfig: setuputils.NewTxMgrConfig(sys.EthInstances[RoleL1].UserRPC(), cfg.Secrets.Proposer), AllowNonFinalized: cfg.NonFinalizedProposals, @@ -843,7 +849,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste var compressionAlgo derive.CompressionAlgo = derive.Zlib // if opt has brotli key, set the compression algo as brotli - if _, ok := opts.Get("compressionAlgo", "brotli"); ok { + if _, ok := parsedStartOpts.Get("compressionAlgo", "brotli"); ok { compressionAlgo = derive.Brotli10 } @@ -893,7 +899,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste return nil, fmt.Errorf("failed to setup batch submitter: %w", err) } sys.BatchSubmitter = batcher - if action, ok := opts.Get("beforeBatcherStart", ""); ok { + if action, ok := parsedStartOpts.Get("beforeBatcherStart", ""); ok { action(&cfg, sys) } if err := batcher.Start(context.Background()); err != nil { diff --git a/op-e2e/system/gastoken/gastoken_test.go b/op-e2e/system/gastoken/gastoken_test.go index 839f33634046..4b2e6009c3f2 100644 --- a/op-e2e/system/gastoken/gastoken_test.go +++ b/op-e2e/system/gastoken/gastoken_test.go @@ -6,13 +6,14 @@ import ( "testing" "time" + "github.com/ethereum-optimism/optimism/op-e2e/config" + op_e2e "github.com/ethereum-optimism/optimism/op-e2e" "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" "github.com/ethereum-optimism/optimism/op-e2e/system/helpers" "github.com/ethereum-optimism/optimism/op-e2e/bindings" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/receipts" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" @@ -30,10 +31,17 @@ func TestMain(m *testing.M) { op_e2e.RunMain(m) } -func TestCustomGasToken(t *testing.T) { - op_e2e.InitParallel(t, op_e2e.SkipOnFaultProofs) // Custom Gas Token feature is not yet compatible with 
fault proofs +func TestCustomGasToken_L2OO(t *testing.T) { + testCustomGasToken(t, config.AllocTypeL2OO) +} + +func TestCustomGasToken_Standard(t *testing.T) { + testCustomGasToken(t, config.AllocTypeStandard) +} - cfg := e2esys.DefaultSystemConfig(t) +func testCustomGasToken(t *testing.T, allocType config.AllocType) { + op_e2e.InitParallel(t) + cfg := e2esys.DefaultSystemConfig(t, e2esys.WithAllocType(allocType)) offset := hexutil.Uint64(0) cfg.DeployConfig.L2GenesisRegolithTimeOffset = &offset cfg.DeployConfig.L1CancunTimeOffset = &offset @@ -183,7 +191,7 @@ func TestCustomGasToken(t *testing.T) { proveFee := new(big.Int).Mul(new(big.Int).SetUint64(proveReceipt.GasUsed), proveReceipt.EffectiveGasPrice) finalizeFee := new(big.Int).Mul(new(big.Int).SetUint64(finalizeReceipt.GasUsed), finalizeReceipt.EffectiveGasPrice) fees = new(big.Int).Add(proveFee, finalizeFee) - if e2eutils.UseFaultProofs() { + if allocType.UsesProofs() { resolveClaimFee := new(big.Int).Mul(new(big.Int).SetUint64(resolveClaimReceipt.GasUsed), resolveClaimReceipt.EffectiveGasPrice) resolveFee := new(big.Int).Mul(new(big.Int).SetUint64(resolveReceipt.GasUsed), resolveReceipt.EffectiveGasPrice) fees = new(big.Int).Add(fees, resolveClaimFee) @@ -329,7 +337,7 @@ func TestCustomGasToken(t *testing.T) { proveReceipt, finalizeReceipt, resolveClaimReceipt, resolveReceipt := helpers.ProveAndFinalizeWithdrawal(t, cfg, sys, "verifier", cfg.Secrets.Alice, receipt) require.Equal(t, types.ReceiptStatusSuccessful, proveReceipt.Status) require.Equal(t, types.ReceiptStatusSuccessful, finalizeReceipt.Status) - if e2eutils.UseFaultProofs() { + if allocType.UsesProofs() { require.Equal(t, types.ReceiptStatusSuccessful, resolveClaimReceipt.Status) require.Equal(t, types.ReceiptStatusSuccessful, resolveReceipt.Status) } diff --git a/op-e2e/system/helpers/withdrawal_helper.go b/op-e2e/system/helpers/withdrawal_helper.go index b7d11a63060c..8e763f6670bf 100644 --- a/op-e2e/system/helpers/withdrawal_helper.go +++ 
b/op-e2e/system/helpers/withdrawal_helper.go @@ -16,7 +16,6 @@ import ( gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" "github.com/ethereum-optimism/optimism/op-e2e/config" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" @@ -96,7 +95,14 @@ func defaultWithdrawalTxOpts() *WithdrawalTxOpts { } } -func ProveAndFinalizeWithdrawal(t *testing.T, cfg e2esys.SystemConfig, clients ClientProvider, l2NodeName string, ethPrivKey *ecdsa.PrivateKey, l2WithdrawalReceipt *types.Receipt) (*types.Receipt, *types.Receipt, *types.Receipt, *types.Receipt) { +func ProveAndFinalizeWithdrawal( + t *testing.T, + cfg e2esys.SystemConfig, + clients ClientProvider, + l2NodeName string, + ethPrivKey *ecdsa.PrivateKey, + l2WithdrawalReceipt *types.Receipt, +) (*types.Receipt, *types.Receipt, *types.Receipt, *types.Receipt) { params, proveReceipt := ProveWithdrawal(t, cfg, clients, l2NodeName, ethPrivKey, l2WithdrawalReceipt) finalizeReceipt, resolveClaimReceipt, resolveReceipt := FinalizeWithdrawal(t, cfg, clients.NodeClient("l1"), ethPrivKey, proveReceipt, params) return proveReceipt, finalizeReceipt, resolveClaimReceipt, resolveReceipt @@ -107,14 +113,17 @@ func ProveWithdrawal(t *testing.T, cfg e2esys.SystemConfig, clients ClientProvid ctx, cancel := context.WithTimeout(context.Background(), 40*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second) defer cancel() + allocType := cfg.AllocType + l1Client := clients.NodeClient(e2esys.RoleL1) var blockNumber uint64 var err error - if e2eutils.UseFaultProofs() { - blockNumber, err = wait.ForGamePublished(ctx, l1Client, config.L1Deployments.OptimismPortalProxy, config.L1Deployments.DisputeGameFactoryProxy, l2WithdrawalReceipt.BlockNumber) + l1Deployments := config.L1Deployments(allocType) + if allocType.UsesProofs() { + 
blockNumber, err = wait.ForGamePublished(ctx, l1Client, l1Deployments.OptimismPortalProxy, l1Deployments.DisputeGameFactoryProxy, l2WithdrawalReceipt.BlockNumber) require.NoError(t, err) } else { - blockNumber, err = wait.ForOutputRootPublished(ctx, l1Client, config.L1Deployments.L2OutputOracleProxy, l2WithdrawalReceipt.BlockNumber) + blockNumber, err = wait.ForOutputRootPublished(ctx, l1Client, l1Deployments.L2OutputOracleProxy, l2WithdrawalReceipt.BlockNumber) require.NoError(t, err) } @@ -128,19 +137,19 @@ func ProveWithdrawal(t *testing.T, cfg e2esys.SystemConfig, clients ClientProvid header, err := receiptCl.HeaderByNumber(ctx, new(big.Int).SetUint64(blockNumber)) require.NoError(t, err) - oracle, err := bindings.NewL2OutputOracleCaller(config.L1Deployments.L2OutputOracleProxy, l1Client) + oracle, err := bindings.NewL2OutputOracleCaller(l1Deployments.L2OutputOracleProxy, l1Client) require.NoError(t, err) - factory, err := bindings.NewDisputeGameFactoryCaller(config.L1Deployments.DisputeGameFactoryProxy, l1Client) + factory, err := bindings.NewDisputeGameFactoryCaller(l1Deployments.DisputeGameFactoryProxy, l1Client) require.NoError(t, err) - portal2, err := bindingspreview.NewOptimismPortal2Caller(config.L1Deployments.OptimismPortalProxy, l1Client) + portal2, err := bindingspreview.NewOptimismPortal2Caller(l1Deployments.OptimismPortalProxy, l1Client) require.NoError(t, err) - params, err := ProveWithdrawalParameters(context.Background(), proofCl, receiptCl, blockCl, l2WithdrawalReceipt.TxHash, header, oracle, factory, portal2) + params, err := ProveWithdrawalParameters(context.Background(), proofCl, receiptCl, blockCl, l2WithdrawalReceipt.TxHash, header, oracle, factory, portal2, allocType) require.NoError(t, err) - portal, err := bindings.NewOptimismPortal(config.L1Deployments.OptimismPortalProxy, l1Client) + portal, err := bindings.NewOptimismPortal(l1Deployments.OptimismPortalProxy, l1Client) require.NoError(t, err) opts, err := 
bind.NewKeyedTransactorWithChainID(ethPrivKey, cfg.L1ChainIDBig()) @@ -170,8 +179,8 @@ func ProveWithdrawal(t *testing.T, cfg e2esys.SystemConfig, clients ClientProvid return params, proveReceipt } -func ProveWithdrawalParameters(ctx context.Context, proofCl withdrawals.ProofClient, l2ReceiptCl withdrawals.ReceiptClient, l2BlockCl withdrawals.BlockClient, txHash common.Hash, header *types.Header, l2OutputOracleContract *bindings.L2OutputOracleCaller, disputeGameFactoryContract *bindings.DisputeGameFactoryCaller, optimismPortal2Contract *bindingspreview.OptimismPortal2Caller) (withdrawals.ProvenWithdrawalParameters, error) { - if e2eutils.UseFaultProofs() { +func ProveWithdrawalParameters(ctx context.Context, proofCl withdrawals.ProofClient, l2ReceiptCl withdrawals.ReceiptClient, l2BlockCl withdrawals.BlockClient, txHash common.Hash, header *types.Header, l2OutputOracleContract *bindings.L2OutputOracleCaller, disputeGameFactoryContract *bindings.DisputeGameFactoryCaller, optimismPortal2Contract *bindingspreview.OptimismPortal2Caller, allocType config.AllocType) (withdrawals.ProvenWithdrawalParameters, error) { + if allocType.UsesProofs() { return withdrawals.ProveWithdrawalParametersFaultProofs(ctx, proofCl, l2ReceiptCl, l2BlockCl, txHash, disputeGameFactoryContract, optimismPortal2Contract) } else { return withdrawals.ProveWithdrawalParameters(ctx, proofCl, l2ReceiptCl, l2BlockCl, txHash, header, l2OutputOracleContract) @@ -192,13 +201,16 @@ func FinalizeWithdrawal(t *testing.T, cfg e2esys.SystemConfig, l1Client *ethclie Data: params.Data, } + allocType := cfg.AllocType + opts, err := bind.NewKeyedTransactorWithChainID(privKey, cfg.L1ChainIDBig()) require.NoError(t, err) var resolveClaimReceipt *types.Receipt var resolveReceipt *types.Receipt - if e2eutils.UseFaultProofs() { - portal2, err := bindingspreview.NewOptimismPortal2(config.L1Deployments.OptimismPortalProxy, l1Client) + l1Deployments := config.L1Deployments(allocType) + if allocType.UsesProofs() { + 
portal2, err := bindingspreview.NewOptimismPortal2(l1Deployments.OptimismPortalProxy, l1Client) require.NoError(t, err) wdHash, err := wd.Hash() @@ -245,19 +257,17 @@ func FinalizeWithdrawal(t *testing.T, cfg e2esys.SystemConfig, l1Client *ethclie require.Equal(t, gameTypes.GameStatusDefenderWon, status, "game must have resolved with defender won") t.Logf("resolve was not needed, the game was already resolved") } - } - if e2eutils.UseFaultProofs() { t.Log("FinalizeWithdrawal: waiting for successful withdrawal check...") - err := wait.ForWithdrawalCheck(ctx, l1Client, wd, config.L1Deployments.OptimismPortalProxy, opts.From) + err = wait.ForWithdrawalCheck(ctx, l1Client, wd, l1Deployments.OptimismPortalProxy, opts.From) require.NoError(t, err) } else { t.Log("FinalizeWithdrawal: waiting for finalization...") - err := wait.ForFinalizationPeriod(ctx, l1Client, withdrawalProofReceipt.BlockNumber, config.L1Deployments.L2OutputOracleProxy) + err := wait.ForFinalizationPeriod(ctx, l1Client, withdrawalProofReceipt.BlockNumber, l1Deployments.L2OutputOracleProxy) require.NoError(t, err) } - portal, err := bindings.NewOptimismPortal(config.L1Deployments.OptimismPortalProxy, l1Client) + portal, err := bindings.NewOptimismPortal(l1Deployments.OptimismPortalProxy, l1Client) require.NoError(t, err) // Finalize withdrawal diff --git a/op-e2e/system/p2p/gossip_test.go b/op-e2e/system/p2p/gossip_test.go index 6958bdffcf4f..9204d217f89e 100644 --- a/op-e2e/system/p2p/gossip_test.go +++ b/op-e2e/system/p2p/gossip_test.go @@ -7,19 +7,20 @@ import ( "testing" "time" + "github.com/ethereum-optimism/optimism/op-node/p2p" + "github.com/ethereum-optimism/optimism/op-node/rollup/driver" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/log" + op_e2e "github.com/ethereum-optimism/optimism/op-e2e" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/opnode" "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" 
"github.com/ethereum-optimism/optimism/op-e2e/system/helpers" rollupNode "github.com/ethereum-optimism/optimism/op-node/node" - "github.com/ethereum-optimism/optimism/op-node/p2p" - "github.com/ethereum-optimism/optimism/op-node/rollup/driver" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/retry" - "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" ) diff --git a/op-e2e/system/proofs/proposer_fp_test.go b/op-e2e/system/proofs/proposer_fp_test.go index 4916d9d521a0..6be17baaaf25 100644 --- a/op-e2e/system/proofs/proposer_fp_test.go +++ b/op-e2e/system/proofs/proposer_fp_test.go @@ -8,6 +8,8 @@ import ( op_e2e "github.com/ethereum-optimism/optimism/op-e2e" + "github.com/ethereum-optimism/optimism/op-e2e/config" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts/metrics" "github.com/ethereum-optimism/optimism/op-e2e/bindings" @@ -20,9 +22,8 @@ import ( ) func TestL2OutputSubmitterFaultProofs(t *testing.T) { - op_e2e.InitParallel(t, op_e2e.SkipOnL2OO) - - cfg := e2esys.DefaultSystemConfig(t) + op_e2e.InitParallel(t) + cfg := e2esys.DefaultSystemConfig(t, e2esys.WithAllocType(config.AllocTypeStandard)) cfg.NonFinalizedProposals = true // speed up the time till we see output proposals sys, err := cfg.Start(t) diff --git a/op-e2e/system/proofs/proposer_l2oo_test.go b/op-e2e/system/proofs/proposer_l2oo_test.go index 3b737e0971b5..ccc9da0701c0 100644 --- a/op-e2e/system/proofs/proposer_l2oo_test.go +++ b/op-e2e/system/proofs/proposer_l2oo_test.go @@ -7,6 +7,7 @@ import ( "time" op_e2e "github.com/ethereum-optimism/optimism/op-e2e" + "github.com/ethereum-optimism/optimism/op-e2e/config" "github.com/ethereum-optimism/optimism/op-e2e/bindings" 
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" @@ -16,9 +17,9 @@ import ( ) func TestL2OutputSubmitter(t *testing.T) { - op_e2e.InitParallel(t, op_e2e.SkipOnFaultProofs) + op_e2e.InitParallel(t) - cfg := e2esys.DefaultSystemConfig(t) + cfg := e2esys.DefaultSystemConfig(t, e2esys.WithAllocType(config.AllocTypeL2OO)) cfg.NonFinalizedProposals = true // speed up the time till we see output proposals sys, err := cfg.Start(t) From fed6f3546564af15e85bad0f15baac8303855602 Mon Sep 17 00:00:00 2001 From: smartcontracts Date: Tue, 1 Oct 2024 14:07:44 -0400 Subject: [PATCH 095/116] fix(ci): temporarily make semgrep scan ok on err (#12227) Temporarily makes semgrep-scan not fail on error. Currently this is a problem for external contributions because semgrep-scan loads rules from the online app which requires a login. External contributors don't get access to this login. --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 3d8c25faa5cc..c4da7cb22ebb 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1167,7 +1167,7 @@ jobs: # --timeout (in seconds) limits the time per rule and file. # SEMGREP_TIMEOUT is the same, but docs have conflicting defaults (5s in CLI flag, 1800 in some places) # https://semgrep.dev/docs/troubleshooting/semgrep-app#if-the-job-is-aborted-due-to-taking-too-long - command: semgrep ci --timeout=100 --no-suppress-errors + command: semgrep ci --timeout=100 # If semgrep hangs, stop the scan after 20m, to prevent a useless 5h job no_output_timeout: 20m - notify-failures-on-develop From a2653a38946e697ef4f1a76b62d9d77acd8a8cad Mon Sep 17 00:00:00 2001 From: Inphi Date: Tue, 1 Oct 2024 14:37:18 -0400 Subject: [PATCH 096/116] cannon: 64-bit Refactor (#12029) * cannon: 64-bit Refactor Refactor Cannon codebase to support both 32-bit and 64-bit MIPS emulation while reusing code as much as possible. 
* fix 64-bit test compilation errors * review comments * more review comments * fcntl syscall err for 64-bit * simplify pad check * cannon: sc must store lsb 32 on 64-bits * lint * fix sc 64-bit logic * add TODO state test --- .circleci/config.yml | 34 +- cannon/Makefile | 24 +- cannon/cmd/load_elf.go | 4 +- cannon/cmd/run.go | 20 +- cannon/main.go | 2 +- cannon/mipsevm/arch/arch32.go | 48 +++ cannon/mipsevm/arch/arch64.go | 48 +++ cannon/mipsevm/arch/byteorder.go | 7 + cannon/mipsevm/exec/memory.go | 12 +- cannon/mipsevm/exec/mips_instructions.go | 335 +++++++++++++----- cannon/mipsevm/exec/mips_syscalls.go | 85 +++-- cannon/mipsevm/exec/preimage.go | 16 +- cannon/mipsevm/exec/stack.go | 10 +- cannon/mipsevm/iface.go | 21 +- cannon/mipsevm/memory/memory.go | 140 +++++--- cannon/mipsevm/memory/memory_test.go | 4 +- cannon/mipsevm/memory/page.go | 2 +- cannon/mipsevm/memory/page_test.go | 6 +- cannon/mipsevm/multithreaded/instrumented.go | 7 +- .../multithreaded/instrumented_test.go | 1 - cannon/mipsevm/multithreaded/mips.go | 68 ++-- cannon/mipsevm/multithreaded/stack.go | 12 +- cannon/mipsevm/multithreaded/state.go | 76 ++-- cannon/mipsevm/multithreaded/state_test.go | 49 ++- .../multithreaded/testutil/expectations.go | 48 +-- .../testutil/expectations_test.go | 5 +- .../multithreaded/testutil/mutators.go | 21 +- .../mipsevm/multithreaded/testutil/thread.go | 11 +- cannon/mipsevm/multithreaded/thread.go | 59 +-- cannon/mipsevm/program/load.go | 16 +- cannon/mipsevm/program/metadata.go | 12 +- cannon/mipsevm/program/patch.go | 67 ++-- cannon/mipsevm/singlethreaded/instrumented.go | 8 +- cannon/mipsevm/singlethreaded/mips.go | 26 +- cannon/mipsevm/singlethreaded/state.go | 64 ++-- cannon/mipsevm/singlethreaded/state_test.go | 3 +- .../mipsevm/singlethreaded/testutil/state.go | 21 +- cannon/mipsevm/state.go | 10 +- cannon/mipsevm/tests/evm_common_test.go | 81 ++--- .../mipsevm/tests/evm_multithreaded_test.go | 135 +++---- .../mipsevm/tests/evm_singlethreaded_test.go 
| 31 +- cannon/mipsevm/tests/fuzz_evm_common_test.go | 39 +- .../tests/fuzz_evm_multithreaded_test.go | 4 +- cannon/mipsevm/testutil/mips.go | 7 +- cannon/mipsevm/testutil/rand.go | 19 +- cannon/mipsevm/testutil/state.go | 47 +-- cannon/mipsevm/testutil/vmtests.go | 7 +- cannon/mipsevm/versions/detect.go | 2 +- cannon/mipsevm/versions/detect_test.go | 3 + cannon/mipsevm/versions/state.go | 50 ++- cannon/mipsevm/versions/state_test.go | 4 + cannon/mipsevm/witness.go | 7 +- cannon/multicannon/exec.go | 5 +- cannon/multicannon/run.go | 1 - go.mod | 1 + go.sum | 2 + 56 files changed, 1181 insertions(+), 666 deletions(-) create mode 100644 cannon/mipsevm/arch/arch32.go create mode 100644 cannon/mipsevm/arch/arch64.go create mode 100644 cannon/mipsevm/arch/byteorder.go diff --git a/.circleci/config.yml b/.circleci/config.yml index c4da7cb22ebb..736d615bc058 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -162,6 +162,9 @@ jobs: description: Whether to notify on failure type: boolean default: false + mips64: + type: boolean + default: false resource_class: xlarge steps: - checkout @@ -184,14 +187,29 @@ jobs: command: | make lint working_directory: cannon - - run: - name: Cannon Go tests - command: | - export SKIP_SLOW_TESTS=<> - mkdir -p /testlogs - gotestsum --format=testname --junitfile=/tmp/test-results/cannon.xml --jsonfile=/testlogs/log.json \ - -- -parallel=8 -coverpkg=github.com/ethereum-optimism/optimism/cannon/... -coverprofile=coverage.out ./... - working_directory: cannon + - when: + condition: + not: <> + steps: + - run: + name: Cannon Go 32-bit tests + command: | + export SKIP_SLOW_TESTS=<> + mkdir -p /testlogs + gotestsum --format=testname --junitfile=/tmp/test-results/cannon.xml --jsonfile=/testlogs/log.json \ + -- -parallel=8 -coverpkg=github.com/ethereum-optimism/optimism/cannon/... -coverprofile=coverage.out ./... 
+ working_directory: cannon + - when: + condition: <> + steps: + - run: + name: Cannon Go 64-bit tests + command: | + export SKIP_SLOW_TESTS=<> + mkdir -p /testlogs + gotestsum --format=testname --junitfile=/tmp/test-results/cannon.xml --jsonfile=/testlogs/log.json \ + -- --tags=cannon64 -parallel=8 -coverpkg=github.com/ethereum-optimism/optimism/cannon/... -coverprofile=coverage.out ./... + working_directory: cannon - run: name: upload Cannon coverage command: codecov --verbose --clean --flags cannon-go-tests diff --git a/cannon/Makefile b/cannon/Makefile index 408700613d3d..5376b1b62086 100644 --- a/cannon/Makefile +++ b/cannon/Makefile @@ -15,18 +15,25 @@ endif .DEFAULT_GOAL := cannon -cannon-impl: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/cannon-impl . +cannon32-impl: + env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build --tags=cannon32 -v $(LDFLAGS) -o ./bin/cannon32-impl . -cannon-embeds: cannon-impl - @cp bin/cannon-impl ./multicannon/embeds/cannon-2 - @cp bin/cannon-impl ./multicannon/embeds/cannon-1 +cannon64-impl: + env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build --tags=cannon64 -v $(LDFLAGS) -o ./bin/cannon64-impl . 
+ +cannon-embeds: cannon32-impl cannon64-impl + # singlethreaded-v2 + @cp bin/cannon32-impl ./multicannon/embeds/cannon-2 + # multithreaded + @cp bin/cannon32-impl ./multicannon/embeds/cannon-1 + # 64-bit multithreaded + @cp bin/cannon64-impl ./multicannon/embeds/cannon-3 cannon: cannon-embeds env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/cannon ./multicannon/ clean: - rm -rf bin + rm -rf bin multicannon/embeds/cannon* elf: make -C ./testdata/example elf @@ -84,9 +91,10 @@ fuzz: go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateSyscallCloneMT ./mipsevm/tests .PHONY: \ - cannon \ - cannon-impl \ + cannon32-impl \ + cannon64-impl \ cannon-embeds \ + cannon \ clean \ test \ lint \ diff --git a/cannon/cmd/load_elf.go b/cannon/cmd/load_elf.go index c76626941e9b..7609a3b7091d 100644 --- a/cannon/cmd/load_elf.go +++ b/cannon/cmd/load_elf.go @@ -25,7 +25,7 @@ var ( } LoadELFPathFlag = &cli.PathFlag{ Name: "path", - Usage: "Path to 32-bit big-endian MIPS ELF file", + Usage: "Path to 32/64-bit big-endian MIPS ELF file", TakesFile: true, Required: true, } @@ -80,7 +80,7 @@ func LoadELF(ctx *cli.Context) error { } return program.PatchStack(state) } - case versions.VersionMultiThreaded: + case versions.VersionMultiThreaded, versions.VersionMultiThreaded64: createInitialState = func(f *elf.File) (mipsevm.FPVMState, error) { return program.LoadELF(f, multithreaded.CreateInitialState) } diff --git a/cannon/cmd/run.go b/cannon/cmd/run.go index 6d536eeeceac..b9854082c5a9 100644 --- a/cannon/cmd/run.go +++ b/cannon/cmd/run.go @@ -11,19 +11,19 @@ import ( "strings" "time" + "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/program" "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" "github.com/ethereum-optimism/optimism/cannon/serialize" + preimage 
"github.com/ethereum-optimism/optimism/op-preimage" "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/op-service/jsonutil" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" "github.com/pkg/profile" "github.com/urfave/cli/v2" - - "github.com/ethereum-optimism/optimism/cannon/mipsevm" - "github.com/ethereum-optimism/optimism/cannon/mipsevm/program" - preimage "github.com/ethereum-optimism/optimism/op-preimage" - "github.com/ethereum-optimism/optimism/op-service/jsonutil" ) var ( @@ -128,7 +128,7 @@ type Proof struct { OracleKey hexutil.Bytes `json:"oracle-key,omitempty"` OracleValue hexutil.Bytes `json:"oracle-value,omitempty"` - OracleOffset uint32 `json:"oracle-offset,omitempty"` + OracleOffset arch.Word `json:"oracle-offset,omitempty"` } type rawHint string @@ -288,7 +288,7 @@ func Run(ctx *cli.Context) error { stopAtAnyPreimage := false var stopAtPreimageKeyPrefix []byte - stopAtPreimageOffset := uint32(0) + stopAtPreimageOffset := arch.Word(0) if ctx.IsSet(RunStopAtPreimageFlag.Name) { val := ctx.String(RunStopAtPreimageFlag.Name) parts := strings.Split(val, "@") @@ -297,11 +297,11 @@ func Run(ctx *cli.Context) error { } stopAtPreimageKeyPrefix = common.FromHex(parts[0]) if len(parts) == 2 { - x, err := strconv.ParseUint(parts[1], 10, 32) + x, err := strconv.ParseUint(parts[1], 10, arch.WordSizeBytes) if err != nil { return fmt.Errorf("invalid preimage offset: %w", err) } - stopAtPreimageOffset = uint32(x) + stopAtPreimageOffset = arch.Word(x) } } else { switch ctx.String(RunStopAtPreimageTypeFlag.Name) { @@ -463,7 +463,7 @@ func Run(ctx *cli.Context) error { } lastPreimageKey, lastPreimageValue, lastPreimageOffset := vm.LastPreimage() - if lastPreimageOffset != ^uint32(0) { + if lastPreimageOffset != ^arch.Word(0) { if stopAtAnyPreimage { l.Info("Stopping at preimage read") break diff --git a/cannon/main.go b/cannon/main.go index 
176ce315708f..015aea6317f3 100644 --- a/cannon/main.go +++ b/cannon/main.go @@ -14,7 +14,7 @@ import ( func main() { app := cli.NewApp() - app.Name = "cannon" + app.Name = os.Args[0] app.Usage = "MIPS Fault Proof tool" app.Description = "MIPS Fault Proof tool" app.Commands = []*cli.Command{ diff --git a/cannon/mipsevm/arch/arch32.go b/cannon/mipsevm/arch/arch32.go new file mode 100644 index 000000000000..98a22c8382bc --- /dev/null +++ b/cannon/mipsevm/arch/arch32.go @@ -0,0 +1,48 @@ +//go:build !cannon64 +// +build !cannon64 + +package arch + +import "encoding/binary" + +type ( + // Word differs from the tradditional meaning in MIPS. The type represents the *maximum* architecture specific access length and value sizes. + Word = uint32 + // SignedInteger specifies the maximum signed integer type used for arithmetic. + SignedInteger = int32 +) + +const ( + IsMips32 = true + WordSize = 32 + WordSizeBytes = WordSize >> 3 + PageAddrSize = 12 + PageKeySize = WordSize - PageAddrSize + + MemProofLeafCount = 28 + MemProofSize = MemProofLeafCount * 32 + + AddressMask = 0xFFffFFfc + ExtMask = 0x3 + + HeapStart = 0x05_00_00_00 + HeapEnd = 0x60_00_00_00 + ProgramBreak = 0x40_00_00_00 + HighMemoryStart = 0x7f_ff_d0_00 +) + +var ByteOrderWord = byteOrder32{} + +type byteOrder32 struct{} + +func (bo byteOrder32) Word(b []byte) Word { + return binary.BigEndian.Uint32(b) +} + +func (bo byteOrder32) AppendWord(b []byte, v uint32) []byte { + return binary.BigEndian.AppendUint32(b, v) +} + +func (bo byteOrder32) PutWord(b []byte, v uint32) { + binary.BigEndian.PutUint32(b, v) +} diff --git a/cannon/mipsevm/arch/arch64.go b/cannon/mipsevm/arch/arch64.go new file mode 100644 index 000000000000..e01b44c50bab --- /dev/null +++ b/cannon/mipsevm/arch/arch64.go @@ -0,0 +1,48 @@ +//go:build cannon64 +// +build cannon64 + +package arch + +import "encoding/binary" + +type ( + // Word differs from the tradditional meaning in MIPS. 
The type represents the *maximum* architecture specific access length and value sizes + Word = uint64 + // SignedInteger specifies the maximum signed integer type used for arithmetic. + SignedInteger = int64 +) + +const ( + IsMips32 = false + WordSize = 64 + WordSizeBytes = WordSize >> 3 + PageAddrSize = 12 + PageKeySize = WordSize - PageAddrSize + + MemProofLeafCount = 60 + MemProofSize = MemProofLeafCount * 32 + + AddressMask = 0xFFFFFFFFFFFFFFF8 + ExtMask = 0x7 + + HeapStart = 0x10_00_00_00_00_00_00_00 + HeapEnd = 0x60_00_00_00_00_00_00_00 + ProgramBreak = 0x40_00_00_00_00_00_00_00 + HighMemoryStart = 0x7F_FF_FF_FF_D0_00_00_00 +) + +var ByteOrderWord = byteOrder64{} + +type byteOrder64 struct{} + +func (bo byteOrder64) Word(b []byte) Word { + return binary.BigEndian.Uint64(b) +} + +func (bo byteOrder64) AppendWord(b []byte, v uint64) []byte { + return binary.BigEndian.AppendUint64(b, v) +} + +func (bo byteOrder64) PutWord(b []byte, v uint64) { + binary.BigEndian.PutUint64(b, v) +} diff --git a/cannon/mipsevm/arch/byteorder.go b/cannon/mipsevm/arch/byteorder.go new file mode 100644 index 000000000000..a633d6858864 --- /dev/null +++ b/cannon/mipsevm/arch/byteorder.go @@ -0,0 +1,7 @@ +package arch + +type ByteOrder interface { + Word([]byte) Word + AppendWord([]byte, Word) []byte + PutWord([]byte, Word) +} diff --git a/cannon/mipsevm/exec/memory.go b/cannon/mipsevm/exec/memory.go index 3dea28dce29b..2a0afcbdea3f 100644 --- a/cannon/mipsevm/exec/memory.go +++ b/cannon/mipsevm/exec/memory.go @@ -7,12 +7,12 @@ import ( ) type MemTracker interface { - TrackMemAccess(addr uint32) + TrackMemAccess(addr Word) } type MemoryTrackerImpl struct { memory *memory.Memory - lastMemAccess uint32 + lastMemAccess Word memProofEnabled bool // proof of first unique memory access memProof [memory.MEM_PROOF_SIZE]byte @@ -24,9 +24,9 @@ func NewMemoryTracker(memory *memory.Memory) *MemoryTrackerImpl { return &MemoryTrackerImpl{memory: memory} } -func (m *MemoryTrackerImpl) 
TrackMemAccess(effAddr uint32) { +func (m *MemoryTrackerImpl) TrackMemAccess(effAddr Word) { if m.memProofEnabled && m.lastMemAccess != effAddr { - if m.lastMemAccess != ^uint32(0) { + if m.lastMemAccess != ^Word(0) { panic(fmt.Errorf("unexpected different mem access at %08x, already have access at %08x buffered", effAddr, m.lastMemAccess)) } m.lastMemAccess = effAddr @@ -36,7 +36,7 @@ func (m *MemoryTrackerImpl) TrackMemAccess(effAddr uint32) { // TrackMemAccess2 creates a proof for a memory access following a call to TrackMemAccess // This is used to generate proofs for contiguous memory accesses within the same step -func (m *MemoryTrackerImpl) TrackMemAccess2(effAddr uint32) { +func (m *MemoryTrackerImpl) TrackMemAccess2(effAddr Word) { if m.memProofEnabled && m.lastMemAccess+4 != effAddr { panic(fmt.Errorf("unexpected disjointed mem access at %08x, last memory access is at %08x buffered", effAddr, m.lastMemAccess)) } @@ -46,7 +46,7 @@ func (m *MemoryTrackerImpl) TrackMemAccess2(effAddr uint32) { func (m *MemoryTrackerImpl) Reset(enableProof bool) { m.memProofEnabled = enableProof - m.lastMemAccess = ^uint32(0) + m.lastMemAccess = ^Word(0) } func (m *MemoryTrackerImpl) MemProof() [memory.MEM_PROOF_SIZE]byte { diff --git a/cannon/mipsevm/exec/mips_instructions.go b/cannon/mipsevm/exec/mips_instructions.go index aec14192df93..326a3d4f504f 100644 --- a/cannon/mipsevm/exec/mips_instructions.go +++ b/cannon/mipsevm/exec/mips_instructions.go @@ -1,16 +1,24 @@ package exec import ( + "fmt" + "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/memory" + + // TODO(#12205): MIPS64 port. 
Replace with a custom library + u128 "lukechampine.com/uint128" ) const ( - OpLoadLinked = 0x30 - OpStoreConditional = 0x38 + OpLoadLinked = 0x30 + OpStoreConditional = 0x38 + OpLoadLinked64 = 0x34 + OpStoreConditional64 = 0x3c ) -func GetInstructionDetails(pc uint32, memory *memory.Memory) (insn, opcode, fun uint32) { +func GetInstructionDetails(pc Word, memory *memory.Memory) (insn, opcode, fun uint32) { insn = memory.GetMemory(pc) opcode = insn >> 26 // First 6-bits fun = insn & 0x3f // Last 6-bits @@ -18,47 +26,53 @@ func GetInstructionDetails(pc uint32, memory *memory.Memory) (insn, opcode, fun return insn, opcode, fun } -func ExecMipsCoreStepLogic(cpu *mipsevm.CpuScalars, registers *[32]uint32, memory *memory.Memory, insn, opcode, fun uint32, memTracker MemTracker, stackTracker StackTracker) (memUpdated bool, memAddr uint32, err error) { +func ExecMipsCoreStepLogic(cpu *mipsevm.CpuScalars, registers *[32]Word, memory *memory.Memory, insn, opcode, fun uint32, memTracker MemTracker, stackTracker StackTracker) (memUpdated bool, memAddr Word, err error) { // j-type j/jal if opcode == 2 || opcode == 3 { - linkReg := uint32(0) + linkReg := Word(0) if opcode == 3 { linkReg = 31 } - // Take top 4 bits of the next PC (its 256 MB region), and concatenate with the 26-bit offset - target := (cpu.NextPC & 0xF0000000) | ((insn & 0x03FFFFFF) << 2) + // Take the top bits of the next PC (its 256 MB region), and concatenate with the 26-bit offset + target := (cpu.NextPC & SignExtend(0xF0000000, 32)) | Word((insn&0x03FFFFFF)<<2) stackTracker.PushStack(cpu.PC, target) err = HandleJump(cpu, registers, linkReg, target) return } // register fetch - rs := uint32(0) // source register 1 value - rt := uint32(0) // source register 2 / temp value - rtReg := (insn >> 16) & 0x1F + rs := Word(0) // source register 1 value + rt := Word(0) // source register 2 / temp value + rtReg := Word((insn >> 16) & 0x1F) // R-type or I-type (stores rt) rs = registers[(insn>>21)&0x1F] rdReg := rtReg - if 
opcode == 0 || opcode == 0x1c { + if opcode == 0x27 || opcode == 0x1A || opcode == 0x1B { // 64-bit opcodes lwu, ldl, ldr + assertMips64(insn) + // store rt value with store + rt = registers[rtReg] + // store actual rt with lwu, ldl and ldr + rdReg = rtReg + } else if opcode == 0 || opcode == 0x1c { // R-type (stores rd) rt = registers[rtReg] - rdReg = (insn >> 11) & 0x1F + rdReg = Word((insn >> 11) & 0x1F) } else if opcode < 0x20 { // rt is SignExtImm // don't sign extend for andi, ori, xori if opcode == 0xC || opcode == 0xD || opcode == 0xe { // ZeroExtImm - rt = insn & 0xFFFF + rt = Word(insn & 0xFFFF) } else { // SignExtImm - rt = SignExtend(insn&0xFFFF, 16) + rt = SignExtendImmediate(insn) } } else if opcode >= 0x28 || opcode == 0x22 || opcode == 0x26 { // store rt value with store rt = registers[rtReg] - // store actual rt with lwl and lwr + // store actual rt with lwl, ldl, and lwr rdReg = rtReg } @@ -67,30 +81,39 @@ func ExecMipsCoreStepLogic(cpu *mipsevm.CpuScalars, registers *[32]uint32, memor return } - storeAddr := uint32(0xFF_FF_FF_FF) + storeAddr := ^Word(0) // memory fetch (all I-type) // we do the load for stores also - mem := uint32(0) + mem := Word(0) if opcode >= 0x20 { // M[R[rs]+SignExtImm] - rs += SignExtend(insn&0xFFFF, 16) - addr := rs & 0xFFFFFFFC + rs += SignExtendImmediate(insn) + addr := rs & arch.AddressMask memTracker.TrackMemAccess(addr) - mem = memory.GetMemory(addr) + mem = memory.GetWord(addr) if opcode >= 0x28 { - // store - storeAddr = addr - // store opcodes don't write back to a register - rdReg = 0 + // store for 32-bit + // for 64-bit: ld (0x37) is the only non-store opcode >= 0x28 + // SAFETY: On 32-bit mode, 0x37 will be considered an invalid opcode by ExecuteMipsInstruction + if arch.IsMips32 || opcode != 0x37 { + // store + storeAddr = addr + // store opcodes don't write back to a register + rdReg = 0 + } } } // ALU val := ExecuteMipsInstruction(insn, opcode, fun, rs, rt, mem) - if opcode == 0 && fun >= 8 && fun < 0x1c { 
+ funSel := uint32(0x1c) + if !arch.IsMips32 { + funSel = 0x20 + } + if opcode == 0 && fun >= 8 && fun < funSel { if fun == 8 || fun == 9 { // jr/jalr - linkReg := uint32(0) + linkReg := Word(0) if fun == 9 { linkReg = rdReg stackTracker.PushStack(cpu.PC, rs) @@ -112,16 +135,16 @@ func ExecMipsCoreStepLogic(cpu *mipsevm.CpuScalars, registers *[32]uint32, memor // lo and hi registers // can write back - if fun >= 0x10 && fun < 0x1c { + if fun >= 0x10 && fun < funSel { err = HandleHiLo(cpu, registers, fun, rs, rt, rdReg) return } } // write memory - if storeAddr != 0xFF_FF_FF_FF { + if storeAddr != ^Word(0) { memTracker.TrackMemAccess(storeAddr) - memory.SetMemory(storeAddr, val) + memory.SetWord(storeAddr, val) memUpdated = true memAddr = storeAddr } @@ -131,12 +154,24 @@ func ExecMipsCoreStepLogic(cpu *mipsevm.CpuScalars, registers *[32]uint32, memor return } -func SignExtendImmediate(insn uint32) uint32 { - return SignExtend(insn&0xFFFF, 16) +func SignExtendImmediate(insn uint32) Word { + return SignExtend(Word(insn&0xFFFF), 16) +} + +func assertMips64(insn uint32) { + if arch.IsMips32 { + panic(fmt.Sprintf("invalid instruction: %x", insn)) + } +} + +func assertMips64Fun(fun uint32) { + if arch.IsMips32 { + panic(fmt.Sprintf("invalid instruction func: %x", fun)) + } } -func ExecuteMipsInstruction(insn, opcode, fun, rs, rt, mem uint32) uint32 { - if opcode == 0 || (opcode >= 8 && opcode < 0xF) { +func ExecuteMipsInstruction(insn uint32, opcode uint32, fun uint32, rs, rt, mem Word) Word { + if opcode == 0 || (opcode >= 8 && opcode < 0xF) || (!arch.IsMips32 && (opcode == 0x18 || opcode == 0x19)) { // transform ArithLogI to SPECIAL switch opcode { case 8: @@ -153,24 +188,28 @@ func ExecuteMipsInstruction(insn, opcode, fun, rs, rt, mem uint32) uint32 { fun = 0x25 // ori case 0xE: fun = 0x26 // xori + case 0x18: + fun = 0x2c // daddi + case 0x19: + fun = 0x2d // daddiu } switch fun { case 0x00: // sll - return rt << ((insn >> 6) & 0x1F) + return 
SignExtend((rt&0xFFFFFFFF)<<((insn>>6)&0x1F), 32) case 0x02: // srl - return rt >> ((insn >> 6) & 0x1F) + return SignExtend((rt&0xFFFFFFFF)>>((insn>>6)&0x1F), 32) case 0x03: // sra - shamt := (insn >> 6) & 0x1F - return SignExtend(rt>>shamt, 32-shamt) + shamt := Word((insn >> 6) & 0x1F) + return SignExtend((rt&0xFFFFFFFF)>>shamt, 32-shamt) case 0x04: // sllv - return rt << (rs & 0x1F) + return SignExtend((rt&0xFFFFFFFF)<<(rs&0x1F), 32) case 0x06: // srlv - return rt >> (rs & 0x1F) + return SignExtend((rt&0xFFFFFFFF)>>(rs&0x1F), 32) case 0x07: // srav - shamt := rs & 0x1F - return SignExtend(rt>>shamt, 32-shamt) - // functs in range [0x8, 0x1b] are handled specially by other functions + shamt := Word(rs & 0x1F) + return SignExtend((rt&0xFFFFFFFF)>>shamt, 32-shamt) + // functs in range [0x8, 0x1b] for 32-bit and [0x8, 0x1f] for 64-bit are handled specially by other functions case 0x08: // jr return rs case 0x09: // jalr @@ -192,6 +231,15 @@ func ExecuteMipsInstruction(insn, opcode, fun, rs, rt, mem uint32) uint32 { return rs case 0x13: // mtlo return rs + case 0x14: // dsllv + assertMips64(insn) + return rt + case 0x16: // dsrlv + assertMips64(insn) + return rt + case 0x17: // dsrav + assertMips64(insn) + return rt case 0x18: // mult return rs case 0x19: // multu @@ -200,15 +248,27 @@ func ExecuteMipsInstruction(insn, opcode, fun, rs, rt, mem uint32) uint32 { return rs case 0x1b: // divu return rs + case 0x1C: // dmult + assertMips64(insn) + return rs + case 0x1D: // dmultu + assertMips64(insn) + return rs + case 0x1E: // ddiv + assertMips64(insn) + return rs + case 0x1F: // ddivu + assertMips64(insn) + return rs // The rest includes transformed R-type arith imm instructions case 0x20: // add - return rs + rt + return SignExtend(Word(int32(rs)+int32(rt)), 32) case 0x21: // addu - return rs + rt + return SignExtend(Word(uint32(rs)+uint32(rt)), 32) case 0x22: // sub - return rs - rt + return SignExtend(Word(int32(rs)-int32(rt)), 32) case 0x23: // subu - return rs - rt 
+ return SignExtend(Word(uint32(rs)-uint32(rt)), 32) case 0x24: // and return rs & rt case 0x25: // or @@ -218,7 +278,7 @@ func ExecuteMipsInstruction(insn, opcode, fun, rs, rt, mem uint32) uint32 { case 0x27: // nor return ^(rs | rt) case 0x2a: // slti - if int32(rs) < int32(rt) { + if arch.SignedInteger(rs) < arch.SignedInteger(rt) { return 1 } return 0 @@ -227,8 +287,38 @@ func ExecuteMipsInstruction(insn, opcode, fun, rs, rt, mem uint32) uint32 { return 1 } return 0 + case 0x2c: // dadd + assertMips64(insn) + return rs + rt + case 0x2d: // daddu + assertMips64(insn) + return rs + rt + case 0x2e: // dsub + assertMips64(insn) + return rs - rt + case 0x2f: // dsubu + assertMips64(insn) + return rs - rt + case 0x38: // dsll + assertMips64(insn) + return rt << ((insn >> 6) & 0x1f) + case 0x3A: // dsrl + assertMips64(insn) + return rt >> ((insn >> 6) & 0x1f) + case 0x3B: // dsra + assertMips64(insn) + return Word(int64(rt) >> ((insn >> 6) & 0x1f)) + case 0x3C: // dsll32 + assertMips64(insn) + return rt << (((insn >> 6) & 0x1f) + 32) + case 0x3E: // dsll32 + assertMips64(insn) + return rt >> (((insn >> 6) & 0x1f) + 32) + case 0x3F: // dsll32 + assertMips64(insn) + return Word(int64(rt) >> (((insn >> 6) & 0x1f) + 32)) default: - panic("invalid instruction") + panic(fmt.Sprintf("invalid instruction: %x", insn)) } } else { switch opcode { @@ -236,7 +326,7 @@ func ExecuteMipsInstruction(insn, opcode, fun, rs, rt, mem uint32) uint32 { case 0x1C: switch fun { case 0x2: // mul - return uint32(int32(rs) * int32(rt)) + return SignExtend(Word(int32(rs)*int32(rt)), 32) case 0x20, 0x21: // clz, clo if fun == 0x20 { rs = ^rs @@ -245,45 +335,98 @@ func ExecuteMipsInstruction(insn, opcode, fun, rs, rt, mem uint32) uint32 { for ; rs&0x80000000 != 0; i++ { rs <<= 1 } - return i + return Word(i) } case 0x0F: // lui - return rt << 16 + return SignExtend(rt<<16, 32) case 0x20: // lb - return SignExtend((mem>>(24-(rs&3)*8))&0xFF, 8) + msb := uint32(arch.WordSize - 8) // 24 for 32-bit and 
56 for 64-bit + return SignExtend((mem>>(msb-uint32(rs&arch.ExtMask)*8))&0xFF, 8) case 0x21: // lh - return SignExtend((mem>>(16-(rs&2)*8))&0xFFFF, 16) + msb := uint32(arch.WordSize - 16) // 16 for 32-bit and 48 for 64-bit + mask := Word(arch.ExtMask - 1) + return SignExtend((mem>>(msb-uint32(rs&mask)*8))&0xFFFF, 16) case 0x22: // lwl val := mem << ((rs & 3) * 8) - mask := uint32(0xFFFFFFFF) << ((rs & 3) * 8) - return (rt & ^mask) | val + mask := Word(uint32(0xFFFFFFFF) << ((rs & 3) * 8)) + return SignExtend(((rt & ^mask)|val)&0xFFFFFFFF, 32) case 0x23: // lw + // TODO(#12205): port to MIPS64 return mem + //return SignExtend((mem>>(32-((rs&0x4)<<3)))&0xFFFFFFFF, 32) case 0x24: // lbu - return (mem >> (24 - (rs&3)*8)) & 0xFF + msb := uint32(arch.WordSize - 8) // 24 for 32-bit and 56 for 64-bit + return (mem >> (msb - uint32(rs&arch.ExtMask)*8)) & 0xFF case 0x25: // lhu - return (mem >> (16 - (rs&2)*8)) & 0xFFFF + msb := uint32(arch.WordSize - 16) // 16 for 32-bit and 48 for 64-bit + mask := Word(arch.ExtMask - 1) + return (mem >> (msb - uint32(rs&mask)*8)) & 0xFFFF case 0x26: // lwr val := mem >> (24 - (rs&3)*8) - mask := uint32(0xFFFFFFFF) >> (24 - (rs&3)*8) - return (rt & ^mask) | val + mask := Word(uint32(0xFFFFFFFF) >> (24 - (rs&3)*8)) + return SignExtend(((rt & ^mask)|val)&0xFFFFFFFF, 32) case 0x28: // sb - val := (rt & 0xFF) << (24 - (rs&3)*8) - mask := 0xFFFFFFFF ^ uint32(0xFF<<(24-(rs&3)*8)) + msb := uint32(arch.WordSize - 8) // 24 for 32-bit and 56 for 64-bit + val := (rt & 0xFF) << (msb - uint32(rs&arch.ExtMask)*8) + mask := ^Word(0) ^ Word(0xFF<<(msb-uint32(rs&arch.ExtMask)*8)) return (mem & mask) | val case 0x29: // sh - val := (rt & 0xFFFF) << (16 - (rs&2)*8) - mask := 0xFFFFFFFF ^ uint32(0xFFFF<<(16-(rs&2)*8)) + msb := uint32(arch.WordSize - 16) // 16 for 32-bit and 48 for 64-bit + rsMask := Word(arch.ExtMask - 1) // 2 for 32-bit and 6 for 64-bit + sl := msb - uint32(rs&rsMask)*8 + val := (rt & 0xFFFF) << sl + mask := ^Word(0) ^ Word(0xFFFF<> ((rs & 3) 
* 8) mask := uint32(0xFFFFFFFF) >> ((rs & 3) * 8) - return (mem & ^mask) | val + return (mem & Word(^mask)) | val case 0x2b: // sw + // TODO(#12205): port to MIPS64 return rt case 0x2e: // swr + // TODO(#12205): port to MIPS64 val := rt << (24 - (rs&3)*8) mask := uint32(0xFFFFFFFF) << (24 - (rs&3)*8) + return (mem & Word(^mask)) | val + + // MIPS64 + case 0x1A: // ldl + assertMips64(insn) + sl := (rs & 0x7) << 3 + val := mem << sl + mask := ^Word(0) << sl + return val | (rt & ^mask) + case 0x1B: // ldr + assertMips64(insn) + sr := 56 - ((rs & 0x7) << 3) + val := mem >> sr + mask := ^Word(0) << (64 - sr) + return val | (rt & mask) + case 0x27: // lwu + assertMips64(insn) + return (mem >> (32 - ((rs & 0x4) << 3))) & 0xFFFFFFFF + case 0x2C: // sdl + assertMips64(insn) + sr := (rs & 0x7) << 3 + val := rt >> sr + mask := ^Word(0) >> sr + return val | (mem & ^mask) + case 0x2D: // sdr + assertMips64(insn) + sl := 56 - ((rs & 0x7) << 3) + val := rt << sl + mask := ^Word(0) << sl + return val | (mem & ^mask) + case 0x37: // ld + assertMips64(insn) + return mem + case 0x3F: // sd + assertMips64(insn) + sl := (rs & 0x7) << 3 + val := rt << sl + mask := ^Word(0) << sl return (mem & ^mask) | val default: panic("invalid instruction") @@ -292,10 +435,10 @@ func ExecuteMipsInstruction(insn, opcode, fun, rs, rt, mem uint32) uint32 { panic("invalid instruction") } -func SignExtend(dat uint32, idx uint32) uint32 { +func SignExtend(dat Word, idx Word) Word { isSigned := (dat >> (idx - 1)) != 0 - signed := ((uint32(1) << (32 - idx)) - 1) << idx - mask := (uint32(1) << idx) - 1 + signed := ((Word(1) << (arch.WordSize - idx)) - 1) << idx + mask := (Word(1) << idx) - 1 if isSigned { return dat&mask | signed } else { @@ -303,7 +446,7 @@ func SignExtend(dat uint32, idx uint32) uint32 { } } -func HandleBranch(cpu *mipsevm.CpuScalars, registers *[32]uint32, opcode uint32, insn uint32, rtReg uint32, rs uint32) error { +func HandleBranch(cpu *mipsevm.CpuScalars, registers *[32]Word, opcode 
uint32, insn uint32, rtReg Word, rs Word) error { if cpu.NextPC != cpu.PC+4 { panic("branch in delay slot") } @@ -313,9 +456,9 @@ func HandleBranch(cpu *mipsevm.CpuScalars, registers *[32]uint32, opcode uint32, rt := registers[rtReg] shouldBranch = (rs == rt && opcode == 4) || (rs != rt && opcode == 5) } else if opcode == 6 { - shouldBranch = int32(rs) <= 0 // blez + shouldBranch = arch.SignedInteger(rs) <= 0 // blez } else if opcode == 7 { - shouldBranch = int32(rs) > 0 // bgtz + shouldBranch = arch.SignedInteger(rs) > 0 // bgtz } else if opcode == 1 { // regimm rtv := (insn >> 16) & 0x1F @@ -330,15 +473,15 @@ func HandleBranch(cpu *mipsevm.CpuScalars, registers *[32]uint32, opcode uint32, prevPC := cpu.PC cpu.PC = cpu.NextPC // execute the delay slot first if shouldBranch { - cpu.NextPC = prevPC + 4 + (SignExtend(insn&0xFFFF, 16) << 2) // then continue with the instruction the branch jumps to. + cpu.NextPC = prevPC + 4 + (SignExtend(Word(insn&0xFFFF), 16) << 2) // then continue with the instruction the branch jumps to. 
} else { cpu.NextPC = cpu.NextPC + 4 // branch not taken } return nil } -func HandleHiLo(cpu *mipsevm.CpuScalars, registers *[32]uint32, fun uint32, rs uint32, rt uint32, storeReg uint32) error { - val := uint32(0) +func HandleHiLo(cpu *mipsevm.CpuScalars, registers *[32]Word, fun uint32, rs Word, rt Word, storeReg Word) error { + val := Word(0) switch fun { case 0x10: // mfhi val = cpu.HI @@ -350,16 +493,44 @@ func HandleHiLo(cpu *mipsevm.CpuScalars, registers *[32]uint32, fun uint32, rs u cpu.LO = rs case 0x18: // mult acc := uint64(int64(int32(rs)) * int64(int32(rt))) - cpu.HI = uint32(acc >> 32) - cpu.LO = uint32(acc) + cpu.HI = SignExtend(Word(acc>>32), 32) + cpu.LO = SignExtend(Word(uint32(acc)), 32) case 0x19: // multu - acc := uint64(uint64(rs) * uint64(rt)) - cpu.HI = uint32(acc >> 32) - cpu.LO = uint32(acc) + acc := uint64(uint32(rs)) * uint64(uint32(rt)) + cpu.HI = SignExtend(Word(acc>>32), 32) + cpu.LO = SignExtend(Word(uint32(acc)), 32) case 0x1a: // div - cpu.HI = uint32(int32(rs) % int32(rt)) - cpu.LO = uint32(int32(rs) / int32(rt)) + cpu.HI = SignExtend(Word(int32(rs)%int32(rt)), 32) + cpu.LO = SignExtend(Word(int32(rs)/int32(rt)), 32) case 0x1b: // divu + cpu.HI = SignExtend(Word(uint32(rs)%uint32(rt)), 32) + cpu.LO = SignExtend(Word(uint32(rs)/uint32(rt)), 32) + case 0x14: // dsllv + assertMips64Fun(fun) + val = rt << (rs & 0x3F) + case 0x16: // dsrlv + assertMips64Fun(fun) + val = rt >> (rs & 0x3F) + case 0x17: // dsrav + assertMips64Fun(fun) + val = Word(int64(rt) >> (rs & 0x3F)) + case 0x1c: // dmult + // TODO(#12205): port to MIPS64. 
Is signed multiply needed for dmult + assertMips64Fun(fun) + acc := u128.From64(uint64(rs)).Mul(u128.From64(uint64(rt))) + cpu.HI = Word(acc.Hi) + cpu.LO = Word(acc.Lo) + case 0x1d: // dmultu + assertMips64Fun(fun) + acc := u128.From64(uint64(rs)).Mul(u128.From64(uint64(rt))) + cpu.HI = Word(acc.Hi) + cpu.LO = Word(acc.Lo) + case 0x1e: // ddiv + assertMips64Fun(fun) + cpu.HI = Word(int64(rs) % int64(rt)) + cpu.LO = Word(int64(rs) / int64(rt)) + case 0x1f: // ddivu + assertMips64Fun(fun) cpu.HI = rs % rt cpu.LO = rs / rt } @@ -373,7 +544,7 @@ func HandleHiLo(cpu *mipsevm.CpuScalars, registers *[32]uint32, fun uint32, rs u return nil } -func HandleJump(cpu *mipsevm.CpuScalars, registers *[32]uint32, linkReg uint32, dest uint32) error { +func HandleJump(cpu *mipsevm.CpuScalars, registers *[32]Word, linkReg Word, dest Word) error { if cpu.NextPC != cpu.PC+4 { panic("jump in delay slot") } @@ -386,7 +557,7 @@ func HandleJump(cpu *mipsevm.CpuScalars, registers *[32]uint32, linkReg uint32, return nil } -func HandleRd(cpu *mipsevm.CpuScalars, registers *[32]uint32, storeReg uint32, val uint32, conditional bool) error { +func HandleRd(cpu *mipsevm.CpuScalars, registers *[32]Word, storeReg Word, val Word, conditional bool) error { if storeReg >= 32 { panic("invalid register") } diff --git a/cannon/mipsevm/exec/mips_syscalls.go b/cannon/mipsevm/exec/mips_syscalls.go index 57df29d760ac..abb186266b71 100644 --- a/cannon/mipsevm/exec/mips_syscalls.go +++ b/cannon/mipsevm/exec/mips_syscalls.go @@ -8,10 +8,18 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/memory" "github.com/ethereum-optimism/optimism/cannon/mipsevm/program" ) +type Word = arch.Word + +const ( + AddressMask = arch.AddressMask +) + +// TODO(#12205): redefine syscalls for MIPS64 // Syscall codes const ( SysMmap = 4090 @@ -79,7 +87,7 
@@ const ( // Errors const ( - SysErrorSignal = ^uint32(0) + SysErrorSignal = ^Word(0) MipsEBADF = 0x9 MipsEINVAL = 0x16 MipsEAGAIN = 0xb @@ -92,7 +100,7 @@ const ( FutexWakePrivate = 129 FutexTimeoutSteps = 10_000 FutexNoTimeout = ^uint64(0) - FutexEmptyAddr = ^uint32(0) + FutexEmptyAddr = ^Word(0) ) // SysClone flags @@ -145,7 +153,7 @@ const ( ClockGettimeMonotonicFlag = 1 ) -func GetSyscallArgs(registers *[32]uint32) (syscallNum, a0, a1, a2, a3 uint32) { +func GetSyscallArgs(registers *[32]Word) (syscallNum, a0, a1, a2, a3 Word) { syscallNum = registers[2] // v0 a0 = registers[4] @@ -156,8 +164,8 @@ func GetSyscallArgs(registers *[32]uint32) (syscallNum, a0, a1, a2, a3 uint32) { return syscallNum, a0, a1, a2, a3 } -func HandleSysMmap(a0, a1, heap uint32) (v0, v1, newHeap uint32) { - v1 = uint32(0) +func HandleSysMmap(a0, a1, heap Word) (v0, v1, newHeap Word) { + v1 = Word(0) newHeap = heap sz := a1 @@ -182,34 +190,41 @@ func HandleSysMmap(a0, a1, heap uint32) (v0, v1, newHeap uint32) { return v0, v1, newHeap } -func HandleSysRead(a0, a1, a2 uint32, preimageKey [32]byte, preimageOffset uint32, preimageReader PreimageReader, memory *memory.Memory, memTracker MemTracker) (v0, v1, newPreimageOffset uint32, memUpdated bool, memAddr uint32) { +func HandleSysRead( + a0, a1, a2 Word, + preimageKey [32]byte, + preimageOffset Word, + preimageReader PreimageReader, + memory *memory.Memory, + memTracker MemTracker, +) (v0, v1, newPreimageOffset Word, memUpdated bool, memAddr Word) { // args: a0 = fd, a1 = addr, a2 = count // returns: v0 = read, v1 = err code - v0 = uint32(0) - v1 = uint32(0) + v0 = Word(0) + v1 = Word(0) newPreimageOffset = preimageOffset switch a0 { case FdStdin: // leave v0 and v1 zero: read nothing, no error case FdPreimageRead: // pre-image oracle - effAddr := a1 & 0xFFffFFfc + effAddr := a1 & AddressMask memTracker.TrackMemAccess(effAddr) - mem := memory.GetMemory(effAddr) + mem := memory.GetWord(effAddr) dat, datLen := 
preimageReader.ReadPreimage(preimageKey, preimageOffset) //fmt.Printf("reading pre-image data: addr: %08x, offset: %d, datLen: %d, data: %x, key: %s count: %d\n", a1, preimageOffset, datLen, dat[:datLen], preimageKey, a2) - alignment := a1 & 3 - space := 4 - alignment + alignment := a1 & arch.ExtMask + space := arch.WordSizeBytes - alignment if space < datLen { datLen = space } if a2 < datLen { datLen = a2 } - var outMem [4]byte - binary.BigEndian.PutUint32(outMem[:], mem) + var outMem [arch.WordSizeBytes]byte + arch.ByteOrderWord.PutWord(outMem[:], mem) copy(outMem[alignment:], dat[:datLen]) - memory.SetMemory(effAddr, binary.BigEndian.Uint32(outMem[:])) + memory.SetWord(effAddr, arch.ByteOrderWord.Word(outMem[:])) memUpdated = true memAddr = effAddr newPreimageOffset += datLen @@ -219,17 +234,25 @@ func HandleSysRead(a0, a1, a2 uint32, preimageKey [32]byte, preimageOffset uint3 // don't actually read into memory, just say we read it all, we ignore the result anyway v0 = a2 default: - v0 = 0xFFffFFff + v0 = ^Word(0) v1 = MipsEBADF } return v0, v1, newPreimageOffset, memUpdated, memAddr } -func HandleSysWrite(a0, a1, a2 uint32, lastHint hexutil.Bytes, preimageKey [32]byte, preimageOffset uint32, oracle mipsevm.PreimageOracle, memory *memory.Memory, memTracker MemTracker, stdOut, stdErr io.Writer) (v0, v1 uint32, newLastHint hexutil.Bytes, newPreimageKey common.Hash, newPreimageOffset uint32) { +func HandleSysWrite(a0, a1, a2 Word, + lastHint hexutil.Bytes, + preimageKey [32]byte, + preimageOffset Word, + oracle mipsevm.PreimageOracle, + memory *memory.Memory, + memTracker MemTracker, + stdOut, stdErr io.Writer, +) (v0, v1 Word, newLastHint hexutil.Bytes, newPreimageKey common.Hash, newPreimageOffset Word) { // args: a0 = fd, a1 = addr, a2 = count // returns: v0 = written, v1 = err code - v1 = uint32(0) + v1 = Word(0) newLastHint = lastHint newPreimageKey = preimageKey newPreimageOffset = preimageOffset @@ -257,41 +280,41 @@ func HandleSysWrite(a0, a1, a2 uint32, 
lastHint hexutil.Bytes, preimageKey [32]b newLastHint = lastHint v0 = a2 case FdPreimageWrite: - effAddr := a1 & 0xFFffFFfc + effAddr := a1 & arch.AddressMask memTracker.TrackMemAccess(effAddr) - mem := memory.GetMemory(effAddr) + mem := memory.GetWord(effAddr) key := preimageKey - alignment := a1 & 3 - space := 4 - alignment + alignment := a1 & arch.ExtMask + space := arch.WordSizeBytes - alignment if space < a2 { a2 = space } copy(key[:], key[a2:]) - var tmp [4]byte - binary.BigEndian.PutUint32(tmp[:], mem) + var tmp [arch.WordSizeBytes]byte + arch.ByteOrderWord.PutWord(tmp[:], mem) copy(key[32-a2:], tmp[alignment:]) newPreimageKey = key newPreimageOffset = 0 //fmt.Printf("updating pre-image key: %s\n", m.state.PreimageKey) v0 = a2 default: - v0 = 0xFFffFFff + v0 = ^Word(0) v1 = MipsEBADF } return v0, v1, newLastHint, newPreimageKey, newPreimageOffset } -func HandleSysFcntl(a0, a1 uint32) (v0, v1 uint32) { +func HandleSysFcntl(a0, a1 Word) (v0, v1 Word) { // args: a0 = fd, a1 = cmd - v1 = uint32(0) + v1 = Word(0) if a1 == 1 { // F_GETFD: get file descriptor flags switch a0 { case FdStdin, FdStdout, FdStderr, FdPreimageRead, FdHintRead, FdPreimageWrite, FdHintWrite: v0 = 0 // No flags set default: - v0 = 0xFFffFFff + v0 = ^Word(0) v1 = MipsEBADF } } else if a1 == 3 { // F_GETFL: get file status flags @@ -301,18 +324,18 @@ func HandleSysFcntl(a0, a1 uint32) (v0, v1 uint32) { case FdStdout, FdStderr, FdPreimageWrite, FdHintWrite: v0 = 1 // O_WRONLY default: - v0 = 0xFFffFFff + v0 = ^Word(0) v1 = MipsEBADF } } else { - v0 = 0xFFffFFff + v0 = ^Word(0) v1 = MipsEINVAL // cmd not recognized by this kernel } return v0, v1 } -func HandleSyscallUpdates(cpu *mipsevm.CpuScalars, registers *[32]uint32, v0, v1 uint32) { +func HandleSyscallUpdates(cpu *mipsevm.CpuScalars, registers *[32]Word, v0, v1 Word) { registers[2] = v0 registers[7] = v1 diff --git a/cannon/mipsevm/exec/preimage.go b/cannon/mipsevm/exec/preimage.go index 15c1f98e9530..17f6d4e0f6f2 100644 --- 
a/cannon/mipsevm/exec/preimage.go +++ b/cannon/mipsevm/exec/preimage.go @@ -7,7 +7,7 @@ import ( ) type PreimageReader interface { - ReadPreimage(key [32]byte, offset uint32) (dat [32]byte, datLen uint32) + ReadPreimage(key [32]byte, offset Word) (dat [32]byte, datLen Word) } // TrackingPreimageOracleReader wraps around a PreimageOracle, implements the PreimageOracle interface, and adds tracking functionality. @@ -22,8 +22,8 @@ type TrackingPreimageOracleReader struct { lastPreimage []byte // key for above preimage lastPreimageKey [32]byte - // offset we last read from, or max uint32 if nothing is read this step - lastPreimageOffset uint32 + // offset we last read from, or max Word if nothing is read this step + lastPreimageOffset Word } func NewTrackingPreimageOracleReader(po mipsevm.PreimageOracle) *TrackingPreimageOracleReader { @@ -31,7 +31,7 @@ func NewTrackingPreimageOracleReader(po mipsevm.PreimageOracle) *TrackingPreimag } func (p *TrackingPreimageOracleReader) Reset() { - p.lastPreimageOffset = ^uint32(0) + p.lastPreimageOffset = ^Word(0) } func (p *TrackingPreimageOracleReader) Hint(v []byte) { @@ -45,7 +45,7 @@ func (p *TrackingPreimageOracleReader) GetPreimage(k [32]byte) []byte { return preimage } -func (p *TrackingPreimageOracleReader) ReadPreimage(key [32]byte, offset uint32) (dat [32]byte, datLen uint32) { +func (p *TrackingPreimageOracleReader) ReadPreimage(key [32]byte, offset Word) (dat [32]byte, datLen Word) { preimage := p.lastPreimage if key != p.lastPreimageKey { p.lastPreimageKey = key @@ -57,14 +57,14 @@ func (p *TrackingPreimageOracleReader) ReadPreimage(key [32]byte, offset uint32) p.lastPreimage = preimage } p.lastPreimageOffset = offset - if offset >= uint32(len(preimage)) { + if offset >= Word(len(preimage)) { panic("Preimage offset out-of-bounds") } - datLen = uint32(copy(dat[:], preimage[offset:])) + datLen = Word(copy(dat[:], preimage[offset:])) return } -func (p *TrackingPreimageOracleReader) LastPreimage() ([32]byte, []byte, 
uint32) { +func (p *TrackingPreimageOracleReader) LastPreimage() ([32]byte, []byte, Word) { return p.lastPreimageKey, p.lastPreimage, p.lastPreimageOffset } diff --git a/cannon/mipsevm/exec/stack.go b/cannon/mipsevm/exec/stack.go index 06e919c0352f..5f96afe0416b 100644 --- a/cannon/mipsevm/exec/stack.go +++ b/cannon/mipsevm/exec/stack.go @@ -8,7 +8,7 @@ import ( ) type StackTracker interface { - PushStack(caller uint32, target uint32) + PushStack(caller Word, target Word) PopStack() } @@ -19,7 +19,7 @@ type TraceableStackTracker interface { type NoopStackTracker struct{} -func (n *NoopStackTracker) PushStack(caller uint32, target uint32) {} +func (n *NoopStackTracker) PushStack(caller Word, target Word) {} func (n *NoopStackTracker) PopStack() {} @@ -28,8 +28,8 @@ func (n *NoopStackTracker) Traceback() {} type StackTrackerImpl struct { state mipsevm.FPVMState - stack []uint32 - caller []uint32 + stack []Word + caller []Word meta mipsevm.Metadata } @@ -45,7 +45,7 @@ func NewStackTrackerUnsafe(state mipsevm.FPVMState, meta mipsevm.Metadata) *Stac return &StackTrackerImpl{state: state, meta: meta} } -func (s *StackTrackerImpl) PushStack(caller uint32, target uint32) { +func (s *StackTrackerImpl) PushStack(caller Word, target Word) { s.caller = append(s.caller, caller) s.stack = append(s.stack, target) } diff --git a/cannon/mipsevm/iface.go b/cannon/mipsevm/iface.go index 8e8d758e9048..1b3b4efaf84a 100644 --- a/cannon/mipsevm/iface.go +++ b/cannon/mipsevm/iface.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/memory" ) @@ -17,22 +18,22 @@ type FPVMState interface { GetMemory() *memory.Memory // GetHeap returns the current memory address at the top of the heap - GetHeap() uint32 + GetHeap() arch.Word // GetPreimageKey returns the most recently accessed preimage key GetPreimageKey() 
common.Hash // GetPreimageOffset returns the current offset into the current preimage - GetPreimageOffset() uint32 + GetPreimageOffset() arch.Word // GetPC returns the currently executing program counter - GetPC() uint32 + GetPC() arch.Word // GetCpu returns the currently active cpu scalars, including the program counter GetCpu() CpuScalars // GetRegistersRef returns a pointer to the currently active registers - GetRegistersRef() *[32]uint32 + GetRegistersRef() *[32]arch.Word // GetStep returns the current VM step GetStep() uint64 @@ -48,9 +49,9 @@ type FPVMState interface { // so a VM can start from any state without fetching prior pre-images, // and instead just repeat the last hint on setup, // to make sure pre-image requests can be served. - // The first 4 bytes are a uint32 length prefix. + // The first 4 bytes are a Word length prefix. // Warning: the hint MAY NOT BE COMPLETE. I.e. this is buffered, - // and should only be read when len(LastHint) > 4 && uint32(LastHint[:4]) <= len(LastHint[4:]) + // and should only be read when len(LastHint) > 4 && Word(LastHint[:4]) <= len(LastHint[4:]) GetLastHint() hexutil.Bytes // EncodeWitness returns the witness for the current state and the state hash @@ -60,10 +61,10 @@ type FPVMState interface { CreateVM(logger log.Logger, po PreimageOracle, stdOut, stdErr io.Writer, meta Metadata) FPVM } -type SymbolMatcher func(addr uint32) bool +type SymbolMatcher func(addr arch.Word) bool type Metadata interface { - LookupSymbol(addr uint32) string + LookupSymbol(addr arch.Word) string CreateSymbolMatcher(name string) SymbolMatcher } @@ -78,7 +79,7 @@ type FPVM interface { CheckInfiniteLoop() bool // LastPreimage returns the last preimage accessed by the VM - LastPreimage() (preimageKey [32]byte, preimage []byte, preimageOffset uint32) + LastPreimage() (preimageKey [32]byte, preimage []byte, preimageOffset arch.Word) // Traceback prints a traceback of the program to the console Traceback() @@ -91,5 +92,5 @@ type FPVM interface { 
// LookupSymbol returns the symbol located at the specified address. // May return an empty string if there's no symbol table available. - LookupSymbol(addr uint32) string + LookupSymbol(addr arch.Word) string } diff --git a/cannon/mipsevm/memory/memory.go b/cannon/mipsevm/memory/memory.go index ea5c279763b3..596e20294065 100644 --- a/cannon/mipsevm/memory/memory.go +++ b/cannon/mipsevm/memory/memory.go @@ -9,21 +9,25 @@ import ( "slices" "sort" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum/go-ethereum/crypto" "golang.org/x/exp/maps" ) // Note: 2**12 = 4 KiB, the min phys page size in the Go runtime. const ( - PageAddrSize = 12 - PageKeySize = 32 - PageAddrSize - PageSize = 1 << PageAddrSize - PageAddrMask = PageSize - 1 - MaxPageCount = 1 << PageKeySize - PageKeyMask = MaxPageCount - 1 + PageAddrSize = arch.PageAddrSize + PageKeySize = arch.PageKeySize + PageSize = 1 << PageAddrSize + PageAddrMask = PageSize - 1 + MaxPageCount = 1 << PageKeySize + PageKeyMask = MaxPageCount - 1 + MemProofLeafCount = arch.MemProofLeafCount ) -const MEM_PROOF_SIZE = 28 * 32 +const MEM_PROOF_SIZE = arch.MemProofSize + +type Word = arch.Word func HashPair(left, right [32]byte) [32]byte { out := crypto.Keccak256Hash(left[:], right[:]) @@ -45,22 +49,22 @@ type Memory struct { nodes map[uint64]*[32]byte // pageIndex -> cached page - pages map[uint32]*CachedPage + pages map[Word]*CachedPage // Note: since we don't de-alloc pages, we don't do ref-counting. // Once a page exists, it doesn't leave memory // two caches: we often read instructions from one page, and do memory things with another page. 
// this prevents map lookups each instruction - lastPageKeys [2]uint32 + lastPageKeys [2]Word lastPage [2]*CachedPage } func NewMemory() *Memory { return &Memory{ nodes: make(map[uint64]*[32]byte), - pages: make(map[uint32]*CachedPage), - lastPageKeys: [2]uint32{^uint32(0), ^uint32(0)}, // default to invalid keys, to not match any pages + pages: make(map[Word]*CachedPage), + lastPageKeys: [2]Word{^Word(0), ^Word(0)}, // default to invalid keys, to not match any pages } } @@ -68,7 +72,7 @@ func (m *Memory) PageCount() int { return len(m.pages) } -func (m *Memory) ForEachPage(fn func(pageIndex uint32, page *Page) error) error { +func (m *Memory) ForEachPage(fn func(pageIndex Word, page *Page) error) error { for pageIndex, cachedPage := range m.pages { if err := fn(pageIndex, cachedPage.Data); err != nil { return err @@ -77,16 +81,16 @@ func (m *Memory) ForEachPage(fn func(pageIndex uint32, page *Page) error) error return nil } -func (m *Memory) Invalidate(addr uint32) { - // addr must be aligned to 4 bytes - if addr&0x3 != 0 { +func (m *Memory) invalidate(addr Word) { + // addr must be aligned + if addr&arch.ExtMask != 0 { panic(fmt.Errorf("unaligned memory access: %x", addr)) } // find page, and invalidate addr within it if p, ok := m.pageLookup(addr >> PageAddrSize); ok { prevValid := p.Ok[1] - p.Invalidate(addr & PageAddrMask) + p.invalidate(addr & PageAddrMask) if !prevValid { // if the page was already invalid before, then nodes to mem-root will also still be. 
return } @@ -105,23 +109,23 @@ func (m *Memory) Invalidate(addr uint32) { func (m *Memory) MerkleizeSubtree(gindex uint64) [32]byte { l := uint64(bits.Len64(gindex)) - if l > 28 { + if l > MemProofLeafCount { panic("gindex too deep") } if l > PageKeySize { depthIntoPage := l - 1 - PageKeySize pageIndex := (gindex >> depthIntoPage) & PageKeyMask - if p, ok := m.pages[uint32(pageIndex)]; ok { + if p, ok := m.pages[Word(pageIndex)]; ok { pageGindex := (1 << depthIntoPage) | (gindex & ((1 << depthIntoPage) - 1)) return p.MerkleizeSubtree(pageGindex) } else { - return zeroHashes[28-l] // page does not exist + return zeroHashes[MemProofLeafCount-l] // page does not exist } } n, ok := m.nodes[gindex] if !ok { // if the node doesn't exist, the whole sub-tree is zeroed - return zeroHashes[28-l] + return zeroHashes[MemProofLeafCount-l] } if n != nil { return *n @@ -133,16 +137,16 @@ func (m *Memory) MerkleizeSubtree(gindex uint64) [32]byte { return r } -func (m *Memory) MerkleProof(addr uint32) (out [MEM_PROOF_SIZE]byte) { +func (m *Memory) MerkleProof(addr Word) (out [MEM_PROOF_SIZE]byte) { proof := m.traverseBranch(1, addr, 0) // encode the proof - for i := 0; i < 28; i++ { + for i := 0; i < MemProofLeafCount; i++ { copy(out[i*32:(i+1)*32], proof[i][:]) } return out } -func (m *Memory) traverseBranch(parent uint64, addr uint32, depth uint8) (proof [][32]byte) { +func (m *Memory) traverseBranch(parent uint64, addr Word, depth uint8) (proof [][32]byte) { if depth == 32-5 { proof = make([][32]byte, 0, 32-5+1) proof = append(proof, m.MerkleizeSubtree(parent)) @@ -166,7 +170,7 @@ func (m *Memory) MerkleRoot() [32]byte { return m.MerkleizeSubtree(1) } -func (m *Memory) pageLookup(pageIndex uint32) (*CachedPage, bool) { +func (m *Memory) pageLookup(pageIndex Word) (*CachedPage, bool) { // hit caches if pageIndex == m.lastPageKeys[0] { return m.lastPage[0], true @@ -187,9 +191,9 @@ func (m *Memory) pageLookup(pageIndex uint32) (*CachedPage, bool) { return p, ok } -func (m *Memory) 
SetMemory(addr uint32, v uint32) { +func (m *Memory) SetMemory(addr Word, v uint32) { // addr must be aligned to 4 bytes - if addr&0x3 != 0 { + if addr&arch.ExtMask != 0 { panic(fmt.Errorf("unaligned memory access: %x", addr)) } @@ -201,14 +205,35 @@ func (m *Memory) SetMemory(addr uint32, v uint32) { // Go may mmap relatively large ranges, but we only allocate the pages just in time. p = m.AllocPage(pageIndex) } else { - m.Invalidate(addr) // invalidate this branch of memory, now that the value changed + m.invalidate(addr) // invalidate this branch of memory, now that the value changed } binary.BigEndian.PutUint32(p.Data[pageAddr:pageAddr+4], v) } -func (m *Memory) GetMemory(addr uint32) uint32 { +// SetWord stores [arch.Word] sized values at the specified address +func (m *Memory) SetWord(addr Word, v Word) { + // addr must be aligned to WordSizeBytes bytes + if addr&arch.ExtMask != 0 { + panic(fmt.Errorf("unaligned memory access: %x", addr)) + } + + pageIndex := addr >> PageAddrSize + pageAddr := addr & PageAddrMask + p, ok := m.pageLookup(pageIndex) + if !ok { + // allocate the page if we have not already. + // Go may mmap relatively large ranges, but we only allocate the pages just in time. + p = m.AllocPage(pageIndex) + } else { + m.invalidate(addr) // invalidate this branch of memory, now that the value changed + } + arch.ByteOrderWord.PutWord(p.Data[pageAddr:pageAddr+arch.WordSizeBytes], v) +} + +// GetMemory reads the 32-bit value located at the specified address. 
+func (m *Memory) GetMemory(addr Word) uint32 { // addr must be aligned to 4 bytes - if addr&0x3 != 0 { + if addr&arch.ExtMask != 0 { panic(fmt.Errorf("unaligned memory access: %x", addr)) } p, ok := m.pageLookup(addr >> PageAddrSize) @@ -219,7 +244,22 @@ func (m *Memory) GetMemory(addr uint32) uint32 { return binary.BigEndian.Uint32(p.Data[pageAddr : pageAddr+4]) } -func (m *Memory) AllocPage(pageIndex uint32) *CachedPage { +// GetWord reads the maximum sized value, [arch.Word], located at the specified address. +// Note: Also known by the MIPS64 specification as a "double-word" memory access. +func (m *Memory) GetWord(addr Word) Word { + // addr must be word aligned + if addr&arch.ExtMask != 0 { + panic(fmt.Errorf("unaligned memory access: %x", addr)) + } + p, ok := m.pageLookup(addr >> PageAddrSize) + if !ok { + return 0 + } + pageAddr := addr & PageAddrMask + return arch.ByteOrderWord.Word(p.Data[pageAddr : pageAddr+arch.WordSizeBytes]) +} + +func (m *Memory) AllocPage(pageIndex Word) *CachedPage { p := &CachedPage{Data: new(Page)} m.pages[pageIndex] = p // make nodes to root @@ -232,8 +272,8 @@ func (m *Memory) AllocPage(pageIndex uint32) *CachedPage { } type pageEntry struct { - Index uint32 `json:"index"` - Data *Page `json:"data"` + Index Word `json:"index"` + Data *Page `json:"data"` } func (m *Memory) MarshalJSON() ([]byte, error) { // nosemgrep @@ -256,8 +296,8 @@ func (m *Memory) UnmarshalJSON(data []byte) error { return err } m.nodes = make(map[uint64]*[32]byte) - m.pages = make(map[uint32]*CachedPage) - m.lastPageKeys = [2]uint32{^uint32(0), ^uint32(0)} + m.pages = make(map[Word]*CachedPage) + m.lastPageKeys = [2]Word{^Word(0), ^Word(0)} m.lastPage = [2]*CachedPage{nil, nil} for i, p := range pages { if _, ok := m.pages[p.Index]; ok { @@ -268,7 +308,7 @@ func (m *Memory) UnmarshalJSON(data []byte) error { return nil } -func (m *Memory) SetMemoryRange(addr uint32, r io.Reader) error { +func (m *Memory) SetMemoryRange(addr Word, r io.Reader) error { for 
{ pageIndex := addr >> PageAddrSize pageAddr := addr & PageAddrMask @@ -284,7 +324,7 @@ func (m *Memory) SetMemoryRange(addr uint32, r io.Reader) error { } return err } - addr += uint32(n) + addr += Word(n) } } @@ -292,13 +332,13 @@ func (m *Memory) SetMemoryRange(addr uint32, r io.Reader) error { // The format is a simple concatenation of fields, with prefixed item count for repeating items and using big endian // encoding for numbers. // -// len(PageCount) uint32 +// len(PageCount) Word // For each page (order is arbitrary): // -// page index uint32 +// page index Word // page Data [PageSize]byte func (m *Memory) Serialize(out io.Writer) error { - if err := binary.Write(out, binary.BigEndian, uint32(m.PageCount())); err != nil { + if err := binary.Write(out, binary.BigEndian, Word(m.PageCount())); err != nil { return err } indexes := maps.Keys(m.pages) @@ -317,12 +357,12 @@ func (m *Memory) Serialize(out io.Writer) error { } func (m *Memory) Deserialize(in io.Reader) error { - var pageCount uint32 + var pageCount Word if err := binary.Read(in, binary.BigEndian, &pageCount); err != nil { return err } - for i := uint32(0); i < pageCount; i++ { - var pageIndex uint32 + for i := Word(0); i < pageCount; i++ { + var pageIndex Word if err := binary.Read(in, binary.BigEndian, &pageIndex); err != nil { return err } @@ -337,8 +377,8 @@ func (m *Memory) Deserialize(in io.Reader) error { func (m *Memory) Copy() *Memory { out := NewMemory() out.nodes = make(map[uint64]*[32]byte) - out.pages = make(map[uint32]*CachedPage) - out.lastPageKeys = [2]uint32{^uint32(0), ^uint32(0)} + out.pages = make(map[Word]*CachedPage) + out.lastPageKeys = [2]Word{^Word(0), ^Word(0)} out.lastPage = [2]*CachedPage{nil, nil} for k, page := range m.pages { data := new(Page) @@ -350,8 +390,8 @@ func (m *Memory) Copy() *Memory { type memReader struct { m *Memory - addr uint32 - count uint32 + addr Word + count Word } func (r *memReader) Read(dest []byte) (n int, err error) { @@ -365,7 +405,7 @@ func 
(r *memReader) Read(dest []byte) (n int, err error) { pageIndex := r.addr >> PageAddrSize start := r.addr & PageAddrMask - end := uint32(PageSize) + end := Word(PageSize) if pageIndex == (endAddr >> PageAddrSize) { end = endAddr & PageAddrMask @@ -376,12 +416,12 @@ func (r *memReader) Read(dest []byte) (n int, err error) { } else { n = copy(dest, make([]byte, end-start)) // default to zeroes } - r.addr += uint32(n) - r.count -= uint32(n) + r.addr += Word(n) + r.count -= Word(n) return n, nil } -func (m *Memory) ReadMemoryRange(addr uint32, count uint32) io.Reader { +func (m *Memory) ReadMemoryRange(addr Word, count Word) io.Reader { return &memReader{m: m, addr: addr, count: count} } diff --git a/cannon/mipsevm/memory/memory_test.go b/cannon/mipsevm/memory/memory_test.go index 5f3f9301e552..fac076c90e19 100644 --- a/cannon/mipsevm/memory/memory_test.go +++ b/cannon/mipsevm/memory/memory_test.go @@ -118,7 +118,7 @@ func TestMemoryReadWrite(t *testing.T) { _, err := rand.Read(data[:]) require.NoError(t, err) require.NoError(t, m.SetMemoryRange(0, bytes.NewReader(data))) - for _, i := range []uint32{0, 4, 1000, 20_000 - 4} { + for _, i := range []Word{0, 4, 1000, 20_000 - 4} { v := m.GetMemory(i) expected := binary.BigEndian.Uint32(data[i : i+4]) require.Equalf(t, expected, v, "read at %d", i) @@ -129,7 +129,7 @@ func TestMemoryReadWrite(t *testing.T) { m := NewMemory() data := []byte(strings.Repeat("under the big bright yellow sun ", 40)) require.NoError(t, m.SetMemoryRange(0x1337, bytes.NewReader(data))) - res, err := io.ReadAll(m.ReadMemoryRange(0x1337-10, uint32(len(data)+20))) + res, err := io.ReadAll(m.ReadMemoryRange(0x1337-10, Word(len(data)+20))) require.NoError(t, err) require.Equal(t, make([]byte, 10), res[:10], "empty start") require.Equal(t, data, res[10:len(res)-10], "result") diff --git a/cannon/mipsevm/memory/page.go b/cannon/mipsevm/memory/page.go index d3c3096b418e..defcc10b603c 100644 --- a/cannon/mipsevm/memory/page.go +++ 
b/cannon/mipsevm/memory/page.go @@ -70,7 +70,7 @@ type CachedPage struct { Ok [PageSize / 32]bool } -func (p *CachedPage) Invalidate(pageAddr uint32) { +func (p *CachedPage) invalidate(pageAddr Word) { if pageAddr >= PageSize { panic("invalid page addr") } diff --git a/cannon/mipsevm/memory/page_test.go b/cannon/mipsevm/memory/page_test.go index c2960421b670..e7a8167a9df4 100644 --- a/cannon/mipsevm/memory/page_test.go +++ b/cannon/mipsevm/memory/page_test.go @@ -29,16 +29,16 @@ func TestCachedPage(t *testing.T) { post := p.MerkleRoot() require.Equal(t, pre, post, "no change expected until cache is invalidated") - p.Invalidate(42) + p.invalidate(42) post2 := p.MerkleRoot() require.NotEqual(t, post, post2, "change after cache invalidation") p.Data[2000] = 0xef - p.Invalidate(42) + p.invalidate(42) post3 := p.MerkleRoot() require.Equal(t, post2, post3, "local invalidation is not global invalidation") - p.Invalidate(2000) + p.invalidate(2000) post4 := p.MerkleRoot() require.NotEqual(t, post3, post4, "can see the change now") diff --git a/cannon/mipsevm/multithreaded/instrumented.go b/cannon/mipsevm/multithreaded/instrumented.go index ac76d6cdb532..db61fd1207e8 100644 --- a/cannon/mipsevm/multithreaded/instrumented.go +++ b/cannon/mipsevm/multithreaded/instrumented.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" ) @@ -77,7 +78,7 @@ func (m *InstrumentedState) Step(proof bool) (wit *mipsevm.StepWitness, err erro wit.ProofData = append(wit.ProofData, memProof[:]...) wit.ProofData = append(wit.ProofData, memProof2[:]...) 
lastPreimageKey, lastPreimage, lastPreimageOffset := m.preimageOracle.LastPreimage() - if lastPreimageOffset != ^uint32(0) { + if lastPreimageOffset != ^arch.Word(0) { wit.PreimageOffset = lastPreimageOffset wit.PreimageKey = lastPreimageKey wit.PreimageValue = lastPreimage @@ -90,7 +91,7 @@ func (m *InstrumentedState) CheckInfiniteLoop() bool { return false } -func (m *InstrumentedState) LastPreimage() ([32]byte, []byte, uint32) { +func (m *InstrumentedState) LastPreimage() ([32]byte, []byte, arch.Word) { return m.preimageOracle.LastPreimage() } @@ -111,7 +112,7 @@ func (m *InstrumentedState) Traceback() { m.stackTracker.Traceback() } -func (m *InstrumentedState) LookupSymbol(addr uint32) string { +func (m *InstrumentedState) LookupSymbol(addr arch.Word) string { if m.meta == nil { return "" } diff --git a/cannon/mipsevm/multithreaded/instrumented_test.go b/cannon/mipsevm/multithreaded/instrumented_test.go index 20ce2b9cc0b0..f0b005257f7c 100644 --- a/cannon/mipsevm/multithreaded/instrumented_test.go +++ b/cannon/mipsevm/multithreaded/instrumented_test.go @@ -20,7 +20,6 @@ func vmFactory(state *State, po mipsevm.PreimageOracle, stdOut, stdErr io.Writer func TestInstrumentedState_OpenMips(t *testing.T) { t.Parallel() - // TODO: Add mt-specific tests here testutil.RunVMTests_OpenMips(t, CreateEmptyState, vmFactory, "clone.bin") } diff --git a/cannon/mipsevm/multithreaded/mips.go b/cannon/mipsevm/multithreaded/mips.go index b06ad3917724..43abdbf57157 100644 --- a/cannon/mipsevm/multithreaded/mips.go +++ b/cannon/mipsevm/multithreaded/mips.go @@ -9,21 +9,24 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" "github.com/ethereum-optimism/optimism/cannon/mipsevm/program" ) +type Word = arch.Word + func (m *InstrumentedState) handleSyscall() error { thread := m.state.GetCurrentThread() syscallNum, 
a0, a1, a2, a3 := exec.GetSyscallArgs(m.state.GetRegistersRef()) - v0 := uint32(0) - v1 := uint32(0) + v0 := Word(0) + v1 := Word(0) //fmt.Printf("syscall: %d\n", syscallNum) switch syscallNum { case exec.SysMmap: - var newHeap uint32 + var newHeap Word v0, v1, newHeap = exec.HandleSysMmap(a0, a1, m.state.Heap) m.state.Heap = newHeap case exec.SysBrk: @@ -74,9 +77,9 @@ func (m *InstrumentedState) handleSyscall() error { m.state.ExitCode = uint8(a0) return nil case exec.SysRead: - var newPreimageOffset uint32 + var newPreimageOffset Word var memUpdated bool - var memAddr uint32 + var memAddr Word v0, v1, newPreimageOffset, memUpdated, memAddr = exec.HandleSysRead(a0, a1, a2, m.state.PreimageKey, m.state.PreimageOffset, m.preimageOracle, m.state.Memory, m.memoryTracker) m.state.PreimageOffset = newPreimageOffset if memUpdated { @@ -85,7 +88,7 @@ func (m *InstrumentedState) handleSyscall() error { case exec.SysWrite: var newLastHint hexutil.Bytes var newPreimageKey common.Hash - var newPreimageOffset uint32 + var newPreimageOffset Word v0, v1, newLastHint, newPreimageKey, newPreimageOffset = exec.HandleSysWrite(a0, a1, a2, m.state.LastHint, m.state.PreimageKey, m.state.PreimageOffset, m.preimageOracle, m.state.Memory, m.memoryTracker, m.stdOut, m.stdErr) m.state.LastHint = newLastHint m.state.PreimageKey = newPreimageKey @@ -105,11 +108,11 @@ func (m *InstrumentedState) handleSyscall() error { return nil case exec.SysFutex: // args: a0 = addr, a1 = op, a2 = val, a3 = timeout - effAddr := a0 & 0xFFffFFfc + effAddr := a0 & arch.AddressMask switch a1 { case exec.FutexWaitPrivate: m.memoryTracker.TrackMemAccess(effAddr) - mem := m.state.Memory.GetMemory(effAddr) + mem := m.state.Memory.GetWord(effAddr) if mem != a2 { v0 = exec.SysErrorSignal v1 = exec.MipsEAGAIN @@ -153,20 +156,20 @@ func (m *InstrumentedState) handleSyscall() error { switch a0 { case exec.ClockGettimeRealtimeFlag, exec.ClockGettimeMonotonicFlag: v0, v1 = 0, 0 - var secs, nsecs uint32 + var secs, nsecs 
Word if a0 == exec.ClockGettimeMonotonicFlag { // monotonic clock_gettime is used by Go guest programs for goroutine scheduling and to implement // `time.Sleep` (and other sleep related operations). - secs = uint32(m.state.Step / exec.HZ) - nsecs = uint32((m.state.Step % exec.HZ) * (1_000_000_000 / exec.HZ)) + secs = Word(m.state.Step / exec.HZ) + nsecs = Word((m.state.Step % exec.HZ) * (1_000_000_000 / exec.HZ)) } // else realtime set to Unix Epoch - effAddr := a1 & 0xFFffFFfc + effAddr := a1 & arch.AddressMask m.memoryTracker.TrackMemAccess(effAddr) - m.state.Memory.SetMemory(effAddr, secs) + m.state.Memory.SetWord(effAddr, secs) m.handleMemoryUpdate(effAddr) m.memoryTracker.TrackMemAccess2(effAddr + 4) - m.state.Memory.SetMemory(effAddr+4, nsecs) + m.state.Memory.SetWord(effAddr+4, nsecs) m.handleMemoryUpdate(effAddr + 4) default: v0 = exec.SysErrorSignal @@ -182,6 +185,8 @@ func (m *InstrumentedState) handleSyscall() error { case exec.SysSigaltstack: case exec.SysRtSigaction: case exec.SysPrlimit64: + // TODO(#12205): may be needed for 64-bit Cannon + // case exec.SysGetRtLimit: case exec.SysClose: case exec.SysPread64: case exec.SysFstat64: @@ -256,9 +261,9 @@ func (m *InstrumentedState) mipsStep() error { m.onWaitComplete(thread, true) return nil } else { - effAddr := thread.FutexAddr & 0xFFffFFfc + effAddr := thread.FutexAddr & arch.AddressMask m.memoryTracker.TrackMemAccess(effAddr) - mem := m.state.Memory.GetMemory(effAddr) + mem := m.state.Memory.GetWord(effAddr) if thread.FutexVal == mem { // still got expected value, continue sleeping, try next thread. 
m.preemptThread(thread) @@ -299,6 +304,12 @@ func (m *InstrumentedState) mipsStep() error { if opcode == exec.OpLoadLinked || opcode == exec.OpStoreConditional { return m.handleRMWOps(insn, opcode) } + if opcode == exec.OpLoadLinked64 || opcode == exec.OpStoreConditional64 { + if arch.IsMips32 { + panic(fmt.Sprintf("invalid instruction: %x", insn)) + } + return m.handleRMWOps(insn, opcode) + } // Exec the rest of the step logic memUpdated, memAddr, err := exec.ExecMipsCoreStepLogic(m.state.getCpuRef(), m.state.GetRegistersRef(), m.state.Memory, insn, opcode, fun, m.memoryTracker, m.stackTracker) @@ -312,7 +323,7 @@ func (m *InstrumentedState) mipsStep() error { return nil } -func (m *InstrumentedState) handleMemoryUpdate(memAddr uint32) { +func (m *InstrumentedState) handleMemoryUpdate(memAddr Word) { if memAddr == m.state.LLAddress { // Reserved address was modified, clear the reservation m.clearLLMemoryReservation() @@ -329,27 +340,32 @@ func (m *InstrumentedState) clearLLMemoryReservation() { func (m *InstrumentedState) handleRMWOps(insn, opcode uint32) error { baseReg := (insn >> 21) & 0x1F base := m.state.GetRegistersRef()[baseReg] - rtReg := (insn >> 16) & 0x1F + rtReg := Word((insn >> 16) & 0x1F) offset := exec.SignExtendImmediate(insn) - effAddr := (base + offset) & 0xFFFFFFFC + effAddr := (base + offset) & arch.AddressMask m.memoryTracker.TrackMemAccess(effAddr) - mem := m.state.Memory.GetMemory(effAddr) + mem := m.state.Memory.GetWord(effAddr) - var retVal uint32 + var retVal Word threadId := m.state.GetCurrentThread().ThreadId - if opcode == exec.OpLoadLinked { + if opcode == exec.OpLoadLinked || opcode == exec.OpLoadLinked64 { retVal = mem m.state.LLReservationActive = true m.state.LLAddress = effAddr m.state.LLOwnerThread = threadId - } else if opcode == exec.OpStoreConditional { + } else if opcode == exec.OpStoreConditional || opcode == exec.OpStoreConditional64 { + // TODO(#12205): Determine bits affected by coherence stores on 64-bits // Check if 
our memory reservation is still intact if m.state.LLReservationActive && m.state.LLOwnerThread == threadId && m.state.LLAddress == effAddr { // Complete atomic update: set memory and return 1 for success m.clearLLMemoryReservation() rt := m.state.GetRegistersRef()[rtReg] - m.state.Memory.SetMemory(effAddr, rt) + if opcode == exec.OpStoreConditional { + m.state.Memory.SetMemory(effAddr, uint32(rt)) + } else { + m.state.Memory.SetWord(effAddr, rt) + } retVal = 1 } else { // Atomic update failed, return 0 for failure @@ -370,8 +386,8 @@ func (m *InstrumentedState) onWaitComplete(thread *ThreadState, isTimedOut bool) thread.FutexTimeoutStep = 0 // Complete the FUTEX_WAIT syscall - v0 := uint32(0) - v1 := uint32(0) + v0 := Word(0) + v1 := Word(0) if isTimedOut { v0 = exec.SysErrorSignal v1 = exec.MipsETIMEDOUT diff --git a/cannon/mipsevm/multithreaded/stack.go b/cannon/mipsevm/multithreaded/stack.go index 4fc32c221ee8..099dc7351323 100644 --- a/cannon/mipsevm/multithreaded/stack.go +++ b/cannon/mipsevm/multithreaded/stack.go @@ -9,7 +9,7 @@ import ( type ThreadedStackTracker interface { exec.TraceableStackTracker - DropThread(threadId uint32) + DropThread(threadId Word) } type NoopThreadedStackTracker struct { @@ -18,12 +18,12 @@ type NoopThreadedStackTracker struct { var _ ThreadedStackTracker = (*ThreadedStackTrackerImpl)(nil) -func (n *NoopThreadedStackTracker) DropThread(threadId uint32) {} +func (n *NoopThreadedStackTracker) DropThread(threadId Word) {} type ThreadedStackTrackerImpl struct { meta mipsevm.Metadata state *State - trackersByThreadId map[uint32]exec.TraceableStackTracker + trackersByThreadId map[Word]exec.TraceableStackTracker } var _ ThreadedStackTracker = (*ThreadedStackTrackerImpl)(nil) @@ -36,11 +36,11 @@ func NewThreadedStackTracker(state *State, meta mipsevm.Metadata) (*ThreadedStac return &ThreadedStackTrackerImpl{ state: state, meta: meta, - trackersByThreadId: make(map[uint32]exec.TraceableStackTracker), + trackersByThreadId: 
make(map[Word]exec.TraceableStackTracker), }, nil } -func (t *ThreadedStackTrackerImpl) PushStack(caller uint32, target uint32) { +func (t *ThreadedStackTrackerImpl) PushStack(caller Word, target Word) { t.getCurrentTracker().PushStack(caller, target) } @@ -62,6 +62,6 @@ func (t *ThreadedStackTrackerImpl) getCurrentTracker() exec.TraceableStackTracke return tracker } -func (t *ThreadedStackTrackerImpl) DropThread(threadId uint32) { +func (t *ThreadedStackTrackerImpl) DropThread(threadId Word) { delete(t.trackersByThreadId, threadId) } diff --git a/cannon/mipsevm/multithreaded/state.go b/cannon/mipsevm/multithreaded/state.go index 7b4d545396a9..f88b5fb0186c 100644 --- a/cannon/mipsevm/multithreaded/state.go +++ b/cannon/mipsevm/multithreaded/state.go @@ -11,54 +11,57 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" "github.com/ethereum-optimism/optimism/cannon/mipsevm/memory" "github.com/ethereum-optimism/optimism/cannon/serialize" ) // STATE_WITNESS_SIZE is the size of the state witness encoding in bytes. 
-const STATE_WITNESS_SIZE = 172 const ( MEMROOT_WITNESS_OFFSET = 0 PREIMAGE_KEY_WITNESS_OFFSET = MEMROOT_WITNESS_OFFSET + 32 PREIMAGE_OFFSET_WITNESS_OFFSET = PREIMAGE_KEY_WITNESS_OFFSET + 32 - HEAP_WITNESS_OFFSET = PREIMAGE_OFFSET_WITNESS_OFFSET + 4 - LL_RESERVATION_ACTIVE_OFFSET = HEAP_WITNESS_OFFSET + 4 + HEAP_WITNESS_OFFSET = PREIMAGE_OFFSET_WITNESS_OFFSET + arch.WordSizeBytes + LL_RESERVATION_ACTIVE_OFFSET = HEAP_WITNESS_OFFSET + arch.WordSizeBytes LL_ADDRESS_OFFSET = LL_RESERVATION_ACTIVE_OFFSET + 1 - LL_OWNER_THREAD_OFFSET = LL_ADDRESS_OFFSET + 4 - EXITCODE_WITNESS_OFFSET = LL_OWNER_THREAD_OFFSET + 4 + LL_OWNER_THREAD_OFFSET = LL_ADDRESS_OFFSET + arch.WordSizeBytes + EXITCODE_WITNESS_OFFSET = LL_OWNER_THREAD_OFFSET + arch.WordSizeBytes EXITED_WITNESS_OFFSET = EXITCODE_WITNESS_OFFSET + 1 STEP_WITNESS_OFFSET = EXITED_WITNESS_OFFSET + 1 STEPS_SINCE_CONTEXT_SWITCH_WITNESS_OFFSET = STEP_WITNESS_OFFSET + 8 WAKEUP_WITNESS_OFFSET = STEPS_SINCE_CONTEXT_SWITCH_WITNESS_OFFSET + 8 - TRAVERSE_RIGHT_WITNESS_OFFSET = WAKEUP_WITNESS_OFFSET + 4 + TRAVERSE_RIGHT_WITNESS_OFFSET = WAKEUP_WITNESS_OFFSET + arch.WordSizeBytes LEFT_THREADS_ROOT_WITNESS_OFFSET = TRAVERSE_RIGHT_WITNESS_OFFSET + 1 RIGHT_THREADS_ROOT_WITNESS_OFFSET = LEFT_THREADS_ROOT_WITNESS_OFFSET + 32 THREAD_ID_WITNESS_OFFSET = RIGHT_THREADS_ROOT_WITNESS_OFFSET + 32 + + // 172 and 196 bytes for 32 and 64-bit respectively + STATE_WITNESS_SIZE = THREAD_ID_WITNESS_OFFSET + arch.WordSizeBytes ) type State struct { Memory *memory.Memory PreimageKey common.Hash - PreimageOffset uint32 // note that the offset includes the 8-byte length prefix + PreimageOffset Word // note that the offset includes the 8-byte length prefix - Heap uint32 // to handle mmap growth - LLReservationActive bool // Whether there is an active memory reservation initiated via the LL (load linked) op - LLAddress uint32 // The "linked" memory address reserved via the LL (load linked) op - LLOwnerThread uint32 // The id of the thread that holds the 
reservation on LLAddress + Heap Word // to handle mmap growth + LLReservationActive bool // Whether there is an active memory reservation initiated via the LL (load linked) op + LLAddress Word // The "linked" memory address reserved via the LL (load linked) op + LLOwnerThread Word // The id of the thread that holds the reservation on LLAddress ExitCode uint8 Exited bool Step uint64 StepsSinceLastContextSwitch uint64 - Wakeup uint32 + Wakeup Word TraverseRight bool LeftThreadStack []*ThreadState RightThreadStack []*ThreadState - NextThreadId uint32 + NextThreadId Word // LastHint is optional metadata, and not part of the VM state itself. LastHint hexutil.Bytes @@ -86,7 +89,7 @@ func CreateEmptyState() *State { } } -func CreateInitialState(pc, heapStart uint32) *State { +func CreateInitialState(pc, heapStart Word) *State { state := CreateEmptyState() currentThread := state.GetCurrentThread() currentThread.Cpu.PC = pc @@ -97,6 +100,7 @@ func CreateInitialState(pc, heapStart uint32) *State { } func (s *State) CreateVM(logger log.Logger, po mipsevm.PreimageOracle, stdOut, stdErr io.Writer, meta mipsevm.Metadata) mipsevm.FPVM { + logger.Info("Using cannon multithreaded VM", "is32", arch.IsMips32) return NewInstrumentedState(s, po, stdOut, stdErr, logger, meta) } @@ -139,7 +143,7 @@ func (s *State) calculateThreadStackRoot(stack []*ThreadState) common.Hash { return curRoot } -func (s *State) GetPC() uint32 { +func (s *State) GetPC() Word { activeThread := s.GetCurrentThread() return activeThread.Cpu.PC } @@ -153,7 +157,7 @@ func (s *State) getCpuRef() *mipsevm.CpuScalars { return &s.GetCurrentThread().Cpu } -func (s *State) GetRegistersRef() *[32]uint32 { +func (s *State) GetRegistersRef() *[32]Word { activeThread := s.GetCurrentThread() return &activeThread.Registers } @@ -176,7 +180,7 @@ func (s *State) GetMemory() *memory.Memory { return s.Memory } -func (s *State) GetHeap() uint32 { +func (s *State) GetHeap() Word { return s.Heap } @@ -184,7 +188,7 @@ func (s *State) 
GetPreimageKey() common.Hash { return s.PreimageKey } -func (s *State) GetPreimageOffset() uint32 { +func (s *State) GetPreimageOffset() Word { return s.PreimageOffset } @@ -193,24 +197,24 @@ func (s *State) EncodeWitness() ([]byte, common.Hash) { memRoot := s.Memory.MerkleRoot() out = append(out, memRoot[:]...) out = append(out, s.PreimageKey[:]...) - out = binary.BigEndian.AppendUint32(out, s.PreimageOffset) - out = binary.BigEndian.AppendUint32(out, s.Heap) + out = arch.ByteOrderWord.AppendWord(out, s.PreimageOffset) + out = arch.ByteOrderWord.AppendWord(out, s.Heap) out = mipsevm.AppendBoolToWitness(out, s.LLReservationActive) - out = binary.BigEndian.AppendUint32(out, s.LLAddress) - out = binary.BigEndian.AppendUint32(out, s.LLOwnerThread) + out = arch.ByteOrderWord.AppendWord(out, s.LLAddress) + out = arch.ByteOrderWord.AppendWord(out, s.LLOwnerThread) out = append(out, s.ExitCode) out = mipsevm.AppendBoolToWitness(out, s.Exited) out = binary.BigEndian.AppendUint64(out, s.Step) out = binary.BigEndian.AppendUint64(out, s.StepsSinceLastContextSwitch) - out = binary.BigEndian.AppendUint32(out, s.Wakeup) + out = arch.ByteOrderWord.AppendWord(out, s.Wakeup) leftStackRoot := s.getLeftThreadStackRoot() rightStackRoot := s.getRightThreadStackRoot() out = mipsevm.AppendBoolToWitness(out, s.TraverseRight) out = append(out, (leftStackRoot)[:]...) out = append(out, (rightStackRoot)[:]...) 
- out = binary.BigEndian.AppendUint32(out, s.NextThreadId) + out = arch.ByteOrderWord.AppendWord(out, s.NextThreadId) return out, stateHashFromWitness(out) } @@ -245,20 +249,20 @@ func (s *State) ThreadCount() int { // StateVersion uint8(1) // Memory As per Memory.Serialize // PreimageKey [32]byte -// PreimageOffset uint32 -// Heap uint32 +// PreimageOffset Word +// Heap Word // ExitCode uint8 // Exited uint8 - 0 for false, 1 for true // Step uint64 // StepsSinceLastContextSwitch uint64 -// Wakeup uint32 +// Wakeup Word // TraverseRight uint8 - 0 for false, 1 for true -// NextThreadId uint32 -// len(LeftThreadStack) uint32 +// NextThreadId Word +// len(LeftThreadStack) Word // LeftThreadStack entries as per ThreadState.Serialize -// len(RightThreadStack) uint32 +// len(RightThreadStack) Word // RightThreadStack entries as per ThreadState.Serialize -// len(LastHint) uint32 (0 when LastHint is nil) +// len(LastHint) Word (0 when LastHint is nil) // LastHint []byte func (s *State) Serialize(out io.Writer) error { bout := serialize.NewBinaryWriter(out) @@ -306,7 +310,7 @@ func (s *State) Serialize(out io.Writer) error { return err } - if err := bout.WriteUInt(uint32(len(s.LeftThreadStack))); err != nil { + if err := bout.WriteUInt(Word(len(s.LeftThreadStack))); err != nil { return err } for _, stack := range s.LeftThreadStack { @@ -314,7 +318,7 @@ func (s *State) Serialize(out io.Writer) error { return err } } - if err := bout.WriteUInt(uint32(len(s.RightThreadStack))); err != nil { + if err := bout.WriteUInt(Word(len(s.RightThreadStack))); err != nil { return err } for _, stack := range s.RightThreadStack { @@ -376,7 +380,7 @@ func (s *State) Deserialize(in io.Reader) error { return err } - var leftThreadStackSize uint32 + var leftThreadStackSize Word if err := bin.ReadUInt(&leftThreadStackSize); err != nil { return err } @@ -388,7 +392,7 @@ func (s *State) Deserialize(in io.Reader) error { } } - var rightThreadStackSize uint32 + var rightThreadStackSize Word if err 
:= bin.ReadUInt(&rightThreadStackSize); err != nil { return err } @@ -423,7 +427,7 @@ func GetStateHashFn() mipsevm.HashFn { func stateHashFromWitness(sw []byte) common.Hash { if len(sw) != STATE_WITNESS_SIZE { - panic("Invalid witness length") + panic(fmt.Sprintf("Invalid witness length. Got %d, expected %d", len(sw), STATE_WITNESS_SIZE)) } hash := crypto.Keccak256Hash(sw) exitCode := sw[EXITCODE_WITNESS_OFFSET] diff --git a/cannon/mipsevm/multithreaded/state_test.go b/cannon/mipsevm/multithreaded/state_test.go index 6d776632bf0f..0beddb75b026 100644 --- a/cannon/mipsevm/multithreaded/state_test.go +++ b/cannon/mipsevm/multithreaded/state_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" "github.com/ethereum-optimism/optimism/cannon/mipsevm/memory" "github.com/ethereum-optimism/optimism/cannon/mipsevm/program" @@ -41,11 +42,11 @@ func TestState_EncodeWitness(t *testing.T) { {exited: true, exitCode: 3}, } - heap := uint32(12) - llAddress := uint32(55) - llThreadOwner := uint32(99) + heap := Word(12) + llAddress := Word(55) + llThreadOwner := Word(99) preimageKey := crypto.Keccak256Hash([]byte{1, 2, 3, 4}) - preimageOffset := uint32(24) + preimageOffset := Word(24) step := uint64(33) stepsSinceContextSwitch := uint64(123) for _, c := range cases { @@ -207,7 +208,7 @@ func TestSerializeStateRoundTrip(t *testing.T) { LO: 0xbeef, HI: 0xbabe, }, - Registers: [32]uint32{ + Registers: [32]Word{ 0xdeadbeef, 0xdeadbeef, 0xc0ffee, @@ -230,7 +231,7 @@ func TestSerializeStateRoundTrip(t *testing.T) { LO: 0xeeef, HI: 0xeabe, }, - Registers: [32]uint32{ + Registers: [32]Word{ 0xabcdef, 0x123456, }, @@ -250,7 +251,7 @@ func TestSerializeStateRoundTrip(t *testing.T) { LO: 0xdeef, HI: 0xdabe, }, - Registers: [32]uint32{ + Registers: [32]Word{ 0x654321, }, }, @@ -267,7 +268,7 @@ func 
TestSerializeStateRoundTrip(t *testing.T) { LO: 0xceef, HI: 0xcabe, }, - Registers: [32]uint32{ + Registers: [32]Word{ 0x987653, 0xfedbca, }, @@ -302,7 +303,7 @@ func TestState_EncodeThreadProof_SingleThread(t *testing.T) { activeThread.Cpu.HI = 11 activeThread.Cpu.LO = 22 for i := 0; i < 32; i++ { - activeThread.Registers[i] = uint32(i) + activeThread.Registers[i] = Word(i) } expectedProof := append([]byte{}, activeThread.serializeThread()[:]...) @@ -324,12 +325,12 @@ func TestState_EncodeThreadProof_MultipleThreads(t *testing.T) { // Set some fields on our threads for i := 0; i < 3; i++ { curThread := state.LeftThreadStack[i] - curThread.Cpu.PC = uint32(4 * i) + curThread.Cpu.PC = Word(4 * i) curThread.Cpu.NextPC = curThread.Cpu.PC + 4 - curThread.Cpu.HI = uint32(11 + i) - curThread.Cpu.LO = uint32(22 + i) + curThread.Cpu.HI = Word(11 + i) + curThread.Cpu.LO = Word(22 + i) for j := 0; j < 32; j++ { - curThread.Registers[j] = uint32(j + i) + curThread.Registers[j] = Word(j + i) } } @@ -355,12 +356,12 @@ func TestState_EncodeThreadProof_MultipleThreads(t *testing.T) { func TestState_EncodeThreadProof_EmptyThreadStackPanic(t *testing.T) { cases := []struct { name string - wakeupAddr uint32 + wakeupAddr Word traverseRight bool }{ - {"traverse left during wakeup traversal", uint32(99), false}, + {"traverse left during wakeup traversal", Word(99), false}, {"traverse left during normal traversal", exec.FutexEmptyAddr, false}, - {"traverse right during wakeup traversal", uint32(99), true}, + {"traverse right during wakeup traversal", Word(99), true}, {"traverse right during normal traversal", exec.FutexEmptyAddr, true}, } @@ -382,3 +383,19 @@ func TestState_EncodeThreadProof_EmptyThreadStackPanic(t *testing.T) { }) } } + +func TestStateWitnessSize(t *testing.T) { + expectedWitnessSize := 172 + if !arch.IsMips32 { + expectedWitnessSize = 196 + } + require.Equal(t, expectedWitnessSize, STATE_WITNESS_SIZE) +} + +func TestThreadStateWitnessSize(t *testing.T) { + 
expectedWitnessSize := 166 + if !arch.IsMips32 { + expectedWitnessSize = 322 + } + require.Equal(t, expectedWitnessSize, SERIALIZED_THREAD_SIZE) +} diff --git a/cannon/mipsevm/multithreaded/testutil/expectations.go b/cannon/mipsevm/multithreaded/testutil/expectations.go index 559ed2de8c4f..05dfdb4474a6 100644 --- a/cannon/mipsevm/multithreaded/testutil/expectations.go +++ b/cannon/mipsevm/multithreaded/testutil/expectations.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/stretchr/testify/require" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/memory" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" ) @@ -15,11 +16,11 @@ import ( // to define an expected post-state. The post-state is then validated with ExpectedMTState.Validate(t, postState) type ExpectedMTState struct { PreimageKey common.Hash - PreimageOffset uint32 - Heap uint32 + PreimageOffset arch.Word + Heap arch.Word LLReservationActive bool - LLAddress uint32 - LLOwnerThread uint32 + LLAddress arch.Word + LLOwnerThread arch.Word ExitCode uint8 Exited bool Step uint64 @@ -28,37 +29,37 @@ type ExpectedMTState struct { expectedMemory *memory.Memory // Threading-related expectations StepsSinceLastContextSwitch uint64 - Wakeup uint32 + Wakeup arch.Word TraverseRight bool - NextThreadId uint32 + NextThreadId arch.Word ThreadCount int RightStackSize int LeftStackSize int - prestateActiveThreadId uint32 + prestateActiveThreadId arch.Word prestateActiveThreadOrig ExpectedThreadState // Cached for internal use - ActiveThreadId uint32 - threadExpectations map[uint32]*ExpectedThreadState + ActiveThreadId arch.Word + threadExpectations map[arch.Word]*ExpectedThreadState } type ExpectedThreadState struct { - ThreadId uint32 + ThreadId arch.Word ExitCode uint8 Exited bool - FutexAddr uint32 - FutexVal uint32 + FutexAddr arch.Word + FutexVal arch.Word FutexTimeoutStep uint64 - PC uint32 - NextPC 
uint32 - HI uint32 - LO uint32 - Registers [32]uint32 + PC arch.Word + NextPC arch.Word + HI arch.Word + LO arch.Word + Registers [32]arch.Word Dropped bool } func NewExpectedMTState(fromState *multithreaded.State) *ExpectedMTState { currentThread := fromState.GetCurrentThread() - expectedThreads := make(map[uint32]*ExpectedThreadState) + expectedThreads := make(map[arch.Word]*ExpectedThreadState) for _, t := range GetAllThreads(fromState) { expectedThreads[t.ThreadId] = newExpectedThreadState(t) } @@ -118,12 +119,17 @@ func (e *ExpectedMTState) ExpectStep() { e.StepsSinceLastContextSwitch += 1 } -func (e *ExpectedMTState) ExpectMemoryWrite(addr uint32, val uint32) { +func (e *ExpectedMTState) ExpectMemoryWrite(addr arch.Word, val uint32) { e.expectedMemory.SetMemory(addr, val) e.MemoryRoot = e.expectedMemory.MerkleRoot() } -func (e *ExpectedMTState) ExpectMemoryWriteMultiple(addr uint32, val uint32, addr2 uint32, val2 uint32) { +func (e *ExpectedMTState) ExpectMemoryWordWrite(addr arch.Word, val arch.Word) { + e.expectedMemory.SetWord(addr, val) + e.MemoryRoot = e.expectedMemory.MerkleRoot() +} + +func (e *ExpectedMTState) ExpectMemoryWriteMultiple(addr arch.Word, val uint32, addr2 arch.Word, val2 uint32) { e.expectedMemory.SetMemory(addr, val) e.expectedMemory.SetMemory(addr2, val2) e.MemoryRoot = e.expectedMemory.MerkleRoot() @@ -166,7 +172,7 @@ func (e *ExpectedMTState) PrestateActiveThread() *ExpectedThreadState { return e.threadExpectations[e.prestateActiveThreadId] } -func (e *ExpectedMTState) Thread(threadId uint32) *ExpectedThreadState { +func (e *ExpectedMTState) Thread(threadId arch.Word) *ExpectedThreadState { return e.threadExpectations[threadId] } diff --git a/cannon/mipsevm/multithreaded/testutil/expectations_test.go b/cannon/mipsevm/multithreaded/testutil/expectations_test.go index a40e15e0f8d5..a17534fd5eea 100644 --- a/cannon/mipsevm/multithreaded/testutil/expectations_test.go +++ b/cannon/mipsevm/multithreaded/testutil/expectations_test.go @@ 
-7,6 +7,7 @@ import ( //"github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" ) @@ -45,10 +46,10 @@ func TestValidate_shouldCatchMutations(t *testing.T) { {name: "LeftStackSize", mut: func(e *ExpectedMTState, st *multithreaded.State) { e.LeftStackSize += 1 }}, {name: "ActiveThreadId", mut: func(e *ExpectedMTState, st *multithreaded.State) { e.ActiveThreadId += 1 }}, {name: "Empty thread expectations", mut: func(e *ExpectedMTState, st *multithreaded.State) { - e.threadExpectations = map[uint32]*ExpectedThreadState{} + e.threadExpectations = map[arch.Word]*ExpectedThreadState{} }}, {name: "Mismatched thread expectations", mut: func(e *ExpectedMTState, st *multithreaded.State) { - e.threadExpectations = map[uint32]*ExpectedThreadState{someThread.ThreadId: newExpectedThreadState(someThread)} + e.threadExpectations = map[arch.Word]*ExpectedThreadState{someThread.ThreadId: newExpectedThreadState(someThread)} }}, {name: "Active threadId", mut: func(e *ExpectedMTState, st *multithreaded.State) { e.threadExpectations[st.GetCurrentThread().ThreadId].ThreadId += 1 diff --git a/cannon/mipsevm/multithreaded/testutil/mutators.go b/cannon/mipsevm/multithreaded/testutil/mutators.go index a44ba23a4fac..62e22c237c8d 100644 --- a/cannon/mipsevm/multithreaded/testutil/mutators.go +++ b/cannon/mipsevm/multithreaded/testutil/mutators.go @@ -6,6 +6,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" "github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" @@ -27,18 +28,18 @@ func (m *StateMutatorMultiThreaded) Randomize(randSeed int64) { step := r.RandStep() m.state.PreimageKey = 
r.RandHash() - m.state.PreimageOffset = r.Uint32() + m.state.PreimageOffset = r.Word() m.state.Step = step m.state.LastHint = r.RandHint() m.state.StepsSinceLastContextSwitch = uint64(r.Intn(exec.SchedQuantum)) // Randomize memory-related fields halfMemory := math.MaxUint32 / 2 - m.state.Heap = uint32(r.Intn(halfMemory) + halfMemory) + m.state.Heap = arch.Word(r.Intn(halfMemory) + halfMemory) m.state.LLReservationActive = r.Intn(2) == 1 if m.state.LLReservationActive { - m.state.LLAddress = uint32(r.Intn(halfMemory)) - m.state.LLOwnerThread = uint32(r.Intn(10)) + m.state.LLAddress = arch.Word(r.Intn(halfMemory)) + m.state.LLOwnerThread = arch.Word(r.Intn(10)) } // Randomize threads @@ -48,11 +49,11 @@ func (m *StateMutatorMultiThreaded) Randomize(randSeed int64) { SetupThreads(randSeed+1, m.state, traverseRight, activeStackThreads, inactiveStackThreads) } -func (m *StateMutatorMultiThreaded) SetHI(val uint32) { +func (m *StateMutatorMultiThreaded) SetHI(val arch.Word) { m.state.GetCurrentThread().Cpu.HI = val } -func (m *StateMutatorMultiThreaded) SetLO(val uint32) { +func (m *StateMutatorMultiThreaded) SetLO(val arch.Word) { m.state.GetCurrentThread().Cpu.LO = val } @@ -64,16 +65,16 @@ func (m *StateMutatorMultiThreaded) SetExited(val bool) { m.state.Exited = val } -func (m *StateMutatorMultiThreaded) SetPC(val uint32) { +func (m *StateMutatorMultiThreaded) SetPC(val arch.Word) { thread := m.state.GetCurrentThread() thread.Cpu.PC = val } -func (m *StateMutatorMultiThreaded) SetHeap(val uint32) { +func (m *StateMutatorMultiThreaded) SetHeap(val arch.Word) { m.state.Heap = val } -func (m *StateMutatorMultiThreaded) SetNextPC(val uint32) { +func (m *StateMutatorMultiThreaded) SetNextPC(val arch.Word) { thread := m.state.GetCurrentThread() thread.Cpu.NextPC = val } @@ -86,7 +87,7 @@ func (m *StateMutatorMultiThreaded) SetPreimageKey(val common.Hash) { m.state.PreimageKey = val } -func (m *StateMutatorMultiThreaded) SetPreimageOffset(val uint32) { +func (m 
*StateMutatorMultiThreaded) SetPreimageOffset(val arch.Word) { m.state.PreimageOffset = val } diff --git a/cannon/mipsevm/multithreaded/testutil/thread.go b/cannon/mipsevm/multithreaded/testutil/thread.go index f5b1d29a8dd6..6cbd3752c613 100644 --- a/cannon/mipsevm/multithreaded/testutil/thread.go +++ b/cannon/mipsevm/multithreaded/testutil/thread.go @@ -1,6 +1,7 @@ package testutil import ( + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" "github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" ) @@ -14,8 +15,8 @@ func RandomThread(randSeed int64) *multithreaded.ThreadState { thread.Registers = *r.RandRegisters() thread.Cpu.PC = pc thread.Cpu.NextPC = pc + 4 - thread.Cpu.HI = r.Uint32() - thread.Cpu.LO = r.Uint32() + thread.Cpu.HI = r.Word() + thread.Cpu.LO = r.Word() return thread } @@ -37,7 +38,7 @@ func InitializeSingleThread(randSeed int, state *multithreaded.State, traverseRi func SetupThreads(randomSeed int64, state *multithreaded.State, traverseRight bool, activeStackSize, otherStackSize int) { var activeStack, otherStack []*multithreaded.ThreadState - tid := uint32(0) + tid := arch.Word(0) for i := 0; i < activeStackSize; i++ { thread := RandomThread(randomSeed + int64(i)) thread.ThreadId = tid @@ -129,13 +130,13 @@ func FindNextThreadFiltered(state *multithreaded.State, filter ThreadFilter) *mu return nil } -func FindNextThreadExcluding(state *multithreaded.State, threadId uint32) *multithreaded.ThreadState { +func FindNextThreadExcluding(state *multithreaded.State, threadId arch.Word) *multithreaded.ThreadState { return FindNextThreadFiltered(state, func(t *multithreaded.ThreadState) bool { return t.ThreadId != threadId }) } -func FindThread(state *multithreaded.State, threadId uint32) *multithreaded.ThreadState { +func FindThread(state *multithreaded.State, threadId arch.Word) *multithreaded.ThreadState { for _, t := range GetAllThreads(state) { if t.ThreadId == 
threadId { return t diff --git a/cannon/mipsevm/multithreaded/thread.go b/cannon/mipsevm/multithreaded/thread.go index f811a52be467..fbb49856e399 100644 --- a/cannon/mipsevm/multithreaded/thread.go +++ b/cannon/mipsevm/multithreaded/thread.go @@ -8,34 +8,47 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" ) -// SERIALIZED_THREAD_SIZE is the size of a serialized ThreadState object -const SERIALIZED_THREAD_SIZE = 166 - -// THREAD_WITNESS_SIZE is the size of a thread witness encoded in bytes. -// -// It consists of the active thread serialized and concatenated with the -// 32 byte hash onion of the active thread stack without the active thread -const THREAD_WITNESS_SIZE = SERIALIZED_THREAD_SIZE + 32 +const ( + THREAD_ID_STATE_WITNESS_OFFSET = 0 + THREAD_EXIT_CODE_WITNESS_OFFSET = THREAD_ID_STATE_WITNESS_OFFSET + arch.WordSizeBytes + THREAD_EXITED_WITNESS_OFFSET = THREAD_EXIT_CODE_WITNESS_OFFSET + 1 + THREAD_FUTEX_ADDR_WITNESS_OFFSET = THREAD_EXITED_WITNESS_OFFSET + 1 + THREAD_FUTEX_VAL_WITNESS_OFFSET = THREAD_FUTEX_ADDR_WITNESS_OFFSET + arch.WordSizeBytes + THREAD_FUTEX_TIMEOUT_STEP_WITNESS_OFFSET = THREAD_FUTEX_VAL_WITNESS_OFFSET + arch.WordSizeBytes + THREAD_FUTEX_CPU_WITNESS_OFFSET = THREAD_FUTEX_TIMEOUT_STEP_WITNESS_OFFSET + 8 + THREAD_REGISTERS_WITNESS_OFFSET = THREAD_FUTEX_CPU_WITNESS_OFFSET + (4 * arch.WordSizeBytes) + + // SERIALIZED_THREAD_SIZE is the size of a serialized ThreadState object + // 166 and 322 bytes for 32 and 64-bit respectively + SERIALIZED_THREAD_SIZE = THREAD_REGISTERS_WITNESS_OFFSET + (32 * arch.WordSizeBytes) + + // THREAD_WITNESS_SIZE is the size of a thread witness encoded in bytes. 
+ // + // It consists of the active thread serialized and concatenated with the + // 32 byte hash onion of the active thread stack without the active thread + THREAD_WITNESS_SIZE = SERIALIZED_THREAD_SIZE + 32 +) // The empty thread root - keccak256(bytes32(0) ++ bytes32(0)) var EmptyThreadsRoot common.Hash = common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5") type ThreadState struct { - ThreadId uint32 `json:"threadId"` + ThreadId Word `json:"threadId"` ExitCode uint8 `json:"exit"` Exited bool `json:"exited"` - FutexAddr uint32 `json:"futexAddr"` - FutexVal uint32 `json:"futexVal"` + FutexAddr Word `json:"futexAddr"` + FutexVal Word `json:"futexVal"` FutexTimeoutStep uint64 `json:"futexTimeoutStep"` Cpu mipsevm.CpuScalars `json:"cpu"` - Registers [32]uint32 `json:"registers"` + Registers [32]Word `json:"registers"` } func CreateEmptyThread() *ThreadState { - initThreadId := uint32(0) + initThreadId := Word(0) return &ThreadState{ ThreadId: initThreadId, ExitCode: 0, @@ -49,27 +62,27 @@ func CreateEmptyThread() *ThreadState { FutexAddr: exec.FutexEmptyAddr, FutexVal: 0, FutexTimeoutStep: 0, - Registers: [32]uint32{}, + Registers: [32]Word{}, } } func (t *ThreadState) serializeThread() []byte { out := make([]byte, 0, SERIALIZED_THREAD_SIZE) - out = binary.BigEndian.AppendUint32(out, t.ThreadId) + out = arch.ByteOrderWord.AppendWord(out, t.ThreadId) out = append(out, t.ExitCode) out = mipsevm.AppendBoolToWitness(out, t.Exited) - out = binary.BigEndian.AppendUint32(out, t.FutexAddr) - out = binary.BigEndian.AppendUint32(out, t.FutexVal) + out = arch.ByteOrderWord.AppendWord(out, t.FutexAddr) + out = arch.ByteOrderWord.AppendWord(out, t.FutexVal) out = binary.BigEndian.AppendUint64(out, t.FutexTimeoutStep) - out = binary.BigEndian.AppendUint32(out, t.Cpu.PC) - out = binary.BigEndian.AppendUint32(out, t.Cpu.NextPC) - out = binary.BigEndian.AppendUint32(out, t.Cpu.LO) - out = binary.BigEndian.AppendUint32(out, t.Cpu.HI) + out = 
arch.ByteOrderWord.AppendWord(out, t.Cpu.PC) + out = arch.ByteOrderWord.AppendWord(out, t.Cpu.NextPC) + out = arch.ByteOrderWord.AppendWord(out, t.Cpu.LO) + out = arch.ByteOrderWord.AppendWord(out, t.Cpu.HI) for _, r := range t.Registers { - out = binary.BigEndian.AppendUint32(out, r) + out = arch.ByteOrderWord.AppendWord(out, r) } return out @@ -115,7 +128,7 @@ func (t *ThreadState) Deserialize(in io.Reader) error { if err := binary.Read(in, binary.BigEndian, &t.Cpu.HI); err != nil { return err } - // Read the registers as big endian uint32s + // Read the registers as big endian Words for i := range t.Registers { if err := binary.Read(in, binary.BigEndian, &t.Registers[i]); err != nil { return err diff --git a/cannon/mipsevm/program/load.go b/cannon/mipsevm/program/load.go index 5ff0b4098bc8..3cbba07d2bcd 100644 --- a/cannon/mipsevm/program/load.go +++ b/cannon/mipsevm/program/load.go @@ -7,19 +7,22 @@ import ( "io" "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" ) const ( - HEAP_START = 0x05_00_00_00 - HEAP_END = 0x60_00_00_00 - PROGRAM_BREAK = 0x40_00_00_00 + HEAP_START = arch.HeapStart + HEAP_END = arch.HeapEnd + PROGRAM_BREAK = arch.ProgramBreak ) -type CreateInitialFPVMState[T mipsevm.FPVMState] func(pc, heapStart uint32) T +type Word = arch.Word + +type CreateInitialFPVMState[T mipsevm.FPVMState] func(pc, heapStart Word) T func LoadELF[T mipsevm.FPVMState](f *elf.File, initState CreateInitialFPVMState[T]) (T, error) { var empty T - s := initState(uint32(f.Entry), HEAP_START) + s := initState(Word(f.Entry), HEAP_START) for i, prog := range f.Progs { if prog.Type == 0x70000003 { // MIPS_ABIFLAGS @@ -39,13 +42,14 @@ func LoadELF[T mipsevm.FPVMState](f *elf.File, initState CreateInitialFPVMState[ } } + // TODO(#12205) if prog.Vaddr+prog.Memsz >= uint64(1<<32) { return empty, fmt.Errorf("program %d out of 32-bit mem range: %x - %x (size: %x)", i, prog.Vaddr, prog.Vaddr+prog.Memsz, prog.Memsz) } 
if prog.Vaddr+prog.Memsz >= HEAP_START { return empty, fmt.Errorf("program %d overlaps with heap: %x - %x (size: %x). The heap start offset must be reconfigured", i, prog.Vaddr, prog.Vaddr+prog.Memsz, prog.Memsz) } - if err := s.GetMemory().SetMemoryRange(uint32(prog.Vaddr), r); err != nil { + if err := s.GetMemory().SetMemoryRange(Word(prog.Vaddr), r); err != nil { return empty, fmt.Errorf("failed to read program segment %d: %w", i, err) } } diff --git a/cannon/mipsevm/program/metadata.go b/cannon/mipsevm/program/metadata.go index fb34da7694c7..ab1aea0842d0 100644 --- a/cannon/mipsevm/program/metadata.go +++ b/cannon/mipsevm/program/metadata.go @@ -10,8 +10,8 @@ import ( type Symbol struct { Name string `json:"name"` - Start uint32 `json:"start"` - Size uint32 `json:"size"` + Start Word `json:"start"` + Size Word `json:"size"` } type Metadata struct { @@ -31,12 +31,12 @@ func MakeMetadata(elfProgram *elf.File) (*Metadata, error) { }) out := &Metadata{Symbols: make([]Symbol, len(syms))} for i, s := range syms { - out.Symbols[i] = Symbol{Name: s.Name, Start: uint32(s.Value), Size: uint32(s.Size)} + out.Symbols[i] = Symbol{Name: s.Name, Start: Word(s.Value), Size: Word(s.Size)} } return out, nil } -func (m *Metadata) LookupSymbol(addr uint32) string { +func (m *Metadata) LookupSymbol(addr Word) string { if len(m.Symbols) == 0 { return "!unknown" } @@ -59,12 +59,12 @@ func (m *Metadata) CreateSymbolMatcher(name string) mipsevm.SymbolMatcher { if s.Name == name { start := s.Start end := s.Start + s.Size - return func(addr uint32) bool { + return func(addr Word) bool { return addr >= start && addr < end } } } - return func(addr uint32) bool { + return func(addr Word) bool { return false } } diff --git a/cannon/mipsevm/program/patch.go b/cannon/mipsevm/program/patch.go index e8e2e3ebc085..603bb41086ac 100644 --- a/cannon/mipsevm/program/patch.go +++ b/cannon/mipsevm/program/patch.go @@ -3,14 +3,16 @@ package program import ( "bytes" "debug/elf" - "encoding/binary" 
"errors" "fmt" "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/memory" ) +const WordSizeBytes = arch.WordSizeBytes + // PatchGoGC patches out garbage-collection-related symbols to disable garbage collection // and improves performance by patching out floating-point-related symbols func PatchGoGC(f *elf.File, st mipsevm.FPVMState) error { @@ -39,10 +41,10 @@ func PatchGoGC(f *elf.File, st mipsevm.FPVMState) error { "flag.init", // We need to patch this out, we don't pass float64nan because we don't support floats "runtime.check": - // MIPS32 patch: ret (pseudo instruction) + // MIPSx patch: ret (pseudo instruction) // 03e00008 = jr $ra = ret (pseudo instruction) // 00000000 = nop (executes with delay-slot, but does nothing) - if err := st.GetMemory().SetMemoryRange(uint32(s.Value), bytes.NewReader([]byte{ + if err := st.GetMemory().SetMemoryRange(Word(s.Value), bytes.NewReader([]byte{ 0x03, 0xe0, 0x00, 0x08, 0, 0, 0, 0, })); err != nil { @@ -56,41 +58,54 @@ func PatchGoGC(f *elf.File, st mipsevm.FPVMState) error { // PatchStack sets up the program's initial stack frame and stack pointer func PatchStack(st mipsevm.FPVMState) error { // setup stack pointer - sp := uint32(0x7f_ff_d0_00) + sp := Word(arch.HighMemoryStart) // allocate 1 page for the initial stack data, and 16KB = 4 pages for the stack to grow if err := st.GetMemory().SetMemoryRange(sp-4*memory.PageSize, bytes.NewReader(make([]byte, 5*memory.PageSize))); err != nil { return errors.New("failed to allocate page for stack content") } st.GetRegistersRef()[29] = sp - storeMem := func(addr uint32, v uint32) { - var dat [4]byte - binary.BigEndian.PutUint32(dat[:], v) + storeMem := func(addr Word, v Word) { + var dat [WordSizeBytes]byte + arch.ByteOrderWord.PutWord(dat[:], v) _ = st.GetMemory().SetMemoryRange(addr, bytes.NewReader(dat[:])) } - // init argc, argv, aux on stack - 
storeMem(sp+4*0, 1) // argc = 1 (argument count) - storeMem(sp+4*1, sp+4*21) // argv[0] - storeMem(sp+4*2, 0) // argv[1] = terminating - storeMem(sp+4*3, sp+4*14) // envp[0] = x (offset to first env var) - storeMem(sp+4*4, 0) // envp[1] = terminating - storeMem(sp+4*5, 6) // auxv[0] = _AT_PAGESZ = 6 (key) - storeMem(sp+4*6, 4096) // auxv[1] = page size of 4 KiB (value) - (== minPhysPageSize) - storeMem(sp+4*7, 25) // auxv[2] = AT_RANDOM - storeMem(sp+4*8, sp+4*10) // auxv[3] = address of 16 bytes containing random value - storeMem(sp+4*9, 0) // auxv[term] = 0 + auxv3Offset := sp + WordSizeBytes*10 + randomness := []byte("4;byfairdiceroll") + randomness = pad(randomness) + _ = st.GetMemory().SetMemoryRange(auxv3Offset, bytes.NewReader(randomness)) - _ = st.GetMemory().SetMemoryRange(sp+4*10, bytes.NewReader([]byte("4;byfairdiceroll"))) // 16 bytes of "randomness" + envp0Offset := auxv3Offset + Word(len(randomness)) + envar := append([]byte("GODEBUG=memprofilerate=0"), 0x0) + envar = pad(envar) + _ = st.GetMemory().SetMemoryRange(envp0Offset, bytes.NewReader(envar)) - // append 4 extra zero bytes to end at 4-byte alignment - envar := append([]byte("GODEBUG=memprofilerate=0"), 0x0, 0x0, 0x0, 0x0) - _ = st.GetMemory().SetMemoryRange(sp+4*14, bytes.NewReader(envar)) + argv0Offset := envp0Offset + Word(len(envar)) + programName := append([]byte("op-program"), 0x0) + programName = pad(programName) + _ = st.GetMemory().SetMemoryRange(argv0Offset, bytes.NewReader(programName)) - // 24 bytes for GODEBUG=memprofilerate=0 + 4 null bytes - // Then append program name + 2 null bytes for 4-byte alignment - programName := append([]byte("op-program"), 0x0, 0x0) - _ = st.GetMemory().SetMemoryRange(sp+4*21, bytes.NewReader(programName)) + // init argc, argv, aux on stack + storeMem(sp+WordSizeBytes*0, 1) // argc = 1 (argument count) + storeMem(sp+WordSizeBytes*1, argv0Offset) // argv[0] + storeMem(sp+WordSizeBytes*2, 0) // argv[1] = terminating + storeMem(sp+WordSizeBytes*3, 
envp0Offset) // envp[0] = x (offset to first env var) + storeMem(sp+WordSizeBytes*4, 0) // envp[1] = terminating + storeMem(sp+WordSizeBytes*5, 6) // auxv[0] = _AT_PAGESZ = 6 (key) + storeMem(sp+WordSizeBytes*6, 4096) // auxv[1] = page size of 4 KiB (value) - (== minPhysPageSize) + storeMem(sp+WordSizeBytes*7, 25) // auxv[2] = AT_RANDOM + storeMem(sp+WordSizeBytes*8, auxv3Offset) // auxv[3] = address of 16 bytes containing random value + storeMem(sp+WordSizeBytes*9, 0) // auxv[term] = 0 return nil } + +// pad adds appropriate padding to buf to end at Word alignment +func pad(buf []byte) []byte { + if len(buf)%WordSizeBytes == 0 { + return buf + } + bytesToAlignment := WordSizeBytes - len(buf)%WordSizeBytes + return append(buf, make([]byte, bytesToAlignment)...) +} diff --git a/cannon/mipsevm/singlethreaded/instrumented.go b/cannon/mipsevm/singlethreaded/instrumented.go index 800cc1a92f7a..7757ae390d6f 100644 --- a/cannon/mipsevm/singlethreaded/instrumented.go +++ b/cannon/mipsevm/singlethreaded/instrumented.go @@ -28,7 +28,7 @@ var _ mipsevm.FPVM = (*InstrumentedState)(nil) func NewInstrumentedState(state *State, po mipsevm.PreimageOracle, stdOut, stdErr io.Writer, meta mipsevm.Metadata) *InstrumentedState { var sleepCheck mipsevm.SymbolMatcher if meta == nil { - sleepCheck = func(addr uint32) bool { return false } + sleepCheck = func(addr Word) bool { return false } } else { sleepCheck = meta.CreateSymbolMatcher("runtime.notesleep") } @@ -75,7 +75,7 @@ func (m *InstrumentedState) Step(proof bool) (wit *mipsevm.StepWitness, err erro memProof := m.memoryTracker.MemProof() wit.ProofData = append(wit.ProofData, memProof[:]...) 
lastPreimageKey, lastPreimage, lastPreimageOffset := m.preimageOracle.LastPreimage() - if lastPreimageOffset != ^uint32(0) { + if lastPreimageOffset != ^Word(0) { wit.PreimageOffset = lastPreimageOffset wit.PreimageKey = lastPreimageKey wit.PreimageValue = lastPreimage @@ -88,7 +88,7 @@ func (m *InstrumentedState) CheckInfiniteLoop() bool { return m.sleepCheck(m.state.GetPC()) } -func (m *InstrumentedState) LastPreimage() ([32]byte, []byte, uint32) { +func (m *InstrumentedState) LastPreimage() ([32]byte, []byte, Word) { return m.preimageOracle.LastPreimage() } @@ -109,7 +109,7 @@ func (m *InstrumentedState) Traceback() { m.stackTracker.Traceback() } -func (m *InstrumentedState) LookupSymbol(addr uint32) string { +func (m *InstrumentedState) LookupSymbol(addr Word) string { if m.meta == nil { return "" } diff --git a/cannon/mipsevm/singlethreaded/mips.go b/cannon/mipsevm/singlethreaded/mips.go index a88d0c66b0e6..afef3fb41586 100644 --- a/cannon/mipsevm/singlethreaded/mips.go +++ b/cannon/mipsevm/singlethreaded/mips.go @@ -6,24 +6,26 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" - "github.com/ethereum-optimism/optimism/cannon/mipsevm/program" ) +type Word = arch.Word + func (m *InstrumentedState) handleSyscall() error { syscallNum, a0, a1, a2, _ := exec.GetSyscallArgs(&m.state.Registers) - v0 := uint32(0) - v1 := uint32(0) + v0 := Word(0) + v1 := Word(0) //fmt.Printf("syscall: %d\n", syscallNum) switch syscallNum { case exec.SysMmap: - var newHeap uint32 + var newHeap Word v0, v1, newHeap = exec.HandleSysMmap(a0, a1, m.state.Heap) m.state.Heap = newHeap case exec.SysBrk: - v0 = program.PROGRAM_BREAK + v0 = arch.ProgramBreak case exec.SysClone: // clone (not supported) v0 = 1 case exec.SysExitGroup: @@ -31,13 +33,13 @@ func (m *InstrumentedState) handleSyscall() error { m.state.ExitCode = 
uint8(a0) return nil case exec.SysRead: - var newPreimageOffset uint32 + var newPreimageOffset Word v0, v1, newPreimageOffset, _, _ = exec.HandleSysRead(a0, a1, a2, m.state.PreimageKey, m.state.PreimageOffset, m.preimageOracle, m.state.Memory, m.memoryTracker) m.state.PreimageOffset = newPreimageOffset case exec.SysWrite: var newLastHint hexutil.Bytes var newPreimageKey common.Hash - var newPreimageOffset uint32 + var newPreimageOffset Word v0, v1, newLastHint, newPreimageKey, newPreimageOffset = exec.HandleSysWrite(a0, a1, a2, m.state.LastHint, m.state.PreimageKey, m.state.PreimageOffset, m.preimageOracle, m.state.Memory, m.memoryTracker, m.stdOut, m.stdErr) m.state.LastHint = newLastHint m.state.PreimageKey = newPreimageKey @@ -78,19 +80,19 @@ func (m *InstrumentedState) mipsStep() error { func (m *InstrumentedState) handleRMWOps(insn, opcode uint32) error { baseReg := (insn >> 21) & 0x1F base := m.state.Registers[baseReg] - rtReg := (insn >> 16) & 0x1F + rtReg := Word((insn >> 16) & 0x1F) offset := exec.SignExtendImmediate(insn) - effAddr := (base + offset) & 0xFFFFFFFC + effAddr := (base + offset) & arch.AddressMask m.memoryTracker.TrackMemAccess(effAddr) - mem := m.state.Memory.GetMemory(effAddr) + mem := m.state.Memory.GetWord(effAddr) - var retVal uint32 + var retVal Word if opcode == exec.OpLoadLinked { retVal = mem } else if opcode == exec.OpStoreConditional { rt := m.state.Registers[rtReg] - m.state.Memory.SetMemory(effAddr, rt) + m.state.Memory.SetWord(effAddr, rt) retVal = 1 // 1 for success } else { panic(fmt.Sprintf("Invalid instruction passed to handleRMWOps (opcode %08x)", opcode)) diff --git a/cannon/mipsevm/singlethreaded/state.go b/cannon/mipsevm/singlethreaded/state.go index b7320131fb97..741f7f66bb09 100644 --- a/cannon/mipsevm/singlethreaded/state.go +++ b/cannon/mipsevm/singlethreaded/state.go @@ -13,28 +13,30 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/cannon/mipsevm" + 
"github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/memory" ) // STATE_WITNESS_SIZE is the size of the state witness encoding in bytes. +// ignoring 64-bit STATE_WITNESS_SIZE as it's not supported for singlethreaded const STATE_WITNESS_SIZE = 226 type State struct { Memory *memory.Memory `json:"memory"` PreimageKey common.Hash `json:"preimageKey"` - PreimageOffset uint32 `json:"preimageOffset"` // note that the offset includes the 8-byte length prefix + PreimageOffset Word `json:"preimageOffset"` // note that the offset includes the 8-byte length prefix Cpu mipsevm.CpuScalars `json:"cpu"` - Heap uint32 `json:"heap"` // to handle mmap growth + Heap Word `json:"heap"` // to handle mmap growth ExitCode uint8 `json:"exit"` Exited bool `json:"exited"` Step uint64 `json:"step"` - Registers [32]uint32 `json:"registers"` + Registers [32]Word `json:"registers"` // LastHint is optional metadata, and not part of the VM state itself. LastHint hexutil.Bytes `json:"lastHint,omitempty"` @@ -51,7 +53,7 @@ func CreateEmptyState() *State { HI: 0, }, Heap: 0, - Registers: [32]uint32{}, + Registers: [32]Word{}, Memory: memory.NewMemory(), ExitCode: 0, Exited: false, @@ -59,7 +61,7 @@ func CreateEmptyState() *State { } } -func CreateInitialState(pc, heapStart uint32) *State { +func CreateInitialState(pc, heapStart Word) *State { state := CreateEmptyState() state.Cpu.PC = pc state.Cpu.NextPC = pc + 4 @@ -75,16 +77,16 @@ func (s *State) CreateVM(logger log.Logger, po mipsevm.PreimageOracle, stdOut, s type stateMarshaling struct { Memory *memory.Memory `json:"memory"` PreimageKey common.Hash `json:"preimageKey"` - PreimageOffset uint32 `json:"preimageOffset"` - PC uint32 `json:"pc"` - NextPC uint32 `json:"nextPC"` - LO uint32 `json:"lo"` - HI uint32 `json:"hi"` - Heap uint32 `json:"heap"` + PreimageOffset Word `json:"preimageOffset"` + PC Word `json:"pc"` + NextPC Word `json:"nextPC"` + LO Word `json:"lo"` + HI Word `json:"hi"` 
+ Heap Word `json:"heap"` ExitCode uint8 `json:"exit"` Exited bool `json:"exited"` Step uint64 `json:"step"` - Registers [32]uint32 `json:"registers"` + Registers [32]Word `json:"registers"` LastHint hexutil.Bytes `json:"lastHint,omitempty"` } @@ -128,11 +130,11 @@ func (s *State) UnmarshalJSON(data []byte) error { return nil } -func (s *State) GetPC() uint32 { return s.Cpu.PC } +func (s *State) GetPC() Word { return s.Cpu.PC } func (s *State) GetCpu() mipsevm.CpuScalars { return s.Cpu } -func (s *State) GetRegistersRef() *[32]uint32 { return &s.Registers } +func (s *State) GetRegistersRef() *[32]Word { return &s.Registers } func (s *State) GetExitCode() uint8 { return s.ExitCode } @@ -152,7 +154,7 @@ func (s *State) GetMemory() *memory.Memory { return s.Memory } -func (s *State) GetHeap() uint32 { +func (s *State) GetHeap() Word { return s.Heap } @@ -160,7 +162,7 @@ func (s *State) GetPreimageKey() common.Hash { return s.PreimageKey } -func (s *State) GetPreimageOffset() uint32 { +func (s *State) GetPreimageOffset() Word { return s.PreimageOffset } @@ -169,17 +171,17 @@ func (s *State) EncodeWitness() ([]byte, common.Hash) { memRoot := s.Memory.MerkleRoot() out = append(out, memRoot[:]...) out = append(out, s.PreimageKey[:]...) 
- out = binary.BigEndian.AppendUint32(out, s.PreimageOffset) - out = binary.BigEndian.AppendUint32(out, s.Cpu.PC) - out = binary.BigEndian.AppendUint32(out, s.Cpu.NextPC) - out = binary.BigEndian.AppendUint32(out, s.Cpu.LO) - out = binary.BigEndian.AppendUint32(out, s.Cpu.HI) - out = binary.BigEndian.AppendUint32(out, s.Heap) + out = arch.ByteOrderWord.AppendWord(out, s.PreimageOffset) + out = arch.ByteOrderWord.AppendWord(out, s.Cpu.PC) + out = arch.ByteOrderWord.AppendWord(out, s.Cpu.NextPC) + out = arch.ByteOrderWord.AppendWord(out, s.Cpu.LO) + out = arch.ByteOrderWord.AppendWord(out, s.Cpu.HI) + out = arch.ByteOrderWord.AppendWord(out, s.Heap) out = append(out, s.ExitCode) out = mipsevm.AppendBoolToWitness(out, s.Exited) out = binary.BigEndian.AppendUint64(out, s.Step) for _, r := range s.Registers { - out = binary.BigEndian.AppendUint32(out, r) + out = arch.ByteOrderWord.AppendWord(out, r) } return out, stateHashFromWitness(out) } @@ -191,17 +193,17 @@ func (s *State) EncodeWitness() ([]byte, common.Hash) { // StateVersion uint8(0) // Memory As per Memory.Serialize // PreimageKey [32]byte -// PreimageOffset uint32 -// Cpu.PC uint32 -// Cpu.NextPC uint32 -// Cpu.LO uint32 -// Cpu.HI uint32 -// Heap uint32 +// PreimageOffset Word +// Cpu.PC Word +// Cpu.NextPC Word +// Cpu.LO Word +// Cpu.HI Word +// Heap Word // ExitCode uint8 // Exited uint8 - 0 for false, 1 for true // Step uint64 -// Registers [32]uint32 -// len(LastHint) uint32 (0 when LastHint is nil) +// Registers [32]Word +// len(LastHint) Word (0 when LastHint is nil) // LastHint []byte func (s *State) Serialize(out io.Writer) error { bout := serialize.NewBinaryWriter(out) diff --git a/cannon/mipsevm/singlethreaded/state_test.go b/cannon/mipsevm/singlethreaded/state_test.go index c3dfd5cd41af..e0639c3dbeec 100644 --- a/cannon/mipsevm/singlethreaded/state_test.go +++ b/cannon/mipsevm/singlethreaded/state_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" 
"github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/memory" "github.com/ethereum-optimism/optimism/cannon/mipsevm/program" ) @@ -128,7 +129,7 @@ func TestSerializeStateRoundTrip(t *testing.T) { ExitCode: 1, Exited: true, Step: 0xdeadbeef, - Registers: [32]uint32{ + Registers: [32]arch.Word{ 0xdeadbeef, 0xdeadbeef, 0xc0ffee, diff --git a/cannon/mipsevm/singlethreaded/testutil/state.go b/cannon/mipsevm/singlethreaded/testutil/state.go index 079827500e45..b7203ead20b1 100644 --- a/cannon/mipsevm/singlethreaded/testutil/state.go +++ b/cannon/mipsevm/singlethreaded/testutil/state.go @@ -4,6 +4,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/singlethreaded" "github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" ) @@ -19,12 +20,12 @@ func (m *StateMutatorSingleThreaded) Randomize(randSeed int64) { step := r.RandStep() m.state.PreimageKey = r.RandHash() - m.state.PreimageOffset = r.Uint32() + m.state.PreimageOffset = r.Word() m.state.Cpu.PC = pc m.state.Cpu.NextPC = pc + 4 - m.state.Cpu.HI = r.Uint32() - m.state.Cpu.LO = r.Uint32() - m.state.Heap = r.Uint32() + m.state.Cpu.HI = r.Word() + m.state.Cpu.LO = r.Word() + m.state.Heap = r.Word() m.state.Step = step m.state.LastHint = r.RandHint() m.state.Registers = *r.RandRegisters() @@ -36,23 +37,23 @@ func NewStateMutatorSingleThreaded(state *singlethreaded.State) testutil.StateMu return &StateMutatorSingleThreaded{state: state} } -func (m *StateMutatorSingleThreaded) SetPC(val uint32) { +func (m *StateMutatorSingleThreaded) SetPC(val arch.Word) { m.state.Cpu.PC = val } -func (m *StateMutatorSingleThreaded) SetNextPC(val uint32) { +func (m *StateMutatorSingleThreaded) SetNextPC(val arch.Word) { m.state.Cpu.NextPC = val } -func (m 
*StateMutatorSingleThreaded) SetHI(val uint32) { +func (m *StateMutatorSingleThreaded) SetHI(val arch.Word) { m.state.Cpu.HI = val } -func (m *StateMutatorSingleThreaded) SetLO(val uint32) { +func (m *StateMutatorSingleThreaded) SetLO(val arch.Word) { m.state.Cpu.LO = val } -func (m *StateMutatorSingleThreaded) SetHeap(val uint32) { +func (m *StateMutatorSingleThreaded) SetHeap(val arch.Word) { m.state.Heap = val } @@ -72,7 +73,7 @@ func (m *StateMutatorSingleThreaded) SetPreimageKey(val common.Hash) { m.state.PreimageKey = val } -func (m *StateMutatorSingleThreaded) SetPreimageOffset(val uint32) { +func (m *StateMutatorSingleThreaded) SetPreimageOffset(val arch.Word) { m.state.PreimageOffset = val } diff --git a/cannon/mipsevm/state.go b/cannon/mipsevm/state.go index 8ed6f265c894..731562f4fae2 100644 --- a/cannon/mipsevm/state.go +++ b/cannon/mipsevm/state.go @@ -1,10 +1,12 @@ package mipsevm +import "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" + type CpuScalars struct { - PC uint32 `json:"pc"` - NextPC uint32 `json:"nextPC"` - LO uint32 `json:"lo"` - HI uint32 `json:"hi"` + PC arch.Word `json:"pc"` + NextPC arch.Word `json:"nextPC"` + LO arch.Word `json:"lo"` + HI arch.Word `json:"hi"` } const ( diff --git a/cannon/mipsevm/tests/evm_common_test.go b/cannon/mipsevm/tests/evm_common_test.go index ad93014450dd..b0068cb993ba 100644 --- a/cannon/mipsevm/tests/evm_common_test.go +++ b/cannon/mipsevm/tests/evm_common_test.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum/go-ethereum/core/tracing" "github.com/stretchr/testify/require" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" "github.com/ethereum-optimism/optimism/cannon/mipsevm/memory" "github.com/ethereum-optimism/optimism/cannon/mipsevm/program" @@ -98,13 +99,13 @@ func TestEVM(t *testing.T) { "mipsevm produced different state than EVM at step %d", state.GetStep()) } if exitGroup { - require.NotEqual(t, 
uint32(testutil.EndAddr), goVm.GetState().GetPC(), "must not reach end") + require.NotEqual(t, arch.Word(testutil.EndAddr), goVm.GetState().GetPC(), "must not reach end") require.True(t, goVm.GetState().GetExited(), "must set exited state") require.Equal(t, uint8(1), goVm.GetState().GetExitCode(), "must exit with 1") } else if expectPanic { - require.NotEqual(t, uint32(testutil.EndAddr), state.GetPC(), "must not reach end") + require.NotEqual(t, arch.Word(testutil.EndAddr), state.GetPC(), "must not reach end") } else { - require.Equal(t, uint32(testutil.EndAddr), state.GetPC(), "must reach end") + require.Equal(t, arch.Word(testutil.EndAddr), state.GetPC(), "must reach end") // inspect test result done, result := state.GetMemory().GetMemory(testutil.BaseAddrEnd+4), state.GetMemory().GetMemory(testutil.BaseAddrEnd+8) require.Equal(t, done, uint32(1), "must be done") @@ -121,10 +122,10 @@ func TestEVMSingleStep_Jump(t *testing.T) { versions := GetMipsVersionTestCases(t) cases := []struct { name string - pc uint32 - nextPC uint32 + pc arch.Word + nextPC arch.Word insn uint32 - expectNextPC uint32 + expectNextPC arch.Word expectLink bool }{ {name: "j MSB set target", pc: 0, nextPC: 4, insn: 0x0A_00_00_02, expectNextPC: 0x08_00_00_08}, // j 0x02_00_00_02 @@ -169,29 +170,29 @@ func TestEVMSingleStep_Operators(t *testing.T) { cases := []struct { name string isImm bool - rs uint32 - rt uint32 + rs Word + rt Word imm uint16 funct uint32 opcode uint32 - expectRes uint32 + expectRes Word }{ - {name: "add", funct: 0x20, isImm: false, rs: uint32(12), rt: uint32(20), expectRes: uint32(32)}, // add t0, s1, s2 - {name: "addu", funct: 0x21, isImm: false, rs: uint32(12), rt: uint32(20), expectRes: uint32(32)}, // addu t0, s1, s2 - {name: "addi", opcode: 0x8, isImm: true, rs: uint32(4), rt: uint32(1), imm: uint16(40), expectRes: uint32(44)}, // addi t0, s1, 40 - {name: "addi sign", opcode: 0x8, isImm: true, rs: uint32(2), rt: uint32(1), imm: uint16(0xfffe), expectRes: uint32(0)}, // 
addi t0, s1, -2 - {name: "addiu", opcode: 0x9, isImm: true, rs: uint32(4), rt: uint32(1), imm: uint16(40), expectRes: uint32(44)}, // addiu t0, s1, 40 - {name: "sub", funct: 0x22, isImm: false, rs: uint32(20), rt: uint32(12), expectRes: uint32(8)}, // sub t0, s1, s2 - {name: "subu", funct: 0x23, isImm: false, rs: uint32(20), rt: uint32(12), expectRes: uint32(8)}, // subu t0, s1, s2 - {name: "and", funct: 0x24, isImm: false, rs: uint32(1200), rt: uint32(490), expectRes: uint32(160)}, // and t0, s1, s2 - {name: "andi", opcode: 0xc, isImm: true, rs: uint32(4), rt: uint32(1), imm: uint16(40), expectRes: uint32(0)}, // andi t0, s1, 40 - {name: "or", funct: 0x25, isImm: false, rs: uint32(1200), rt: uint32(490), expectRes: uint32(1530)}, // or t0, s1, s2 - {name: "ori", opcode: 0xd, isImm: true, rs: uint32(4), rt: uint32(1), imm: uint16(40), expectRes: uint32(44)}, // ori t0, s1, 40 - {name: "xor", funct: 0x26, isImm: false, rs: uint32(1200), rt: uint32(490), expectRes: uint32(1370)}, // xor t0, s1, s2 - {name: "xori", opcode: 0xe, isImm: true, rs: uint32(4), rt: uint32(1), imm: uint16(40), expectRes: uint32(44)}, // xori t0, s1, 40 - {name: "nor", funct: 0x27, isImm: false, rs: uint32(1200), rt: uint32(490), expectRes: uint32(4294965765)}, // nor t0, s1, s2 - {name: "slt", funct: 0x2a, isImm: false, rs: 0xFF_FF_FF_FE, rt: uint32(5), expectRes: uint32(1)}, // slt t0, s1, s2 - {name: "sltu", funct: 0x2b, isImm: false, rs: uint32(1200), rt: uint32(490), expectRes: uint32(0)}, // sltu t0, s1, s2 + {name: "add", funct: 0x20, isImm: false, rs: Word(12), rt: Word(20), expectRes: Word(32)}, // add t0, s1, s2 + {name: "addu", funct: 0x21, isImm: false, rs: Word(12), rt: Word(20), expectRes: Word(32)}, // addu t0, s1, s2 + {name: "addi", opcode: 0x8, isImm: true, rs: Word(4), rt: Word(1), imm: uint16(40), expectRes: Word(44)}, // addi t0, s1, 40 + {name: "addi sign", opcode: 0x8, isImm: true, rs: Word(2), rt: Word(1), imm: uint16(0xfffe), expectRes: Word(0)}, // addi t0, s1, -2 + 
{name: "addiu", opcode: 0x9, isImm: true, rs: Word(4), rt: Word(1), imm: uint16(40), expectRes: Word(44)}, // addiu t0, s1, 40 + {name: "sub", funct: 0x22, isImm: false, rs: Word(20), rt: Word(12), expectRes: Word(8)}, // sub t0, s1, s2 + {name: "subu", funct: 0x23, isImm: false, rs: Word(20), rt: Word(12), expectRes: Word(8)}, // subu t0, s1, s2 + {name: "and", funct: 0x24, isImm: false, rs: Word(1200), rt: Word(490), expectRes: Word(160)}, // and t0, s1, s2 + {name: "andi", opcode: 0xc, isImm: true, rs: Word(4), rt: Word(1), imm: uint16(40), expectRes: Word(0)}, // andi t0, s1, 40 + {name: "or", funct: 0x25, isImm: false, rs: Word(1200), rt: Word(490), expectRes: Word(1530)}, // or t0, s1, s2 + {name: "ori", opcode: 0xd, isImm: true, rs: Word(4), rt: Word(1), imm: uint16(40), expectRes: Word(44)}, // ori t0, s1, 40 + {name: "xor", funct: 0x26, isImm: false, rs: Word(1200), rt: Word(490), expectRes: Word(1370)}, // xor t0, s1, s2 + {name: "xori", opcode: 0xe, isImm: true, rs: Word(4), rt: Word(1), imm: uint16(40), expectRes: Word(44)}, // xori t0, s1, 40 + {name: "nor", funct: 0x27, isImm: false, rs: Word(1200), rt: Word(490), expectRes: Word(4294965765)}, // nor t0, s1, s2 + {name: "slt", funct: 0x2a, isImm: false, rs: 0xFF_FF_FF_FE, rt: Word(5), expectRes: Word(1)}, // slt t0, s1, s2 + {name: "sltu", funct: 0x2b, isImm: false, rs: Word(1200), rt: Word(490), expectRes: Word(0)}, // sltu t0, s1, s2 } for _, v := range versions { @@ -237,17 +238,17 @@ func TestEVM_MMap(t *testing.T) { versions := GetMipsVersionTestCases(t) cases := []struct { name string - heap uint32 - address uint32 - size uint32 + heap arch.Word + address arch.Word + size arch.Word shouldFail bool - expectedHeap uint32 + expectedHeap arch.Word }{ - {name: "Increment heap by max value", heap: program.HEAP_START, address: 0, size: ^uint32(0), shouldFail: true}, - {name: "Increment heap to 0", heap: program.HEAP_START, address: 0, size: ^uint32(0) - program.HEAP_START + 1, shouldFail: true}, - 
{name: "Increment heap to previous page", heap: program.HEAP_START, address: 0, size: ^uint32(0) - program.HEAP_START - memory.PageSize + 1, shouldFail: true}, - {name: "Increment max page size", heap: program.HEAP_START, address: 0, size: ^uint32(0) & ^uint32(memory.PageAddrMask), shouldFail: true}, - {name: "Increment max page size from 0", heap: 0, address: 0, size: ^uint32(0) & ^uint32(memory.PageAddrMask), shouldFail: true}, + {name: "Increment heap by max value", heap: program.HEAP_START, address: 0, size: ^arch.Word(0), shouldFail: true}, + {name: "Increment heap to 0", heap: program.HEAP_START, address: 0, size: ^arch.Word(0) - program.HEAP_START + 1, shouldFail: true}, + {name: "Increment heap to previous page", heap: program.HEAP_START, address: 0, size: ^arch.Word(0) - program.HEAP_START - memory.PageSize + 1, shouldFail: true}, + {name: "Increment max page size", heap: program.HEAP_START, address: 0, size: ^arch.Word(0) & ^arch.Word(memory.PageAddrMask), shouldFail: true}, + {name: "Increment max page size from 0", heap: 0, address: 0, size: ^arch.Word(0) & ^arch.Word(memory.PageAddrMask), shouldFail: true}, {name: "Increment heap at limit", heap: program.HEAP_END, address: 0, size: 1, shouldFail: true}, {name: "Increment heap to limit", heap: program.HEAP_END - memory.PageSize, address: 0, size: 1, shouldFail: false, expectedHeap: program.HEAP_END}, {name: "Increment heap within limit", heap: program.HEAP_END - 2*memory.PageSize, address: 0, size: 1, shouldFail: false, expectedHeap: program.HEAP_END - memory.PageSize}, @@ -464,10 +465,10 @@ func TestEVMSysWriteHint(t *testing.T) { state := goVm.GetState() state.GetRegistersRef()[2] = exec.SysWrite state.GetRegistersRef()[4] = exec.FdHintWrite - state.GetRegistersRef()[5] = uint32(tt.memOffset) - state.GetRegistersRef()[6] = uint32(tt.bytesToWrite) + state.GetRegistersRef()[5] = arch.Word(tt.memOffset) + state.GetRegistersRef()[6] = arch.Word(tt.bytesToWrite) - err := 
state.GetMemory().SetMemoryRange(uint32(tt.memOffset), bytes.NewReader(tt.hintData)) + err := state.GetMemory().SetMemoryRange(arch.Word(tt.memOffset), bytes.NewReader(tt.hintData)) require.NoError(t, err) state.GetMemory().SetMemory(state.GetPC(), insn) step := state.GetStep() @@ -477,8 +478,8 @@ func TestEVMSysWriteHint(t *testing.T) { expected.PC = state.GetCpu().NextPC expected.NextPC = state.GetCpu().NextPC + 4 expected.LastHint = tt.expectedLastHint - expected.Registers[2] = uint32(tt.bytesToWrite) // Return count of bytes written - expected.Registers[7] = 0 // no Error + expected.Registers[2] = arch.Word(tt.bytesToWrite) // Return count of bytes written + expected.Registers[7] = 0 // no Error stepWitness, err := goVm.Step(true) require.NoError(t, err) @@ -497,7 +498,7 @@ func TestEVMFault(t *testing.T) { versions := GetMipsVersionTestCases(t) cases := []struct { name string - nextPC uint32 + nextPC arch.Word insn uint32 }{ {"illegal instruction", 0, 0xFF_FF_FF_FF}, diff --git a/cannon/mipsevm/tests/evm_multithreaded_test.go b/cannon/mipsevm/tests/evm_multithreaded_test.go index a26ebe96eb37..d0da7910fa0a 100644 --- a/cannon/mipsevm/tests/evm_multithreaded_test.go +++ b/cannon/mipsevm/tests/evm_multithreaded_test.go @@ -14,6 +14,7 @@ import ( "golang.org/x/exp/maps" "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" mttestutil "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded/testutil" @@ -21,15 +22,17 @@ import ( preimage "github.com/ethereum-optimism/optimism/op-preimage" ) +type Word = arch.Word + func TestEVM_MT_LL(t *testing.T) { var tracer *tracing.Hooks cases := []struct { name string - base uint32 + base Word offset int - value uint32 - effAddr uint32 + value Word + effAddr Word rtReg int }{ {name: "Aligned effAddr", base: 0x00_00_00_01, 
offset: 0x0133, value: 0xABCD, effAddr: 0x00_00_01_34, rtReg: 5}, @@ -44,7 +47,7 @@ func TestEVM_MT_LL(t *testing.T) { t.Run(tName, func(t *testing.T) { rtReg := c.rtReg baseReg := 6 - pc := uint32(0x44) + pc := Word(0x44) insn := uint32((0b11_0000 << 26) | (baseReg & 0x1F << 21) | (rtReg & 0x1F << 16) | (0xFFFF & c.offset)) goVm, state, contracts := setup(t, i, nil) step := state.GetStep() @@ -53,11 +56,11 @@ func TestEVM_MT_LL(t *testing.T) { state.GetCurrentThread().Cpu.PC = pc state.GetCurrentThread().Cpu.NextPC = pc + 4 state.GetMemory().SetMemory(pc, insn) - state.GetMemory().SetMemory(c.effAddr, c.value) + state.GetMemory().SetWord(c.effAddr, c.value) state.GetRegistersRef()[baseReg] = c.base if withExistingReservation { state.LLReservationActive = true - state.LLAddress = c.effAddr + uint32(4) + state.LLAddress = c.effAddr + Word(4) state.LLOwnerThread = 123 } else { state.LLReservationActive = false @@ -105,12 +108,12 @@ func TestEVM_MT_SC(t *testing.T) { cases := []struct { name string - base uint32 + base Word offset int - value uint32 - effAddr uint32 + value Word + effAddr Word rtReg int - threadId uint32 + threadId Word }{ {name: "Aligned effAddr", base: 0x00_00_00_01, offset: 0x0133, value: 0xABCD, effAddr: 0x00_00_01_34, rtReg: 5, threadId: 4}, {name: "Aligned effAddr, signed extended", base: 0x00_00_00_01, offset: 0xFF33, value: 0xABCD, effAddr: 0xFF_FF_FF_34, rtReg: 5, threadId: 4}, @@ -125,14 +128,14 @@ func TestEVM_MT_SC(t *testing.T) { t.Run(tName, func(t *testing.T) { rtReg := c.rtReg baseReg := 6 - pc := uint32(0x44) + pc := Word(0x44) insn := uint32((0b11_1000 << 26) | (baseReg & 0x1F << 21) | (rtReg & 0x1F << 16) | (0xFFFF & c.offset)) goVm, state, contracts := setup(t, i, nil) mttestutil.InitializeSingleThread(i*23456, state, i%2 == 1) step := state.GetStep() // Define LL-related params - var llAddress, llOwnerThread uint32 + var llAddress, llOwnerThread Word if v.matchEffAddr { llAddress = c.effAddr } else { @@ -158,10 +161,10 @@ func 
TestEVM_MT_SC(t *testing.T) { // Setup expectations expected := mttestutil.NewExpectedMTState(state) expected.ExpectStep() - var retVal uint32 + var retVal Word if v.shouldSucceed { retVal = 1 - expected.ExpectMemoryWrite(c.effAddr, c.value) + expected.ExpectMemoryWordWrite(c.effAddr, c.value) expected.LLReservationActive = false expected.LLAddress = 0 expected.LLOwnerThread = 0 @@ -207,10 +210,10 @@ func TestEVM_MT_SysRead_Preimage(t *testing.T) { cases := []struct { name string - addr uint32 - count uint32 - writeLen uint32 - preimageOffset uint32 + addr Word + count Word + writeLen Word + preimageOffset Word prestateMem uint32 postateMem uint32 shouldPanic bool @@ -236,14 +239,14 @@ func TestEVM_MT_SysRead_Preimage(t *testing.T) { for _, v := range llVariations { tName := fmt.Sprintf("%v (%v)", c.name, v.name) t.Run(tName, func(t *testing.T) { - effAddr := 0xFFffFFfc & c.addr + effAddr := arch.AddressMask & c.addr preimageKey := preimage.Keccak256Key(crypto.Keccak256Hash(preimageValue)).PreimageKey() oracle := testutil.StaticOracle(t, preimageValue) goVm, state, contracts := setup(t, i, oracle) step := state.GetStep() // Define LL-related params - var llAddress, llOwnerThread uint32 + var llAddress, llOwnerThread Word if v.matchEffAddr { llAddress = effAddr } else { @@ -315,16 +318,16 @@ func TestEVM_MT_StoreOpsClearMemReservation(t *testing.T) { {name: "no reservation, mismatched addr", llReservationActive: false, matchThreadId: true, matchEffAddr: false, shouldClearReservation: false}, } - pc := uint32(0x04) - rt := uint32(0x12_34_56_78) + pc := Word(0x04) + rt := Word(0x12_34_56_78) baseReg := 5 rtReg := 6 cases := []struct { name string opcode int offset int - base uint32 - effAddr uint32 + base Word + effAddr Word preMem uint32 postMem uint32 }{ @@ -343,7 +346,7 @@ func TestEVM_MT_StoreOpsClearMemReservation(t *testing.T) { step := state.GetStep() // Define LL-related params - var llAddress, llOwnerThread uint32 + var llAddress, llOwnerThread Word if 
v.matchEffAddr { llAddress = c.effAddr } else { @@ -393,13 +396,13 @@ func TestEVM_SysClone_FlagHandling(t *testing.T) { cases := []struct { name string - flags uint32 + flags Word valid bool }{ {"the supported flags bitmask", exec.ValidCloneFlags, true}, {"no flags", 0, false}, - {"all flags", ^uint32(0), false}, - {"all unsupported flags", ^uint32(exec.ValidCloneFlags), false}, + {"all flags", ^Word(0), false}, + {"all unsupported flags", ^Word(exec.ValidCloneFlags), false}, {"a few supported flags", exec.CloneFs | exec.CloneSysvsem, false}, {"one supported flag", exec.CloneFs, false}, {"mixed supported and unsupported flags", exec.CloneFs | exec.CloneParentSettid, false}, @@ -459,7 +462,7 @@ func TestEVM_SysClone_Successful(t *testing.T) { for i, c := range cases { t.Run(c.name, func(t *testing.T) { - stackPtr := uint32(100) + stackPtr := Word(100) goVm, state, contracts := setup(t, i, nil) mttestutil.InitializeSingleThread(i*333, state, c.traverseRight) @@ -470,7 +473,7 @@ func TestEVM_SysClone_Successful(t *testing.T) { step := state.GetStep() // Sanity-check assumptions - require.Equal(t, uint32(1), state.NextThreadId) + require.Equal(t, Word(1), state.NextThreadId) // Setup expectations expected := mttestutil.NewExpectedMTState(state) @@ -514,7 +517,7 @@ func TestEVM_SysGetTID(t *testing.T) { var tracer *tracing.Hooks cases := []struct { name string - threadId uint32 + threadId Word }{ {"zero", 0}, {"non-zero", 11}, @@ -570,8 +573,8 @@ func TestEVM_SysExit(t *testing.T) { mttestutil.SetupThreads(int64(i*1111), state, i%2 == 0, c.threadCount, 0) state.Memory.SetMemory(state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = exec.SysExit // Set syscall number - state.GetRegistersRef()[4] = uint32(exitCode) // The first argument (exit code) + state.GetRegistersRef()[2] = exec.SysExit // Set syscall number + state.GetRegistersRef()[4] = Word(exitCode) // The first argument (exit code) step := state.Step // Set up expectations @@ -654,11 +657,11 @@ func 
TestEVM_SysFutex_WaitPrivate(t *testing.T) { var tracer *tracing.Hooks cases := []struct { name string - addressParam uint32 - effAddr uint32 - targetValue uint32 - actualValue uint32 - timeout uint32 + addressParam Word + effAddr Word + targetValue Word + actualValue Word + timeout Word shouldFail bool shouldSetTimeout bool }{ @@ -678,7 +681,7 @@ func TestEVM_SysFutex_WaitPrivate(t *testing.T) { step := state.GetStep() state.Memory.SetMemory(state.GetPC(), syscallInsn) - state.Memory.SetMemory(c.effAddr, c.actualValue) + state.Memory.SetWord(c.effAddr, c.actualValue) state.GetRegistersRef()[2] = exec.SysFutex // Set syscall number state.GetRegistersRef()[4] = c.addressParam state.GetRegistersRef()[5] = exec.FutexWaitPrivate @@ -721,8 +724,8 @@ func TestEVM_SysFutex_WakePrivate(t *testing.T) { var tracer *tracing.Hooks cases := []struct { name string - addressParam uint32 - effAddr uint32 + addressParam Word + effAddr Word activeThreadCount int inactiveThreadCount int traverseRight bool @@ -800,7 +803,7 @@ func TestEVM_SysFutex_UnsupportedOp(t *testing.T) { const FUTEX_CMP_REQUEUE_PI = 12 const FUTEX_LOCK_PI2 = 13 - unsupportedFutexOps := map[string]uint32{ + unsupportedFutexOps := map[string]Word{ "FUTEX_WAIT": FUTEX_WAIT, "FUTEX_WAKE": FUTEX_WAKE, "FUTEX_FD": FUTEX_FD, @@ -889,7 +892,7 @@ func runPreemptSyscall(t *testing.T, syscallName string, syscallNum uint32) { mttestutil.SetupThreads(int64(i*3259), state, traverseRight, c.activeThreads, c.inactiveThreads) state.Memory.SetMemory(state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = syscallNum // Set syscall number + state.GetRegistersRef()[2] = Word(syscallNum) // Set syscall number step := state.Step // Set up post-state expectations @@ -972,7 +975,7 @@ func TestEVM_SysClockGettimeRealtime(t *testing.T) { testEVM_SysClockGettime(t, exec.ClockGettimeRealtimeFlag) } -func testEVM_SysClockGettime(t *testing.T, clkid uint32) { +func testEVM_SysClockGettime(t *testing.T, clkid Word) { var tracer 
*tracing.Hooks llVariations := []struct { @@ -996,7 +999,7 @@ func testEVM_SysClockGettime(t *testing.T, clkid uint32) { cases := []struct { name string - timespecAddr uint32 + timespecAddr Word }{ {"aligned timespec address", 0x1000}, {"unaligned timespec address", 0x1003}, @@ -1007,12 +1010,12 @@ func testEVM_SysClockGettime(t *testing.T, clkid uint32) { t.Run(tName, func(t *testing.T) { goVm, state, contracts := setup(t, 2101, nil) mttestutil.InitializeSingleThread(2101+i, state, i%2 == 1) - effAddr := c.timespecAddr & 0xFFffFFfc + effAddr := c.timespecAddr & arch.AddressMask effAddr2 := effAddr + 4 step := state.Step // Define LL-related params - var llAddress, llOwnerThread uint32 + var llAddress, llOwnerThread Word if v.matchEffAddr { llAddress = effAddr } else if v.matchEffAddr2 { @@ -1039,13 +1042,13 @@ func testEVM_SysClockGettime(t *testing.T, clkid uint32) { expected.ActiveThread().Registers[2] = 0 expected.ActiveThread().Registers[7] = 0 next := state.Step + 1 - var secs, nsecs uint32 + var secs, nsecs Word if clkid == exec.ClockGettimeMonotonicFlag { - secs = uint32(next / exec.HZ) - nsecs = uint32((next % exec.HZ) * (1_000_000_000 / exec.HZ)) + secs = Word(next / exec.HZ) + nsecs = Word((next % exec.HZ) * (1_000_000_000 / exec.HZ)) } - expected.ExpectMemoryWrite(effAddr, secs) - expected.ExpectMemoryWrite(effAddr2, nsecs) + expected.ExpectMemoryWordWrite(effAddr, secs) + expected.ExpectMemoryWordWrite(effAddr2, nsecs) if v.shouldClearReservation { expected.LLReservationActive = false expected.LLAddress = 0 @@ -1069,7 +1072,7 @@ func TestEVM_SysClockGettimeNonMonotonic(t *testing.T) { var tracer *tracing.Hooks goVm, state, contracts := setup(t, 2101, nil) - timespecAddr := uint32(0x1000) + timespecAddr := Word(0x1000) state.Memory.SetMemory(state.GetPC(), syscallInsn) state.GetRegistersRef()[2] = exec.SysClockGetTime // Set syscall number state.GetRegistersRef()[4] = 0xDEAD // a0 - invalid clockid @@ -1131,7 +1134,7 @@ func TestEVM_NoopSyscall(t 
*testing.T) { goVm, state, contracts := setup(t, int(noopVal), nil) state.Memory.SetMemory(state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = noopVal // Set syscall number + state.GetRegistersRef()[2] = Word(noopVal) // Set syscall number step := state.Step // Set up post-state expectations @@ -1178,7 +1181,7 @@ func TestEVM_UnsupportedSyscall(t *testing.T) { goVm, state, contracts := setup(t, i*3434, nil) // Setup basic getThreadId syscall instruction state.Memory.SetMemory(state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = syscallNum + state.GetRegistersRef()[2] = Word(syscallNum) // Set up post-state expectations require.Panics(t, func() { _, _ = goVm.Step(true) }) @@ -1194,9 +1197,9 @@ func TestEVM_NormalTraversalStep_HandleWaitingThread(t *testing.T) { step uint64 activeStackSize int otherStackSize int - futexAddr uint32 - targetValue uint32 - actualValue uint32 + futexAddr Word + targetValue Word + actualValue Word timeoutStep uint64 shouldWakeup bool shouldTimeout bool @@ -1225,7 +1228,7 @@ func TestEVM_NormalTraversalStep_HandleWaitingThread(t *testing.T) { if !c.shouldWakeup && c.shouldTimeout { require.Fail(t, "Invalid test case - cannot expect a timeout with no wakeup") } - effAddr := c.futexAddr & 0xFF_FF_FF_Fc + effAddr := c.futexAddr & arch.AddressMask goVm, state, contracts := setup(t, i, nil) mttestutil.SetupThreads(int64(i*101), state, traverseRight, c.activeStackSize, c.otherStackSize) state.Step = c.step @@ -1234,7 +1237,7 @@ func TestEVM_NormalTraversalStep_HandleWaitingThread(t *testing.T) { activeThread.FutexAddr = c.futexAddr activeThread.FutexVal = c.targetValue activeThread.FutexTimeoutStep = c.timeoutStep - state.GetMemory().SetMemory(effAddr, c.actualValue) + state.GetMemory().SetWord(effAddr, c.actualValue) // Set up post-state expectations expected := mttestutil.NewExpectedMTState(state) @@ -1328,14 +1331,14 @@ func TestEVM_NormalTraversal_Full(t *testing.T) { } func TestEVM_WakeupTraversalStep(t *testing.T) { - addr := 
uint32(0x1234) - wakeupVal := uint32(0x999) + addr := Word(0x1234) + wakeupVal := Word(0x999) var tracer *tracing.Hooks cases := []struct { name string - wakeupAddr uint32 - futexAddr uint32 - targetVal uint32 + wakeupAddr Word + futexAddr Word + targetVal Word traverseRight bool activeStackSize int otherStackSize int @@ -1373,7 +1376,7 @@ func TestEVM_WakeupTraversalStep(t *testing.T) { step := state.Step state.Wakeup = c.wakeupAddr - state.GetMemory().SetMemory(c.wakeupAddr&0xFF_FF_FF_FC, wakeupVal) + state.GetMemory().SetWord(c.wakeupAddr&arch.AddressMask, wakeupVal) activeThread := state.GetCurrentThread() activeThread.FutexAddr = c.futexAddr activeThread.FutexVal = c.targetVal diff --git a/cannon/mipsevm/tests/evm_singlethreaded_test.go b/cannon/mipsevm/tests/evm_singlethreaded_test.go index 32cad32cc00e..73613d3590f4 100644 --- a/cannon/mipsevm/tests/evm_singlethreaded_test.go +++ b/cannon/mipsevm/tests/evm_singlethreaded_test.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/require" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" "github.com/ethereum-optimism/optimism/cannon/mipsevm/memory" "github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" @@ -20,10 +21,10 @@ func TestEVM_LL(t *testing.T) { cases := []struct { name string - base uint32 + base Word offset int - value uint32 - effAddr uint32 + value Word + effAddr Word rtReg int }{ {name: "Aligned effAddr", base: 0x00_00_00_01, offset: 0x0133, value: 0xABCD, effAddr: 0x00_00_01_34, rtReg: 5}, @@ -37,12 +38,12 @@ func TestEVM_LL(t *testing.T) { t.Run(c.name, func(t *testing.T) { rtReg := c.rtReg baseReg := 6 - pc := uint32(0x44) + pc := Word(0x44) insn := uint32((0b11_0000 << 26) | (baseReg & 0x1F << 21) | (rtReg & 0x1F << 16) | (0xFFFF & c.offset)) goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(int64(i)), 
testutil.WithPC(pc), testutil.WithNextPC(pc+4)) state := goVm.GetState() state.GetMemory().SetMemory(pc, insn) - state.GetMemory().SetMemory(c.effAddr, c.value) + state.GetMemory().SetWord(c.effAddr, c.value) state.GetRegistersRef()[baseReg] = c.base step := state.GetStep() @@ -70,10 +71,10 @@ func TestEVM_SC(t *testing.T) { cases := []struct { name string - base uint32 + base Word offset int - value uint32 - effAddr uint32 + value Word + effAddr Word rtReg int }{ {name: "Aligned effAddr", base: 0x00_00_00_01, offset: 0x0133, value: 0xABCD, effAddr: 0x00_00_01_34, rtReg: 5}, @@ -87,7 +88,7 @@ func TestEVM_SC(t *testing.T) { t.Run(c.name, func(t *testing.T) { rtReg := c.rtReg baseReg := 6 - pc := uint32(0x44) + pc := Word(0x44) insn := uint32((0b11_1000 << 26) | (baseReg & 0x1F << 21) | (rtReg & 0x1F << 16) | (0xFFFF & c.offset)) goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(int64(i)), testutil.WithPC(pc), testutil.WithNextPC(pc+4)) state := goVm.GetState() @@ -103,7 +104,7 @@ func TestEVM_SC(t *testing.T) { expected.NextPC = pc + 8 expectedMemory := memory.NewMemory() expectedMemory.SetMemory(pc, insn) - expectedMemory.SetMemory(c.effAddr, c.value) + expectedMemory.SetWord(c.effAddr, c.value) expected.MemoryRoot = expectedMemory.MerkleRoot() if rtReg != 0 { expected.Registers[rtReg] = 1 // 1 for success @@ -130,10 +131,10 @@ func TestEVM_SysRead_Preimage(t *testing.T) { cases := []struct { name string - addr uint32 - count uint32 - writeLen uint32 - preimageOffset uint32 + addr Word + count Word + writeLen Word + preimageOffset Word prestateMem uint32 postateMem uint32 shouldPanic bool @@ -157,7 +158,7 @@ func TestEVM_SysRead_Preimage(t *testing.T) { } for i, c := range cases { t.Run(c.name, func(t *testing.T) { - effAddr := 0xFFffFFfc & c.addr + effAddr := arch.AddressMask & c.addr preimageKey := preimage.Keccak256Key(crypto.Keccak256Hash(preimageValue)).PreimageKey() oracle := testutil.StaticOracle(t, 
preimageValue) goVm := v.VMFactory(oracle, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(int64(i)), testutil.WithPreimageKey(preimageKey), testutil.WithPreimageOffset(c.preimageOffset)) diff --git a/cannon/mipsevm/tests/fuzz_evm_common_test.go b/cannon/mipsevm/tests/fuzz_evm_common_test.go index 15b29a2b9e50..712b7d4875d3 100644 --- a/cannon/mipsevm/tests/fuzz_evm_common_test.go +++ b/cannon/mipsevm/tests/fuzz_evm_common_test.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/require" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" "github.com/ethereum-optimism/optimism/cannon/mipsevm/memory" "github.com/ethereum-optimism/optimism/cannon/mipsevm/program" @@ -50,13 +51,13 @@ func FuzzStateSyscallBrk(f *testing.F) { func FuzzStateSyscallMmap(f *testing.F) { // Add special cases for large memory allocation - f.Add(uint32(0), uint32(0x1000), uint32(program.HEAP_END), int64(1)) - f.Add(uint32(0), uint32(1<<31), uint32(program.HEAP_START), int64(2)) + f.Add(Word(0), Word(0x1000), Word(program.HEAP_END), int64(1)) + f.Add(Word(0), Word(1<<31), Word(program.HEAP_START), int64(2)) // Check edge case - just within bounds - f.Add(uint32(0), uint32(0x1000), uint32(program.HEAP_END-4096), int64(3)) + f.Add(Word(0), Word(0x1000), Word(program.HEAP_END-4096), int64(3)) versions := GetMipsVersionTestCases(f) - f.Fuzz(func(t *testing.T, addr uint32, siz uint32, heap uint32, seed int64) { + f.Fuzz(func(t *testing.T, addr Word, siz Word, heap Word, seed int64) { for _, v := range versions { t.Run(v.Name, func(t *testing.T) { goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), @@ -112,7 +113,7 @@ func FuzzStateSyscallExitGroup(f *testing.F) { testutil.WithRandomization(seed)) state := goVm.GetState() state.GetRegistersRef()[2] = exec.SysExitGroup - state.GetRegistersRef()[4] = uint32(exitCode) + 
state.GetRegistersRef()[4] = Word(exitCode) state.GetMemory().SetMemory(state.GetPC(), syscallInsn) step := state.GetStep() @@ -134,7 +135,7 @@ func FuzzStateSyscallExitGroup(f *testing.F) { func FuzzStateSyscallFcntl(f *testing.F) { versions := GetMipsVersionTestCases(f) - f.Fuzz(func(t *testing.T, fd uint32, cmd uint32, seed int64) { + f.Fuzz(func(t *testing.T, fd Word, cmd Word, seed int64) { for _, v := range versions { t.Run(v.Name, func(t *testing.T) { goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), @@ -190,7 +191,7 @@ func FuzzStateSyscallFcntl(f *testing.F) { func FuzzStateHintRead(f *testing.F) { versions := GetMipsVersionTestCases(f) - f.Fuzz(func(t *testing.T, addr uint32, count uint32, seed int64) { + f.Fuzz(func(t *testing.T, addr Word, count Word, seed int64) { for _, v := range versions { t.Run(v.Name, func(t *testing.T) { preimageData := []byte("hello world") @@ -227,15 +228,15 @@ func FuzzStateHintRead(f *testing.F) { func FuzzStatePreimageRead(f *testing.F) { versions := GetMipsVersionTestCases(f) - f.Fuzz(func(t *testing.T, addr uint32, pc uint32, count uint32, preimageOffset uint32, seed int64) { + f.Fuzz(func(t *testing.T, addr arch.Word, pc arch.Word, count arch.Word, preimageOffset arch.Word, seed int64) { for _, v := range versions { t.Run(v.Name, func(t *testing.T) { - effAddr := addr & 0xFF_FF_FF_FC - pc = pc & 0xFF_FF_FF_FC + effAddr := addr & arch.AddressMask + pc = pc & arch.AddressMask preexistingMemoryVal := [4]byte{0xFF, 0xFF, 0xFF, 0xFF} preimageValue := []byte("hello world") preimageData := testutil.AddPreimageLengthPrefix(preimageValue) - if preimageOffset >= uint32(len(preimageData)) || pc == effAddr { + if preimageOffset >= Word(len(preimageData)) || pc == effAddr { t.SkipNow() } preimageKey := preimage.Keccak256Key(crypto.Keccak256Hash(preimageValue)).PreimageKey() @@ -252,13 +253,13 @@ func FuzzStatePreimageRead(f *testing.F) { state.GetMemory().SetMemory(effAddr, 
binary.BigEndian.Uint32(preexistingMemoryVal[:])) step := state.GetStep() - alignment := addr & 3 + alignment := addr & arch.ExtMask writeLen := 4 - alignment if count < writeLen { writeLen = count } // Cap write length to remaining bytes of the preimage - preimageDataLen := uint32(len(preimageData)) + preimageDataLen := Word(len(preimageData)) if preimageOffset+writeLen > preimageDataLen { writeLen = preimageDataLen - preimageOffset } @@ -290,11 +291,11 @@ func FuzzStatePreimageRead(f *testing.F) { func FuzzStateHintWrite(f *testing.F) { versions := GetMipsVersionTestCases(f) - f.Fuzz(func(t *testing.T, addr uint32, count uint32, hint1, hint2, hint3 []byte, randSeed int64) { + f.Fuzz(func(t *testing.T, addr Word, count Word, hint1, hint2, hint3 []byte, randSeed int64) { for _, v := range versions { t.Run(v.Name, func(t *testing.T) { // Make sure pc does not overlap with hint data in memory - pc := uint32(0) + pc := Word(0) if addr <= 8 { addr += 8 } @@ -372,15 +373,15 @@ func FuzzStateHintWrite(f *testing.F) { func FuzzStatePreimageWrite(f *testing.F) { versions := GetMipsVersionTestCases(f) - f.Fuzz(func(t *testing.T, addr uint32, count uint32, seed int64) { + f.Fuzz(func(t *testing.T, addr arch.Word, count arch.Word, seed int64) { for _, v := range versions { t.Run(v.Name, func(t *testing.T) { // Make sure pc does not overlap with preimage data in memory - pc := uint32(0) + pc := Word(0) if addr <= 8 { addr += 8 } - effAddr := addr & 0xFF_FF_FF_FC + effAddr := addr & arch.AddressMask preexistingMemoryVal := [4]byte{0x12, 0x34, 0x56, 0x78} preimageData := []byte("hello world") preimageKey := preimage.Keccak256Key(crypto.Keccak256Hash(preimageData)).PreimageKey() @@ -398,7 +399,7 @@ func FuzzStatePreimageWrite(f *testing.F) { step := state.GetStep() expectBytesWritten := count - alignment := addr & 0x3 + alignment := addr & arch.ExtMask sz := 4 - alignment if sz < expectBytesWritten { expectBytesWritten = sz diff --git 
a/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go b/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go index 828f9c558739..c64658934421 100644 --- a/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go +++ b/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go @@ -14,13 +14,13 @@ import ( func FuzzStateSyscallCloneMT(f *testing.F) { v := GetMultiThreadedTestCase(f) - f.Fuzz(func(t *testing.T, nextThreadId, stackPtr uint32, seed int64) { + f.Fuzz(func(t *testing.T, nextThreadId, stackPtr Word, seed int64) { goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(seed)) state := mttestutil.GetMtState(t, goVm) // Update existing threads to avoid collision with nextThreadId if mttestutil.FindThread(state, nextThreadId) != nil { for i, t := range mttestutil.GetAllThreads(state) { - t.ThreadId = nextThreadId - uint32(i+1) + t.ThreadId = nextThreadId - Word(i+1) } } diff --git a/cannon/mipsevm/testutil/mips.go b/cannon/mipsevm/testutil/mips.go index 33ada41869d7..50d0ac48a608 100644 --- a/cannon/mipsevm/testutil/mips.go +++ b/cannon/mipsevm/testutil/mips.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" preimage "github.com/ethereum-optimism/optimism/op-preimage" ) @@ -97,7 +98,7 @@ func EncodeStepInput(t *testing.T, wit *mipsevm.StepWitness, localContext mipsev return input } -func (m *MIPSEVM) encodePreimageOracleInput(t *testing.T, preimageKey [32]byte, preimageValue []byte, preimageOffset uint32, localContext mipsevm.LocalContext) ([]byte, error) { +func (m *MIPSEVM) encodePreimageOracleInput(t *testing.T, preimageKey [32]byte, preimageValue []byte, preimageOffset arch.Word, localContext mipsevm.LocalContext) ([]byte, error) { if preimageKey == ([32]byte{}) { return nil, errors.New("cannot encode pre-image oracle input, witness 
has no pre-image to proof") } @@ -151,7 +152,7 @@ func (m *MIPSEVM) encodePreimageOracleInput(t *testing.T, preimageKey [32]byte, } } -func (m *MIPSEVM) assertPreimageOracleReverts(t *testing.T, preimageKey [32]byte, preimageValue []byte, preimageOffset uint32) { +func (m *MIPSEVM) assertPreimageOracleReverts(t *testing.T, preimageKey [32]byte, preimageValue []byte, preimageOffset arch.Word) { poInput, err := m.encodePreimageOracleInput(t, preimageKey, preimageValue, preimageOffset, mipsevm.LocalContext{}) require.NoError(t, err, "encode preimage oracle input") _, _, evmErr := m.env.Call(m.sender, m.addrs.Oracle, poInput, m.startingGas, common.U2560) @@ -200,7 +201,7 @@ func AssertEVMReverts(t *testing.T, state mipsevm.FPVMState, contracts *Contract require.Equal(t, 0, len(logs)) } -func AssertPreimageOracleReverts(t *testing.T, preimageKey [32]byte, preimageValue []byte, preimageOffset uint32, contracts *ContractMetadata, tracer *tracing.Hooks) { +func AssertPreimageOracleReverts(t *testing.T, preimageKey [32]byte, preimageValue []byte, preimageOffset arch.Word, contracts *ContractMetadata, tracer *tracing.Hooks) { evm := NewMIPSEVM(contracts) evm.SetTracer(tracer) LogStepFailureAtCleanup(t, evm) diff --git a/cannon/mipsevm/testutil/rand.go b/cannon/mipsevm/testutil/rand.go index 96ff0eb6318b..da0b6d113b8b 100644 --- a/cannon/mipsevm/testutil/rand.go +++ b/cannon/mipsevm/testutil/rand.go @@ -4,6 +4,7 @@ import ( "encoding/binary" "math/rand" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" ) @@ -21,6 +22,14 @@ func (h *RandHelper) Uint32() uint32 { return h.r.Uint32() } +func (h *RandHelper) Word() arch.Word { + if arch.IsMips32 { + return arch.Word(h.r.Uint32()) + } else { + return arch.Word(h.r.Uint64()) + } +} + func (h *RandHelper) Fraction() float64 { return h.r.Float64() } @@ -57,10 +66,10 @@ func (h *RandHelper) RandHint() []byte { return bytes } -func (h 
*RandHelper) RandRegisters() *[32]uint32 { - registers := new([32]uint32) +func (h *RandHelper) RandRegisters() *[32]arch.Word { + registers := new([32]arch.Word) for i := 0; i < 32; i++ { - registers[i] = h.r.Uint32() + registers[i] = h.Word() } return registers } @@ -73,8 +82,8 @@ func (h *RandHelper) RandomBytes(t require.TestingT, length int) []byte { return randBytes } -func (h *RandHelper) RandPC() uint32 { - return AlignPC(h.r.Uint32()) +func (h *RandHelper) RandPC() arch.Word { + return AlignPC(h.Word()) } func (h *RandHelper) RandStep() uint64 { diff --git a/cannon/mipsevm/testutil/state.go b/cannon/mipsevm/testutil/state.go index 86d5cfb2b6ad..4513d1424692 100644 --- a/cannon/mipsevm/testutil/state.go +++ b/cannon/mipsevm/testutil/state.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/memory" ) @@ -33,12 +34,12 @@ func AddPreimageLengthPrefix(data []byte) []byte { type StateMutator interface { SetPreimageKey(val common.Hash) - SetPreimageOffset(val uint32) - SetPC(val uint32) - SetNextPC(val uint32) - SetHI(val uint32) - SetLO(val uint32) - SetHeap(addr uint32) + SetPreimageOffset(val arch.Word) + SetPC(val arch.Word) + SetNextPC(val arch.Word) + SetHI(val arch.Word) + SetLO(val arch.Word) + SetHeap(addr arch.Word) SetExitCode(val uint8) SetExited(val bool) SetStep(val uint64) @@ -48,26 +49,26 @@ type StateMutator interface { type StateOption func(state StateMutator) -func WithPC(pc uint32) StateOption { +func WithPC(pc arch.Word) StateOption { return func(state StateMutator) { state.SetPC(pc) } } -func WithNextPC(nextPC uint32) StateOption { +func WithNextPC(nextPC arch.Word) StateOption { return func(state StateMutator) { state.SetNextPC(nextPC) } } -func WithPCAndNextPC(pc uint32) StateOption { +func WithPCAndNextPC(pc arch.Word) StateOption { return func(state 
StateMutator) { state.SetPC(pc) state.SetNextPC(pc + 4) } } -func WithHeap(addr uint32) StateOption { +func WithHeap(addr arch.Word) StateOption { return func(state StateMutator) { state.SetHeap(addr) } @@ -85,7 +86,7 @@ func WithPreimageKey(key common.Hash) StateOption { } } -func WithPreimageOffset(offset uint32) StateOption { +func WithPreimageOffset(offset arch.Word) StateOption { return func(state StateMutator) { state.SetPreimageOffset(offset) } @@ -103,12 +104,12 @@ func WithRandomization(seed int64) StateOption { } } -func AlignPC(pc uint32) uint32 { +func AlignPC(pc arch.Word) arch.Word { // Memory-align random pc and leave room for nextPC - pc = pc & 0xFF_FF_FF_FC // Align address - if pc >= 0xFF_FF_FF_FC { + pc = pc & arch.AddressMask // Align address + if pc >= arch.AddressMask && arch.IsMips32 { // Leave room to set and then increment nextPC - pc = 0xFF_FF_FF_FC - 8 + pc = arch.AddressMask - 8 } return pc } @@ -123,17 +124,17 @@ func BoundStep(step uint64) uint64 { type ExpectedState struct { PreimageKey common.Hash - PreimageOffset uint32 - PC uint32 - NextPC uint32 - HI uint32 - LO uint32 - Heap uint32 + PreimageOffset arch.Word + PC arch.Word + NextPC arch.Word + HI arch.Word + LO arch.Word + Heap arch.Word ExitCode uint8 Exited bool Step uint64 LastHint hexutil.Bytes - Registers [32]uint32 + Registers [32]arch.Word MemoryRoot common.Hash expectedMemory *memory.Memory } @@ -164,7 +165,7 @@ func (e *ExpectedState) ExpectStep() { e.NextPC += 4 } -func (e *ExpectedState) ExpectMemoryWrite(addr uint32, val uint32) { +func (e *ExpectedState) ExpectMemoryWrite(addr arch.Word, val uint32) { e.expectedMemory.SetMemory(addr, val) e.MemoryRoot = e.expectedMemory.MerkleRoot() } diff --git a/cannon/mipsevm/testutil/vmtests.go b/cannon/mipsevm/testutil/vmtests.go index 0c5c325a8b55..559db317cef0 100644 --- a/cannon/mipsevm/testutil/vmtests.go +++ b/cannon/mipsevm/testutil/vmtests.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" 
"github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/program" ) @@ -76,13 +77,13 @@ func RunVMTests_OpenMips[T mipsevm.FPVMState](t *testing.T, stateFactory StateFa } if exitGroup { - require.NotEqual(t, uint32(EndAddr), us.GetState().GetPC(), "must not reach end") + require.NotEqual(t, arch.Word(EndAddr), us.GetState().GetPC(), "must not reach end") require.True(t, us.GetState().GetExited(), "must set exited state") require.Equal(t, uint8(1), us.GetState().GetExitCode(), "must exit with 1") } else if expectPanic { - require.NotEqual(t, uint32(EndAddr), us.GetState().GetPC(), "must not reach end") + require.NotEqual(t, arch.Word(EndAddr), us.GetState().GetPC(), "must not reach end") } else { - require.Equal(t, uint32(EndAddr), us.GetState().GetPC(), "must reach end") + require.Equal(t, arch.Word(EndAddr), us.GetState().GetPC(), "must reach end") done, result := state.GetMemory().GetMemory(BaseAddrEnd+4), state.GetMemory().GetMemory(BaseAddrEnd+8) // inspect test result require.Equal(t, done, uint32(1), "must be done") diff --git a/cannon/mipsevm/versions/detect.go b/cannon/mipsevm/versions/detect.go index cb1efcc06eb3..1f1f4147d695 100644 --- a/cannon/mipsevm/versions/detect.go +++ b/cannon/mipsevm/versions/detect.go @@ -27,7 +27,7 @@ func DetectVersion(path string) (StateVersion, error) { } switch ver { - case VersionSingleThreaded, VersionMultiThreaded, VersionSingleThreaded2: + case VersionSingleThreaded, VersionMultiThreaded, VersionSingleThreaded2, VersionMultiThreaded64: return ver, nil default: return 0, fmt.Errorf("%w: %d", ErrUnknownVersion, ver) diff --git a/cannon/mipsevm/versions/detect_test.go b/cannon/mipsevm/versions/detect_test.go index 993fb4adcb81..be849269fff9 100644 --- a/cannon/mipsevm/versions/detect_test.go +++ b/cannon/mipsevm/versions/detect_test.go @@ -34,6 +34,9 @@ func TestDetectVersion(t *testing.T) { // Iterate all 
known versions to ensure we have a test case to detect every state version for _, version := range StateVersionTypes { version := version + if version == VersionMultiThreaded64 { + t.Skip("TODO(#12205)") + } t.Run(version.String(), func(t *testing.T) { testDetection(t, version, ".bin.gz") }) diff --git a/cannon/mipsevm/versions/state.go b/cannon/mipsevm/versions/state.go index 97fceadd43e4..c33c5d4d756c 100644 --- a/cannon/mipsevm/versions/state.go +++ b/cannon/mipsevm/versions/state.go @@ -7,6 +7,7 @@ import ( "io" "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" "github.com/ethereum-optimism/optimism/cannon/mipsevm/singlethreaded" "github.com/ethereum-optimism/optimism/cannon/serialize" @@ -21,14 +22,16 @@ const ( VersionMultiThreaded // VersionSingleThreaded2 is based on VersionSingleThreaded with the addition of support for fcntl(F_GETFD) syscall VersionSingleThreaded2 + VersionMultiThreaded64 ) var ( - ErrUnknownVersion = errors.New("unknown version") - ErrJsonNotSupported = errors.New("json not supported") + ErrUnknownVersion = errors.New("unknown version") + ErrJsonNotSupported = errors.New("json not supported") + ErrUnsupportedMipsArch = errors.New("mips architecture is not supported") ) -var StateVersionTypes = []StateVersion{VersionSingleThreaded, VersionMultiThreaded, VersionSingleThreaded2} +var StateVersionTypes = []StateVersion{VersionSingleThreaded, VersionMultiThreaded, VersionSingleThreaded2, VersionMultiThreaded64} func LoadStateFromFile(path string) (*VersionedState, error) { if !serialize.IsBinaryFile(path) { @@ -45,15 +48,25 @@ func LoadStateFromFile(path string) (*VersionedState, error) { func NewFromState(state mipsevm.FPVMState) (*VersionedState, error) { switch state := state.(type) { case *singlethreaded.State: + if !arch.IsMips32 { + return nil, ErrUnsupportedMipsArch + } return &VersionedState{ Version: 
VersionSingleThreaded2, FPVMState: state, }, nil case *multithreaded.State: - return &VersionedState{ - Version: VersionMultiThreaded, - FPVMState: state, - }, nil + if arch.IsMips32 { + return &VersionedState{ + Version: VersionMultiThreaded, + FPVMState: state, + }, nil + } else { + return &VersionedState{ + Version: VersionMultiThreaded64, + FPVMState: state, + }, nil + } default: return nil, fmt.Errorf("%w: %T", ErrUnknownVersion, state) } @@ -82,6 +95,9 @@ func (s *VersionedState) Deserialize(in io.Reader) error { switch s.Version { case VersionSingleThreaded2: + if !arch.IsMips32 { + return ErrUnsupportedMipsArch + } state := &singlethreaded.State{} if err := state.Deserialize(in); err != nil { return err @@ -89,6 +105,19 @@ func (s *VersionedState) Deserialize(in io.Reader) error { s.FPVMState = state return nil case VersionMultiThreaded: + if !arch.IsMips32 { + return ErrUnsupportedMipsArch + } + state := &multithreaded.State{} + if err := state.Deserialize(in); err != nil { + return err + } + s.FPVMState = state + return nil + case VersionMultiThreaded64: + if arch.IsMips32 { + return ErrUnsupportedMipsArch + } state := &multithreaded.State{} if err := state.Deserialize(in); err != nil { return err @@ -106,6 +135,9 @@ func (s *VersionedState) MarshalJSON() ([]byte, error) { if s.Version != VersionSingleThreaded { return nil, fmt.Errorf("%w for type %T", ErrJsonNotSupported, s.FPVMState) } + if !arch.IsMips32 { + return nil, ErrUnsupportedMipsArch + } return json.Marshal(s.FPVMState) } @@ -117,6 +149,8 @@ func (s StateVersion) String() string { return "multithreaded" case VersionSingleThreaded2: return "singlethreaded-2" + case VersionMultiThreaded64: + return "multithreaded64" default: return "unknown" } @@ -130,6 +164,8 @@ func ParseStateVersion(ver string) (StateVersion, error) { return VersionMultiThreaded, nil case "singlethreaded-2": return VersionSingleThreaded2, nil + case "multithreaded64": + return VersionMultiThreaded64, nil default: return 
StateVersion(0), errors.New("unknown state version") } diff --git a/cannon/mipsevm/versions/state_test.go b/cannon/mipsevm/versions/state_test.go index 8740d51d2929..27892c7c0552 100644 --- a/cannon/mipsevm/versions/state_test.go +++ b/cannon/mipsevm/versions/state_test.go @@ -49,6 +49,10 @@ func TestLoadStateFromFile(t *testing.T) { }) } +func TestLoadStateFromFile64(t *testing.T) { + t.Skip("TODO(#12205): Test asserting that cannon64 fails to decode a 32-bit state") +} + func TestVersionsOtherThanZeroDoNotSupportJSON(t *testing.T) { tests := []struct { version StateVersion diff --git a/cannon/mipsevm/witness.go b/cannon/mipsevm/witness.go index b7bf38fa528e..6807bc91c2f6 100644 --- a/cannon/mipsevm/witness.go +++ b/cannon/mipsevm/witness.go @@ -1,6 +1,9 @@ package mipsevm -import "github.com/ethereum/go-ethereum/common" +import ( + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" + "github.com/ethereum/go-ethereum/common" +) type LocalContext common.Hash @@ -13,7 +16,7 @@ type StepWitness struct { PreimageKey [32]byte // zeroed when no pre-image is accessed PreimageValue []byte // including the 8-byte length prefix - PreimageOffset uint32 + PreimageOffset arch.Word } func (wit *StepWitness) HasPreimage() bool { diff --git a/cannon/multicannon/exec.go b/cannon/multicannon/exec.go index cc06cad9a685..982b83c55692 100644 --- a/cannon/multicannon/exec.go +++ b/cannon/multicannon/exec.go @@ -8,6 +8,7 @@ import ( "os" "os/exec" "path/filepath" + "slices" "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" ) @@ -20,9 +21,7 @@ var vmFS embed.FS const baseDir = "embeds" func ExecuteCannon(ctx context.Context, args []string, ver versions.StateVersion) error { - switch ver { - case versions.VersionSingleThreaded, versions.VersionSingleThreaded2, versions.VersionMultiThreaded: - default: + if !slices.Contains(versions.StateVersionTypes, ver) { return errors.New("unsupported version") } diff --git a/cannon/multicannon/run.go b/cannon/multicannon/run.go 
index 7139436899ab..fabd4d71df38 100644 --- a/cannon/multicannon/run.go +++ b/cannon/multicannon/run.go @@ -10,7 +10,6 @@ import ( ) func Run(ctx *cli.Context) error { - fmt.Printf("args %v\n", os.Args[:]) if len(os.Args) == 3 && os.Args[2] == "--help" { if err := list(); err != nil { return err diff --git a/go.mod b/go.mod index 92a75cf442fa..37c8a09cf77f 100644 --- a/go.mod +++ b/go.mod @@ -49,6 +49,7 @@ require ( golang.org/x/sync v0.8.0 golang.org/x/term v0.24.0 golang.org/x/time v0.6.0 + lukechampine.com/uint128 v1.3.0 ) require ( diff --git a/go.sum b/go.sum index 5cd91613853f..6c07be129d85 100644 --- a/go.sum +++ b/go.sum @@ -1098,6 +1098,8 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= +lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo= +lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= From 7feffce8d3a87ac7fe41b19154f50379606d4b3e Mon Sep 17 00:00:00 2001 From: Blaine Malone Date: Tue, 1 Oct 2024 14:46:19 -0400 Subject: [PATCH 097/116] Configurable GasLimit for op-deployer (#12224) * Configurable GasLimit for op-deployer * fix: OPContractsManager.t.sol fixed. * fix: OPContractsManagerInterop.sol * fix: make gas limit configurable. 
--- op-chain-ops/deployer/opcm/opchain.go | 3 +++ op-chain-ops/deployer/pipeline/opchain.go | 1 + op-chain-ops/interopgen/configs.go | 1 + op-chain-ops/interopgen/deploy.go | 1 + op-chain-ops/interopgen/recipe.go | 4 +++- .../contracts-bedrock/scripts/DeployOPChain.s.sol | 12 ++++++++++-- packages/contracts-bedrock/semver-lock.json | 4 ++-- .../snapshots/abi/OPContractsManager.json | 5 +++++ .../snapshots/abi/OPContractsManagerInterop.json | 5 +++++ .../contracts-bedrock/src/L1/OPContractsManager.sol | 9 +++++---- .../src/L1/OPContractsManagerInterop.sol | 2 +- .../test/L1/OPContractsManager.t.sol | 4 +++- .../contracts-bedrock/test/opcm/DeployOPChain.t.sol | 3 +++ 13 files changed, 43 insertions(+), 11 deletions(-) diff --git a/op-chain-ops/deployer/opcm/opchain.go b/op-chain-ops/deployer/opcm/opchain.go index ac118302e5a9..bf28ed0d4efd 100644 --- a/op-chain-ops/deployer/opcm/opchain.go +++ b/op-chain-ops/deployer/opcm/opchain.go @@ -35,6 +35,7 @@ type DeployOPChainInput struct { L2ChainId *big.Int OpcmProxy common.Address SaltMixer string + GasLimit uint64 } func (input *DeployOPChainInput) InputSet() bool { @@ -124,6 +125,7 @@ type opcmDeployInput struct { L2ChainId *big.Int StartingAnchorRoots []byte SaltMixer string + GasLimit uint64 } // decodeOutputABIJSON defines an ABI for a fake method called "decodeOutput" that returns the @@ -243,6 +245,7 @@ func DeployOPChainRaw( L2ChainId: input.L2ChainId, StartingAnchorRoots: input.StartingAnchorRoots(), SaltMixer: input.SaltMixer, + GasLimit: input.GasLimit, }) if err != nil { return out, fmt.Errorf("failed to pack deploy input: %w", err) diff --git a/op-chain-ops/deployer/pipeline/opchain.go b/op-chain-ops/deployer/pipeline/opchain.go index cc375382b2f7..9117cddb2673 100644 --- a/op-chain-ops/deployer/pipeline/opchain.go +++ b/op-chain-ops/deployer/pipeline/opchain.go @@ -40,6 +40,7 @@ func DeployOPChain(ctx context.Context, env *Env, artifactsFS foundry.StatDirFs, L2ChainId: chainID.Big(), OpcmProxy: 
st.ImplementationsDeployment.OpcmProxyAddress, SaltMixer: st.Create2Salt.String(), // passing through salt generated at state initialization + GasLimit: 30_000_000, // TODO: make this configurable } var dco opcm.DeployOPChainOutput diff --git a/op-chain-ops/interopgen/configs.go b/op-chain-ops/interopgen/configs.go index d98b6429a872..6310b3c068ef 100644 --- a/op-chain-ops/interopgen/configs.go +++ b/op-chain-ops/interopgen/configs.go @@ -77,6 +77,7 @@ type L2Config struct { genesis.L2InitializationConfig Prefund map[common.Address]*big.Int SaltMixer string + GasLimit uint64 } func (c *L2Config) Check(log log.Logger) error { diff --git a/op-chain-ops/interopgen/deploy.go b/op-chain-ops/interopgen/deploy.go index 7550b599aa94..1bcef25ea0f7 100644 --- a/op-chain-ops/interopgen/deploy.go +++ b/op-chain-ops/interopgen/deploy.go @@ -208,6 +208,7 @@ func DeployL2ToL1(l1Host *script.Host, superCfg *SuperchainConfig, superDeployme L2ChainId: new(big.Int).SetUint64(cfg.L2ChainID), OpcmProxy: superDeployment.OpcmProxy, SaltMixer: cfg.SaltMixer, + GasLimit: cfg.GasLimit, }) if err != nil { return nil, fmt.Errorf("failed to deploy L2 OP chain: %w", err) diff --git a/op-chain-ops/interopgen/recipe.go b/op-chain-ops/interopgen/recipe.go index 53f27a3af36e..dc27e9d7d100 100644 --- a/op-chain-ops/interopgen/recipe.go +++ b/op-chain-ops/interopgen/recipe.go @@ -249,7 +249,9 @@ func InteropL2DevConfig(l1ChainID, l2ChainID uint64, addrs devkeys.Addresses) (* UseAltDA: false, }, }, - Prefund: make(map[common.Address]*big.Int), + Prefund: make(map[common.Address]*big.Int), + SaltMixer: "", + GasLimit: 30_000_000, } // TODO(#11887): consider making the number of prefunded keys configurable. 
diff --git a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol index 152885170cb0..6c4360b8d666 100644 --- a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol @@ -48,6 +48,7 @@ contract DeployOPChainInput is BaseDeployIO { uint256 internal _l2ChainId; OPContractsManager internal _opcmProxy; string internal _saltMixer; + uint64 internal _gasLimit; function set(bytes4 _sel, address _addr) public { require(_addr != address(0), "DeployOPChainInput: cannot set zero address"); @@ -69,6 +70,8 @@ contract DeployOPChainInput is BaseDeployIO { } else if (_sel == this.l2ChainId.selector) { require(_value != 0 && _value != block.chainid, "DeployOPChainInput: invalid l2ChainId"); _l2ChainId = _value; + } else if (_sel == this.gasLimit.selector) { + _gasLimit = SafeCast.toUint64(_value); } else { revert("DeployOPChainInput: unknown selector"); } @@ -156,6 +159,10 @@ contract DeployOPChainInput is BaseDeployIO { function saltMixer() public view returns (string memory) { return _saltMixer; } + + function gasLimit() public view returns (uint64) { + return _gasLimit; + } } contract DeployOPChainOutput is BaseDeployIO { @@ -374,7 +381,7 @@ contract DeployOPChainOutput is BaseDeployIO { require(systemConfig.basefeeScalar() == _doi.basefeeScalar(), "SYSCON-20"); require(systemConfig.blobbasefeeScalar() == _doi.blobBaseFeeScalar(), "SYSCON-30"); require(systemConfig.batcherHash() == bytes32(uint256(uint160(_doi.batcher()))), "SYSCON-40"); - require(systemConfig.gasLimit() == uint64(30000000), "SYSCON-50"); // TODO allow other gas limits? 
+ require(systemConfig.gasLimit() == uint64(30_000_000), "SYSCON-50"); require(systemConfig.unsafeBlockSigner() == _doi.unsafeBlockSigner(), "SYSCON-60"); require(systemConfig.scalar() >> 248 == 1, "SYSCON-70"); @@ -514,7 +521,8 @@ contract DeployOPChain is Script { blobBasefeeScalar: _doi.blobBaseFeeScalar(), l2ChainId: _doi.l2ChainId(), startingAnchorRoots: _doi.startingAnchorRoots(), - saltMixer: _doi.saltMixer() + saltMixer: _doi.saltMixer(), + gasLimit: _doi.gasLimit() }); vm.broadcast(msg.sender); diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index 3ded81206c65..73a9df9a7995 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -32,8 +32,8 @@ "sourceCodeHash": "0xde4df0f9633dc0cdb1c9f634003ea5b0f7c5c1aebc407bc1b2f44c0ecf938649" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0xa0c1139a01cef2445266c71175eff2d36e4b3a7584b198835ed8cba4f7143704", - "sourceCodeHash": "0x67f9846a215d0817a75b4beee50925861d14da2cab1b699bb4e8ae89fa12d01b" + "initCodeHash": "0xfaab186a660764265a837fac689a6d8602454c6ca9f39b5244282768b8d86b3a", + "sourceCodeHash": "0x831b7268e1beb93050dbaae1e83e17635385bd101779146a95150084f69d2835" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xbe2c0c81b3459014f287d8c89cdc0d27dde5d1f44e5d024fa1e4773ddc47c190", diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json index dc4ef6142c57..3ca8074ff273 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json @@ -167,6 +167,11 @@ "internalType": "string", "name": "saltMixer", "type": "string" + }, + { + "internalType": "uint64", + "name": "gasLimit", + "type": "uint64" } ], "internalType": "struct OPContractsManager.DeployInput", diff --git 
a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json index dc4ef6142c57..3ca8074ff273 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json @@ -167,6 +167,11 @@ "internalType": "string", "name": "saltMixer", "type": "string" + }, + { + "internalType": "uint64", + "name": "gasLimit", + "type": "uint64" } ], "internalType": "struct OPContractsManager.DeployInput", diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index 19c5283cc332..08703db158ad 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -62,6 +62,7 @@ contract OPContractsManager is ISemver, Initializable { bytes startingAnchorRoots; // The salt mixer is used as part of making the resulting salt unique. string saltMixer; + uint64 gasLimit; } /// @notice The full set of outputs from deploying a new OP Stack chain. @@ -124,8 +125,8 @@ contract OPContractsManager is ISemver, Initializable { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.17 - string public constant version = "1.0.0-beta.17"; + /// @custom:semver 1.0.0-beta.18 + string public constant version = "1.0.0-beta.18"; /// @notice Represents the interface version so consumers know how to decode the DeployOutput struct /// that's emitted in the `Deployed` event. Whenever that struct changes, a new version should be used. 
@@ -467,7 +468,7 @@ contract OPContractsManager is ISemver, Initializable { _input.basefeeScalar, _input.blobBasefeeScalar, bytes32(uint256(uint160(_input.roles.batcher))), // batcherHash - 30_000_000, + _input.gasLimit, _input.roles.unsafeBlockSigner, referenceResourceConfig, chainIdToBatchInboxAddress(_input.l2ChainId), @@ -486,7 +487,7 @@ contract OPContractsManager is ISemver, Initializable { _input.basefeeScalar, _input.blobBasefeeScalar, bytes32(uint256(uint160(_input.roles.batcher))), // batcherHash - 30_000_000, + _input.gasLimit, _input.roles.unsafeBlockSigner, referenceResourceConfig, chainIdToBatchInboxAddress(_input.l2ChainId), diff --git a/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol b/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol index 133f2d629a5e..a9dad0d90020 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol @@ -46,7 +46,7 @@ contract OPContractsManagerInterop is OPContractsManager { _input.basefeeScalar, _input.blobBasefeeScalar, bytes32(uint256(uint160(_input.roles.batcher))), // batcherHash - 30_000_000, // gasLimit TODO make this configurable? 
+ _input.gasLimit, _input.roles.unsafeBlockSigner, referenceResourceConfig, chainIdToBatchInboxAddress(_input.l2ChainId), diff --git a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol index 52c49e4dccf1..2b3a254c8bbe 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol @@ -53,6 +53,7 @@ contract OPContractsManager_Deploy_Test is DeployOPChain_TestBase { doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); doi.set(doi.l2ChainId.selector, l2ChainId); doi.set(doi.opcmProxy.selector, address(opcm)); + doi.set(doi.gasLimit.selector, gasLimit); } // This helper function is used to convert the input struct type defined in DeployOPChain.s.sol @@ -71,7 +72,8 @@ contract OPContractsManager_Deploy_Test is DeployOPChain_TestBase { blobBasefeeScalar: _doi.blobBaseFeeScalar(), l2ChainId: _doi.l2ChainId(), startingAnchorRoots: _doi.startingAnchorRoots(), - saltMixer: _doi.saltMixer() + saltMixer: _doi.saltMixer(), + gasLimit: _doi.gasLimit() }); } diff --git a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol index bd17e43bd26b..890f8143cfad 100644 --- a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol @@ -362,6 +362,7 @@ contract DeployOPChain_TestBase is Test { AnchorStateRegistry.StartingAnchorRoot[] startingAnchorRoots; OPContractsManager opcm = OPContractsManager(address(0)); string saltMixer = "defaultSaltMixer"; + uint64 gasLimit = 30_000_000; function setUp() public virtual { // Set defaults for reference types @@ -481,6 +482,7 @@ contract DeployOPChain_Test is DeployOPChain_TestBase { doi.set(doi.l2ChainId.selector, l2ChainId); doi.set(doi.opcmProxy.selector, address(opcm)); // Not fuzzed since it must be an actual instance. 
doi.set(doi.saltMixer.selector, saltMixer); + doi.set(doi.gasLimit.selector, gasLimit); deployOPChain.run(doi, doo); @@ -497,6 +499,7 @@ contract DeployOPChain_Test is DeployOPChain_TestBase { assertEq(blobBaseFeeScalar, doi.blobBaseFeeScalar(), "800"); assertEq(l2ChainId, doi.l2ChainId(), "900"); assertEq(saltMixer, doi.saltMixer(), "1000"); + assertEq(gasLimit, doi.gasLimit(), "1100"); // Assert inputs were properly passed through to the contract initializers. assertEq(address(doo.opChainProxyAdmin().owner()), opChainProxyAdminOwner, "2100"); From 8a7db41926e3c3685282b789592d159fa6fba5fe Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Tue, 1 Oct 2024 13:14:55 -0600 Subject: [PATCH 098/116] op-deployer: Fix init bugs (#12230) - The array was being appended to, but it was already a fixed length. This cause an empty chain to be added during `init`. - The outdir will now be created if it does not exist and is not a file - Hardcodes the contract artifacts to a good URL --- op-chain-ops/deployer/init.go | 26 ++++++++++++++++---- op-chain-ops/deployer/state/artifacts_url.go | 16 ++++++++++++ 2 files changed, 37 insertions(+), 5 deletions(-) diff --git a/op-chain-ops/deployer/init.go b/op-chain-ops/deployer/init.go index a74f7ffa69bd..eadc27b47cd3 100644 --- a/op-chain-ops/deployer/init.go +++ b/op-chain-ops/deployer/init.go @@ -1,7 +1,9 @@ package deployer import ( + "errors" "fmt" + "os" "path" "strings" @@ -13,6 +15,8 @@ import ( "github.com/urfave/cli/v2" ) +var V160ArtifactsURL = state.MustParseArtifactsURL("https://storage.googleapis.com/oplabs-contract-artifacts/artifacts-v1-155f65e7dcbea1b7b3d37a0fc39cc8b6a1c03b6c5b677886ca2420e10e9c1ea6.tar.gz") + type InitConfig struct { L1ChainID uint64 Outdir string @@ -43,12 +47,12 @@ func InitCLI() func(ctx *cli.Context) error { l2ChainIDsRaw := ctx.String(L2ChainIDsFlagName) l2ChainIDsStr := strings.Split(strings.TrimSpace(l2ChainIDsRaw), ",") l2ChainIDs := make([]common.Hash, len(l2ChainIDsStr)) - for _, idStr := 
range l2ChainIDsStr { + for i, idStr := range l2ChainIDsStr { id, err := op_service.Parse256BitChainID(idStr) if err != nil { return fmt.Errorf("invalid chain ID: %w", err) } - l2ChainIDs = append(l2ChainIDs, id) + l2ChainIDs[i] = id } return Init(InitConfig{ @@ -65,9 +69,10 @@ func Init(cfg InitConfig) error { } intent := &state.Intent{ - L1ChainID: cfg.L1ChainID, - FundDevAccounts: true, - ContractsRelease: "dev", + L1ChainID: cfg.L1ChainID, + FundDevAccounts: true, + ContractsRelease: "op-contracts/v1.6.0", + ContractArtifactsURL: V160ArtifactsURL, } l1ChainIDBig := intent.L1ChainIDBig() @@ -111,6 +116,17 @@ func Init(cfg InitConfig) error { Version: 1, } + stat, err := os.Stat(cfg.Outdir) + if errors.Is(err, os.ErrNotExist) { + if err := os.MkdirAll(cfg.Outdir, 0755); err != nil { + return fmt.Errorf("failed to create outdir: %w", err) + } + } else if err != nil { + return fmt.Errorf("failed to stat outdir: %w", err) + } else if !stat.IsDir() { + return fmt.Errorf("outdir is not a directory") + } + if err := intent.WriteToFile(path.Join(cfg.Outdir, "intent.toml")); err != nil { return fmt.Errorf("failed to write intent to file: %w", err) } diff --git a/op-chain-ops/deployer/state/artifacts_url.go b/op-chain-ops/deployer/state/artifacts_url.go index 5ea576d79eec..55910c9f0112 100644 --- a/op-chain-ops/deployer/state/artifacts_url.go +++ b/op-chain-ops/deployer/state/artifacts_url.go @@ -16,3 +16,19 @@ func (a *ArtifactsURL) UnmarshalText(text []byte) error { *a = ArtifactsURL(*u) return nil } + +func ParseArtifactsURL(in string) (*ArtifactsURL, error) { + u, err := url.Parse(in) + if err != nil { + return nil, err + } + return (*ArtifactsURL)(u), nil +} + +func MustParseArtifactsURL(in string) *ArtifactsURL { + u, err := ParseArtifactsURL(in) + if err != nil { + panic(err) + } + return u +} From dd2b21ce786f4c1b722bda270348597182153c8e Mon Sep 17 00:00:00 2001 From: Roberto Bayardo Date: Tue, 1 Oct 2024 12:19:34 -0700 Subject: [PATCH 099/116] make 
check-interfaces.sh warn on old versions of bash (#12220) --- .../contracts-bedrock/scripts/checks/check-interfaces.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/packages/contracts-bedrock/scripts/checks/check-interfaces.sh b/packages/contracts-bedrock/scripts/checks/check-interfaces.sh index 2a4a566f34e9..24c584690f10 100755 --- a/packages/contracts-bedrock/scripts/checks/check-interfaces.sh +++ b/packages/contracts-bedrock/scripts/checks/check-interfaces.sh @@ -1,6 +1,12 @@ #!/usr/bin/env bash set -euo pipefail +# Warn users of Mac OSX who have not ever upgraded bash from the default that they may experience +# performance issues. +if [ "${BASH_VERSINFO[0]}" -lt 5 ]; then + echo "WARNING: your bash installation is very old, and may cause this script to run extremely slowly. Please upgrade bash to at least version 5 if you have performance issues." +fi + # This script checks for ABI consistency between interfaces and their corresponding contracts. # It compares the ABIs of interfaces (files starting with 'I') with their implementation contracts, # excluding certain predefined files. 
Constructors are expected to be represented in interfaces by a From d8cde654fdb13a5dcf053a5db6a2be9c60c8e27a Mon Sep 17 00:00:00 2001 From: Inphi Date: Tue, 1 Oct 2024 17:44:05 -0400 Subject: [PATCH 100/116] cannon: Fix cli --stop-at-preimage value parsing (#12234) * cannon: Fix cli --stop-at-preimage value parsing * Setup alloc type in system config --- cannon/cmd/run.go | 2 +- op-e2e/faultproofs/cannon_benchmark_test.go | 3 ++- op-e2e/faultproofs/util.go | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/cannon/cmd/run.go b/cannon/cmd/run.go index b9854082c5a9..eea8fd4d3a74 100644 --- a/cannon/cmd/run.go +++ b/cannon/cmd/run.go @@ -297,7 +297,7 @@ func Run(ctx *cli.Context) error { } stopAtPreimageKeyPrefix = common.FromHex(parts[0]) if len(parts) == 2 { - x, err := strconv.ParseUint(parts[1], 10, arch.WordSizeBytes) + x, err := strconv.ParseUint(parts[1], 10, arch.WordSize) if err != nil { return fmt.Errorf("invalid preimage offset: %w", err) } diff --git a/op-e2e/faultproofs/cannon_benchmark_test.go b/op-e2e/faultproofs/cannon_benchmark_test.go index 7171d1211764..71b4d0f9a3b4 100644 --- a/op-e2e/faultproofs/cannon_benchmark_test.go +++ b/op-e2e/faultproofs/cannon_benchmark_test.go @@ -12,6 +12,7 @@ import ( "time" op_e2e "github.com/ethereum-optimism/optimism/op-e2e" + "github.com/ethereum-optimism/optimism/op-e2e/config" "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -37,7 +38,7 @@ func TestBenchmarkCannon_FPP(t *testing.T) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() - cfg := e2esys.DefaultSystemConfig(t) + cfg := e2esys.DefaultSystemConfig(t, e2esys.WithAllocType(config.AllocTypeFromEnv())) // We don't need a verifier - just the sequencer is enough delete(cfg.Nodes, "verifier") // Use a small sequencer window size to avoid test timeout while waiting for empty blocks diff --git a/op-e2e/faultproofs/util.go b/op-e2e/faultproofs/util.go index 
5beebafd88a6..bbe20b3cde16 100644 --- a/op-e2e/faultproofs/util.go +++ b/op-e2e/faultproofs/util.go @@ -4,6 +4,7 @@ import ( "crypto/ecdsa" "testing" + "github.com/ethereum-optimism/optimism/op-e2e/config" "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" "github.com/ethereum-optimism/optimism/op-e2e/system/helpers" @@ -50,7 +51,7 @@ func WithSequencerWindowSize(size uint64) faultDisputeConfigOpts { } func StartFaultDisputeSystem(t *testing.T, opts ...faultDisputeConfigOpts) (*e2esys.System, *ethclient.Client) { - cfg := e2esys.DefaultSystemConfig(t) + cfg := e2esys.DefaultSystemConfig(t, e2esys.WithAllocType(config.AllocTypeFromEnv())) delete(cfg.Nodes, "verifier") cfg.Nodes["sequencer"].SafeDBPath = t.TempDir() cfg.DeployConfig.SequencerWindowSize = 4 From a4d81ba51507ba3fe86f47d675c5f37a5b8ab017 Mon Sep 17 00:00:00 2001 From: Michael Amadi Date: Tue, 1 Oct 2024 22:46:10 +0100 Subject: [PATCH 101/116] use interfaces in opcm contracts (#12217) * use interfaces in opcm contracts * fix change * fix encodeConstructor omission * fix wrong interface usage * fix change * fix change * remove commented fn * fix semver conflict issue --- .../scripts/DeployImplementations.s.sol | 310 +++++++++++------- .../scripts/DeployOPChain.s.sol | 152 ++++----- .../scripts/DeploySuperchain.s.sol | 107 +++--- .../scripts/checks/check-interfaces.sh | 1 + .../scripts/deploy/Deploy.s.sol | 5 +- .../scripts/fpac/FPACOPS.s.sol | 2 +- .../scripts/fpac/FPACOPS2.s.sol | 2 +- packages/contracts-bedrock/semver-lock.json | 4 +- .../snapshots/abi/OPContractsManager.json | 4 +- .../abi/OPContractsManagerInterop.json | 4 +- .../src/L1/OPContractsManager.sol | 27 +- .../src/L1/OPContractsManagerInterop.sol | 1 - .../L1/interfaces/ISystemConfigInterop.sol | 12 - .../test/L1/OPContractsManager.t.sol | 3 - .../test/opcm/DeployImplementations.t.sol | 92 +++--- .../test/opcm/DeployOPChain.t.sol | 89 ++--- .../test/opcm/DeploySuperchain.t.sol | 6 +- 17 files changed, 466 insertions(+), 355 
deletions(-) diff --git a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol index 8e7a38ca2eb5..c8476b8a2e50 100644 --- a/packages/contracts-bedrock/scripts/DeployImplementations.s.sol +++ b/packages/contracts-bedrock/scripts/DeployImplementations.s.sol @@ -16,31 +16,24 @@ import { Constants } from "src/libraries/Constants.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import { Bytes } from "src/libraries/Bytes.sol"; -import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; -import { Proxy } from "src/universal/Proxy.sol"; -import { L1ChugSplashProxy } from "src/legacy/L1ChugSplashProxy.sol"; -import { ResolvedDelegateProxy } from "src/legacy/ResolvedDelegateProxy.sol"; -import { AddressManager } from "src/legacy/AddressManager.sol"; - -import { DelayedWETH } from "src/dispute/DelayedWETH.sol"; -import { PreimageOracle } from "src/cannon/PreimageOracle.sol"; +import { IProxy } from "src/universal/interfaces/IProxy.sol"; + +import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; -import { MIPS } from "src/cannon/MIPS.sol"; -import { DisputeGameFactory } from "src/dispute/DisputeGameFactory.sol"; -import { AnchorStateRegistry } from "src/dispute/AnchorStateRegistry.sol"; -import { PermissionedDisputeGame } from "src/dispute/PermissionedDisputeGame.sol"; +import { IMIPS } from "src/cannon/interfaces/IMIPS.sol"; +import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; -import { OptimismPortal2 } from "src/L1/OptimismPortal2.sol"; -import { SystemConfig } from "src/L1/SystemConfig.sol"; -import { L1CrossDomainMessenger } from "src/L1/L1CrossDomainMessenger.sol"; -import { L1ERC721Bridge } from "src/L1/L1ERC721Bridge.sol"; -import { L1StandardBridge } from "src/L1/L1StandardBridge.sol"; 
-import { OptimismMintableERC20Factory } from "src/universal/OptimismMintableERC20Factory.sol"; +import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; +import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; +import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; +import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; +import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; +import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; import { OPContractsManagerInterop } from "src/L1/OPContractsManagerInterop.sol"; -import { OptimismPortalInterop } from "src/L1/OptimismPortalInterop.sol"; -import { SystemConfigInterop } from "src/L1/SystemConfigInterop.sol"; +import { IOptimismPortalInterop } from "src/L1/interfaces/IOptimismPortalInterop.sol"; +import { ISystemConfigInterop } from "src/L1/interfaces/ISystemConfigInterop.sol"; import { Blueprint } from "src/libraries/Blueprint.sol"; @@ -169,16 +162,16 @@ contract DeployImplementationsInput is BaseDeployIO { contract DeployImplementationsOutput is BaseDeployIO { OPContractsManager internal _opcmProxy; OPContractsManager internal _opcmImpl; - DelayedWETH internal _delayedWETHImpl; - OptimismPortal2 internal _optimismPortalImpl; - PreimageOracle internal _preimageOracleSingleton; - MIPS internal _mipsSingleton; - SystemConfig internal _systemConfigImpl; - L1CrossDomainMessenger internal _l1CrossDomainMessengerImpl; - L1ERC721Bridge internal _l1ERC721BridgeImpl; - L1StandardBridge internal _l1StandardBridgeImpl; - OptimismMintableERC20Factory internal _optimismMintableERC20FactoryImpl; - DisputeGameFactory internal _disputeGameFactoryImpl; + IDelayedWETH internal _delayedWETHImpl; + IOptimismPortal2 internal _optimismPortalImpl; + IPreimageOracle internal _preimageOracleSingleton; + IMIPS internal _mipsSingleton; + ISystemConfig internal _systemConfigImpl; + 
IL1CrossDomainMessenger internal _l1CrossDomainMessengerImpl; + IL1ERC721Bridge internal _l1ERC721BridgeImpl; + IL1StandardBridge internal _l1StandardBridgeImpl; + IOptimismMintableERC20Factory internal _optimismMintableERC20FactoryImpl; + IDisputeGameFactory internal _disputeGameFactoryImpl; function set(bytes4 _sel, address _addr) public { require(_addr != address(0), "DeployImplementationsOutput: cannot set zero address"); @@ -186,16 +179,16 @@ contract DeployImplementationsOutput is BaseDeployIO { // forgefmt: disable-start if (_sel == this.opcmProxy.selector) _opcmProxy = OPContractsManager(payable(_addr)); else if (_sel == this.opcmImpl.selector) _opcmImpl = OPContractsManager(payable(_addr)); - else if (_sel == this.optimismPortalImpl.selector) _optimismPortalImpl = OptimismPortal2(payable(_addr)); - else if (_sel == this.delayedWETHImpl.selector) _delayedWETHImpl = DelayedWETH(payable(_addr)); - else if (_sel == this.preimageOracleSingleton.selector) _preimageOracleSingleton = PreimageOracle(_addr); - else if (_sel == this.mipsSingleton.selector) _mipsSingleton = MIPS(_addr); - else if (_sel == this.systemConfigImpl.selector) _systemConfigImpl = SystemConfig(_addr); - else if (_sel == this.l1CrossDomainMessengerImpl.selector) _l1CrossDomainMessengerImpl = L1CrossDomainMessenger(_addr); - else if (_sel == this.l1ERC721BridgeImpl.selector) _l1ERC721BridgeImpl = L1ERC721Bridge(_addr); - else if (_sel == this.l1StandardBridgeImpl.selector) _l1StandardBridgeImpl = L1StandardBridge(payable(_addr)); - else if (_sel == this.optimismMintableERC20FactoryImpl.selector) _optimismMintableERC20FactoryImpl = OptimismMintableERC20Factory(_addr); - else if (_sel == this.disputeGameFactoryImpl.selector) _disputeGameFactoryImpl = DisputeGameFactory(_addr); + else if (_sel == this.optimismPortalImpl.selector) _optimismPortalImpl = IOptimismPortal2(payable(_addr)); + else if (_sel == this.delayedWETHImpl.selector) _delayedWETHImpl = IDelayedWETH(payable(_addr)); + else if (_sel 
== this.preimageOracleSingleton.selector) _preimageOracleSingleton = IPreimageOracle(_addr); + else if (_sel == this.mipsSingleton.selector) _mipsSingleton = IMIPS(_addr); + else if (_sel == this.systemConfigImpl.selector) _systemConfigImpl = ISystemConfig(_addr); + else if (_sel == this.l1CrossDomainMessengerImpl.selector) _l1CrossDomainMessengerImpl = IL1CrossDomainMessenger(_addr); + else if (_sel == this.l1ERC721BridgeImpl.selector) _l1ERC721BridgeImpl = IL1ERC721Bridge(_addr); + else if (_sel == this.l1StandardBridgeImpl.selector) _l1StandardBridgeImpl = IL1StandardBridge(payable(_addr)); + else if (_sel == this.optimismMintableERC20FactoryImpl.selector) _optimismMintableERC20FactoryImpl = IOptimismMintableERC20Factory(_addr); + else if (_sel == this.disputeGameFactoryImpl.selector) _disputeGameFactoryImpl = IDisputeGameFactory(_addr); else revert("DeployImplementationsOutput: unknown selector"); // forgefmt: disable-end } @@ -237,52 +230,52 @@ contract DeployImplementationsOutput is BaseDeployIO { return _opcmImpl; } - function optimismPortalImpl() public view returns (OptimismPortal2) { + function optimismPortalImpl() public view returns (IOptimismPortal2) { DeployUtils.assertValidContractAddress(address(_optimismPortalImpl)); return _optimismPortalImpl; } - function delayedWETHImpl() public view returns (DelayedWETH) { + function delayedWETHImpl() public view returns (IDelayedWETH) { DeployUtils.assertValidContractAddress(address(_delayedWETHImpl)); return _delayedWETHImpl; } - function preimageOracleSingleton() public view returns (PreimageOracle) { + function preimageOracleSingleton() public view returns (IPreimageOracle) { DeployUtils.assertValidContractAddress(address(_preimageOracleSingleton)); return _preimageOracleSingleton; } - function mipsSingleton() public view returns (MIPS) { + function mipsSingleton() public view returns (IMIPS) { DeployUtils.assertValidContractAddress(address(_mipsSingleton)); return _mipsSingleton; } - function 
systemConfigImpl() public view returns (SystemConfig) { + function systemConfigImpl() public view returns (ISystemConfig) { DeployUtils.assertValidContractAddress(address(_systemConfigImpl)); return _systemConfigImpl; } - function l1CrossDomainMessengerImpl() public view returns (L1CrossDomainMessenger) { + function l1CrossDomainMessengerImpl() public view returns (IL1CrossDomainMessenger) { DeployUtils.assertValidContractAddress(address(_l1CrossDomainMessengerImpl)); return _l1CrossDomainMessengerImpl; } - function l1ERC721BridgeImpl() public view returns (L1ERC721Bridge) { + function l1ERC721BridgeImpl() public view returns (IL1ERC721Bridge) { DeployUtils.assertValidContractAddress(address(_l1ERC721BridgeImpl)); return _l1ERC721BridgeImpl; } - function l1StandardBridgeImpl() public view returns (L1StandardBridge) { + function l1StandardBridgeImpl() public view returns (IL1StandardBridge) { DeployUtils.assertValidContractAddress(address(_l1StandardBridgeImpl)); return _l1StandardBridgeImpl; } - function optimismMintableERC20FactoryImpl() public view returns (OptimismMintableERC20Factory) { + function optimismMintableERC20FactoryImpl() public view returns (IOptimismMintableERC20Factory) { DeployUtils.assertValidContractAddress(address(_optimismMintableERC20FactoryImpl)); return _optimismMintableERC20FactoryImpl; } - function disputeGameFactoryImpl() public view returns (DisputeGameFactory) { + function disputeGameFactoryImpl() public view returns (IDisputeGameFactory) { DeployUtils.assertValidContractAddress(address(_disputeGameFactoryImpl)); return _disputeGameFactoryImpl; } @@ -305,7 +298,7 @@ contract DeployImplementationsOutput is BaseDeployIO { function assertValidOpcmProxy(DeployImplementationsInput _dii) internal { // First we check the proxy as itself. 
- Proxy proxy = Proxy(payable(address(opcmProxy()))); + IProxy proxy = IProxy(payable(address(opcmProxy()))); vm.prank(address(0)); address admin = proxy.admin(); require(admin == address(_dii.opcmProxyOwner()), "OPCMP-10"); @@ -318,7 +311,7 @@ contract DeployImplementationsOutput is BaseDeployIO { } function assertValidOpcmImpl(DeployImplementationsInput _dii) internal { - Proxy proxy = Proxy(payable(address(opcmProxy()))); + IProxy proxy = IProxy(payable(address(opcmProxy()))); vm.prank(address(0)); OPContractsManager impl = OPContractsManager(proxy.implementation()); DeployUtils.assertInitialized({ _contractAddress: address(impl), _slot: 0, _offset: 0 }); @@ -327,7 +320,7 @@ contract DeployImplementationsOutput is BaseDeployIO { } function assertValidOptimismPortalImpl(DeployImplementationsInput) internal view { - OptimismPortal2 portal = optimismPortalImpl(); + IOptimismPortal2 portal = optimismPortalImpl(); DeployUtils.assertInitialized({ _contractAddress: address(portal), _slot: 0, _offset: 0 }); @@ -342,7 +335,7 @@ contract DeployImplementationsOutput is BaseDeployIO { } function assertValidDelayedWETHImpl(DeployImplementationsInput _dii) internal view { - DelayedWETH delayedWETH = delayedWETHImpl(); + IDelayedWETH delayedWETH = delayedWETHImpl(); DeployUtils.assertInitialized({ _contractAddress: address(delayedWETH), _slot: 0, _offset: 0 }); @@ -352,20 +345,20 @@ contract DeployImplementationsOutput is BaseDeployIO { } function assertValidPreimageOracleSingleton(DeployImplementationsInput _dii) internal view { - PreimageOracle oracle = preimageOracleSingleton(); + IPreimageOracle oracle = preimageOracleSingleton(); require(oracle.minProposalSize() == _dii.minProposalSizeBytes(), "PO-10"); require(oracle.challengePeriod() == _dii.challengePeriodSeconds(), "PO-20"); } function assertValidMipsSingleton(DeployImplementationsInput) internal view { - MIPS mips = mipsSingleton(); + IMIPS mips = mipsSingleton(); require(address(mips.oracle()) == 
address(preimageOracleSingleton()), "MIPS-10"); } function assertValidSystemConfigImpl(DeployImplementationsInput) internal view { - SystemConfig systemConfig = systemConfigImpl(); + ISystemConfig systemConfig = systemConfigImpl(); DeployUtils.assertInitialized({ _contractAddress: address(systemConfig), _slot: 0, _offset: 0 }); @@ -397,7 +390,7 @@ contract DeployImplementationsOutput is BaseDeployIO { } function assertValidL1CrossDomainMessengerImpl(DeployImplementationsInput) internal view { - L1CrossDomainMessenger messenger = l1CrossDomainMessengerImpl(); + IL1CrossDomainMessenger messenger = l1CrossDomainMessengerImpl(); DeployUtils.assertInitialized({ _contractAddress: address(messenger), _slot: 0, _offset: 20 }); @@ -412,7 +405,7 @@ contract DeployImplementationsOutput is BaseDeployIO { } function assertValidL1ERC721BridgeImpl(DeployImplementationsInput) internal view { - L1ERC721Bridge bridge = l1ERC721BridgeImpl(); + IL1ERC721Bridge bridge = l1ERC721BridgeImpl(); DeployUtils.assertInitialized({ _contractAddress: address(bridge), _slot: 0, _offset: 0 }); @@ -424,7 +417,7 @@ contract DeployImplementationsOutput is BaseDeployIO { } function assertValidL1StandardBridgeImpl(DeployImplementationsInput) internal view { - L1StandardBridge bridge = l1StandardBridgeImpl(); + IL1StandardBridge bridge = l1StandardBridgeImpl(); DeployUtils.assertInitialized({ _contractAddress: address(bridge), _slot: 0, _offset: 0 }); @@ -436,7 +429,7 @@ contract DeployImplementationsOutput is BaseDeployIO { } function assertValidOptimismMintableERC20FactoryImpl(DeployImplementationsInput) internal view { - OptimismMintableERC20Factory factory = optimismMintableERC20FactoryImpl(); + IOptimismMintableERC20Factory factory = optimismMintableERC20FactoryImpl(); DeployUtils.assertInitialized({ _contractAddress: address(factory), _slot: 0, _offset: 0 }); @@ -445,7 +438,7 @@ contract DeployImplementationsOutput is BaseDeployIO { } function 
assertValidDisputeGameFactoryImpl(DeployImplementationsInput) internal view { - DisputeGameFactory factory = disputeGameFactoryImpl(); + IDisputeGameFactory factory = disputeGameFactoryImpl(); DeployUtils.assertInitialized({ _contractAddress: address(factory), _slot: 0, _offset: 0 }); @@ -497,7 +490,7 @@ contract DeployImplementations is Script { // is a function of the `release` passed in by the caller. bytes4 selector = LibString.eq(_dii.release(), "op-contracts/v1.6.0") ? ISystemConfigV160.initialize.selector - : SystemConfig.initialize.selector; + : ISystemConfig.initialize.selector; return OPContractsManager.ImplementationSetter({ name: "SystemConfig", info: OPContractsManager.Implementation(address(_dio.systemConfigImpl()), selector) @@ -515,7 +508,7 @@ contract DeployImplementations is Script { { bytes4 selector = LibString.eq(_dii.release(), "op-contracts/v1.6.0") ? IL1CrossDomainMessengerV160.initialize.selector - : L1CrossDomainMessenger.initialize.selector; + : IL1CrossDomainMessenger.initialize.selector; return OPContractsManager.ImplementationSetter({ name: "L1CrossDomainMessenger", info: OPContractsManager.Implementation(address(_dio.l1CrossDomainMessengerImpl()), selector) @@ -533,7 +526,7 @@ contract DeployImplementations is Script { { bytes4 selector = LibString.eq(_dii.release(), "op-contracts/v1.6.0") ? 
IL1StandardBridgeV160.initialize.selector - : L1StandardBridge.initialize.selector; + : IL1StandardBridge.initialize.selector; return OPContractsManager.ImplementationSetter({ name: "L1StandardBridge", info: OPContractsManager.Implementation(address(_dio.l1StandardBridgeImpl()), selector) @@ -555,7 +548,12 @@ contract DeployImplementations is Script { address opcmProxyOwner = _dii.opcmProxyOwner(); vm.broadcast(msg.sender); - Proxy proxy = new Proxy(address(msg.sender)); + IProxy proxy = IProxy( + DeployUtils.create1({ + _name: "Proxy", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxy.__constructor__, (msg.sender))) + }) + ); deployOPContractsManagerImpl(_dii, _dio); OPContractsManager opcmImpl = _dio.opcmImpl(); @@ -589,30 +587,32 @@ contract DeployImplementations is Script { OPContractsManager.Blueprints memory blueprints; vm.startBroadcast(msg.sender); - blueprints.addressManager = deployBytecode(Blueprint.blueprintDeployerBytecode(type(AddressManager).creationCode), salt); - blueprints.proxy = deployBytecode(Blueprint.blueprintDeployerBytecode(type(Proxy).creationCode), salt); - blueprints.proxyAdmin = deployBytecode(Blueprint.blueprintDeployerBytecode(type(ProxyAdmin).creationCode), salt); - blueprints.l1ChugSplashProxy = deployBytecode(Blueprint.blueprintDeployerBytecode(type(L1ChugSplashProxy).creationCode), salt); - blueprints.resolvedDelegateProxy = deployBytecode(Blueprint.blueprintDeployerBytecode(type(ResolvedDelegateProxy).creationCode), salt); - blueprints.anchorStateRegistry = deployBytecode(Blueprint.blueprintDeployerBytecode(type(AnchorStateRegistry).creationCode), salt); - (blueprints.permissionedDisputeGame1, blueprints.permissionedDisputeGame2) = deployBigBytecode(type(PermissionedDisputeGame).creationCode, salt); + blueprints.addressManager = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("AddressManager")), salt); + blueprints.proxy = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("Proxy")), salt); + 
blueprints.proxyAdmin = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("ProxyAdmin")), salt); + blueprints.l1ChugSplashProxy = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("L1ChugSplashProxy")), salt); + blueprints.resolvedDelegateProxy = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("ResolvedDelegateProxy")), salt); + blueprints.anchorStateRegistry = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("AnchorStateRegistry")), salt); + (blueprints.permissionedDisputeGame1, blueprints.permissionedDisputeGame2) = deployBigBytecode(vm.getCode("PermissionedDisputeGame"), salt); vm.stopBroadcast(); // forgefmt: disable-end OPContractsManager.ImplementationSetter[] memory setters = new OPContractsManager.ImplementationSetter[](9); setters[0] = OPContractsManager.ImplementationSetter({ name: "L1ERC721Bridge", - info: OPContractsManager.Implementation(address(_dio.l1ERC721BridgeImpl()), L1ERC721Bridge.initialize.selector) + info: OPContractsManager.Implementation(address(_dio.l1ERC721BridgeImpl()), IL1ERC721Bridge.initialize.selector) }); setters[1] = OPContractsManager.ImplementationSetter({ name: "OptimismPortal", - info: OPContractsManager.Implementation(address(_dio.optimismPortalImpl()), OptimismPortal2.initialize.selector) + info: OPContractsManager.Implementation( + address(_dio.optimismPortalImpl()), IOptimismPortal2.initialize.selector + ) }); setters[2] = opcmSystemConfigSetter(_dii, _dio); setters[3] = OPContractsManager.ImplementationSetter({ name: "OptimismMintableERC20Factory", info: OPContractsManager.Implementation( - address(_dio.optimismMintableERC20FactoryImpl()), OptimismMintableERC20Factory.initialize.selector + address(_dio.optimismMintableERC20FactoryImpl()), IOptimismMintableERC20Factory.initialize.selector ) }); setters[4] = l1CrossDomainMessengerConfigSetter(_dii, _dio); @@ -620,12 +620,12 @@ contract DeployImplementations is Script { setters[6] = OPContractsManager.ImplementationSetter({ 
name: "DisputeGameFactory", info: OPContractsManager.Implementation( - address(_dio.disputeGameFactoryImpl()), DisputeGameFactory.initialize.selector + address(_dio.disputeGameFactoryImpl()), IDisputeGameFactory.initialize.selector ) }); setters[7] = OPContractsManager.ImplementationSetter({ name: "DelayedWETH", - info: OPContractsManager.Implementation(address(_dio.delayedWETHImpl()), DelayedWETH.initialize.selector) + info: OPContractsManager.Implementation(address(_dio.delayedWETHImpl()), IDelayedWETH.initialize.selector) }); setters[8] = OPContractsManager.ImplementationSetter({ name: "MIPS", @@ -648,15 +648,20 @@ contract DeployImplementations is Script { string memory stdVerToml = _dii.standardVersionsToml(); // Using snake case for contract name to match the TOML file in superchain-registry. string memory contractName = "system_config"; - SystemConfig impl; + ISystemConfig impl; address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { - impl = SystemConfig(existingImplementation); + impl = ISystemConfig(existingImplementation); } else if (isDevelopRelease(release)) { // Deploy a new implementation for development builds. 
vm.broadcast(msg.sender); - impl = new SystemConfig(); + impl = ISystemConfig( + DeployUtils.create1({ + _name: "SystemConfig", + _args: DeployUtils.encodeConstructor(abi.encodeCall(ISystemConfig.__constructor__, ())) + }) + ); } else { revert(string.concat("DeployImplementations: failed to deploy release ", release)); } @@ -675,14 +680,19 @@ contract DeployImplementations is Script { string memory release = _dii.release(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "l1_cross_domain_messenger"; - L1CrossDomainMessenger impl; + IL1CrossDomainMessenger impl; address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { - impl = L1CrossDomainMessenger(existingImplementation); + impl = IL1CrossDomainMessenger(existingImplementation); } else if (isDevelopRelease(release)) { vm.broadcast(msg.sender); - impl = new L1CrossDomainMessenger(); + impl = IL1CrossDomainMessenger( + DeployUtils.create1({ + _name: "L1CrossDomainMessenger", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1CrossDomainMessenger.__constructor__, ())) + }) + ); } else { revert(string.concat("DeployImplementations: failed to deploy release ", release)); } @@ -701,14 +711,19 @@ contract DeployImplementations is Script { string memory release = _dii.release(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "l1_erc721_bridge"; - L1ERC721Bridge impl; + IL1ERC721Bridge impl; address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { - impl = L1ERC721Bridge(existingImplementation); + impl = IL1ERC721Bridge(existingImplementation); } else if (isDevelopRelease(release)) { vm.broadcast(msg.sender); - impl = new L1ERC721Bridge(); + impl = IL1ERC721Bridge( + DeployUtils.create1({ + _name: "L1ERC721Bridge", + _args: 
DeployUtils.encodeConstructor(abi.encodeCall(IL1ERC721Bridge.__constructor__, ())) + }) + ); } else { revert(string.concat("DeployImplementations: failed to deploy release ", release)); } @@ -727,14 +742,19 @@ contract DeployImplementations is Script { string memory release = _dii.release(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "l1_standard_bridge"; - L1StandardBridge impl; + IL1StandardBridge impl; address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { - impl = L1StandardBridge(payable(existingImplementation)); + impl = IL1StandardBridge(payable(existingImplementation)); } else if (isDevelopRelease(release)) { vm.broadcast(msg.sender); - impl = new L1StandardBridge(); + impl = IL1StandardBridge( + DeployUtils.create1({ + _name: "L1StandardBridge", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1StandardBridge.__constructor__, ())) + }) + ); } else { revert(string.concat("DeployImplementations: failed to deploy release ", release)); } @@ -753,14 +773,19 @@ contract DeployImplementations is Script { string memory release = _dii.release(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "optimism_mintable_erc20_factory"; - OptimismMintableERC20Factory impl; + IOptimismMintableERC20Factory impl; address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { - impl = OptimismMintableERC20Factory(existingImplementation); + impl = IOptimismMintableERC20Factory(existingImplementation); } else if (isDevelopRelease(release)) { vm.broadcast(msg.sender); - impl = new OptimismMintableERC20Factory(); + impl = IOptimismMintableERC20Factory( + DeployUtils.create1({ + _name: "OptimismMintableERC20Factory", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IOptimismMintableERC20Factory.__constructor__, ())) + }) + ); } else { 
revert(string.concat("DeployImplementations: failed to deploy release ", release)); } @@ -834,16 +859,25 @@ contract DeployImplementations is Script { string memory release = _dii.release(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "optimism_portal"; - OptimismPortal2 impl; + IOptimismPortal2 impl; address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { - impl = OptimismPortal2(payable(existingImplementation)); + impl = IOptimismPortal2(payable(existingImplementation)); } else if (isDevelopRelease(release)) { uint256 proofMaturityDelaySeconds = _dii.proofMaturityDelaySeconds(); uint256 disputeGameFinalityDelaySeconds = _dii.disputeGameFinalityDelaySeconds(); vm.broadcast(msg.sender); - impl = new OptimismPortal2(proofMaturityDelaySeconds, disputeGameFinalityDelaySeconds); + impl = IOptimismPortal2( + DeployUtils.create1({ + _name: "OptimismPortal2", + _args: DeployUtils.encodeConstructor( + abi.encodeCall( + IOptimismPortal2.__constructor__, (proofMaturityDelaySeconds, disputeGameFinalityDelaySeconds) + ) + ) + }) + ); } else { revert(string.concat("DeployImplementations: failed to deploy release ", release)); } @@ -856,15 +890,22 @@ contract DeployImplementations is Script { string memory release = _dii.release(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "delayed_weth"; - DelayedWETH impl; + IDelayedWETH impl; address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { - impl = DelayedWETH(payable(existingImplementation)); + impl = IDelayedWETH(payable(existingImplementation)); } else if (isDevelopRelease(release)) { uint256 withdrawalDelaySeconds = _dii.withdrawalDelaySeconds(); vm.broadcast(msg.sender); - impl = new DelayedWETH(withdrawalDelaySeconds); + impl = IDelayedWETH( + DeployUtils.create1({ + _name: "DelayedWETH", + _args: 
DeployUtils.encodeConstructor( + abi.encodeCall(IDelayedWETH.__constructor__, (withdrawalDelaySeconds)) + ) + }) + ); } else { revert(string.concat("DeployImplementations: failed to deploy release ", release)); } @@ -883,16 +924,23 @@ contract DeployImplementations is Script { string memory release = _dii.release(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "preimage_oracle"; - PreimageOracle singleton; + IPreimageOracle singleton; address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { - singleton = PreimageOracle(payable(existingImplementation)); + singleton = IPreimageOracle(payable(existingImplementation)); } else if (isDevelopRelease(release)) { uint256 minProposalSizeBytes = _dii.minProposalSizeBytes(); uint256 challengePeriodSeconds = _dii.challengePeriodSeconds(); vm.broadcast(msg.sender); - singleton = new PreimageOracle(minProposalSizeBytes, challengePeriodSeconds); + singleton = IPreimageOracle( + DeployUtils.create1({ + _name: "PreimageOracle", + _args: DeployUtils.encodeConstructor( + abi.encodeCall(IPreimageOracle.__constructor__, (minProposalSizeBytes, challengePeriodSeconds)) + ) + }) + ); } else { revert(string.concat("DeployImplementations: failed to deploy release ", release)); } @@ -905,15 +953,20 @@ contract DeployImplementations is Script { string memory release = _dii.release(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "mips"; - MIPS singleton; + IMIPS singleton; address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { - singleton = MIPS(payable(existingImplementation)); + singleton = IMIPS(payable(existingImplementation)); } else if (isDevelopRelease(release)) { IPreimageOracle preimageOracle = IPreimageOracle(address(_dio.preimageOracleSingleton())); vm.broadcast(msg.sender); - singleton = new 
MIPS(preimageOracle); + singleton = IMIPS( + DeployUtils.create1({ + _name: "MIPS", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IMIPS.__constructor__, (preimageOracle))) + }) + ); } else { revert(string.concat("DeployImplementations: failed to deploy release ", release)); } @@ -932,14 +985,19 @@ contract DeployImplementations is Script { string memory release = _dii.release(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "dispute_game_factory"; - DisputeGameFactory impl; + IDisputeGameFactory impl; address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { - impl = DisputeGameFactory(payable(existingImplementation)); + impl = IDisputeGameFactory(payable(existingImplementation)); } else if (isDevelopRelease(release)) { vm.broadcast(msg.sender); - impl = new DisputeGameFactory(); + impl = IDisputeGameFactory( + DeployUtils.create1({ + _name: "DisputeGameFactory", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IDisputeGameFactory.__constructor__, ())) + }) + ); } else { revert(string.concat("DeployImplementations: failed to deploy release ", release)); } @@ -1065,7 +1123,12 @@ contract DeployImplementationsInterop is DeployImplementations { address opcmProxyOwner = _dii.opcmProxyOwner(); vm.broadcast(msg.sender); - Proxy proxy = new Proxy(address(msg.sender)); + IProxy proxy = IProxy( + DeployUtils.create1({ + _name: "Proxy", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxy.__constructor__, (msg.sender))) + }) + ); deployOPContractsManagerImpl(_dii, _dio); // overriding function OPContractsManager opcmImpl = _dio.opcmImpl(); @@ -1094,16 +1157,26 @@ contract DeployImplementationsInterop is DeployImplementations { string memory release = _dii.release(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "optimism_portal"; - OptimismPortal2 impl; + IOptimismPortalInterop impl; address 
existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { - impl = OptimismPortalInterop(payable(existingImplementation)); + impl = IOptimismPortalInterop(payable(existingImplementation)); } else if (isDevelopRelease(release)) { uint256 proofMaturityDelaySeconds = _dii.proofMaturityDelaySeconds(); uint256 disputeGameFinalityDelaySeconds = _dii.disputeGameFinalityDelaySeconds(); vm.broadcast(msg.sender); - impl = new OptimismPortalInterop(proofMaturityDelaySeconds, disputeGameFinalityDelaySeconds); + impl = IOptimismPortalInterop( + DeployUtils.create1({ + _name: "OptimismPortalInterop", + _args: DeployUtils.encodeConstructor( + abi.encodeCall( + IOptimismPortalInterop.__constructor__, + (proofMaturityDelaySeconds, disputeGameFinalityDelaySeconds) + ) + ) + }) + ); } else { revert(string.concat("DeployImplementations: failed to deploy release ", release)); } @@ -1123,14 +1196,19 @@ contract DeployImplementationsInterop is DeployImplementations { string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "system_config"; - SystemConfig impl; + ISystemConfigInterop impl; address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { - impl = SystemConfigInterop(existingImplementation); + impl = ISystemConfigInterop(existingImplementation); } else if (isDevelopRelease(release)) { vm.broadcast(msg.sender); - impl = new SystemConfigInterop(); + impl = ISystemConfigInterop( + DeployUtils.create1({ + _name: "SystemConfigInterop", + _args: DeployUtils.encodeConstructor(abi.encodeCall(ISystemConfigInterop.__constructor__, ())) + }) + ); } else { revert(string.concat("DeployImplementations: failed to deploy release ", release)); } @@ -1169,7 +1247,7 @@ contract DeployImplementationsInterop is DeployImplementations { return OPContractsManager.ImplementationSetter({ name: "SystemConfig", info: 
OPContractsManager.Implementation( - address(_dio.systemConfigImpl()), SystemConfigInterop.initialize.selector + address(_dio.systemConfigImpl()), ISystemConfigInterop.initialize.selector ) }); } diff --git a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol index 6c4360b8d666..f9cc5d5875f3 100644 --- a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol @@ -15,24 +15,24 @@ import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; import { Constants } from "src/libraries/Constants.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; -import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; -import { Proxy } from "src/universal/Proxy.sol"; - -import { AddressManager } from "src/legacy/AddressManager.sol"; -import { DelayedWETH } from "src/dispute/DelayedWETH.sol"; -import { DisputeGameFactory } from "src/dispute/DisputeGameFactory.sol"; -import { AnchorStateRegistry } from "src/dispute/AnchorStateRegistry.sol"; -import { FaultDisputeGame } from "src/dispute/FaultDisputeGame.sol"; -import { PermissionedDisputeGame } from "src/dispute/PermissionedDisputeGame.sol"; +import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; +import { IProxy } from "src/universal/interfaces/IProxy.sol"; + +import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; +import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; +import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; +import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; +import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; +import { IPermissionedDisputeGame } from "src/dispute/interfaces/IPermissionedDisputeGame.sol"; import { Claim, GameType, GameTypes, Hash, OutputRoot } from "src/dispute/lib/Types.sol"; import { OPContractsManager } from 
"src/L1/OPContractsManager.sol"; -import { OptimismPortal2 } from "src/L1/OptimismPortal2.sol"; -import { SystemConfig } from "src/L1/SystemConfig.sol"; -import { L1CrossDomainMessenger } from "src/L1/L1CrossDomainMessenger.sol"; -import { L1ERC721Bridge } from "src/L1/L1ERC721Bridge.sol"; -import { L1StandardBridge } from "src/L1/L1StandardBridge.sol"; -import { OptimismMintableERC20Factory } from "src/universal/OptimismMintableERC20Factory.sol"; +import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; +import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; +import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; +import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; +import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; +import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; contract DeployOPChainInput is BaseDeployIO { address internal _opChainProxyAdminOwner; @@ -140,9 +140,9 @@ contract DeployOPChainInput is BaseDeployIO { // // You can `console.logBytes(abi.encode(defaultStartingAnchorRoots))` to get the bytes that // are hardcoded into `op-chain-ops/deployer/opcm/opchain.go` - AnchorStateRegistry.StartingAnchorRoot[] memory defaultStartingAnchorRoots = - new AnchorStateRegistry.StartingAnchorRoot[](1); - defaultStartingAnchorRoots[0] = AnchorStateRegistry.StartingAnchorRoot({ + IAnchorStateRegistry.StartingAnchorRoot[] memory defaultStartingAnchorRoots = + new IAnchorStateRegistry.StartingAnchorRoot[](1); + defaultStartingAnchorRoots[0] = IAnchorStateRegistry.StartingAnchorRoot({ gameType: GameTypes.PERMISSIONED_CANNON, outputRoot: OutputRoot({ root: Hash.wrap(bytes32(hex"dead")), l2BlockNumber: 0 }) }); @@ -166,40 +166,40 @@ contract DeployOPChainInput is BaseDeployIO { } contract DeployOPChainOutput is BaseDeployIO { - ProxyAdmin internal _opChainProxyAdmin; - AddressManager internal 
_addressManager; - L1ERC721Bridge internal _l1ERC721BridgeProxy; - SystemConfig internal _systemConfigProxy; - OptimismMintableERC20Factory internal _optimismMintableERC20FactoryProxy; - L1StandardBridge internal _l1StandardBridgeProxy; - L1CrossDomainMessenger internal _l1CrossDomainMessengerProxy; - OptimismPortal2 internal _optimismPortalProxy; - DisputeGameFactory internal _disputeGameFactoryProxy; - AnchorStateRegistry internal _anchorStateRegistryProxy; - AnchorStateRegistry internal _anchorStateRegistryImpl; - FaultDisputeGame internal _faultDisputeGame; - PermissionedDisputeGame internal _permissionedDisputeGame; - DelayedWETH internal _delayedWETHPermissionedGameProxy; - DelayedWETH internal _delayedWETHPermissionlessGameProxy; + IProxyAdmin internal _opChainProxyAdmin; + IAddressManager internal _addressManager; + IL1ERC721Bridge internal _l1ERC721BridgeProxy; + ISystemConfig internal _systemConfigProxy; + IOptimismMintableERC20Factory internal _optimismMintableERC20FactoryProxy; + IL1StandardBridge internal _l1StandardBridgeProxy; + IL1CrossDomainMessenger internal _l1CrossDomainMessengerProxy; + IOptimismPortal2 internal _optimismPortalProxy; + IDisputeGameFactory internal _disputeGameFactoryProxy; + IAnchorStateRegistry internal _anchorStateRegistryProxy; + IAnchorStateRegistry internal _anchorStateRegistryImpl; + IFaultDisputeGame internal _faultDisputeGame; + IPermissionedDisputeGame internal _permissionedDisputeGame; + IDelayedWETH internal _delayedWETHPermissionedGameProxy; + IDelayedWETH internal _delayedWETHPermissionlessGameProxy; function set(bytes4 _sel, address _addr) public { require(_addr != address(0), "DeployOPChainOutput: cannot set zero address"); // forgefmt: disable-start - if (_sel == this.opChainProxyAdmin.selector) _opChainProxyAdmin = ProxyAdmin(_addr) ; - else if (_sel == this.addressManager.selector) _addressManager = AddressManager(_addr) ; - else if (_sel == this.l1ERC721BridgeProxy.selector) _l1ERC721BridgeProxy = 
L1ERC721Bridge(_addr) ; - else if (_sel == this.systemConfigProxy.selector) _systemConfigProxy = SystemConfig(_addr) ; - else if (_sel == this.optimismMintableERC20FactoryProxy.selector) _optimismMintableERC20FactoryProxy = OptimismMintableERC20Factory(_addr) ; - else if (_sel == this.l1StandardBridgeProxy.selector) _l1StandardBridgeProxy = L1StandardBridge(payable(_addr)) ; - else if (_sel == this.l1CrossDomainMessengerProxy.selector) _l1CrossDomainMessengerProxy = L1CrossDomainMessenger(_addr) ; - else if (_sel == this.optimismPortalProxy.selector) _optimismPortalProxy = OptimismPortal2(payable(_addr)) ; - else if (_sel == this.disputeGameFactoryProxy.selector) _disputeGameFactoryProxy = DisputeGameFactory(_addr) ; - else if (_sel == this.anchorStateRegistryProxy.selector) _anchorStateRegistryProxy = AnchorStateRegistry(_addr) ; - else if (_sel == this.anchorStateRegistryImpl.selector) _anchorStateRegistryImpl = AnchorStateRegistry(_addr) ; - else if (_sel == this.faultDisputeGame.selector) _faultDisputeGame = FaultDisputeGame(_addr) ; - else if (_sel == this.permissionedDisputeGame.selector) _permissionedDisputeGame = PermissionedDisputeGame(_addr) ; - else if (_sel == this.delayedWETHPermissionedGameProxy.selector) _delayedWETHPermissionedGameProxy = DelayedWETH(payable(_addr)) ; - else if (_sel == this.delayedWETHPermissionlessGameProxy.selector) _delayedWETHPermissionlessGameProxy = DelayedWETH(payable(_addr)) ; + if (_sel == this.opChainProxyAdmin.selector) _opChainProxyAdmin = IProxyAdmin(_addr) ; + else if (_sel == this.addressManager.selector) _addressManager = IAddressManager(_addr) ; + else if (_sel == this.l1ERC721BridgeProxy.selector) _l1ERC721BridgeProxy = IL1ERC721Bridge(_addr) ; + else if (_sel == this.systemConfigProxy.selector) _systemConfigProxy = ISystemConfig(_addr) ; + else if (_sel == this.optimismMintableERC20FactoryProxy.selector) _optimismMintableERC20FactoryProxy = IOptimismMintableERC20Factory(_addr) ; + else if (_sel == 
this.l1StandardBridgeProxy.selector) _l1StandardBridgeProxy = IL1StandardBridge(payable(_addr)) ; + else if (_sel == this.l1CrossDomainMessengerProxy.selector) _l1CrossDomainMessengerProxy = IL1CrossDomainMessenger(_addr) ; + else if (_sel == this.optimismPortalProxy.selector) _optimismPortalProxy = IOptimismPortal2(payable(_addr)) ; + else if (_sel == this.disputeGameFactoryProxy.selector) _disputeGameFactoryProxy = IDisputeGameFactory(_addr) ; + else if (_sel == this.anchorStateRegistryProxy.selector) _anchorStateRegistryProxy = IAnchorStateRegistry(_addr) ; + else if (_sel == this.anchorStateRegistryImpl.selector) _anchorStateRegistryImpl = IAnchorStateRegistry(_addr) ; + else if (_sel == this.faultDisputeGame.selector) _faultDisputeGame = IFaultDisputeGame(_addr) ; + else if (_sel == this.permissionedDisputeGame.selector) _permissionedDisputeGame = IPermissionedDisputeGame(_addr) ; + else if (_sel == this.delayedWETHPermissionedGameProxy.selector) _delayedWETHPermissionedGameProxy = IDelayedWETH(payable(_addr)) ; + else if (_sel == this.delayedWETHPermissionlessGameProxy.selector) _delayedWETHPermissionlessGameProxy = IDelayedWETH(payable(_addr)) ; else revert("DeployOPChainOutput: unknown selector"); // forgefmt: disable-end } @@ -233,77 +233,77 @@ contract DeployOPChainOutput is BaseDeployIO { assertValidDeploy(_doi); } - function opChainProxyAdmin() public view returns (ProxyAdmin) { + function opChainProxyAdmin() public view returns (IProxyAdmin) { DeployUtils.assertValidContractAddress(address(_opChainProxyAdmin)); return _opChainProxyAdmin; } - function addressManager() public view returns (AddressManager) { + function addressManager() public view returns (IAddressManager) { DeployUtils.assertValidContractAddress(address(_addressManager)); return _addressManager; } - function l1ERC721BridgeProxy() public view returns (L1ERC721Bridge) { + function l1ERC721BridgeProxy() public view returns (IL1ERC721Bridge) { 
DeployUtils.assertValidContractAddress(address(_l1ERC721BridgeProxy)); return _l1ERC721BridgeProxy; } - function systemConfigProxy() public view returns (SystemConfig) { + function systemConfigProxy() public view returns (ISystemConfig) { DeployUtils.assertValidContractAddress(address(_systemConfigProxy)); return _systemConfigProxy; } - function optimismMintableERC20FactoryProxy() public view returns (OptimismMintableERC20Factory) { + function optimismMintableERC20FactoryProxy() public view returns (IOptimismMintableERC20Factory) { DeployUtils.assertValidContractAddress(address(_optimismMintableERC20FactoryProxy)); return _optimismMintableERC20FactoryProxy; } - function l1StandardBridgeProxy() public view returns (L1StandardBridge) { + function l1StandardBridgeProxy() public view returns (IL1StandardBridge) { DeployUtils.assertValidContractAddress(address(_l1StandardBridgeProxy)); return _l1StandardBridgeProxy; } - function l1CrossDomainMessengerProxy() public view returns (L1CrossDomainMessenger) { + function l1CrossDomainMessengerProxy() public view returns (IL1CrossDomainMessenger) { DeployUtils.assertValidContractAddress(address(_l1CrossDomainMessengerProxy)); return _l1CrossDomainMessengerProxy; } - function optimismPortalProxy() public view returns (OptimismPortal2) { + function optimismPortalProxy() public view returns (IOptimismPortal2) { DeployUtils.assertValidContractAddress(address(_optimismPortalProxy)); return _optimismPortalProxy; } - function disputeGameFactoryProxy() public view returns (DisputeGameFactory) { + function disputeGameFactoryProxy() public view returns (IDisputeGameFactory) { DeployUtils.assertValidContractAddress(address(_disputeGameFactoryProxy)); return _disputeGameFactoryProxy; } - function anchorStateRegistryProxy() public view returns (AnchorStateRegistry) { + function anchorStateRegistryProxy() public view returns (IAnchorStateRegistry) { DeployUtils.assertValidContractAddress(address(_anchorStateRegistryProxy)); return 
_anchorStateRegistryProxy; } - function anchorStateRegistryImpl() public view returns (AnchorStateRegistry) { + function anchorStateRegistryImpl() public view returns (IAnchorStateRegistry) { DeployUtils.assertValidContractAddress(address(_anchorStateRegistryImpl)); return _anchorStateRegistryImpl; } - function faultDisputeGame() public view returns (FaultDisputeGame) { + function faultDisputeGame() public view returns (IFaultDisputeGame) { DeployUtils.assertValidContractAddress(address(_faultDisputeGame)); return _faultDisputeGame; } - function permissionedDisputeGame() public view returns (PermissionedDisputeGame) { + function permissionedDisputeGame() public view returns (IPermissionedDisputeGame) { DeployUtils.assertValidContractAddress(address(_permissionedDisputeGame)); return _permissionedDisputeGame; } - function delayedWETHPermissionedGameProxy() public view returns (DelayedWETH) { + function delayedWETHPermissionedGameProxy() public view returns (IDelayedWETH) { DeployUtils.assertValidContractAddress(address(_delayedWETHPermissionedGameProxy)); return _delayedWETHPermissionedGameProxy; } - function delayedWETHPermissionlessGameProxy() public view returns (DelayedWETH) { + function delayedWETHPermissionlessGameProxy() public view returns (IDelayedWETH) { // TODO: Eventually switch from Permissioned to Permissionless. Add this check back in. 
// DeployUtils.assertValidContractAddress(address(_delayedWETHPermissionlessGameProxy)); return _delayedWETHPermissionlessGameProxy; @@ -326,7 +326,7 @@ contract DeployOPChainOutput is BaseDeployIO { } function assertValidPermissionedDisputeGame(DeployOPChainInput _doi) internal { - PermissionedDisputeGame game = permissionedDisputeGame(); + IPermissionedDisputeGame game = permissionedDisputeGame(); require(GameType.unwrap(game.gameType()) == GameType.unwrap(GameTypes.PERMISSIONED_CANNON), "DPG-10"); // This hex string is the absolutePrestate of the latest op-program release, see where the @@ -348,7 +348,7 @@ contract DeployOPChainOutput is BaseDeployIO { function assertValidAnchorStateRegistryProxy(DeployOPChainInput) internal { // First we check the proxy as itself. - Proxy proxy = Proxy(payable(address(anchorStateRegistryProxy()))); + IProxy proxy = IProxy(payable(address(anchorStateRegistryProxy()))); vm.prank(address(0)); address admin = proxy.admin(); require(admin == address(opChainProxyAdmin()), "ANCHORP-10"); @@ -365,7 +365,7 @@ contract DeployOPChainOutput is BaseDeployIO { } function assertValidAnchorStateRegistryImpl(DeployOPChainInput) internal view { - AnchorStateRegistry registry = anchorStateRegistryImpl(); + IAnchorStateRegistry registry = anchorStateRegistryImpl(); DeployUtils.assertInitialized({ _contractAddress: address(registry), _slot: 0, _offset: 0 }); @@ -373,7 +373,7 @@ contract DeployOPChainOutput is BaseDeployIO { } function assertValidSystemConfig(DeployOPChainInput _doi) internal { - SystemConfig systemConfig = systemConfigProxy(); + ISystemConfig systemConfig = systemConfigProxy(); DeployUtils.assertInitialized({ _contractAddress: address(systemConfig), _slot: 0, _offset: 0 }); @@ -412,7 +412,7 @@ contract DeployOPChainOutput is BaseDeployIO { } function assertValidL1CrossDomainMessenger(DeployOPChainInput _doi) internal { - L1CrossDomainMessenger messenger = l1CrossDomainMessengerProxy(); + IL1CrossDomainMessenger messenger = 
l1CrossDomainMessengerProxy(); DeployUtils.assertInitialized({ _contractAddress: address(messenger), _slot: 0, _offset: 20 }); @@ -428,8 +428,8 @@ contract DeployOPChainOutput is BaseDeployIO { } function assertValidL1StandardBridge(DeployOPChainInput _doi) internal { - L1StandardBridge bridge = l1StandardBridgeProxy(); - L1CrossDomainMessenger messenger = l1CrossDomainMessengerProxy(); + IL1StandardBridge bridge = l1StandardBridgeProxy(); + IL1CrossDomainMessenger messenger = l1CrossDomainMessengerProxy(); DeployUtils.assertInitialized({ _contractAddress: address(bridge), _slot: 0, _offset: 0 }); @@ -441,7 +441,7 @@ contract DeployOPChainOutput is BaseDeployIO { } function assertValidOptimismMintableERC20Factory(DeployOPChainInput) internal view { - OptimismMintableERC20Factory factory = optimismMintableERC20FactoryProxy(); + IOptimismMintableERC20Factory factory = optimismMintableERC20FactoryProxy(); DeployUtils.assertInitialized({ _contractAddress: address(factory), _slot: 0, _offset: 0 }); @@ -450,7 +450,7 @@ contract DeployOPChainOutput is BaseDeployIO { } function assertValidL1ERC721Bridge(DeployOPChainInput _doi) internal { - L1ERC721Bridge bridge = l1ERC721BridgeProxy(); + IL1ERC721Bridge bridge = l1ERC721BridgeProxy(); DeployUtils.assertInitialized({ _contractAddress: address(bridge), _slot: 0, _offset: 0 }); @@ -463,7 +463,7 @@ contract DeployOPChainOutput is BaseDeployIO { } function assertValidOptimismPortal(DeployOPChainInput _doi) internal { - OptimismPortal2 portal = optimismPortalProxy(); + IOptimismPortal2 portal = optimismPortalProxy(); ISuperchainConfig superchainConfig = ISuperchainConfig(address(_doi.opcmProxy().superchainConfig())); require(address(portal.disputeGameFactory()) == address(disputeGameFactoryProxy()), "PORTAL-10"); @@ -479,7 +479,7 @@ contract DeployOPChainOutput is BaseDeployIO { } function assertValidDisputeGameFactory(DeployOPChainInput _doi) internal view { - DisputeGameFactory factory = disputeGameFactoryProxy(); + 
IDisputeGameFactory factory = disputeGameFactoryProxy(); DeployUtils.assertInitialized({ _contractAddress: address(factory), _slot: 0, _offset: 0 }); @@ -490,11 +490,11 @@ contract DeployOPChainOutput is BaseDeployIO { } function assertValidDelayedWETH(DeployOPChainInput _doi) internal { - DelayedWETH permissioned = delayedWETHPermissionedGameProxy(); + IDelayedWETH permissioned = delayedWETHPermissionedGameProxy(); require(permissioned.owner() == address(_doi.opChainProxyAdminOwner()), "DWETH-10"); - Proxy proxy = Proxy(payable(address(permissioned))); + IProxy proxy = IProxy(payable(address(permissioned))); vm.prank(address(0)); address admin = proxy.admin(); require(admin == address(opChainProxyAdmin()), "DWETH-20"); diff --git a/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol b/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol index c9e1b23bf230..913bc510d5bb 100644 --- a/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol +++ b/packages/contracts-bedrock/scripts/DeploySuperchain.s.sol @@ -4,10 +4,10 @@ pragma solidity 0.8.15; import { Script } from "forge-std/Script.sol"; import { stdToml } from "forge-std/StdToml.sol"; -import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; -import { ProtocolVersions, ProtocolVersion } from "src/L1/ProtocolVersions.sol"; -import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; -import { Proxy } from "src/universal/Proxy.sol"; +import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IProtocolVersions, ProtocolVersion } from "src/L1/interfaces/IProtocolVersions.sol"; +import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; +import { IProxy } from "src/universal/interfaces/IProxy.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; import { Solarray } from "scripts/libraries/Solarray.sol"; @@ -156,21 +156,21 @@ contract DeploySuperchainInput is BaseDeployIO { contract DeploySuperchainOutput is BaseDeployIO { // All outputs are 
stored in storage individually, with the same rationale as doing so for // inputs, and the same pattern is used below to expose the outputs. - ProtocolVersions internal _protocolVersionsImpl; - ProtocolVersions internal _protocolVersionsProxy; - SuperchainConfig internal _superchainConfigImpl; - SuperchainConfig internal _superchainConfigProxy; - ProxyAdmin internal _superchainProxyAdmin; + IProtocolVersions internal _protocolVersionsImpl; + IProtocolVersions internal _protocolVersionsProxy; + ISuperchainConfig internal _superchainConfigImpl; + ISuperchainConfig internal _superchainConfigProxy; + IProxyAdmin internal _superchainProxyAdmin; // This method lets each field be set individually. The selector of an output's getter method // is used to determine which field to set. function set(bytes4 _sel, address _address) public { require(_address != address(0), "DeploySuperchainOutput: cannot set zero address"); - if (_sel == this.superchainProxyAdmin.selector) _superchainProxyAdmin = ProxyAdmin(_address); - else if (_sel == this.superchainConfigImpl.selector) _superchainConfigImpl = SuperchainConfig(_address); - else if (_sel == this.superchainConfigProxy.selector) _superchainConfigProxy = SuperchainConfig(_address); - else if (_sel == this.protocolVersionsImpl.selector) _protocolVersionsImpl = ProtocolVersions(_address); - else if (_sel == this.protocolVersionsProxy.selector) _protocolVersionsProxy = ProtocolVersions(_address); + if (_sel == this.superchainProxyAdmin.selector) _superchainProxyAdmin = IProxyAdmin(_address); + else if (_sel == this.superchainConfigImpl.selector) _superchainConfigImpl = ISuperchainConfig(_address); + else if (_sel == this.superchainConfigProxy.selector) _superchainConfigProxy = ISuperchainConfig(_address); + else if (_sel == this.protocolVersionsImpl.selector) _protocolVersionsImpl = IProtocolVersions(_address); + else if (_sel == this.protocolVersionsProxy.selector) _protocolVersionsProxy = IProtocolVersions(_address); else 
revert("DeploySuperchainOutput: unknown selector"); } @@ -188,8 +188,8 @@ contract DeploySuperchainOutput is BaseDeployIO { // To read the implementations we prank as the zero address due to the proxyCallIfNotAdmin modifier. vm.startPrank(address(0)); - address actualSuperchainConfigImpl = Proxy(payable(address(_superchainConfigProxy))).implementation(); - address actualProtocolVersionsImpl = Proxy(payable(address(_protocolVersionsProxy))).implementation(); + address actualSuperchainConfigImpl = IProxy(payable(address(_superchainConfigProxy))).implementation(); + address actualProtocolVersionsImpl = IProxy(payable(address(_protocolVersionsProxy))).implementation(); vm.stopPrank(); require(actualSuperchainConfigImpl == address(_superchainConfigImpl), "100"); @@ -198,27 +198,27 @@ contract DeploySuperchainOutput is BaseDeployIO { assertValidDeploy(_dsi); } - function superchainProxyAdmin() public view returns (ProxyAdmin) { + function superchainProxyAdmin() public view returns (IProxyAdmin) { // This does not have to be a contract address, it could be an EOA. 
return _superchainProxyAdmin; } - function superchainConfigImpl() public view returns (SuperchainConfig) { + function superchainConfigImpl() public view returns (ISuperchainConfig) { DeployUtils.assertValidContractAddress(address(_superchainConfigImpl)); return _superchainConfigImpl; } - function superchainConfigProxy() public view returns (SuperchainConfig) { + function superchainConfigProxy() public view returns (ISuperchainConfig) { DeployUtils.assertValidContractAddress(address(_superchainConfigProxy)); return _superchainConfigProxy; } - function protocolVersionsImpl() public view returns (ProtocolVersions) { + function protocolVersionsImpl() public view returns (IProtocolVersions) { DeployUtils.assertValidContractAddress(address(_protocolVersionsImpl)); return _protocolVersionsImpl; } - function protocolVersionsProxy() public view returns (ProtocolVersions) { + function protocolVersionsProxy() public view returns (IProtocolVersions) { DeployUtils.assertValidContractAddress(address(_protocolVersionsProxy)); return _protocolVersionsProxy; } @@ -236,16 +236,16 @@ contract DeploySuperchainOutput is BaseDeployIO { function assertValidSuperchainConfig(DeploySuperchainInput _dsi) internal { // Proxy checks. 
- SuperchainConfig superchainConfig = superchainConfigProxy(); + ISuperchainConfig superchainConfig = superchainConfigProxy(); DeployUtils.assertInitialized({ _contractAddress: address(superchainConfig), _slot: 0, _offset: 0 }); require(superchainConfig.guardian() == _dsi.guardian(), "SUPCON-10"); require(superchainConfig.paused() == _dsi.paused(), "SUPCON-20"); vm.startPrank(address(0)); require( - Proxy(payable(address(superchainConfig))).implementation() == address(superchainConfigImpl()), "SUPCON-30" + IProxy(payable(address(superchainConfig))).implementation() == address(superchainConfigImpl()), "SUPCON-30" ); - require(Proxy(payable(address(superchainConfig))).admin() == address(superchainProxyAdmin()), "SUPCON-40"); + require(IProxy(payable(address(superchainConfig))).admin() == address(superchainProxyAdmin()), "SUPCON-40"); vm.stopPrank(); // Implementation checks @@ -256,7 +256,7 @@ contract DeploySuperchainOutput is BaseDeployIO { function assertValidProtocolVersions(DeploySuperchainInput _dsi) internal { // Proxy checks. - ProtocolVersions pv = protocolVersionsProxy(); + IProtocolVersions pv = protocolVersionsProxy(); DeployUtils.assertInitialized({ _contractAddress: address(pv), _slot: 0, _offset: 0 }); require(pv.owner() == _dsi.protocolVersionsOwner(), "PV-10"); require( @@ -268,8 +268,8 @@ contract DeploySuperchainOutput is BaseDeployIO { ); vm.startPrank(address(0)); - require(Proxy(payable(address(pv))).implementation() == address(protocolVersionsImpl()), "PV-40"); - require(Proxy(payable(address(pv))).admin() == address(superchainProxyAdmin()), "PV-50"); + require(IProxy(payable(address(pv))).implementation() == address(protocolVersionsImpl()), "PV-40"); + require(IProxy(payable(address(pv))).admin() == address(superchainProxyAdmin()), "PV-50"); vm.stopPrank(); // Implementation checks. @@ -320,7 +320,12 @@ contract DeploySuperchain is Script { // contract. 
If we provide no argument, the foundry default sender would be the broadcaster during test, but the // broadcaster needs to be the deployer since they are set to the initial proxy admin owner. vm.broadcast(msg.sender); - ProxyAdmin superchainProxyAdmin = new ProxyAdmin(msg.sender); + IProxyAdmin superchainProxyAdmin = IProxyAdmin( + DeployUtils.create1({ + _name: "ProxyAdmin", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxyAdmin.__constructor__, (msg.sender))) + }) + ); vm.label(address(superchainProxyAdmin), "SuperchainProxyAdmin"); _dso.set(_dso.superchainProxyAdmin.selector, address(superchainProxyAdmin)); @@ -329,8 +334,18 @@ contract DeploySuperchain is Script { function deploySuperchainImplementationContracts(DeploySuperchainInput, DeploySuperchainOutput _dso) public { // Deploy implementation contracts. vm.startBroadcast(msg.sender); - SuperchainConfig superchainConfigImpl = new SuperchainConfig(); - ProtocolVersions protocolVersionsImpl = new ProtocolVersions(); + ISuperchainConfig superchainConfigImpl = ISuperchainConfig( + DeployUtils.create1({ + _name: "SuperchainConfig", + _args: DeployUtils.encodeConstructor(abi.encodeCall(ISuperchainConfig.__constructor__, ())) + }) + ); + IProtocolVersions protocolVersionsImpl = IProtocolVersions( + DeployUtils.create1({ + _name: "ProtocolVersions", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IProtocolVersions.__constructor__, ())) + }) + ); vm.stopBroadcast(); vm.label(address(superchainConfigImpl), "SuperchainConfigImpl"); @@ -344,15 +359,22 @@ contract DeploySuperchain is Script { address guardian = _dsi.guardian(); bool paused = _dsi.paused(); - ProxyAdmin superchainProxyAdmin = _dso.superchainProxyAdmin(); - SuperchainConfig superchainConfigImpl = _dso.superchainConfigImpl(); + IProxyAdmin superchainProxyAdmin = _dso.superchainProxyAdmin(); + ISuperchainConfig superchainConfigImpl = _dso.superchainConfigImpl(); vm.startBroadcast(msg.sender); - SuperchainConfig superchainConfigProxy = 
SuperchainConfig(address(new Proxy(address(superchainProxyAdmin)))); + ISuperchainConfig superchainConfigProxy = ISuperchainConfig( + DeployUtils.create1({ + _name: "Proxy", + _args: DeployUtils.encodeConstructor( + abi.encodeCall(IProxy.__constructor__, (address(superchainProxyAdmin))) + ) + }) + ); superchainProxyAdmin.upgradeAndCall( payable(address(superchainConfigProxy)), address(superchainConfigImpl), - abi.encodeCall(SuperchainConfig.initialize, (guardian, paused)) + abi.encodeCall(ISuperchainConfig.initialize, (guardian, paused)) ); vm.stopBroadcast(); @@ -365,16 +387,23 @@ contract DeploySuperchain is Script { ProtocolVersion requiredProtocolVersion = _dsi.requiredProtocolVersion(); ProtocolVersion recommendedProtocolVersion = _dsi.recommendedProtocolVersion(); - ProxyAdmin superchainProxyAdmin = _dso.superchainProxyAdmin(); - ProtocolVersions protocolVersionsImpl = _dso.protocolVersionsImpl(); + IProxyAdmin superchainProxyAdmin = _dso.superchainProxyAdmin(); + IProtocolVersions protocolVersionsImpl = _dso.protocolVersionsImpl(); vm.startBroadcast(msg.sender); - ProtocolVersions protocolVersionsProxy = ProtocolVersions(address(new Proxy(address(superchainProxyAdmin)))); + IProtocolVersions protocolVersionsProxy = IProtocolVersions( + DeployUtils.create1({ + _name: "Proxy", + _args: DeployUtils.encodeConstructor( + abi.encodeCall(IProxy.__constructor__, (address(superchainProxyAdmin))) + ) + }) + ); superchainProxyAdmin.upgradeAndCall( payable(address(protocolVersionsProxy)), address(protocolVersionsImpl), abi.encodeCall( - ProtocolVersions.initialize, + IProtocolVersions.initialize, (protocolVersionsOwner, requiredProtocolVersion, recommendedProtocolVersion) ) ); @@ -387,7 +416,7 @@ contract DeploySuperchain is Script { function transferProxyAdminOwnership(DeploySuperchainInput _dsi, DeploySuperchainOutput _dso) public { address superchainProxyAdminOwner = _dsi.superchainProxyAdminOwner(); - ProxyAdmin superchainProxyAdmin = _dso.superchainProxyAdmin(); + 
IProxyAdmin superchainProxyAdmin = _dso.superchainProxyAdmin(); DeployUtils.assertValidContractAddress(address(superchainProxyAdmin)); vm.broadcast(msg.sender); diff --git a/packages/contracts-bedrock/scripts/checks/check-interfaces.sh b/packages/contracts-bedrock/scripts/checks/check-interfaces.sh index 24c584690f10..174c26969058 100755 --- a/packages/contracts-bedrock/scripts/checks/check-interfaces.sh +++ b/packages/contracts-bedrock/scripts/checks/check-interfaces.sh @@ -73,6 +73,7 @@ EXCLUDE_CONTRACTS=( "IDelayedWETH" "IL2ToL2CrossDomainMessenger" "ICrossL2Inbox" + "ISystemConfigInterop" # Solidity complains about receive but contract doens't have it. "IResolvedDelegateProxy" diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index 02a77678abfe..57c5e30c7d8f 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -15,7 +15,6 @@ import { Chains } from "scripts/libraries/Chains.sol"; import { Config } from "scripts/libraries/Config.sol"; import { LibStateDiff } from "scripts/libraries/LibStateDiff.sol"; import { Process } from "scripts/libraries/Process.sol"; -import { ForgeArtifacts } from "scripts/libraries/ForgeArtifacts.sol"; import { ChainAssertions } from "scripts/deploy/ChainAssertions.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; @@ -25,9 +24,8 @@ import { StorageSetter } from "src/universal/StorageSetter.sol"; // Libraries import { Constants } from "src/libraries/Constants.sol"; -import { Predeploys } from "src/libraries/Predeploys.sol"; import { Types } from "scripts/libraries/Types.sol"; -import { LibClaim, Duration } from "src/dispute/lib/LibUDT.sol"; +import { Duration } from "src/dispute/lib/LibUDT.sol"; import "src/dispute/lib/Types.sol"; // Interfaces @@ -53,7 +51,6 @@ import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol" import { 
IPermissionedDisputeGame } from "src/dispute/interfaces/IPermissionedDisputeGame.sol"; import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; -import { IMIPS } from "src/cannon/interfaces/IMIPS.sol"; import { IMIPS2 } from "src/cannon/interfaces/IMIPS2.sol"; import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; diff --git a/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol b/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol index c038c27a5683..a68f0bf615f5 100644 --- a/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol +++ b/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol @@ -10,7 +10,7 @@ import "src/dispute/lib/Types.sol"; // Interfaces import { IProxy } from "src/universal/interfaces/IProxy.sol"; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; +import { IMIPS } from "src/cannon/interfaces/IMIPS.sol"; import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; diff --git a/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol b/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol index 7db0de4c3fce..0f372e752b87 100644 --- a/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol +++ b/packages/contracts-bedrock/scripts/fpac/FPACOPS2.s.sol @@ -10,7 +10,7 @@ import "src/dispute/lib/Types.sol"; // Interfaces import { IProxy } from "src/universal/interfaces/IProxy.sol"; -import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; +import { IMIPS } from "src/cannon/interfaces/IMIPS.sol"; import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; import { IDelayedWETH } from 
"src/dispute/interfaces/IDelayedWETH.sol"; import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index 73a9df9a7995..c6dcf97d6302 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -32,8 +32,8 @@ "sourceCodeHash": "0xde4df0f9633dc0cdb1c9f634003ea5b0f7c5c1aebc407bc1b2f44c0ecf938649" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0xfaab186a660764265a837fac689a6d8602454c6ca9f39b5244282768b8d86b3a", - "sourceCodeHash": "0x831b7268e1beb93050dbaae1e83e17635385bd101779146a95150084f69d2835" + "initCodeHash": "0x44fa611dcacad2f61c8ca7ef970e580800b5070d10f9a2a4c04459d6cf4cd180", + "sourceCodeHash": "0xe66886dd90cef90525f5ba2310c9e9d2d910c81c283f9b7cbfcd57c5091473c6" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xbe2c0c81b3459014f287d8c89cdc0d27dde5d1f44e5d024fa1e4773ddc47c190", diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json index 3ca8074ff273..31b7cb368409 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json @@ -184,7 +184,7 @@ { "components": [ { - "internalType": "contract ProxyAdmin", + "internalType": "contract IProxyAdmin", "name": "opChainProxyAdmin", "type": "address" }, @@ -204,7 +204,7 @@ "type": "address" }, { - "internalType": "contract OptimismMintableERC20Factory", + "internalType": "contract IOptimismMintableERC20Factory", "name": "optimismMintableERC20FactoryProxy", "type": "address" }, diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json index 3ca8074ff273..31b7cb368409 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json +++ 
b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json @@ -184,7 +184,7 @@ { "components": [ { - "internalType": "contract ProxyAdmin", + "internalType": "contract IProxyAdmin", "name": "opChainProxyAdmin", "type": "address" }, @@ -204,7 +204,7 @@ "type": "address" }, { - "internalType": "contract OptimismMintableERC20Factory", + "internalType": "contract IOptimismMintableERC20Factory", "name": "optimismMintableERC20FactoryProxy", "type": "address" }, diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index 08703db158ad..248c1b340e4a 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -15,10 +15,7 @@ import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; import { ISystemConfigV160 } from "src/L1/interfaces/ISystemConfigV160.sol"; import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; -import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; - -import { L1ChugSplashProxy } from "src/legacy/L1ChugSplashProxy.sol"; -import { ResolvedDelegateProxy } from "src/legacy/ResolvedDelegateProxy.sol"; +import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; @@ -35,7 +32,7 @@ import { ISystemConfigV160 } from "src/L1/interfaces/ISystemConfigV160.sol"; import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; -import { OptimismMintableERC20Factory } from "src/universal/OptimismMintableERC20Factory.sol"; +import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; 
/// @custom:proxied true contract OPContractsManager is ISemver, Initializable { @@ -67,11 +64,11 @@ contract OPContractsManager is ISemver, Initializable { /// @notice The full set of outputs from deploying a new OP Stack chain. struct DeployOutput { - ProxyAdmin opChainProxyAdmin; + IProxyAdmin opChainProxyAdmin; IAddressManager addressManager; IL1ERC721Bridge l1ERC721BridgeProxy; ISystemConfig systemConfigProxy; - OptimismMintableERC20Factory optimismMintableERC20FactoryProxy; + IOptimismMintableERC20Factory optimismMintableERC20FactoryProxy; IL1StandardBridge l1StandardBridgeProxy; IL1CrossDomainMessenger l1CrossDomainMessengerProxy; // Fault proof contracts below. @@ -125,8 +122,8 @@ contract OPContractsManager is ISemver, Initializable { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.18 - string public constant version = "1.0.0-beta.18"; + /// @custom:semver 1.0.0-beta.19 + string public constant version = "1.0.0-beta.19"; /// @notice Represents the interface version so consumers know how to decode the DeployOutput struct /// that's emitted in the `Deployed` event. Whenever that struct changes, a new version should be used. @@ -237,7 +234,7 @@ contract OPContractsManager is ISemver, Initializable { // due to it's usage of the legacy ResolvedDelegateProxy. 
output.addressManager = IAddressManager(Blueprint.deployFrom(blueprint.addressManager, salt)); output.opChainProxyAdmin = - ProxyAdmin(Blueprint.deployFrom(blueprint.proxyAdmin, salt, abi.encode(address(this)))); + IProxyAdmin(Blueprint.deployFrom(blueprint.proxyAdmin, salt, abi.encode(address(this)))); output.opChainProxyAdmin.setAddressManager(output.addressManager); // -------- Deploy Proxy Contracts -------- @@ -249,7 +246,7 @@ contract OPContractsManager is ISemver, Initializable { IOptimismPortal2(payable(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "OptimismPortal"))); output.systemConfigProxy = ISystemConfig(deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "SystemConfig")); - output.optimismMintableERC20FactoryProxy = OptimismMintableERC20Factory( + output.optimismMintableERC20FactoryProxy = IOptimismMintableERC20Factory( deployProxy(l2ChainId, output.opChainProxyAdmin, saltMixer, "OptimismMintableERC20Factory") ); output.disputeGameFactoryProxy = @@ -261,14 +258,14 @@ contract OPContractsManager is ISemver, Initializable { output.l1StandardBridgeProxy = IL1StandardBridge( payable(Blueprint.deployFrom(blueprint.l1ChugSplashProxy, salt, abi.encode(output.opChainProxyAdmin))) ); - output.opChainProxyAdmin.setProxyType(address(output.l1StandardBridgeProxy), ProxyAdmin.ProxyType.CHUGSPLASH); + output.opChainProxyAdmin.setProxyType(address(output.l1StandardBridgeProxy), IProxyAdmin.ProxyType.CHUGSPLASH); string memory contractName = "OVM_L1CrossDomainMessenger"; output.l1CrossDomainMessengerProxy = IL1CrossDomainMessenger( Blueprint.deployFrom(blueprint.resolvedDelegateProxy, salt, abi.encode(output.addressManager, contractName)) ); output.opChainProxyAdmin.setProxyType( - address(output.l1CrossDomainMessengerProxy), ProxyAdmin.ProxyType.RESOLVED + address(output.l1CrossDomainMessengerProxy), IProxyAdmin.ProxyType.RESOLVED ); output.opChainProxyAdmin.setImplementationName(address(output.l1CrossDomainMessengerProxy), contractName); @@ 
-387,7 +384,7 @@ contract OPContractsManager is ISemver, Initializable { /// This is required because we deploy many identical proxies, so they each require a unique salt for determinism. function deployProxy( uint256 _l2ChainId, - ProxyAdmin _proxyAdmin, + IProxyAdmin _proxyAdmin, string memory _saltMixer, string memory _contractName ) @@ -683,7 +680,7 @@ contract OPContractsManager is ISemver, Initializable { /// @notice Makes an external call to the target to initialize the proxy with the specified data. /// First performs safety checks to ensure the target, implementation, and proxy admin are valid. function upgradeAndCall( - ProxyAdmin _proxyAdmin, + IProxyAdmin _proxyAdmin, address _target, address _implementation, bytes memory _data diff --git a/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol b/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol index a9dad0d90020..9d541434a397 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol @@ -6,7 +6,6 @@ import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { ISystemConfigInterop } from "src/L1/interfaces/ISystemConfigInterop.sol"; /// @custom:proxied true contract OPContractsManagerInterop is OPContractsManager { diff --git a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigInterop.sol b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigInterop.sol index b4617b8e6a42..fffbd3cb6681 100644 --- a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigInterop.sol +++ b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigInterop.sol @@ -28,18 +28,6 @@ interface ISystemConfigInterop { function gasPayingToken() external view 
returns (address addr_, uint8 decimals_); function gasPayingTokenName() external view returns (string memory name_); function gasPayingTokenSymbol() external view returns (string memory symbol_); - function initialize( - address _owner, - uint32 _basefeeScalar, - uint32 _blobbasefeeScalar, - bytes32 _batcherHash, - uint64 _gasLimit, - address _unsafeBlockSigner, - IResourceMetering.ResourceConfig memory _config, - address _batchInbox, - ISystemConfig.Addresses memory _addresses - ) - external; function isCustomGasToken() external view returns (bool); function l1CrossDomainMessenger() external view returns (address addr_); function l1ERC721Bridge() external view returns (address addr_); diff --git a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol index 2b3a254c8bbe..53c0650835f0 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol @@ -7,11 +7,8 @@ import { DeployOPChainInput } from "scripts/DeployOPChain.s.sol"; import { DeployOPChain_TestBase } from "test/opcm/DeployOPChain.t.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; -import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; -import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; -import { SystemConfig } from "src/L1/SystemConfig.sol"; // Exposes internal functions for testing. 
contract OPContractsManager_Harness is OPContractsManager { diff --git a/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol index 7dd603a55c9d..8e7ee96d388d 100644 --- a/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol @@ -2,23 +2,24 @@ pragma solidity 0.8.15; import { Test, stdStorage, StdStorage } from "forge-std/Test.sol"; +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; -import { DelayedWETH } from "src/dispute/DelayedWETH.sol"; -import { PreimageOracle } from "src/cannon/PreimageOracle.sol"; -import { MIPS } from "src/cannon/MIPS.sol"; -import { DisputeGameFactory } from "src/dispute/DisputeGameFactory.sol"; +import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; +import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; +import { IMIPS } from "src/cannon/interfaces/IMIPS.sol"; +import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; -import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; -import { ProtocolVersions } from "src/L1/ProtocolVersions.sol"; +import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; -import { OptimismPortal2 } from "src/L1/OptimismPortal2.sol"; -import { SystemConfig } from "src/L1/SystemConfig.sol"; -import { L1CrossDomainMessenger } from "src/L1/L1CrossDomainMessenger.sol"; -import { L1ERC721Bridge } from "src/L1/L1ERC721Bridge.sol"; -import { L1StandardBridge } from "src/L1/L1StandardBridge.sol"; -import { OptimismMintableERC20Factory } from "src/universal/OptimismMintableERC20Factory.sol"; -import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; -import { Proxy } from "src/universal/Proxy.sol"; +import { 
IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; +import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; +import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; +import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; +import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; +import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; +import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; +import { IProxy } from "src/universal/interfaces/IProxy.sol"; import { DeployImplementationsInput, @@ -36,8 +37,8 @@ contract DeployImplementationsInput_Test is Test { uint256 proofMaturityDelaySeconds = 400; uint256 disputeGameFinalityDelaySeconds = 500; string release = "dev-release"; // this means implementation contracts will be deployed - SuperchainConfig superchainConfigProxy = SuperchainConfig(makeAddr("superchainConfigProxy")); - ProtocolVersions protocolVersionsProxy = ProtocolVersions(makeAddr("protocolVersionsProxy")); + ISuperchainConfig superchainConfigProxy = ISuperchainConfig(makeAddr("superchainConfigProxy")); + IProtocolVersions protocolVersionsProxy = IProtocolVersions(makeAddr("protocolVersionsProxy")); function setUp() public { dii = new DeployImplementationsInput(); @@ -95,24 +96,29 @@ contract DeployImplementationsOutput_Test is Test { } function test_set_succeeds() public { - Proxy proxy = new Proxy(address(0)); + IProxy proxy = IProxy( + DeployUtils.create1({ + _name: "Proxy", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxy.__constructor__, (address(0)))) + }) + ); address opcmImpl = address(makeAddr("opcmImpl")); vm.prank(address(0)); proxy.upgradeTo(opcmImpl); OPContractsManager opcmProxy = OPContractsManager(address(proxy)); - OptimismPortal2 optimismPortalImpl = OptimismPortal2(payable(makeAddr("optimismPortalImpl"))); - DelayedWETH delayedWETHImpl = 
DelayedWETH(payable(makeAddr("delayedWETHImpl"))); - PreimageOracle preimageOracleSingleton = PreimageOracle(makeAddr("preimageOracleSingleton")); - MIPS mipsSingleton = MIPS(makeAddr("mipsSingleton")); - SystemConfig systemConfigImpl = SystemConfig(makeAddr("systemConfigImpl")); - L1CrossDomainMessenger l1CrossDomainMessengerImpl = - L1CrossDomainMessenger(makeAddr("l1CrossDomainMessengerImpl")); - L1ERC721Bridge l1ERC721BridgeImpl = L1ERC721Bridge(makeAddr("l1ERC721BridgeImpl")); - L1StandardBridge l1StandardBridgeImpl = L1StandardBridge(payable(makeAddr("l1StandardBridgeImpl"))); - OptimismMintableERC20Factory optimismMintableERC20FactoryImpl = - OptimismMintableERC20Factory(makeAddr("optimismMintableERC20FactoryImpl")); - DisputeGameFactory disputeGameFactoryImpl = DisputeGameFactory(makeAddr("disputeGameFactoryImpl")); + IOptimismPortal2 optimismPortalImpl = IOptimismPortal2(payable(makeAddr("optimismPortalImpl"))); + IDelayedWETH delayedWETHImpl = IDelayedWETH(payable(makeAddr("delayedWETHImpl"))); + IPreimageOracle preimageOracleSingleton = IPreimageOracle(makeAddr("preimageOracleSingleton")); + IMIPS mipsSingleton = IMIPS(makeAddr("mipsSingleton")); + ISystemConfig systemConfigImpl = ISystemConfig(makeAddr("systemConfigImpl")); + IL1CrossDomainMessenger l1CrossDomainMessengerImpl = + IL1CrossDomainMessenger(makeAddr("l1CrossDomainMessengerImpl")); + IL1ERC721Bridge l1ERC721BridgeImpl = IL1ERC721Bridge(makeAddr("l1ERC721BridgeImpl")); + IL1StandardBridge l1StandardBridgeImpl = IL1StandardBridge(payable(makeAddr("l1StandardBridgeImpl"))); + IOptimismMintableERC20Factory optimismMintableERC20FactoryImpl = + IOptimismMintableERC20Factory(makeAddr("optimismMintableERC20FactoryImpl")); + IDisputeGameFactory disputeGameFactoryImpl = IDisputeGameFactory(makeAddr("disputeGameFactoryImpl")); vm.etch(address(opcmProxy), address(opcmProxy).code); vm.etch(address(opcmImpl), hex"01"); @@ -240,8 +246,8 @@ contract DeployImplementations_Test is Test { uint256 
challengePeriodSeconds = 300; uint256 proofMaturityDelaySeconds = 400; uint256 disputeGameFinalityDelaySeconds = 500; - SuperchainConfig superchainConfigProxy = SuperchainConfig(makeAddr("superchainConfigProxy")); - ProtocolVersions protocolVersionsProxy = ProtocolVersions(makeAddr("protocolVersionsProxy")); + ISuperchainConfig superchainConfigProxy = ISuperchainConfig(makeAddr("superchainConfigProxy")); + IProtocolVersions protocolVersionsProxy = IProtocolVersions(makeAddr("protocolVersionsProxy")); function setUp() public virtual { deployImplementations = new DeployImplementations(); @@ -401,15 +407,27 @@ contract DeployImplementations_Test is Test { proofMaturityDelaySeconds = uint256(hash(_seed, 3)); disputeGameFinalityDelaySeconds = uint256(hash(_seed, 4)); string memory release = string(bytes.concat(hash(_seed, 5))); - protocolVersionsProxy = ProtocolVersions(address(uint160(uint256(hash(_seed, 7))))); + protocolVersionsProxy = IProtocolVersions(address(uint160(uint256(hash(_seed, 7))))); // Must configure the ProxyAdmin contract which is used to upgrade the OPCM's proxy contract. 
- ProxyAdmin superchainProxyAdmin = new ProxyAdmin(msg.sender); - superchainConfigProxy = SuperchainConfig(address(new Proxy(payable(address(superchainProxyAdmin))))); - - SuperchainConfig superchainConfigImpl = SuperchainConfig(address(uint160(uint256(hash(_seed, 6))))); + IProxyAdmin superchainProxyAdmin = IProxyAdmin( + DeployUtils.create1({ + _name: "ProxyAdmin", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxyAdmin.__constructor__, (msg.sender))) + }) + ); + superchainConfigProxy = ISuperchainConfig( + DeployUtils.create1({ + _name: "Proxy", + _args: DeployUtils.encodeConstructor( + abi.encodeCall(IProxy.__constructor__, (address(superchainProxyAdmin))) + ) + }) + ); + + ISuperchainConfig superchainConfigImpl = ISuperchainConfig(address(uint160(uint256(hash(_seed, 6))))); vm.prank(address(superchainProxyAdmin)); - Proxy(payable(address(superchainConfigProxy))).upgradeTo(address(superchainConfigImpl)); + IProxy(payable(address(superchainConfigProxy))).upgradeTo(address(superchainConfigImpl)); vm.etch(address(superchainProxyAdmin), address(superchainProxyAdmin).code); vm.etch(address(superchainConfigProxy), address(superchainConfigProxy).code); diff --git a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol index 890f8143cfad..b7a676d2a94b 100644 --- a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol @@ -11,26 +11,27 @@ import { DeployImplementationsOutput } from "scripts/DeployImplementations.s.sol"; import { DeployOPChainInput, DeployOPChain, DeployOPChainOutput } from "scripts/DeployOPChain.s.sol"; +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; -import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; +import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; -import { AddressManager } from "src/legacy/AddressManager.sol"; -import { DelayedWETH } from 
"src/dispute/DelayedWETH.sol"; -import { DisputeGameFactory } from "src/dispute/DisputeGameFactory.sol"; -import { AnchorStateRegistry } from "src/dispute/AnchorStateRegistry.sol"; -import { FaultDisputeGame } from "src/dispute/FaultDisputeGame.sol"; -import { PermissionedDisputeGame } from "src/dispute/PermissionedDisputeGame.sol"; +import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; +import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; +import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory.sol"; +import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; +import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; +import { IPermissionedDisputeGame } from "src/dispute/interfaces/IPermissionedDisputeGame.sol"; -import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; -import { ProtocolVersions, ProtocolVersion } from "src/L1/ProtocolVersions.sol"; +import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; +import { IProtocolVersions, ProtocolVersion } from "src/L1/interfaces/IProtocolVersions.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; -import { OptimismPortal2 } from "src/L1/OptimismPortal2.sol"; -import { SystemConfig } from "src/L1/SystemConfig.sol"; -import { L1CrossDomainMessenger } from "src/L1/L1CrossDomainMessenger.sol"; -import { L1ERC721Bridge } from "src/L1/L1ERC721Bridge.sol"; -import { L1StandardBridge } from "src/L1/L1StandardBridge.sol"; -import { OptimismMintableERC20Factory } from "src/universal/OptimismMintableERC20Factory.sol"; -import { Proxy } from "src/universal/Proxy.sol"; +import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; +import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; +import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; +import { IL1ERC721Bridge } from 
"src/L1/interfaces/IL1ERC721Bridge.sol"; +import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; +import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; +import { IProxy } from "src/universal/interfaces/IProxy.sol"; import { GameType, GameTypes, Hash, OutputRoot } from "src/dispute/lib/Types.sol"; @@ -54,8 +55,13 @@ contract DeployOPChainInput_Test is Test { doi = new DeployOPChainInput(); } - function buildOpcmProxy() public returns (Proxy opcmProxy) { - opcmProxy = new Proxy(address(0)); + function buildOpcmProxy() public returns (IProxy opcmProxy) { + opcmProxy = IProxy( + DeployUtils.create1({ + _name: "Proxy", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxy.__constructor__, (address(0)))) + }) + ); OPContractsManager opcmImpl = OPContractsManager(address(makeAddr("opcmImpl"))); vm.prank(address(0)); opcmProxy.upgradeTo(address(opcmImpl)); @@ -74,7 +80,7 @@ contract DeployOPChainInput_Test is Test { doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); doi.set(doi.l2ChainId.selector, l2ChainId); - (Proxy opcmProxy) = buildOpcmProxy(); + (IProxy opcmProxy) = buildOpcmProxy(); doi.set(doi.opcmProxy.selector, address(opcmProxy)); // Compare the default inputs to the getter methods. @@ -127,21 +133,22 @@ contract DeployOPChainOutput_Test is Test { // Define default outputs to set. // We set these in storage because doing it locally in test_set_succeeds results in stack too deep. 
- ProxyAdmin opChainProxyAdmin = ProxyAdmin(makeAddr("optimismPortal2Impl")); - AddressManager addressManager = AddressManager(makeAddr("delayedWETHImpl")); - L1ERC721Bridge l1ERC721BridgeProxy = L1ERC721Bridge(makeAddr("l1ERC721BridgeProxy")); - SystemConfig systemConfigProxy = SystemConfig(makeAddr("systemConfigProxy")); - OptimismMintableERC20Factory optimismMintableERC20FactoryProxy = - OptimismMintableERC20Factory(makeAddr("optimismMintableERC20FactoryProxy")); - L1StandardBridge l1StandardBridgeProxy = L1StandardBridge(payable(makeAddr("l1StandardBridgeProxy"))); - L1CrossDomainMessenger l1CrossDomainMessengerProxy = L1CrossDomainMessenger(makeAddr("l1CrossDomainMessengerProxy")); - OptimismPortal2 optimismPortalProxy = OptimismPortal2(payable(makeAddr("optimismPortalProxy"))); - DisputeGameFactory disputeGameFactoryProxy = DisputeGameFactory(makeAddr("disputeGameFactoryProxy")); - AnchorStateRegistry anchorStateRegistryProxy = AnchorStateRegistry(makeAddr("anchorStateRegistryProxy")); - AnchorStateRegistry anchorStateRegistryImpl = AnchorStateRegistry(makeAddr("anchorStateRegistryImpl")); - FaultDisputeGame faultDisputeGame = FaultDisputeGame(makeAddr("faultDisputeGame")); - PermissionedDisputeGame permissionedDisputeGame = PermissionedDisputeGame(makeAddr("permissionedDisputeGame")); - DelayedWETH delayedWETHPermissionedGameProxy = DelayedWETH(payable(makeAddr("delayedWETHPermissionedGameProxy"))); + IProxyAdmin opChainProxyAdmin = IProxyAdmin(makeAddr("optimismPortal2Impl")); + IAddressManager addressManager = IAddressManager(makeAddr("delayedWETHImpl")); + IL1ERC721Bridge l1ERC721BridgeProxy = IL1ERC721Bridge(makeAddr("l1ERC721BridgeProxy")); + ISystemConfig systemConfigProxy = ISystemConfig(makeAddr("systemConfigProxy")); + IOptimismMintableERC20Factory optimismMintableERC20FactoryProxy = + IOptimismMintableERC20Factory(makeAddr("optimismMintableERC20FactoryProxy")); + IL1StandardBridge l1StandardBridgeProxy = 
IL1StandardBridge(payable(makeAddr("l1StandardBridgeProxy"))); + IL1CrossDomainMessenger l1CrossDomainMessengerProxy = + IL1CrossDomainMessenger(makeAddr("l1CrossDomainMessengerProxy")); + IOptimismPortal2 optimismPortalProxy = IOptimismPortal2(payable(makeAddr("optimismPortalProxy"))); + IDisputeGameFactory disputeGameFactoryProxy = IDisputeGameFactory(makeAddr("disputeGameFactoryProxy")); + IAnchorStateRegistry anchorStateRegistryProxy = IAnchorStateRegistry(makeAddr("anchorStateRegistryProxy")); + IAnchorStateRegistry anchorStateRegistryImpl = IAnchorStateRegistry(makeAddr("anchorStateRegistryImpl")); + IFaultDisputeGame faultDisputeGame = IFaultDisputeGame(makeAddr("faultDisputeGame")); + IPermissionedDisputeGame permissionedDisputeGame = IPermissionedDisputeGame(makeAddr("permissionedDisputeGame")); + IDelayedWETH delayedWETHPermissionedGameProxy = IDelayedWETH(payable(makeAddr("delayedWETHPermissionedGameProxy"))); // TODO: Eventually switch from Permissioned to Permissionless. // DelayedWETH delayedWETHPermissionlessGameProxy = // DelayedWETH(payable(makeAddr("delayedWETHPermissionlessGameProxy"))); @@ -345,8 +352,8 @@ contract DeployOPChain_TestBase is Test { uint256 proofMaturityDelaySeconds = 400; uint256 disputeGameFinalityDelaySeconds = 500; string release = "dev-release"; // this means implementation contracts will be deployed - SuperchainConfig superchainConfigProxy; - ProtocolVersions protocolVersionsProxy; + ISuperchainConfig superchainConfigProxy; + IProtocolVersions protocolVersionsProxy; // Define default inputs for DeployOPChain. // `opcm` is set during `setUp` since it is an output of the previous step. 
@@ -359,7 +366,7 @@ contract DeployOPChain_TestBase is Test { uint32 basefeeScalar = 100; uint32 blobBaseFeeScalar = 200; uint256 l2ChainId = 300; - AnchorStateRegistry.StartingAnchorRoot[] startingAnchorRoots; + IAnchorStateRegistry.StartingAnchorRoot[] startingAnchorRoots; OPContractsManager opcm = OPContractsManager(address(0)); string saltMixer = "defaultSaltMixer"; uint64 gasLimit = 30_000_000; @@ -369,13 +376,13 @@ contract DeployOPChain_TestBase is Test { uint256 cannonBlock = 400; uint256 permissionedBlock = 500; startingAnchorRoots.push( - AnchorStateRegistry.StartingAnchorRoot({ + IAnchorStateRegistry.StartingAnchorRoot({ gameType: GameTypes.CANNON, outputRoot: OutputRoot({ root: Hash.wrap(keccak256("defaultOutputRootCannon")), l2BlockNumber: cannonBlock }) }) ); startingAnchorRoots.push( - AnchorStateRegistry.StartingAnchorRoot({ + IAnchorStateRegistry.StartingAnchorRoot({ gameType: GameTypes.PERMISSIONED_CANNON, outputRoot: OutputRoot({ root: Hash.wrap(keccak256("defaultOutputRootPermissioned")), @@ -456,13 +463,13 @@ contract DeployOPChain_Test is DeployOPChain_TestBase { uint256 cannonBlock = uint256(hash(_seed, 9)); uint256 permissionedBlock = uint256(hash(_seed, 10)); startingAnchorRoots.push( - AnchorStateRegistry.StartingAnchorRoot({ + IAnchorStateRegistry.StartingAnchorRoot({ gameType: GameTypes.CANNON, outputRoot: OutputRoot({ root: Hash.wrap(keccak256(abi.encode(_seed, 11))), l2BlockNumber: cannonBlock }) }) ); startingAnchorRoots.push( - AnchorStateRegistry.StartingAnchorRoot({ + IAnchorStateRegistry.StartingAnchorRoot({ gameType: GameTypes.PERMISSIONED_CANNON, outputRoot: OutputRoot({ root: Hash.wrap(keccak256(abi.encode(_seed, 12))), diff --git a/packages/contracts-bedrock/test/opcm/DeploySuperchain.t.sol b/packages/contracts-bedrock/test/opcm/DeploySuperchain.t.sol index a6bcf2aa2f50..8641772a74d9 100644 --- a/packages/contracts-bedrock/test/opcm/DeploySuperchain.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeploySuperchain.t.sol @@ 
-7,7 +7,7 @@ import { stdToml } from "forge-std/StdToml.sol"; import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; import { Proxy } from "src/universal/Proxy.sol"; import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; -import { ProtocolVersions, ProtocolVersion } from "src/L1/ProtocolVersions.sol"; +import { IProtocolVersions, ProtocolVersion } from "src/L1/interfaces/IProtocolVersions.sol"; import { DeploySuperchainInput, DeploySuperchain, DeploySuperchainOutput } from "scripts/DeploySuperchain.s.sol"; contract DeploySuperchainInput_Test is Test { @@ -58,8 +58,8 @@ contract DeploySuperchainOutput_Test is Test { ProxyAdmin superchainProxyAdmin = ProxyAdmin(makeAddr("superchainProxyAdmin")); SuperchainConfig superchainConfigImpl = SuperchainConfig(makeAddr("superchainConfigImpl")); SuperchainConfig superchainConfigProxy = SuperchainConfig(makeAddr("superchainConfigProxy")); - ProtocolVersions protocolVersionsImpl = ProtocolVersions(makeAddr("protocolVersionsImpl")); - ProtocolVersions protocolVersionsProxy = ProtocolVersions(makeAddr("protocolVersionsProxy")); + IProtocolVersions protocolVersionsImpl = IProtocolVersions(makeAddr("protocolVersionsImpl")); + IProtocolVersions protocolVersionsProxy = IProtocolVersions(makeAddr("protocolVersionsProxy")); // Ensure each address has code, since these are expected to be contracts. vm.etch(address(superchainProxyAdmin), hex"01"); From 28726a33ddaed29f4f815c3d8ad5c80ba13d0d9e Mon Sep 17 00:00:00 2001 From: Roberto Bayardo Date: Tue, 1 Oct 2024 15:33:17 -0700 Subject: [PATCH 102/116] Revert "Holocene extensions to L1Block.sol (#12096)" (#12236) This reverts commit 5bd72f690bf09a6ae3bbe1802c4be60cf99628b3. 
--- packages/contracts-bedrock/.gas-snapshot | 6 +- packages/contracts-bedrock/semver-lock.json | 8 +- .../snapshots/abi/L1Block.json | 33 ------ .../snapshots/abi/L1BlockInterop.json | 33 ------ .../snapshots/storageLayout/L1Block.json | 14 --- .../storageLayout/L1BlockInterop.json | 16 +-- packages/contracts-bedrock/src/L2/L1Block.sol | 63 +--------- .../src/L2/L1BlockInterop.sol | 4 +- .../src/L2/interfaces/IL1Block.sol | 3 - .../src/L2/interfaces/IL1BlockInterop.sol | 3 - .../src/libraries/Encoding.sol | 46 -------- .../contracts-bedrock/test/L2/L1Block.t.sol | 110 ------------------ 12 files changed, 12 insertions(+), 327 deletions(-) diff --git a/packages/contracts-bedrock/.gas-snapshot b/packages/contracts-bedrock/.gas-snapshot index 700053bd8ab9..8e43cb748941 100644 --- a/packages/contracts-bedrock/.gas-snapshot +++ b/packages/contracts-bedrock/.gas-snapshot @@ -1,6 +1,6 @@ -GasBenchMark_L1BlockInterop_DepositsComplete:test_depositsComplete_benchmark() (gas: 7589) -GasBenchMark_L1BlockInterop_DepositsComplete_Warm:test_depositsComplete_benchmark() (gas: 5589) -GasBenchMark_L1BlockInterop_SetValuesInterop:test_setL1BlockValuesInterop_benchmark() (gas: 175655) +GasBenchMark_L1BlockInterop_DepositsComplete:test_depositsComplete_benchmark() (gas: 7567) +GasBenchMark_L1BlockInterop_DepositsComplete_Warm:test_depositsComplete_benchmark() (gas: 5567) +GasBenchMark_L1BlockInterop_SetValuesInterop:test_setL1BlockValuesInterop_benchmark() (gas: 175677) GasBenchMark_L1BlockInterop_SetValuesInterop_Warm:test_setL1BlockValuesInterop_benchmark() (gas: 5099) GasBenchMark_L1Block_SetValuesEcotone:test_setL1BlockValuesEcotone_benchmark() (gas: 158531) GasBenchMark_L1Block_SetValuesEcotone_Warm:test_setL1BlockValuesEcotone_benchmark() (gas: 7597) diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index c6dcf97d6302..73d8ba52c48b 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ 
b/packages/contracts-bedrock/semver-lock.json @@ -80,12 +80,12 @@ "sourceCodeHash": "0x4f21025d4b5c9c74cf7040db6f8e9ce605b82931e3012fee51d3f5d9fbd7b73f" }, "src/L2/L1Block.sol": { - "initCodeHash": "0x48d118de2a69fb0fbf6a8da4603025e12da1360da8fb70a5e56342ba64b3ff5f", - "sourceCodeHash": "0x04d25cbf0c4ea5025b0dd3f79f0a32f6623ddb869cff35649072ab3ad964b310" + "initCodeHash": "0xd12353c5bf71c6765cc9292eecf262f216e67f117f4ba6287796a5207dbca00f", + "sourceCodeHash": "0xfe3a9585d9bfca8428e12759cab68a3114374e5c37371cfe08bb1976a9a5a041" }, "src/L2/L1BlockInterop.sol": { - "initCodeHash": "0x7f87e0b8be9801cb242c469ec7999eb80221f65063aedd4ca4923a5e0fb0e5a7", - "sourceCodeHash": "0x722071a9d08dcbeda9cdaadeb2dd679a8bc192563e4a0439f4cd74439fa75581" + "initCodeHash": "0x77b3b2151fe14ea36a640469115a5e4de27f7654a9606a9d0701522c6a4ad887", + "sourceCodeHash": "0x7417677643e1df1ae1782513b94c7821097b9529d3f8626c3bcb8b3a9ae0d180" }, "src/L2/L1FeeVault.sol": { "initCodeHash": "0x3bfcd57e25ad54b66c374f63e24e33a6cf107044aa8f5f69ef21202c380b5c5b", diff --git a/packages/contracts-bedrock/snapshots/abi/L1Block.json b/packages/contracts-bedrock/snapshots/abi/L1Block.json index 6efa216b5bd6..020c9e942c75 100644 --- a/packages/contracts-bedrock/snapshots/abi/L1Block.json +++ b/packages/contracts-bedrock/snapshots/abi/L1Block.json @@ -77,32 +77,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [], - "name": "eip1559Denominator", - "outputs": [ - { - "internalType": "uint64", - "name": "", - "type": "uint64" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "eip1559Elasticity", - "outputs": [ - { - "internalType": "uint64", - "name": "", - "type": "uint64" - } - ], - "stateMutability": "view", - "type": "function" - }, { "inputs": [], "name": "gasPayingToken", @@ -308,13 +282,6 @@ "stateMutability": "nonpayable", "type": "function" }, - { - "inputs": [], - "name": "setL1BlockValuesHolocene", - "outputs": [], - "stateMutability": 
"nonpayable", - "type": "function" - }, { "inputs": [], "name": "timestamp", diff --git a/packages/contracts-bedrock/snapshots/abi/L1BlockInterop.json b/packages/contracts-bedrock/snapshots/abi/L1BlockInterop.json index ba871eb2086a..ab089f0cec55 100644 --- a/packages/contracts-bedrock/snapshots/abi/L1BlockInterop.json +++ b/packages/contracts-bedrock/snapshots/abi/L1BlockInterop.json @@ -97,32 +97,6 @@ "stateMutability": "nonpayable", "type": "function" }, - { - "inputs": [], - "name": "eip1559Denominator", - "outputs": [ - { - "internalType": "uint64", - "name": "", - "type": "uint64" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "eip1559Elasticity", - "outputs": [ - { - "internalType": "uint64", - "name": "", - "type": "uint64" - } - ], - "stateMutability": "view", - "type": "function" - }, { "inputs": [], "name": "gasPayingToken", @@ -378,13 +352,6 @@ "stateMutability": "nonpayable", "type": "function" }, - { - "inputs": [], - "name": "setL1BlockValuesHolocene", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, { "inputs": [], "name": "setL1BlockValuesInterop", diff --git a/packages/contracts-bedrock/snapshots/storageLayout/L1Block.json b/packages/contracts-bedrock/snapshots/storageLayout/L1Block.json index 5ee7d1e31942..2928d2147b5c 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/L1Block.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/L1Block.json @@ -75,19 +75,5 @@ "offset": 0, "slot": "7", "type": "uint256" - }, - { - "bytes": "8", - "label": "eip1559Denominator", - "offset": 0, - "slot": "8", - "type": "uint64" - }, - { - "bytes": "8", - "label": "eip1559Elasticity", - "offset": 8, - "slot": "8", - "type": "uint64" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/L1BlockInterop.json b/packages/contracts-bedrock/snapshots/storageLayout/L1BlockInterop.json index 4f0eeb0e52d7..14ee2ff9609a 100644 --- 
a/packages/contracts-bedrock/snapshots/storageLayout/L1BlockInterop.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/L1BlockInterop.json @@ -76,25 +76,11 @@ "slot": "7", "type": "uint256" }, - { - "bytes": "8", - "label": "eip1559Denominator", - "offset": 0, - "slot": "8", - "type": "uint64" - }, - { - "bytes": "8", - "label": "eip1559Elasticity", - "offset": 8, - "slot": "8", - "type": "uint64" - }, { "bytes": "64", "label": "dependencySet", "offset": 0, - "slot": "9", + "slot": "8", "type": "struct EnumerableSet.UintSet" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/src/L2/L1Block.sol b/packages/contracts-bedrock/src/L2/L1Block.sol index feb9f18d1b89..c61f45b83629 100644 --- a/packages/contracts-bedrock/src/L2/L1Block.sol +++ b/packages/contracts-bedrock/src/L2/L1Block.sol @@ -57,15 +57,9 @@ contract L1Block is ISemver, IGasToken { /// @notice The latest L1 blob base fee. uint256 public blobBaseFee; - /// @notice The eip-1550 base fee change denominator value. - uint64 public eip1559Denominator; - - /// @notice The eip-1550 base fee change elasticity value. - uint64 public eip1559Elasticity; - - /// @custom:semver 1.5.1-beta.3 + /// @custom:semver 1.5.1-beta.2 function version() public pure virtual returns (string memory) { - return "1.5.1-beta.3"; + return "1.5.1-beta.2"; } /// @notice Returns the gas paying token, its decimals, name and symbol. @@ -174,59 +168,6 @@ contract L1Block is ISemver, IGasToken { } } - /// @notice Updates the L1 block values for a Holocene upgraded chain. - /// Params are packed and passed in as raw msg.data instead of ABI to reduce calldata size. - /// Params are expected to be in the following order: - /// 1. _baseFeeScalar L1 base fee scalar - /// 2. _blobBaseFeeScalar L1 blob base fee scalar - /// 3. _sequenceNumber Number of L2 blocks since epoch start. - /// 4. _timestamp L1 timestamp. - /// 5. _number L1 blocknumber. - /// 6. _basefee L1 base fee. - /// 7. _blobBaseFee L1 blob base fee. 
- /// 8. _hash L1 blockhash. - /// 9. _batcherHash Versioned hash to authenticate batcher by. - /// 10. _eip1559Elasticity EIP-1559 elasticity multiplier value. - /// 11. _eip1559Denominator EIP-1559 base fee change denominator value. - function setL1BlockValuesHolocene() public { - _setL1BlockValuesHolocene(); - } - - /// @notice Updates the L1 block values for a Holocene upgraded chain. - /// Params are packed and passed in as raw msg.data instead of ABI to reduce calldata size. - /// Params are expected to be in the following order: - /// 1. _baseFeeScalar L1 base fee scalar - /// 2. _blobBaseFeeScalar L1 blob base fee scalar - /// 3. _sequenceNumber Number of L2 blocks since epoch start. - /// 4. _timestamp L1 timestamp. - /// 5. _number L1 blocknumber. - /// 6. _basefee L1 base fee. - /// 7. _blobBaseFee L1 blob base fee. - /// 8. _hash L1 blockhash. - /// 9. _batcherHash Versioned hash to authenticate batcher by. - /// 10. _eip1559Elasticity EIP-1559 elasticity multiplier value. - /// 11. _eip1559Denominator EIP-1559 base fee change denominator value. - function _setL1BlockValuesHolocene() internal { - address depositor = DEPOSITOR_ACCOUNT(); - assembly { - // Revert if the caller is not the depositor account. 
- if xor(caller(), depositor) { - mstore(0x00, 0x3cc50b45) // 0x3cc50b45 is the 4-byte selector of "NotDepositor()" - revert(0x1C, 0x04) // returns the stored 4-byte selector from above - } - // sequencenum (uint64), blobBaseFeeScalar (uint32), baseFeeScalar (uint32) - sstore(sequenceNumber.slot, shr(128, calldataload(4))) - // number (uint64) and timestamp (uint64) - sstore(number.slot, shr(128, calldataload(20))) - sstore(basefee.slot, calldataload(36)) // uint256 - sstore(blobBaseFee.slot, calldataload(68)) // uint256 - sstore(hash.slot, calldataload(100)) // bytes32 - sstore(batcherHash.slot, calldataload(132)) // bytes32 - // eip1559Denominator (uint64) and eip1559Elasticity (uint64) - sstore(eip1559Denominator.slot, shr(128, calldataload(164))) // uint64 - } - } - /// @notice Sets the gas paying token for the L2 system. Can only be called by the special /// depositor account. This function is not called on every L2 block but instead /// only called by specially crafted L1 deposit transactions. diff --git a/packages/contracts-bedrock/src/L2/L1BlockInterop.sol b/packages/contracts-bedrock/src/L2/L1BlockInterop.sol index 189e0fe7d7d0..15ea67f5e6b3 100644 --- a/packages/contracts-bedrock/src/L2/L1BlockInterop.sol +++ b/packages/contracts-bedrock/src/L2/L1BlockInterop.sol @@ -42,9 +42,9 @@ contract L1BlockInterop is L1Block { /// keccak256(abi.encode(uint256(keccak256("l1Block.identifier.isDeposit")) - 1)) & ~bytes32(uint256(0xff)) uint256 internal constant IS_DEPOSIT_SLOT = 0x921bd3a089295c6e5540e8fba8195448d253efd6f2e3e495b499b627dc36a300; - /// @custom:semver +interop-beta.1 + /// @custom:semver +interop function version() public pure override returns (string memory) { - return string.concat(super.version(), "+interop-beta.1"); + return string.concat(super.version(), "+interop"); } /// @notice Returns whether the call was triggered from a a deposit or not. 
diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL1Block.sol b/packages/contracts-bedrock/src/L2/interfaces/IL1Block.sol index 0eba9a9973f3..a43b3c7c3963 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL1Block.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IL1Block.sol @@ -34,11 +34,8 @@ interface IL1Block { ) external; function setL1BlockValuesEcotone() external; - function setL1BlockValuesHolocene() external; function timestamp() external view returns (uint64); function version() external pure returns (string memory); - function eip1559Denominator() external view returns (uint64); - function eip1559Elasticity() external view returns (uint64); function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/L2/interfaces/IL1BlockInterop.sol b/packages/contracts-bedrock/src/L2/interfaces/IL1BlockInterop.sol index 31943804b961..dd72e3fa6f89 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/IL1BlockInterop.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/IL1BlockInterop.sol @@ -52,12 +52,9 @@ interface IL1BlockInterop { ) external; function setL1BlockValuesEcotone() external; - function setL1BlockValuesHolocene() external; function setL1BlockValuesInterop() external; function timestamp() external view returns (uint64); function version() external pure returns (string memory); - function eip1559Denominator() external view returns (uint64); - function eip1559Elasticity() external view returns (uint64); function __constructor__() external; } diff --git a/packages/contracts-bedrock/src/libraries/Encoding.sol b/packages/contracts-bedrock/src/libraries/Encoding.sol index ea33f3ca50bf..edcdd4ed75e2 100644 --- a/packages/contracts-bedrock/src/libraries/Encoding.sol +++ b/packages/contracts-bedrock/src/libraries/Encoding.sol @@ -213,50 +213,4 @@ library Encoding { _batcherHash ); } - - /// @notice Returns an appropriately encoded call to L1Block.setL1BlockValuesHolocene - /// @param _baseFeeScalar L1 base fee Scalar 
- /// @param _blobBaseFeeScalar L1 blob base fee Scalar - /// @param _sequenceNumber Number of L2 blocks since epoch start. - /// @param _timestamp L1 timestamp. - /// @param _number L1 blocknumber. - /// @param _baseFee L1 base fee. - /// @param _blobBaseFee L1 blob base fee. - /// @param _hash L1 blockhash. - /// @param _batcherHash Versioned hash to authenticate batcher by. - /// @param _eip1559Elasticity EIP-1559 elasticity parameter - /// @param _eip1559Denominator EIP-1559 denominator parameter - function encodeSetL1BlockValuesHolocene( - uint32 _baseFeeScalar, - uint32 _blobBaseFeeScalar, - uint64 _sequenceNumber, - uint64 _timestamp, - uint64 _number, - uint256 _baseFee, - uint256 _blobBaseFee, - bytes32 _hash, - bytes32 _batcherHash, - uint64 _eip1559Elasticity, - uint64 _eip1559Denominator - ) - internal - pure - returns (bytes memory) - { - bytes4 functionSignature = bytes4(keccak256("setL1BlockValuesHolocene()")); - return abi.encodePacked( - functionSignature, - _baseFeeScalar, - _blobBaseFeeScalar, - _sequenceNumber, - _timestamp, - _number, - _baseFee, - _blobBaseFee, - _hash, - _batcherHash, - _eip1559Elasticity, - _eip1559Denominator - ); - } } diff --git a/packages/contracts-bedrock/test/L2/L1Block.t.sol b/packages/contracts-bedrock/test/L2/L1Block.t.sol index 06de35f51c1d..762553a2ff2f 100644 --- a/packages/contracts-bedrock/test/L2/L1Block.t.sol +++ b/packages/contracts-bedrock/test/L2/L1Block.t.sol @@ -165,116 +165,6 @@ contract L1BlockEcotone_Test is L1BlockTest { } } -contract L1BlockHolocene_Test is L1BlockTest { - /// @dev Tests that setL1BlockValuesHolocene updates the values appropriately. 
- function testFuzz_setL1BlockValuesHolocene_succeeds( - uint32 baseFeeScalar, - uint32 blobBaseFeeScalar, - uint64 sequenceNumber, - uint64 timestamp, - uint64 number, - uint256 baseFee, - uint256 blobBaseFee, - bytes32 hash, - bytes32 batcherHash, - uint64 eip1559Elasticity, - uint64 eip1559Denominator - ) - external - { - bytes memory functionCallDataPacked = Encoding.encodeSetL1BlockValuesHolocene( - baseFeeScalar, - blobBaseFeeScalar, - sequenceNumber, - timestamp, - number, - baseFee, - blobBaseFee, - hash, - batcherHash, - eip1559Elasticity, - eip1559Denominator - ); - - vm.prank(depositor); - (bool success,) = address(l1Block).call(functionCallDataPacked); - assertTrue(success, "Function call failed"); - - assertEq(l1Block.baseFeeScalar(), baseFeeScalar); - assertEq(l1Block.blobBaseFeeScalar(), blobBaseFeeScalar); - assertEq(l1Block.sequenceNumber(), sequenceNumber); - assertEq(l1Block.timestamp(), timestamp); - assertEq(l1Block.number(), number); - assertEq(l1Block.basefee(), baseFee); - assertEq(l1Block.blobBaseFee(), blobBaseFee); - assertEq(l1Block.hash(), hash); - assertEq(l1Block.batcherHash(), batcherHash); - assertEq(l1Block.eip1559Denominator(), eip1559Denominator); - assertEq(l1Block.eip1559Elasticity(), eip1559Elasticity); - - // ensure we didn't accidentally pollute the 128 bits of the sequencenum+scalars slot that - // should be empty - bytes32 scalarsSlot = vm.load(address(l1Block), bytes32(uint256(3))); - bytes32 mask128 = hex"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF00000000000000000000000000000000"; - - assertEq(0, scalarsSlot & mask128); - - // ensure we didn't accidentally pollute the 128 bits of the number & timestamp slot that - // should be empty - bytes32 numberTimestampSlot = vm.load(address(l1Block), bytes32(uint256(0))); - assertEq(0, numberTimestampSlot & mask128); - - // ensure we didn't accidentally pollute the 128 bits of the eip-1559 parameters slot that - // should be empty - bytes32 eip1559ParamsSlot = vm.load(address(l1Block), 
bytes32(uint256(9))); - assertEq(0, eip1559ParamsSlot & mask128); - } - - /// @dev Tests that `setL1BlockValuesHolocene` succeeds if sender address is the depositor - function test_setL1BlockValuesHolocene_isDepositor_succeeds() external { - bytes memory functionCallDataPacked = Encoding.encodeSetL1BlockValuesHolocene( - type(uint32).max, - type(uint32).max, - type(uint64).max, - type(uint64).max, - type(uint64).max, - type(uint256).max, - type(uint256).max, - bytes32(type(uint256).max), - bytes32(type(uint256).max), - type(uint64).max, - type(uint64).max - ); - - vm.prank(depositor); - (bool success,) = address(l1Block).call(functionCallDataPacked); - assertTrue(success, "function call failed"); - } - - /// @dev Tests that `setL1BlockValuesEcotone` reverts if sender address is not the depositor - function test_setL1BlockValuesHolocene_notDepositor_reverts() external { - bytes memory functionCallDataPacked = Encoding.encodeSetL1BlockValuesHolocene( - type(uint32).max, - type(uint32).max, - type(uint64).max, - type(uint64).max, - type(uint64).max, - type(uint256).max, - type(uint256).max, - bytes32(type(uint256).max), - bytes32(type(uint256).max), - type(uint64).max, - type(uint64).max - ); - - (bool success, bytes memory data) = address(l1Block).call(functionCallDataPacked); - assertTrue(!success, "function call should have failed"); - // make sure return value is the expected function selector for "NotDepositor()" - bytes memory expReturn = hex"3cc50b45"; - assertEq(data, expReturn); - } -} - contract L1BlockCustomGasToken_Test is L1BlockTest { function testFuzz_setGasPayingToken_succeeds( address _token, From 73038c881b48a591c216c880d946f41efb185a32 Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Wed, 2 Oct 2024 00:35:04 +0200 Subject: [PATCH 103/116] op-e2e/system/fjord: Fix check script test (#12231) --- op-e2e/system/fjord/check_scripts_test.go | 46 ++++++++++------------- 1 file changed, 20 insertions(+), 26 deletions(-) diff --git 
a/op-e2e/system/fjord/check_scripts_test.go b/op-e2e/system/fjord/check_scripts_test.go index b6115dbd7e0b..fb1744dafe8e 100644 --- a/op-e2e/system/fjord/check_scripts_test.go +++ b/op-e2e/system/fjord/check_scripts_test.go @@ -2,10 +2,12 @@ package fjord import ( "context" + "fmt" "testing" op_e2e "github.com/ethereum-optimism/optimism/op-e2e" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" "github.com/ethereum/go-ethereum/common/hexutil" @@ -23,47 +25,37 @@ func TestCheckFjordScript(t *testing.T) { op_e2e.InitParallel(t) genesisActivation := hexutil.Uint64(0) tests := []struct { - name string - fjordActivation *hexutil.Uint64 - expectErr bool + fjord bool }{ - { - name: "fjord_activated", - fjordActivation: &genesisActivation, - expectErr: false, - }, - { - name: "fjord_unactivated", - fjordActivation: nil, - expectErr: true, - }, + {fjord: true}, + {fjord: false}, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - op_e2e.InitParallel(t) + tt := tt + t.Run(fmt.Sprintf("fjord=%t", tt.fjord), func(t *testing.T) { + t.Parallel() log := testlog.Logger(t, log.LevelInfo) - - cfg := e2esys.DefaultSystemConfig(t) - cfg.DeployConfig.L1CancunTimeOffset = &genesisActivation - cfg.DeployConfig.L2GenesisRegolithTimeOffset = &genesisActivation - cfg.DeployConfig.L2GenesisCanyonTimeOffset = &genesisActivation - cfg.DeployConfig.L2GenesisDeltaTimeOffset = &genesisActivation - cfg.DeployConfig.L2GenesisEcotoneTimeOffset = &genesisActivation - - cfg.DeployConfig.L2GenesisFjordTimeOffset = tt.fjordActivation + cfg := e2esys.EcotoneSystemConfig(t, &genesisActivation) + if tt.fjord { + cfg.DeployConfig.L2GenesisFjordTimeOffset = ptr(hexutil.Uint64(cfg.DeployConfig.L2BlockTime)) + } else { + cfg.DeployConfig.L2GenesisFjordTimeOffset = nil + } sys, err := cfg.Start(t) require.NoError(t, err, "Error starting up system") + require.NoError(t, wait.ForNextBlock(context.Background(), 
sys.NodeClient(e2esys.RoleSeq))) + checkFjordConfig := &fjordChecks.CheckFjordConfig{ Log: log, - L2: sys.NodeClient("sequencer"), + L2: sys.NodeClient(e2esys.RoleSeq), Key: sys.Cfg.Secrets.Alice, Addr: sys.Cfg.Secrets.Addresses().Alice, } - if tt.expectErr { + if !tt.fjord { err = fjordChecks.CheckRIP7212(context.Background(), checkFjordConfig) require.Error(t, err, "expected error for CheckRIP7212") err = fjordChecks.CheckGasPriceOracle(context.Background(), checkFjordConfig) @@ -83,3 +75,5 @@ func TestCheckFjordScript(t *testing.T) { }) } } + +func ptr[T any](t T) *T { return &t } From 445a3d40ec13f36d3ac0ae7bcdf6fb337c540c5f Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Tue, 1 Oct 2024 19:49:44 -0700 Subject: [PATCH 104/116] feat(sequencer): Origin Selector asynchronously prefetches the next origin from events (#12134) * Sequencer: Origin Selector optimistically prefetches the next origin in background * L1OriginSelector erases cached state on reset * L1OriginSelector attempts to fetch on ForkchoiceUpdateEvent * Move to a fully event-driven model, no extra goroutines * Add missing test comment * Minor cleanup, more tests * Tune the context timeouts --- op-e2e/actions/helpers/l2_sequencer.go | 4 +- op-node/rollup/driver/driver.go | 3 +- op-node/rollup/sequencing/origin_selector.go | 181 ++++++-- .../rollup/sequencing/origin_selector_test.go | 428 ++++++++++++++++-- 4 files changed, 551 insertions(+), 65 deletions(-) diff --git a/op-e2e/actions/helpers/l2_sequencer.go b/op-e2e/actions/helpers/l2_sequencer.go index 98becdcc87a4..424e12b23fda 100644 --- a/op-e2e/actions/helpers/l2_sequencer.go +++ b/op-e2e/actions/helpers/l2_sequencer.go @@ -56,8 +56,9 @@ func NewL2Sequencer(t Testing, log log.Logger, l1 derive.L1Fetcher, blobSrc deri ver := NewL2Verifier(t, log, l1, blobSrc, altDASrc, eng, cfg, &sync.Config{}, safedb.Disabled, interopBackend) attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, eng) seqConfDepthL1 := confdepth.NewConfDepth(seqConfDepth, 
ver.syncStatus.L1Head, l1) + originSelector := sequencing.NewL1OriginSelector(t.Ctx(), log, cfg, seqConfDepthL1) l1OriginSelector := &MockL1OriginSelector{ - actual: sequencing.NewL1OriginSelector(log, cfg, seqConfDepthL1), + actual: originSelector, } metr := metrics.NoopMetrics seqStateListener := node.DisabledConfigPersistence{} @@ -78,6 +79,7 @@ func NewL2Sequencer(t Testing, log log.Logger, l1 derive.L1Fetcher, blobSrc deri }, } ver.eventSys.Register("sequencer", seq, opts) + ver.eventSys.Register("origin-selector", originSelector, opts) require.NoError(t, seq.Init(t.Ctx(), true)) return &L2Sequencer{ L2Verifier: ver, diff --git a/op-node/rollup/driver/driver.go b/op-node/rollup/driver/driver.go index 81607e612d5a..1fd751846cf3 100644 --- a/op-node/rollup/driver/driver.go +++ b/op-node/rollup/driver/driver.go @@ -245,7 +245,8 @@ func NewDriver( asyncGossiper := async.NewAsyncGossiper(driverCtx, network, log, metrics) attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, l2) sequencerConfDepth := confdepth.NewConfDepth(driverCfg.SequencerConfDepth, statusTracker.L1Head, l1) - findL1Origin := sequencing.NewL1OriginSelector(log, cfg, sequencerConfDepth) + findL1Origin := sequencing.NewL1OriginSelector(driverCtx, log, cfg, sequencerConfDepth) + sys.Register("origin-selector", findL1Origin, opts) sequencer = sequencing.NewSequencer(driverCtx, log, cfg, attrBuilder, findL1Origin, sequencerStateListener, sequencerConductor, asyncGossiper, metrics) sys.Register("sequencer", sequencer, opts) diff --git a/op-node/rollup/sequencing/origin_selector.go b/op-node/rollup/sequencing/origin_selector.go index 41bd64505415..b64b45dcfd20 100644 --- a/op-node/rollup/sequencing/origin_selector.go +++ b/op-node/rollup/sequencing/origin_selector.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "sync" "time" "github.com/ethereum/go-ethereum" @@ -11,6 +12,8 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup" 
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-node/rollup/engine" + "github.com/ethereum-optimism/optimism/op-node/rollup/event" "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -20,15 +23,23 @@ type L1Blocks interface { } type L1OriginSelector struct { + ctx context.Context log log.Logger cfg *rollup.Config spec *rollup.ChainSpec l1 L1Blocks + + // Internal cache of L1 origins for faster access. + currentOrigin eth.L1BlockRef + nextOrigin eth.L1BlockRef + + mu sync.Mutex } -func NewL1OriginSelector(log log.Logger, cfg *rollup.Config, l1 L1Blocks) *L1OriginSelector { +func NewL1OriginSelector(ctx context.Context, log log.Logger, cfg *rollup.Config, l1 L1Blocks) *L1OriginSelector { return &L1OriginSelector{ + ctx: ctx, log: log, cfg: cfg, spec: rollup.NewChainSpec(cfg), @@ -36,62 +47,162 @@ func NewL1OriginSelector(log log.Logger, cfg *rollup.Config, l1 L1Blocks) *L1Ori } } +func (los *L1OriginSelector) OnEvent(ev event.Event) bool { + switch x := ev.(type) { + case engine.ForkchoiceUpdateEvent: + los.onForkchoiceUpdate(x.UnsafeL2Head) + case rollup.ResetEvent: + los.reset() + default: + return false + } + return true +} + // FindL1Origin determines what the next L1 Origin should be. // The L1 Origin is either the L2 Head's Origin, or the following L1 block // if the next L2 block's time is greater than or equal to the L2 Head's Origin. func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2BlockRef) (eth.L1BlockRef, error) { - // Grab a reference to the current L1 origin block. This call is by hash and thus easily cached. - currentOrigin, err := los.l1.L1BlockRefByHash(ctx, l2Head.L1Origin.Hash) + currentOrigin, nextOrigin, err := los.CurrentAndNextOrigin(ctx, l2Head) if err != nil { return eth.L1BlockRef{}, err } + + // If the next L2 block time is greater than the next origin block's time, we can choose to + // start building on top of the next origin. 
Sequencer implementation has some leeway here and + // could decide to continue to build on top of the previous origin until the Sequencer runs out + // of slack. For simplicity, we implement our Sequencer to always start building on the latest + // L1 block when we can. + if nextOrigin != (eth.L1BlockRef{}) && l2Head.Time+los.cfg.BlockTime >= nextOrigin.Time { + return nextOrigin, nil + } + msd := los.spec.MaxSequencerDrift(currentOrigin.Time) log := los.log.New("current", currentOrigin, "current_time", currentOrigin.Time, "l2_head", l2Head, "l2_head_time", l2Head.Time, "max_seq_drift", msd) - seqDrift := l2Head.Time + los.cfg.BlockTime - currentOrigin.Time + pastSeqDrift := l2Head.Time+los.cfg.BlockTime-currentOrigin.Time > msd - // If we are past the sequencer depth, we may want to advance the origin, but need to still - // check the time of the next origin. - pastSeqDrift := seqDrift > msd - if pastSeqDrift { - log.Warn("Next L2 block time is past the sequencer drift + current origin time") - seqDrift = msd + // If we are not past the max sequencer drift, we can just return the current origin. + if !pastSeqDrift { + return currentOrigin, nil } - // Calculate the maximum time we can spend attempting to fetch the next L1 origin block. - // Time spent fetching this information is time not spent building the next L2 block, so - // we generally prioritize keeping this value small, allowing for a nonzero failure rate. - // As the next L2 block time approaches the max sequencer drift, increase our tolerance for - // slower L1 fetches in order to avoid falling too far behind. - fetchTimeout := time.Second + (9*time.Second*time.Duration(seqDrift))/time.Duration(msd) - fetchCtx, cancel := context.WithTimeout(ctx, fetchTimeout) - defer cancel() + // Otherwise, we need to find the next L1 origin block in order to continue producing blocks. 
+ log.Warn("Next L2 block time is past the sequencer drift + current origin time") - // Attempt to find the next L1 origin block, where the next origin is the immediate child of - // the current origin block. - // The L1 source can be shimmed to hide new L1 blocks and enforce a sequencer confirmation distance. - nextOrigin, err := los.l1.L1BlockRefByNumber(fetchCtx, currentOrigin.Number+1) - if err != nil { - if pastSeqDrift { + if nextOrigin == (eth.L1BlockRef{}) { + fetchCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + // If the next origin is not set, we need to fetch it now. + nextOrigin, err = los.fetch(fetchCtx, currentOrigin.Number+1) + if err != nil { return eth.L1BlockRef{}, fmt.Errorf("cannot build next L2 block past current L1 origin %s by more than sequencer time drift, and failed to find next L1 origin: %w", currentOrigin, err) } + } + + // If the next origin is ahead of the L2 head, we must return the current origin. + if l2Head.Time+los.cfg.BlockTime < nextOrigin.Time { + return currentOrigin, nil + } + + return nextOrigin, nil +} + +func (los *L1OriginSelector) CurrentAndNextOrigin(ctx context.Context, l2Head eth.L2BlockRef) (eth.L1BlockRef, eth.L1BlockRef, error) { + los.mu.Lock() + defer los.mu.Unlock() + + if l2Head.L1Origin == los.currentOrigin.ID() { + // Most likely outcome: the L2 head is still on the current origin. + } else if l2Head.L1Origin == los.nextOrigin.ID() { + // If the L2 head has progressed to the next origin, update the current and next origins. + los.currentOrigin = los.nextOrigin + los.nextOrigin = eth.L1BlockRef{} + } else { + // If for some reason the L2 head is not on the current or next origin, we need to find the + // current origin block and reset the next origin. + // This is most likely to occur on the first block after a restart. + + // Grab a reference to the current L1 origin block. This call is by hash and thus easily cached. 
+ currentOrigin, err := los.l1.L1BlockRefByHash(ctx, l2Head.L1Origin.Hash) + if err != nil { + return eth.L1BlockRef{}, eth.L1BlockRef{}, err + } + + los.currentOrigin = currentOrigin + los.nextOrigin = eth.L1BlockRef{} + } + + return los.currentOrigin, los.nextOrigin, nil +} + +func (los *L1OriginSelector) maybeSetNextOrigin(nextOrigin eth.L1BlockRef) { + los.mu.Lock() + defer los.mu.Unlock() + + // Set the next origin if it is the immediate child of the current origin. + if nextOrigin.ParentHash == los.currentOrigin.Hash { + los.nextOrigin = nextOrigin + } +} + +func (los *L1OriginSelector) onForkchoiceUpdate(unsafeL2Head eth.L2BlockRef) { + // Only allow a relatively small window for fetching the next origin, as this is performed + // on a best-effort basis. + ctx, cancel := context.WithTimeout(los.ctx, 500*time.Millisecond) + defer cancel() + + currentOrigin, nextOrigin, err := los.CurrentAndNextOrigin(ctx, unsafeL2Head) + if err != nil { + log.Error("Failed to get current and next L1 origin on forkchoice update", "err", err) + return + } + + los.tryFetchNextOrigin(ctx, currentOrigin, nextOrigin) +} + +// tryFetchNextOrigin schedules a fetch for the next L1 origin block if it is not already set. +// This method always closes the channel, even if the next origin is already set. +func (los *L1OriginSelector) tryFetchNextOrigin(ctx context.Context, currentOrigin, nextOrigin eth.L1BlockRef) { + // If the next origin is already set, we don't need to do anything. + if nextOrigin != (eth.L1BlockRef{}) { + return + } + + // If the current origin is not set, we can't schedule the next origin check. + if currentOrigin == (eth.L1BlockRef{}) { + return + } + + if _, err := los.fetch(ctx, currentOrigin.Number+1); err != nil { if errors.Is(err, ethereum.NotFound) { - log.Debug("No next L1 block found, repeating current origin") + log.Debug("No next potential L1 origin found") } else { - log.Error("Failed to get next origin. 
Falling back to current origin", "err", err) + log.Error("Failed to get next origin", "err", err) } - return currentOrigin, nil } +} - // If the next L2 block time is greater than the next origin block's time, we can choose to - // start building on top of the next origin. Sequencer implementation has some leeway here and - // could decide to continue to build on top of the previous origin until the Sequencer runs out - // of slack. For simplicity, we implement our Sequencer to always start building on the latest - // L1 block when we can. - if l2Head.Time+los.cfg.BlockTime >= nextOrigin.Time { - return nextOrigin, nil +func (los *L1OriginSelector) fetch(ctx context.Context, number uint64) (eth.L1BlockRef, error) { + // Attempt to find the next L1 origin block, where the next origin is the immediate child of + // the current origin block. + // The L1 source can be shimmed to hide new L1 blocks and enforce a sequencer confirmation distance. + nextOrigin, err := los.l1.L1BlockRefByNumber(ctx, number) + if err != nil { + return eth.L1BlockRef{}, err } - return currentOrigin, nil + los.maybeSetNextOrigin(nextOrigin) + + return nextOrigin, nil +} + +func (los *L1OriginSelector) reset() { + los.mu.Lock() + defer los.mu.Unlock() + + los.currentOrigin = eth.L1BlockRef{} + los.nextOrigin = eth.L1BlockRef{} } diff --git a/op-node/rollup/sequencing/origin_selector_test.go b/op-node/rollup/sequencing/origin_selector_test.go index 44461eac3077..7894d4de8132 100644 --- a/op-node/rollup/sequencing/origin_selector_test.go +++ b/op-node/rollup/sequencing/origin_selector_test.go @@ -2,10 +2,12 @@ package sequencing import ( "context" + "errors" "testing" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/confdepth" + "github.com/ethereum-optimism/optimism/op-node/rollup/engine" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testlog" 
"github.com/ethereum-optimism/optimism/op-service/testutils" @@ -15,14 +17,186 @@ import ( "github.com/stretchr/testify/require" ) +// TestOriginSelectorFetchCurrentError ensures that the origin selector +// returns an error when it cannot fetch the current origin and has no +// internal cached state. +func TestOriginSelectorFetchCurrentError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + log := testlog.Logger(t, log.LevelCrit) + cfg := &rollup.Config{ + MaxSequencerDrift: 500, + BlockTime: 2, + } + l1 := &testutils.MockL1Source{} + defer l1.AssertExpectations(t) + a := eth.L1BlockRef{ + Hash: common.Hash{'a'}, + Number: 10, + Time: 20, + } + b := eth.L1BlockRef{ + Hash: common.Hash{'b'}, + Number: 11, + Time: 25, + ParentHash: a.Hash, + } + l2Head := eth.L2BlockRef{ + L1Origin: a.ID(), + Time: 24, + } + + l1.ExpectL1BlockRefByHash(a.Hash, eth.L1BlockRef{}, errors.New("test error")) + + s := NewL1OriginSelector(ctx, log, cfg, l1) + + _, err := s.FindL1Origin(ctx, l2Head) + require.ErrorContains(t, err, "test error") + + // The same outcome occurs when the cached origin is different from that of the L2 head. + l1.ExpectL1BlockRefByHash(a.Hash, eth.L1BlockRef{}, errors.New("test error")) + + s = NewL1OriginSelector(ctx, log, cfg, l1) + s.currentOrigin = b + + _, err = s.FindL1Origin(ctx, l2Head) + require.ErrorContains(t, err, "test error") +} + +// TestOriginSelectorFetchNextError ensures that the origin selector +// gracefully handles an error when fetching the next origin from the +// forkchoice update event. 
+func TestOriginSelectorFetchNextError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + log := testlog.Logger(t, log.LevelCrit) + cfg := &rollup.Config{ + MaxSequencerDrift: 500, + BlockTime: 2, + } + l1 := &testutils.MockL1Source{} + defer l1.AssertExpectations(t) + a := eth.L1BlockRef{ + Hash: common.Hash{'a'}, + Number: 10, + Time: 20, + } + b := eth.L1BlockRef{ + Hash: common.Hash{'b'}, + Number: 11, + } + l2Head := eth.L2BlockRef{ + L1Origin: a.ID(), + Time: 24, + } + + s := NewL1OriginSelector(ctx, log, cfg, l1) + s.currentOrigin = a + + next, err := s.FindL1Origin(ctx, l2Head) + require.Nil(t, err) + require.Equal(t, a, next) + + l1.ExpectL1BlockRefByNumber(b.Number, eth.L1BlockRef{}, ethereum.NotFound) + + handled := s.OnEvent(engine.ForkchoiceUpdateEvent{UnsafeL2Head: l2Head}) + require.True(t, handled) + + l1.ExpectL1BlockRefByNumber(b.Number, eth.L1BlockRef{}, errors.New("test error")) + + handled = s.OnEvent(engine.ForkchoiceUpdateEvent{UnsafeL2Head: l2Head}) + require.True(t, handled) + + // The next origin should still be `a` because the fetch failed. + next, err = s.FindL1Origin(ctx, l2Head) + require.Nil(t, err) + require.Equal(t, a, next) +} + // TestOriginSelectorAdvances ensures that the origin selector -// advances the origin +// advances the origin with the internal cache // -// There are 2 L1 blocks at time 20 & 25. The L2 Head is at time 24. +// There are 3 L1 blocks at times 20, 22, 24. The L2 Head is at time 24. // The next L2 time is 26 which is after the next L1 block time. There // is no conf depth to stop the origin selection so block `b` should -// be the next L1 origin +// be the next L1 origin, and then block `c` is the subsequent L1 origin. 
func TestOriginSelectorAdvances(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + log := testlog.Logger(t, log.LevelCrit) + cfg := &rollup.Config{ + MaxSequencerDrift: 500, + BlockTime: 2, + } + l1 := &testutils.MockL1Source{} + defer l1.AssertExpectations(t) + a := eth.L1BlockRef{ + Hash: common.Hash{'a'}, + Number: 10, + Time: 20, + } + b := eth.L1BlockRef{ + Hash: common.Hash{'b'}, + Number: 11, + Time: 22, + ParentHash: a.Hash, + } + c := eth.L1BlockRef{ + Hash: common.Hash{'c'}, + Number: 12, + Time: 24, + ParentHash: b.Hash, + } + l2Head := eth.L2BlockRef{ + L1Origin: a.ID(), + Time: 24, + } + + s := NewL1OriginSelector(ctx, log, cfg, l1) + s.currentOrigin = a + s.nextOrigin = b + + // Trigger the background fetch via a forkchoice update. + // This should be a no-op because the next origin is already cached. + handled := s.OnEvent(engine.ForkchoiceUpdateEvent{UnsafeL2Head: l2Head}) + require.True(t, handled) + + next, err := s.FindL1Origin(ctx, l2Head) + require.Nil(t, err) + require.Equal(t, b, next) + + l2Head = eth.L2BlockRef{ + L1Origin: b.ID(), + Time: 26, + } + + // The origin is still `b` because the next origin has not been fetched yet. + next, err = s.FindL1Origin(ctx, l2Head) + require.Nil(t, err) + require.Equal(t, b, next) + + l1.ExpectL1BlockRefByNumber(c.Number, c, nil) + + // Trigger the background fetch via a forkchoice update. + // This will actually fetch the next origin because the internal cache is empty. + handled = s.OnEvent(engine.ForkchoiceUpdateEvent{UnsafeL2Head: l2Head}) + require.True(t, handled) + + // The next origin should be `c` now. + next, err = s.FindL1Origin(ctx, l2Head) + require.Nil(t, err) + require.Equal(t, c, next) +} + +// TestOriginSelectorHandlesReset ensures that the origin selector +// resets its internal cached state on derivation pipeline resets. 
+func TestOriginSelectorHandlesReset(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 500, @@ -46,11 +220,81 @@ func TestOriginSelectorAdvances(t *testing.T) { Time: 24, } + s := NewL1OriginSelector(ctx, log, cfg, l1) + s.currentOrigin = a + s.nextOrigin = b + + next, err := s.FindL1Origin(ctx, l2Head) + require.Nil(t, err) + require.Equal(t, b, next) + + // Trigger the pipeline reset + handled := s.OnEvent(rollup.ResetEvent{}) + require.True(t, handled) + + // The next origin should be `a` now, but we need to fetch it + // because the internal cache was reset. l1.ExpectL1BlockRefByHash(a.Hash, a, nil) + + next, err = s.FindL1Origin(ctx, l2Head) + require.Nil(t, err) + require.Equal(t, a, next) +} + +// TestOriginSelectorFetchesNextOrigin ensures that the origin selector +// fetches the next origin when a fcu is received and the internal cache is empty +// +// The next L2 time is 26 which is after the next L1 block time. There +// is no conf depth to stop the origin selection so block `b` will +// be the next L1 origin as soon as it is fetched. 
+func TestOriginSelectorFetchesNextOrigin(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + log := testlog.Logger(t, log.LevelCrit) + cfg := &rollup.Config{ + MaxSequencerDrift: 500, + BlockTime: 2, + } + l1 := &testutils.MockL1Source{} + defer l1.AssertExpectations(t) + a := eth.L1BlockRef{ + Hash: common.Hash{'a'}, + Number: 10, + Time: 20, + } + b := eth.L1BlockRef{ + Hash: common.Hash{'b'}, + Number: 11, + Time: 25, + ParentHash: a.Hash, + } + l2Head := eth.L2BlockRef{ + L1Origin: a.ID(), + Time: 24, + } + + // This is called as part of the background prefetch job l1.ExpectL1BlockRefByNumber(b.Number, b, nil) - s := NewL1OriginSelector(log, cfg, l1) - next, err := s.FindL1Origin(context.Background(), l2Head) + s := NewL1OriginSelector(ctx, log, cfg, l1) + s.currentOrigin = a + + next, err := s.FindL1Origin(ctx, l2Head) + require.Nil(t, err) + require.Equal(t, a, next) + + // Selection is stable until the next origin is fetched + next, err = s.FindL1Origin(ctx, l2Head) + require.Nil(t, err) + require.Equal(t, a, next) + + // Trigger the background fetch via a forkchoice update + handled := s.OnEvent(engine.ForkchoiceUpdateEvent{UnsafeL2Head: l2Head}) + require.True(t, handled) + + // The next origin should be `b` now. + next, err = s.FindL1Origin(ctx, l2Head) require.Nil(t, err) require.Equal(t, b, next) } @@ -64,6 +308,9 @@ func TestOriginSelectorAdvances(t *testing.T) { // but it should select block `a` because the L2 block time must be ahead // of the the timestamp of it's L1 origin. 
func TestOriginSelectorRespectsOriginTiming(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 500, @@ -87,15 +334,61 @@ func TestOriginSelectorRespectsOriginTiming(t *testing.T) { Time: 22, } - l1.ExpectL1BlockRefByHash(a.Hash, a, nil) - l1.ExpectL1BlockRefByNumber(b.Number, b, nil) + s := NewL1OriginSelector(ctx, log, cfg, l1) + s.currentOrigin = a + s.nextOrigin = b - s := NewL1OriginSelector(log, cfg, l1) - next, err := s.FindL1Origin(context.Background(), l2Head) + next, err := s.FindL1Origin(ctx, l2Head) require.Nil(t, err) require.Equal(t, a, next) } +// TestOriginSelectorRespectsSeqDrift +// +// There are 2 L1 blocks at time 20 & 25. The L2 Head is at time 27. +// The next L2 time is 29. The sequencer drift is 8 so the L2 head is +// valid with origin `a`, but the next L2 block is not valid with origin `b.` +// This is because 29 (next L2 time) > 20 (origin) + 8 (seq drift) => invalid block. +// The origin selector does not yet know about block `b` so it should wait for the +// background fetch to complete synchronously. 
+func TestOriginSelectorRespectsSeqDrift(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + log := testlog.Logger(t, log.LevelCrit) + cfg := &rollup.Config{ + MaxSequencerDrift: 8, + BlockTime: 2, + } + l1 := &testutils.MockL1Source{} + defer l1.AssertExpectations(t) + a := eth.L1BlockRef{ + Hash: common.Hash{'a'}, + Number: 10, + Time: 20, + } + b := eth.L1BlockRef{ + Hash: common.Hash{'b'}, + Number: 11, + Time: 25, + ParentHash: a.Hash, + } + l2Head := eth.L2BlockRef{ + L1Origin: a.ID(), + Time: 27, + } + + l1.ExpectL1BlockRefByHash(a.Hash, a, nil) + + l1.ExpectL1BlockRefByNumber(b.Number, b, nil) + + s := NewL1OriginSelector(ctx, log, cfg, l1) + + next, err := s.FindL1Origin(ctx, l2Head) + require.NoError(t, err) + require.Equal(t, b, next) +} + // TestOriginSelectorRespectsConfDepth ensures that the origin selector // will respect the confirmation depth requirement // @@ -104,6 +397,9 @@ func TestOriginSelectorRespectsOriginTiming(t *testing.T) { // as the origin, however block `b` is the L1 Head & the sequencer // needs to wait until that block is confirmed enough before advancing. 
func TestOriginSelectorRespectsConfDepth(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 500, @@ -127,11 +423,11 @@ func TestOriginSelectorRespectsConfDepth(t *testing.T) { Time: 27, } - l1.ExpectL1BlockRefByHash(a.Hash, a, nil) confDepthL1 := confdepth.NewConfDepth(10, func() eth.L1BlockRef { return b }, l1) - s := NewL1OriginSelector(log, cfg, confDepthL1) + s := NewL1OriginSelector(ctx, log, cfg, confDepthL1) + s.currentOrigin = a - next, err := s.FindL1Origin(context.Background(), l2Head) + next, err := s.FindL1Origin(ctx, l2Head) require.Nil(t, err) require.Equal(t, a, next) } @@ -147,6 +443,9 @@ func TestOriginSelectorRespectsConfDepth(t *testing.T) { // This is because 29 (next L2 time) > 20 (origin) + 8 (seq drift) => invalid block. // We maintain confirmation distance, even though we would shift to the next origin if we could. func TestOriginSelectorStrictConfDepth(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 8, @@ -172,9 +471,9 @@ func TestOriginSelectorStrictConfDepth(t *testing.T) { l1.ExpectL1BlockRefByHash(a.Hash, a, nil) confDepthL1 := confdepth.NewConfDepth(10, func() eth.L1BlockRef { return b }, l1) - s := NewL1OriginSelector(log, cfg, confDepthL1) + s := NewL1OriginSelector(ctx, log, cfg, confDepthL1) - _, err := s.FindL1Origin(context.Background(), l2Head) + _, err := s.FindL1Origin(ctx, l2Head) require.ErrorContains(t, err, "sequencer time drift") } @@ -187,6 +486,9 @@ func u64ptr(n uint64) *uint64 { // This time the same L1 origin is returned if no new L1 head is seen, instead of an error, // because the Fjord max sequencer drift is higher. 
func TestOriginSelector_FjordSeqDrift(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 8, @@ -205,13 +507,12 @@ func TestOriginSelector_FjordSeqDrift(t *testing.T) { Time: 27, // next L2 block time would be past pre-Fjord seq drift } - l1.ExpectL1BlockRefByHash(a.Hash, a, nil) - l1.ExpectL1BlockRefByNumber(a.Number+1, eth.L1BlockRef{}, ethereum.NotFound) - s := NewL1OriginSelector(log, cfg, l1) + s := NewL1OriginSelector(ctx, log, cfg, l1) + s.currentOrigin = a - l1O, err := s.FindL1Origin(context.Background(), l2Head) + next, err := s.FindL1Origin(ctx, l2Head) require.NoError(t, err, "with Fjord activated, have increased max seq drift") - require.Equal(t, a, l1O) + require.Equal(t, a, next) } // TestOriginSelectorSeqDriftRespectsNextOriginTime @@ -221,6 +522,53 @@ func TestOriginSelector_FjordSeqDrift(t *testing.T) { // drift, the origin should remain on block `a` because the next origin's // time is greater than the next L2 time. func TestOriginSelectorSeqDriftRespectsNextOriginTime(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + log := testlog.Logger(t, log.LevelCrit) + cfg := &rollup.Config{ + MaxSequencerDrift: 8, + BlockTime: 2, + } + l1 := &testutils.MockL1Source{} + defer l1.AssertExpectations(t) + a := eth.L1BlockRef{ + Hash: common.Hash{'a'}, + Number: 10, + Time: 20, + } + b := eth.L1BlockRef{ + Hash: common.Hash{'b'}, + Number: 11, + Time: 100, + ParentHash: a.Hash, + } + l2Head := eth.L2BlockRef{ + L1Origin: a.ID(), + Time: 27, + } + + s := NewL1OriginSelector(ctx, log, cfg, l1) + s.currentOrigin = a + s.nextOrigin = b + + next, err := s.FindL1Origin(ctx, l2Head) + require.Nil(t, err) + require.Equal(t, a, next) +} + +// TestOriginSelectorSeqDriftRespectsNextOriginTimeNoCache +// +// There are 2 L1 blocks at time 20 & 100. The L2 Head is at time 27. +// The next L2 time is 29. 
Even though the next L2 time is past the seq +// drift, the origin should remain on block `a` because the next origin's +// time is greater than the next L2 time. +// The L1OriginSelector does not have the next origin cached, and must fetch it +// because the max sequencer drift has been exceeded. +func TestOriginSelectorSeqDriftRespectsNextOriginTimeNoCache(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 8, @@ -244,11 +592,12 @@ func TestOriginSelectorSeqDriftRespectsNextOriginTime(t *testing.T) { Time: 27, } - l1.ExpectL1BlockRefByHash(a.Hash, a, nil) l1.ExpectL1BlockRefByNumber(b.Number, b, nil) - s := NewL1OriginSelector(log, cfg, l1) - next, err := s.FindL1Origin(context.Background(), l2Head) + s := NewL1OriginSelector(ctx, log, cfg, l1) + s.currentOrigin = a + + next, err := s.FindL1Origin(ctx, l2Head) require.Nil(t, err) require.Equal(t, a, next) } @@ -263,6 +612,9 @@ func TestOriginSelectorSeqDriftRespectsNextOriginTime(t *testing.T) { // Due to a conf depth of 2, block `b` is not immediately visible, // and the origin selection should fail until it is visible, by waiting for block `c`. 
func TestOriginSelectorHandlesLateL1Blocks(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 8, @@ -300,23 +652,43 @@ func TestOriginSelectorHandlesLateL1Blocks(t *testing.T) { // l2 head does not change, so we start at the same origin again and again until we meet the conf depth l1.ExpectL1BlockRefByHash(a.Hash, a, nil) - l1.ExpectL1BlockRefByHash(a.Hash, a, nil) - l1.ExpectL1BlockRefByHash(a.Hash, a, nil) + l1.ExpectL1BlockRefByNumber(b.Number, b, nil) l1Head := b confDepthL1 := confdepth.NewConfDepth(2, func() eth.L1BlockRef { return l1Head }, l1) - s := NewL1OriginSelector(log, cfg, confDepthL1) + s := NewL1OriginSelector(ctx, log, cfg, confDepthL1) - _, err := s.FindL1Origin(context.Background(), l2Head) + _, err := s.FindL1Origin(ctx, l2Head) require.ErrorContains(t, err, "sequencer time drift") l1Head = c - _, err = s.FindL1Origin(context.Background(), l2Head) + _, err = s.FindL1Origin(ctx, l2Head) require.ErrorContains(t, err, "sequencer time drift") l1Head = d - next, err := s.FindL1Origin(context.Background(), l2Head) + next, err := s.FindL1Origin(ctx, l2Head) require.Nil(t, err) require.Equal(t, a, next, "must stay on a because the L1 time may not be higher than the L2 time") } + +// TestOriginSelectorMiscEvent ensures that the origin selector ignores miscellaneous events, +// but instead returns false to indicate that the event was not handled. 
+func TestOriginSelectorMiscEvent(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + log := testlog.Logger(t, log.LevelCrit) + cfg := &rollup.Config{ + MaxSequencerDrift: 8, + BlockTime: 2, + } + l1 := &testutils.MockL1Source{} + defer l1.AssertExpectations(t) + + s := NewL1OriginSelector(ctx, log, cfg, l1) + + // This event is not handled + handled := s.OnEvent(rollup.L1TemporaryErrorEvent{}) + require.False(t, handled) +} From ab8b3719048fbd265b8a9af23a0d6a3ea520ae97 Mon Sep 17 00:00:00 2001 From: Chen Kai <281165273grape@gmail.com> Date: Wed, 2 Oct 2024 22:48:16 +0800 Subject: [PATCH 105/116] MT Cannon: add cannon load/store opcodes tests (#12196) * feat:add cannon load/store opcodes tests Signed-off-by: Chen Kai <281165273grape@gmail.com> * fix:code review suggestion Signed-off-by: Chen Kai <281165273grape@gmail.com> --------- Signed-off-by: Chen Kai <281165273grape@gmail.com> --- cannon/mipsevm/tests/evm_common_test.go | 97 +++++++++++++++++++++++-- 1 file changed, 90 insertions(+), 7 deletions(-) diff --git a/cannon/mipsevm/tests/evm_common_test.go b/cannon/mipsevm/tests/evm_common_test.go index b0068cb993ba..890feca9ee03 100644 --- a/cannon/mipsevm/tests/evm_common_test.go +++ b/cannon/mipsevm/tests/evm_common_test.go @@ -202,14 +202,20 @@ func TestEVMSingleStep_Operators(t *testing.T) { goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(int64(i)), testutil.WithPC(0), testutil.WithNextPC(4)) state := goVm.GetState() var insn uint32 + var baseReg uint32 = 17 + var rtReg uint32 + var rdReg uint32 if tt.isImm { - insn = tt.opcode<<26 | uint32(17)<<21 | uint32(8)<<16 | uint32(tt.imm) - state.GetRegistersRef()[8] = tt.rt - state.GetRegistersRef()[17] = tt.rs + rtReg = 8 + insn = tt.opcode<<26 | baseReg<<21 | rtReg<<16 | uint32(tt.imm) + state.GetRegistersRef()[rtReg] = tt.rt + state.GetRegistersRef()[baseReg] = tt.rs } else { - insn = uint32(17)<<21 | uint32(18)<<16 | 
uint32(8)<<11 | tt.funct - state.GetRegistersRef()[17] = tt.rs - state.GetRegistersRef()[18] = tt.rt + rtReg = 18 + rdReg = 8 + insn = baseReg<<21 | rtReg<<16 | rdReg<<11 | tt.funct + state.GetRegistersRef()[baseReg] = tt.rs + state.GetRegistersRef()[rtReg] = tt.rt } state.GetMemory().SetMemory(0, insn) step := state.GetStep() @@ -219,8 +225,85 @@ func TestEVMSingleStep_Operators(t *testing.T) { expected.Step += 1 expected.PC = 4 expected.NextPC = 8 - expected.Registers[8] = tt.expectRes + if tt.isImm { + expected.Registers[rtReg] = tt.expectRes + } else { + expected.Registers[rdReg] = tt.expectRes + } + + stepWitness, err := goVm.Step(true) + require.NoError(t, err) + + // Check expectations + expected.Validate(t, state) + testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts, tracer) + }) + } + } +} + +func TestEVMSingleStep_LoadStore(t *testing.T) { + var tracer *tracing.Hooks + + versions := GetMipsVersionTestCases(t) + cases := []struct { + name string + rs uint32 + rt uint32 + isUnAligned bool + opcode uint32 + memVal uint32 + expectMemVal uint32 + expectRes uint32 + }{ + {name: "lb", opcode: uint32(0x20), memVal: uint32(0x12_00_00_00), expectRes: uint32(0x12)}, // lb $t0, 4($t1) + {name: "lh", opcode: uint32(0x21), memVal: uint32(0x12_23_00_00), expectRes: uint32(0x12_23)}, // lh $t0, 4($t1) + {name: "lw", opcode: uint32(0x23), memVal: uint32(0x12_23_45_67), expectRes: uint32(0x12_23_45_67)}, // lw $t0, 4($t1) + {name: "lbu", opcode: uint32(0x24), memVal: uint32(0x12_23_00_00), expectRes: uint32(0x12)}, // lbu $t0, 4($t1) + {name: "lhu", opcode: uint32(0x25), memVal: uint32(0x12_23_00_00), expectRes: uint32(0x12_23)}, // lhu $t0, 4($t1) + {name: "lwl", opcode: uint32(0x22), rt: uint32(0xaa_bb_cc_dd), memVal: uint32(0x12_34_56_78), expectRes: uint32(0x12_34_56_78)}, // lwl $t0, 4($t1) + {name: "lwl unaligned address", opcode: uint32(0x22), rt: uint32(0xaa_bb_cc_dd), isUnAligned: true, memVal: uint32(0x12_34_56_78), expectRes: 
uint32(0x34_56_78_dd)}, // lwl $t0, 5($t1) + {name: "lwr", opcode: uint32(0x26), rt: uint32(0xaa_bb_cc_dd), memVal: uint32(0x12_34_56_78), expectRes: uint32(0xaa_bb_cc_12)}, // lwr $t0, 4($t1) + {name: "lwr unaligned address", opcode: uint32(0x26), rt: uint32(0xaa_bb_cc_dd), isUnAligned: true, memVal: uint32(0x12_34_56_78), expectRes: uint32(0xaa_bb_12_34)}, // lwr $t0, 5($t1) + {name: "sb", opcode: uint32(0x28), rt: uint32(0xaa_bb_cc_dd), expectMemVal: uint32(0xdd_00_00_00)}, // sb $t0, 4($t1) + {name: "sh", opcode: uint32(0x29), rt: uint32(0xaa_bb_cc_dd), expectMemVal: uint32(0xcc_dd_00_00)}, // sh $t0, 4($t1) + {name: "swl", opcode: uint32(0x2a), rt: uint32(0xaa_bb_cc_dd), expectMemVal: uint32(0xaa_bb_cc_dd)}, // swl $t0, 4($t1) + {name: "sw", opcode: uint32(0x2b), rt: uint32(0xaa_bb_cc_dd), expectMemVal: uint32(0xaa_bb_cc_dd)}, // sw $t0, 4($t1) + {name: "swr unaligned address", opcode: uint32(0x2e), rt: uint32(0xaa_bb_cc_dd), isUnAligned: true, expectMemVal: uint32(0xcc_dd_00_00)}, // swr $t0, 5($t1) + } + + var t1 uint32 = 0x100 + var baseReg uint32 = 9 + var rtReg uint32 = 8 + for _, v := range versions { + for i, tt := range cases { + testName := fmt.Sprintf("%v (%v)", tt.name, v.Name) + t.Run(testName, func(t *testing.T) { + goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(int64(i)), testutil.WithPC(0), testutil.WithNextPC(4)) + state := goVm.GetState() + var insn uint32 + imm := uint32(0x4) + if tt.isUnAligned { + imm = uint32(0x5) + } + + insn = tt.opcode<<26 | baseReg<<21 | rtReg<<16 | imm + state.GetRegistersRef()[rtReg] = tt.rt + state.GetRegistersRef()[baseReg] = t1 + + state.GetMemory().SetMemory(0, insn) + state.GetMemory().SetMemory(t1+4, tt.memVal) + step := state.GetStep() + // Setup expectations + expected := testutil.NewExpectedState(state) + expected.ExpectStep() + + if tt.expectMemVal != 0 { + expected.ExpectMemoryWrite(t1+4, tt.expectMemVal) + } else { + expected.Registers[rtReg] = 
tt.expectRes + } stepWitness, err := goVm.Step(true) require.NoError(t, err) From e7dbd848b5300ee6121b2f941b5f021ce6a66780 Mon Sep 17 00:00:00 2001 From: Maurelian Date: Wed, 2 Oct 2024 10:55:09 -0400 Subject: [PATCH 106/116] feat: Use DeploySuperchain script in Deploy.s.sol (#12203) * feat: Use DeploySuperchain script in Deploy.s.sol Demonstrate that build breaks when using high level syntax * fix: Cannot set null protocol versions error * feat: Also save impls * fix: semver lock * fix: bump ProtocolVersions semver * feat: Add superchainProxyAdmin * feat: Undo removeing ProtocolVersion type from interface * fix: semver-lock --- .../deploy-config/devnetL1-template.json | 4 +- .../deploy-config/hardhat.json | 4 +- .../scripts/deploy/Deploy.s.sol | 93 ++++--------------- packages/contracts-bedrock/semver-lock.json | 4 +- .../src/L1/ProtocolVersions.sol | 4 +- 5 files changed, 27 insertions(+), 82 deletions(-) diff --git a/packages/contracts-bedrock/deploy-config/devnetL1-template.json b/packages/contracts-bedrock/deploy-config/devnetL1-template.json index 11bc3557791d..d241c3186a08 100644 --- a/packages/contracts-bedrock/deploy-config/devnetL1-template.json +++ b/packages/contracts-bedrock/deploy-config/devnetL1-template.json @@ -50,8 +50,8 @@ "l2GenesisFjordTimeOffset": "0x0", "l1CancunTimeOffset": "0x0", "systemConfigStartBlock": 0, - "requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000", - "recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000", + "requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000001", + "recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000001", "faultGameAbsolutePrestate": "0x03c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98", "faultGameMaxDepth": 50, "faultGameClockExtension": 0, diff --git a/packages/contracts-bedrock/deploy-config/hardhat.json 
b/packages/contracts-bedrock/deploy-config/hardhat.json index e26c10ef78ce..6dcbb299d1de 100644 --- a/packages/contracts-bedrock/deploy-config/hardhat.json +++ b/packages/contracts-bedrock/deploy-config/hardhat.json @@ -41,8 +41,8 @@ "eip1559Elasticity": 10, "l2GenesisRegolithTimeOffset": "0x0", "systemConfigStartBlock": 0, - "requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000", - "recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000", + "requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000001", + "recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000001", "faultGameAbsolutePrestate": "0x0000000000000000000000000000000000000000000000000000000000000000", "faultGameMaxDepth": 8, "faultGameClockExtension": 0, diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index 57c5e30c7d8f..2843b60be5d7 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -17,6 +17,7 @@ import { LibStateDiff } from "scripts/libraries/LibStateDiff.sol"; import { Process } from "scripts/libraries/Process.sol"; import { ChainAssertions } from "scripts/deploy/ChainAssertions.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; +import { DeploySuperchainInput, DeploySuperchain, DeploySuperchainOutput } from "scripts/DeploySuperchain.s.sol"; // Contracts import { AddressManager } from "src/legacy/AddressManager.sol"; @@ -298,16 +299,26 @@ contract Deploy is Deployer { /// 2. 
The ProtocolVersions contract function setupSuperchain() public { console.log("Setting up Superchain"); + DeploySuperchain deploySuperchain = new DeploySuperchain(); + (DeploySuperchainInput dsi, DeploySuperchainOutput dso) = deploySuperchain.etchIOContracts(); - // Deploy the SuperchainConfigProxy - deployERC1967ProxyWithOwner("SuperchainConfigProxy", mustGetAddress("SuperchainProxyAdmin")); - deploySuperchainConfig(); - initializeSuperchainConfig(); + // Set the input values on the input contract. + dsi.set(dsi.superchainProxyAdminOwner.selector, mustGetAddress("SuperchainProxyAdmin")); + // TODO: when DeployAuthSystem is done, finalSystemOwner should be replaced with the Foundation Upgrades Safe + dsi.set(dsi.protocolVersionsOwner.selector, cfg.finalSystemOwner()); + dsi.set(dsi.guardian.selector, cfg.superchainConfigGuardian()); + dsi.set(dsi.paused.selector, false); - // Deploy the ProtocolVersionsProxy - deployERC1967ProxyWithOwner("ProtocolVersionsProxy", mustGetAddress("SuperchainProxyAdmin")); - deployProtocolVersions(); - initializeProtocolVersions(); + dsi.set(dsi.requiredProtocolVersion.selector, ProtocolVersion.wrap(cfg.requiredProtocolVersion())); + dsi.set(dsi.recommendedProtocolVersion.selector, ProtocolVersion.wrap(cfg.recommendedProtocolVersion())); + + // Run the deployment script. 
+ deploySuperchain.run(dsi, dso); + save("superchainProxyAdmin", address(dso.superchainProxyAdmin())); + save("SuperchainConfigProxy", address(dso.superchainConfigProxy())); + save("SuperchainConfig", address(dso.superchainConfigImpl())); + save("ProtocolVersionsProxy", address(dso.protocolVersionsProxy())); + save("ProtocolVersions", address(dso.protocolVersionsImpl())); } /// @notice Deploy a new OP Chain, with an existing SuperchainConfig provided @@ -751,27 +762,6 @@ contract Deploy is Deployer { addr_ = address(weth); } - /// @notice Deploy the ProtocolVersions - function deployProtocolVersions() public broadcast returns (address addr_) { - IProtocolVersions versions = IProtocolVersions( - DeployUtils.create2AndSave({ - _save: this, - _salt: _implSalt(), - _name: "ProtocolVersions", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IProtocolVersions.__constructor__, ())) - }) - ); - - // Override the `ProtocolVersions` contract to the deployed implementation. This is necessary - // to check the `ProtocolVersions` implementation alongside dependent contracts, which - // are always proxies. 
- Types.ContractSet memory contracts = _proxiesUnstrict(); - contracts.ProtocolVersions = address(versions); - ChainAssertions.checkProtocolVersions({ _contracts: contracts, _cfg: cfg, _isProxy: false }); - - addr_ = address(versions); - } - /// @notice Deploy the PreimageOracle function deployPreimageOracle() public broadcast returns (address addr_) { IPreimageOracle preimageOracle = IPreimageOracle( @@ -923,21 +913,6 @@ contract Deploy is Deployer { // Initialize Functions // //////////////////////////////////////////////////////////////// - /// @notice Initialize the SuperchainConfig - function initializeSuperchainConfig() public broadcast { - address payable superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); - address payable superchainConfig = mustGetAddress("SuperchainConfig"); - - IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("SuperchainProxyAdmin"))); - proxyAdmin.upgradeAndCall({ - _proxy: superchainConfigProxy, - _implementation: superchainConfig, - _data: abi.encodeCall(ISuperchainConfig.initialize, (cfg.superchainConfigGuardian(), false)) - }); - - ChainAssertions.checkSuperchainConfig({ _contracts: _proxiesUnstrict(), _cfg: cfg, _isPaused: false }); - } - /// @notice Initialize the DisputeGameFactory function initializeDisputeGameFactory() public broadcast { console.log("Upgrading and initializing DisputeGameFactory proxy"); @@ -1331,36 +1306,6 @@ contract Deploy is Deployer { ChainAssertions.checkOptimismPortal2({ _contracts: _proxies(), _cfg: cfg, _isProxy: true }); } - function initializeProtocolVersions() public broadcast { - console.log("Upgrading and initializing ProtocolVersions proxy"); - address protocolVersionsProxy = mustGetAddress("ProtocolVersionsProxy"); - address protocolVersions = mustGetAddress("ProtocolVersions"); - - address finalSystemOwner = cfg.finalSystemOwner(); - uint256 requiredProtocolVersion = cfg.requiredProtocolVersion(); - uint256 recommendedProtocolVersion = cfg.recommendedProtocolVersion(); - 
- IProxyAdmin proxyAdmin = IProxyAdmin(payable(mustGetAddress("SuperchainProxyAdmin"))); - proxyAdmin.upgradeAndCall({ - _proxy: payable(protocolVersionsProxy), - _implementation: protocolVersions, - _data: abi.encodeCall( - IProtocolVersions.initialize, - ( - finalSystemOwner, - ProtocolVersion.wrap(requiredProtocolVersion), - ProtocolVersion.wrap(recommendedProtocolVersion) - ) - ) - }); - - IProtocolVersions versions = IProtocolVersions(protocolVersionsProxy); - string memory version = versions.version(); - console.log("ProtocolVersions version: %s", version); - - ChainAssertions.checkProtocolVersions({ _contracts: _proxiesUnstrict(), _cfg: cfg, _isProxy: true }); - } - /// @notice Transfer ownership of the DisputeGameFactory contract to the final system owner function transferDisputeGameFactoryOwnership() public broadcast { console.log("Transferring DisputeGameFactory ownership to Safe"); diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index 73d8ba52c48b..cec1d51d210a 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -48,8 +48,8 @@ "sourceCodeHash": "0x6401b81f04093863557ef46192f56793daa0d412618065383ab353b2ed2929d8" }, "src/L1/ProtocolVersions.sol": { - "initCodeHash": "0x8f033874dd8b36615b2209d553660dcff1ff91ca2bad3ca1de7b441dbfba4842", - "sourceCodeHash": "0x5a7e91e02224e02a5a4bbf0fea7e9bd4a1168e2fe5e787023c9a75bcb6c26204" + "initCodeHash": "0xf7a9ed8c772cfb1234988fd6fd195dc21615b216bb39e728e7699b875040d902", + "sourceCodeHash": "0x92f15d62361bffc305f0db48a5676329f8e5ed2e454f8c8ff83ef7d3667d7f01" }, "src/L1/SuperchainConfig.sol": { "initCodeHash": "0xfca12d9016c746e5c275b186e0ca40cfd65cf45a5665aab7589a669fea3abb47", diff --git a/packages/contracts-bedrock/src/L1/ProtocolVersions.sol b/packages/contracts-bedrock/src/L1/ProtocolVersions.sol index f988d43e1697..2be060604c86 100644 --- a/packages/contracts-bedrock/src/L1/ProtocolVersions.sol +++ 
b/packages/contracts-bedrock/src/L1/ProtocolVersions.sol @@ -37,8 +37,8 @@ contract ProtocolVersions is OwnableUpgradeable, ISemver { event ConfigUpdate(uint256 indexed version, UpdateType indexed updateType, bytes data); /// @notice Semantic version. - /// @custom:semver 1.0.1-beta.1 - string public constant version = "1.0.1-beta.1"; + /// @custom:semver 1.0.1-beta.2 + string public constant version = "1.0.1-beta.2"; /// @notice Constructs the ProtocolVersion contract. Cannot set /// the owner to `address(0)` due to the Ownable contract's From b1d119fce2ff8f110c243d98218e7ec03da447e1 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Thu, 3 Oct 2024 00:55:39 +1000 Subject: [PATCH 107/116] op-e2e: Retrieve AllocType from System instead of env (#12221) * op-e2e: Retrieve AllocType from System instead of env * Remove more uses of env. * op-e2e: Run mt-cannon as an allocType without needing special env vars. --- .circleci/config.yml | 19 +- op-e2e/config/init.go | 12 - op-e2e/e2eutils/challenger/helper.go | 14 +- op-e2e/e2eutils/disputegame/helper.go | 3 +- .../disputegame/output_cannon_helper.go | 4 +- .../disputegame/output_game_helper.go | 2 - op-e2e/faultproofs/cannon_benchmark_test.go | 12 +- op-e2e/faultproofs/multi_test.go | 4 +- op-e2e/faultproofs/output_cannon_test.go | 233 +++++++++++++++--- op-e2e/faultproofs/permissioned_test.go | 4 +- op-e2e/faultproofs/precompile_test.go | 29 ++- op-e2e/faultproofs/util.go | 8 +- op-e2e/system/e2esys/setup.go | 4 + 13 files changed, 255 insertions(+), 93 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 736d615bc058..2a71eab4e760 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -908,9 +908,6 @@ jobs: go-e2e-test: parameters: - variant: - type: string - default: '' module: description: Go Module Name type: string @@ -935,13 +932,6 @@ jobs: parallelism: <> steps: - checkout - - when: - condition: - equal: ['-mt-cannon', <>] - steps: - - run: - name: Set OP_E2E_ALLOC_TYPE = mt-cannon 
- command: echo 'export OP_E2E_ALLOC_TYPE=mt-cannon' >> $BASH_ENV - check-changed: patterns: op-(.+),cannon,contracts-bedrock - run: @@ -979,7 +969,7 @@ jobs: # want it to. export OP_E2E_CANNON_ENABLED="false" # Note: We don't use circle CI test splits because we need to split by test name, not by package. There is an additional - # constraint that gotestsum does not currently (nor likely will) accept files from different pacakges when building. + # constraint that gotestsum does not currently (nor likely will) accept files from different packages when building. JUNIT_FILE=/tmp/test-results/<>_<>.xml JSON_LOG_FILE=/testlogs/test.log make <> working_directory: <> - store_artifacts: @@ -1685,13 +1675,10 @@ workflows: context: - slack - go-e2e-test: - name: op-e2e-cannon-tests<< matrix.variant >> - matrix: - parameters: - variant: ["", "-mt-cannon"] + name: op-e2e-cannon-tests module: op-e2e target: test-cannon - parallelism: 4 + parallelism: 8 notify: true mentions: "@proofs-squad" requires: diff --git a/op-e2e/config/init.go b/op-e2e/config/init.go index 6fe29126996a..a635c9efd7e5 100644 --- a/op-e2e/config/init.go +++ b/op-e2e/config/init.go @@ -212,15 +212,3 @@ func initAllocType(root string, allocType AllocType) { dc.SetDeployments(l1Deployments) deployConfigsByType[allocType] = dc } - -func AllocTypeFromEnv() AllocType { - allocType := os.Getenv("OP_E2E_ALLOC_TYPE") - if allocType == "" { - return DefaultAllocType - } - out := AllocType(allocType) - if err := out.Check(); err != nil { - panic(err) - } - return out -} diff --git a/op-e2e/e2eutils/challenger/helper.go b/op-e2e/e2eutils/challenger/helper.go index 2d72e53ecc62..8e31f1311e87 100644 --- a/op-e2e/e2eutils/challenger/helper.go +++ b/op-e2e/e2eutils/challenger/helper.go @@ -12,7 +12,6 @@ import ( "time" e2econfig "github.com/ethereum-optimism/optimism/op-e2e/config" - "github.com/ethereum-optimism/optimism/op-service/crypto" "github.com/ethereum/go-ethereum/ethclient" @@ -39,6 +38,11 @@ type 
EndpointProvider interface { L1BeaconEndpoint() endpoint.RestHTTP } +type System interface { + RollupCfg() *rollup.Config + L2Genesis() *core.Genesis + AllocType() e2econfig.AllocType +} type Helper struct { log log.Logger t *testing.T @@ -142,17 +146,17 @@ func applyCannonConfig(c *config.Config, t *testing.T, rollupCfg *rollup.Config, c.Cannon.RollupConfigPath = rollupFile } -func WithCannon(t *testing.T, rollupCfg *rollup.Config, l2Genesis *core.Genesis, allocType e2econfig.AllocType) Option { +func WithCannon(t *testing.T, system System) Option { return func(c *config.Config) { c.TraceTypes = append(c.TraceTypes, types.TraceTypeCannon) - applyCannonConfig(c, t, rollupCfg, l2Genesis, allocType) + applyCannonConfig(c, t, system.RollupCfg(), system.L2Genesis(), system.AllocType()) } } -func WithPermissioned(t *testing.T, rollupCfg *rollup.Config, l2Genesis *core.Genesis, allocType e2econfig.AllocType) Option { +func WithPermissioned(t *testing.T, system System) Option { return func(c *config.Config) { c.TraceTypes = append(c.TraceTypes, types.TraceTypePermissioned) - applyCannonConfig(c, t, rollupCfg, l2Genesis, allocType) + applyCannonConfig(c, t, system.RollupCfg(), system.L2Genesis(), system.AllocType()) } } diff --git a/op-e2e/e2eutils/disputegame/helper.go b/op-e2e/e2eutils/disputegame/helper.go index 536c0fb8e907..07ee4c659ff9 100644 --- a/op-e2e/e2eutils/disputegame/helper.go +++ b/op-e2e/e2eutils/disputegame/helper.go @@ -83,6 +83,7 @@ type DisputeSystem interface { L1Deployments() *genesis.L1Deployments RollupCfg() *rollup.Config L2Genesis() *core.Genesis + AllocType() config.AllocType AdvanceTime(time.Duration) } @@ -117,7 +118,7 @@ func NewFactoryHelper(t *testing.T, ctx context.Context, system DisputeSystem, o chainID, err := client.ChainID(ctx) require.NoError(err) - allocType := config.AllocTypeFromEnv() + allocType := system.AllocType() require.True(allocType.UsesProofs(), "AllocType %v does not support proofs", allocType) factoryCfg := 
&FactoryCfg{PrivKey: TestKey} diff --git a/op-e2e/e2eutils/disputegame/output_cannon_helper.go b/op-e2e/e2eutils/disputegame/output_cannon_helper.go index 5b0f923b0dc2..264742be194b 100644 --- a/op-e2e/e2eutils/disputegame/output_cannon_helper.go +++ b/op-e2e/e2eutils/disputegame/output_cannon_helper.go @@ -35,7 +35,7 @@ type OutputCannonGameHelper struct { func (g *OutputCannonGameHelper) StartChallenger(ctx context.Context, name string, options ...challenger.Option) *challenger.Helper { opts := []challenger.Option{ - challenger.WithCannon(g.T, g.System.RollupCfg(), g.System.L2Genesis(), g.AllocType), + challenger.WithCannon(g.T, g.System), challenger.WithFactoryAddress(g.FactoryAddr), challenger.WithGameAddress(g.Addr), } @@ -331,7 +331,7 @@ func (g *OutputCannonGameHelper) createCannonTraceProvider(ctx context.Context, func (g *OutputCannonGameHelper) defaultChallengerOptions() []challenger.Option { return []challenger.Option{ - challenger.WithCannon(g.T, g.System.RollupCfg(), g.System.L2Genesis(), g.AllocType), + challenger.WithCannon(g.T, g.System), challenger.WithFactoryAddress(g.FactoryAddr), challenger.WithGameAddress(g.Addr), } diff --git a/op-e2e/e2eutils/disputegame/output_game_helper.go b/op-e2e/e2eutils/disputegame/output_game_helper.go index 3914b3399b14..0468620537f1 100644 --- a/op-e2e/e2eutils/disputegame/output_game_helper.go +++ b/op-e2e/e2eutils/disputegame/output_game_helper.go @@ -42,7 +42,6 @@ type OutputGameHelper struct { Addr common.Address CorrectOutputProvider *outputs.OutputTraceProvider System DisputeSystem - AllocType config.AllocType } func NewOutputGameHelper(t *testing.T, require *require.Assertions, client *ethclient.Client, opts *bind.TransactOpts, privKey *ecdsa.PrivateKey, @@ -58,7 +57,6 @@ func NewOutputGameHelper(t *testing.T, require *require.Assertions, client *ethc Addr: addr, CorrectOutputProvider: correctOutputProvider, System: system, - AllocType: allocType, } } diff --git a/op-e2e/faultproofs/cannon_benchmark_test.go 
b/op-e2e/faultproofs/cannon_benchmark_test.go index 71b4d0f9a3b4..8a2592be59e7 100644 --- a/op-e2e/faultproofs/cannon_benchmark_test.go +++ b/op-e2e/faultproofs/cannon_benchmark_test.go @@ -33,12 +33,20 @@ import ( "github.com/ethereum-optimism/optimism/op-service/testlog" ) -func TestBenchmarkCannon_FPP(t *testing.T) { +func TestBenchmarkCannonFPP_Standard(t *testing.T) { + testBenchmarkCannonFPP(t, config.AllocTypeStandard) +} + +func TestBenchmarkCannonFPP_Multithreaded(t *testing.T) { + testBenchmarkCannonFPP(t, config.AllocTypeMTCannon) +} + +func testBenchmarkCannonFPP(t *testing.T, allocType config.AllocType) { t.Skip("TODO(client-pod#906): Compare total witness size for assertions against pages allocated by the VM") op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() - cfg := e2esys.DefaultSystemConfig(t, e2esys.WithAllocType(config.AllocTypeFromEnv())) + cfg := e2esys.DefaultSystemConfig(t, e2esys.WithAllocType(allocType)) // We don't need a verifier - just the sequencer is enough delete(cfg.Nodes, "verifier") // Use a small sequencer window size to avoid test timeout while waiting for empty blocks diff --git a/op-e2e/faultproofs/multi_test.go b/op-e2e/faultproofs/multi_test.go index 2034e394842c..e8e87268ea52 100644 --- a/op-e2e/faultproofs/multi_test.go +++ b/op-e2e/faultproofs/multi_test.go @@ -4,8 +4,6 @@ import ( "context" "testing" - "github.com/ethereum-optimism/optimism/op-e2e/config" - op_e2e "github.com/ethereum-optimism/optimism/op-e2e" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger" @@ -29,7 +27,7 @@ func TestMultipleGameTypes(t *testing.T) { // Start a challenger with both cannon and alphabet support gameFactory.StartChallenger(ctx, "TowerDefense", - challenger.WithCannon(t, sys.RollupConfig, sys.L2GenesisCfg, config.AllocTypeFromEnv()), + challenger.WithCannon(t, sys), challenger.WithAlphabet(), challenger.WithPrivKey(sys.Cfg.Secrets.Alice), ) diff --git a/op-e2e/faultproofs/output_cannon_test.go 
b/op-e2e/faultproofs/output_cannon_test.go index d0abbac7338c..80dea8bfb8dd 100644 --- a/op-e2e/faultproofs/output_cannon_test.go +++ b/op-e2e/faultproofs/output_cannon_test.go @@ -5,11 +5,11 @@ import ( "fmt" "testing" - op_e2e "github.com/ethereum-optimism/optimism/op-e2e" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" + op_e2e "github.com/ethereum-optimism/optimism/op-e2e" + "github.com/ethereum-optimism/optimism/op-e2e/config" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/disputegame" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/disputegame/preimage" @@ -18,10 +18,18 @@ import ( "github.com/stretchr/testify/require" ) -func TestOutputCannonGame(t *testing.T) { +func TestOutputCannonGame_Standard(t *testing.T) { + testOutputCannonGame(t, config.AllocTypeStandard) +} + +func TestOutputCannonGame_Multithreaded(t *testing.T) { + testOutputCannonGame(t, config.AllocTypeMTCannon) +} + +func testOutputCannonGame(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() - sys, l1Client := StartFaultDisputeSystem(t) + sys, l1Client := StartFaultDisputeSystem(t, WithAllocType(allocType)) t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) @@ -73,11 +81,19 @@ func TestOutputCannonGame(t *testing.T) { game.WaitForGameStatus(ctx, gameTypes.GameStatusChallengerWon) } -func TestOutputCannon_ChallengeAllZeroClaim(t *testing.T) { +func TestOutputCannon_ChallengeAllZeroClaim_Standard(t *testing.T) { + testOutputCannonChallengeAllZeroClaim(t, config.AllocTypeStandard) +} + +func TestOutputCannon_ChallengeAllZeroClaim_Multithreaded(t *testing.T) { + testOutputCannonChallengeAllZeroClaim(t, config.AllocTypeMTCannon) +} + +func 
testOutputCannonChallengeAllZeroClaim(t *testing.T, allocType config.AllocType) { // The dishonest actor always posts claims with all zeros. op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() - sys, l1Client := StartFaultDisputeSystem(t) + sys, l1Client := StartFaultDisputeSystem(t, WithAllocType(allocType)) t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) @@ -102,7 +118,15 @@ func TestOutputCannon_ChallengeAllZeroClaim(t *testing.T) { game.LogGameData(ctx) } -func TestOutputCannon_PublishCannonRootClaim(t *testing.T) { +func TestOutputCannon_PublishCannonRootClaim_Standard(t *testing.T) { + testOutputCannonPublishCannonRootClaim(t, config.AllocTypeStandard) +} + +func TestOutputCannon_PublishCannonRootClaim_Multithreaded(t *testing.T) { + testOutputCannonPublishCannonRootClaim(t, config.AllocTypeMTCannon) +} + +func testOutputCannonPublishCannonRootClaim(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) tests := []struct { disputeL2BlockNumber uint64 @@ -116,7 +140,7 @@ func TestOutputCannon_PublishCannonRootClaim(t *testing.T) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() - sys, _ := StartFaultDisputeSystem(t) + sys, _ := StartFaultDisputeSystem(t, WithAllocType(allocType)) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", test.disputeL2BlockNumber, common.Hash{0x01}) @@ -131,7 +155,16 @@ func TestOutputCannon_PublishCannonRootClaim(t *testing.T) { } } -func TestOutputCannonDisputeGame(t *testing.T) { +func TestOutputCannonDisputeGame_Standard(t *testing.T) { + testOutputCannonDisputeGame(t, config.AllocTypeStandard) +} + +func TestOutputCannonDisputeGame_Multithreaded(t *testing.T) { + testOutputCannonDisputeGame(t, config.AllocTypeMTCannon) +} + +func testOutputCannonDisputeGame(t *testing.T, allocType config.AllocType) { + op_e2e.InitParallel(t, op_e2e.UsesCannon) 
tests := []struct { name string @@ -147,7 +180,7 @@ func TestOutputCannonDisputeGame(t *testing.T) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() - sys, l1Client := StartFaultDisputeSystem(t) + sys, l1Client := StartFaultDisputeSystem(t, WithAllocType(allocType)) t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) @@ -180,11 +213,19 @@ func TestOutputCannonDisputeGame(t *testing.T) { } } -func TestOutputCannonDefendStep(t *testing.T) { +func TestOutputCannonDefendStep_Standard(t *testing.T) { + testOutputCannonDefendStep(t, config.AllocTypeStandard) +} + +func TestOutputCannonDefendStep_Multithreaded(t *testing.T) { + testOutputCannonDefendStep(t, config.AllocTypeMTCannon) +} + +func testOutputCannonDefendStep(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() - sys, l1Client := StartFaultDisputeSystem(t) + sys, l1Client := StartFaultDisputeSystem(t, WithAllocType(allocType)) t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) @@ -216,11 +257,19 @@ func TestOutputCannonDefendStep(t *testing.T) { require.EqualValues(t, gameTypes.GameStatusChallengerWon, game.Status(ctx)) } -func TestOutputCannonStepWithLargePreimage(t *testing.T) { +func TestOutputCannonStepWithLargePreimage_Standard(t *testing.T) { + testOutputCannonStepWithLargePreimage(t, config.AllocTypeStandard) +} + +func TestOutputCannonStepWithLargePreimage_Multithreaded(t *testing.T) { + testOutputCannonStepWithLargePreimage(t, config.AllocTypeMTCannon) +} + +func testOutputCannonStepWithLargePreimage(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() - sys, _ := StartFaultDisputeSystem(t, WithBatcherStopped()) + sys, _ := StartFaultDisputeSystem(t, WithBatcherStopped(), WithAllocType(allocType)) t.Cleanup(sys.Close) // Manually send a tx from the correct batcher key to the batcher input with 
very large (invalid) data @@ -257,13 +306,21 @@ func TestOutputCannonStepWithLargePreimage(t *testing.T) { // So we don't waste time resolving the game - that's tested elsewhere. } -func TestOutputCannonStepWithPreimage(t *testing.T) { +func TestOutputCannonStepWithPreimage_Standard(t *testing.T) { + testOutputCannonStepWithPreimage(t, config.AllocTypeStandard) +} + +func TestOutputCannonStepWithPreimage_Multithreaded(t *testing.T) { + testOutputCannonStepWithPreimage(t, config.AllocTypeMTCannon) +} + +func testOutputCannonStepWithPreimage(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) testPreimageStep := func(t *testing.T, preimageType utils.PreimageOpt, preloadPreimage bool) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() - sys, _ := StartFaultDisputeSystem(t, WithBlobBatches()) + sys, _ := StartFaultDisputeSystem(t, WithBlobBatches(), WithAllocType(allocType)) t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) @@ -299,14 +356,22 @@ func TestOutputCannonStepWithPreimage(t *testing.T) { }) } -func TestOutputCannonStepWithKZGPointEvaluation(t *testing.T) { +func TestOutputCannonStepWithKZGPointEvaluation_Standard(t *testing.T) { + testOutputCannonStepWithKzgPointEvaluation(t, config.AllocTypeStandard) +} + +func TestOutputCannonStepWithKZGPointEvaluation_Multithreaded(t *testing.T) { + testOutputCannonStepWithKzgPointEvaluation(t, config.AllocTypeMTCannon) +} + +func testOutputCannonStepWithKzgPointEvaluation(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) testPreimageStep := func(t *testing.T, preloadPreimage bool) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() - sys, _ := StartFaultDisputeSystem(t, WithEcotone()) + sys, _ := StartFaultDisputeSystem(t, WithEcotone(), WithAllocType(allocType)) t.Cleanup(sys.Close) // NOTE: Flake prevention @@ -347,7 +412,15 @@ func 
TestOutputCannonStepWithKZGPointEvaluation(t *testing.T) { }) } -func TestOutputCannonProposedOutputRootValid(t *testing.T) { +func TestOutputCannonProposedOutputRootValid_Standard(t *testing.T) { + testOutputCannonProposedOutputRootValid(t, config.AllocTypeStandard) +} + +func TestOutputCannonProposedOutputRootValid_Multithreaded(t *testing.T) { + testOutputCannonProposedOutputRootValid(t, config.AllocTypeMTCannon) +} + +func testOutputCannonProposedOutputRootValid(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) // honestStepsFail attempts to perform both an attack and defend step using the correct trace. honestStepsFail := func(ctx context.Context, game *disputegame.OutputCannonGameHelper, correctTrace *disputegame.OutputHonestHelper, parentClaimIdx int64) { @@ -406,7 +479,7 @@ func TestOutputCannonProposedOutputRootValid(t *testing.T) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() - sys, l1Client := StartFaultDisputeSystem(t) + sys, l1Client := StartFaultDisputeSystem(t, WithAllocType(allocType)) t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) @@ -436,11 +509,19 @@ func TestOutputCannonProposedOutputRootValid(t *testing.T) { } } -func TestOutputCannonPoisonedPostState(t *testing.T) { +func TestOutputCannonPoisonedPostState_Standard(t *testing.T) { + testOutputCannonPoisonedPostState(t, config.AllocTypeStandard) +} + +func TestOutputCannonPoisonedPostState_Multithreaded(t *testing.T) { + testOutputCannonPoisonedPostState(t, config.AllocTypeMTCannon) +} + +func testOutputCannonPoisonedPostState(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() - sys, l1Client := StartFaultDisputeSystem(t) + sys, l1Client := StartFaultDisputeSystem(t, WithAllocType(allocType)) t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) @@ -500,11 +581,19 @@ func 
TestOutputCannonPoisonedPostState(t *testing.T) { game.WaitForGameStatus(ctx, gameTypes.GameStatusChallengerWon) } -func TestDisputeOutputRootBeyondProposedBlock_ValidOutputRoot(t *testing.T) { +func TestDisputeOutputRootBeyondProposedBlock_ValidOutputRoot_Standard(t *testing.T) { + testDisputeOutputRootBeyondProposedBlockValidOutputRoot(t, config.AllocTypeStandard) +} + +func TestDisputeOutputRootBeyondProposedBlock_ValidOutputRoot_Multithreaded(t *testing.T) { + testDisputeOutputRootBeyondProposedBlockValidOutputRoot(t, config.AllocTypeMTCannon) +} + +func testDisputeOutputRootBeyondProposedBlockValidOutputRoot(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() - sys, l1Client := StartFaultDisputeSystem(t) + sys, l1Client := StartFaultDisputeSystem(t, WithAllocType(allocType)) t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) @@ -550,11 +639,19 @@ func TestDisputeOutputRootBeyondProposedBlock_ValidOutputRoot(t *testing.T) { game.LogGameData(ctx) } -func TestDisputeOutputRootBeyondProposedBlock_InvalidOutputRoot(t *testing.T) { +func TestDisputeOutputRootBeyondProposedBlock_InvalidOutputRoot_Standard(t *testing.T) { + testDisputeOutputRootBeyondProposedBlockInvalidOutputRoot(t, config.AllocTypeStandard) +} + +func TestDisputeOutputRootBeyondProposedBlock_InvalidOutputRoot_Multithreaded(t *testing.T) { + testDisputeOutputRootBeyondProposedBlockInvalidOutputRoot(t, config.AllocTypeMTCannon) +} + +func testDisputeOutputRootBeyondProposedBlockInvalidOutputRoot(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() - sys, l1Client := StartFaultDisputeSystem(t) + sys, l1Client := StartFaultDisputeSystem(t, WithAllocType(allocType)) t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) @@ -601,11 +698,19 @@ func TestDisputeOutputRootBeyondProposedBlock_InvalidOutputRoot(t *testing.T) { 
game.LogGameData(ctx) } -func TestDisputeOutputRoot_ChangeClaimedOutputRoot(t *testing.T) { +func TestTestDisputeOutputRoot_ChangeClaimedOutputRoot_Standard(t *testing.T) { + testTestDisputeOutputRootChangeClaimedOutputRoot(t, config.AllocTypeStandard) +} + +func TestTestDisputeOutputRoot_ChangeClaimedOutputRoot_Multithreaded(t *testing.T) { + testTestDisputeOutputRootChangeClaimedOutputRoot(t, config.AllocTypeMTCannon) +} + +func testTestDisputeOutputRootChangeClaimedOutputRoot(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() - sys, l1Client := StartFaultDisputeSystem(t) + sys, l1Client := StartFaultDisputeSystem(t, WithAllocType(allocType)) t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) @@ -661,7 +766,15 @@ func TestDisputeOutputRoot_ChangeClaimedOutputRoot(t *testing.T) { game.LogGameData(ctx) } -func TestInvalidateUnsafeProposal(t *testing.T) { +func TestInvalidateUnsafeProposal_Standard(t *testing.T) { + testInvalidateUnsafeProposal(t, config.AllocTypeStandard) +} + +func TestInvalidateUnsafeProposal_Multithreaded(t *testing.T) { + testInvalidateUnsafeProposal(t, config.AllocTypeMTCannon) +} + +func testInvalidateUnsafeProposal(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() @@ -693,7 +806,7 @@ func TestInvalidateUnsafeProposal(t *testing.T) { test := test t.Run(test.name, func(t *testing.T) { op_e2e.InitParallel(t, op_e2e.UsesCannon) - sys, l1Client := StartFaultDisputeSystem(t, WithSequencerWindowSize(100000), WithBatcherStopped()) + sys, l1Client := StartFaultDisputeSystem(t, WithSequencerWindowSize(100000), WithBatcherStopped(), WithAllocType(allocType)) t.Cleanup(sys.Close) blockNum := uint64(1) @@ -723,7 +836,15 @@ func TestInvalidateUnsafeProposal(t *testing.T) { } } -func TestInvalidateProposalForFutureBlock(t *testing.T) { +func TestInvalidateProposalForFutureBlock_Standard(t 
*testing.T) { + testInvalidateProposalForFutureBlock(t, config.AllocTypeStandard) +} + +func TestInvalidateProposalForFutureBlock_Multithreaded(t *testing.T) { + testInvalidateProposalForFutureBlock(t, config.AllocTypeMTCannon) +} + +func testInvalidateProposalForFutureBlock(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() @@ -755,7 +876,7 @@ func TestInvalidateProposalForFutureBlock(t *testing.T) { test := test t.Run(test.name, func(t *testing.T) { op_e2e.InitParallel(t, op_e2e.UsesCannon) - sys, l1Client := StartFaultDisputeSystem(t, WithSequencerWindowSize(100000)) + sys, l1Client := StartFaultDisputeSystem(t, WithSequencerWindowSize(100000), WithAllocType(allocType)) t.Cleanup(sys.Close) farFutureBlockNum := uint64(10_000_000) @@ -785,11 +906,19 @@ func TestInvalidateProposalForFutureBlock(t *testing.T) { } } -func TestInvalidateCorrectProposalFutureBlock(t *testing.T) { +func TestInvalidateCorrectProposalFutureBlock_Standard(t *testing.T) { + testInvalidateCorrectProposalFutureBlock(t, config.AllocTypeStandard) +} + +func TestInvalidateCorrectProposalFutureBlock_Multithreaded(t *testing.T) { + testInvalidateCorrectProposalFutureBlock(t, config.AllocTypeMTCannon) +} + +func testInvalidateCorrectProposalFutureBlock(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() // Spin up the system without the batcher so the safe head doesn't advance - sys, l1Client := StartFaultDisputeSystem(t, WithBatcherStopped(), WithSequencerWindowSize(100000)) + sys, l1Client := StartFaultDisputeSystem(t, WithBatcherStopped(), WithSequencerWindowSize(100000), WithAllocType(allocType)) t.Cleanup(sys.Close) // Create a dispute game factory helper. 
@@ -817,11 +946,19 @@ func TestInvalidateCorrectProposalFutureBlock(t *testing.T) { game.LogGameData(ctx) } -func TestOutputCannonHonestSafeTraceExtension_ValidRoot(t *testing.T) { +func TestOutputCannonHonestSafeTraceExtension_ValidRoot_Standard(t *testing.T) { + testOutputCannonHonestSafeTraceExtensionValidRoot(t, config.AllocTypeStandard) +} + +func TestOutputCannonHonestSafeTraceExtension_ValidRoot_Multithreaded(t *testing.T) { + testOutputCannonHonestSafeTraceExtensionValidRoot(t, config.AllocTypeMTCannon) +} + +func testOutputCannonHonestSafeTraceExtensionValidRoot(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() - sys, l1Client := StartFaultDisputeSystem(t) + sys, l1Client := StartFaultDisputeSystem(t, WithAllocType(allocType)) t.Cleanup(sys.Close) // Wait for there to be there are safe L2 blocks past the claimed safe head that have data available on L1 within @@ -871,11 +1008,19 @@ func TestOutputCannonHonestSafeTraceExtension_ValidRoot(t *testing.T) { require.EqualValues(t, gameTypes.GameStatusDefenderWon, game.Status(ctx)) } -func TestOutputCannonHonestSafeTraceExtension_InvalidRoot(t *testing.T) { +func TestOutputCannonHonestSafeTraceExtension_InvalidRoot_Standard(t *testing.T) { + testOutputCannonHonestSafeTraceExtensionInvalidRoot(t, config.AllocTypeStandard) +} + +func TestOutputCannonHonestSafeTraceExtension_InvalidRoot_Multithreaded(t *testing.T) { + testOutputCannonHonestSafeTraceExtensionInvalidRoot(t, config.AllocTypeMTCannon) +} + +func testOutputCannonHonestSafeTraceExtensionInvalidRoot(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() - sys, l1Client := StartFaultDisputeSystem(t) + sys, l1Client := StartFaultDisputeSystem(t, WithAllocType(allocType)) t.Cleanup(sys.Close) // Wait for there to be there are safe L2 blocks past the claimed safe head that have data available on L1 within @@ -912,11 +1057,19 @@ func 
TestOutputCannonHonestSafeTraceExtension_InvalidRoot(t *testing.T) { require.EqualValues(t, gameTypes.GameStatusChallengerWon, game.Status(ctx)) } -func TestAgreeFirstBlockWithOriginOf1(t *testing.T) { +func TestAgreeFirstBlockWithOriginOf1_Standard(t *testing.T) { + testAgreeFirstBlockWithOriginOf1(t, config.AllocTypeStandard) +} + +func TestAgreeFirstBlockWithOriginOf1_Multithreaded(t *testing.T) { + testAgreeFirstBlockWithOriginOf1(t, config.AllocTypeMTCannon) +} + +func testAgreeFirstBlockWithOriginOf1(t *testing.T, allocType config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() - sys, _ := StartFaultDisputeSystem(t) + sys, _ := StartFaultDisputeSystem(t, WithAllocType(allocType)) t.Cleanup(sys.Close) rollupClient := sys.RollupClient("sequencer") diff --git a/op-e2e/faultproofs/permissioned_test.go b/op-e2e/faultproofs/permissioned_test.go index 98e4e2d9fc90..f6b0920b107b 100644 --- a/op-e2e/faultproofs/permissioned_test.go +++ b/op-e2e/faultproofs/permissioned_test.go @@ -4,8 +4,6 @@ import ( "context" "testing" - "github.com/ethereum-optimism/optimism/op-e2e/config" - op_e2e "github.com/ethereum-optimism/optimism/op-e2e" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger" @@ -28,7 +26,7 @@ func TestPermissionedGameType(t *testing.T) { gameFactory.StartChallenger(ctx, "TowerDefense", challenger.WithValidPrestateRequired(), challenger.WithInvalidCannonPrestate(), - challenger.WithPermissioned(t, sys.RollupConfig, sys.L2GenesisCfg, config.AllocTypeFromEnv()), + challenger.WithPermissioned(t, sys), challenger.WithPrivKey(sys.Cfg.Secrets.Alice), ) diff --git a/op-e2e/faultproofs/precompile_test.go b/op-e2e/faultproofs/precompile_test.go index 78fcff01fc49..7fa37158fd16 100644 --- a/op-e2e/faultproofs/precompile_test.go +++ b/op-e2e/faultproofs/precompile_test.go @@ -7,9 +7,8 @@ import ( "path/filepath" "testing" - e2econfig "github.com/ethereum-optimism/optimism/op-e2e/config" - op_e2e 
"github.com/ethereum-optimism/optimism/op-e2e" + e2e_config "github.com/ethereum-optimism/optimism/op-e2e/config" "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" "github.com/ethereum-optimism/optimism/op-e2e/system/helpers" @@ -31,7 +30,15 @@ import ( "github.com/ethereum-optimism/optimism/op-service/testlog" ) -func TestPrecompiles(t *testing.T) { +func TestPrecompiles_Standard(t *testing.T) { + testPrecompiles(t, e2e_config.AllocTypeStandard) +} + +func TestPrecompiles_Multithreaded(t *testing.T) { + testPrecompiles(t, e2e_config.AllocTypeMTCannon) +} + +func testPrecompiles(t *testing.T, allocType e2e_config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) // precompile test vectors copied from go-ethereum tests := []struct { @@ -81,6 +88,7 @@ func TestPrecompiles(t *testing.T) { ctx := context.Background() genesisTime := hexutil.Uint64(0) cfg := e2esys.EcotoneSystemConfig(t, &genesisTime) + cfg.AllocType = allocType // We don't need a verifier - just the sequencer is enough delete(cfg.Nodes, "verifier") // Use a small sequencer window size to avoid test timeout while waiting for empty blocks @@ -141,7 +149,7 @@ func TestPrecompiles(t *testing.T) { t.Skipf("%v is not accelerated so no preimgae to upload", test.name) } ctx := context.Background() - sys, _ := StartFaultDisputeSystem(t, WithBlobBatches()) + sys, _ := StartFaultDisputeSystem(t, WithBlobBatches(), WithAllocType(allocType)) l2Seq := sys.NodeClient("sequencer") aliceKey := sys.Cfg.Secrets.Alice @@ -175,11 +183,20 @@ func TestPrecompiles(t *testing.T) { } } -func TestGranitePrecompiles(t *testing.T) { +func TestGranitePrecompiles_Standard(t *testing.T) { + testGranitePrecompiles(t, e2e_config.AllocTypeStandard) +} + +func TestGranitePrecompiles_Multithreaded(t *testing.T) { + testGranitePrecompiles(t, e2e_config.AllocTypeMTCannon) +} + +func testGranitePrecompiles(t *testing.T, allocType e2e_config.AllocType) { op_e2e.InitParallel(t, op_e2e.UsesCannon) ctx := context.Background() 
genesisTime := hexutil.Uint64(0) cfg := e2esys.GraniteSystemConfig(t, &genesisTime) + cfg.AllocType = allocType // We don't need a verifier - just the sequencer is enough delete(cfg.Nodes, "verifier") // Use a small sequencer window size to avoid test timeout while waiting for empty blocks @@ -252,7 +269,7 @@ func runCannon(t *testing.T, ctx context.Context, sys *e2esys.System, inputs uti l1Beacon := sys.L1BeaconEndpoint().RestHTTP() rollupEndpoint := sys.RollupEndpoint("sequencer").RPC() l2Endpoint := sys.NodeEndpoint("sequencer").RPC() - cannonOpts := challenger.WithCannon(t, sys.RollupCfg(), sys.L2Genesis(), e2econfig.AllocTypeFromEnv()) + cannonOpts := challenger.WithCannon(t, sys) dir := t.TempDir() proofsDir := filepath.Join(dir, "cannon-proofs") cfg := config.NewConfig(common.Address{}, l1Endpoint, l1Beacon, rollupEndpoint, l2Endpoint, dir) diff --git a/op-e2e/faultproofs/util.go b/op-e2e/faultproofs/util.go index bbe20b3cde16..66b9be0060e5 100644 --- a/op-e2e/faultproofs/util.go +++ b/op-e2e/faultproofs/util.go @@ -50,8 +50,14 @@ func WithSequencerWindowSize(size uint64) faultDisputeConfigOpts { } } +func WithAllocType(allocType config.AllocType) faultDisputeConfigOpts { + return func(cfg *e2esys.SystemConfig) { + cfg.AllocType = allocType + } +} + func StartFaultDisputeSystem(t *testing.T, opts ...faultDisputeConfigOpts) (*e2esys.System, *ethclient.Client) { - cfg := e2esys.DefaultSystemConfig(t, e2esys.WithAllocType(config.AllocTypeFromEnv())) + cfg := e2esys.DefaultSystemConfig(t) delete(cfg.Nodes, "verifier") cfg.Nodes["sequencer"].SafeDBPath = t.TempDir() cfg.DeployConfig.SequencerWindowSize = 4 diff --git a/op-e2e/system/e2esys/setup.go b/op-e2e/system/e2esys/setup.go index d461cda61657..ad1969eeed8f 100644 --- a/op-e2e/system/e2esys/setup.go +++ b/op-e2e/system/e2esys/setup.go @@ -404,6 +404,10 @@ func (sys *System) L2Genesis() *core.Genesis { return sys.L2GenesisCfg } +func (sys *System) AllocType() config.AllocType { + return sys.Cfg.AllocType +} + 
func (sys *System) L1Slot(l1Timestamp uint64) uint64 { return (l1Timestamp - uint64(sys.Cfg.DeployConfig.L1GenesisBlockTimestamp)) / sys.Cfg.DeployConfig.L1BlockTime From a12738b7b17f5b2cc00d45c0545866f0b8cae5d4 Mon Sep 17 00:00:00 2001 From: Maurelian Date: Wed, 2 Oct 2024 11:05:57 -0400 Subject: [PATCH 108/116] feat: deployImplementations and depImplementationsInterop (#12226) * feat: Use DeploySuperchain script in Deploy.s.sol Demonstrate that build breaks when using high level syntax * fix: Cannot set null protocol versions error * feat: Also save impls * fix: semver lock * fix: bump ProtocolVersions semver * feat: Add superchainProxyAdmin * feat: Undo removeing ProtocolVersion type from interface * fix: semver-lock * feat: remove setupOpChainAdmin * fix: transfer ProxyAdmin ownership after all setup is complete * feat: separate deployImplementations * feat: split up deployImplementations and deployImplementationsInterop * fix: lint --- .../scripts/deploy/Deploy.s.sol | 137 +++++++++++------- 1 file changed, 83 insertions(+), 54 deletions(-) diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index 2843b60be5d7..b625bdc31d59 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -269,8 +269,12 @@ contract Deploy is Deployer { setupSuperchain(); console.log("set up superchain!"); } - - setupOpChainAdmin(); + if (cfg.useInterop()) { + deployImplementationsInterop(); + } else { + deployImplementations(); + } + setupOpChain(); if (cfg.useAltDA()) { bytes32 typeHash = keccak256(bytes(cfg.daCommitmentType())); bytes32 keccakHash = keccak256(bytes("KeccakCommitment")); @@ -278,8 +282,7 @@ contract Deploy is Deployer { setupOpAltDA(); } } - - setupOpChain(); + transferProxyAdminOwnership({ _isSuperchain: false }); console.log("set up op chain!"); } @@ -287,12 +290,6 @@ contract Deploy is Deployer { // High Level 
Deployment Functions // //////////////////////////////////////////////////////////////// - /// @notice Deploy the address manager and proxy admin contracts. - function setupOpChainAdmin() public { - deployAddressManager(); - deployProxyAdmin({ _isSuperchain: false }); - } - /// @notice Deploy a full system with a new SuperchainConfig /// The Superchain system has 2 singleton contracts which lie outside of an OP Chain: /// 1. The SuperchainConfig contract @@ -324,14 +321,14 @@ contract Deploy is Deployer { /// @notice Deploy a new OP Chain, with an existing SuperchainConfig provided function setupOpChain() public { console.log("Deploying OP Chain"); + deployAddressManager(); + deployProxyAdmin({ _isSuperchain: false }); // Ensure that the requisite contracts are deployed mustGetAddress("SuperchainConfigProxy"); mustGetAddress("AddressManager"); mustGetAddress("ProxyAdmin"); - deployImplementations(); - deployOpChain(); initializeOpChain(); @@ -342,7 +339,6 @@ contract Deploy is Deployer { transferDisputeGameFactoryOwnership(); transferDelayedWETHOwnership(); - transferProxyAdminOwnership({ _isSuperchain: false }); } /// @notice Deploy all of the OP Chain specific contracts @@ -378,8 +374,9 @@ contract Deploy is Deployer { deploySystemConfig(); deployL1StandardBridge(); deployL1ERC721Bridge(); - deployOptimismPortal(); + deployOptimismPortal(); // todo: pull this out into an override option after DeployImplementations runs deployL2OutputOracle(); + // Fault proofs deployOptimismPortal2(); deployDisputeGameFactory(); @@ -388,6 +385,24 @@ contract Deploy is Deployer { deployMips(); } + /// @notice Deploy all of the implementations + function deployImplementationsInterop() public { + console.log("Deploying implementations"); + deployL1CrossDomainMessenger(); + deployOptimismMintableERC20Factory(); + deploySystemConfigInterop(); + deployL1StandardBridge(); + deployL1ERC721Bridge(); + deployL2OutputOracle(); + + // Fault proofs + deployOptimismPortalInterop(); + 
deployDisputeGameFactory(); + deployDelayedWETH(); + deployPreimageOracle(); + deployMips(); + } + /// @notice Initialize all of the proxies in an OP Chain by upgrading to the correct proxy and calling the /// initialize function function initializeOpChain() public { @@ -633,32 +648,45 @@ contract Deploy is Deployer { uint32(cfg.respectedGameType()) == cfg.respectedGameType(), "Deploy: respectedGameType must fit into uint32" ); - if (cfg.useInterop()) { - addr_ = DeployUtils.create2AndSave({ - _save: this, - _salt: _implSalt(), - _name: "OptimismPortalInterop", - _args: DeployUtils.encodeConstructor( - abi.encodeCall( - IOptimismPortalInterop.__constructor__, - (cfg.proofMaturityDelaySeconds(), cfg.disputeGameFinalityDelaySeconds()) - ) + addr_ = DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "OptimismPortal2", + _args: DeployUtils.encodeConstructor( + abi.encodeCall( + IOptimismPortal2.__constructor__, + (cfg.proofMaturityDelaySeconds(), cfg.disputeGameFinalityDelaySeconds()) ) - }); - save("OptimismPortal2", addr_); - } else { - addr_ = DeployUtils.create2AndSave({ - _save: this, - _salt: _implSalt(), - _name: "OptimismPortal2", - _args: DeployUtils.encodeConstructor( - abi.encodeCall( - IOptimismPortal2.__constructor__, - (cfg.proofMaturityDelaySeconds(), cfg.disputeGameFinalityDelaySeconds()) - ) + ) + }); + + // Override the `OptimismPortal2` contract to the deployed implementation. This is necessary + // to check the `OptimismPortal2` implementation alongside dependent contracts, which + // are always proxies. + Types.ContractSet memory contracts = _proxiesUnstrict(); + contracts.OptimismPortal2 = addr_; + ChainAssertions.checkOptimismPortal2({ _contracts: contracts, _cfg: cfg, _isProxy: false }); + } + + /// @notice Deploy the OptimismPortalInterop contract + function deployOptimismPortalInterop() public broadcast returns (address addr_) { + // Could also verify this inside DeployConfig but doing it here is a bit more reliable. 
+ require( + uint32(cfg.respectedGameType()) == cfg.respectedGameType(), "Deploy: respectedGameType must fit into uint32" + ); + + addr_ = DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "OptimismPortalInterop", + _args: DeployUtils.encodeConstructor( + abi.encodeCall( + IOptimismPortalInterop.__constructor__, + (cfg.proofMaturityDelaySeconds(), cfg.disputeGameFinalityDelaySeconds()) ) - }); - } + ) + }); + save("OptimismPortal2", addr_); // Override the `OptimismPortal2` contract to the deployed implementation. This is necessary // to check the `OptimismPortal2` implementation alongside dependent contracts, which @@ -814,22 +842,23 @@ contract Deploy is Deployer { /// @notice Deploy the SystemConfig function deploySystemConfig() public broadcast returns (address addr_) { - if (cfg.useInterop()) { - addr_ = DeployUtils.create2AndSave({ - _save: this, - _salt: _implSalt(), - _name: "SystemConfigInterop", - _args: DeployUtils.encodeConstructor(abi.encodeCall(ISystemConfigInterop.__constructor__, ())) - }); - save("SystemConfig", addr_); - } else { - addr_ = DeployUtils.create2AndSave({ - _save: this, - _salt: _implSalt(), - _name: "SystemConfig", - _args: DeployUtils.encodeConstructor(abi.encodeCall(ISystemConfig.__constructor__, ())) - }); - } + addr_ = DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "SystemConfig", + _args: DeployUtils.encodeConstructor(abi.encodeCall(ISystemConfig.__constructor__, ())) + }); + } + + /// @notice Deploy the SystemConfigInterop contract + function deploySystemConfigInterop() public broadcast returns (address addr_) { + addr_ = DeployUtils.create2AndSave({ + _save: this, + _salt: _implSalt(), + _name: "SystemConfigInterop", + _args: DeployUtils.encodeConstructor(abi.encodeCall(ISystemConfigInterop.__constructor__, ())) + }); + save("SystemConfig", addr_); // Override the `SystemConfig` contract to the deployed implementation. 
This is necessary // to check the `SystemConfig` implementation alongside dependent contracts, which From 40a70bda5e5e34c63a05678387264caccb33a3fb Mon Sep 17 00:00:00 2001 From: Axel Kingsley Date: Wed, 2 Oct 2024 10:44:22 -0500 Subject: [PATCH 109/116] Eth: BlockRef (#12251) --- op-service/eth/id.go | 4 ++++ op-supervisor/supervisor/backend/db/db.go | 2 +- .../supervisor/backend/safety/safety.go | 24 +++++++++---------- .../supervisor/backend/safety/views.go | 4 ++-- .../backend/source/chain_processor.go | 10 ++++---- .../backend/source/log_processor.go | 6 ++--- .../backend/source/log_processor_test.go | 4 ++-- 7 files changed, 29 insertions(+), 25 deletions(-) diff --git a/op-service/eth/id.go b/op-service/eth/id.go index 7beeabfe329c..c323d1e69b9a 100644 --- a/op-service/eth/id.go +++ b/op-service/eth/id.go @@ -85,6 +85,10 @@ func (id L1BlockRef) ParentID() BlockID { } } +// BlockRef is a Block Ref indepdendent of L1 or L2 +// Because L1BlockRefs are strict subsets of L2BlockRefs, BlockRef is a direct alias of L1BlockRef +type BlockRef = L1BlockRef + func (id L2BlockRef) ID() BlockID { return BlockID{ Hash: id.Hash, diff --git a/op-supervisor/supervisor/backend/db/db.go b/op-supervisor/supervisor/backend/db/db.go index 8459266c0704..c4f8296d1ce0 100644 --- a/op-supervisor/supervisor/backend/db/db.go +++ b/op-supervisor/supervisor/backend/db/db.go @@ -167,7 +167,7 @@ func (db *ChainsDB) AddLog( func (db *ChainsDB) SealBlock( chain types.ChainID, - block eth.L2BlockRef) error { + block eth.BlockRef) error { logDB, ok := db.logDBs[chain] if !ok { return fmt.Errorf("%w: %v", ErrUnknownChain, chain) diff --git a/op-supervisor/supervisor/backend/safety/safety.go b/op-supervisor/supervisor/backend/safety/safety.go index c7828336ba57..326c72755e35 100644 --- a/op-supervisor/supervisor/backend/safety/safety.go +++ b/op-supervisor/supervisor/backend/safety/safety.go @@ -14,9 +14,9 @@ import ( type SafetyIndex interface { // Updaters for the latest local safety status of 
each chain - UpdateLocalUnsafe(chainID types.ChainID, ref eth.L2BlockRef) error - UpdateLocalSafe(chainID types.ChainID, at eth.L1BlockRef, ref eth.L2BlockRef) error - UpdateFinalizeL1(ref eth.L1BlockRef) error + UpdateLocalUnsafe(chainID types.ChainID, ref eth.BlockRef) error + UpdateLocalSafe(chainID types.ChainID, at eth.BlockRef, ref eth.BlockRef) error + UpdateFinalizeL1(ref eth.BlockRef) error // Getters for the latest safety status of each chain UnsafeL2(chainID types.ChainID) (heads.HeadPointer, error) @@ -42,10 +42,10 @@ type safetyIndex struct { finalized map[types.ChainID]eth.BlockID // remember what each non-finalized L2 block is derived from - derivedFrom map[types.ChainID]map[common.Hash]eth.L1BlockRef + derivedFrom map[types.ChainID]map[common.Hash]eth.BlockRef // the last received L1 finality signal. - finalizedL1 eth.L1BlockRef + finalizedL1 eth.BlockRef } func NewSafetyIndex(log log.Logger, chains ChainsDBClient) *safetyIndex { @@ -55,12 +55,12 @@ func NewSafetyIndex(log log.Logger, chains ChainsDBClient) *safetyIndex { unsafe: make(map[types.ChainID]*View), safe: make(map[types.ChainID]*View), finalized: make(map[types.ChainID]eth.BlockID), - derivedFrom: make(map[types.ChainID]map[common.Hash]eth.L1BlockRef), + derivedFrom: make(map[types.ChainID]map[common.Hash]eth.BlockRef), } } // UpdateLocalUnsafe updates the local-unsafe view for the given chain, and advances the cross-unsafe status. 
-func (r *safetyIndex) UpdateLocalUnsafe(chainID types.ChainID, ref eth.L2BlockRef) error { +func (r *safetyIndex) UpdateLocalUnsafe(chainID types.ChainID, ref eth.BlockRef) error { view, ok := r.safe[chainID] if !ok { iter, err := r.chains.IteratorStartingAt(chainID, ref.Number, 0) @@ -76,11 +76,11 @@ func (r *safetyIndex) UpdateLocalUnsafe(chainID types.ChainID, ref eth.L2BlockRe LastSealedTimestamp: ref.Time, LogsSince: 0, }, - localDerivedFrom: eth.L1BlockRef{}, + localDerivedFrom: eth.BlockRef{}, validWithinView: r.ValidWithinUnsafeView, } r.unsafe[chainID] = view - } else if err := view.UpdateLocal(eth.L1BlockRef{}, ref); err != nil { + } else if err := view.UpdateLocal(eth.BlockRef{}, ref); err != nil { return fmt.Errorf("failed to update local-unsafe: %w", err) } local, _ := r.unsafe[chainID].Local() @@ -102,7 +102,7 @@ func (r *safetyIndex) advanceCrossUnsafe() { // UpdateLocalSafe updates the local-safe view for the given chain, and advances the cross-safe status. func (r *safetyIndex) UpdateLocalSafe( - chainID types.ChainID, at eth.L1BlockRef, ref eth.L2BlockRef) error { + chainID types.ChainID, at eth.BlockRef, ref eth.BlockRef) error { view, ok := r.safe[chainID] if !ok { iter, err := r.chains.IteratorStartingAt(chainID, ref.Number, 0) @@ -129,7 +129,7 @@ func (r *safetyIndex) UpdateLocalSafe( // register what this L2 block is derived from m, ok := r.derivedFrom[chainID] if !ok { - m = make(map[common.Hash]eth.L1BlockRef) + m = make(map[common.Hash]eth.BlockRef) r.derivedFrom[chainID] = m } m[ref.Hash] = at @@ -152,7 +152,7 @@ func (r *safetyIndex) advanceCrossSafe() { } // UpdateFinalizeL1 updates the finalized L1 block, and advances the finalized safety status. 
-func (r *safetyIndex) UpdateFinalizeL1(ref eth.L1BlockRef) error { +func (r *safetyIndex) UpdateFinalizeL1(ref eth.BlockRef) error { if ref.Number <= r.finalizedL1.Number { return fmt.Errorf("ignoring old L1 finality signal of %s, already have %s", ref, r.finalizedL1) } diff --git a/op-supervisor/supervisor/backend/safety/views.go b/op-supervisor/supervisor/backend/safety/views.go index 98941dd7e6e9..e1c704fa260f 100644 --- a/op-supervisor/supervisor/backend/safety/views.go +++ b/op-supervisor/supervisor/backend/safety/views.go @@ -15,7 +15,7 @@ type View struct { iter logs.Iterator localView heads.HeadPointer - localDerivedFrom eth.L1BlockRef + localDerivedFrom eth.BlockRef validWithinView func(l1View uint64, execMsg *types.ExecutingMessage) error } @@ -31,7 +31,7 @@ func (vi *View) Local() (heads.HeadPointer, error) { return vi.localView, nil } -func (vi *View) UpdateLocal(at eth.L1BlockRef, ref eth.L2BlockRef) error { +func (vi *View) UpdateLocal(at eth.BlockRef, ref eth.BlockRef) error { vi.localView = heads.HeadPointer{ LastSealedBlockHash: ref.Hash, LastSealedBlockNum: ref.Number, diff --git a/op-supervisor/supervisor/backend/source/chain_processor.go b/op-supervisor/supervisor/backend/source/chain_processor.go index 60568fe296fb..9c63950a1629 100644 --- a/op-supervisor/supervisor/backend/source/chain_processor.go +++ b/op-supervisor/supervisor/backend/source/chain_processor.go @@ -21,7 +21,7 @@ type Source interface { } type LogProcessor interface { - ProcessLogs(ctx context.Context, block eth.L2BlockRef, receipts gethtypes.Receipts) error + ProcessLogs(ctx context.Context, block eth.BlockRef, receipts gethtypes.Receipts) error } type DatabaseRewinder interface { @@ -29,9 +29,9 @@ type DatabaseRewinder interface { LatestBlockNum(chain types.ChainID) (num uint64, ok bool) } -type BlockProcessorFn func(ctx context.Context, block eth.L1BlockRef) error +type BlockProcessorFn func(ctx context.Context, block eth.BlockRef) error -func (fn BlockProcessorFn) 
ProcessBlock(ctx context.Context, block eth.L1BlockRef) error { +func (fn BlockProcessorFn) ProcessBlock(ctx context.Context, block eth.BlockRef) error { return fn(ctx, block) } @@ -131,7 +131,7 @@ func (s *ChainProcessor) worker() { func (s *ChainProcessor) update(nextNum uint64) error { ctx, cancel := context.WithTimeout(s.ctx, time.Second*10) nextL1, err := s.client.L1BlockRefByNumber(ctx, nextNum) - next := eth.L2BlockRef{ + next := eth.BlockRef{ Hash: nextL1.Hash, ParentHash: nextL1.ParentHash, Number: nextL1.Number, @@ -166,7 +166,7 @@ func (s *ChainProcessor) update(nextNum uint64) error { return nil } -func (s *ChainProcessor) OnNewHead(ctx context.Context, head eth.L1BlockRef) error { +func (s *ChainProcessor) OnNewHead(ctx context.Context, head eth.BlockRef) error { // update the latest target s.lastHead.Store(head.Number) // signal that we have something to process diff --git a/op-supervisor/supervisor/backend/source/log_processor.go b/op-supervisor/supervisor/backend/source/log_processor.go index 8a815f7ca9e9..d7f7e1fbeae0 100644 --- a/op-supervisor/supervisor/backend/source/log_processor.go +++ b/op-supervisor/supervisor/backend/source/log_processor.go @@ -15,12 +15,12 @@ import ( ) type LogStorage interface { - SealBlock(chain types.ChainID, block eth.L2BlockRef) error + SealBlock(chain types.ChainID, block eth.BlockRef) error AddLog(chain types.ChainID, logHash common.Hash, parentBlock eth.BlockID, logIdx uint32, execMsg *types.ExecutingMessage) error } type ChainsDBClientForLogProcessor interface { - SealBlock(chain types.ChainID, block eth.L2BlockRef) error + SealBlock(chain types.ChainID, block eth.BlockRef) error AddLog(chain types.ChainID, logHash common.Hash, parentBlock eth.BlockID, logIdx uint32, execMsg *types.ExecutingMessage) error } @@ -44,7 +44,7 @@ func newLogProcessor(chain types.ChainID, logStore LogStorage) *logProcessor { // ProcessLogs processes logs from a block and stores them in the log storage // for any logs that are related 
to executing messages, they are decoded and stored -func (p *logProcessor) ProcessLogs(_ context.Context, block eth.L2BlockRef, rcpts ethTypes.Receipts) error { +func (p *logProcessor) ProcessLogs(_ context.Context, block eth.BlockRef, rcpts ethTypes.Receipts) error { for _, rcpt := range rcpts { for _, l := range rcpt.Logs { // log hash represents the hash of *this* log as a potentially initiating message diff --git a/op-supervisor/supervisor/backend/source/log_processor_test.go b/op-supervisor/supervisor/backend/source/log_processor_test.go index 6e96d731fcff..2e1322f55aed 100644 --- a/op-supervisor/supervisor/backend/source/log_processor_test.go +++ b/op-supervisor/supervisor/backend/source/log_processor_test.go @@ -17,7 +17,7 @@ var logProcessorChainID = types.ChainIDFromUInt64(4) func TestLogProcessor(t *testing.T) { ctx := context.Background() - block1 := eth.L2BlockRef{ + block1 := eth.BlockRef{ ParentHash: common.Hash{0x42}, Number: 100, Hash: common.Hash{0x11}, @@ -205,7 +205,7 @@ type stubLogStorage struct { seals []storedSeal } -func (s *stubLogStorage) SealBlock(chainID types.ChainID, block eth.L2BlockRef) error { +func (s *stubLogStorage) SealBlock(chainID types.ChainID, block eth.BlockRef) error { if logProcessorChainID != chainID { return fmt.Errorf("chain id mismatch, expected %v but got %v", logProcessorChainID, chainID) } From 28283a927e3124fa0b2cf8d47d1a734e95478215 Mon Sep 17 00:00:00 2001 From: Blaine Malone Date: Wed, 2 Oct 2024 11:46:39 -0400 Subject: [PATCH 110/116] Configurable Dispute Game Parameters (#12228) * Configurable Dispute Game Parameters * fix: more assertions for DeployOPChain.t.sol. * fix: running pre-pr command. * fix: changing dispute game types. * fix: semver lock file change. * fix: uint64 for maxGameDepth and splitDepth on go side. 
* fix: safe casting in solidity --- op-chain-ops/deployer/opcm/opchain.go | 45 ++++++++++----- op-chain-ops/deployer/pipeline/opchain.go | 30 ++++++---- op-chain-ops/interopgen/configs.go | 12 +++- op-chain-ops/interopgen/deploy.go | 30 ++++++---- op-chain-ops/interopgen/recipe.go | 12 +++- .../scripts/DeployOPChain.s.sol | 57 ++++++++++++++++++- packages/contracts-bedrock/semver-lock.json | 4 +- .../snapshots/abi/OPContractsManager.json | 30 ++++++++++ .../abi/OPContractsManagerInterop.json | 30 ++++++++++ .../src/L1/OPContractsManager.sol | 23 +++++--- .../test/L1/OPContractsManager.t.sol | 15 ++++- .../test/opcm/DeployOPChain.t.sol | 21 ++++++- 12 files changed, 252 insertions(+), 57 deletions(-) diff --git a/op-chain-ops/deployer/opcm/opchain.go b/op-chain-ops/deployer/opcm/opchain.go index bf28ed0d4efd..7a750f72fb0d 100644 --- a/op-chain-ops/deployer/opcm/opchain.go +++ b/op-chain-ops/deployer/opcm/opchain.go @@ -36,6 +36,13 @@ type DeployOPChainInput struct { OpcmProxy common.Address SaltMixer string GasLimit uint64 + + DisputeGameType uint32 + DisputeAbsolutePrestate common.Hash + DisputeMaxGameDepth uint64 + DisputeSplitDepth uint64 + DisputeClockExtension uint64 + DisputeMaxClockDuration uint64 } func (input *DeployOPChainInput) InputSet() bool { @@ -119,13 +126,19 @@ type opcmRoles struct { // opcmDeployInput is the input struct for the deploy method of the OPStackManager contract. We // define a separate struct here to match what the OPSM contract expects. 
type opcmDeployInput struct { - Roles opcmRoles - BasefeeScalar uint32 - BlobBasefeeScalar uint32 - L2ChainId *big.Int - StartingAnchorRoots []byte - SaltMixer string - GasLimit uint64 + Roles opcmRoles + BasefeeScalar uint32 + BlobBasefeeScalar uint32 + L2ChainId *big.Int + StartingAnchorRoots []byte + SaltMixer string + GasLimit uint64 + DisputeGameType uint32 + DisputeAbsolutePrestate common.Hash + DisputeMaxGameDepth *big.Int + DisputeSplitDepth *big.Int + DisputeClockExtension uint64 + DisputeMaxClockDuration uint64 } // decodeOutputABIJSON defines an ABI for a fake method called "decodeOutput" that returns the @@ -240,12 +253,18 @@ func DeployOPChainRaw( Proposer: input.Proposer, Challenger: input.Challenger, }, - BasefeeScalar: input.BasefeeScalar, - BlobBasefeeScalar: input.BlobBaseFeeScalar, - L2ChainId: input.L2ChainId, - StartingAnchorRoots: input.StartingAnchorRoots(), - SaltMixer: input.SaltMixer, - GasLimit: input.GasLimit, + BasefeeScalar: input.BasefeeScalar, + BlobBasefeeScalar: input.BlobBaseFeeScalar, + L2ChainId: input.L2ChainId, + StartingAnchorRoots: input.StartingAnchorRoots(), + SaltMixer: input.SaltMixer, + GasLimit: input.GasLimit, + DisputeGameType: input.DisputeGameType, + DisputeAbsolutePrestate: input.DisputeAbsolutePrestate, + DisputeMaxGameDepth: new(big.Int).SetUint64(input.DisputeMaxGameDepth), + DisputeSplitDepth: new(big.Int).SetUint64(input.DisputeSplitDepth), + DisputeClockExtension: input.DisputeClockExtension, + DisputeMaxClockDuration: input.DisputeMaxClockDuration, }) if err != nil { return out, fmt.Errorf("failed to pack deploy input: %w", err) diff --git a/op-chain-ops/deployer/pipeline/opchain.go b/op-chain-ops/deployer/pipeline/opchain.go index 9117cddb2673..c97f162e9401 100644 --- a/op-chain-ops/deployer/pipeline/opchain.go +++ b/op-chain-ops/deployer/pipeline/opchain.go @@ -29,18 +29,24 @@ func DeployOPChain(ctx context.Context, env *Env, artifactsFS foundry.StatDirFs, } input := opcm.DeployOPChainInput{ - 
OpChainProxyAdminOwner: thisIntent.Roles.ProxyAdminOwner, - SystemConfigOwner: thisIntent.Roles.SystemConfigOwner, - Batcher: thisIntent.Roles.Batcher, - UnsafeBlockSigner: thisIntent.Roles.UnsafeBlockSigner, - Proposer: thisIntent.Roles.Proposer, - Challenger: thisIntent.Roles.Challenger, - BasefeeScalar: 1368, - BlobBaseFeeScalar: 801949, - L2ChainId: chainID.Big(), - OpcmProxy: st.ImplementationsDeployment.OpcmProxyAddress, - SaltMixer: st.Create2Salt.String(), // passing through salt generated at state initialization - GasLimit: 30_000_000, // TODO: make this configurable + OpChainProxyAdminOwner: thisIntent.Roles.ProxyAdminOwner, + SystemConfigOwner: thisIntent.Roles.SystemConfigOwner, + Batcher: thisIntent.Roles.Batcher, + UnsafeBlockSigner: thisIntent.Roles.UnsafeBlockSigner, + Proposer: thisIntent.Roles.Proposer, + Challenger: thisIntent.Roles.Challenger, + BasefeeScalar: 1368, + BlobBaseFeeScalar: 801949, + L2ChainId: chainID.Big(), + OpcmProxy: st.ImplementationsDeployment.OpcmProxyAddress, + SaltMixer: st.Create2Salt.String(), // passing through salt generated at state initialization + GasLimit: 30_000_000, + DisputeGameType: 1, // PERMISSIONED_CANNON Game Type + DisputeAbsolutePrestate: common.HexToHash("0x038512e02c4c3f7bdaec27d00edf55b7155e0905301e1a88083e4e0a6764d54c"), + DisputeMaxGameDepth: 73, + DisputeSplitDepth: 30, + DisputeClockExtension: 10800, // 3 hours (input in seconds) + DisputeMaxClockDuration: 302400, // 3.5 days (input in seconds) } var dco opcm.DeployOPChainOutput diff --git a/op-chain-ops/interopgen/configs.go b/op-chain-ops/interopgen/configs.go index 6310b3c068ef..1008014c437a 100644 --- a/op-chain-ops/interopgen/configs.go +++ b/op-chain-ops/interopgen/configs.go @@ -75,9 +75,15 @@ type L2Config struct { Challenger common.Address SystemConfigOwner common.Address genesis.L2InitializationConfig - Prefund map[common.Address]*big.Int - SaltMixer string - GasLimit uint64 + Prefund map[common.Address]*big.Int + SaltMixer string + 
GasLimit uint64 + DisputeGameType uint32 + DisputeAbsolutePrestate common.Hash + DisputeMaxGameDepth uint64 + DisputeSplitDepth uint64 + DisputeClockExtension uint64 + DisputeMaxClockDuration uint64 } func (c *L2Config) Check(log log.Logger) error { diff --git a/op-chain-ops/interopgen/deploy.go b/op-chain-ops/interopgen/deploy.go index 1bcef25ea0f7..7f238023572a 100644 --- a/op-chain-ops/interopgen/deploy.go +++ b/op-chain-ops/interopgen/deploy.go @@ -197,18 +197,24 @@ func DeployL2ToL1(l1Host *script.Host, superCfg *SuperchainConfig, superDeployme l1Host.SetTxOrigin(cfg.Deployer) output, err := opcm.DeployOPChain(l1Host, opcm.DeployOPChainInput{ - OpChainProxyAdminOwner: cfg.ProxyAdminOwner, - SystemConfigOwner: cfg.SystemConfigOwner, - Batcher: cfg.BatchSenderAddress, - UnsafeBlockSigner: cfg.P2PSequencerAddress, - Proposer: cfg.Proposer, - Challenger: cfg.Challenger, - BasefeeScalar: cfg.GasPriceOracleBaseFeeScalar, - BlobBaseFeeScalar: cfg.GasPriceOracleBlobBaseFeeScalar, - L2ChainId: new(big.Int).SetUint64(cfg.L2ChainID), - OpcmProxy: superDeployment.OpcmProxy, - SaltMixer: cfg.SaltMixer, - GasLimit: cfg.GasLimit, + OpChainProxyAdminOwner: cfg.ProxyAdminOwner, + SystemConfigOwner: cfg.SystemConfigOwner, + Batcher: cfg.BatchSenderAddress, + UnsafeBlockSigner: cfg.P2PSequencerAddress, + Proposer: cfg.Proposer, + Challenger: cfg.Challenger, + BasefeeScalar: cfg.GasPriceOracleBaseFeeScalar, + BlobBaseFeeScalar: cfg.GasPriceOracleBlobBaseFeeScalar, + L2ChainId: new(big.Int).SetUint64(cfg.L2ChainID), + OpcmProxy: superDeployment.OpcmProxy, + SaltMixer: cfg.SaltMixer, + GasLimit: cfg.GasLimit, + DisputeGameType: cfg.DisputeGameType, + DisputeAbsolutePrestate: cfg.DisputeAbsolutePrestate, + DisputeMaxGameDepth: cfg.DisputeMaxGameDepth, + DisputeSplitDepth: cfg.DisputeSplitDepth, + DisputeClockExtension: cfg.DisputeClockExtension, + DisputeMaxClockDuration: cfg.DisputeMaxClockDuration, }) if err != nil { return nil, fmt.Errorf("failed to deploy L2 OP chain: %w", err) 
diff --git a/op-chain-ops/interopgen/recipe.go b/op-chain-ops/interopgen/recipe.go index dc27e9d7d100..2d62de5c05c6 100644 --- a/op-chain-ops/interopgen/recipe.go +++ b/op-chain-ops/interopgen/recipe.go @@ -249,9 +249,15 @@ func InteropL2DevConfig(l1ChainID, l2ChainID uint64, addrs devkeys.Addresses) (* UseAltDA: false, }, }, - Prefund: make(map[common.Address]*big.Int), - SaltMixer: "", - GasLimit: 30_000_000, + Prefund: make(map[common.Address]*big.Int), + SaltMixer: "", + GasLimit: 30_000_000, + DisputeGameType: 1, // PERMISSIONED_CANNON Game Type + DisputeAbsolutePrestate: common.HexToHash("0x038512e02c4c3f7bdaec27d00edf55b7155e0905301e1a88083e4e0a6764d54c"), + DisputeMaxGameDepth: 73, + DisputeSplitDepth: 30, + DisputeClockExtension: 10800, // 3 hours (input in seconds) + DisputeMaxClockDuration: 302400, // 3.5 days (input in seconds) } // TODO(#11887): consider making the number of prefunded keys configurable. diff --git a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol index f9cc5d5875f3..ea285c95f4b3 100644 --- a/packages/contracts-bedrock/scripts/DeployOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/DeployOPChain.s.sol @@ -24,7 +24,7 @@ import { IDisputeGameFactory } from "src/dispute/interfaces/IDisputeGameFactory. 
import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; import { IFaultDisputeGame } from "src/dispute/interfaces/IFaultDisputeGame.sol"; import { IPermissionedDisputeGame } from "src/dispute/interfaces/IPermissionedDisputeGame.sol"; -import { Claim, GameType, GameTypes, Hash, OutputRoot } from "src/dispute/lib/Types.sol"; +import { Claim, Duration, GameType, GameTypes, Hash, OutputRoot } from "src/dispute/lib/Types.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; @@ -50,6 +50,14 @@ contract DeployOPChainInput is BaseDeployIO { string internal _saltMixer; uint64 internal _gasLimit; + // Configurable dispute game inputs + GameType internal _disputeGameType; + Claim internal _disputeAbsolutePrestate; + uint256 internal _disputeMaxGameDepth; + uint256 internal _disputeSplitDepth; + Duration internal _disputeClockExtension; + Duration internal _disputeMaxClockDuration; + function set(bytes4 _sel, address _addr) public { require(_addr != address(0), "DeployOPChainInput: cannot set zero address"); if (_sel == this.opChainProxyAdminOwner.selector) _opChainProxyAdminOwner = _addr; @@ -72,6 +80,16 @@ contract DeployOPChainInput is BaseDeployIO { _l2ChainId = _value; } else if (_sel == this.gasLimit.selector) { _gasLimit = SafeCast.toUint64(_value); + } else if (_sel == this.disputeGameType.selector) { + _disputeGameType = GameType.wrap(SafeCast.toUint32(_value)); + } else if (_sel == this.disputeMaxGameDepth.selector) { + _disputeMaxGameDepth = SafeCast.toUint64(_value); + } else if (_sel == this.disputeSplitDepth.selector) { + _disputeSplitDepth = SafeCast.toUint64(_value); + } else if (_sel == this.disputeClockExtension.selector) { + _disputeClockExtension = Duration.wrap(SafeCast.toUint64(_value)); + } else if (_sel == this.disputeMaxClockDuration.selector) { + _disputeMaxClockDuration = Duration.wrap(SafeCast.toUint64(_value)); } else { 
revert("DeployOPChainInput: unknown selector"); } @@ -83,6 +101,11 @@ contract DeployOPChainInput is BaseDeployIO { else revert("DeployOPChainInput: unknown selector"); } + function set(bytes4 _sel, bytes32 _value) public { + if (_sel == this.disputeAbsolutePrestate.selector) _disputeAbsolutePrestate = Claim.wrap(_value); + else revert("DeployImplementationsInput: unknown selector"); + } + function opChainProxyAdminOwner() public view returns (address) { require(_opChainProxyAdminOwner != address(0), "DeployOPChainInput: not set"); return _opChainProxyAdminOwner; @@ -163,6 +186,30 @@ contract DeployOPChainInput is BaseDeployIO { function gasLimit() public view returns (uint64) { return _gasLimit; } + + function disputeGameType() public view returns (GameType) { + return _disputeGameType; + } + + function disputeAbsolutePrestate() public view returns (Claim) { + return _disputeAbsolutePrestate; + } + + function disputeMaxGameDepth() public view returns (uint256) { + return _disputeMaxGameDepth; + } + + function disputeSplitDepth() public view returns (uint256) { + return _disputeSplitDepth; + } + + function disputeClockExtension() public view returns (Duration) { + return _disputeClockExtension; + } + + function disputeMaxClockDuration() public view returns (Duration) { + return _disputeMaxClockDuration; + } } contract DeployOPChainOutput is BaseDeployIO { @@ -522,7 +569,13 @@ contract DeployOPChain is Script { l2ChainId: _doi.l2ChainId(), startingAnchorRoots: _doi.startingAnchorRoots(), saltMixer: _doi.saltMixer(), - gasLimit: _doi.gasLimit() + gasLimit: _doi.gasLimit(), + disputeGameType: _doi.disputeGameType(), + disputeAbsolutePrestate: _doi.disputeAbsolutePrestate(), + disputeMaxGameDepth: _doi.disputeMaxGameDepth(), + disputeSplitDepth: _doi.disputeSplitDepth(), + disputeClockExtension: _doi.disputeClockExtension(), + disputeMaxClockDuration: _doi.disputeMaxClockDuration() }); vm.broadcast(msg.sender); diff --git a/packages/contracts-bedrock/semver-lock.json 
b/packages/contracts-bedrock/semver-lock.json index cec1d51d210a..5a41aac33f47 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -32,8 +32,8 @@ "sourceCodeHash": "0xde4df0f9633dc0cdb1c9f634003ea5b0f7c5c1aebc407bc1b2f44c0ecf938649" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0x44fa611dcacad2f61c8ca7ef970e580800b5070d10f9a2a4c04459d6cf4cd180", - "sourceCodeHash": "0xe66886dd90cef90525f5ba2310c9e9d2d910c81c283f9b7cbfcd57c5091473c6" + "initCodeHash": "0xd58cb3978affc5c1457cdd498ff8420c90aef804d4c3b62cf42ab2691986d6d2", + "sourceCodeHash": "0x7bfa6eff76176649fe600303cd60009a0f6e282cbaec55836b5ea1f8875cbeb5" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0xbe2c0c81b3459014f287d8c89cdc0d27dde5d1f44e5d024fa1e4773ddc47c190", diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json index 31b7cb368409..7c478feb235d 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json @@ -172,6 +172,36 @@ "internalType": "uint64", "name": "gasLimit", "type": "uint64" + }, + { + "internalType": "GameType", + "name": "disputeGameType", + "type": "uint32" + }, + { + "internalType": "Claim", + "name": "disputeAbsolutePrestate", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "disputeMaxGameDepth", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "disputeSplitDepth", + "type": "uint256" + }, + { + "internalType": "Duration", + "name": "disputeClockExtension", + "type": "uint64" + }, + { + "internalType": "Duration", + "name": "disputeMaxClockDuration", + "type": "uint64" } ], "internalType": "struct OPContractsManager.DeployInput", diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json index 
31b7cb368409..7c478feb235d 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json @@ -172,6 +172,36 @@ "internalType": "uint64", "name": "gasLimit", "type": "uint64" + }, + { + "internalType": "GameType", + "name": "disputeGameType", + "type": "uint32" + }, + { + "internalType": "Claim", + "name": "disputeAbsolutePrestate", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "disputeMaxGameDepth", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "disputeSplitDepth", + "type": "uint256" + }, + { + "internalType": "Duration", + "name": "disputeClockExtension", + "type": "uint64" + }, + { + "internalType": "Duration", + "name": "disputeMaxClockDuration", + "type": "uint64" } ], "internalType": "struct OPContractsManager.DeployInput", diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index 248c1b340e4a..4bf52ff228a1 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -60,6 +60,13 @@ contract OPContractsManager is ISemver, Initializable { // The salt mixer is used as part of making the resulting salt unique. string saltMixer; uint64 gasLimit; + // Configurable dispute game parameters. + GameType disputeGameType; + Claim disputeAbsolutePrestate; + uint256 disputeMaxGameDepth; + uint256 disputeSplitDepth; + Duration disputeClockExtension; + Duration disputeMaxClockDuration; } /// @notice The full set of outputs from deploying a new OP Stack chain. 
@@ -122,8 +129,8 @@ contract OPContractsManager is ISemver, Initializable { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.19 - string public constant version = "1.0.0-beta.19"; + /// @custom:semver 1.0.0-beta.20 + string public constant version = "1.0.0-beta.20"; /// @notice Represents the interface version so consumers know how to decode the DeployOutput struct /// that's emitted in the `Deployed` event. Whenever that struct changes, a new version should be used. @@ -586,12 +593,12 @@ contract OPContractsManager is ISemver, Initializable { returns (bytes memory) { return abi.encode( - GameType.wrap(1), // Permissioned Cannon - Claim.wrap(bytes32(hex"038512e02c4c3f7bdaec27d00edf55b7155e0905301e1a88083e4e0a6764d54c")), // absolutePrestate - 73, // maxGameDepth - 30, // splitDepth - Duration.wrap(3 hours), // clockExtension - Duration.wrap(3.5 days), // maxClockDuration + _input.disputeGameType, + _input.disputeAbsolutePrestate, + _input.disputeMaxGameDepth, + _input.disputeSplitDepth, + _input.disputeClockExtension, + _input.disputeMaxClockDuration, IBigStepper(getLatestImplementation("MIPS").logic), IDelayedWETH(payable(address(_output.delayedWETHPermissionedGameProxy))), IAnchorStateRegistry(address(_output.anchorStateRegistryProxy)), diff --git a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol index 53c0650835f0..fb008d4aa8d0 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol @@ -51,6 +51,13 @@ contract OPContractsManager_Deploy_Test is DeployOPChain_TestBase { doi.set(doi.l2ChainId.selector, l2ChainId); doi.set(doi.opcmProxy.selector, address(opcm)); doi.set(doi.gasLimit.selector, gasLimit); + + doi.set(doi.disputeGameType.selector, disputeGameType); + doi.set(doi.disputeAbsolutePrestate.selector, disputeAbsolutePrestate); + doi.set(doi.disputeMaxGameDepth.selector, 
disputeMaxGameDepth); + doi.set(doi.disputeSplitDepth.selector, disputeSplitDepth); + doi.set(doi.disputeClockExtension.selector, disputeClockExtension); + doi.set(doi.disputeMaxClockDuration.selector, disputeMaxClockDuration); } // This helper function is used to convert the input struct type defined in DeployOPChain.s.sol @@ -70,7 +77,13 @@ contract OPContractsManager_Deploy_Test is DeployOPChain_TestBase { l2ChainId: _doi.l2ChainId(), startingAnchorRoots: _doi.startingAnchorRoots(), saltMixer: _doi.saltMixer(), - gasLimit: _doi.gasLimit() + gasLimit: _doi.gasLimit(), + disputeGameType: _doi.disputeGameType(), + disputeAbsolutePrestate: _doi.disputeAbsolutePrestate(), + disputeMaxGameDepth: _doi.disputeMaxGameDepth(), + disputeSplitDepth: _doi.disputeSplitDepth(), + disputeClockExtension: _doi.disputeClockExtension(), + disputeMaxClockDuration: _doi.disputeMaxClockDuration() }); } diff --git a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol index b7a676d2a94b..ec93c0ab2464 100644 --- a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol @@ -33,7 +33,7 @@ import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; import { IProxy } from "src/universal/interfaces/IProxy.sol"; -import { GameType, GameTypes, Hash, OutputRoot } from "src/dispute/lib/Types.sol"; +import { Claim, Duration, GameType, GameTypes, Hash, OutputRoot } from "src/dispute/lib/Types.sol"; contract DeployOPChainInput_Test is Test { DeployOPChainInput doi; @@ -370,6 +370,13 @@ contract DeployOPChain_TestBase is Test { OPContractsManager opcm = OPContractsManager(address(0)); string saltMixer = "defaultSaltMixer"; uint64 gasLimit = 30_000_000; + // Configurable dispute game parameters. 
+ uint32 disputeGameType = GameType.unwrap(GameTypes.PERMISSIONED_CANNON); + bytes32 disputeAbsolutePrestate = hex"038512e02c4c3f7bdaec27d00edf55b7155e0905301e1a88083e4e0a6764d54c"; + uint256 disputeMaxGameDepth = 73; + uint256 disputeSplitDepth = 30; + uint64 disputeClockExtension = Duration.unwrap(Duration.wrap(3 hours)); + uint64 disputeMaxClockDuration = Duration.unwrap(Duration.wrap(3.5 days)); function setUp() public virtual { // Set defaults for reference types @@ -490,6 +497,12 @@ contract DeployOPChain_Test is DeployOPChain_TestBase { doi.set(doi.opcmProxy.selector, address(opcm)); // Not fuzzed since it must be an actual instance. doi.set(doi.saltMixer.selector, saltMixer); doi.set(doi.gasLimit.selector, gasLimit); + doi.set(doi.disputeGameType.selector, disputeGameType); + doi.set(doi.disputeAbsolutePrestate.selector, disputeAbsolutePrestate); + doi.set(doi.disputeMaxGameDepth.selector, disputeMaxGameDepth); + doi.set(doi.disputeSplitDepth.selector, disputeSplitDepth); + doi.set(doi.disputeClockExtension.selector, disputeClockExtension); + doi.set(doi.disputeMaxClockDuration.selector, disputeMaxClockDuration); deployOPChain.run(doi, doo); @@ -507,6 +520,12 @@ contract DeployOPChain_Test is DeployOPChain_TestBase { assertEq(l2ChainId, doi.l2ChainId(), "900"); assertEq(saltMixer, doi.saltMixer(), "1000"); assertEq(gasLimit, doi.gasLimit(), "1100"); + assertEq(disputeGameType, GameType.unwrap(doi.disputeGameType()), "1200"); + assertEq(disputeAbsolutePrestate, Claim.unwrap(doi.disputeAbsolutePrestate()), "1300"); + assertEq(disputeMaxGameDepth, doi.disputeMaxGameDepth(), "1400"); + assertEq(disputeSplitDepth, doi.disputeSplitDepth(), "1500"); + assertEq(disputeClockExtension, Duration.unwrap(doi.disputeClockExtension()), "1600"); + assertEq(disputeMaxClockDuration, Duration.unwrap(doi.disputeMaxClockDuration()), "1700"); // Assert inputs were properly passed through to the contract initializers. 
assertEq(address(doo.opChainProxyAdmin().owner()), opChainProxyAdminOwner, "2100"); From ef46b0583cacb35c98396d619d3153e6cd715872 Mon Sep 17 00:00:00 2001 From: Maurelian Date: Wed, 2 Oct 2024 12:06:12 -0400 Subject: [PATCH 111/116] feat: Add a test to simplify deploy script testing (#12235) * feat: Add a test to simplify deploy script testing * fix: lint * feat: Add clarifying comments to DeployVariations_Test --- .../test/setup/DeployVariations.sol | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 packages/contracts-bedrock/test/setup/DeployVariations.sol diff --git a/packages/contracts-bedrock/test/setup/DeployVariations.sol b/packages/contracts-bedrock/test/setup/DeployVariations.sol new file mode 100644 index 000000000000..2257e9905514 --- /dev/null +++ b/packages/contracts-bedrock/test/setup/DeployVariations.sol @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Testing utilities +import { CommonTest } from "test/setup/CommonTest.sol"; +import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; + +contract DeployVariations_Test is CommonTest { + function setUp() public override { + // Prevent calling the base CommonTest.setUp() function, as we will run it within the test functions + // after setting the feature flags + } + + // Enable features which should be possible to enable or disable regardless of other options. + function enableAddOns(bool _enableCGT, bool _enableAltDa) public { + if (_enableCGT) { + ERC20 token = new ERC20("Silly", "SIL"); + super.enableCustomGasToken(address(token)); + } + if (_enableAltDa) { + super.enableAltDA(); + } + } + + /// @dev It should be possible to enable Fault Proofs with any mix of CGT and Alt-DA. 
+ function testFuzz_enableFaultProofs(bool _enableCGT, bool _enableAltDa) public virtual { + enableAddOns(_enableCGT, _enableAltDa); + super.enableFaultProofs(); + super.setUp(); + } + + /// @dev It should be possible to enable Fault Proofs and Interop with any mix of CGT and Alt-DA. + function test_enableInteropAndFaultProofs(bool _enableCGT, bool _enableAltDa) public virtual { + enableAddOns(_enableCGT, _enableAltDa); + super.enableInterop(); + super.enableFaultProofs(); + super.setUp(); + } +} From 1217d4ac74e4dfddc23391f41e50e51f7c64a772 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Wed, 2 Oct 2024 10:18:22 -0600 Subject: [PATCH 112/116] op-deployer: Custom gas price estimator (#12239) * op-deployer: Custom gas price estimator Sepolia's fees are super high and extremely volatile right now. As a result, it became difficult for users to deploy new chains using op-deployer. The OPCM's deploy transaction buys most of the gas in the block, and the default gas price estimation logic in the transaction manager wasn't aggressive enough for the transactions to land on chain in a timely manner. This PR adds the ability to customize the transaction manager with a custom `GasPriceEstimator` function that returns the tip, base fee, and blob fee. I extracted the original logic in the transaction manager into a default estimator that will be used if one isn't specified. For op-deployer, I built a custom estimator that pads the head block's base fee by 20%, and multiplies the suggested tip by 10. After testing this, I was able to get transactions onto Sepolia reliably. The algorithm is pretty simple and overpays for transactions by a lot, but for op-deployer's use case it's more important that transactions land quickly than it is they be cheap. Deployments are a one-time thing. 
* code review updates * better default support * specific test for extension --- .../deployer/broadcaster/gas_estimator.go | 38 ++++++++++++++ op-chain-ops/deployer/broadcaster/keyed.go | 4 +- .../deployer/integration_test/apply_test.go | 1 + op-service/txmgr/cli.go | 4 ++ op-service/txmgr/estimator.go | 33 +++++++++++++ op-service/txmgr/txmgr.go | 49 +++++++------------ op-service/txmgr/txmgr_test.go | 45 +++++++++++------ 7 files changed, 127 insertions(+), 47 deletions(-) create mode 100644 op-chain-ops/deployer/broadcaster/gas_estimator.go create mode 100644 op-service/txmgr/estimator.go diff --git a/op-chain-ops/deployer/broadcaster/gas_estimator.go b/op-chain-ops/deployer/broadcaster/gas_estimator.go new file mode 100644 index 000000000000..dc877bed0dc8 --- /dev/null +++ b/op-chain-ops/deployer/broadcaster/gas_estimator.go @@ -0,0 +1,38 @@ +package broadcaster + +import ( + "context" + "fmt" + "math/big" + + "github.com/ethereum-optimism/optimism/op-service/txmgr" +) + +var ( + // baseFeePadFactor = 20% as a divisor + baseFeePadFactor = big.NewInt(5) + // tipMulFactor = 10 as a multiplier + tipMulFactor = big.NewInt(10) + // dummyBlobFee is a dummy value for the blob fee. Since this gas estimator will never + // post blobs, it's just set to 1. + dummyBlobFee = big.NewInt(1) +) + +// DeployerGasPriceEstimator is a custom gas price estimator for use with op-deployer. +// It pads the base fee by 20% and multiplies the suggested tip by 10. 
+func DeployerGasPriceEstimator(ctx context.Context, client txmgr.ETHBackend) (*big.Int, *big.Int, *big.Int, error) { + chainHead, err := client.HeaderByNumber(ctx, nil) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to get block: %w", err) + } + + tip, err := client.SuggestGasTipCap(ctx) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to get gas tip cap: %w", err) + } + + baseFeePad := new(big.Int).Div(chainHead.BaseFee, baseFeePadFactor) + paddedBaseFee := new(big.Int).Add(chainHead.BaseFee, baseFeePad) + paddedTip := new(big.Int).Mul(tip, tipMulFactor) + return paddedTip, paddedBaseFee, dummyBlobFee, nil +} diff --git a/op-chain-ops/deployer/broadcaster/keyed.go b/op-chain-ops/deployer/broadcaster/keyed.go index 879d38b329b9..4768f31afc4a 100644 --- a/op-chain-ops/deployer/broadcaster/keyed.go +++ b/op-chain-ops/deployer/broadcaster/keyed.go @@ -6,9 +6,10 @@ import ( "math/big" "time" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-chain-ops/script" opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto" - "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/txmgr" "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics" "github.com/ethereum/go-ethereum/common" @@ -51,6 +52,7 @@ func NewKeyedBroadcaster(cfg KeyedBroadcasterOpts) (*KeyedBroadcaster, error) { SafeAbortNonceTooLowCount: 3, Signer: cfg.Signer, From: cfg.From, + GasPriceEstimatorFn: DeployerGasPriceEstimator, } minTipCap, err := eth.GweiToWei(1.0) diff --git a/op-chain-ops/deployer/integration_test/apply_test.go b/op-chain-ops/deployer/integration_test/apply_test.go index be4ef80e6374..184269618f0e 100644 --- a/op-chain-ops/deployer/integration_test/apply_test.go +++ b/op-chain-ops/deployer/integration_test/apply_test.go @@ -29,6 +29,7 @@ participants: - el_type: geth el_extra_params: - "--gcmode=archive" + - "--rpc.txfeecap=0" cl_type: lighthouse 
network_params: prefunded_accounts: '{ "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266": { "balance": "1000000ETH" } }' diff --git a/op-service/txmgr/cli.go b/op-service/txmgr/cli.go index fe65b6dd126e..2390933d79ca 100644 --- a/op-service/txmgr/cli.go +++ b/op-service/txmgr/cli.go @@ -418,6 +418,10 @@ type Config struct { // Signer is used to sign transactions when the gas price is increased. Signer opcrypto.SignerFn From common.Address + + // GasPriceEstimatorFn is used to estimate the gas price for a transaction. + // If nil, DefaultGasPriceEstimatorFn is used. + GasPriceEstimatorFn GasPriceEstimatorFn } func (m *Config) Check() error { diff --git a/op-service/txmgr/estimator.go b/op-service/txmgr/estimator.go new file mode 100644 index 000000000000..c9968a1018a7 --- /dev/null +++ b/op-service/txmgr/estimator.go @@ -0,0 +1,33 @@ +package txmgr + +import ( + "context" + "errors" + "math/big" + + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" +) + +type GasPriceEstimatorFn func(ctx context.Context, backend ETHBackend) (*big.Int, *big.Int, *big.Int, error) + +func DefaultGasPriceEstimatorFn(ctx context.Context, backend ETHBackend) (*big.Int, *big.Int, *big.Int, error) { + tip, err := backend.SuggestGasTipCap(ctx) + if err != nil { + return nil, nil, nil, err + } + + head, err := backend.HeaderByNumber(ctx, nil) + if err != nil { + return nil, nil, nil, err + } + if head.BaseFee == nil { + return nil, nil, nil, errors.New("txmgr does not support pre-london blocks that do not have a base fee") + } + + var blobFee *big.Int + if head.ExcessBlobGas != nil { + blobFee = eip4844.CalcBlobFee(*head.ExcessBlobGas) + } + + return tip, head.BaseFee, blobFee, nil +} diff --git a/op-service/txmgr/txmgr.go b/op-service/txmgr/txmgr.go index 4e4c3e633f87..643337a147e7 100644 --- a/op-service/txmgr/txmgr.go +++ b/op-service/txmgr/txmgr.go @@ -137,9 +137,10 @@ type SimpleTxManager struct { name string chainID *big.Int - backend ETHBackend - l log.Logger - metr 
metrics.TxMetricer + backend ETHBackend + l log.Logger + metr metrics.TxMetricer + gasPriceEstimatorFn GasPriceEstimatorFn nonce *uint64 nonceLock sync.RWMutex @@ -163,13 +164,15 @@ func NewSimpleTxManagerFromConfig(name string, l log.Logger, m metrics.TxMetrice if err := conf.Check(); err != nil { return nil, fmt.Errorf("invalid config: %w", err) } + return &SimpleTxManager{ - chainID: conf.ChainID, - name: name, - cfg: conf, - backend: conf.Backend, - l: l.New("service", name), - metr: m, + chainID: conf.ChainID, + name: name, + cfg: conf, + backend: conf.Backend, + l: l.New("service", name), + metr: m, + gasPriceEstimatorFn: conf.GasPriceEstimatorFn, }, nil } @@ -876,27 +879,18 @@ func (m *SimpleTxManager) increaseGasPrice(ctx context.Context, tx *types.Transa func (m *SimpleTxManager) SuggestGasPriceCaps(ctx context.Context) (*big.Int, *big.Int, *big.Int, error) { cCtx, cancel := context.WithTimeout(ctx, m.cfg.NetworkTimeout) defer cancel() - tip, err := m.backend.SuggestGasTipCap(cCtx) - if err != nil { - m.metr.RPCError() - return nil, nil, nil, fmt.Errorf("failed to fetch the suggested gas tip cap: %w", err) - } else if tip == nil { - return nil, nil, nil, errors.New("the suggested tip was nil") + + estimatorFn := m.gasPriceEstimatorFn + if estimatorFn == nil { + estimatorFn = DefaultGasPriceEstimatorFn } - cCtx, cancel = context.WithTimeout(ctx, m.cfg.NetworkTimeout) - defer cancel() - head, err := m.backend.HeaderByNumber(cCtx, nil) + + tip, baseFee, blobFee, err := estimatorFn(cCtx, m.backend) if err != nil { m.metr.RPCError() - return nil, nil, nil, fmt.Errorf("failed to fetch the suggested base fee: %w", err) - } else if head.BaseFee == nil { - return nil, nil, nil, errors.New("txmgr does not support pre-london blocks that do not have a base fee") + return nil, nil, nil, fmt.Errorf("failed to get gas price estimates: %w", err) } - baseFee := head.BaseFee - m.metr.RecordBaseFee(baseFee) - m.metr.RecordTipCap(tip) - // Enforce minimum base fee and tip cap 
minTipCap := m.cfg.MinTipCap.Load() minBaseFee := m.cfg.MinBaseFee.Load() @@ -910,11 +904,6 @@ func (m *SimpleTxManager) SuggestGasPriceCaps(ctx context.Context) (*big.Int, *b baseFee = new(big.Int).Set(minBaseFee) } - var blobFee *big.Int - if head.ExcessBlobGas != nil { - blobFee = eip4844.CalcBlobFee(*head.ExcessBlobGas) - m.metr.RecordBlobBaseFee(blobFee) - } return tip, baseFee, blobFee, nil } diff --git a/op-service/txmgr/txmgr_test.go b/op-service/txmgr/txmgr_test.go index 6bafa69464b6..0b246fd93238 100644 --- a/op-service/txmgr/txmgr_test.go +++ b/op-service/txmgr/txmgr_test.go @@ -1079,7 +1079,7 @@ func TestWaitMinedReturnsReceiptAfterFailure(t *testing.T) { require.Equal(t, receipt.TxHash, txHash) } -func doGasPriceIncrease(t *testing.T, txTipCap, txFeeCap, newTip, newBaseFee int64) (*types.Transaction, *types.Transaction, error) { +func doGasPriceIncrease(t *testing.T, txTipCap, txFeeCap, newTip, newBaseFee int64, estimator GasPriceEstimatorFn) (*types.Transaction, *types.Transaction, error) { borkedBackend := failingBackend{ gasTip: big.NewInt(newTip), baseFee: big.NewInt(newBaseFee), @@ -1100,11 +1100,12 @@ func doGasPriceIncrease(t *testing.T, txTipCap, txFeeCap, newTip, newBaseFee int cfg.MinBlobTxFee.Store(defaultMinBlobTxFee) mgr := &SimpleTxManager{ - cfg: &cfg, - name: "TEST", - backend: &borkedBackend, - l: testlog.Logger(t, log.LevelCrit), - metr: &metrics.NoopTxMetrics{}, + cfg: &cfg, + name: "TEST", + backend: &borkedBackend, + l: testlog.Logger(t, log.LevelCrit), + metr: &metrics.NoopTxMetrics{}, + gasPriceEstimatorFn: estimator, } tx := types.NewTx(&types.DynamicFeeTx{ @@ -1125,7 +1126,7 @@ func TestIncreaseGasPrice(t *testing.T) { { name: "bump at least 1", run: func(t *testing.T) { - tx, newTx, err := doGasPriceIncrease(t, 1, 3, 1, 1) + tx, newTx, err := doGasPriceIncrease(t, 1, 3, 1, 1, DefaultGasPriceEstimatorFn) require.True(t, newTx.GasFeeCap().Cmp(tx.GasFeeCap()) > 0, "new tx fee cap must be larger") require.True(t, 
newTx.GasTipCap().Cmp(tx.GasTipCap()) > 0, "new tx tip must be larger") require.NoError(t, err) @@ -1134,7 +1135,7 @@ func TestIncreaseGasPrice(t *testing.T) { { name: "enforces min bump", run: func(t *testing.T) { - tx, newTx, err := doGasPriceIncrease(t, 100, 1000, 101, 460) + tx, newTx, err := doGasPriceIncrease(t, 100, 1000, 101, 460, DefaultGasPriceEstimatorFn) require.True(t, newTx.GasFeeCap().Cmp(tx.GasFeeCap()) > 0, "new tx fee cap must be larger") require.True(t, newTx.GasTipCap().Cmp(tx.GasTipCap()) > 0, "new tx tip must be larger") require.NoError(t, err) @@ -1143,7 +1144,7 @@ func TestIncreaseGasPrice(t *testing.T) { { name: "enforces min bump on only tip increase", run: func(t *testing.T) { - tx, newTx, err := doGasPriceIncrease(t, 100, 1000, 101, 440) + tx, newTx, err := doGasPriceIncrease(t, 100, 1000, 101, 440, DefaultGasPriceEstimatorFn) require.True(t, newTx.GasFeeCap().Cmp(tx.GasFeeCap()) > 0, "new tx fee cap must be larger") require.True(t, newTx.GasTipCap().Cmp(tx.GasTipCap()) > 0, "new tx tip must be larger") require.NoError(t, err) @@ -1152,7 +1153,7 @@ func TestIncreaseGasPrice(t *testing.T) { { name: "enforces min bump on only base fee increase", run: func(t *testing.T) { - tx, newTx, err := doGasPriceIncrease(t, 100, 1000, 99, 460) + tx, newTx, err := doGasPriceIncrease(t, 100, 1000, 99, 460, DefaultGasPriceEstimatorFn) require.True(t, newTx.GasFeeCap().Cmp(tx.GasFeeCap()) > 0, "new tx fee cap must be larger") require.True(t, newTx.GasTipCap().Cmp(tx.GasTipCap()) > 0, "new tx tip must be larger") require.NoError(t, err) @@ -1161,7 +1162,7 @@ func TestIncreaseGasPrice(t *testing.T) { { name: "uses L1 values when larger", run: func(t *testing.T) { - _, newTx, err := doGasPriceIncrease(t, 10, 100, 50, 200) + _, newTx, err := doGasPriceIncrease(t, 10, 100, 50, 200, DefaultGasPriceEstimatorFn) require.True(t, newTx.GasFeeCap().Cmp(big.NewInt(450)) == 0, "new tx fee cap must be equal L1") require.True(t, newTx.GasTipCap().Cmp(big.NewInt(50)) == 
0, "new tx tip must be equal L1") require.NoError(t, err) @@ -1170,7 +1171,7 @@ func TestIncreaseGasPrice(t *testing.T) { { name: "uses L1 tip when larger and threshold FC", run: func(t *testing.T) { - _, newTx, err := doGasPriceIncrease(t, 100, 2200, 120, 1050) + _, newTx, err := doGasPriceIncrease(t, 100, 2200, 120, 1050, DefaultGasPriceEstimatorFn) require.True(t, newTx.GasTipCap().Cmp(big.NewInt(120)) == 0, "new tx tip must be equal L1") require.True(t, newTx.GasFeeCap().Cmp(big.NewInt(2420)) == 0, "new tx fee cap must be equal to the threshold value") require.NoError(t, err) @@ -1179,7 +1180,7 @@ func TestIncreaseGasPrice(t *testing.T) { { name: "bumped fee above multiplier limit", run: func(t *testing.T) { - _, _, err := doGasPriceIncrease(t, 1, 9999, 1, 1) + _, _, err := doGasPriceIncrease(t, 1, 9999, 1, 1, DefaultGasPriceEstimatorFn) require.ErrorContains(t, err, "fee cap") require.NotContains(t, err.Error(), "tip cap") }, @@ -1187,7 +1188,7 @@ func TestIncreaseGasPrice(t *testing.T) { { name: "bumped tip above multiplier limit", run: func(t *testing.T) { - _, _, err := doGasPriceIncrease(t, 9999, 0, 0, 9999) + _, _, err := doGasPriceIncrease(t, 9999, 0, 0, 9999, DefaultGasPriceEstimatorFn) require.ErrorContains(t, err, "tip cap") require.NotContains(t, err.Error(), "fee cap") }, @@ -1195,7 +1196,7 @@ func TestIncreaseGasPrice(t *testing.T) { { name: "bumped fee and tip above multiplier limit", run: func(t *testing.T) { - _, _, err := doGasPriceIncrease(t, 9999, 9999, 1, 1) + _, _, err := doGasPriceIncrease(t, 9999, 9999, 1, 1, DefaultGasPriceEstimatorFn) require.ErrorContains(t, err, "tip cap") require.ErrorContains(t, err, "fee cap") }, @@ -1203,13 +1204,25 @@ func TestIncreaseGasPrice(t *testing.T) { { name: "uses L1 FC when larger and threshold tip", run: func(t *testing.T) { - _, newTx, err := doGasPriceIncrease(t, 100, 2200, 100, 2000) + _, newTx, err := doGasPriceIncrease(t, 100, 2200, 100, 2000, DefaultGasPriceEstimatorFn) require.True(t, 
newTx.GasTipCap().Cmp(big.NewInt(110)) == 0, "new tx tip must be equal the threshold value") t.Log("Vals:", newTx.GasFeeCap()) require.True(t, newTx.GasFeeCap().Cmp(big.NewInt(4110)) == 0, "new tx fee cap must be equal L1") require.NoError(t, err) }, }, + { + name: "supports extension through custom estimator", + run: func(t *testing.T) { + estimator := func(ctx context.Context, backend ETHBackend) (*big.Int, *big.Int, *big.Int, error) { + return big.NewInt(100), big.NewInt(3000), big.NewInt(100), nil + } + _, newTx, err := doGasPriceIncrease(t, 70, 2000, 80, 2100, estimator) + require.NoError(t, err) + require.True(t, newTx.GasFeeCap().Cmp(big.NewInt(6100)) == 0) + require.True(t, newTx.GasTipCap().Cmp(big.NewInt(100)) == 0) + }, + }, } for _, test := range tests { test := test From d1f6501b5b878990093fbc65eca2d75ff99b9a0f Mon Sep 17 00:00:00 2001 From: George Knee Date: Wed, 2 Oct 2024 18:42:29 +0100 Subject: [PATCH 113/116] batcher: use abstract Queue type for blocks state (#12180) * op-service: add queue package * batcher: use Queue type for blocks * revert changes to errors.As/Is * implement and use Peek operation * queue: add unit tests * add godoc * add more test cases permute expected / got * ensure enqueue and prepend are noops when args is empty * use queue.PeekN and queue.DequeueN * typo * queue: simplify method implementations * revert to old dequeue impl --- op-batcher/batcher/channel_manager.go | 45 ++++---- op-batcher/batcher/channel_manager_test.go | 6 +- op-service/queue/queue.go | 75 +++++++++++++ op-service/queue/queue_test.go | 122 +++++++++++++++++++++ 4 files changed, 225 insertions(+), 23 deletions(-) create mode 100644 op-service/queue/queue.go create mode 100644 op-service/queue/queue_test.go diff --git a/op-batcher/batcher/channel_manager.go b/op-batcher/batcher/channel_manager.go index 23e8f7843696..f33c9d3b5448 100644 --- a/op-batcher/batcher/channel_manager.go +++ b/op-batcher/batcher/channel_manager.go @@ -10,6 +10,7 @@ import ( 
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/queue" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" @@ -32,7 +33,7 @@ type channelManager struct { rollupCfg *rollup.Config // All blocks since the last request for new tx data. - blocks []*types.Block + blocks queue.Queue[*types.Block] // The latest L1 block from all the L2 blocks in the most recently closed channel l1OriginLastClosedChannel eth.BlockID // The default ChannelConfig to use for the next channel @@ -68,7 +69,7 @@ func (s *channelManager) Clear(l1OriginLastClosedChannel eth.BlockID) { s.mu.Lock() defer s.mu.Unlock() s.log.Trace("clearing channel manager state") - s.blocks = s.blocks[:0] + s.blocks.Clear() s.l1OriginLastClosedChannel = l1OriginLastClosedChannel s.tip = common.Hash{} s.closed = false @@ -106,9 +107,11 @@ func (s *channelManager) TxConfirmed(_id txID, inclusionBlock eth.BlockID) { if channel, ok := s.txChannels[id]; ok { delete(s.txChannels, id) done, blocks := channel.TxConfirmed(id, inclusionBlock) - s.blocks = append(blocks, s.blocks...) if done { s.removePendingChannel(channel) + if len(blocks) > 0 { + s.blocks.Prepend(blocks...) 
+ } } } else { s.log.Warn("transaction from unknown channel marked as confirmed", "id", id) @@ -208,7 +211,7 @@ func (s *channelManager) getReadyChannel(l1Head eth.BlockID) (*channel, error) { } dataPending := firstWithTxData != nil - s.log.Debug("Requested tx data", "l1Head", l1Head, "txdata_pending", dataPending, "blocks_pending", len(s.blocks)) + s.log.Debug("Requested tx data", "l1Head", l1Head, "txdata_pending", dataPending, "blocks_pending", s.blocks.Len()) // Short circuit if there is pending tx data or the channel manager is closed if dataPending { @@ -222,7 +225,7 @@ func (s *channelManager) getReadyChannel(l1Head eth.BlockID) (*channel, error) { // No pending tx data, so we have to add new blocks to the channel // If we have no saved blocks, we will not be able to create valid frames - if len(s.blocks) == 0 { + if s.blocks.Len() == 0 { return nil, io.EOF } @@ -274,14 +277,14 @@ func (s *channelManager) ensureChannelWithSpace(l1Head eth.BlockID) error { "id", pc.ID(), "l1Head", l1Head, "l1OriginLastClosedChannel", s.l1OriginLastClosedChannel, - "blocks_pending", len(s.blocks), + "blocks_pending", s.blocks.Len(), "batch_type", cfg.BatchType, "compression_algo", cfg.CompressorConfig.CompressionAlgo, "target_num_frames", cfg.TargetNumFrames, "max_frame_size", cfg.MaxFrameSize, "use_blobs", cfg.UseBlobs, ) - s.metr.RecordChannelOpened(pc.ID(), len(s.blocks)) + s.metr.RecordChannelOpened(pc.ID(), s.blocks.Len()) return nil } @@ -304,7 +307,13 @@ func (s *channelManager) processBlocks() error { _chFullErr *ChannelFullError // throw away, just for type checking latestL2ref eth.L2BlockRef ) - for i, block := range s.blocks { + + for i := 0; ; i++ { + block, ok := s.blocks.PeekN(i) + if !ok { + break + } + l1info, err := s.currentChannel.AddBlock(block) if errors.As(err, &_chFullErr) { // current block didn't get added because channel is already full @@ -323,22 +332,16 @@ func (s *channelManager) processBlocks() error { } } - if blocksAdded == len(s.blocks) { - // 
all blocks processed, reuse slice - s.blocks = s.blocks[:0] - } else { - // remove processed blocks - s.blocks = s.blocks[blocksAdded:] - } + _, _ = s.blocks.DequeueN(blocksAdded) s.metr.RecordL2BlocksAdded(latestL2ref, blocksAdded, - len(s.blocks), + s.blocks.Len(), s.currentChannel.InputBytes(), s.currentChannel.ReadyBytes()) s.log.Debug("Added blocks to channel", "blocks_added", blocksAdded, - "blocks_pending", len(s.blocks), + "blocks_pending", s.blocks.Len(), "channel_full", s.currentChannel.IsFull(), "input_bytes", s.currentChannel.InputBytes(), "ready_bytes", s.currentChannel.ReadyBytes(), @@ -363,7 +366,7 @@ func (s *channelManager) outputFrames() error { inBytes, outBytes := s.currentChannel.InputBytes(), s.currentChannel.OutputBytes() s.metr.RecordChannelClosed( s.currentChannel.ID(), - len(s.blocks), + s.blocks.Len(), s.currentChannel.TotalFrames(), inBytes, outBytes, @@ -377,7 +380,7 @@ func (s *channelManager) outputFrames() error { s.log.Info("Channel closed", "id", s.currentChannel.ID(), - "blocks_pending", len(s.blocks), + "blocks_pending", s.blocks.Len(), "num_frames", s.currentChannel.TotalFrames(), "input_bytes", inBytes, "output_bytes", outBytes, @@ -404,7 +407,7 @@ func (s *channelManager) AddL2Block(block *types.Block) error { } s.metr.RecordL2BlockInPendingQueue(block) - s.blocks = append(s.blocks, block) + s.blocks.Enqueue(block) s.tip = block.Hash() return nil @@ -489,7 +492,7 @@ func (s *channelManager) Requeue(newCfg ChannelConfig) { } // We put the blocks back at the front of the queue: - s.blocks = append(blocksToRequeue, s.blocks...) + s.blocks.Prepend(blocksToRequeue...) 
// Channels which where already being submitted are put back s.channelQueue = newChannelQueue s.currentChannel = nil diff --git a/op-batcher/batcher/channel_manager_test.go b/op-batcher/batcher/channel_manager_test.go index dc913505c05f..fac34f8c931e 100644 --- a/op-batcher/batcher/channel_manager_test.go +++ b/op-batcher/batcher/channel_manager_test.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup/derive" derivetest "github.com/ethereum-optimism/optimism/op-node/rollup/derive/test" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/queue" "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -87,7 +88,7 @@ func ChannelManagerReturnsErrReorg(t *testing.T, batchType uint) { require.NoError(t, m.AddL2Block(c)) require.ErrorIs(t, m.AddL2Block(x), ErrReorg) - require.Equal(t, []*types.Block{a, b, c}, m.blocks) + require.Equal(t, queue.Queue[*types.Block]{a, b, c}, m.blocks) } // ChannelManagerReturnsErrReorgWhenDrained ensures that the channel manager @@ -626,7 +627,7 @@ func TestChannelManager_Requeue(t *testing.T) { // This is the snapshot of channel manager state we want to reinstate // when we requeue - stateSnapshot := []*types.Block{blockA, blockB} + stateSnapshot := queue.Queue[*types.Block]{blockA, blockB} m.blocks = stateSnapshot require.Empty(t, m.channelQueue) @@ -664,5 +665,6 @@ func TestChannelManager_Requeue(t *testing.T) { // The requeue shouldn't affect the pending channel require.Contains(t, m.channelQueue, channel0) + require.NotContains(t, m.blocks, blockA) } diff --git a/op-service/queue/queue.go b/op-service/queue/queue.go new file mode 100644 index 000000000000..8dd0d5e16d0d --- /dev/null +++ b/op-service/queue/queue.go @@ -0,0 +1,75 @@ +package queue + +// Queue implements a FIFO queue. 
+type Queue[T any] []T + +// Enqueue adds the elements to the back of the queue. +func (q *Queue[T]) Enqueue(t ...T) { + if len(t) == 0 { + return + } + *q = append(*q, t...) +} + +// Dequeue removes a single element from the front of the queue +// (if there is one) and returns it. Returns a zero value and false +// if there is no element to dequeue. +func (q *Queue[T]) Dequeue() (T, bool) { + if len(*q) == 0 { + var zeroValue T + return zeroValue, false + } + t := (*q)[0] + *q = (*q)[1:] + return t, true +} + +// DequeueN removes N elements from the front of the queue +// (if there are enough) and returns a slice of those elements. Returns +// a nil slice and false if there are insufficient elements to dequeue. +func (q *Queue[T]) DequeueN(N int) ([]T, bool) { + if len(*q) < N { + return nil, false + } + t := (*q)[0:N] + *q = (*q)[N:] + return t, true +} + +// Prepend inserts the elements at the front of the queue, +// preserving their order. A noop if t is empty. +func (q *Queue[T]) Prepend(t ...T) { + if len(t) == 0 { + return + } + *q = append(t, *q...) +} + +// Clear removes all elements from the queue. +func (q *Queue[T]) Clear() { + *q = (*q)[:0] +} + +// Len returns the number of elements in the queue. +func (q *Queue[T]) Len() int { + return len(*q) +} + +// Peek returns the single element at the front of the queue +// (if there is one) without removing it. Returns a zero value and +// false if there is no element to peek at. +func (q *Queue[T]) Peek() (T, bool) { + return q.PeekN(0) +} + +// PeekN returns the element in Nth position in the queue +// Returns a zero value and false if there are insufficient elements +// in the queue. 
+func (q *Queue[T]) PeekN(N int) (T, bool) { + if len(*q) <= N { + var zeroValue T + return zeroValue, false + } + t := (*q)[N] + return t, true +} diff --git a/op-service/queue/queue_test.go b/op-service/queue/queue_test.go new file mode 100644 index 000000000000..deca8ab411a5 --- /dev/null +++ b/op-service/queue/queue_test.go @@ -0,0 +1,122 @@ +package queue + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestQueue(t *testing.T) { + t.Run("enqueue amd dequeue", func(t *testing.T) { + q := Queue[int]{} + q.Enqueue(1, 2, 3, 4) + + p, peekOk := q.Peek() + require.True(t, peekOk) + require.Equal(t, 1, p) + + d, dequeueOk := q.Dequeue() + require.Equal(t, 1, d) + require.True(t, dequeueOk) + require.Equal(t, 3, q.Len()) + p, peekOk = q.Peek() + require.True(t, peekOk) + require.Equal(t, 2, p) + + d, dequeueOk = q.Dequeue() + require.Equal(t, 2, d) + require.True(t, dequeueOk) + require.Equal(t, 2, q.Len()) + p, peekOk = q.Peek() + require.True(t, peekOk) + require.Equal(t, 3, p) + + d, dequeueOk = q.Dequeue() + require.Equal(t, 3, d) + require.True(t, dequeueOk) + require.Equal(t, 1, q.Len()) + p, peekOk = q.Peek() + require.True(t, peekOk) + require.Equal(t, 4, p) + + d, dequeueOk = q.Dequeue() + require.Equal(t, 4, d) + require.True(t, dequeueOk) + require.Equal(t, 0, q.Len()) + p, peekOk = q.Peek() + require.False(t, peekOk) + require.Equal(t, 0, p) + + d, dequeueOk = q.Dequeue() + require.Equal(t, 0, d) + require.False(t, dequeueOk) + require.Equal(t, 0, q.Len()) + p, peekOk = q.Peek() + require.False(t, peekOk) + require.Equal(t, 0, p) + p, peekOk = q.Peek() + require.False(t, peekOk) + require.Equal(t, 0, p) + }) + + t.Run("peekN and deqeueueN", func(t *testing.T) { + q := Queue[int]{} + q.Enqueue(1, 2, 3, 4) + + p, peekOk := q.PeekN(1) + require.True(t, peekOk) + require.Equal(t, 2, p) + + p, peekOk = q.PeekN(2) + require.Equal(t, 3, p) + require.True(t, peekOk) + require.Equal(t, 4, q.Len()) + + p, peekOk = q.PeekN(4) + 
require.Equal(t, 0, p) + require.False(t, peekOk) + + d, dequeueOk := q.DequeueN(1) + require.Equal(t, []int{1}, d) + require.True(t, dequeueOk) + require.Equal(t, 3, q.Len()) + + d, dequeueOk = q.DequeueN(3) + require.Equal(t, []int{2, 3, 4}, d) + require.True(t, dequeueOk) + require.Equal(t, 0, q.Len()) + }) + + t.Run("enqueue and clear", func(t *testing.T) { + q := Queue[int]{} + q.Enqueue(5, 6, 7) + + q.Clear() + require.Equal(t, 0, q.Len()) + + d, ok := q.Dequeue() + require.Equal(t, 0, d) + require.False(t, ok) + }) + + t.Run("prepend", func(t *testing.T) { + var q, r Queue[int] + q.Enqueue(5, 6, 7) + r.Enqueue(8, 9) + + q.Prepend(r...) + require.Equal(t, 5, q.Len()) + + d, ok := q.Dequeue() + require.Equal(t, 8, d) + require.True(t, ok) + require.Equal(t, 4, q.Len()) + + q.Prepend() + require.Equal(t, 4, q.Len()) + + d, ok = q.Dequeue() + require.Equal(t, 9, d) + require.True(t, ok) + }) +} From 2c7de99fe7a128bd24bfc6ebde3d23860b7f426e Mon Sep 17 00:00:00 2001 From: Inphi Date: Wed, 2 Oct 2024 13:44:01 -0400 Subject: [PATCH 114/116] cannon: Define 64-bit syscalls (#12256) * cannon: Define 64-bit syscalls Also add SYS_FSTAT to the list of no-op syscalls emulated by MT-Cannon * update semver-lock --- cannon/mipsevm/arch/arch32.go | 55 +++++++++++ cannon/mipsevm/arch/arch64.go | 61 ++++++++++++ cannon/mipsevm/exec/mips_syscalls.go | 55 ----------- .../multithreaded/instrumented_test.go | 5 +- cannon/mipsevm/multithreaded/mips.go | 97 ++++++++++--------- cannon/mipsevm/singlethreaded/mips.go | 14 +-- cannon/mipsevm/tests/evm_common_test.go | 4 +- .../mipsevm/tests/evm_multithreaded_test.go | 33 ++++--- .../mipsevm/tests/evm_singlethreaded_test.go | 2 +- cannon/mipsevm/tests/fuzz_evm_common_test.go | 16 +-- .../tests/fuzz_evm_multithreaded_test.go | 3 +- .../tests/fuzz_evm_singlethreaded_test.go | 4 +- packages/contracts-bedrock/semver-lock.json | 8 +- .../contracts-bedrock/src/cannon/MIPS.sol | 4 +- .../contracts-bedrock/src/cannon/MIPS2.sol | 16 +-- 
.../src/cannon/libraries/MIPSSyscalls.sol | 1 + 16 files changed, 222 insertions(+), 156 deletions(-) diff --git a/cannon/mipsevm/arch/arch32.go b/cannon/mipsevm/arch/arch32.go index 98a22c8382bc..87cad3cf504d 100644 --- a/cannon/mipsevm/arch/arch32.go +++ b/cannon/mipsevm/arch/arch32.go @@ -31,6 +31,61 @@ const ( HighMemoryStart = 0x7f_ff_d0_00 ) +// 32-bit Syscall codes +const ( + SysMmap = 4090 + SysBrk = 4045 + SysClone = 4120 + SysExitGroup = 4246 + SysRead = 4003 + SysWrite = 4004 + SysFcntl = 4055 + SysExit = 4001 + SysSchedYield = 4162 + SysGetTID = 4222 + SysFutex = 4238 + SysOpen = 4005 + SysNanosleep = 4166 + SysClockGetTime = 4263 + SysGetpid = 4020 +) + +// Noop Syscall codes +const ( + SysMunmap = 4091 + SysGetAffinity = 4240 + SysMadvise = 4218 + SysRtSigprocmask = 4195 + SysSigaltstack = 4206 + SysRtSigaction = 4194 + SysPrlimit64 = 4338 + SysClose = 4006 + SysPread64 = 4200 + SysFstat = 4108 + SysFstat64 = 4215 + SysOpenAt = 4288 + SysReadlink = 4085 + SysReadlinkAt = 4298 + SysIoctl = 4054 + SysEpollCreate1 = 4326 + SysPipe2 = 4328 + SysEpollCtl = 4249 + SysEpollPwait = 4313 + SysGetRandom = 4353 + SysUname = 4122 + SysStat64 = 4213 + SysGetuid = 4024 + SysGetgid = 4047 + SysLlseek = 4140 + SysMinCore = 4217 + SysTgkill = 4266 + // Profiling-related syscalls + SysSetITimer = 4104 + SysTimerCreate = 4257 + SysTimerSetTime = 4258 + SysTimerDelete = 4261 +) + var ByteOrderWord = byteOrder32{} type byteOrder32 struct{} diff --git a/cannon/mipsevm/arch/arch64.go b/cannon/mipsevm/arch/arch64.go index e01b44c50bab..a9b7df70c583 100644 --- a/cannon/mipsevm/arch/arch64.go +++ b/cannon/mipsevm/arch/arch64.go @@ -31,6 +31,67 @@ const ( HighMemoryStart = 0x7F_FF_FF_FF_D0_00_00_00 ) +// MIPS64 syscall table - https://github.com/torvalds/linux/blob/3efc57369a0ce8f76bf0804f7e673982384e4ac9/arch/mips/kernel/syscalls/syscall_n64.tbl. Generate the syscall numbers using the Makefile in that directory. 
+// See https://gpages.juszkiewicz.com.pl/syscalls-table/syscalls.html for the generated syscalls + +// 64-bit Syscall numbers - new +const ( + SysMmap = 5009 + SysBrk = 5012 + SysClone = 5055 + SysExitGroup = 5205 + SysRead = 5000 + SysWrite = 5001 + SysFcntl = 5070 + SysExit = 5058 + SysSchedYield = 5023 + SysGetTID = 5178 + SysFutex = 5194 + SysOpen = 5002 + SysNanosleep = 5034 + SysClockGetTime = 5222 + SysGetpid = 5038 +) + +// Noop Syscall numbers +const ( + // UndefinedSysNr is the value used for 32-bit syscall numbers that aren't supported for 64-bits + UndefinedSysNr = ^Word(0) + + SysMunmap = 5011 + SysGetAffinity = 5196 + SysMadvise = 5027 + SysRtSigprocmask = 5014 + SysSigaltstack = 5129 + SysRtSigaction = 5013 + SysPrlimit64 = 5297 + SysClose = 5003 + SysPread64 = 5016 + SysFstat = 5005 + SysFstat64 = UndefinedSysNr + SysOpenAt = 5247 + SysReadlink = 5087 + SysReadlinkAt = 5257 + SysIoctl = 5015 + SysEpollCreate1 = 5285 + SysPipe2 = 5287 + SysEpollCtl = 5208 + SysEpollPwait = 5272 + SysGetRandom = 5313 + SysUname = 5061 + SysStat64 = UndefinedSysNr + SysGetuid = 5100 + SysGetgid = 5102 + SysLlseek = UndefinedSysNr + SysMinCore = 5026 + SysTgkill = 5225 + // Profiling-related syscalls + SysSetITimer = 5036 + SysTimerCreate = 5216 + SysTimerSetTime = 5217 + SysTimerDelete = 5220 +) + var ByteOrderWord = byteOrder64{} type byteOrder64 struct{} diff --git a/cannon/mipsevm/exec/mips_syscalls.go b/cannon/mipsevm/exec/mips_syscalls.go index abb186266b71..8679a39b773c 100644 --- a/cannon/mipsevm/exec/mips_syscalls.go +++ b/cannon/mipsevm/exec/mips_syscalls.go @@ -19,61 +19,6 @@ const ( AddressMask = arch.AddressMask ) -// TODO(#12205): redefine syscalls for MIPS64 -// Syscall codes -const ( - SysMmap = 4090 - SysBrk = 4045 - SysClone = 4120 - SysExitGroup = 4246 - SysRead = 4003 - SysWrite = 4004 - SysFcntl = 4055 - SysExit = 4001 - SysSchedYield = 4162 - SysGetTID = 4222 - SysFutex = 4238 - SysOpen = 4005 - SysNanosleep = 4166 - SysClockGetTime = 4263 - 
SysGetpid = 4020 -) - -// Noop Syscall codes -const ( - SysMunmap = 4091 - SysGetAffinity = 4240 - SysMadvise = 4218 - SysRtSigprocmask = 4195 - SysSigaltstack = 4206 - SysRtSigaction = 4194 - SysPrlimit64 = 4338 - SysClose = 4006 - SysPread64 = 4200 - SysFstat64 = 4215 - SysOpenAt = 4288 - SysReadlink = 4085 - SysReadlinkAt = 4298 - SysIoctl = 4054 - SysEpollCreate1 = 4326 - SysPipe2 = 4328 - SysEpollCtl = 4249 - SysEpollPwait = 4313 - SysGetRandom = 4353 - SysUname = 4122 - SysStat64 = 4213 - SysGetuid = 4024 - SysGetgid = 4047 - SysLlseek = 4140 - SysMinCore = 4217 - SysTgkill = 4266 - // Profiling-related syscalls - SysSetITimer = 4104 - SysTimerCreate = 4257 - SysTimerSetTime = 4258 - SysTimerDelete = 4261 -) - // File descriptors const ( FdStdin = 0 diff --git a/cannon/mipsevm/multithreaded/instrumented_test.go b/cannon/mipsevm/multithreaded/instrumented_test.go index f0b005257f7c..dd4635668458 100644 --- a/cannon/mipsevm/multithreaded/instrumented_test.go +++ b/cannon/mipsevm/multithreaded/instrumented_test.go @@ -78,10 +78,11 @@ func TestInstrumentedState_Alloc(t *testing.T) { test := test t.Run(test.name, func(t *testing.T) { t.Parallel() - state, _ := testutil.LoadELFProgram(t, "../../testdata/example/bin/alloc.elf", CreateInitialState, false) + state, meta := testutil.LoadELFProgram(t, "../../testdata/example/bin/alloc.elf", CreateInitialState, false) oracle := testutil.AllocOracle(t, test.numAllocs, test.allocSize) - us := NewInstrumentedState(state, oracle, os.Stdout, os.Stderr, testutil.CreateLogger(), nil) + us := NewInstrumentedState(state, oracle, os.Stdout, os.Stderr, testutil.CreateLogger(), meta) + require.NoError(t, us.InitDebug()) // emulation shouldn't take more than 20 B steps for i := 0; i < 20_000_000_000; i++ { if us.GetState().GetExited() { diff --git a/cannon/mipsevm/multithreaded/mips.go b/cannon/mipsevm/multithreaded/mips.go index 43abdbf57157..4acd278ebd94 100644 --- a/cannon/mipsevm/multithreaded/mips.go +++ 
b/cannon/mipsevm/multithreaded/mips.go @@ -25,13 +25,13 @@ func (m *InstrumentedState) handleSyscall() error { //fmt.Printf("syscall: %d\n", syscallNum) switch syscallNum { - case exec.SysMmap: + case arch.SysMmap: var newHeap Word v0, v1, newHeap = exec.HandleSysMmap(a0, a1, m.state.Heap) m.state.Heap = newHeap - case exec.SysBrk: + case arch.SysBrk: v0 = program.PROGRAM_BREAK - case exec.SysClone: // clone + case arch.SysClone: // clone // a0 = flag bitmask, a1 = stack pointer if exec.ValidCloneFlags != a0 { m.state.Exited = true @@ -72,11 +72,11 @@ func (m *InstrumentedState) handleSyscall() error { // to ensure we are tracking in the context of the new thread m.stackTracker.PushStack(stackCaller, stackTarget) return nil - case exec.SysExitGroup: + case arch.SysExitGroup: m.state.Exited = true m.state.ExitCode = uint8(a0) return nil - case exec.SysRead: + case arch.SysRead: var newPreimageOffset Word var memUpdated bool var memAddr Word @@ -85,7 +85,7 @@ func (m *InstrumentedState) handleSyscall() error { if memUpdated { m.handleMemoryUpdate(memAddr) } - case exec.SysWrite: + case arch.SysWrite: var newLastHint hexutil.Bytes var newPreimageKey common.Hash var newPreimageOffset Word @@ -93,12 +93,12 @@ func (m *InstrumentedState) handleSyscall() error { m.state.LastHint = newLastHint m.state.PreimageKey = newPreimageKey m.state.PreimageOffset = newPreimageOffset - case exec.SysFcntl: + case arch.SysFcntl: v0, v1 = exec.HandleSysFcntl(a0, a1) - case exec.SysGetTID: + case arch.SysGetTID: v0 = thread.ThreadId v1 = 0 - case exec.SysExit: + case arch.SysExit: thread.Exited = true thread.ExitCode = uint8(a0) if m.lastThreadRemaining() { @@ -106,7 +106,7 @@ func (m *InstrumentedState) handleSyscall() error { m.state.ExitCode = uint8(a0) } return nil - case exec.SysFutex: + case arch.SysFutex: // args: a0 = addr, a1 = op, a2 = val, a3 = timeout effAddr := a0 & arch.AddressMask switch a1 { @@ -143,16 +143,16 @@ func (m *InstrumentedState) handleSyscall() error { v0 = 
exec.SysErrorSignal v1 = exec.MipsEINVAL } - case exec.SysSchedYield, exec.SysNanosleep: + case arch.SysSchedYield, arch.SysNanosleep: v0 = 0 v1 = 0 exec.HandleSyscallUpdates(&thread.Cpu, &thread.Registers, v0, v1) m.preemptThread(thread) return nil - case exec.SysOpen: + case arch.SysOpen: v0 = exec.SysErrorSignal v1 = exec.MipsEBADF - case exec.SysClockGetTime: + case arch.SysClockGetTime: switch a0 { case exec.ClockGettimeRealtimeFlag, exec.ClockGettimeMonotonicFlag: v0, v1 = 0, 0 @@ -175,44 +175,45 @@ func (m *InstrumentedState) handleSyscall() error { v0 = exec.SysErrorSignal v1 = exec.MipsEINVAL } - case exec.SysGetpid: + case arch.SysGetpid: v0 = 0 v1 = 0 - case exec.SysMunmap: - case exec.SysGetAffinity: - case exec.SysMadvise: - case exec.SysRtSigprocmask: - case exec.SysSigaltstack: - case exec.SysRtSigaction: - case exec.SysPrlimit64: - // TODO(#12205): may be needed for 64-bit Cannon - // case exec.SysGetRtLimit: - case exec.SysClose: - case exec.SysPread64: - case exec.SysFstat64: - case exec.SysOpenAt: - case exec.SysReadlink: - case exec.SysReadlinkAt: - case exec.SysIoctl: - case exec.SysEpollCreate1: - case exec.SysPipe2: - case exec.SysEpollCtl: - case exec.SysEpollPwait: - case exec.SysGetRandom: - case exec.SysUname: - case exec.SysStat64: - case exec.SysGetuid: - case exec.SysGetgid: - case exec.SysLlseek: - case exec.SysMinCore: - case exec.SysTgkill: - case exec.SysSetITimer: - case exec.SysTimerCreate: - case exec.SysTimerSetTime: - case exec.SysTimerDelete: + case arch.SysMunmap: + case arch.SysGetAffinity: + case arch.SysMadvise: + case arch.SysRtSigprocmask: + case arch.SysSigaltstack: + case arch.SysRtSigaction: + case arch.SysPrlimit64: + case arch.SysClose: + case arch.SysPread64: + case arch.SysFstat: + case arch.SysOpenAt: + case arch.SysReadlink: + case arch.SysReadlinkAt: + case arch.SysIoctl: + case arch.SysEpollCreate1: + case arch.SysPipe2: + case arch.SysEpollCtl: + case arch.SysEpollPwait: + case arch.SysGetRandom: + case 
arch.SysUname: + case arch.SysGetuid: + case arch.SysGetgid: + case arch.SysMinCore: + case arch.SysTgkill: + case arch.SysSetITimer: + case arch.SysTimerCreate: + case arch.SysTimerSetTime: + case arch.SysTimerDelete: default: - m.Traceback() - panic(fmt.Sprintf("unrecognized syscall: %d", syscallNum)) + // These syscalls have the same values on 64-bit. So we use if-stmts here to avoid "duplicate case" compiler error for the cannon64 build + if arch.IsMips32 && syscallNum == arch.SysFstat64 || syscallNum == arch.SysStat64 || syscallNum == arch.SysLlseek { + // noop + } else { + m.Traceback() + panic(fmt.Sprintf("unrecognized syscall: %d", syscallNum)) + } } exec.HandleSyscallUpdates(&thread.Cpu, &thread.Registers, v0, v1) diff --git a/cannon/mipsevm/singlethreaded/mips.go b/cannon/mipsevm/singlethreaded/mips.go index afef3fb41586..cbd00a84cd80 100644 --- a/cannon/mipsevm/singlethreaded/mips.go +++ b/cannon/mipsevm/singlethreaded/mips.go @@ -20,23 +20,23 @@ func (m *InstrumentedState) handleSyscall() error { //fmt.Printf("syscall: %d\n", syscallNum) switch syscallNum { - case exec.SysMmap: + case arch.SysMmap: var newHeap Word v0, v1, newHeap = exec.HandleSysMmap(a0, a1, m.state.Heap) m.state.Heap = newHeap - case exec.SysBrk: + case arch.SysBrk: v0 = arch.ProgramBreak - case exec.SysClone: // clone (not supported) + case arch.SysClone: // clone (not supported) v0 = 1 - case exec.SysExitGroup: + case arch.SysExitGroup: m.state.Exited = true m.state.ExitCode = uint8(a0) return nil - case exec.SysRead: + case arch.SysRead: var newPreimageOffset Word v0, v1, newPreimageOffset, _, _ = exec.HandleSysRead(a0, a1, a2, m.state.PreimageKey, m.state.PreimageOffset, m.preimageOracle, m.state.Memory, m.memoryTracker) m.state.PreimageOffset = newPreimageOffset - case exec.SysWrite: + case arch.SysWrite: var newLastHint hexutil.Bytes var newPreimageKey common.Hash var newPreimageOffset Word @@ -44,7 +44,7 @@ func (m *InstrumentedState) handleSyscall() error { m.state.LastHint = 
newLastHint m.state.PreimageKey = newPreimageKey m.state.PreimageOffset = newPreimageOffset - case exec.SysFcntl: + case arch.SysFcntl: v0, v1 = exec.HandleSysFcntl(a0, a1) } diff --git a/cannon/mipsevm/tests/evm_common_test.go b/cannon/mipsevm/tests/evm_common_test.go index 890feca9ee03..403c770a1826 100644 --- a/cannon/mipsevm/tests/evm_common_test.go +++ b/cannon/mipsevm/tests/evm_common_test.go @@ -346,7 +346,7 @@ func TestEVM_MMap(t *testing.T) { state := goVm.GetState() state.GetMemory().SetMemory(state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = exec.SysMmap + state.GetRegistersRef()[2] = arch.SysMmap state.GetRegistersRef()[4] = c.address state.GetRegistersRef()[5] = c.size step := state.GetStep() @@ -546,7 +546,7 @@ func TestEVMSysWriteHint(t *testing.T) { oracle := testutil.HintTrackingOracle{} goVm := v.VMFactory(&oracle, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(int64(i)), testutil.WithLastHint(tt.lastHint)) state := goVm.GetState() - state.GetRegistersRef()[2] = exec.SysWrite + state.GetRegistersRef()[2] = arch.SysWrite state.GetRegistersRef()[4] = exec.FdHintWrite state.GetRegistersRef()[5] = arch.Word(tt.memOffset) state.GetRegistersRef()[6] = arch.Word(tt.bytesToWrite) diff --git a/cannon/mipsevm/tests/evm_multithreaded_test.go b/cannon/mipsevm/tests/evm_multithreaded_test.go index d0da7910fa0a..f3216b6aab10 100644 --- a/cannon/mipsevm/tests/evm_multithreaded_test.go +++ b/cannon/mipsevm/tests/evm_multithreaded_test.go @@ -261,7 +261,7 @@ func TestEVM_MT_SysRead_Preimage(t *testing.T) { // Set up state state.PreimageKey = preimageKey state.PreimageOffset = c.preimageOffset - state.GetRegistersRef()[2] = exec.SysRead + state.GetRegistersRef()[2] = arch.SysRead state.GetRegistersRef()[4] = exec.FdPreimageRead state.GetRegistersRef()[5] = c.addr state.GetRegistersRef()[6] = c.count @@ -414,7 +414,7 @@ func TestEVM_SysClone_FlagHandling(t *testing.T) { t.Run(c.name, func(t *testing.T) { state := 
multithreaded.CreateEmptyState() state.Memory.SetMemory(state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = exec.SysClone // Set syscall number + state.GetRegistersRef()[2] = arch.SysClone // Set syscall number state.GetRegistersRef()[4] = c.flags // Set first argument curStep := state.Step @@ -467,7 +467,7 @@ func TestEVM_SysClone_Successful(t *testing.T) { goVm, state, contracts := setup(t, i, nil) mttestutil.InitializeSingleThread(i*333, state, c.traverseRight) state.Memory.SetMemory(state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = exec.SysClone // the syscall number + state.GetRegistersRef()[2] = arch.SysClone // the syscall number state.GetRegistersRef()[4] = exec.ValidCloneFlags // a0 - first argument, clone flags state.GetRegistersRef()[5] = stackPtr // a1 - the stack pointer step := state.GetStep() @@ -530,7 +530,7 @@ func TestEVM_SysGetTID(t *testing.T) { state.GetCurrentThread().ThreadId = c.threadId state.Memory.SetMemory(state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = exec.SysGetTID // Set syscall number + state.GetRegistersRef()[2] = arch.SysGetTID // Set syscall number step := state.Step // Set up post-state expectations @@ -573,7 +573,7 @@ func TestEVM_SysExit(t *testing.T) { mttestutil.SetupThreads(int64(i*1111), state, i%2 == 0, c.threadCount, 0) state.Memory.SetMemory(state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = exec.SysExit // Set syscall number + state.GetRegistersRef()[2] = arch.SysExit // Set syscall number state.GetRegistersRef()[4] = Word(exitCode) // The first argument (exit code) step := state.Step @@ -682,7 +682,7 @@ func TestEVM_SysFutex_WaitPrivate(t *testing.T) { state.Memory.SetMemory(state.GetPC(), syscallInsn) state.Memory.SetWord(c.effAddr, c.actualValue) - state.GetRegistersRef()[2] = exec.SysFutex // Set syscall number + state.GetRegistersRef()[2] = arch.SysFutex // Set syscall number state.GetRegistersRef()[4] = c.addressParam state.GetRegistersRef()[5] = exec.FutexWaitPrivate 
state.GetRegistersRef()[6] = c.targetValue @@ -752,7 +752,7 @@ func TestEVM_SysFutex_WakePrivate(t *testing.T) { step := state.Step state.Memory.SetMemory(state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = exec.SysFutex // Set syscall number + state.GetRegistersRef()[2] = arch.SysFutex // Set syscall number state.GetRegistersRef()[4] = c.addressParam state.GetRegistersRef()[5] = exec.FutexWakePrivate @@ -837,7 +837,7 @@ func TestEVM_SysFutex_UnsupportedOp(t *testing.T) { step := state.GetStep() state.Memory.SetMemory(state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = exec.SysFutex // Set syscall number + state.GetRegistersRef()[2] = arch.SysFutex // Set syscall number state.GetRegistersRef()[5] = op // Setup expectations @@ -863,11 +863,11 @@ func TestEVM_SysFutex_UnsupportedOp(t *testing.T) { } func TestEVM_SysYield(t *testing.T) { - runPreemptSyscall(t, "SysSchedYield", exec.SysSchedYield) + runPreemptSyscall(t, "SysSchedYield", arch.SysSchedYield) } func TestEVM_SysNanosleep(t *testing.T) { - runPreemptSyscall(t, "SysNanosleep", exec.SysNanosleep) + runPreemptSyscall(t, "SysNanosleep", arch.SysNanosleep) } func runPreemptSyscall(t *testing.T, syscallName string, syscallNum uint32) { @@ -922,7 +922,7 @@ func TestEVM_SysOpen(t *testing.T) { goVm, state, contracts := setup(t, 5512, nil) state.Memory.SetMemory(state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = exec.SysOpen // Set syscall number + state.GetRegistersRef()[2] = arch.SysOpen // Set syscall number step := state.Step // Set up post-state expectations @@ -947,7 +947,7 @@ func TestEVM_SysGetPID(t *testing.T) { goVm, state, contracts := setup(t, 1929, nil) state.Memory.SetMemory(state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = exec.SysGetpid // Set syscall number + state.GetRegistersRef()[2] = arch.SysGetpid // Set syscall number step := state.Step // Set up post-state expectations @@ -1030,7 +1030,7 @@ func testEVM_SysClockGettime(t *testing.T, clkid Word) { } 
state.Memory.SetMemory(state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = exec.SysClockGetTime // Set syscall number + state.GetRegistersRef()[2] = arch.SysClockGetTime // Set syscall number state.GetRegistersRef()[4] = clkid // a0 state.GetRegistersRef()[5] = c.timespecAddr // a1 state.LLReservationActive = v.llReservationActive @@ -1074,7 +1074,7 @@ func TestEVM_SysClockGettimeNonMonotonic(t *testing.T) { timespecAddr := Word(0x1000) state.Memory.SetMemory(state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = exec.SysClockGetTime // Set syscall number + state.GetRegistersRef()[2] = arch.SysClockGetTime // Set syscall number state.GetRegistersRef()[4] = 0xDEAD // a0 - invalid clockid state.GetRegistersRef()[5] = timespecAddr // a1 step := state.Step @@ -1103,6 +1103,7 @@ var NoopSyscalls = map[string]uint32{ "SysPrlimit64": 4338, "SysClose": 4006, "SysPread64": 4200, + "SysFstat": 4108, "SysFstat64": 4215, "SysOpenAt": 4288, "SysReadlink": 4085, @@ -1162,7 +1163,7 @@ func TestEVM_UnsupportedSyscall(t *testing.T) { var tracer *tracing.Hooks var NoopSyscallNums = maps.Values(NoopSyscalls) - var SupportedSyscalls = []uint32{exec.SysMmap, exec.SysBrk, exec.SysClone, exec.SysExitGroup, exec.SysRead, exec.SysWrite, exec.SysFcntl, exec.SysExit, exec.SysSchedYield, exec.SysGetTID, exec.SysFutex, exec.SysOpen, exec.SysNanosleep, exec.SysClockGetTime, exec.SysGetpid} + var SupportedSyscalls = []uint32{arch.SysMmap, arch.SysBrk, arch.SysClone, arch.SysExitGroup, arch.SysRead, arch.SysWrite, arch.SysFcntl, arch.SysExit, arch.SysSchedYield, arch.SysGetTID, arch.SysFutex, arch.SysOpen, arch.SysNanosleep, arch.SysClockGetTime, arch.SysGetpid} unsupportedSyscalls := make([]uint32, 0, 400) for i := 4000; i < 4400; i++ { candidate := uint32(i) @@ -1476,7 +1477,7 @@ func TestEVM_SchedQuantumThreshold(t *testing.T) { goVm, state, contracts := setup(t, i*789, nil) // Setup basic getThreadId syscall instruction state.Memory.SetMemory(state.GetPC(), syscallInsn) - 
state.GetRegistersRef()[2] = exec.SysGetTID // Set syscall number + state.GetRegistersRef()[2] = arch.SysGetTID // Set syscall number state.StepsSinceLastContextSwitch = c.stepsSinceLastContextSwitch step := state.Step diff --git a/cannon/mipsevm/tests/evm_singlethreaded_test.go b/cannon/mipsevm/tests/evm_singlethreaded_test.go index 73613d3590f4..dc50a95d77de 100644 --- a/cannon/mipsevm/tests/evm_singlethreaded_test.go +++ b/cannon/mipsevm/tests/evm_singlethreaded_test.go @@ -166,7 +166,7 @@ func TestEVM_SysRead_Preimage(t *testing.T) { step := state.GetStep() // Set up state - state.GetRegistersRef()[2] = exec.SysRead + state.GetRegistersRef()[2] = arch.SysRead state.GetRegistersRef()[4] = exec.FdPreimageRead state.GetRegistersRef()[5] = c.addr state.GetRegistersRef()[6] = c.count diff --git a/cannon/mipsevm/tests/fuzz_evm_common_test.go b/cannon/mipsevm/tests/fuzz_evm_common_test.go index 712b7d4875d3..e9cb5b453dea 100644 --- a/cannon/mipsevm/tests/fuzz_evm_common_test.go +++ b/cannon/mipsevm/tests/fuzz_evm_common_test.go @@ -27,7 +27,7 @@ func FuzzStateSyscallBrk(f *testing.F) { t.Run(v.Name, func(t *testing.T) { goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(seed)) state := goVm.GetState() - state.GetRegistersRef()[2] = exec.SysBrk + state.GetRegistersRef()[2] = arch.SysBrk state.GetMemory().SetMemory(state.GetPC(), syscallInsn) step := state.GetStep() @@ -65,7 +65,7 @@ func FuzzStateSyscallMmap(f *testing.F) { state := goVm.GetState() step := state.GetStep() - state.GetRegistersRef()[2] = exec.SysMmap + state.GetRegistersRef()[2] = arch.SysMmap state.GetRegistersRef()[4] = addr state.GetRegistersRef()[5] = siz state.GetMemory().SetMemory(state.GetPC(), syscallInsn) @@ -112,7 +112,7 @@ func FuzzStateSyscallExitGroup(f *testing.F) { goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(seed)) state := goVm.GetState() - state.GetRegistersRef()[2] = exec.SysExitGroup 
+ state.GetRegistersRef()[2] = arch.SysExitGroup state.GetRegistersRef()[4] = Word(exitCode) state.GetMemory().SetMemory(state.GetPC(), syscallInsn) step := state.GetStep() @@ -141,7 +141,7 @@ func FuzzStateSyscallFcntl(f *testing.F) { goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(seed)) state := goVm.GetState() - state.GetRegistersRef()[2] = exec.SysFcntl + state.GetRegistersRef()[2] = arch.SysFcntl state.GetRegistersRef()[4] = fd state.GetRegistersRef()[5] = cmd state.GetMemory().SetMemory(state.GetPC(), syscallInsn) @@ -201,7 +201,7 @@ func FuzzStateHintRead(f *testing.F) { goVm := v.VMFactory(oracle, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(seed), testutil.WithPreimageKey(preimageKey)) state := goVm.GetState() - state.GetRegistersRef()[2] = exec.SysRead + state.GetRegistersRef()[2] = arch.SysRead state.GetRegistersRef()[4] = exec.FdHintRead state.GetRegistersRef()[5] = addr state.GetRegistersRef()[6] = count @@ -245,7 +245,7 @@ func FuzzStatePreimageRead(f *testing.F) { goVm := v.VMFactory(oracle, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(seed), testutil.WithPreimageKey(preimageKey), testutil.WithPreimageOffset(preimageOffset), testutil.WithPCAndNextPC(pc)) state := goVm.GetState() - state.GetRegistersRef()[2] = exec.SysRead + state.GetRegistersRef()[2] = arch.SysRead state.GetRegistersRef()[4] = exec.FdPreimageRead state.GetRegistersRef()[5] = addr state.GetRegistersRef()[6] = count @@ -324,7 +324,7 @@ func FuzzStateHintWrite(f *testing.F) { goVm := v.VMFactory(oracle, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(randSeed), testutil.WithLastHint(lastHint), testutil.WithPCAndNextPC(pc)) state := goVm.GetState() - state.GetRegistersRef()[2] = exec.SysWrite + state.GetRegistersRef()[2] = arch.SysWrite state.GetRegistersRef()[4] = exec.FdHintWrite state.GetRegistersRef()[5] = addr state.GetRegistersRef()[6] = count @@ 
-390,7 +390,7 @@ func FuzzStatePreimageWrite(f *testing.F) { goVm := v.VMFactory(oracle, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(seed), testutil.WithPreimageKey(preimageKey), testutil.WithPreimageOffset(128), testutil.WithPCAndNextPC(pc)) state := goVm.GetState() - state.GetRegistersRef()[2] = exec.SysWrite + state.GetRegistersRef()[2] = arch.SysWrite state.GetRegistersRef()[4] = exec.FdPreimageWrite state.GetRegistersRef()[5] = addr state.GetRegistersRef()[6] = count diff --git a/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go b/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go index c64658934421..8082782f483c 100644 --- a/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go +++ b/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go @@ -6,6 +6,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" mttestutil "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded/testutil" @@ -27,7 +28,7 @@ func FuzzStateSyscallCloneMT(f *testing.F) { // Setup state.NextThreadId = nextThreadId state.GetMemory().SetMemory(state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = exec.SysClone + state.GetRegistersRef()[2] = arch.SysClone state.GetRegistersRef()[4] = exec.ValidCloneFlags state.GetRegistersRef()[5] = stackPtr step := state.GetStep() diff --git a/cannon/mipsevm/tests/fuzz_evm_singlethreaded_test.go b/cannon/mipsevm/tests/fuzz_evm_singlethreaded_test.go index cc30c0040196..503f4b7bd2e8 100644 --- a/cannon/mipsevm/tests/fuzz_evm_singlethreaded_test.go +++ b/cannon/mipsevm/tests/fuzz_evm_singlethreaded_test.go @@ -7,7 +7,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/stretchr/testify/require" - "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" + 
"github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" ) @@ -16,7 +16,7 @@ func FuzzStateSyscallCloneST(f *testing.F) { f.Fuzz(func(t *testing.T, seed int64) { goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), testutil.WithRandomization(seed)) state := goVm.GetState() - state.GetRegistersRef()[2] = exec.SysClone + state.GetRegistersRef()[2] = arch.SysClone state.GetMemory().SetMemory(state.GetPC(), syscallInsn) step := state.GetStep() diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index 5a41aac33f47..dc5c466bcf64 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -140,12 +140,12 @@ "sourceCodeHash": "0x2ab6be69795109a1ee04c5693a34d6ce0ff90b62e404cdeb18178bab18d06784" }, "src/cannon/MIPS.sol": { - "initCodeHash": "0x3992081512da36af76b707aee7d8ef9e084c54fb1dc9f8ce9989ed16d1216f01", - "sourceCodeHash": "0x7630362c20fbca071452031b88c9384d3215c4f2cbee24c7989901de63b0c178" + "initCodeHash": "0xa9a9db7bedf25800f20c947df10310c64beb2ead8eb6be991c83189e975df0fe", + "sourceCodeHash": "0x83aabf115ac0ad407868e633a521602c41d86864d82198e6abbf69d33daaea65" }, "src/cannon/MIPS2.sol": { - "initCodeHash": "0x590be819d8f02a7f9eb04ddc447f93ccbfd8bc9339f7c2e65336f9805b6c9a66", - "sourceCodeHash": "0x5bc0ab24cf926953b2ea9eb40b929821e280a7181c6cb18e7954bc3f7dc59be1" + "initCodeHash": "0xbb203b0d83efddfa0f664dbc63ec55844318b48fe8133758307f64e87c892a47", + "sourceCodeHash": "0x16614cc0e6abf7e81e1e5dc2c0773ee7101cb38af40e0907a8800ca7eddd3b5a" }, "src/cannon/PreimageOracle.sol": { "initCodeHash": "0xa0b19e18561da9990c95ebea9750dd901f73147b32b8b234eca0f35073c5a970", diff --git a/packages/contracts-bedrock/src/cannon/MIPS.sol b/packages/contracts-bedrock/src/cannon/MIPS.sol index 603ead867284..e6c8d02cc349 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS.sol +++ 
b/packages/contracts-bedrock/src/cannon/MIPS.sol @@ -45,8 +45,8 @@ contract MIPS is ISemver { } /// @notice The semantic version of the MIPS contract. - /// @custom:semver 1.2.1-beta.1 - string public constant version = "1.2.1-beta.1"; + /// @custom:semver 1.2.1-beta.2 + string public constant version = "1.2.1-beta.2"; /// @notice The preimage oracle contract. IPreimageOracle internal immutable ORACLE; diff --git a/packages/contracts-bedrock/src/cannon/MIPS2.sol b/packages/contracts-bedrock/src/cannon/MIPS2.sol index ebbf9302c1de..77d3530e0001 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS2.sol +++ b/packages/contracts-bedrock/src/cannon/MIPS2.sol @@ -57,8 +57,8 @@ contract MIPS2 is ISemver { } /// @notice The semantic version of the MIPS2 contract. - /// @custom:semver 1.0.0-beta.13 - string public constant version = "1.0.0-beta.13"; + /// @custom:semver 1.0.0-beta.14 + string public constant version = "1.0.0-beta.14"; /// @notice The preimage oracle contract. IPreimageOracle internal immutable ORACLE; @@ -534,7 +534,7 @@ contract MIPS2 is ISemver { // ignored } else if (syscall_no == sys.SYS_PREAD64) { // ignored - } else if (syscall_no == sys.SYS_FSTAT64) { + } else if (syscall_no == sys.SYS_FSTAT) { // ignored } else if (syscall_no == sys.SYS_OPENAT) { // ignored @@ -556,14 +556,10 @@ contract MIPS2 is ISemver { // ignored } else if (syscall_no == sys.SYS_UNAME) { // ignored - } else if (syscall_no == sys.SYS_STAT64) { - // ignored } else if (syscall_no == sys.SYS_GETUID) { // ignored } else if (syscall_no == sys.SYS_GETGID) { // ignored - } else if (syscall_no == sys.SYS_LLSEEK) { - // ignored } else if (syscall_no == sys.SYS_MINCORE) { // ignored } else if (syscall_no == sys.SYS_TGKILL) { @@ -577,7 +573,11 @@ contract MIPS2 is ISemver { } else if (syscall_no == sys.SYS_TIMERDELETE) { // ignored } else { - revert("MIPS2: unimplemented syscall"); + if (syscall_no == sys.SYS_FSTAT64 || syscall_no == sys.SYS_STAT64 || syscall_no == sys.SYS_LLSEEK) { + // 
noop + } else { + revert("MIPS2: unimplemented syscall"); + } } st.CpuScalars memory cpu = getCpuScalars(thread); diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol index 1b5fddaba7fd..968faaf9aea7 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPSSyscalls.sol @@ -53,6 +53,7 @@ library MIPSSyscalls { uint32 internal constant SYS_PRLIMIT64 = 4338; uint32 internal constant SYS_CLOSE = 4006; uint32 internal constant SYS_PREAD64 = 4200; + uint32 internal constant SYS_FSTAT = 4108; uint32 internal constant SYS_FSTAT64 = 4215; uint32 internal constant SYS_OPENAT = 4288; uint32 internal constant SYS_READLINK = 4085; From 33bc0bec8012a9000565057f9b406384b05b8726 Mon Sep 17 00:00:00 2001 From: Maurelian Date: Wed, 2 Oct 2024 14:19:06 -0400 Subject: [PATCH 115/116] feat: Extract deployment of pre-v1.6.0 code (#12233) * feat: Add a test to simplify deploy script testing * fix: lint * feat: Add clarifying comments to DeployVariations_Test * feat: Extract deployment of pre-v1.6.0 code * feat: combine setupOPChain and DeployOpChain This will help to clarify where the DeployOpcChain script can be used. 
* fix: op-e2e issues with unfound contracts * fix: Undo conditional init of L2OutputOracle It needs to be in the state no matter what for op-e2e * fix: correct deploy ordering of legacy contracts * ugly but working * clean up legacy deployments * enable FP on system config custom gas token tests * op-e2e: allow no OptimismPortal impl if useFaultProofs * fix: lint --- op-chain-ops/genesis/config.go | 4 + .../scripts/deploy/Deploy.s.sol | 104 ++++++++---------- .../test/L1/SystemConfig.t.sol | 1 + 3 files changed, 49 insertions(+), 60 deletions(-) diff --git a/op-chain-ops/genesis/config.go b/op-chain-ops/genesis/config.go index 26f675b30e7f..40a26ecd5192 100644 --- a/op-chain-ops/genesis/config.go +++ b/op-chain-ops/genesis/config.go @@ -1017,6 +1017,10 @@ func (d *L1Deployments) Check(deployConfig *DeployConfig) error { name == "DisputeGameFactoryProxy") { continue } + if deployConfig.UseFaultProofs && + (name == "OptimismPortal") { + continue + } if !deployConfig.UseAltDA && (name == "DataAvailabilityChallenge" || name == "DataAvailabilityChallengeProxy") { diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index b625bdc31d59..b7410ec1efed 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -144,26 +144,6 @@ contract Deploy is Deployer { return keccak256(bytes(Config.implSalt())); } - /// @notice Returns the proxy addresses. If a proxy is not found, it will have address(0). 
- function _proxies() internal view returns (Types.ContractSet memory proxies_) { - proxies_ = Types.ContractSet({ - L1CrossDomainMessenger: mustGetAddress("L1CrossDomainMessengerProxy"), - L1StandardBridge: mustGetAddress("L1StandardBridgeProxy"), - L2OutputOracle: mustGetAddress("L2OutputOracleProxy"), - DisputeGameFactory: mustGetAddress("DisputeGameFactoryProxy"), - DelayedWETH: mustGetAddress("DelayedWETHProxy"), - PermissionedDelayedWETH: mustGetAddress("PermissionedDelayedWETHProxy"), - AnchorStateRegistry: mustGetAddress("AnchorStateRegistryProxy"), - OptimismMintableERC20Factory: mustGetAddress("OptimismMintableERC20FactoryProxy"), - OptimismPortal: mustGetAddress("OptimismPortalProxy"), - OptimismPortal2: mustGetAddress("OptimismPortalProxy"), - SystemConfig: mustGetAddress("SystemConfigProxy"), - L1ERC721Bridge: mustGetAddress("L1ERC721BridgeProxy"), - ProtocolVersions: mustGetAddress("ProtocolVersionsProxy"), - SuperchainConfig: mustGetAddress("SuperchainConfigProxy") - }); - } - /// @notice Returns the proxy addresses, not reverting if any are unset. function _proxiesUnstrict() internal view returns (Types.ContractSet memory proxies_) { proxies_ = Types.ContractSet({ @@ -274,7 +254,23 @@ contract Deploy is Deployer { } else { deployImplementations(); } - setupOpChain(); + + // Deploy Current OPChain Contracts + deployOpChain(); + + // Deploy and setup the legacy (pre-faultproofs) contracts + deployERC1967Proxy("L2OutputOracleProxy"); + deployL2OutputOracle(); + initializeL2OutputOracle(); + + // The OptimismPortalProxy contract is used both with and without Fault Proofs enabled, and is deployed by + // deployOPChain. So we only need to deploy the legacy OptimismPortal implementation and initialize with it + // when Fault Proofs are disabled. 
+ if (!cfg.useFaultProofs()) { + deployOptimismPortal(); + initializeOptimismPortal(); + } + if (cfg.useAltDA()) { bytes32 typeHash = keccak256(bytes(cfg.daCommitmentType())); bytes32 keccakHash = keccak256(bytes("KeccakCommitment")); @@ -318,33 +314,18 @@ contract Deploy is Deployer { save("ProtocolVersions", address(dso.protocolVersionsImpl())); } - /// @notice Deploy a new OP Chain, with an existing SuperchainConfig provided - function setupOpChain() public { + /// @notice Deploy all of the OP Chain specific contracts + function deployOpChain() public { console.log("Deploying OP Chain"); deployAddressManager(); deployProxyAdmin({ _isSuperchain: false }); + transferAddressManagerOwnership(); // to the ProxyAdmin // Ensure that the requisite contracts are deployed mustGetAddress("SuperchainConfigProxy"); mustGetAddress("AddressManager"); mustGetAddress("ProxyAdmin"); - deployOpChain(); - initializeOpChain(); - - setAlphabetFaultGameImplementation({ _allowUpgrade: false }); - setFastFaultGameImplementation({ _allowUpgrade: false }); - setCannonFaultGameImplementation({ _allowUpgrade: false }); - setPermissionedCannonFaultGameImplementation({ _allowUpgrade: false }); - - transferDisputeGameFactoryOwnership(); - transferDelayedWETHOwnership(); - } - - /// @notice Deploy all of the OP Chain specific contracts - function deployOpChain() public { - console.log("Deploying OP Chain contracts"); - deployERC1967Proxy("OptimismPortalProxy"); deployERC1967Proxy("SystemConfigProxy"); deployL1StandardBridgeProxy(); @@ -356,26 +337,32 @@ contract Deploy is Deployer { // enabled to prevent a nastier refactor to the deploy scripts. In the future, the L2OutputOracle will be // removed. If fault proofs are not enabled, the DisputeGameFactory proxy will be unused. 
deployERC1967Proxy("DisputeGameFactoryProxy"); - deployERC1967Proxy("L2OutputOracleProxy"); deployERC1967Proxy("DelayedWETHProxy"); deployERC1967Proxy("PermissionedDelayedWETHProxy"); deployERC1967Proxy("AnchorStateRegistryProxy"); deployAnchorStateRegistry(); - transferAddressManagerOwnership(); // to the ProxyAdmin + initializeOpChain(); + + setAlphabetFaultGameImplementation({ _allowUpgrade: false }); + setFastFaultGameImplementation({ _allowUpgrade: false }); + setCannonFaultGameImplementation({ _allowUpgrade: false }); + setPermissionedCannonFaultGameImplementation({ _allowUpgrade: false }); + + transferDisputeGameFactoryOwnership(); + transferDelayedWETHOwnership(); } /// @notice Deploy all of the implementations function deployImplementations() public { + // TODO: Replace the actions in this function with a call to DeployImplementationsInterop.run() console.log("Deploying implementations"); deployL1CrossDomainMessenger(); deployOptimismMintableERC20Factory(); deploySystemConfig(); deployL1StandardBridge(); deployL1ERC721Bridge(); - deployOptimismPortal(); // todo: pull this out into an override option after DeployImplementations runs - deployL2OutputOracle(); // Fault proofs deployOptimismPortal2(); @@ -387,13 +374,13 @@ contract Deploy is Deployer { /// @notice Deploy all of the implementations function deployImplementationsInterop() public { + // TODO: Replace the actions in this function with a call to DeployImplementationsInterop.run() console.log("Deploying implementations"); deployL1CrossDomainMessenger(); deployOptimismMintableERC20Factory(); deploySystemConfigInterop(); deployL1StandardBridge(); deployL1ERC721Bridge(); - deployL2OutputOracle(); // Fault proofs deployOptimismPortalInterop(); @@ -407,13 +394,11 @@ contract Deploy is Deployer { /// initialize function function initializeOpChain() public { console.log("Initializing Op Chain proxies"); - // Selectively initialize either the original OptimismPortal or the new OptimismPortal2. 
Since this will upgrade - // the proxy, we cannot initialize both. + // The OptimismPortal Proxy is shared between the legacy and current deployment path, so we should initialize + // the OptimismPortal2 only if using FaultProofs. if (cfg.useFaultProofs()) { console.log("Fault proofs enabled. Initializing the OptimismPortal proxy with the OptimismPortal2."); initializeOptimismPortal2(); - } else { - initializeOptimismPortal(); } initializeSystemConfig(); @@ -421,7 +406,6 @@ contract Deploy is Deployer { initializeL1ERC721Bridge(); initializeOptimismMintableERC20Factory(); initializeL1CrossDomainMessenger(); - initializeL2OutputOracle(); initializeDisputeGameFactory(); initializeDelayedWETH(); initializePermissionedDelayedWETH(); @@ -1108,7 +1092,7 @@ contract Deploy is Deployer { string memory version = config.version(); console.log("SystemConfig version: %s", version); - ChainAssertions.checkSystemConfig({ _contracts: _proxies(), _cfg: cfg, _isProxy: true }); + ChainAssertions.checkSystemConfig({ _contracts: _proxiesUnstrict(), _cfg: cfg, _isProxy: true }); } /// @notice Initialize the L1StandardBridge @@ -1143,7 +1127,7 @@ contract Deploy is Deployer { string memory version = IL1StandardBridge(payable(l1StandardBridgeProxy)).version(); console.log("L1StandardBridge version: %s", version); - ChainAssertions.checkL1StandardBridge({ _contracts: _proxies(), _isProxy: true }); + ChainAssertions.checkL1StandardBridge({ _contracts: _proxiesUnstrict(), _isProxy: true }); } /// @notice Initialize the L1ERC721Bridge @@ -1168,7 +1152,7 @@ contract Deploy is Deployer { string memory version = bridge.version(); console.log("L1ERC721Bridge version: %s", version); - ChainAssertions.checkL1ERC721Bridge({ _contracts: _proxies(), _isProxy: true }); + ChainAssertions.checkL1ERC721Bridge({ _contracts: _proxiesUnstrict(), _isProxy: true }); } /// @notice Initialize the OptimismMintableERC20Factory @@ -1189,7 +1173,7 @@ contract Deploy is Deployer { string memory version = 
factory.version(); console.log("OptimismMintableERC20Factory version: %s", version); - ChainAssertions.checkOptimismMintableERC20Factory({ _contracts: _proxies(), _isProxy: true }); + ChainAssertions.checkOptimismMintableERC20Factory({ _contracts: _proxiesUnstrict(), _isProxy: true }); } /// @notice initializeL1CrossDomainMessenger @@ -1235,7 +1219,7 @@ contract Deploy is Deployer { string memory version = messenger.version(); console.log("L1CrossDomainMessenger version: %s", version); - ChainAssertions.checkL1CrossDomainMessenger({ _contracts: _proxies(), _vm: vm, _isProxy: true }); + ChainAssertions.checkL1CrossDomainMessenger({ _contracts: _proxiesUnstrict(), _vm: vm, _isProxy: true }); } /// @notice Initialize the L2OutputOracle @@ -1267,7 +1251,7 @@ contract Deploy is Deployer { console.log("L2OutputOracle version: %s", version); ChainAssertions.checkL2OutputOracle({ - _contracts: _proxies(), + _contracts: _proxiesUnstrict(), _cfg: cfg, _l2OutputOracleStartingTimestamp: cfg.l2OutputOracleStartingTimestamp(), _isProxy: true @@ -1301,7 +1285,7 @@ contract Deploy is Deployer { string memory version = portal.version(); console.log("OptimismPortal version: %s", version); - ChainAssertions.checkOptimismPortal({ _contracts: _proxies(), _cfg: cfg, _isProxy: true }); + ChainAssertions.checkOptimismPortal({ _contracts: _proxiesUnstrict(), _cfg: cfg, _isProxy: true }); } /// @notice Initialize the OptimismPortal2 @@ -1332,7 +1316,7 @@ contract Deploy is Deployer { string memory version = portal.version(); console.log("OptimismPortal2 version: %s", version); - ChainAssertions.checkOptimismPortal2({ _contracts: _proxies(), _cfg: cfg, _isProxy: true }); + ChainAssertions.checkOptimismPortal2({ _contracts: _proxiesUnstrict(), _cfg: cfg, _isProxy: true }); } /// @notice Transfer ownership of the DisputeGameFactory contract to the final system owner @@ -1346,7 +1330,7 @@ contract Deploy is Deployer { disputeGameFactory.transferOwnership(finalSystemOwner); 
console.log("DisputeGameFactory ownership transferred to final system owner at: %s", finalSystemOwner); } - ChainAssertions.checkDisputeGameFactory({ _contracts: _proxies(), _expectedOwner: finalSystemOwner }); + ChainAssertions.checkDisputeGameFactory({ _contracts: _proxiesUnstrict(), _expectedOwner: finalSystemOwner }); } /// @notice Transfer ownership of the DelayedWETH contract to the final system owner @@ -1361,7 +1345,7 @@ contract Deploy is Deployer { console.log("DelayedWETH ownership transferred to final system owner at: %s", finalSystemOwner); } ChainAssertions.checkDelayedWETH({ - _contracts: _proxies(), + _contracts: _proxiesUnstrict(), _cfg: cfg, _isProxy: true, _expectedOwner: finalSystemOwner @@ -1380,7 +1364,7 @@ contract Deploy is Deployer { console.log("DelayedWETH ownership transferred to final system owner at: %s", finalSystemOwner); } ChainAssertions.checkPermissionedDelayedWETH({ - _contracts: _proxies(), + _contracts: _proxiesUnstrict(), _cfg: cfg, _isProxy: true, _expectedOwner: finalSystemOwner diff --git a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol index 91819d8ef70a..aad093e3283a 100644 --- a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol +++ b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol @@ -312,6 +312,7 @@ contract SystemConfig_Init_CustomGasToken is SystemConfig_Init { function setUp() public override { token = new ERC20("Silly", "SIL"); super.enableCustomGasToken(address(token)); + super.enableFaultProofs(); super.setUp(); } From 619f23c098e8e79a52029f527b7335f8e14a71d1 Mon Sep 17 00:00:00 2001 From: agusduha Date: Wed, 2 Oct 2024 17:07:17 -0300 Subject: [PATCH 116/116] fix: pre pr --- packages/contracts-bedrock/.gas-snapshot | 6 +++--- packages/contracts-bedrock/lib/forge-std | 2 +- .../scripts/ops/FeeVaultWithdrawal.s.sol | 2 +- packages/contracts-bedrock/semver-lock.json | 10 +++++----- .../src/L2/interfaces/ISuperchainWETH.sol | 2 -- 5 files 
changed, 10 insertions(+), 12 deletions(-) diff --git a/packages/contracts-bedrock/.gas-snapshot b/packages/contracts-bedrock/.gas-snapshot index 8e43cb748941..4c8038a0ac68 100644 --- a/packages/contracts-bedrock/.gas-snapshot +++ b/packages/contracts-bedrock/.gas-snapshot @@ -9,9 +9,9 @@ GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_1() (gas: 2967382 GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_0() (gas: 564356) GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_1() (gas: 4076571) GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_0() (gas: 467019) -GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_1() (gas: 3512723) +GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_1() (gas: 3512701) GasBenchMark_L1StandardBridge_Finalize:test_finalizeETHWithdrawal_benchmark() (gas: 72618) GasBenchMark_L2OutputOracle:test_proposeL2Output_benchmark() (gas: 92973) -GasBenchMark_OptimismPortal:test_depositTransaction_benchmark() (gas: 68357) -GasBenchMark_OptimismPortal:test_depositTransaction_benchmark_1() (gas: 68921) +GasBenchMark_OptimismPortal:test_depositTransaction_benchmark() (gas: 68312) +GasBenchMark_OptimismPortal:test_depositTransaction_benchmark_1() (gas: 68943) GasBenchMark_OptimismPortal:test_proveWithdrawalTransaction_benchmark() (gas: 155610) \ No newline at end of file diff --git a/packages/contracts-bedrock/lib/forge-std b/packages/contracts-bedrock/lib/forge-std index 8f24d6b04c92..2d8b7b876a5b 160000 --- a/packages/contracts-bedrock/lib/forge-std +++ b/packages/contracts-bedrock/lib/forge-std @@ -1 +1 @@ -Subproject commit 8f24d6b04c92975e0795b5868aa0d783251cdeaa +Subproject commit 2d8b7b876a5b328d6a73e13c4740ed7a0d72d5f4 diff --git a/packages/contracts-bedrock/scripts/ops/FeeVaultWithdrawal.s.sol b/packages/contracts-bedrock/scripts/ops/FeeVaultWithdrawal.s.sol index 5a7b48847614..e19cd7e994bd 100644 --- 
a/packages/contracts-bedrock/scripts/ops/FeeVaultWithdrawal.s.sol +++ b/packages/contracts-bedrock/scripts/ops/FeeVaultWithdrawal.s.sol @@ -65,7 +65,7 @@ contract FeeVaultWithdrawal is Script { } /// @notice Logs the information relevant to the user. - function log(uint256 _balance, address _recipient, address _vault) internal pure { + function log(uint256 _balance, address _recipient, address _vault) internal view { string memory logline = string.concat( "Withdrawing ", vm.toString(_balance), " to ", vm.toString(_recipient), " from ", vm.toString(_vault) ); diff --git a/packages/contracts-bedrock/semver-lock.json b/packages/contracts-bedrock/semver-lock.json index c0cbb7726450..adae1ff092bf 100644 --- a/packages/contracts-bedrock/semver-lock.json +++ b/packages/contracts-bedrock/semver-lock.json @@ -117,7 +117,7 @@ }, "src/L2/OptimismSuperchainERC20.sol": { "initCodeHash": "0xc6452d9aef6d76bdc789f3cddac6862658a481c619e6a2e7a74f6d61147f927b", - "sourceCodeHash": "0x2502433e4b622e1697ca071f91a95b08fa40fdb03bfd958c44b2033a47df2010" + "sourceCodeHash": "0x4463e49c98ceb3327bd768579341d1e0863c8c3925d4b533fbc0f7951306261f" }, "src/L2/OptimismSuperchainERC20Beacon.sol": { "initCodeHash": "0x99ce8095b23c124850d866cbc144fee6cee05dbc6bb5d83acadfe00b90cf42c7", @@ -125,7 +125,7 @@ }, "src/L2/OptimismSuperchainERC20Factory.sol": { "initCodeHash": "0x43ec413140b05bfb83ec453b0d4f82b33a2d560bf8c76405d08de17565b87053", - "sourceCodeHash": "0x1e8e1262a549ce7e24e19174a998716ceb9a3034296b456914d74b4cb4f40caa" + "sourceCodeHash": "0x04a88ee6c4cf68becf8727b53cbc56ab6cfbaac9dbeb61083f63613dbf823a76" }, "src/L2/SequencerFeeVault.sol": { "initCodeHash": "0x2e6551705e493bacba8cffe22e564d5c401ae5bb02577a5424e0d32784e13e74", @@ -136,11 +136,11 @@ "sourceCodeHash": "0x9bc2e208774eb923894dbe391a5038a6189d7d36c202f4bf3e2c4dd332b0adf0" }, "src/L2/SuperchainERC20Bridge.sol": { - "initCodeHash": "0xa21232df1d7239fd20e7eaa320cfc91efc76343c93d833d8060a58b54ac5c8bf", + "initCodeHash": 
"0xea7eb314f96cd2520a58012ff7cc376c82c5a95612187ff6bb96ace4f095ebc4", "sourceCodeHash": "0x83188d878ce0b2890a7f7f41d09a8807f94a126e0ea274f0dac8b93f77217d3b" }, "src/L2/SuperchainWETH.sol": { - "initCodeHash": "0xf30071df59d85e0e8a552845031aca8d6f0261762e1b4ea1b28ff30379eaa20e", + "initCodeHash": "0x5db03c5c4cd6ea9e4b3e74e28f50d04fd3e130af5109b34fa208808fa9ba7742", "sourceCodeHash": "0xdafbb056dbc6198ade27a0ee051e9cd1c8f03084beb50821dc93c82d710ef2b4" }, "src/L2/WETH.sol": { @@ -235,4 +235,4 @@ "initCodeHash": "0x21b3059e9b13b330f76d02b61f61dcfa3abf3517a0b56afa0895c4b8291740bf", "sourceCodeHash": "0xc1ea12a87e3a7ef9c950f0a41a4e35b60d4d9c4c816ff671dbfca663861c16f4" } -} +} \ No newline at end of file diff --git a/packages/contracts-bedrock/src/L2/interfaces/ISuperchainWETH.sol b/packages/contracts-bedrock/src/L2/interfaces/ISuperchainWETH.sol index 61728a8ff73d..bccab456f5fd 100644 --- a/packages/contracts-bedrock/src/L2/interfaces/ISuperchainWETH.sol +++ b/packages/contracts-bedrock/src/L2/interfaces/ISuperchainWETH.sol @@ -39,8 +39,6 @@ interface ISuperchainWETH { /// @param _dst Address to relay tokens to. /// @param _wad Amount of tokens to relay. function relayERC20(address _from, address _dst, uint256 _wad) external; - - function __constructor__() external; } interface ISuperchainWETHERC20 is IWETH, ISuperchainWETH { }